column             type            range (min to max)
author             int64           658 to 755k
date               stringlengths   19 to 19
timezone           int64           -46,800 to 43.2k
hash               stringlengths   40 to 40
message            stringlengths   5 to 490
mods               list
language           stringclasses   20 values
license            stringclasses   3 values
repo               stringlengths   5 to 68
original_message   stringlengths   12 to 491

Each record below lists these ten fields in this order, one value per line.
259,948
17.10.2018 09:57:02
25,200
9d17eba121dab054c21307b9696ba7471dff4a74
compressio: do not schedule new I/Os when there is no worker (stream closed).
[ { "change_type": "MODIFY", "old_path": "pkg/compressio/compressio.go", "new_path": "pkg/compressio/compressio.go", "diff": "@@ -323,10 +323,10 @@ func (p *pool) schedule(c *chunk, callback func(*chunk) error) error {\ninputChan chan *chunk\noutputChan chan result\n)\n- if c != nil {\n+ if c != nil && len(p.workers) != 0 {\ninputChan = p.workers[(p.nextInput+1)%len(p.workers)].input\n}\n- if callback != nil && p.nextOutput != p.nextInput {\n+ if callback != nil && p.nextOutput != p.nextInput && len(p.workers) != 0 {\noutputChan = p.workers[(p.nextOutput+1)%len(p.workers)].output\n}\nif inputChan == nil && outputChan == nil {\n" } ]
Go
Apache License 2.0
google/gvisor
compressio: do not schedule new I/Os when there is no worker (stream closed). PiperOrigin-RevId: 217536677 Change-Id: Ib9a5a2542df12d0bc5592b91463ffd646e2ec295
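The guard added in the diff above prevents a modulo-by-zero panic when the stream has been closed and the worker slice is empty. A minimal standalone Go sketch of the same idea (hypothetical types, not gVisor's actual compressio package):

    package main

    import (
        "errors"
        "fmt"
    )

    type chunk struct{ data []byte }

    type worker struct {
        input chan *chunk
    }

    type pool struct {
        workers   []*worker
        nextInput int
    }

    // schedule hands c to the next worker, or reports that the stream is
    // closed when no workers remain. Indexing p.workers with a modulo but
    // without the length check would panic with a divide-by-zero once the
    // pool has been torn down.
    func (p *pool) schedule(c *chunk) error {
        var inputChan chan *chunk
        if c != nil && len(p.workers) != 0 {
            inputChan = p.workers[(p.nextInput+1)%len(p.workers)].input
        }
        if inputChan == nil {
            return errors.New("stream is closed, no workers available")
        }
        inputChan <- c
        p.nextInput++
        return nil
    }

    func main() {
        p := &pool{} // No workers: simulates a closed stream.
        if err := p.schedule(&chunk{data: []byte("x")}); err != nil {
            fmt.Println("schedule:", err)
        }
    }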
259,891
17.10.2018 10:50:24
25,200
9b3550f70bf1612e2c474b3826b0347b21503401
runsc: Add --pid flag to runsc kill. --pid allows specific processes to be signalled rather than the container root process or all processes in the container. containerd needs to SIGKILL exec'd processes that time out and check whether processes are still alive.
[ { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -756,8 +756,22 @@ func (l *Loader) signalProcess(cid string, pid, signo int32, sendToFGProcess boo\nep, ok := l.processes[eid]\nl.mu.Unlock()\n+ // The caller may be signaling a process not started directly via exec.\n+ // In this case, find the process in the container's PID namespace and\n+ // signal it.\nif !ok {\n- return fmt.Errorf(\"failed to signal container %q PID %d: no such PID\", cid, pid)\n+ ep, ok := l.processes[execID{cid: cid}]\n+ if !ok {\n+ return fmt.Errorf(\"no container with ID: %q\", cid)\n+ }\n+ tg := ep.tg.PIDNamespace().ThreadGroupWithID(kernel.ThreadID(pid))\n+ if tg == nil {\n+ return fmt.Errorf(\"failed to signal container %q PID %d: no such process\", cid, pid)\n+ }\n+ if tg.Leader().ContainerID() != cid {\n+ return fmt.Errorf(\"process %d is part of a different container: %q\", pid, tg.Leader().ContainerID())\n+ }\n+ return tg.SendSignal(&arch.SignalInfo{Signo: signo})\n}\nif !sendToFGProcess {\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/kill.go", "new_path": "runsc/cmd/kill.go", "diff": "@@ -31,6 +31,7 @@ import (\n// Kill implements subcommands.Command for the \"kill\" command.\ntype Kill struct {\nall bool\n+ pid int\n}\n// Name implements subcommands.Command.Name.\n@@ -51,6 +52,7 @@ func (*Kill) Usage() string {\n// SetFlags implements subcommands.Command.SetFlags.\nfunc (k *Kill) SetFlags(f *flag.FlagSet) {\nf.BoolVar(&k.all, \"all\", false, \"send the specified signal to all processes inside the container\")\n+ f.IntVar(&k.pid, \"pid\", 0, \"send the specified signal to a specific process\")\n}\n// Execute implements subcommands.Command.Execute.\n@@ -63,6 +65,10 @@ func (k *Kill) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nid := f.Arg(0)\nconf := args[0].(*boot.Config)\n+ if k.pid != 0 && k.all {\n+ Fatalf(\"it is invalid to specify both --all and --pid\")\n+ }\n+\nc, err := container.Load(conf.RootDir, id)\nif err != nil {\nFatalf(\"error loading container: %v\", err)\n@@ -80,9 +86,16 @@ func (k *Kill) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nif err != nil {\nFatalf(\"%v\", err)\n}\n- if err := c.Signal(sig, k.all); err != nil {\n+\n+ if k.pid != 0 {\n+ if err := c.SignalProcess(sig, int32(k.pid)); err != nil {\n+ Fatalf(\"failed to signal pid %d: %v\", k.pid, err)\n+ }\n+ } else {\n+ if err := c.SignalContainer(sig, k.all); err != nil {\nFatalf(\"%v\", err)\n}\n+ }\nreturn subcommands.ExitSuccess\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/container.go", "new_path": "runsc/container/container.go", "diff": "@@ -174,7 +174,7 @@ func Load(rootDir, id string) (*Container, error) {\n} else if c.Status == Running {\n// Container state should reflect the actual state of the application, so\n// we don't consider gofer process here.\n- if err := c.Signal(syscall.Signal(0), false); err != nil {\n+ if err := c.SignalContainer(syscall.Signal(0), false); err != nil {\nc.changeStatus(Stopped)\n}\n}\n@@ -445,7 +445,7 @@ func (c *Container) SandboxPid() int {\nfunc (c *Container) Wait() (syscall.WaitStatus, error) {\nlog.Debugf(\"Wait on container %q\", c.ID)\nif !c.isSandboxRunning() {\n- return 0, fmt.Errorf(\"container is not running\")\n+ return 0, fmt.Errorf(\"sandbox is not running\")\n}\nreturn c.Sandbox.Wait(c.ID)\n}\n@@ -455,7 +455,7 @@ func (c *Container) Wait() (syscall.WaitStatus, error) {\nfunc (c *Container) WaitRootPID(pid int32, clearStatus bool) 
(syscall.WaitStatus, error) {\nlog.Debugf(\"Wait on PID %d in sandbox %q\", pid, c.Sandbox.ID)\nif !c.isSandboxRunning() {\n- return 0, fmt.Errorf(\"container is not running\")\n+ return 0, fmt.Errorf(\"sandbox is not running\")\n}\nreturn c.Sandbox.WaitPID(c.Sandbox.ID, pid, clearStatus)\n}\n@@ -465,16 +465,16 @@ func (c *Container) WaitRootPID(pid int32, clearStatus bool) (syscall.WaitStatus\nfunc (c *Container) WaitPID(pid int32, clearStatus bool) (syscall.WaitStatus, error) {\nlog.Debugf(\"Wait on PID %d in container %q\", pid, c.ID)\nif !c.isSandboxRunning() {\n- return 0, fmt.Errorf(\"container is not running\")\n+ return 0, fmt.Errorf(\"sandbox is not running\")\n}\nreturn c.Sandbox.WaitPID(c.ID, pid, clearStatus)\n}\n-// Signal sends the signal to the container. If all is true and signal is\n-// SIGKILL, then waits for all processes to exit before returning.\n-// Signal returns an error if the container is already stopped.\n+// SignalContainer sends the signal to the container. If all is true and signal\n+// is SIGKILL, then waits for all processes to exit before returning.\n+// SignalContainer returns an error if the container is already stopped.\n// TODO: Distinguish different error types.\n-func (c *Container) Signal(sig syscall.Signal, all bool) error {\n+func (c *Container) SignalContainer(sig syscall.Signal, all bool) error {\nlog.Debugf(\"Signal container %q: %v\", c.ID, sig)\n// Signaling container in Stopped state is allowed. When all=false,\n// an error will be returned anyway; when all=true, this allows\n@@ -485,11 +485,23 @@ func (c *Container) Signal(sig syscall.Signal, all bool) error {\nreturn err\n}\nif !c.isSandboxRunning() {\n- return fmt.Errorf(\"container is not running\")\n+ return fmt.Errorf(\"sandbox is not running\")\n}\nreturn c.Sandbox.SignalContainer(c.ID, sig, all)\n}\n+// SignalProcess sends sig to a specific process in the container.\n+func (c *Container) SignalProcess(sig syscall.Signal, pid int32) error {\n+ log.Debugf(\"Signal process %d in container %q: %v\", pid, c.ID, sig)\n+ if err := c.requireStatus(\"signal a process inside\", Running); err != nil {\n+ return err\n+ }\n+ if !c.isSandboxRunning() {\n+ return fmt.Errorf(\"sandbox is not running\")\n+ }\n+ return c.Sandbox.SignalProcess(c.ID, int32(pid), sig, false)\n+}\n+\n// ForwardSignals forwards all signals received by the current process to the\n// container process inside the sandbox. 
It returns a function that will stop\n// forwarding signals.\n@@ -663,7 +675,7 @@ func (c *Container) waitForStopped() error {\nb := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)\nop := func() error {\nif c.isSandboxRunning() {\n- if err := c.Signal(syscall.Signal(0), false); err == nil {\n+ if err := c.SignalContainer(syscall.Signal(0), false); err == nil {\nreturn fmt.Errorf(\"container is still running\")\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/container_test.go", "new_path": "runsc/container/container_test.go", "diff": "@@ -354,7 +354,7 @@ func TestLifecycle(t *testing.T) {\n<-ch\ntime.Sleep(100 * time.Millisecond)\n// Send the container a SIGTERM which will cause it to stop.\n- if err := c.Signal(syscall.SIGTERM, false); err != nil {\n+ if err := c.SignalContainer(syscall.SIGTERM, false); err != nil {\nt.Fatalf(\"error sending signal %v to container: %v\", syscall.SIGTERM, err)\n}\n// Wait for it to die.\n@@ -559,6 +559,62 @@ func TestExec(t *testing.T) {\n}\n}\n+// TestKillPid verifies that we can signal individual exec'd processes.\n+func TestKillPid(t *testing.T) {\n+ for _, conf := range configs(overlay) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+\n+ app, err := testutil.FindFile(\"runsc/container/test_app\")\n+ if err != nil {\n+ t.Fatal(\"error finding test_app:\", err)\n+ }\n+\n+ const nProcs = 4\n+ spec := testutil.NewSpecWithArgs(app, \"task-tree\", \"--depth\", strconv.Itoa(nProcs-1), \"--width=1\", \"--pause=true\")\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ defer os.RemoveAll(bundleDir)\n+\n+ // Create and start the container.\n+ cont, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer cont.Destroy()\n+ if err := cont.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+\n+ // Verify that all processes are running.\n+ if err := waitForProcessCount(cont, nProcs); err != nil {\n+ t.Fatalf(\"timed out waiting for processes to start: %v\", err)\n+ }\n+\n+ // Kill the child process with the largest PID.\n+ procs, err := cont.Processes()\n+ if err != nil {\n+ t.Fatalf(\"failed to get process list: %v\", err)\n+ }\n+ var pid int32\n+ for _, p := range procs {\n+ if pid < int32(p.PID) {\n+ pid = int32(p.PID)\n+ }\n+ }\n+ if err := cont.SignalProcess(syscall.SIGKILL, pid); err != nil {\n+ t.Fatalf(\"failed to signal process %d: %v\", pid, err)\n+ }\n+\n+ // Verify that one process is gone.\n+ if err := waitForProcessCount(cont, nProcs-1); err != nil {\n+ t.Fatal(err)\n+ }\n+ }\n+}\n+\n// TestCheckpointRestore creates a container that continuously writes successive integers\n// to a file. To test checkpoint and restore functionality, the container is\n// checkpointed and the last number printed to the file is recorded. 
Then, it is restored in two\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/multi_container_test.go", "new_path": "runsc/container/multi_container_test.go", "diff": "@@ -335,7 +335,7 @@ func TestMultiContainerSignal(t *testing.T) {\n}\n// Kill process 2.\n- if err := containers[1].Signal(syscall.SIGKILL, false); err != nil {\n+ if err := containers[1].SignalContainer(syscall.SIGKILL, false); err != nil {\nt.Errorf(\"failed to kill process 2: %v\", err)\n}\n@@ -368,12 +368,12 @@ func TestMultiContainerSignal(t *testing.T) {\n// Now that process 2 is gone, ensure we get an error trying to\n// signal it again.\n- if err := containers[1].Signal(syscall.SIGKILL, false); err == nil {\n+ if err := containers[1].SignalContainer(syscall.SIGKILL, false); err == nil {\nt.Errorf(\"container %q shouldn't exist, but we were able to signal it\", containers[1].ID)\n}\n// Kill process 1.\n- if err := containers[0].Signal(syscall.SIGKILL, false); err != nil {\n+ if err := containers[0].SignalContainer(syscall.SIGKILL, false); err != nil {\nt.Errorf(\"failed to kill process 1: %v\", err)\n}\n@@ -395,7 +395,7 @@ func TestMultiContainerSignal(t *testing.T) {\n}\n// The sentry should be gone, so signaling should yield an error.\n- if err := containers[0].Signal(syscall.SIGKILL, false); err == nil {\n+ if err := containers[0].SignalContainer(syscall.SIGKILL, false); err == nil {\nt.Errorf(\"sandbox %q shouldn't exist, but we were able to signal it\", containers[0].Sandbox.ID)\n}\n}\n@@ -577,7 +577,7 @@ func TestMultiContainerKillAll(t *testing.T) {\nif tc.killContainer {\n// First kill the init process to make the container be stopped with\n// processes still running inside.\n- containers[1].Signal(syscall.SIGKILL, false)\n+ containers[1].SignalContainer(syscall.SIGKILL, false)\nop := func() error {\nc, err := Load(conf.RootDir, ids[1])\nif err != nil {\n@@ -598,7 +598,7 @@ func TestMultiContainerKillAll(t *testing.T) {\nt.Fatalf(\"failed to load child container %q: %v\", c.ID, err)\n}\n// Kill'Em All\n- if err := c.Signal(syscall.SIGKILL, true); err != nil {\n+ if err := c.SignalContainer(syscall.SIGKILL, true); err != nil {\nt.Fatalf(\"failed to send SIGKILL to container %q: %v\", c.ID, err)\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/test_app.go", "new_path": "runsc/container/test_app.go", "diff": "@@ -125,6 +125,7 @@ func server(listener net.Listener, out *os.File) {\ntype taskTree struct {\ndepth int\nwidth int\n+ pause bool\n}\n// Name implements subcommands.Command.\n@@ -146,6 +147,7 @@ func (*taskTree) Usage() string {\nfunc (c *taskTree) SetFlags(f *flag.FlagSet) {\nf.IntVar(&c.depth, \"depth\", 1, \"number of levels to create\")\nf.IntVar(&c.width, \"width\", 1, \"number of tasks at each level\")\n+ f.BoolVar(&c.pause, \"pause\", false, \"whether the tasks should pause perpetually\")\n}\n// Execute implements subcommands.Command.\n@@ -164,7 +166,8 @@ func (c *taskTree) Execute(ctx context.Context, f *flag.FlagSet, args ...interfa\ncmd := exec.Command(\n\"/proc/self/exe\", c.Name(),\n\"--depth\", strconv.Itoa(c.depth-1),\n- \"--width\", strconv.Itoa(c.width))\n+ \"--width\", strconv.Itoa(c.width),\n+ \"--pause\", strconv.FormatBool(c.pause))\ncmd.Stdout = os.Stdout\ncmd.Stderr = os.Stderr\n@@ -177,6 +180,11 @@ func (c *taskTree) Execute(ctx context.Context, f *flag.FlagSet, args ...interfa\nfor _, c := range cmds {\nc.Wait()\n}\n+\n+ if c.pause {\n+ select {}\n+ }\n+\nreturn subcommands.ExitSuccess\n}\n" } ]
Go
Apache License 2.0
google/gvisor
runsc: Add --pid flag to runsc kill. --pid allows specific processes to be signalled rather than the container root process or all processes in the container. containerd needs to SIGKILL exec'd processes that timeout and check whether processes are still alive. PiperOrigin-RevId: 217547636 Change-Id: I2058ebb548b51c8eb748f5884fb88bad0b532e45
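The kill command above gains a --pid flag that is mutually exclusive with --all. A small standalone sketch of that flag-validation pattern using the standard flag package (the messages are illustrative, not runsc's real output):

    package main

    import (
        "flag"
        "fmt"
        "os"
    )

    func main() {
        // --pid targets one process; --all targets every process in the
        // container. The two are mutually exclusive, mirroring the check
        // added to runsc kill.
        pid := flag.Int("pid", 0, "send the signal to a specific process")
        all := flag.Bool("all", false, "send the signal to all processes")
        flag.Parse()

        if *pid != 0 && *all {
            fmt.Fprintln(os.Stderr, "it is invalid to specify both --all and --pid")
            os.Exit(1)
        }

        switch {
        case *pid != 0:
            fmt.Printf("would signal only PID %d\n", *pid)
        case *all:
            fmt.Println("would signal all processes in the container")
        default:
            fmt.Println("would signal the container's root process")
        }
    }

Running it with "--pid 123" exercises the single-process branch; the Go flag package accepts either one or two leading dashes.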
259,891
17.10.2018 10:54:19
25,200
8cbca46b6d99bcf0b2647ffa247b0963f872916b
Remove incorrect TODO.
[ { "change_type": "MODIFY", "old_path": "runsc/boot/events.go", "new_path": "runsc/boot/events.go", "diff": "@@ -29,7 +29,6 @@ type Event struct {\n// Stats is the runc specific stats structure for stability when encoding and\n// decoding stats.\n-// TODO: Many fields aren't obtainable due to a lack of cgroups.\ntype Stats struct {\nMemory Memory `json:\"memory\"`\nPids Pids `json:\"pids\"`\n" } ]
Go
Apache License 2.0
google/gvisor
Remove incorrect TODO. PiperOrigin-RevId: 217548429 Change-Id: Ie640c881fdc4fc70af58c8ca834df1ac531e519a
259,881
17.10.2018 11:51:43
25,200
578fe5a50dcf8e104b6bce3802987b0f8c069ade
Fix PTRACE_GETREGSET write size. The existing logic is backwards and writes iov_len == 0 for a full write.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/ptrace.go", "new_path": "pkg/sentry/kernel/ptrace.go", "diff": "@@ -921,7 +921,13 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\nif err != nil {\nreturn err\n}\n- ar.End -= usermem.Addr(n)\n+\n+ // Update iovecs to represent the range of the written register set.\n+ end, ok := ar.Start.AddLength(uint64(n))\n+ if !ok {\n+ panic(fmt.Sprintf(\"%#x + %#x overflows. Invalid reg size > %#x\", ar.Start, n, ar.Length()))\n+ }\n+ ar.End = end\nreturn t.CopyOutIovecs(data, usermem.AddrRangeSeqOf(ar))\ncase linux.PTRACE_SETREGS:\n" } ]
Go
Apache License 2.0
google/gvisor
Fix PTRACE_GETREGSET write size The existing logic is backwards and writes iov_len == 0 for a full write. PiperOrigin-RevId: 217560377 Change-Id: I5a39c31bf0ba9063a8495993bfef58dc8ab7c5fa
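The fix replaces "ar.End -= n", which shrinks the range from the wrong end and leaves a zero-length iovec after a full write, with End = Start + n plus an overflow check. A self-contained sketch of the corrected arithmetic using plain integers instead of gVisor's usermem.AddrRange:

    package main

    import "fmt"

    // addrRange mimics a [Start, End) address range.
    type addrRange struct {
        Start, End uint64
    }

    // writtenRange returns the sub-range actually filled by a register-set
    // write of n bytes. The old logic did r.End -= n, which for a full write
    // (n == r.End-r.Start) collapses the range to zero length; the correct
    // result is [Start, Start+n), guarded against overflow.
    func writtenRange(r addrRange, n uint64) (addrRange, error) {
        end := r.Start + n
        if end < r.Start || end > r.End {
            return addrRange{}, fmt.Errorf("%#x + %#x overflows or exceeds the range", r.Start, n)
        }
        return addrRange{Start: r.Start, End: end}, nil
    }

    func main() {
        r := addrRange{Start: 0x1000, End: 0x10d8} // 216-byte register set.
        got, err := writtenRange(r, 216)           // Full write.
        fmt.Println(got, err)                      // {4096 4312} <nil>: length 216, not 0.
    }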
259,881
17.10.2018 13:05:14
25,200
8fa6f6fe769ede042b651e5b82bd93721e3aa339
Reflow comment to 80 columns
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/strace/strace.go", "new_path": "pkg/sentry/strace/strace.go", "diff": "@@ -568,8 +568,10 @@ func (s SyscallMap) SyscallExit(context interface{}, t *kernel.Task, sysno, rval\n}\n}\n-// ConvertToSysnoMap converts the names to a map keyed on the syscall number and value set to true.\n-// The map is in a convenient format to call SyscallFlagsTable.Enable().\n+// ConvertToSysnoMap converts the names to a map keyed on the syscall number\n+// and value set to true.\n+//\n+// The map is in a convenient format to pass to SyscallFlagsTable.Enable().\nfunc (s SyscallMap) ConvertToSysnoMap(syscalls []string) (map[uintptr]bool, error) {\nif syscalls == nil {\n// Sentinel: no list.\n" } ]
Go
Apache License 2.0
google/gvisor
Reflow comment to 80 columns PiperOrigin-RevId: 217573168 Change-Id: Ic1914d0ef71bab020e3ee11cf9c4a50a702bd8dd
259,854
17.10.2018 13:24:52
25,200
8c85f5e9ce1d7e25010ac295006555a46034bc39
Fix typos in socket_test
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/socket_test.go", "new_path": "pkg/sentry/fs/host/socket_test.go", "diff": "@@ -71,18 +71,18 @@ func TestSocketIsBlocking(t *testing.T) {\nt.Fatalf(\"newSocket(%v) failed => %v\", pair[0], err)\n}\ndefer sock.DecRef()\n- // Test that the socket now is non blocking.\n+ // Test that the socket now is non-blocking.\nif fl, err = getFl(pair[0]); err != nil {\nt.Fatalf(\"getFl: fcntl(%v, GETFL) => %v\", pair[0], err)\n}\nif fl&syscall.O_NONBLOCK != syscall.O_NONBLOCK {\n- t.Errorf(\"Expected socket %v to have becoming non blocking\", pair[0])\n+ t.Errorf(\"Expected socket %v to have become non-blocking\", pair[0])\n}\nif fl, err = getFl(pair[1]); err != nil {\nt.Fatalf(\"getFl: fcntl(%v, GETFL) => %v\", pair[1], err)\n}\nif fl&syscall.O_NONBLOCK == syscall.O_NONBLOCK {\n- t.Errorf(\"Did not expect socket %v to become non blocking\", pair[1])\n+ t.Errorf(\"Did not expect socket %v to become non-blocking\", pair[1])\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix typos in socket_test PiperOrigin-RevId: 217576188 Change-Id: I82e45c306c5c9161e207311c7dbb8a983820c1df
259,854
17.10.2018 16:30:11
25,200
f7419fec26d1fd0d12936cc44f2c3481bbade033
Use generic ilist in Unix transport queue. This should improve performance.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/transport/BUILD", "new_path": "pkg/sentry/socket/unix/transport/BUILD", "diff": "package(licenses = [\"notice\"]) # Apache 2.0\nload(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n+load(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\n+\n+go_template_instance(\n+ name = \"transport_message_list\",\n+ out = \"transport_message_list.go\",\n+ package = \"transport\",\n+ prefix = \"message\",\n+ template = \"//pkg/ilist:generic_list\",\n+ types = {\n+ \"Element\": \"*message\",\n+ \"Linker\": \"*message\",\n+ },\n+)\ngo_library(\nname = \"transport\",\n@@ -9,6 +22,7 @@ go_library(\n\"connectioned_state.go\",\n\"connectionless.go\",\n\"queue.go\",\n+ \"transport_message_list.go\",\n\"unix.go\",\n],\nimportpath = \"gvisor.googlesource.com/gvisor/pkg/sentry/socket/unix/transport\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/transport/queue.go", "new_path": "pkg/sentry/socket/unix/transport/queue.go", "diff": "@@ -17,7 +17,6 @@ package transport\nimport (\n\"sync\"\n- \"gvisor.googlesource.com/gvisor/pkg/ilist\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n\"gvisor.googlesource.com/gvisor/pkg/waiter\"\n)\n@@ -33,7 +32,7 @@ type queue struct {\nclosed bool\nused int64\nlimit int64\n- dataList ilist.List\n+ dataList messageList\n}\n// newQueue allocates and initializes a new queue.\n@@ -61,7 +60,7 @@ func (q *queue) Close() {\nfunc (q *queue) Reset() {\nq.mu.Lock()\nfor cur := q.dataList.Front(); cur != nil; cur = cur.Next() {\n- cur.(*message).Release()\n+ cur.Release()\n}\nq.dataList.Reset()\nq.used = 0\n@@ -165,7 +164,7 @@ func (q *queue) Dequeue() (e *message, notify bool, err *tcpip.Error) {\nnotify = !q.bufWritable()\n- e = q.dataList.Front().(*message)\n+ e = q.dataList.Front()\nq.dataList.Remove(e)\nq.used -= e.Length()\n@@ -189,7 +188,7 @@ func (q *queue) Peek() (*message, *tcpip.Error) {\nreturn nil, err\n}\n- return q.dataList.Front().(*message).Peek(), nil\n+ return q.dataList.Front().Peek(), nil\n}\n// QueuedSize returns the number of bytes currently in the queue, that is, the\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/transport/unix.go", "new_path": "pkg/sentry/socket/unix/transport/unix.go", "diff": "@@ -19,7 +19,6 @@ import (\n\"sync\"\n\"sync/atomic\"\n- \"gvisor.googlesource.com/gvisor/pkg/ilist\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer\"\n\"gvisor.googlesource.com/gvisor/pkg/waiter\"\n@@ -243,7 +242,7 @@ type BoundEndpoint interface {\n//\n// +stateify savable\ntype message struct {\n- ilist.Entry\n+ messageEntry\n// Data is the Message payload.\nData buffer.View\n" } ]
Go
Apache License 2.0
google/gvisor
Use generic ilist in Unix transport queue This should improve performance. PiperOrigin-RevId: 217610560 Change-Id: I370f196ea2396f1715a460b168ecbee197f94d6c
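The typed messageList is generated by gVisor's go_generics Bazel template; purely as an analogy (assuming nothing about that tooling), a minimal hand-written list specialized to *message shows why the call sites above drop their .(*message) type assertions:

    package main

    import "fmt"

    type message struct {
        data       string
        next, prev *message
    }

    // messageList is a minimal doubly-linked list specialized to *message,
    // standing in for the code-generated transport_message_list.go. Because
    // Front returns *message directly, callers no longer need an interface
    // type assertion like list.Front().(*message).
    type messageList struct {
        head, tail *message
    }

    func (l *messageList) Front() *message { return l.head }

    func (l *messageList) PushBack(m *message) {
        m.prev, m.next = l.tail, nil
        if l.tail != nil {
            l.tail.next = m
        } else {
            l.head = m
        }
        l.tail = m
    }

    func (l *messageList) Remove(m *message) {
        if m.prev != nil {
            m.prev.next = m.next
        } else {
            l.head = m.next
        }
        if m.next != nil {
            m.next.prev = m.prev
        } else {
            l.tail = m.prev
        }
        m.next, m.prev = nil, nil
    }

    func main() {
        var q messageList
        q.PushBack(&message{data: "a"})
        q.PushBack(&message{data: "b"})
        front := q.Front() // Already *message; no type assertion needed.
        q.Remove(front)
        fmt.Println(front.data, q.Front().data) // a b
    }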
259,854
20.10.2018 17:57:19
25,200
d7c11c741752813e56b7d8726a575a520260c56a
Refcount Unix transport queue. This allows us to release messages in the queue when all users close.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/transport/BUILD", "new_path": "pkg/sentry/socket/unix/transport/BUILD", "diff": "@@ -29,6 +29,7 @@ go_library(\nvisibility = [\"//:sandbox\"],\ndeps = [\n\"//pkg/ilist\",\n+ \"//pkg/refs\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n\"//pkg/waiter\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/transport/connectioned.go", "new_path": "pkg/sentry/socket/unix/transport/connectioned.go", "diff": "@@ -145,10 +145,12 @@ func NewPair(stype SockType, uid UniqueIDProvider) (Endpoint, Endpoint) {\nb.receiver = &queueReceiver{q2}\n}\n+ q2.IncRef()\na.connected = &connectedEndpoint{\nendpoint: b,\nwriteQueue: q2,\n}\n+ q1.IncRef()\nb.connected = &connectedEndpoint{\nendpoint: a,\nwriteQueue: q1,\n@@ -282,12 +284,14 @@ func (e *connectionedEndpoint) BidirectionalConnect(ce ConnectingEndpoint, retur\nidGenerator: e.idGenerator,\nstype: e.stype,\n}\n+\nreadQueue := newQueue(ce.WaiterQueue(), ne.Queue, initialLimit)\n- writeQueue := newQueue(ne.Queue, ce.WaiterQueue(), initialLimit)\nne.connected = &connectedEndpoint{\nendpoint: ce,\nwriteQueue: readQueue,\n}\n+\n+ writeQueue := newQueue(ne.Queue, ce.WaiterQueue(), initialLimit)\nif e.stype == SockStream {\nne.receiver = &streamQueueReceiver{queueReceiver: queueReceiver{readQueue: writeQueue}}\n} else {\n@@ -297,10 +301,12 @@ func (e *connectionedEndpoint) BidirectionalConnect(ce ConnectingEndpoint, retur\nselect {\ncase e.acceptedChan <- ne:\n// Commit state.\n+ writeQueue.IncRef()\nconnected := &connectedEndpoint{\nendpoint: ne,\nwriteQueue: writeQueue,\n}\n+ readQueue.IncRef()\nif e.stype == SockStream {\nreturnConnect(&streamQueueReceiver{queueReceiver: queueReceiver{readQueue: readQueue}}, connected)\n} else {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/transport/connectionless.go", "new_path": "pkg/sentry/socket/unix/transport/connectionless.go", "diff": "@@ -82,9 +82,13 @@ func (e *connectionlessEndpoint) UnidirectionalConnect() (ConnectedEndpoint, *tc\nif r == nil {\nreturn nil, tcpip.ErrConnectionRefused\n}\n+ q := r.(*queueReceiver).readQueue\n+ if !q.TryIncRef() {\n+ return nil, tcpip.ErrConnectionRefused\n+ }\nreturn &connectedEndpoint{\nendpoint: e,\n- writeQueue: r.(*queueReceiver).readQueue,\n+ writeQueue: q,\n}, nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/transport/queue.go", "new_path": "pkg/sentry/socket/unix/transport/queue.go", "diff": "@@ -17,6 +17,7 @@ package transport\nimport (\n\"sync\"\n+ \"gvisor.googlesource.com/gvisor/pkg/refs\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n\"gvisor.googlesource.com/gvisor/pkg/waiter\"\n)\n@@ -25,6 +26,8 @@ import (\n//\n// +stateify savable\ntype queue struct {\n+ refs.AtomicRefCount\n+\nReaderQueue *waiter.Queue\nWriterQueue *waiter.Queue\n@@ -67,6 +70,13 @@ func (q *queue) Reset() {\nq.mu.Unlock()\n}\n+// DecRef implements RefCounter.DecRef with destructor q.Reset.\n+func (q *queue) DecRef() {\n+ q.DecRefWithDestructor(q.Reset)\n+ // We don't need to notify after resetting because no one cares about\n+ // this queue after all references have been dropped.\n+}\n+\n// IsReadable determines if q is currently readable.\nfunc (q *queue) IsReadable() bool {\nq.mu.Lock()\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/transport/unix.go", "new_path": "pkg/sentry/socket/unix/transport/unix.go", "diff": "@@ -381,7 +381,9 @@ func (q *queueReceiver) RecvMaxQueueSize() int64 {\n}\n// Release implements Receiver.Release.\n-func 
(*queueReceiver) Release() {}\n+func (q *queueReceiver) Release() {\n+ q.readQueue.DecRef()\n+}\n// streamQueueReceiver implements Receiver for stream sockets.\n//\n@@ -694,7 +696,9 @@ func (e *connectedEndpoint) SendMaxQueueSize() int64 {\n}\n// Release implements ConnectedEndpoint.Release.\n-func (*connectedEndpoint) Release() {}\n+func (e *connectedEndpoint) Release() {\n+ e.writeQueue.DecRef()\n+}\n// baseEndpoint is an embeddable unix endpoint base used in both the connected and connectionless\n// unix domain socket Endpoint implementations.\n@@ -945,4 +949,6 @@ func (e *baseEndpoint) GetRemoteAddress() (tcpip.FullAddress, *tcpip.Error) {\n}\n// Release implements BoundEndpoint.Release.\n-func (*baseEndpoint) Release() {}\n+func (*baseEndpoint) Release() {\n+ // Binding a baseEndpoint doesn't take a reference.\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Refcount Unix transport queue This allows us to release messages in the queue when all users close. PiperOrigin-RevId: 218033550 Change-Id: I2f6e87650fced87a3977e3b74c64775c7b885c1b
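A rough sketch of the reference-counting pattern the queue adopts above, not gVisor's pkg/refs implementation: IncRef/TryIncRef on an atomic counter, with DecRef releasing the queued contents once the last reference drops.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // queue is a toy stand-in for the transport queue: it starts with one
    // reference held by its creator and frees its contents when the count
    // reaches zero.
    type queue struct {
        refs  int64 // 1 at creation.
        items []string
    }

    func newQueue() *queue { return &queue{refs: 1} }

    func (q *queue) IncRef() { atomic.AddInt64(&q.refs, 1) }

    // TryIncRef takes a reference only if the queue is still live, mirroring
    // the connectionless UnidirectionalConnect change above.
    func (q *queue) TryIncRef() bool {
        for {
            r := atomic.LoadInt64(&q.refs)
            if r == 0 {
                return false // Already released.
            }
            if atomic.CompareAndSwapInt64(&q.refs, r, r+1) {
                return true
            }
        }
    }

    // DecRef drops a reference and resets the queue when none remain.
    func (q *queue) DecRef() {
        if atomic.AddInt64(&q.refs, -1) == 0 {
            q.items = nil // Release queued messages.
        }
    }

    func main() {
        q := newQueue()
        q.items = append(q.items, "msg")
        q.IncRef()                               // Second endpoint shares the queue.
        q.DecRef()                               // First endpoint closes.
        fmt.Println(len(q.items), q.TryIncRef()) // 1 true: still referenced.
        q.DecRef()                               // Second endpoint closes.
        q.DecRef()                               // Drop the reference TryIncRef took.
        fmt.Println(len(q.items), q.TryIncRef()) // 0 false: contents released.
    }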
259,884
21.10.2018 19:41:44
25,200
c2c0f9cb7e8320de06ef280c6184bb6aeda71627
Updated cleanup code to be more explicit about ignoring errors. Errors are shown as being ignored by assigning to the blank identifier.
[ { "change_type": "MODIFY", "old_path": "runsc/cgroup/cgroup.go", "new_path": "runsc/cgroup/cgroup.go", "diff": "@@ -190,7 +190,9 @@ func (c *Cgroup) Install(res *specs.LinuxResources) error {\n// Mark that cgroup resources are owned by me.\nlog.Debugf(\"Creating cgroup %q\", c.Name)\nc.Own = true\n- clean := specutils.MakeCleanup(func() { c.Uninstall() })\n+ // The Cleanup object cleans up partially created cgroups when an error occurs.\n+ // Errors occuring during cleanup itself are ignored.\n+ clean := specutils.MakeCleanup(func() { _ = c.Uninstall() })\ndefer clean.Clean()\nfor key, ctrl := range controllers {\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/container.go", "new_path": "runsc/container/container.go", "diff": "@@ -262,7 +262,9 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo\nStatus: Creating,\nOwner: os.Getenv(\"USER\"),\n}\n- cu := specutils.MakeCleanup(func() { c.Destroy() })\n+ // The Cleanup object cleans up partially created containers when an error occurs.\n+ // Any errors occuring during cleanup itself are ignored.\n+ cu := specutils.MakeCleanup(func() { _ = c.Destroy() })\ndefer cu.Clean()\n// If the metadata annotations indicate that this container should be\n@@ -424,6 +426,8 @@ func Run(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocke\nif err != nil {\nreturn 0, fmt.Errorf(\"error creating container: %v\", err)\n}\n+ // Clean up partially created container if an error ocurrs.\n+ // Any errors returned by Destroy() itself are ignored.\ndefer c.Destroy()\nif err := c.Start(conf); err != nil {\n" }, { "change_type": "MODIFY", "old_path": "runsc/sandbox/sandbox.go", "new_path": "runsc/sandbox/sandbox.go", "diff": "@@ -68,7 +68,9 @@ type Sandbox struct {\n// sandbox.\nfunc Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, userLog string, ioFiles []*os.File) (*Sandbox, error) {\ns := &Sandbox{ID: id}\n- c := specutils.MakeCleanup(func() { s.destroy() })\n+ // The Cleanup object cleans up partially created sandboxes when an error occurs.\n+ // Any errors occuring during cleanup itself are ignored.\n+ c := specutils.MakeCleanup(func() { _ = s.destroy() })\ndefer c.Clean()\nif cg, ok := cgroup.New(spec); ok {\n" } ]
Go
Apache License 2.0
google/gvisor
Updated cleanup code to be more explicit about ignoring errors. Errors are shown as being ignored by assigning to the blank identifier. PiperOrigin-RevId: 218103819 Change-Id: I7cc7b9d8ac503a03de5504ebdeb99ed30a531cf2
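A brief sketch of the convention this change adopts: cleanup closures assign returned errors to the blank identifier so the decision to ignore them is explicit. The cleanup helper below is a hypothetical stand-in for specutils.MakeCleanup.

    package main

    import (
        "errors"
        "fmt"
    )

    // cleanup runs a function on Clean unless Release was called first,
    // roughly what specutils.MakeCleanup provides.
    type cleanup struct {
        f func()
    }

    func makeCleanup(f func()) cleanup { return cleanup{f: f} }

    func (c *cleanup) Clean() {
        if c.f != nil {
            c.f()
        }
    }

    func (c *cleanup) Release() { c.f = nil }

    type container struct{}

    // Destroy returns an error, but callers tearing down a partially built
    // container deliberately ignore it.
    func (c *container) Destroy() error { return errors.New("already destroyed") }

    func create() (*container, error) {
        c := &container{}
        // Clean up the partially created container if a later step fails.
        // The error from Destroy is explicitly ignored via the blank
        // identifier rather than silently dropped.
        cu := makeCleanup(func() { _ = c.Destroy() })
        defer cu.Clean()

        // ... further setup steps would go here; on success:
        cu.Release()
        return c, nil
    }

    func main() {
        c, err := create()
        fmt.Println(c != nil, err) // true <nil>
    }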
259,858
23.10.2018 11:26:31
25,200
ce3a762038006429b1eb3b855d4e9c5d700edfda
Remove artificial name length check. This should be determined by the filesystem.
[ { "change_type": "MODIFY", "old_path": "pkg/p9/handlers.go", "new_path": "pkg/p9/handlers.go", "diff": "@@ -27,8 +27,6 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n)\n-const maximumNameLength = 255\n-\n// ExtractErrno extracts a syscall.Errno from a error, best effort.\nfunc ExtractErrno(err error) syscall.Errno {\nswitch err {\n@@ -109,14 +107,11 @@ func (t *Tflush) handle(cs *connState) message {\n// checkSafeName validates the name and returns nil or returns an error.\nfunc checkSafeName(name string) error {\n- if name == \"\" || strings.Contains(name, \"/\") || name == \".\" || name == \"..\" {\n- return syscall.EINVAL\n- }\n- if len(name) > maximumNameLength {\n- return syscall.ENAMETOOLONG\n- }\n+ if name != \"\" && !strings.Contains(name, \"/\") && name != \".\" && name != \"..\" {\nreturn nil\n}\n+ return syscall.EINVAL\n+}\n// handle implements handler.handle.\nfunc (t *Tclunk) handle(cs *connState) message {\n@@ -979,11 +974,6 @@ func (t *Tstatfs) handle(cs *connState) message {\nreturn newErr(err)\n}\n- // Constrain the name length.\n- if st.NameLength > maximumNameLength {\n- st.NameLength = maximumNameLength\n- }\n-\nreturn &Rstatfs{st}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Remove artificial name length check. This should be determined by the filesystem. PiperOrigin-RevId: 218376553 Change-Id: I55d176e2cdf8acdd6642789af057b98bb8ca25b8
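After the change, checkSafeName accepts any non-empty component that contains no slash and is not "." or "..", leaving length limits to the filesystem. A quick table-driven sketch (a plain error substitutes for syscall.EINVAL so the example stands alone):

    package main

    import (
        "errors"
        "fmt"
        "strings"
    )

    var errInvalidName = errors.New("invalid name")

    // checkSafeName mirrors the simplified p9 validation: a path component
    // must be non-empty, must not contain a slash, and must not be "." or
    // "..". There is no longer an arbitrary 255-byte cap.
    func checkSafeName(name string) error {
        if name != "" && !strings.Contains(name, "/") && name != "." && name != ".." {
            return nil
        }
        return errInvalidName
    }

    func main() {
        for _, name := range []string{"file.txt", strings.Repeat("a", 300), "", ".", "..", "a/b"} {
            fmt.Printf("%q: %v\n", name, checkSafeName(name))
        }
    }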
259,858
23.10.2018 12:51:26
25,200
1369e17504f994024aea84bb0e4aa9ddce00f70a
Remove blanket TODO, as it is self-evident.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/endpoint.go", "new_path": "pkg/tcpip/transport/udp/endpoint.go", "diff": "@@ -378,7 +378,6 @@ func (e *endpoint) Peek([][]byte) (uintptr, tcpip.ControlMessages, *tcpip.Error)\n// SetSockOpt sets a socket option. Currently not supported.\nfunc (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\n- // TODO: Actually implement this.\nswitch v := opt.(type) {\ncase tcpip.V6OnlyOption:\n// We only recognize this option on v6 endpoints.\n" } ]
Go
Apache License 2.0
google/gvisor
Remove blanket TODO, as it is self-evident. PiperOrigin-RevId: 218390517 Change-Id: Ic891c1626e62a6c4ed57f8180740872bcd1be177
259,858
24.10.2018 15:51:46
25,200
e7191f058f550cc3a203a854a1d81f7746c96e53
Use TRAP to simplify vsyscall emulation.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ptrace/subprocess.go", "new_path": "pkg/sentry/platform/ptrace/subprocess.go", "diff": "@@ -357,15 +357,13 @@ func (t *thread) destroy() {\n// init initializes trace options.\nfunc (t *thread) init() {\n- // Set our TRACESYSGOOD option to differeniate real SIGTRAP. Also, we\n- // require the SECCOMP option to ensure that seccomp violations\n- // generate a ptrace event.\n+ // Set our TRACESYSGOOD option to differeniate real SIGTRAP.\n_, _, errno := syscall.RawSyscall6(\nsyscall.SYS_PTRACE,\nsyscall.PTRACE_SETOPTIONS,\nuintptr(t.tid),\n0,\n- syscall.PTRACE_O_TRACESYSGOOD|_PTRACE_O_TRACESECCOMP,\n+ syscall.PTRACE_O_TRACESYSGOOD,\n0, 0)\nif errno != 0 {\npanic(fmt.Sprintf(\"ptrace set options failed: %v\", errno))\n@@ -522,12 +520,6 @@ func (s *subprocess) switchToApp(c *context, ac arch.Context) bool {\n// Ensure registers are sane.\nupdateSyscallRegs(regs)\nreturn true\n- } else if sig == (seccompEvent | syscall.SIGTRAP) {\n- // Seccomp is enabled, and caught the system call. This\n- // is an emulated vsyscall call, since those are caught\n- // only by seccomp and explicitly set to trace.\n- updateSyscallRegs(regs)\n- return true\n} else if sig == syscall.SIGSTOP {\n// SIGSTOP was delivered to another thread in the same thread\n// group, which initiated another group stop. Just ignore it.\n@@ -544,9 +536,17 @@ func (s *subprocess) switchToApp(c *context, ac arch.Context) bool {\n// either delivered from the kernel or from this process. We\n// don't respect other signals.\nif c.signalInfo.Code > 0 {\n- return false // kernel.\n+ // The signal was generated by the kernel. We inspect\n+ // the signal information, and may patch it in order to\n+ // faciliate vsyscall emulation. See patchSignalInfo.\n+ patchSignalInfo(regs, &c.signalInfo)\n+ return false\n} else if c.signalInfo.Code <= 0 && c.signalInfo.Pid() == int32(os.Getpid()) {\n- return false // this process.\n+ // The signal was generated by this process. That means\n+ // that it was an interrupt or something else that we\n+ // should bail for. Note that we ignore signals\n+ // generated by other processes.\n+ return false\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ptrace/subprocess_linux.go", "new_path": "pkg/sentry/platform/ptrace/subprocess_linux.go", "diff": "@@ -27,11 +27,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform/procid\"\n)\n-const (\n- syscallEvent syscall.Signal = 0x80\n- seccompEvent syscall.Signal = 0x700 // 0x7 (PTRACE_SECCOMP_EVENT) << 8\n- _PTRACE_O_TRACESECCOMP = 0x80 // 1 << 0x7 (PTRACE_SECCOMP_EVENT)\n-)\n+const syscallEvent syscall.Signal = 0x80\n// probeSeccomp returns true iff seccomp is run after ptrace notifications,\n// which is generally the case for kernel version >= 4.8. This check is dynamic\n@@ -81,6 +77,27 @@ func probeSeccomp() bool {\n}\n}\n+// patchSignalInfo patches the signal info to account for hitting the seccomp\n+// filters from vsyscall emulation, specified below. We allow for SIGSYS as a\n+// synchronous trap, but patch the structure to appear like a SIGSEGV with the\n+// Rip as the faulting address.\n+//\n+// Note that this should only be called after verifying that the signalInfo has\n+// been generated by the kernel.\n+func patchSignalInfo(regs *syscall.PtraceRegs, signalInfo *arch.SignalInfo) {\n+ if linux.Signal(signalInfo.Signo) == linux.SIGSYS {\n+ signalInfo.Signo = int32(linux.SIGSEGV)\n+\n+ // Unwind the kernel emulation, if any has occurred. 
A SIGSYS is delivered\n+ // with the si_call_addr field pointing to the current RIP. This field\n+ // aligns with the si_addr field for a SIGSEGV, so we don't need to touch\n+ // anything there. We do need to unwind emulation however, so we set the\n+ // instruction pointer to the faulting value, and \"unpop\" the stack.\n+ regs.Rip = signalInfo.Addr()\n+ regs.Rsp -= 8\n+ }\n+}\n+\n// createStub creates a fresh stub processes.\n//\n// Precondition: the runtime OS thread must be locked.\n@@ -131,7 +148,7 @@ func attachedThread(flags uintptr, defaultAction uint32) (*thread, error) {\nsyscall.SYS_TIME: {},\n309: {}, // SYS_GETCPU.\n},\n- Action: uint32(linux.SECCOMP_RET_TRACE),\n+ Action: uint32(linux.SECCOMP_RET_TRAP),\nVsyscall: true,\n},\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Use TRAP to simplify vsyscall emulation. PiperOrigin-RevId: 218592058 Change-Id: I373a2d813aa6cc362500dd5a894c0b214a1959d7
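patchSignalInfo above rewrites the SIGSYS raised by the vsyscall seccomp TRAP into the SIGSEGV the sentry expects, pointing RIP back at the faulting address and undoing the kernel's partial emulation of the call. A schematic standalone sketch with stand-in register and siginfo types (not the ptrace platform's real structures):

    package main

    import "fmt"

    // x86-64 Linux signal numbers.
    const (
        sigSYS  = 31
        sigSEGV = 11
    )

    type signalInfo struct {
        Signo int32
        Addr  uint64 // si_call_addr for SIGSYS, si_addr for SIGSEGV.
    }

    type regs struct {
        Rip, Rsp uint64
    }

    // patchSignalInfo converts a SIGSYS produced by the vsyscall seccomp
    // filter (SECCOMP_RET_TRAP) into a SIGSEGV at the vsyscall address and
    // "unpops" the return address that the kernel's emulation removed.
    func patchSignalInfo(r *regs, si *signalInfo) {
        if si.Signo == sigSYS {
            si.Signo = sigSEGV
            r.Rip = si.Addr // Point back at the faulting vsyscall entry.
            r.Rsp -= 8      // Undo the emulated stack pop.
        }
    }

    func main() {
        r := regs{Rip: 0x401000, Rsp: 0x7fff0000}
        si := signalInfo{Signo: sigSYS, Addr: 0xffffffffff600000} // vsyscall page.
        patchSignalInfo(&r, &si)
        fmt.Printf("signo=%d rip=%#x rsp=%#x\n", si.Signo, r.Rip, r.Rsp)
    }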
259,853
25.10.2018 11:45:37
25,200
479cd52a6075066e93ce0c1bd0f183bb5df4fcc7
Uninstall() should not fail if a cgroup directory doesn't exist. This can occur if two controllers are mounted together or if Uninstall() is called on an error path.
[ { "change_type": "MODIFY", "old_path": "runsc/cgroup/cgroup.go", "new_path": "runsc/cgroup/cgroup.go", "diff": "@@ -229,7 +229,11 @@ func (c *Cgroup) Uninstall() error {\ndefer cancel()\nb := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)\nif err := backoff.Retry(func() error {\n- return syscall.Rmdir(path)\n+ err := syscall.Rmdir(path)\n+ if os.IsNotExist(err) {\n+ return nil\n+ }\n+ return err\n}, b); err != nil {\nreturn fmt.Errorf(\"error removing cgroup path %q: %v\", path, err)\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/cgroup/cgroup_test.go", "new_path": "runsc/cgroup/cgroup_test.go", "diff": "@@ -18,6 +18,17 @@ import (\n\"testing\"\n)\n+func TestUninstallEnoent(t *testing.T) {\n+ c := Cgroup{\n+ // set a non-existent name\n+ Name: \"runsc-test-uninstall-656e6f656e740a\",\n+ Own: true,\n+ }\n+ if err := c.Uninstall(); err != nil {\n+ t.Errorf(\"Uninstall() failed: %v\", err)\n+ }\n+}\n+\nfunc TestCountCpuset(t *testing.T) {\nfor _, tc := range []struct {\nstr string\n" } ]
Go
Apache License 2.0
google/gvisor
Uninstall() should not fail if a cgroup directory doesn't exist It can be occurred if two controllers are mounted together or if Uninstall() is called on a error path. PiperOrigin-RevId: 218723886 Change-Id: I69d7a3c0685a7da38527ea8b7b301dbe96268285
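A standalone sketch of the idempotent-removal idea, using os.Remove from the standard library instead of runsc's backoff-wrapped syscall.Rmdir: a directory that is already gone counts as success, since another controller mount or an earlier error path may have removed it first.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // removeIfExists removes dir but treats "already gone" as success, the
    // behavior Uninstall adopts above for cgroup directories.
    func removeIfExists(dir string) error {
        if err := os.Remove(dir); err != nil && !os.IsNotExist(err) {
            return fmt.Errorf("error removing %q: %v", dir, err)
        }
        return nil
    }

    func main() {
        dir := filepath.Join(os.TempDir(), "runsc-test-uninstall-656e6f656e74")
        // First call: nothing to remove, still succeeds.
        fmt.Println(removeIfExists(dir))

        // Create it, then remove it twice; the second removal is a no-op.
        if err := os.Mkdir(dir, 0o755); err != nil {
            panic(err)
        }
        fmt.Println(removeIfExists(dir), removeIfExists(dir))
    }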
259,881
26.10.2018 12:17:51
25,200
624cc329d89bff5f2b0e787d255e718514ec585b
Order feature strings by block
[ { "change_type": "MODIFY", "old_path": "pkg/cpuid/cpuid.go", "new_path": "pkg/cpuid/cpuid.go", "diff": "@@ -228,6 +228,37 @@ var linuxBlockOrder = []block{1, 6, 0, 5, 2, 4}\n// names of the basic features in Linux defined in\n// arch/x86/kernel/cpu/capflags.c.\nvar x86FeatureStrings = map[Feature]string{\n+ // Block 0.\n+ X86FeatureSSE3: \"pni\",\n+ X86FeaturePCLMULDQ: \"pclmulqdq\",\n+ X86FeatureDTES64: \"dtes64\",\n+ X86FeatureMONITOR: \"monitor\",\n+ X86FeatureDSCPL: \"ds_cpl\",\n+ X86FeatureVMX: \"vmx\",\n+ X86FeatureSMX: \"smx\",\n+ X86FeatureEST: \"est\",\n+ X86FeatureTM2: \"tm2\",\n+ X86FeatureSSSE3: \"ssse3\",\n+ X86FeatureCNXTID: \"cid\",\n+ X86FeatureFMA: \"fma\",\n+ X86FeatureCX16: \"cx16\",\n+ X86FeatureXTPR: \"xtpr\",\n+ X86FeaturePDCM: \"pdcm\",\n+ X86FeaturePCID: \"pcid\",\n+ X86FeatureDCA: \"dca\",\n+ X86FeatureSSE4_1: \"sse4_1\",\n+ X86FeatureSSE4_2: \"sse4_2\",\n+ X86FeatureX2APIC: \"x2apic\",\n+ X86FeatureMOVBE: \"movbe\",\n+ X86FeaturePOPCNT: \"popcnt\",\n+ X86FeatureTSCD: \"tsc_deadline_timer\",\n+ X86FeatureAES: \"aes\",\n+ X86FeatureXSAVE: \"xsave\",\n+ X86FeatureAVX: \"avx\",\n+ X86FeatureF16C: \"f16c\",\n+ X86FeatureRDRAND: \"rdrand\",\n+\n+ // Block 1.\nX86FeatureFPU: \"fpu\",\nX86FeatureVME: \"vme\",\nX86FeatureDE: \"de\",\n@@ -258,34 +289,8 @@ var x86FeatureStrings = map[Feature]string{\nX86FeatureTM: \"tm\",\nX86FeatureIA64: \"ia64\",\nX86FeaturePBE: \"pbe\",\n- X86FeatureSSE3: \"pni\",\n- X86FeaturePCLMULDQ: \"pclmulqdq\",\n- X86FeatureDTES64: \"dtes64\",\n- X86FeatureMONITOR: \"monitor\",\n- X86FeatureDSCPL: \"ds_cpl\",\n- X86FeatureVMX: \"vmx\",\n- X86FeatureSMX: \"smx\",\n- X86FeatureEST: \"est\",\n- X86FeatureTM2: \"tm2\",\n- X86FeatureSSSE3: \"ssse3\",\n- X86FeatureCNXTID: \"cid\",\n- X86FeatureFMA: \"fma\",\n- X86FeatureCX16: \"cx16\",\n- X86FeatureXTPR: \"xtpr\",\n- X86FeaturePDCM: \"pdcm\",\n- X86FeaturePCID: \"pcid\",\n- X86FeatureDCA: \"dca\",\n- X86FeatureSSE4_1: \"sse4_1\",\n- X86FeatureSSE4_2: \"sse4_2\",\n- X86FeatureX2APIC: \"x2apic\",\n- X86FeatureMOVBE: \"movbe\",\n- X86FeaturePOPCNT: \"popcnt\",\n- X86FeatureTSCD: \"tsc_deadline_timer\",\n- X86FeatureAES: \"aes\",\n- X86FeatureXSAVE: \"xsave\",\n- X86FeatureAVX: \"avx\",\n- X86FeatureF16C: \"f16c\",\n- X86FeatureRDRAND: \"rdrand\",\n+\n+ // Block 2.\nX86FeatureFSGSBase: \"fsgsbase\",\nX86FeatureTSC_ADJUST: \"tsc_adjust\",\nX86FeatureBMI1: \"bmi1\",\n@@ -305,33 +310,52 @@ var x86FeatureStrings = map[Feature]string{\nX86FeatureADX: \"adx\",\nX86FeatureSMAP: \"smap\",\nX86FeatureCLWB: \"clwb\",\n+ X86FeatureAVX512PF: \"avx512pf\",\n+ X86FeatureAVX512ER: \"avx512er\",\nX86FeatureAVX512CD: \"avx512cd\",\n+ X86FeatureSHA: \"sha_ni\",\nX86FeatureAVX512BW: \"avx512bw\",\nX86FeatureAVX512VL: \"avx512vl\",\n- X86FeatureSYSCALL: \"syscall\",\n- X86FeatureNX: \"nx\",\n- X86FeatureGBPAGES: \"pdpe1gb\",\n- X86FeatureRDTSCP: \"rdtscp\",\n- X86FeatureLM: \"lm\",\n+\n+ // Block 4.\nX86FeatureXSAVEOPT: \"xsaveopt\",\nX86FeatureXSAVEC: \"xsavec\",\nX86FeatureXGETBV1: \"xgetbv1\",\n+\n+ // Block 5.\nX86FeatureLAHF64: \"lahf_lm\", // LAHF/SAHF in long mode\nX86FeatureLZCNT: \"abm\", // Advanced bit manipulation\nX86FeaturePREFETCHW: \"3dnowprefetch\",\n+\n+ // Block 6.\n+ X86FeatureSYSCALL: \"syscall\",\n+ X86FeatureNX: \"nx\",\n+ X86FeatureGBPAGES: \"pdpe1gb\",\n+ X86FeatureRDTSCP: \"rdtscp\",\n+ X86FeatureLM: \"lm\",\n}\n// These flags are parse only---they can be used for setting / unsetting the\n// flags, but will not get printed out in /proc/cpuinfo.\nvar x86FeatureParseOnlyStrings = map[Feature]string{\n- 
X86FeaturePKU: \"pku\",\n- X86FeatureXSAVES: \"xsaves\",\n- X86FeatureFPCSDS: \"fpcsds\",\n- X86FeatureOSXSAVE: \"osxsave\",\n- X86FeatureIPT: \"pt\",\n+ // Block 0.\nX86FeatureSDBG: \"sdbg\",\n+ X86FeatureOSXSAVE: \"osxsave\",\n+\n+ // Block 2.\nX86FeatureFDP_EXCPTN_ONLY: \"fdp_excptn_only\",\n+ X86FeatureFPCSDS: \"fpcsds\",\n+ X86FeatureIPT: \"pt\",\nX86FeatureCLFLUSHOPT: \"clfushopt\",\n+\n+ // Block 3.\n+ X86FeaturePREFETCHWT1: \"prefetchwt1\",\n+ X86FeatureAVX512VBMI: \"avx512vbmi\",\n+ X86FeatureUMIP: \"umip\",\n+ X86FeaturePKU: \"pku\",\n+\n+ // Block 4.\n+ X86FeatureXSAVES: \"xsaves\",\n}\n// These are the default values of various FeatureSet fields.\n" }, { "change_type": "MODIFY", "old_path": "runsc/cgroup/BUILD", "new_path": "runsc/cgroup/BUILD", "diff": "@@ -22,4 +22,5 @@ go_test(\nsize = \"small\",\nsrcs = [\"cgroup_test.go\"],\nembed = [\":cgroup\"],\n+ tags = [\"local\"],\n)\n" } ]
Go
Apache License 2.0
google/gvisor
Order feature strings by block PiperOrigin-RevId: 218894181 Change-Id: I97d0c74175f4aa528363f768a0a85d6953ea0bfd
259,881
26.10.2018 14:25:26
25,200
e60525e4ddd9a7fff8b27d0be8119ce3203a2f5c
Add block 3 features to /proc/cpuinfo. Linux added these block 3 features to the end of /proc/cpuinfo in dfb4a70f20c5b3880da56ee4c9484bdb4e8f1e65. This also fixes that block 3 features were completely missing from FeatureSet.FlagsString(false), because FlagsString only prints Linux blocks regardless of the cpuinfo option.
[ { "change_type": "MODIFY", "old_path": "pkg/cpuid/cpuid.go", "new_path": "pkg/cpuid/cpuid.go", "diff": "@@ -222,7 +222,7 @@ const (\n// which doesn't match well here, so for the /proc/cpuinfo generation we simply\n// re-map the blocks to Linux's ordering and then go through the bits in each\n// block.\n-var linuxBlockOrder = []block{1, 6, 0, 5, 2, 4}\n+var linuxBlockOrder = []block{1, 6, 0, 5, 2, 4, 3}\n// To make emulation of /proc/cpuinfo easy down the line, these names match the\n// names of the basic features in Linux defined in\n@@ -317,6 +317,12 @@ var x86FeatureStrings = map[Feature]string{\nX86FeatureAVX512BW: \"avx512bw\",\nX86FeatureAVX512VL: \"avx512vl\",\n+ // Block 3.\n+ X86FeaturePREFETCHWT1: \"prefetchwt1\",\n+ X86FeatureAVX512VBMI: \"avx512vbmi\",\n+ X86FeatureUMIP: \"umip\",\n+ X86FeaturePKU: \"pku\",\n+\n// Block 4.\nX86FeatureXSAVEOPT: \"xsaveopt\",\nX86FeatureXSAVEC: \"xsavec\",\n@@ -348,12 +354,6 @@ var x86FeatureParseOnlyStrings = map[Feature]string{\nX86FeatureIPT: \"pt\",\nX86FeatureCLFLUSHOPT: \"clfushopt\",\n- // Block 3.\n- X86FeaturePREFETCHWT1: \"prefetchwt1\",\n- X86FeatureAVX512VBMI: \"avx512vbmi\",\n- X86FeatureUMIP: \"umip\",\n- X86FeaturePKU: \"pku\",\n-\n// Block 4.\nX86FeatureXSAVES: \"xsaves\",\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Add block 3 features to /proc/cpuinfo Linux added these block 3 features to the end of /proc/cpuinfo in dfb4a70f20c5b3880da56ee4c9484bdb4e8f1e65. This also fixes that block 3 features were completely missing from FeatureSet.FlagsString(false) because FlagsString only prints Linux blocks regardless of the cpuinfo option. PiperOrigin-RevId: 218913816 Change-Id: I2f9c38c7c9da4b247a140877d4aca782e80684bd
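A toy sketch of the flag emission this change affects: feature strings live in per-block tables, and the /proc/cpuinfo flags line walks the blocks in Linux's order, so a block left out of that order (as block 3 was) simply never prints. The blocks and feature subset below are deliberately simplified, not the real pkg/cpuid tables.

    package main

    import (
        "fmt"
        "strings"
    )

    type block int

    // Feature name strings grouped by CPUID block; a tiny subset of the real
    // table in pkg/cpuid.
    var featuresByBlock = map[block][]string{
        0: {"pni", "ssse3", "avx"},
        1: {"fpu", "sse", "sse2"},
        2: {"fsgsbase", "smep", "avx512f"},
        3: {"prefetchwt1", "avx512vbmi", "umip", "pku"},
    }

    // linuxBlockOrder mirrors the ordering used for /proc/cpuinfo; before the
    // fix, block 3 was absent from this slice, so its flags never appeared.
    var linuxBlockOrder = []block{1, 0, 2, 3}

    func flagsString() string {
        var flags []string
        for _, b := range linuxBlockOrder {
            flags = append(flags, featuresByBlock[b]...)
        }
        return strings.Join(flags, " ")
    }

    func main() {
        fmt.Println("flags\t\t:", flagsString())
    }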
259,891
29.10.2018 10:30:58
25,200
b42a2a32038a8d9098d94c0435fe99e1e2b9a7f2
Removes outdated TODO.
[ { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -668,13 +668,6 @@ func (l *Loader) waitContainer(cid string, waitStatus *uint32) error {\n}\nfunc (l *Loader) waitPID(tgid kernel.ThreadID, cid string, clearStatus bool, waitStatus *uint32) error {\n- // TODO: Containers all currently share a PID namespace.\n- // When per-container PID namespaces are supported, wait should use cid\n- // to find the appropriate PID namespace.\n- /*if cid != l.sandboxID {\n- return errors.New(\"non-sandbox PID namespaces are not yet implemented\")\n- }*/\n-\n// If the process was started via runsc exec, it will have an\n// entry in l.processes.\nl.mu.Lock()\n" } ]
Go
Apache License 2.0
google/gvisor
Removes outdated TODO. PiperOrigin-RevId: 219151173 Change-Id: I73014ea648ae485692ea0d44860c87f4365055cb
260,013
29.10.2018 11:54:28
25,200
422863373f0c90693a21feecb70e89387e3dfd13
Add copybara rules to export C++ test code.
[ { "change_type": "MODIFY", "old_path": "WORKSPACE", "new_path": "WORKSPACE", "diff": "@@ -9,6 +9,7 @@ http_archive(\nurl = \"https://github.com/bazelbuild/bazel-gazelle/releases/download/0.15.0/bazel-gazelle-0.15.0.tar.gz\",\nsha256 = \"6e875ab4b6bf64a38c352887760f21203ab054676d9c1b274963907e0768740d\",\n)\n+\nload(\"@io_bazel_rules_go//go:def.bzl\", \"go_rules_dependencies\", \"go_register_toolchains\")\ngo_rules_dependencies()\ngo_register_toolchains(go_version=\"1.11.1\")\n@@ -93,3 +94,40 @@ go_repository(\nimportpath = \"golang.org/x/sys\",\ncommit = \"0dd5e194bbf5eb84a39666eb4c98a4d007e4203a\",\n)\n+\n+# System Call test dependencies\n+http_archive(\n+ name = \"com_github_gflags_gflags\",\n+ sha256 = \"6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe\",\n+ strip_prefix = \"gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a\",\n+ urls = [\n+ \"https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz\",\n+ \"https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz\",\n+ ],\n+)\n+\n+http_archive(\n+ name = \"com_google_absl\",\n+ strip_prefix = \"abseil-cpp-master\",\n+ urls = [\"https://github.com/abseil/abseil-cpp/archive/master.zip\"],\n+)\n+\n+http_archive(\n+ name = \"com_google_googletest\",\n+ sha256 = \"353ab86e35cea1cd386115279cf4b16695bbf21b897bfbf2721cf4cb5f64ade8\",\n+ strip_prefix = \"googletest-997d343dd680e541ef96ce71ee54a91daf2577a0\",\n+ urls = [\n+ \"https://mirror.bazel.build/github.com/google/googletest/archive/997d343dd680e541ef96ce71ee54a91daf2577a0.zip\",\n+ \"https://github.com/google/googletest/archive/997d343dd680e541ef96ce71ee54a91daf2577a0.zip\",\n+ ],\n+)\n+\n+http_archive(\n+ name = \"com_google_glog\",\n+ sha256 = \"1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21\",\n+ strip_prefix = \"glog-028d37889a1e80e8a07da1b8945ac706259e5fd8\",\n+ urls = [\n+ \"https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz\",\n+ \"https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz\",\n+ ],\n+)\n\\ No newline at end of file\n" } ]
Go
Apache License 2.0
google/gvisor
Add copybara rules to export C++ test code. PiperOrigin-RevId: 219166541 Change-Id: Ieebadaed4096eb48f00fba663e1c76fb41438078
259,891
29.10.2018 11:54:35
25,200
805a27c441c2ca133a3a37dc45f79286b5044a70
Install containerd and crictl when running tests in Kokoro.
[ { "change_type": "MODIFY", "old_path": "kokoro/run_tests.sh", "new_path": "kokoro/run_tests.sh", "diff": "@@ -48,6 +48,60 @@ set +e\nbazel test --test_output=errors //...\nexit_code=${?}\n+# This function spawns a subshell to install crictl and containerd.\n+installCrictl() (\n+ # Fail on any error.\n+ set -e\n+ # Display commands to stderr.\n+ set -x\n+\n+ # Install containerd.\n+ # libseccomp2 needs to be downgraded in order to install libseccomp-dev.\n+ sudo -n -E apt-get install -y --force-yes libseccomp2=2.1.1-1ubuntu1~trusty4\n+ sudo -n -E apt-get install -y btrfs-tools libseccomp-dev\n+ # go get will exit with a status of 1 despite succeeding, so ignore errors.\n+ go get -d github.com/containerd/containerd || true\n+ cd ${GOPATH}/src/github.com/containerd/containerd\n+ git checkout tags/v1.1.4\n+ make\n+ sudo -n -E make install\n+\n+ # Install crictl.\n+ # go get will exit with a status of 1 despite succeeding, so ignore errors.\n+ go get -d github.com/kubernetes-sigs/cri-tools || true\n+ cd ${GOPATH}/src/github.com/kubernetes-sigs/cri-tools\n+ git checkout tags/v1.11.0\n+ make\n+ sudo -n -E make install\n+\n+ # Install gvisor-containerd-shim.\n+ local shim_path=/tmp/gvisor-containerd-shim\n+ wget https://storage.googleapis.com/cri-containerd-staging/gvisor-containerd-shim/gvisor-containerd-shim -O ${shim_path}\n+ chmod +x ${shim_path}\n+ sudo -n -E mv ${shim_path} /usr/local/bin\n+\n+ # Configure containerd.\n+ local shim_config_path=/etc/containerd\n+ local shim_config_tmp_path=/tmp/gvisor-containerd-shim.toml\n+ sudo -n -E mkdir -p ${shim_config_path}\n+ cat > ${shim_config_tmp_path} <<-EOF\n+ runc_shim = \"/usr/local/bin/containerd-shim\"\n+\n+ [runsc_config]\n+ debug = \"true\"\n+ debug-log = \"/tmp/runsc-log/\"\n+ strace = \"true\"\n+ file-access = \"shared\"\n+EOF\n+ sudo mv ${shim_config_tmp_path} ${shim_config_path}\n+)\n+\n+# Install containerd and crictl.\n+if [[ ${exit_code} -eq 0 ]]; then\n+ installCrictl\n+ exit_code=${?}\n+fi\n+\n# Execute local tests that require docker.\nif [[ ${exit_code} -eq 0 ]]; then\n# These names are used to exclude tests not supported in certain\n" } ]
Go
Apache License 2.0
google/gvisor
Install containerd and crictl when running tests in Kokoro. PiperOrigin-RevId: 219166563 Change-Id: If4922eb5dd119b00f2c4bc7a5e016016ce9b9e45
259,858
30.10.2018 22:45:51
25,200
0091db9cbddb6c9fb4c96fbde980780c98006eda
kvm: use private futexes. Use private futexes for performance and to align with other runtime uses.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine_unsafe.go", "new_path": "pkg/sentry/platform/kvm/machine_unsafe.go", "diff": "@@ -88,7 +88,7 @@ func (c *vCPU) notify() {\n_, _, errno := syscall.RawSyscall6(\nsyscall.SYS_FUTEX,\nuintptr(unsafe.Pointer(&c.state)),\n- linux.FUTEX_WAKE,\n+ linux.FUTEX_WAKE|linux.FUTEX_PRIVATE_FLAG,\n^uintptr(0), // Number of waiters.\n0, 0, 0)\nif errno != 0 {\n@@ -106,7 +106,7 @@ func (c *vCPU) waitUntilNot(state uint32) {\n_, _, errno := syscall.Syscall6(\nsyscall.SYS_FUTEX,\nuintptr(unsafe.Pointer(&c.state)),\n- linux.FUTEX_WAIT,\n+ linux.FUTEX_WAIT|linux.FUTEX_PRIVATE_FLAG,\nuintptr(state),\n0, 0, 0)\nif errno != 0 && errno != syscall.EINTR && errno != syscall.EAGAIN {\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/filter/config.go", "new_path": "runsc/boot/filter/config.go", "diff": "@@ -438,7 +438,6 @@ func ptraceFilters() seccomp.SyscallRules {\nfunc kvmFilters() seccomp.SyscallRules {\nreturn seccomp.SyscallRules{\nsyscall.SYS_ARCH_PRCTL: {},\n- syscall.SYS_FUTEX: {},\nsyscall.SYS_IOCTL: {},\nsyscall.SYS_MMAP: {},\nsyscall.SYS_RT_SIGSUSPEND: {},\n" } ]
Go
Apache License 2.0
google/gvisor
kvm: use private futexes. Use private futexes for performance and to align with other runtime uses. PiperOrigin-RevId: 219422634 Change-Id: Ief2af5e8302847ea6dc246e8d1ee4d64684ca9dd
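A minimal sketch of a futex wake with FUTEX_PRIVATE_FLAG via a raw syscall, the change made above. The futex constants are written out locally with their Linux ABI values rather than taken from gVisor's abi/linux package; Linux-only.

    //go:build linux

    package main

    import (
        "fmt"
        "syscall"
        "unsafe"
    )

    // Linux futex ABI values (see <linux/futex.h>), declared locally so the
    // sketch has no dependency on gVisor's abi/linux package.
    const (
        futexWake        = 1
        futexPrivateFlag = 128
    )

    // wakeAll wakes every waiter on addr. ORing in FUTEX_PRIVATE_FLAG tells
    // the kernel the futex is process-private, skipping the shared-memory
    // key lookup; this is what the kvm platform switched to above.
    func wakeAll(addr *uint32) error {
        _, _, errno := syscall.RawSyscall6(
            syscall.SYS_FUTEX,
            uintptr(unsafe.Pointer(addr)),
            futexWake|futexPrivateFlag,
            ^uintptr(0), // Number of waiters to wake: effectively all.
            0, 0, 0)
        if errno != 0 {
            return errno
        }
        return nil
    }

    func main() {
        var state uint32
        // No one is waiting, so this simply returns having woken zero waiters.
        fmt.Println(wakeAll(&state))
    }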
259,858
31.10.2018 10:07:06
25,200
e9dbd5ab67bc31e59910930e6c1b551c0fd05ee6
kvm: avoid siginfo allocations.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/context.go", "new_path": "pkg/sentry/platform/kvm/context.go", "diff": "@@ -29,6 +29,9 @@ type context struct {\n// machine is the parent machine, and is immutable.\nmachine *machine\n+ // info is the arch.SignalInfo cached for this context.\n+ info arch.SignalInfo\n+\n// interrupt is the interrupt context.\ninterrupt interrupt.Forwarder\n}\n@@ -65,7 +68,7 @@ func (c *context) Switch(as platform.AddressSpace, ac arch.Context, _ int32) (*a\n}\n// Take the blue pill.\n- si, at, err := cpu.SwitchToUser(switchOpts)\n+ at, err := cpu.SwitchToUser(switchOpts, &c.info)\n// Clear the address space.\ncpu.active.set(nil)\n@@ -75,7 +78,7 @@ func (c *context) Switch(as platform.AddressSpace, ac arch.Context, _ int32) (*a\n// All done.\nc.interrupt.Disable()\n- return si, at, err\n+ return &c.info, at, err\n}\n// Interrupt interrupts the running context.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/kvm_test.go", "new_path": "pkg/sentry/platform/kvm/kvm_test.go", "diff": "@@ -156,12 +156,13 @@ func applicationTest(t testHarness, useHostMappings bool, target func(), fn func\nfunc TestApplicationSyscall(t *testing.T) {\napplicationTest(t, true, testutil.SyscallLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\n- if _, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\nFullRestore: true,\n- }); err == platform.ErrContextInterrupt {\n+ }, &si); err == platform.ErrContextInterrupt {\nreturn true // Retry.\n} else if err != nil {\nt.Errorf(\"application syscall with full restore failed: %v\", err)\n@@ -169,11 +170,12 @@ func TestApplicationSyscall(t *testing.T) {\nreturn false\n})\napplicationTest(t, true, testutil.SyscallLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\n- if _, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err == platform.ErrContextInterrupt {\n+ }, &si); err == platform.ErrContextInterrupt {\nreturn true // Retry.\n} else if err != nil {\nt.Errorf(\"application syscall with partial restore failed: %v\", err)\n@@ -185,27 +187,29 @@ func TestApplicationSyscall(t *testing.T) {\nfunc TestApplicationFault(t *testing.T) {\napplicationTest(t, true, testutil.Touch, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\ntestutil.SetTouchTarget(regs, nil) // Cause fault.\n- if si, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\nFullRestore: true,\n- }); err == platform.ErrContextInterrupt {\n+ }, &si); err == platform.ErrContextInterrupt {\nreturn true // Retry.\n- } else if err != platform.ErrContextSignal || (si != nil && si.Signo != int32(syscall.SIGSEGV)) {\n+ } else if err != platform.ErrContextSignal || si.Signo != int32(syscall.SIGSEGV) {\nt.Errorf(\"application fault with full restore got (%v, %v), expected (%v, SIGSEGV)\", err, si, platform.ErrContextSignal)\n}\nreturn false\n})\napplicationTest(t, true, testutil.Touch, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\ntestutil.SetTouchTarget(regs, nil) // Cause fault.\n- if si, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var 
si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err == platform.ErrContextInterrupt {\n+ }, &si); err == platform.ErrContextInterrupt {\nreturn true // Retry.\n- } else if err != platform.ErrContextSignal || (si != nil && si.Signo != int32(syscall.SIGSEGV)) {\n+ } else if err != platform.ErrContextSignal || si.Signo != int32(syscall.SIGSEGV) {\nt.Errorf(\"application fault with partial restore got (%v, %v), expected (%v, SIGSEGV)\", err, si, platform.ErrContextSignal)\n}\nreturn false\n@@ -216,11 +220,12 @@ func TestRegistersSyscall(t *testing.T) {\napplicationTest(t, true, testutil.TwiddleRegsSyscall, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\ntestutil.SetTestRegs(regs) // Fill values for all registers.\nfor {\n- if _, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err == platform.ErrContextInterrupt {\n+ }, &si); err == platform.ErrContextInterrupt {\ncontinue // Retry.\n} else if err != nil {\nt.Errorf(\"application register check with partial restore got unexpected error: %v\", err)\n@@ -238,12 +243,13 @@ func TestRegistersFault(t *testing.T) {\napplicationTest(t, true, testutil.TwiddleRegsFault, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\ntestutil.SetTestRegs(regs) // Fill values for all registers.\nfor {\n- if si, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\nFullRestore: true,\n- }); err == platform.ErrContextInterrupt {\n+ }, &si); err == platform.ErrContextInterrupt {\ncontinue // Retry.\n} else if err != platform.ErrContextSignal || si.Signo != int32(syscall.SIGSEGV) {\nt.Errorf(\"application register check with full restore got unexpected error: %v\", err)\n@@ -261,12 +267,13 @@ func TestSegments(t *testing.T) {\napplicationTest(t, true, testutil.TwiddleSegments, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\ntestutil.SetTestSegments(regs)\nfor {\n- if _, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\nFullRestore: true,\n- }); err == platform.ErrContextInterrupt {\n+ }, &si); err == platform.ErrContextInterrupt {\ncontinue // Retry.\n} else if err != nil {\nt.Errorf(\"application segment check with full restore got unexpected error: %v\", err)\n@@ -286,11 +293,12 @@ func TestBounce(t *testing.T) {\ntime.Sleep(time.Millisecond)\nc.BounceToKernel()\n}()\n- if _, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err != platform.ErrContextInterrupt {\n+ }, &si); err != platform.ErrContextInterrupt {\nt.Errorf(\"application partial restore: got %v, wanted %v\", err, platform.ErrContextInterrupt)\n}\nreturn false\n@@ -300,12 +308,13 @@ func TestBounce(t *testing.T) {\ntime.Sleep(time.Millisecond)\nc.BounceToKernel()\n}()\n- if _, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\nFullRestore: 
true,\n- }); err != platform.ErrContextInterrupt {\n+ }, &si); err != platform.ErrContextInterrupt {\nt.Errorf(\"application full restore: got %v, wanted %v\", err, platform.ErrContextInterrupt)\n}\nreturn false\n@@ -331,11 +340,12 @@ func TestBounceStress(t *testing.T) {\nc.BounceToKernel()\n}()\nrandomSleep()\n- if _, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err != platform.ErrContextInterrupt {\n+ }, &si); err != platform.ErrContextInterrupt {\nt.Errorf(\"application partial restore: got %v, wanted %v\", err, platform.ErrContextInterrupt)\n}\nc.unlock()\n@@ -351,11 +361,12 @@ func TestInvalidate(t *testing.T) {\napplicationTest(t, true, testutil.Touch, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\ntestutil.SetTouchTarget(regs, &data) // Read legitimate value.\nfor {\n- if _, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err == platform.ErrContextInterrupt {\n+ }, &si); err == platform.ErrContextInterrupt {\ncontinue // Retry.\n} else if err != nil {\nt.Errorf(\"application partial restore: got %v, wanted nil\", err)\n@@ -365,12 +376,13 @@ func TestInvalidate(t *testing.T) {\n// Unmap the page containing data & invalidate.\npt.Unmap(usermem.Addr(reflect.ValueOf(&data).Pointer() & ^uintptr(usermem.PageSize-1)), usermem.PageSize)\nfor {\n- if _, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\nFlush: true,\n- }); err == platform.ErrContextInterrupt {\n+ }, &si); err == platform.ErrContextInterrupt {\ncontinue // Retry.\n} else if err != platform.ErrContextSignal {\nt.Errorf(\"application partial restore: got %v, wanted %v\", err, platform.ErrContextSignal)\n@@ -388,27 +400,29 @@ func IsFault(err error, si *arch.SignalInfo) bool {\nfunc TestEmptyAddressSpace(t *testing.T) {\napplicationTest(t, false, testutil.SyscallLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\n- if si, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err == platform.ErrContextInterrupt {\n+ }, &si); err == platform.ErrContextInterrupt {\nreturn true // Retry.\n- } else if !IsFault(err, si) {\n+ } else if !IsFault(err, &si) {\nt.Errorf(\"first fault with partial restore failed got %v\", err)\nt.Logf(\"registers: %#v\", &regs)\n}\nreturn false\n})\napplicationTest(t, false, testutil.SyscallLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\n- if si, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\nFullRestore: true,\n- }); err == platform.ErrContextInterrupt {\n+ }, &si); err == platform.ErrContextInterrupt {\nreturn true // Retry.\n- } else if !IsFault(err, si) {\n+ } else if !IsFault(err, &si) {\nt.Errorf(\"first fault with full restore failed got %v\", err)\nt.Logf(\"registers: %#v\", &regs)\n}\n@@ -459,11 +473,12 @@ func BenchmarkApplicationSyscall(b *testing.B) {\na int // Count for 
ErrContextInterrupt.\n)\napplicationTest(b, true, testutil.SyscallLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\n- if _, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err == platform.ErrContextInterrupt {\n+ }, &si); err == platform.ErrContextInterrupt {\na++\nreturn true // Ignore.\n} else if err != nil {\n@@ -495,11 +510,12 @@ func BenchmarkWorldSwitchToUserRoundtrip(b *testing.B) {\na int\n)\napplicationTest(b, true, testutil.SyscallLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\n- if _, _, err := c.SwitchToUser(ring0.SwitchOpts{\n+ var si arch.SignalInfo\n+ if _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err == platform.ErrContextInterrupt {\n+ }, &si); err == platform.ErrContextInterrupt {\na++\nreturn true // Ignore.\n} else if err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine_amd64.go", "new_path": "pkg/sentry/platform/kvm/machine_amd64.go", "diff": "@@ -156,19 +156,19 @@ func (c *vCPU) initArchState() error {\n// nonCanonical generates a canonical address return.\n//\n//go:nosplit\n-func nonCanonical(addr uint64, signal int32) (*arch.SignalInfo, usermem.AccessType, error) {\n- info := &arch.SignalInfo{\n+func nonCanonical(addr uint64, signal int32, info *arch.SignalInfo) (usermem.AccessType, error) {\n+ *info = arch.SignalInfo{\nSigno: signal,\nCode: arch.SignalInfoKernel,\n}\ninfo.SetAddr(addr) // Include address.\n- return info, usermem.NoAccess, platform.ErrContextSignal\n+ return usermem.NoAccess, platform.ErrContextSignal\n}\n// fault generates an appropriate fault return.\n//\n//go:nosplit\n-func (c *vCPU) fault(signal int32) (*arch.SignalInfo, usermem.AccessType, error) {\n+func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, error) {\nbluepill(c) // Probably no-op, but may not be.\nfaultAddr := ring0.ReadCR2()\ncode, user := c.ErrorCode()\n@@ -176,11 +176,10 @@ func (c *vCPU) fault(signal int32) (*arch.SignalInfo, usermem.AccessType, error)\n// The last fault serviced by this CPU was not a user\n// fault, so we can't reliably trust the faultAddr or\n// the code provided here. 
We need to re-execute.\n- return nil, usermem.NoAccess, platform.ErrContextInterrupt\n- }\n- info := &arch.SignalInfo{\n- Signo: signal,\n+ return usermem.NoAccess, platform.ErrContextInterrupt\n}\n+ // Reset the pointed SignalInfo.\n+ *info = arch.SignalInfo{Signo: signal}\ninfo.SetAddr(uint64(faultAddr))\naccessType := usermem.AccessType{\nRead: code&(1<<1) == 0,\n@@ -192,20 +191,20 @@ func (c *vCPU) fault(signal int32) (*arch.SignalInfo, usermem.AccessType, error)\n} else {\ninfo.Code = 2 // SEGV_ACCERR.\n}\n- return info, accessType, platform.ErrContextSignal\n+ return accessType, platform.ErrContextSignal\n}\n// SwitchToUser unpacks architectural-details.\n-func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts) (*arch.SignalInfo, usermem.AccessType, error) {\n+func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (usermem.AccessType, error) {\n// Check for canonical addresses.\nif regs := switchOpts.Registers; !ring0.IsCanonical(regs.Rip) {\n- return nonCanonical(regs.Rip, int32(syscall.SIGSEGV))\n+ return nonCanonical(regs.Rip, int32(syscall.SIGSEGV), info)\n} else if !ring0.IsCanonical(regs.Rsp) {\n- return nonCanonical(regs.Rsp, int32(syscall.SIGBUS))\n+ return nonCanonical(regs.Rsp, int32(syscall.SIGBUS), info)\n} else if !ring0.IsCanonical(regs.Fs_base) {\n- return nonCanonical(regs.Fs_base, int32(syscall.SIGBUS))\n+ return nonCanonical(regs.Fs_base, int32(syscall.SIGBUS), info)\n} else if !ring0.IsCanonical(regs.Gs_base) {\n- return nonCanonical(regs.Gs_base, int32(syscall.SIGBUS))\n+ return nonCanonical(regs.Gs_base, int32(syscall.SIGBUS), info)\n}\n// Assign PCIDs.\n@@ -231,25 +230,25 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts) (*arch.SignalInfo, user\nswitch vector {\ncase ring0.Syscall, ring0.SyscallInt80:\n// Fast path: system call executed.\n- return nil, usermem.NoAccess, nil\n+ return usermem.NoAccess, nil\ncase ring0.PageFault:\n- return c.fault(int32(syscall.SIGSEGV))\n+ return c.fault(int32(syscall.SIGSEGV), info)\ncase ring0.Debug, ring0.Breakpoint:\n- info := &arch.SignalInfo{\n+ *info = arch.SignalInfo{\nSigno: int32(syscall.SIGTRAP),\nCode: 1, // TRAP_BRKPT (breakpoint).\n}\ninfo.SetAddr(switchOpts.Registers.Rip) // Include address.\n- return info, usermem.AccessType{}, platform.ErrContextSignal\n+ return usermem.AccessType{}, platform.ErrContextSignal\ncase ring0.GeneralProtectionFault,\nring0.SegmentNotPresent,\nring0.BoundRangeExceeded,\nring0.InvalidTSS,\nring0.StackSegmentFault:\n- info := &arch.SignalInfo{\n+ *info = arch.SignalInfo{\nSigno: int32(syscall.SIGSEGV),\nCode: arch.SignalInfoKernel,\n}\n@@ -258,52 +257,52 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts) (*arch.SignalInfo, user\n// When CPUID faulting is enabled, we will generate a #GP(0) when\n// userspace executes a CPUID instruction. 
This is handled above,\n// because we need to be able to map and read user memory.\n- return info, usermem.AccessType{}, platform.ErrContextSignalCPUID\n+ return usermem.AccessType{}, platform.ErrContextSignalCPUID\n}\n- return info, usermem.AccessType{}, platform.ErrContextSignal\n+ return usermem.AccessType{}, platform.ErrContextSignal\ncase ring0.InvalidOpcode:\n- info := &arch.SignalInfo{\n+ *info = arch.SignalInfo{\nSigno: int32(syscall.SIGILL),\nCode: 1, // ILL_ILLOPC (illegal opcode).\n}\ninfo.SetAddr(switchOpts.Registers.Rip) // Include address.\n- return info, usermem.AccessType{}, platform.ErrContextSignal\n+ return usermem.AccessType{}, platform.ErrContextSignal\ncase ring0.DivideByZero:\n- info := &arch.SignalInfo{\n+ *info = arch.SignalInfo{\nSigno: int32(syscall.SIGFPE),\nCode: 1, // FPE_INTDIV (divide by zero).\n}\ninfo.SetAddr(switchOpts.Registers.Rip) // Include address.\n- return info, usermem.AccessType{}, platform.ErrContextSignal\n+ return usermem.AccessType{}, platform.ErrContextSignal\ncase ring0.Overflow:\n- info := &arch.SignalInfo{\n+ *info = arch.SignalInfo{\nSigno: int32(syscall.SIGFPE),\nCode: 1, // FPE_INTOVF (integer overflow).\n}\ninfo.SetAddr(switchOpts.Registers.Rip) // Include address.\n- return info, usermem.AccessType{}, platform.ErrContextSignal\n+ return usermem.AccessType{}, platform.ErrContextSignal\ncase ring0.X87FloatingPointException,\nring0.SIMDFloatingPointException:\n- info := &arch.SignalInfo{\n+ *info = arch.SignalInfo{\nSigno: int32(syscall.SIGFPE),\nCode: 7, // FPE_FLTINV (invalid operation).\n}\ninfo.SetAddr(switchOpts.Registers.Rip) // Include address.\n- return info, usermem.AccessType{}, platform.ErrContextSignal\n+ return usermem.AccessType{}, platform.ErrContextSignal\ncase ring0.Vector(bounce): // ring0.VirtualizationException\n- return nil, usermem.NoAccess, platform.ErrContextInterrupt\n+ return usermem.NoAccess, platform.ErrContextInterrupt\ncase ring0.AlignmentCheck:\n- info := &arch.SignalInfo{\n+ *info = arch.SignalInfo{\nSigno: int32(syscall.SIGBUS),\nCode: 2, // BUS_ADRERR (physical address does not exist).\n}\n- return info, usermem.NoAccess, platform.ErrContextSignal\n+ return usermem.NoAccess, platform.ErrContextSignal\ncase ring0.NMI:\n// An NMI is generated only when a fault is not servicable by\n@@ -311,7 +310,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts) (*arch.SignalInfo, user\n// really not. This could happen, e.g. if some file is\n// truncated (and would generate a SIGBUS) and we map it\n// directly into the instance.\n- return c.fault(int32(syscall.SIGBUS))\n+ return c.fault(int32(syscall.SIGBUS), info)\ncase ring0.DeviceNotAvailable,\nring0.DoubleFault,\n" } ]
Go
Apache License 2.0
google/gvisor
kvm: avoid siginfo allocations. PiperOrigin-RevId: 219492587 Change-Id: I47f6fc0b74a4907ab0aff03d5f26453bdb983bb5
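The diff above stops allocating a fresh arch.SignalInfo on every fault and instead caches one in the context and fills a caller-provided pointer. A small, hypothetical Go sketch of that out-parameter pattern (all names below are invented for illustration and are not gvisor code):

package main

import "fmt"

// Info stands in for a per-fault record such as a signal info struct.
type Info struct {
    Signo int32
    Addr  uint64
}

// faultAlloc returns a new *Info on every call, allocating on the hot path.
func faultAlloc(addr uint64) *Info {
    return &Info{Signo: 11, Addr: addr}
}

// faultFill writes into a caller-owned Info instead, so a context can cache
// one Info and reuse it across faults without allocating.
func faultFill(addr uint64, out *Info) {
    *out = Info{Signo: 11, Addr: addr}
}

func main() {
    var cached Info // cached per context, reused across calls
    faultFill(0x1000, &cached)
    fmt.Printf("%+v\n", cached)
    fmt.Printf("%+v\n", *faultAlloc(0x2000))
}

The pointer-returning variant forces a heap allocation per event; the fill-in variant keeps the hot path allocation-free, which is the trade the commit makes.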
259,992
31.10.2018 11:27:10
25,200
ccc3d7ca11a2a623587c651a6690aaa46d2c2665
Make lazy open the mode of operation for fsgofer. With recent changes to 9P server, path walks are now safe inside open, create, rename and setattr calls. To simplify the code, remove the lazyopen=false mode that was used for bind mounts, and converge all mounts to using lazy open.
[ { "change_type": "MODIFY", "old_path": "runsc/cmd/gofer.go", "new_path": "runsc/cmd/gofer.go", "diff": "@@ -124,9 +124,6 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nats = append(ats, fsgofer.NewAttachPoint(\"/\", fsgofer.Config{\nROMount: spec.Root.Readonly,\nPanicOnWrite: g.panicOnWrite,\n- // Docker uses overlay2 by default for the root mount, and overlay2 does a copy-up when\n- // each file is opened as writable. Thus, we open files lazily to avoid copy-up.\n- LazyOpenForWrite: true,\n}))\nlog.Infof(\"Serving %q mapped to %q on FD %d (ro: %t)\", \"/\", root, g.ioFDs[0], spec.Root.Readonly)\n@@ -136,7 +133,6 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\ncfg := fsgofer.Config{\nROMount: isReadonlyMount(m.Options),\nPanicOnWrite: g.panicOnWrite,\n- LazyOpenForWrite: false,\n}\nats = append(ats, fsgofer.NewAttachPoint(m.Destination, cfg))\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/BUILD", "new_path": "runsc/fsgofer/BUILD", "diff": "@@ -17,6 +17,7 @@ go_library(\n\"//pkg/fd\",\n\"//pkg/log\",\n\"//pkg/p9\",\n+ \"//pkg/syserr\",\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer.go", "new_path": "runsc/fsgofer/fsgofer.go", "diff": "@@ -77,13 +77,6 @@ type Config struct {\n// PanicOnWrite panics on attempts to write to RO mounts.\nPanicOnWrite bool\n-\n- // LazyOpenForWrite makes the underlying file to be opened in RDONLY\n- // mode initially and be reopened in case write access is desired.\n- // This is done to workaround the behavior in 'overlay2' that\n- // copies the entire file up eagerly when it's opened in write mode\n- // even if the file is never actually written to.\n- LazyOpenForWrite bool\n}\ntype attachPoint struct {\n@@ -182,9 +175,10 @@ func (a *attachPoint) makeQID(stat syscall.Stat_t) p9.QID {\n// localFile implements p9.File wrapping a local file. The underlying file\n// is opened during Walk() and stored in 'controlFile' to be used with other\n-// operations. The mode in which the file is opened varies depending on the\n-// configuration (see below). 'controlFile' is dup'ed when Walk(nil) is called\n-// to clone the file.\n+// operations. The control file is opened as readonly, unless it's a symlink\n+// which requires O_PATH. 'controlFile' is dup'ed when Walk(nil) is called\n+// to clone the file. This reduces the number of walks that need to be done by\n+// the host file system when files are reused.\n//\n// 'openedFile' is assigned when Open() is called. If requested open mode is\n// a subset of controlFile's mode, it's possible to use the same file. If mode\n@@ -193,22 +187,10 @@ func (a *attachPoint) makeQID(stat syscall.Stat_t) p9.QID {\n// operations. Before the file is opened and after it's closed, 'mode' is set to\n// an invalid value to prevent an unopened file from being used.\n//\n-// localFile has 2 modes of operation based on the configuration:\n-//\n-// ** conf.lazyRWOpen == false **\n-// This is the preferred mode. 'controlFile' is opened in RW mode in Walk()\n-// and used across all functions. The file is never reopened as the mode will\n-// always be a super set of the requested open mode. 
This reduces the number of\n-// syscalls required per operation and makes it resilient to renames anywhere\n-// in the path to the file.\n-//\n-// ** conf.lazyRWOpen == true **\n-// This mode is used for better performance with 'overlay2' storage driver.\n-// overlay2 eagerly copies the entire file up when it's opened in write mode\n-// which makes the mode above perform badly when serveral of files are opened\n-// for read (esp. startup). In this mode, 'controlFile' is opened as readonly\n-// (or O_PATH for symlinks). Reopening the file is required if write mode\n-// is requested in Open().\n+// The reason that the control file is never opened as read-write is for better\n+// performance with 'overlay2' storage driver. overlay2 eagerly copies the\n+// entire file up when it's opened in write mode, and would perform badly when\n+// multiple files are being opened for read-only (esp. startup).\ntype localFile struct {\np9.DefaultWalkGetAttr\n@@ -238,23 +220,14 @@ type localFile struct {\nfunc openAnyFile(parent *localFile, name string) (*os.File, string, error) {\n// Attempt to open file in the following mode in order:\n- // 1. RDWR: for files with rw mounts and LazyOpenForWrite disabled\n- // 2. RDONLY: for directories, ro mounts or LazyOpenForWrite enabled\n- // 3. PATH: for symlinks\n- modes := []int{syscall.O_RDWR, syscall.O_RDONLY, unix.O_PATH}\n- symlinkIdx := len(modes) - 1\n-\n- startIdx := 0\n- conf := parent.attachPoint.conf\n- if conf.ROMount || conf.LazyOpenForWrite {\n- // Skip attempt to open in RDWR based on configuration.\n- startIdx = 1\n- }\n+ // 1. RDONLY: for all files, works for directories and ro mounts too\n+ // 2. PATH: for symlinks\n+ modes := []int{syscall.O_RDONLY, unix.O_PATH}\nvar err error\nvar fd int\n- for i := startIdx; i < len(modes); i++ {\n- fd, err = syscall.Openat(parent.controlFD(), name, openFlags|modes[i], 0)\n+ for i, mode := range modes {\n+ fd, err = syscall.Openat(parent.controlFD(), name, openFlags|mode, 0)\nif err == nil {\n// openat succeeded, we're done.\nbreak\n@@ -263,16 +236,10 @@ func openAnyFile(parent *localFile, name string) (*os.File, string, error) {\ncase syscall.ENOENT:\n// File doesn't exist, no point in retrying.\nreturn nil, \"\", e\n- case syscall.ELOOP:\n- if i < symlinkIdx {\n- // File was opened with O_NOFOLLOW, so this error can only happen when\n- // trying ot open a symlink. Jump straight to flags compatible with symlink.\n- i = symlinkIdx - 1\n- }\n}\n- // openat failed. Try again with next mode, preserving 'err' in\n- // case this was the last attempt.\n- log.Debugf(\"Attempt %d to open file failed, mode: %#x, path: %s/%s, err: %v\", i, openFlags|modes[i], parent.controlFile.Name(), name, err)\n+ // openat failed. 
Try again with next mode, preserving 'err' in case this\n+ // was the last attempt.\n+ log.Debugf(\"Attempt %d to open file failed, mode: %#x, path: %s/%s, err: %v\", i, openFlags|mode, parent.controlFile.Name(), name, err)\n}\nif err != nil {\n// All attempts to open file have failed, return the last error.\n@@ -353,13 +320,13 @@ func (l *localFile) Open(mode p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) {\n// Check if control file can be used or if a new open must be created.\nvar newFile *os.File\n- if mode == p9.ReadOnly || !l.attachPoint.conf.LazyOpenForWrite {\n+ if mode == p9.ReadOnly {\nlog.Debugf(\"Open reusing control file, mode: %v, %q\", mode, l.controlFile.Name())\nnewFile = l.controlFile\n} else {\n- // Ideally reopen would call name_to_handle_at (with empty name) and open_by_handle_at\n- // to reopen the file without using 'hostPath'. However, name_to_handle_at and\n- // open_by_handle_at aren't supported by overlay2.\n+ // Ideally reopen would call name_to_handle_at (with empty name) and\n+ // open_by_handle_at to reopen the file without using 'hostPath'. However,\n+ // name_to_handle_at and open_by_handle_at aren't supported by overlay2.\nlog.Debugf(\"Open reopening file, mode: %v, %q\", mode, l.controlFile.Name())\nvar err error\n@@ -397,9 +364,10 @@ func (l *localFile) Create(name string, mode p9.OpenFlags, perm p9.FileMode, uid\nreturn nil, nil, p9.QID{}, 0, syscall.EBADF\n}\n- // Use a single file for both 'controlFile' and 'openedFile'. Mode must include read for control\n- // and whichever else was requested by caller. Note that resulting file might have a wider mode\n- // than needed for each particular case.\n+ // Use a single file for both 'controlFile' and 'openedFile'. Mode must\n+ // include read for control and whichever else was requested by caller. Note\n+ // that resulting file might have a wider mode than needed for each particular\n+ // case.\nflags := openFlags | syscall.O_CREAT | syscall.O_EXCL\nif mode == p9.WriteOnly {\nflags |= syscall.O_RDWR\n@@ -622,9 +590,9 @@ func (l *localFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error {\n}\nfd := l.controlFD()\n- if conf.LazyOpenForWrite && l.ft == regular {\n- // Regular files are opened in RO mode when lazy open is set.\n- // Thus it needs to be reopened here for write.\n+ if l.ft == regular {\n+ // Regular files are opened in RO mode, thus it needs to be reopened here\n+ // for write.\nf, err := os.OpenFile(l.hostPath, openFlags|os.O_WRONLY, 0)\nif err != nil {\nreturn extractErrno(err)\n@@ -728,8 +696,6 @@ func (l *localFile) Rename(p9.File, string) error {\n}\n// RenameAt implements p9.File.RenameAt.\n-//\n-// TODO: change to renameat(2).\nfunc (l *localFile) RenameAt(oldName string, directory p9.File, newName string) error {\nconf := l.attachPoint.conf\nif conf.ROMount {\n@@ -740,9 +706,7 @@ func (l *localFile) RenameAt(oldName string, directory p9.File, newName string)\n}\nnewParent := directory.(*localFile)\n- oldPath := path.Join(l.hostPath, oldName)\n- newPath := path.Join(newParent.hostPath, newName)\n- if err := syscall.Rename(oldPath, newPath); err != nil {\n+ if err := renameat(l.controlFD(), oldName, newParent.controlFD(), newName); err != nil {\nreturn extractErrno(err)\n}\nreturn nil\n@@ -863,7 +827,8 @@ func (l *localFile) Readdir(offset uint64, count uint32) ([]p9.Dirent, error) {\n}\n// Readdirnames is a cursor over directories, so seek back to 0 to ensure it's\n- // reading all directory contents. 
Take a lock because this operation is stateful.\n+ // reading all directory contents. Take a lock because this operation is\n+ // stateful.\nl.readDirMu.Lock()\nif _, err := l.openedFile.Seek(0, 0); err != nil {\nl.readDirMu.Unlock()\n@@ -944,7 +909,7 @@ func (l *localFile) Renamed(newDir p9.File, newName string) {\nfunc extractErrno(err error) syscall.Errno {\nif err == nil {\n// This should never happen. The likely result will be that\n- // some user gets the frustration \"error: SUCCESS\" message.\n+ // some user gets the frustrating \"error: SUCCESS\" message.\nlog.Warningf(\"extractErrno called with nil error!\")\nreturn 0\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer_test.go", "new_path": "runsc/fsgofer/fsgofer_test.go", "diff": "@@ -48,14 +48,8 @@ var (\n// allConfs is set in init() above.\nallConfs []Config\n- rwConfs = []Config{\n- {ROMount: false, LazyOpenForWrite: false},\n- {ROMount: false, LazyOpenForWrite: true},\n- }\n- roConfs = []Config{\n- {ROMount: true, LazyOpenForWrite: false},\n- {ROMount: true, LazyOpenForWrite: true},\n- }\n+ rwConfs = []Config{{ROMount: false}}\n+ roConfs = []Config{{ROMount: true}}\n)\ntype state struct {\n@@ -66,7 +60,7 @@ type state struct {\n}\nfunc (s state) String() string {\n- return fmt.Sprintf(\"lazyopen(%v)-%v\", s.conf.LazyOpenForWrite, s.ft)\n+ return fmt.Sprintf(\"type(%v)\", s.ft)\n}\nfunc runAll(t *testing.T, test func(*testing.T, state)) {\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer_unsafe.go", "new_path": "runsc/fsgofer/fsgofer_unsafe.go", "diff": "@@ -19,20 +19,29 @@ import (\n\"unsafe\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n+ \"gvisor.googlesource.com/gvisor/pkg/syserr\"\n)\nfunc statAt(dirFd int, name string) (syscall.Stat_t, error) {\nnameBytes, err := syscall.BytePtrFromString(name)\nif err != nil {\n- return syscall.Stat_t{}, extractErrno(err)\n+ return syscall.Stat_t{}, err\n}\n- namePtr := uintptr(unsafe.Pointer(nameBytes))\n+ namePtr := unsafe.Pointer(nameBytes)\nvar stat syscall.Stat_t\n- statPtr := uintptr(unsafe.Pointer(&stat))\n+ statPtr := unsafe.Pointer(&stat)\n- if _, _, err := syscall.Syscall6(syscall.SYS_NEWFSTATAT, uintptr(dirFd), namePtr, statPtr, linux.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 {\n- return syscall.Stat_t{}, err\n+ if _, _, errno := syscall.Syscall6(\n+ syscall.SYS_NEWFSTATAT,\n+ uintptr(dirFd),\n+ uintptr(namePtr),\n+ uintptr(statPtr),\n+ linux.AT_SYMLINK_NOFOLLOW,\n+ 0,\n+ 0); errno != 0 {\n+\n+ return syscall.Stat_t{}, syserr.FromHost(errno).ToError()\n}\nreturn stat, nil\n}\n@@ -40,19 +49,59 @@ func statAt(dirFd int, name string) (syscall.Stat_t, error) {\nfunc utimensat(dirFd int, name string, times [2]syscall.Timespec, flags int) error {\n// utimensat(2) doesn't accept empty name, instead name must be nil to make it\n// operate directly on 'dirFd' unlike other *at syscalls.\n- var namePtr uintptr\n+ var namePtr unsafe.Pointer\nif name != \"\" {\nnameBytes, err := syscall.BytePtrFromString(name)\nif err != nil {\n- return extractErrno(err)\n+ return err\n}\n- namePtr = uintptr(unsafe.Pointer(nameBytes))\n+ namePtr = unsafe.Pointer(nameBytes)\n}\n- timesPtr := uintptr(unsafe.Pointer(&times[0]))\n+ timesPtr := unsafe.Pointer(&times[0])\n+\n+ if _, _, errno := syscall.Syscall6(\n+ syscall.SYS_UTIMENSAT,\n+ uintptr(dirFd),\n+ uintptr(namePtr),\n+ uintptr(timesPtr),\n+ uintptr(flags),\n+ 0,\n+ 0); errno != 0 {\n+\n+ return syserr.FromHost(errno).ToError()\n+ }\n+ return nil\n+}\n- if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, 
uintptr(dirFd), namePtr, timesPtr, uintptr(flags), 0, 0); err != 0 {\n+func renameat(oldDirFD int, oldName string, newDirFD int, newName string) error {\n+ var oldNamePtr unsafe.Pointer\n+ if oldName != \"\" {\n+ nameBytes, err := syscall.BytePtrFromString(oldName)\n+ if err != nil {\n+ return err\n+ }\n+ oldNamePtr = unsafe.Pointer(nameBytes)\n+ }\n+ var newNamePtr unsafe.Pointer\n+ if newName != \"\" {\n+ nameBytes, err := syscall.BytePtrFromString(newName)\n+ if err != nil {\nreturn err\n}\n+ newNamePtr = unsafe.Pointer(nameBytes)\n+ }\n+\n+ if _, _, errno := syscall.Syscall6(\n+ syscall.SYS_RENAMEAT,\n+ uintptr(oldDirFD),\n+ uintptr(oldNamePtr),\n+ uintptr(newDirFD),\n+ uintptr(newNamePtr),\n+ 0,\n+ 0); errno != 0 {\n+\n+ return syserr.FromHost(errno).ToError()\n+ }\nreturn nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Make lazy open the mode of operation for fsgofer With recent changes to 9P server, path walks are now safe inside open, create, rename and setattr calls. To simplify the code, remove the lazyopen=false mode that was used for bind mounts, and converge all mounts to using lazy open. PiperOrigin-RevId: 219508628 Change-Id: I073e7e1e2e9a9972d150eaf4cb29e553997a9b76
259,858
31.10.2018 15:49:10
25,200
c4bbb54168a9014048d2144110e70daf5a5b8211
kvm: add detailed traces on vCPU errors. This improves debuggability greatly.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/bluepill.go", "new_path": "pkg/sentry/platform/kvm/bluepill.go", "diff": "@@ -19,6 +19,7 @@ import (\n\"reflect\"\n\"syscall\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform/safecopy\"\n)\n@@ -28,14 +29,55 @@ func bluepill(*vCPU)\n// sighandler is the signal entry point.\nfunc sighandler()\n+// dieTrampoline is the assembly trampoline. This calls dieHandler.\n+//\n+// This uses an architecture-specific calling convention, documented in\n+// dieArchSetup and the assembly implementation for dieTrampoline.\n+func dieTrampoline()\n+\n+var (\n// savedHandler is a pointer to the previous handler.\n//\n// This is called by bluepillHandler.\n-var savedHandler uintptr\n+ savedHandler uintptr\n+\n+ // dieTrampolineAddr is the address of dieTrampoline.\n+ dieTrampolineAddr uintptr\n+)\n+\n+// dieHandler is called by dieTrampoline.\n+//\n+//go:nosplit\n+func dieHandler(c *vCPU) {\n+ throw(c.dieMessage)\n+}\n+\n+// die is called to set the vCPU up to panic.\n+//\n+// This loads vCPU state, and sets up a call for the trampoline.\n+//\n+//go:nosplit\n+func (c *vCPU) die(context *arch.SignalContext64, msg string) {\n+ // Save the death message, which will be thrown.\n+ c.dieMessage = msg\n+\n+ // Reload all registers to have an accurate stack trace when we return\n+ // to host mode. This means that the stack should be unwound correctly.\n+ var guestRegs userRegs\n+ if errno := c.getUserRegisters(&guestRegs); errno != 0 {\n+ throw(msg)\n+ }\n+\n+ // Setup the trampoline.\n+ dieArchSetup(c, context, &guestRegs)\n+}\nfunc init() {\n// Install the handler.\nif err := safecopy.ReplaceSignalHandler(syscall.SIGSEGV, reflect.ValueOf(sighandler).Pointer(), &savedHandler); err != nil {\npanic(fmt.Sprintf(\"Unable to set handler for signal %d: %v\", syscall.SIGSEGV, err))\n}\n+\n+ // Extract the address for the trampoline.\n+ dieTrampolineAddr = reflect.ValueOf(dieTrampoline).Pointer()\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go", "new_path": "pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go", "diff": "@@ -20,9 +20,37 @@ import (\n\"unsafe\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0\"\n)\n// bluepillArchContext returns the arch-specific context.\n+//\n+//go:nosplit\nfunc bluepillArchContext(context unsafe.Pointer) *arch.SignalContext64 {\nreturn &((*arch.UContext64)(context).MContext)\n}\n+\n+// dieArchSetup initialies the state for dieTrampoline.\n+//\n+// The amd64 dieTrampoline requires the vCPU to be set in BX, and the last RIP\n+// to be in AX. The trampoline then simulates a call to dieHandler from the\n+// provided RIP.\n+//\n+//go:nosplit\n+func dieArchSetup(c *vCPU, context *arch.SignalContext64, guestRegs *userRegs) {\n+ // If the vCPU is in user mode, we set the stack to the stored stack\n+ // value in the vCPU itself. 
We don't want to unwind the user stack.\n+ if guestRegs.RFLAGS&ring0.UserFlagsSet == ring0.UserFlagsSet {\n+ regs := c.CPU.Registers()\n+ context.Rax = regs.Rax\n+ context.Rsp = regs.Rsp\n+ context.Rbp = regs.Rbp\n+ } else {\n+ context.Rax = guestRegs.RIP\n+ context.Rsp = guestRegs.RSP\n+ context.Rbp = guestRegs.RBP\n+ context.Eflags = guestRegs.RFLAGS\n+ }\n+ context.Rbx = uint64(uintptr(unsafe.Pointer(c)))\n+ context.Rip = uint64(dieTrampolineAddr)\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go", "new_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go", "diff": "@@ -113,9 +113,11 @@ func bluepillHandler(context unsafe.Pointer) {\nswitch c.runData.exitReason {\ncase _KVM_EXIT_EXCEPTION:\n- throw(\"exception\")\n+ c.die(bluepillArchContext(context), \"exception\")\n+ return\ncase _KVM_EXIT_IO:\n- throw(\"I/O\")\n+ c.die(bluepillArchContext(context), \"I/O\")\n+ return\ncase _KVM_EXIT_INTERNAL_ERROR:\n// An internal error is typically thrown when emulation\n// fails. This can occur via the MMIO path below (and\n@@ -123,9 +125,11 @@ func bluepillHandler(context unsafe.Pointer) {\n// are not mapped). We would actually prefer that no\n// emulation occur, and don't mind at all if it fails.\ncase _KVM_EXIT_HYPERCALL:\n- throw(\"hypercall\")\n+ c.die(bluepillArchContext(context), \"hypercall\")\n+ return\ncase _KVM_EXIT_DEBUG:\n- throw(\"debug\")\n+ c.die(bluepillArchContext(context), \"debug\")\n+ return\ncase _KVM_EXIT_HLT:\n// Copy out registers.\nbluepillArchExit(c, bluepillArchContext(context))\n@@ -145,9 +149,11 @@ func bluepillHandler(context unsafe.Pointer) {\natomic.AddUint32(&c.faults, 1)\n// For MMIO, the physical address is the first data item.\n- virtual, ok := handleBluepillFault(c.machine, uintptr(c.runData.data[0]))\n+ physical := uintptr(c.runData.data[0])\n+ virtual, ok := handleBluepillFault(c.machine, physical)\nif !ok {\n- throw(\"physical address not valid\")\n+ c.die(bluepillArchContext(context), \"invalid physical address\")\n+ return\n}\n// We now need to fill in the data appropriately. 
KVM\n@@ -158,7 +164,7 @@ func bluepillHandler(context unsafe.Pointer) {\n// not create invalid page table mappings.\ndata := (*[8]byte)(unsafe.Pointer(&c.runData.data[1]))\nlength := (uintptr)((uint32)(c.runData.data[2]))\n- write := (uint8)((c.runData.data[2] >> 32 & 0xff)) != 0\n+ write := (uint8)(((c.runData.data[2] >> 32) & 0xff)) != 0\nfor i := uintptr(0); i < length; i++ {\nb := bytePtr(uintptr(virtual) + i)\nif write {\n@@ -182,11 +188,14 @@ func bluepillHandler(context unsafe.Pointer) {\n// Clear previous injection request.\nc.runData.requestInterruptWindow = 0\ncase _KVM_EXIT_SHUTDOWN:\n- throw(\"shutdown\")\n+ c.die(bluepillArchContext(context), \"shutdown\")\n+ return\ncase _KVM_EXIT_FAIL_ENTRY:\n- throw(\"entry failed\")\n+ c.die(bluepillArchContext(context), \"entry failed\")\n+ return\ndefault:\n- throw(\"unknown failure\")\n+ c.die(bluepillArchContext(context), \"unknown\")\n+ return\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/kvm_const.go", "new_path": "pkg/sentry/platform/kvm/kvm_const.go", "diff": "@@ -31,6 +31,7 @@ const (\n_KVM_SET_USER_MEMORY_REGION = 0x4020ae46\n_KVM_SET_REGS = 0x4090ae82\n_KVM_SET_SREGS = 0x4138ae84\n+ _KVM_GET_REGS = 0x8090ae81\n_KVM_GET_SUPPORTED_CPUID = 0xc008ae05\n_KVM_SET_CPUID2 = 0x4008ae90\n_KVM_SET_SIGNAL_MASK = 0x4004ae8b\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine.go", "new_path": "pkg/sentry/platform/kvm/machine.go", "diff": "@@ -120,6 +120,9 @@ type vCPU struct {\n// vCPUArchState is the architecture-specific state.\nvCPUArchState\n+\n+ // dieMessage is thrown from die.\n+ dieMessage string\n}\n// newVCPU creates a returns a new vCPU.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine_amd64_unsafe.go", "new_path": "pkg/sentry/platform/kvm/machine_amd64_unsafe.go", "diff": "@@ -73,30 +73,6 @@ func (c *vCPU) loadSegments(tid uint64) {\natomic.StoreUint64(&c.tid, tid)\n}\n-// setUserRegisters sets user registers in the vCPU.\n-func (c *vCPU) setUserRegisters(uregs *userRegs) error {\n- if _, _, errno := syscall.RawSyscall(\n- syscall.SYS_IOCTL,\n- uintptr(c.fd),\n- _KVM_SET_REGS,\n- uintptr(unsafe.Pointer(uregs))); errno != 0 {\n- return fmt.Errorf(\"error setting user registers: %v\", errno)\n- }\n- return nil\n-}\n-\n-// setSystemRegisters sets system registers.\n-func (c *vCPU) setSystemRegisters(sregs *systemRegs) error {\n- if _, _, errno := syscall.RawSyscall(\n- syscall.SYS_IOCTL,\n- uintptr(c.fd),\n- _KVM_SET_SREGS,\n- uintptr(unsafe.Pointer(sregs))); errno != 0 {\n- return fmt.Errorf(\"error setting system registers: %v\", errno)\n- }\n- return nil\n-}\n-\n// setCPUID sets the CPUID to be used by the guest.\nfunc (c *vCPU) setCPUID() error {\nif _, _, errno := syscall.RawSyscall(\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine_unsafe.go", "new_path": "pkg/sentry/platform/kvm/machine_unsafe.go", "diff": "@@ -57,6 +57,46 @@ func unmapRunData(r *runData) error {\nreturn nil\n}\n+// setUserRegisters sets user registers in the vCPU.\n+func (c *vCPU) setUserRegisters(uregs *userRegs) error {\n+ if _, _, errno := syscall.RawSyscall(\n+ syscall.SYS_IOCTL,\n+ uintptr(c.fd),\n+ _KVM_SET_REGS,\n+ uintptr(unsafe.Pointer(uregs))); errno != 0 {\n+ return fmt.Errorf(\"error setting user registers: %v\", errno)\n+ }\n+ return nil\n+}\n+\n+// getUserRegisters reloads user registers in the vCPU.\n+//\n+// This is safe to call from a nosplit context.\n+//\n+//go:nosplit\n+func (c *vCPU) getUserRegisters(uregs *userRegs) 
syscall.Errno {\n+ if _, _, errno := syscall.RawSyscall(\n+ syscall.SYS_IOCTL,\n+ uintptr(c.fd),\n+ _KVM_GET_REGS,\n+ uintptr(unsafe.Pointer(uregs))); errno != 0 {\n+ return errno\n+ }\n+ return 0\n+}\n+\n+// setSystemRegisters sets system registers.\n+func (c *vCPU) setSystemRegisters(sregs *systemRegs) error {\n+ if _, _, errno := syscall.RawSyscall(\n+ syscall.SYS_IOCTL,\n+ uintptr(c.fd),\n+ _KVM_SET_SREGS,\n+ uintptr(unsafe.Pointer(sregs))); errno != 0 {\n+ return fmt.Errorf(\"error setting system registers: %v\", errno)\n+ }\n+ return nil\n+}\n+\n// atomicAddressSpace is an atomic address space pointer.\ntype atomicAddressSpace struct {\npointer unsafe.Pointer\n" } ]
Go
Apache License 2.0
google/gvisor
kvm: add detailed traces on vCPU errors. This improves debuggability greatly. PiperOrigin-RevId: 219551560 Change-Id: I2ecaffdd1c17b0d9f25911538ea6f693e2bc699f
259,858
31.10.2018 15:58:21
25,200
fb613020c7db323c705adf6ae0f954bee4ab5fec
kvm: simplify floating point logic. This reduces the number of floating point save/restore cycles required (since we don't need to restore immediately following the switch, this always happens in a known context) and allows the kernel hooks to capture state. This lets us remove calls like "Current()".
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/bluepill_amd64.go", "new_path": "pkg/sentry/platform/kvm/bluepill_amd64.go", "diff": "@@ -47,8 +47,8 @@ func redpill() {\n// bluepillArchEnter is called during bluepillEnter.\n//\n//go:nosplit\n-func bluepillArchEnter(context *arch.SignalContext64) (c *vCPU) {\n- c = vCPUPtr(uintptr(context.Rax))\n+func bluepillArchEnter(context *arch.SignalContext64) *vCPU {\n+ c := vCPUPtr(uintptr(context.Rax))\nregs := c.CPU.Registers()\nregs.R8 = context.R8\nregs.R9 = context.R9\n@@ -73,50 +73,41 @@ func bluepillArchEnter(context *arch.SignalContext64) (c *vCPU) {\nregs.Cs = uint64(ring0.Kcode)\nregs.Ds = uint64(ring0.Udata)\nregs.Es = uint64(ring0.Udata)\n- regs.Fs = uint64(ring0.Udata)\nregs.Ss = uint64(ring0.Kdata)\n-\n- // ring0 uses GS exclusively, so we use GS_base to store the location\n- // of the floating point address.\n- //\n- // The address will be restored directly after running the VCPU, and\n- // will be saved again prior to halting. We rely on the fact that the\n- // SaveFloatingPointer/LoadFloatingPoint functions use the most\n- // efficient mechanism available (including compression) so the state\n- // size is guaranteed to be less than what's pointed to here.\n- regs.Gs_base = uint64(context.Fpstate)\n- return\n+ return c\n}\n-// bluepillSyscall handles kernel syscalls.\n+// KernelSyscall handles kernel syscalls.\n//\n//go:nosplit\n-func bluepillSyscall() {\n- regs := ring0.Current().Registers()\n+func (c *vCPU) KernelSyscall() {\n+ regs := c.Registers()\nif regs.Rax != ^uint64(0) {\nregs.Rip -= 2 // Rewind.\n}\n- ring0.SaveFloatingPoint(bytePtr(uintptr(regs.Gs_base)))\n+ // We only trigger a bluepill entry in the bluepill function, and can\n+ // therefore be guaranteed that there is no floating point state to be\n+ // loaded on resuming from halt. We only worry about saving on exit.\n+ ring0.SaveFloatingPoint((*byte)(c.floatingPointState))\nring0.Halt()\nring0.WriteFS(uintptr(regs.Fs_base)) // Reload host segment.\n- ring0.LoadFloatingPoint(bytePtr(uintptr(regs.Gs_base)))\n}\n-// bluepillException handles kernel exceptions.\n+// KernelException handles kernel exceptions.\n//\n//go:nosplit\n-func bluepillException(vector ring0.Vector) {\n- regs := ring0.Current().Registers()\n+func (c *vCPU) KernelException(vector ring0.Vector) {\n+ regs := c.Registers()\nif vector == ring0.Vector(bounce) {\n// These should not interrupt kernel execution; point the Rip\n// to zero to ensure that we get a reasonable panic when we\n- // attempt to return.\n+ // attempt to return and a full stack trace.\nregs.Rip = 0\n}\n- ring0.SaveFloatingPoint(bytePtr(uintptr(regs.Gs_base)))\n+ // See above.\n+ ring0.SaveFloatingPoint((*byte)(c.floatingPointState))\nring0.Halt()\nring0.WriteFS(uintptr(regs.Fs_base)) // Reload host segment.\n- ring0.LoadFloatingPoint(bytePtr(uintptr(regs.Gs_base)))\n}\n// bluepillArchExit is called during bluepillEnter.\n@@ -142,4 +133,9 @@ func bluepillArchExit(c *vCPU, context *arch.SignalContext64) {\ncontext.Rsp = regs.Rsp\ncontext.Rip = regs.Rip\ncontext.Eflags = regs.Eflags\n+\n+ // Set the context pointer to the saved floating point state. 
This is\n+ // where the guest data has been serialized, the kernel will restore\n+ // from this new pointer value.\n+ context.Fpstate = uint64(uintptrValue((*byte)(c.floatingPointState)))\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go", "new_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go", "diff": "@@ -37,6 +37,13 @@ func bytePtr(addr uintptr) *byte {\nreturn (*byte)(unsafe.Pointer(addr))\n}\n+// uintptrValue returns a uintptr for the given address.\n+//\n+//go:nosplit\n+func uintptrValue(addr *byte) uintptr {\n+ return (uintptr)(unsafe.Pointer(addr))\n+}\n+\n// bluepillHandler is called from the signal stub.\n//\n// The world may be stopped while this is executing, and it executes on the\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine.go", "new_path": "pkg/sentry/platform/kvm/machine.go", "diff": "@@ -142,9 +142,7 @@ func (m *machine) newVCPU() *vCPU {\nfd: int(fd),\nmachine: m,\n}\n- c.CPU.Init(&m.kernel)\n- c.CPU.KernelSyscall = bluepillSyscall\n- c.CPU.KernelException = bluepillException\n+ c.CPU.Init(&m.kernel, c)\nm.vCPUsByID[c.id] = c\n// Ensure the signal mask is correct.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine_amd64.go", "new_path": "pkg/sentry/platform/kvm/machine_amd64.go", "diff": "@@ -63,6 +63,10 @@ type vCPUArchState struct {\n//\n// This starts above fixedKernelPCID.\nPCIDs *pagetables.PCIDs\n+\n+ // floatingPointState is the floating point state buffer used in guest\n+ // to host transitions. See usage in bluepill_amd64.go.\n+ floatingPointState *arch.FloatingPointData\n}\nconst (\n@@ -149,6 +153,12 @@ func (c *vCPU) initArchState() error {\nreturn err\n}\n+ // Allocate some floating point state save area for the local vCPU.\n+ // This will be saved prior to leaving the guest, and we restore from\n+ // this always. 
We cannot use the pointer in the context alone because\n+ // we don't know how large the area there is in reality.\n+ c.floatingPointState = arch.NewFloatingPointData()\n+\n// Set the time offset to the host native time.\nreturn c.setSystemTime()\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ring0/defs.go", "new_path": "pkg/sentry/platform/ring0/defs.go", "diff": "@@ -38,6 +38,33 @@ type Kernel struct {\nKernelArchState\n}\n+// Hooks are hooks for kernel functions.\n+type Hooks interface {\n+ // KernelSyscall is called for kernel system calls.\n+ //\n+ // Return from this call will restore registers and return to the kernel: the\n+ // registers must be modified directly.\n+ //\n+ // If this function is not provided, a kernel exception results in halt.\n+ //\n+ // This must be go:nosplit, as this will be on the interrupt stack.\n+ // Closures are permitted, as the pointer to the closure frame is not\n+ // passed on the stack.\n+ KernelSyscall()\n+\n+ // KernelException handles an exception during kernel execution.\n+ //\n+ // Return from this call will restore registers and return to the kernel: the\n+ // registers must be modified directly.\n+ //\n+ // If this function is not provided, a kernel exception results in halt.\n+ //\n+ // This must be go:nosplit, as this will be on the interrupt stack.\n+ // Closures are permitted, as the pointer to the closure frame is not\n+ // passed on the stack.\n+ KernelException(Vector)\n+}\n+\n// CPU is the per-CPU struct.\ntype CPU struct {\n// self is a self reference.\n@@ -58,29 +85,8 @@ type CPU struct {\n// calls and exceptions via the Registers function.\nregisters syscall.PtraceRegs\n- // KernelException handles an exception during kernel execution.\n- //\n- // Return from this call will restore registers and return to the kernel: the\n- // registers must be modified directly.\n- //\n- // If this function is not provided, a kernel exception results in halt.\n- //\n- // This must be go:nosplit, as this will be on the interrupt stack.\n- // Closures are permitted, as the pointer to the closure frame is not\n- // passed on the stack.\n- KernelException func(Vector)\n-\n- // KernelSyscall is called for kernel system calls.\n- //\n- // Return from this call will restore registers and return to the kernel: the\n- // registers must be modified directly.\n- //\n- // If this function is not provided, a kernel exception results in halt.\n- //\n- // This must be go:nosplit, as this will be on the interrupt stack.\n- // Closures are permitted, as the pointer to the closure frame is not\n- // passed on the stack.\n- KernelSyscall func()\n+ // hooks are kernel hooks.\n+ hooks Hooks\n}\n// Registers returns a modifiable-copy of the kernel registers.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ring0/kernel.go", "new_path": "pkg/sentry/platform/ring0/kernel.go", "diff": "@@ -26,31 +26,41 @@ func (k *Kernel) Init(opts KernelOpts) {\n// Halt halts execution.\nfunc Halt()\n-// Current returns the current CPU.\n+// defaultHooks implements hooks.\n+type defaultHooks struct{}\n+\n+// KernelSyscall implements Hooks.KernelSyscall.\n+//\n+//go:nosplit\n+func (defaultHooks) KernelSyscall() { Halt() }\n+\n+// KernelException implements Hooks.KernelException.\n//\n-// Its use is only legal in the KernelSyscall and KernelException contexts,\n-// which must all be guarded go:nosplit.\n-func Current() *CPU\n+//go:nosplit\n+func (defaultHooks) KernelException(Vector) { Halt() }\n-// defaultSyscall is the default syscall hook.\n+// 
kernelSyscall is a trampoline.\n//\n//go:nosplit\n-func defaultSyscall() { Halt() }\n+func kernelSyscall(c *CPU) { c.hooks.KernelSyscall() }\n-// defaultException is the default exception hook.\n+// kernelException is a trampoline.\n//\n//go:nosplit\n-func defaultException(Vector) { Halt() }\n+func kernelException(c *CPU, vector Vector) { c.hooks.KernelException(vector) }\n// Init initializes a new CPU.\n//\n// Init allows embedding in other objects.\n-func (c *CPU) Init(k *Kernel) {\n+func (c *CPU) Init(k *Kernel, hooks Hooks) {\nc.self = c // Set self reference.\nc.kernel = k // Set kernel reference.\nc.init() // Perform architectural init.\n- // Defaults.\n- c.KernelSyscall = defaultSyscall\n- c.KernelException = defaultException\n+ // Require hooks.\n+ if hooks != nil {\n+ c.hooks = hooks\n+ } else {\n+ c.hooks = defaultHooks{}\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ring0/kernel_amd64.go", "new_path": "pkg/sentry/platform/ring0/kernel_amd64.go", "diff": "@@ -204,7 +204,7 @@ func (c *CPU) SwitchToUser(switchOpts SwitchOpts) (vector Vector) {\nfunc start(c *CPU) {\n// Save per-cpu & FS segment.\nWriteGS(kernelAddr(c))\n- WriteFS(uintptr(c.Registers().Fs_base))\n+ WriteFS(uintptr(c.registers.Fs_base))\n// Initialize floating point.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ring0/offsets_amd64.go", "new_path": "pkg/sentry/platform/ring0/offsets_amd64.go", "diff": "@@ -34,8 +34,6 @@ func Emit(w io.Writer) {\nfmt.Fprintf(w, \"#define CPU_STACK_TOP 0x%02x\\n\", reflect.ValueOf(&c.stack[0]).Pointer()-reflect.ValueOf(c).Pointer()+uintptr(len(c.stack)))\nfmt.Fprintf(w, \"#define CPU_ERROR_CODE 0x%02x\\n\", reflect.ValueOf(&c.errorCode).Pointer()-reflect.ValueOf(c).Pointer())\nfmt.Fprintf(w, \"#define CPU_ERROR_TYPE 0x%02x\\n\", reflect.ValueOf(&c.errorType).Pointer()-reflect.ValueOf(c).Pointer())\n- fmt.Fprintf(w, \"#define CPU_KERNEL_EXCEPTION 0x%02x\\n\", reflect.ValueOf(&c.KernelException).Pointer()-reflect.ValueOf(c).Pointer())\n- fmt.Fprintf(w, \"#define CPU_KERNEL_SYSCALL 0x%02x\\n\", reflect.ValueOf(&c.KernelSyscall).Pointer()-reflect.ValueOf(c).Pointer())\nfmt.Fprintf(w, \"\\n// Bits.\\n\")\nfmt.Fprintf(w, \"#define _RFLAGS_IF 0x%02x\\n\", _RFLAGS_IF)\n" } ]
Go
Apache License 2.0
google/gvisor
kvm: simplify floating point logic. This reduces the number of floating point save/restore cycles required (since we don't need to restore immediately following the switch, this always happens in a known context) and allows the kernel hooks to capture state. This lets us remove calls like "Current()". PiperOrigin-RevId: 219552844 Change-Id: I7676fa2f6c18b9919718458aa888b832a7db8cab
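The commit above also replaces the per-CPU KernelSyscall/KernelException function fields with a Hooks interface plus a default implementation. A compact, hypothetical sketch of that interface-with-default-hooks pattern (types and names invented, not the real ring0 API):

package main

import "fmt"

// Hooks groups the callbacks a CPU-like object needs.
type Hooks interface {
    OnSyscall()
    OnException(vector int)
}

// defaultHooks provides safe fallbacks when no hooks are supplied.
type defaultHooks struct{}

func (defaultHooks) OnSyscall()             { fmt.Println("default: halt") }
func (defaultHooks) OnException(vector int) { fmt.Println("default: halt on vector", vector) }

// CPU keeps a single hooks value instead of separate function fields.
type CPU struct {
    hooks Hooks
}

// Init falls back to defaultHooks when the caller passes nil.
func (c *CPU) Init(h Hooks) {
    if h == nil {
        h = defaultHooks{}
    }
    c.hooks = h
}

func main() {
    var c CPU
    c.Init(nil)
    c.hooks.OnSyscall()
    c.hooks.OnException(14)
}

Grouping the callbacks behind one interface lets the embedding object (here, the kvm vCPU) implement the hooks directly and capture its own state, which is what removes the need for a Current()-style lookup.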
259,854
31.10.2018 18:21:11
25,200
eeddae1199d9b84ee84011be0019328724ebdcf3
Use syserr style error translation in netstack's rawfile. Replacing map lookups with slice indexing is higher performance.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/rawfile/errors.go", "new_path": "pkg/tcpip/link/rawfile/errors.go", "diff": "package rawfile\nimport (\n+ \"fmt\"\n\"syscall\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n)\n-var translations = map[syscall.Errno]*tcpip.Error{\n- syscall.EEXIST: tcpip.ErrDuplicateAddress,\n- syscall.ENETUNREACH: tcpip.ErrNoRoute,\n- syscall.EINVAL: tcpip.ErrInvalidEndpointState,\n- syscall.EALREADY: tcpip.ErrAlreadyConnecting,\n- syscall.EISCONN: tcpip.ErrAlreadyConnected,\n- syscall.EADDRINUSE: tcpip.ErrPortInUse,\n- syscall.EADDRNOTAVAIL: tcpip.ErrBadLocalAddress,\n- syscall.EPIPE: tcpip.ErrClosedForSend,\n- syscall.EWOULDBLOCK: tcpip.ErrWouldBlock,\n- syscall.ECONNREFUSED: tcpip.ErrConnectionRefused,\n- syscall.ETIMEDOUT: tcpip.ErrTimeout,\n- syscall.EINPROGRESS: tcpip.ErrConnectStarted,\n- syscall.EDESTADDRREQ: tcpip.ErrDestinationRequired,\n- syscall.ENOTSUP: tcpip.ErrNotSupported,\n- syscall.ENOTTY: tcpip.ErrQueueSizeNotSupported,\n- syscall.ENOTCONN: tcpip.ErrNotConnected,\n- syscall.ECONNRESET: tcpip.ErrConnectionReset,\n- syscall.ECONNABORTED: tcpip.ErrConnectionAborted,\n- syscall.EMSGSIZE: tcpip.ErrMessageTooLong,\n- syscall.ENOBUFS: tcpip.ErrNoBufferSpace,\n-}\n+const maxErrno = 134\n+\n+var translations [maxErrno]*tcpip.Error\n// TranslateErrno translate an errno from the syscall package into a\n// *tcpip.Error.\n//\n-// Not all errnos are supported and this function will panic on unreconized\n-// errnos.\n+// Valid, but unreconigized errnos will be translated to\n+// tcpip.ErrInvalidEndpointState (EINVAL). Panics on invalid errnos.\nfunc TranslateErrno(e syscall.Errno) *tcpip.Error {\n- if err, ok := translations[e]; ok {\n+ if err := translations[e]; err != nil {\nreturn err\n}\nreturn tcpip.ErrInvalidEndpointState\n}\n+\n+func addTranslation(host syscall.Errno, trans *tcpip.Error) {\n+ if translations[host] != nil {\n+ panic(fmt.Sprintf(\"duplicate translation for host errno %q (%d)\", host.Error(), host))\n+ }\n+ translations[host] = trans\n+}\n+\n+func init() {\n+ addTranslation(syscall.EEXIST, tcpip.ErrDuplicateAddress)\n+ addTranslation(syscall.ENETUNREACH, tcpip.ErrNoRoute)\n+ addTranslation(syscall.EINVAL, tcpip.ErrInvalidEndpointState)\n+ addTranslation(syscall.EALREADY, tcpip.ErrAlreadyConnecting)\n+ addTranslation(syscall.EISCONN, tcpip.ErrAlreadyConnected)\n+ addTranslation(syscall.EADDRINUSE, tcpip.ErrPortInUse)\n+ addTranslation(syscall.EADDRNOTAVAIL, tcpip.ErrBadLocalAddress)\n+ addTranslation(syscall.EPIPE, tcpip.ErrClosedForSend)\n+ addTranslation(syscall.EWOULDBLOCK, tcpip.ErrWouldBlock)\n+ addTranslation(syscall.ECONNREFUSED, tcpip.ErrConnectionRefused)\n+ addTranslation(syscall.ETIMEDOUT, tcpip.ErrTimeout)\n+ addTranslation(syscall.EINPROGRESS, tcpip.ErrConnectStarted)\n+ addTranslation(syscall.EDESTADDRREQ, tcpip.ErrDestinationRequired)\n+ addTranslation(syscall.ENOTSUP, tcpip.ErrNotSupported)\n+ addTranslation(syscall.ENOTTY, tcpip.ErrQueueSizeNotSupported)\n+ addTranslation(syscall.ENOTCONN, tcpip.ErrNotConnected)\n+ addTranslation(syscall.ECONNRESET, tcpip.ErrConnectionReset)\n+ addTranslation(syscall.ECONNABORTED, tcpip.ErrConnectionAborted)\n+ addTranslation(syscall.EMSGSIZE, tcpip.ErrMessageTooLong)\n+ addTranslation(syscall.ENOBUFS, tcpip.ErrNoBufferSpace)\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Use syserr style error translation in netstack's rawfile Replacing map lookups with slice indexing is higher performance. PiperOrigin-RevId: 219569901 Change-Id: I9b7cd22abd4b95383025edbd5a80d1c1a4496936
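The diff above replaces the map[syscall.Errno]*tcpip.Error with a fixed-size array indexed by the errno value. A rough standalone sketch of that technique, using generic error values instead of tcpip errors (the bounds check here is an extra safety net, not part of the original):

package main

import (
    "fmt"
    "syscall"
)

const maxErrno = 134

// translations maps host errnos to application-level errors; a nil entry
// means "unrecognized".
var translations [maxErrno]error

// addTranslation registers one errno and panics on duplicates, mirroring the
// init-time registration style used in the commit.
func addTranslation(host syscall.Errno, trans error) {
    if translations[host] != nil {
        panic(fmt.Sprintf("duplicate translation for errno %d", host))
    }
    translations[host] = trans
}

func init() {
    addTranslation(syscall.EINVAL, fmt.Errorf("invalid endpoint state"))
    addTranslation(syscall.EWOULDBLOCK, fmt.Errorf("operation would block"))
}

// translate falls back to a generic error for valid but unregistered errnos.
func translate(e syscall.Errno) error {
    if int(e) < maxErrno {
        if err := translations[e]; err != nil {
            return err
        }
    }
    return fmt.Errorf("invalid endpoint state")
}

func main() {
    fmt.Println(translate(syscall.EWOULDBLOCK))
}

Indexing a small dense array avoids hashing and map bucket lookups on every host syscall error, which is the performance win the message refers to.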
259,854
31.10.2018 18:41:48
25,200
59b7766af7c78f330d09044e68bb195e495993ea
Fix a race where keepalives could be sent while there is pending data
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/BUILD", "new_path": "pkg/tcpip/transport/tcp/BUILD", "diff": "@@ -62,8 +62,6 @@ go_test(\n\"tcp_test.go\",\n\"tcp_timestamp_test.go\",\n],\n- # FIXME\n- tags = [\"flaky\"],\ndeps = [\n\":tcp\",\n\"//pkg/tcpip\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/connect.go", "new_path": "pkg/tcpip/transport/tcp/connect.go", "diff": "@@ -829,6 +829,13 @@ func (e *endpoint) resetKeepaliveTimer(receivedData bool) {\n}\n}\n+// disableKeepaliveTimer stops the keepalive timer.\n+func (e *endpoint) disableKeepaliveTimer() {\n+ e.keepalive.Lock()\n+ e.keepalive.timer.disable()\n+ e.keepalive.Unlock()\n+}\n+\n// protocolMainLoop is the main loop of the TCP protocol. It runs in its own\n// goroutine and is responsible for sending segments and handling received\n// segments.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/snd.go", "new_path": "pkg/tcpip/transport/tcp/snd.go", "diff": "@@ -405,6 +405,7 @@ func (s *sender) sendData() {\n// eventually.\nvar seg *segment\nend := s.sndUna.Add(s.sndWnd)\n+ var dataSent bool\nfor seg = s.writeNext; seg != nil && s.outstanding < s.sndCwnd; seg = seg.Next() {\n// We abuse the flags field to determine if we have already\n// assigned a sequence number to this segment.\n@@ -448,6 +449,12 @@ func (s *sender) sendData() {\nsegEnd = seg.sequenceNumber.Add(seqnum.Size(seg.data.Size()))\n}\n+ if !dataSent {\n+ dataSent = true\n+ // We are sending data, so we should stop the keepalive timer to\n+ // ensure that no keepalives are sent while there is pending data.\n+ s.ep.disableKeepaliveTimer()\n+ }\ns.sendSegment(seg.data, seg.flags, seg.sequenceNumber)\n// Update sndNxt if we actually sent new data (as opposed to\n" } ]
Go
Apache License 2.0
google/gvisor
Fix a race where keepalives could be sent while there is pending data PiperOrigin-RevId: 219571556 Change-Id: I5a1042c1cb05eb2711eb01627fd298bad6c543a6
259,891
01.11.2018 10:35:04
25,200
a4cc93c7bf40679e62a2b0eaa2419a4a9536cc14
Close http.Response.Body after Get request. From "When err is nil, resp always contains a non-nil resp.Body. Caller should close resp.Body when done reading from it."
[ { "change_type": "MODIFY", "old_path": "runsc/test/testutil/testutil.go", "new_path": "runsc/test/testutil/testutil.go", "diff": "@@ -230,9 +230,13 @@ func Poll(cb func() error, timeout time.Duration) error {\n// WaitForHTTP tries GET requests on a port until the call succeeds or timeout.\nfunc WaitForHTTP(port int, timeout time.Duration) error {\ncb := func() error {\n- _, err := http.Get(fmt.Sprintf(\"http://localhost:%d/\", port))\n+ resp, err := http.Get(fmt.Sprintf(\"http://localhost:%d/\", port))\n+ if err != nil {\nreturn err\n}\n+ resp.Body.Close()\n+ return nil\n+ }\nreturn Poll(cb, timeout)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Close http.Response.Body after Get request. From https://golang.org/pkg/net/http/#Get: "When err is nil, resp always contains a non-nil resp.Body. Caller should close resp.Body when done reading from it." PiperOrigin-RevId: 219658052 Change-Id: I556e88ac4f2c90cd36ab16cd3163d1a52afc32b7
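The fix above follows the net/http contract that an error-free Get always returns a non-nil Body the caller must close. A minimal usage sketch, assuming a placeholder URL:

package main

import (
    "fmt"
    "io"
    "net/http"
)

// fetchStatus issues a GET and reports the status code, closing the body
// even when the response content is not needed.
func fetchStatus(url string) (int, error) {
    resp, err := http.Get(url)
    if err != nil {
        return 0, err
    }
    // Always close the body so the underlying connection is not leaked.
    defer resp.Body.Close()

    // Draining the body is optional but helps connection reuse.
    io.Copy(io.Discard, resp.Body)
    return resp.StatusCode, nil
}

func main() {
    status, err := fetchStatus("http://localhost:8080/")
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    fmt.Println("status:", status)
}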
259,968
01.11.2018 11:57:09
25,200
b23cd33682a9a8bd727fa45b8424eb55d91c3086
modify modeRegexp to adapt the default spec of containerd. The mode=755 didn't match the pattern modeRegexp = regexp.MustCompile("0[0-7][0-7][0-7]"). Closes
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/tmpfs/fs.go", "new_path": "pkg/sentry/fs/tmpfs/fs.go", "diff": "@@ -44,7 +44,7 @@ const (\n)\n// modeRegexp is the expected format of the mode option.\n-var modeRegexp = regexp.MustCompile(\"0[0-7][0-7][0-7]\")\n+var modeRegexp = regexp.MustCompile(\"^0?[0-7][0-7][0-7]$\")\n// Filesystem is a tmpfs.\n//\n" } ]
Go
Apache License 2.0
google/gvisor
modify modeRegexp to adapt the default spec of containerd https://github.com/containerd/containerd/blob/master/oci/spec.go#L206, the mode=755 didn't match the pattern modeRegexp = regexp.MustCompile("0[0-7][0-7][0-7]"). Closes #112 Signed-off-by: Juan <[email protected]> Change-Id: I469e0a68160a1278e34c9e1dbe4b7784c6f97e5a PiperOrigin-RevId: 219672525
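The one-line fix makes the leading zero optional and anchors the expression: Go regexp matching is unanchored, and the old pattern required a literal leading 0, so "755" never matched. A quick, standalone check of both patterns (illustrative only):

package main

import (
    "fmt"
    "regexp"
)

func main() {
    oldRe := regexp.MustCompile("0[0-7][0-7][0-7]")
    newRe := regexp.MustCompile("^0?[0-7][0-7][0-7]$")

    for _, mode := range []string{"755", "0755", "01755"} {
        fmt.Printf("%-6s old=%-5v new=%v\n", mode, oldRe.MatchString(mode), newRe.MatchString(mode))
    }
    // Output:
    // 755    old=false new=true
    // 0755   old=true  new=true
    // 01755  old=true  new=false
}

The anchored form both accepts the zero-less mode strings containerd emits and rejects strings that merely contain an octal-looking substring.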
259,884
01.11.2018 17:39:20
25,200
9d69d85bc13d4f0956a39951b5cd6777f938cffd
Make error messages a bit more user friendly. Updated error messages so that it doesn't print full Go struct representations when running a new container in a sandbox. For example, this occurs frequently when commands are not found when doing a 'kubectl exec'.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/control/proc.go", "new_path": "pkg/sentry/control/proc.go", "diff": "@@ -19,6 +19,7 @@ import (\n\"encoding/json\"\n\"fmt\"\n\"sort\"\n+ \"strings\"\n\"text/tabwriter\"\n\"time\"\n@@ -88,6 +89,16 @@ type ExecArgs struct {\nContainerID string\n}\n+// String prints the arguments as a string.\n+func (args ExecArgs) String() string {\n+ a := make([]string, len(args.Argv))\n+ copy(a, args.Argv)\n+ if args.Filename != \"\" {\n+ a[0] = args.Filename\n+ }\n+ return strings.Join(a, \" \")\n+}\n+\n// Exec runs a new task.\nfunc (proc *Proc) Exec(args *ExecArgs, waitStatus *uint32) error {\nnewTG, _, _, err := proc.execAsync(args)\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -618,7 +618,7 @@ func (l *Loader) executeAsync(args *control.ExecArgs) (kernel.ThreadID, error) {\nep, ok := l.processes[rootKey]\nl.mu.Unlock()\nif !ok {\n- return 0, fmt.Errorf(\"cannot exec in container %q: no such container\", args.ContainerID)\n+ return 0, fmt.Errorf(\"no such container: %q\", args.ContainerID)\n}\nep.tg.Leader().WithMuLocked(func(t *kernel.Task) {\nargs.Root = t.FSContext().RootDirectory()\n@@ -631,7 +631,7 @@ func (l *Loader) executeAsync(args *control.ExecArgs) (kernel.ThreadID, error) {\nproc := control.Proc{Kernel: l.k}\ntg, tgid, ttyFile, err := control.ExecAsync(&proc, args)\nif err != nil {\n- return 0, fmt.Errorf(\"error executing: %+v: %v\", args, err)\n+ return 0, err\n}\n// Insert the process into processes so that we can wait on it\n" }, { "change_type": "MODIFY", "old_path": "runsc/sandbox/sandbox.go", "new_path": "runsc/sandbox/sandbox.go", "diff": "@@ -229,7 +229,7 @@ func (s *Sandbox) Execute(args *control.ExecArgs) (int32, error) {\n// Send a message to the sandbox control server to start the container.\nvar pid int32\nif err := conn.Call(boot.ContainerExecuteAsync, args, &pid); err != nil {\n- return 0, fmt.Errorf(\"error executing in sandbox: %v\", err)\n+ return 0, fmt.Errorf(\"error executing command %q in sandbox: %v\", args, err)\n}\nreturn pid, nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Make error messages a bit more user friendly. Updated error messages so that it doesn't print full Go struct representations when running a new container in a sandbox. For example, this occurs frequently when commands are not found when doing a 'kubectl exec'. PiperOrigin-RevId: 219729141 Change-Id: Ic3a7bc84cd7b2167f495d48a1da241d621d3ca09
259,992
01.11.2018 17:43:50
25,200
b6b81fd04ba93db3268ff649c9d23a25c9b89db5
Add new log format that is compatible with Kubernetes. Fluentd configuration uses 'log' for the log message while containerd uses 'msg'. Since we can't have a single JSON format for both, add another log format and make the debug log format configurable.
[ { "change_type": "MODIFY", "old_path": "pkg/log/BUILD", "new_path": "pkg/log/BUILD", "diff": "@@ -8,6 +8,7 @@ go_library(\n\"glog.go\",\n\"glog_unsafe.go\",\n\"json.go\",\n+ \"json_k8s.go\",\n\"log.go\",\n],\nimportpath = \"gvisor.googlesource.com/gvisor/pkg/log\",\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/log/json_k8s.go", "diff": "+// Copyright 2018 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package log\n+\n+import (\n+ \"encoding/json\"\n+ \"fmt\"\n+ \"time\"\n+)\n+\n+type k8sJSONLog struct {\n+ Log string `json:\"log\"`\n+ Level Level `json:\"level\"`\n+ Time time.Time `json:\"time\"`\n+}\n+\n+// K8sJSONEmitter logs messages in json format that is compatible with\n+// Kubernetes fluent configuration.\n+type K8sJSONEmitter struct {\n+ Writer\n+}\n+\n+// Emit implements Emitter.Emit.\n+func (e K8sJSONEmitter) Emit(level Level, timestamp time.Time, format string, v ...interface{}) {\n+ j := k8sJSONLog{\n+ Log: fmt.Sprintf(format, v...),\n+ Level: level,\n+ Time: timestamp,\n+ }\n+ b, err := json.Marshal(j)\n+ if err != nil {\n+ panic(err)\n+ }\n+ e.Writer.Write(b)\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/compat.go", "new_path": "runsc/boot/compat.go", "diff": "@@ -66,7 +66,7 @@ func newCompatEmitter(logFD int) (*compatEmitter, error) {\nif logFD > 0 {\nf := os.NewFile(uintptr(logFD), \"user log file\")\n- target := log.MultiEmitter{c.sink, log.GoogleEmitter{&log.Writer{Next: f}}}\n+ target := log.MultiEmitter{c.sink, log.K8sJSONEmitter{log.Writer{Next: f}}}\nc.sink = &log.BasicLogger{Level: log.Info, Emitter: target}\n}\nreturn c, nil\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/config.go", "new_path": "runsc/boot/config.go", "diff": "@@ -157,12 +157,15 @@ type Config struct {\n// LogFilename is the filename to log to, if not empty.\nLogFilename string\n- // LogFormat is the log format, \"text\" or \"json\".\n+ // LogFormat is the log format.\nLogFormat string\n// DebugLog is the path to log debug information to, if not empty.\nDebugLog string\n+ // DebugLogFormat is the log format for debug.\n+ DebugLogFormat string\n+\n// FileAccess indicates how the filesystem is accessed.\nFileAccess FileAccessType\n@@ -214,6 +217,7 @@ func (c *Config) ToFlags() []string {\n\"--log=\" + c.LogFilename,\n\"--log-format=\" + c.LogFormat,\n\"--debug-log=\" + c.DebugLog,\n+ \"--debug-log-format=\" + c.DebugLogFormat,\n\"--file-access=\" + c.FileAccess.String(),\n\"--overlay=\" + strconv.FormatBool(c.Overlay),\n\"--network=\" + c.Network.String(),\n" }, { "change_type": "MODIFY", "old_path": "runsc/main.go", "new_path": "runsc/main.go", "diff": "@@ -38,7 +38,7 @@ var (\n// Docker, and thus should not be changed.\nrootDir = flag.String(\"root\", \"\", \"root directory for storage of container state\")\nlogFilename = flag.String(\"log\", \"\", \"file path where internal debug information is written, default is stdout\")\n- logFormat = flag.String(\"log-format\", \"text\", \"log format: text (default) or 
json\")\n+ logFormat = flag.String(\"log-format\", \"text\", \"log format: text (default), json, or json-k8s\")\ndebug = flag.Bool(\"debug\", false, \"enable debug logging\")\n// These flags are unique to runsc, and are used to configure parts of the\n@@ -49,6 +49,7 @@ var (\nlogPackets = flag.Bool(\"log-packets\", false, \"enable network packet logging\")\nlogFD = flag.Int(\"log-fd\", -1, \"file descriptor to log to. If set, the 'log' flag is ignored.\")\ndebugLogFD = flag.Int(\"debug-log-fd\", -1, \"file descriptor to write debug logs to. If set, the 'debug-log-dir' flag is ignored.\")\n+ debugLogFormat = flag.String(\"debug-log-format\", \"text\", \"log format: text (default), json, or json-k8s\")\n// Debugging flags: strace related\nstrace = flag.Bool(\"strace\", false, \"enable strace\")\n@@ -133,6 +134,7 @@ func main() {\nLogFilename: *logFilename,\nLogFormat: *logFormat,\nDebugLog: *debugLog,\n+ DebugLogFormat: *debugLogFormat,\nFileAccess: fsAccess,\nOverlay: *overlay,\nNetwork: netType,\n@@ -166,15 +168,7 @@ func main() {\nlogFile = f\n}\n- var e log.Emitter\n- switch *logFormat {\n- case \"text\":\n- e = log.GoogleEmitter{&log.Writer{Next: logFile}}\n- case \"json\":\n- e = log.JSONEmitter{log.Writer{Next: logFile}}\n- default:\n- cmd.Fatalf(\"invalid log format %q, must be 'json' or 'text'\", *logFormat)\n- }\n+ e := newEmitter(*logFormat, logFile)\nsubcommand := flag.CommandLine.Arg(0)\nif *debugLogFD > -1 {\n@@ -195,13 +189,13 @@ func main() {\ncmd.Fatalf(\"error dup'ing fd %d to stderr: %v\", f.Fd(), err)\n}\n- e = log.MultiEmitter{e, log.GoogleEmitter{&log.Writer{Next: f}}}\n+ e = log.MultiEmitter{e, newEmitter(*debugLogFormat, f)}\n} else if *debugLog != \"\" {\nf, err := specutils.DebugLogFile(*debugLog, subcommand)\nif err != nil {\ncmd.Fatalf(\"error opening debug log file in %q: %v\", *debugLog, err)\n}\n- e = log.MultiEmitter{e, log.GoogleEmitter{&log.Writer{Next: f}}}\n+ e = log.MultiEmitter{e, newEmitter(*debugLogFormat, f)}\n}\nlog.SetTarget(e)\n@@ -236,6 +230,19 @@ func main() {\nos.Exit(128)\n}\n+func newEmitter(format string, logFile io.Writer) log.Emitter {\n+ switch format {\n+ case \"text\":\n+ return &log.GoogleEmitter{&log.Writer{Next: logFile}}\n+ case \"json\":\n+ return &log.JSONEmitter{log.Writer{Next: logFile}}\n+ case \"json-k8s\":\n+ return &log.K8sJSONEmitter{log.Writer{Next: logFile}}\n+ }\n+ cmd.Fatalf(\"invalid log format %q, must be 'text', 'json', or 'json-k8s'\", format)\n+ panic(\"unreachable\")\n+}\n+\nfunc init() {\n// Set default root dir to something (hopefully) user-writeable.\n*rootDir = \"/var/run/runsc\"\n" } ]
Go
Apache License 2.0
google/gvisor
Add new log format that is compatible with Kubernetes Fluentd configuration uses 'log' for the log message while containerd uses 'msg'. Since we can't have a single JSON format for both, add another log format and make debug log configurable. PiperOrigin-RevId: 219729658 Change-Id: I2a6afc4034d893ab90bafc63b394c4fb62b2a7a0
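Editor's note: a minimal sketch of the line shape the new json-k8s format produces, with the message under the "log" key that the Fluentd configuration expects. Assumptions: the level is shown as a plain string here, whereas the real emitter marshals a log.Level value, and the message and timestamp are invented:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// k8sLine mirrors the shape of k8sJSONLog from the change above.
type k8sLine struct {
	Log   string    `json:"log"`
	Level string    `json:"level"` // simplified: the real field is a log.Level
	Time  time.Time `json:"time"`
}

func main() {
	b, err := json.Marshal(k8sLine{
		Log:   "sandbox started", // hypothetical message
		Level: "info",
		Time:  time.Date(2018, 11, 1, 17, 43, 50, 0, time.UTC),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// Prints: {"log":"sandbox started","level":"info","time":"2018-11-01T17:43:50Z"}
}
```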
259,992
01.11.2018 17:51:22
25,200
5cd55cd90fd5a32685807a57617cde6f5f76d22b
Use spec with clean paths for gofer. Otherwise the gofer's attach point may be different from the sandbox's when there are symlinks in the path.
[ { "change_type": "MODIFY", "old_path": "runsc/boot/loader_test.go", "new_path": "runsc/boot/loader_test.go", "diff": "@@ -77,8 +77,11 @@ func startGofer(root string) (int, func(), error) {\nsyscall.Close(goferEnd)\nreturn 0, nil, fmt.Errorf(\"error creating server on FD %d: %v\", goferEnd, err)\n}\n+ at, err := fsgofer.NewAttachPoint(root, fsgofer.Config{ROMount: true})\n+ if err != nil {\n+ return 0, nil, err\n+ }\ngo func() {\n- at := fsgofer.NewAttachPoint(root, fsgofer.Config{ROMount: true})\ns := p9.NewServer(at)\nif err := s.Handle(socket); err != nil {\nlog.Infof(\"Gofer is stopping. FD: %d, err: %v\\n\", goferEnd, err)\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/gofer.go", "new_path": "runsc/cmd/gofer.go", "diff": "@@ -99,7 +99,12 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\npanic(\"unreachable\")\n}\n- spec, err := specutils.ReadSpec(g.bundleDir)\n+ specFile, err := specutils.OpenCleanSpec(g.bundleDir)\n+ if err != nil {\n+ Fatalf(\"error opening spec: %v\", err)\n+ }\n+ spec, err := specutils.ReadSpecFromFile(g.bundleDir, specFile)\n+ specFile.Close()\nif err != nil {\nFatalf(\"error reading spec: %v\", err)\n}\n@@ -121,10 +126,14 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n// Start with root mount, then add any other additional mount as needed.\nats := make([]p9.Attacher, 0, len(spec.Mounts)+1)\n- ats = append(ats, fsgofer.NewAttachPoint(\"/\", fsgofer.Config{\n+ ap, err := fsgofer.NewAttachPoint(\"/\", fsgofer.Config{\nROMount: spec.Root.Readonly,\nPanicOnWrite: g.panicOnWrite,\n- }))\n+ })\n+ if err != nil {\n+ Fatalf(\"Error creating attach point: %v\", err)\n+ }\n+ ats = append(ats, ap)\nlog.Infof(\"Serving %q mapped to %q on FD %d (ro: %t)\", \"/\", root, g.ioFDs[0], spec.Root.Readonly)\nmountIdx := 1 // first one is the root\n@@ -134,7 +143,11 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nROMount: isReadonlyMount(m.Options),\nPanicOnWrite: g.panicOnWrite,\n}\n- ats = append(ats, fsgofer.NewAttachPoint(m.Destination, cfg))\n+ ap, err := fsgofer.NewAttachPoint(m.Destination, cfg)\n+ if err != nil {\n+ Fatalf(\"Error creating attach point: %v\", err)\n+ }\n+ ats = append(ats, ap)\nif mountIdx >= len(g.ioFDs) {\nFatalf(\"No FD found for mount. Did you forget --io-fd? mount: %d, %v\", len(g.ioFDs), m)\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/container.go", "new_path": "runsc/container/container.go", "diff": "@@ -374,6 +374,9 @@ func (c *Container) Start(conf *boot.Config) error {\nreturn fmt.Errorf(\"setup mounts: %v\", err)\n}\nc.Spec.Mounts = cleanMounts\n+ if err := specutils.WriteCleanSpec(c.BundleDir, c.Spec); err != nil {\n+ return fmt.Errorf(\"writing clean spec: %v\", err)\n+ }\n// Create the gofer process.\nioFiles, err := c.createGoferProcess(c.Spec, conf, c.BundleDir)\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer.go", "new_path": "runsc/fsgofer/fsgofer.go", "diff": "@@ -26,6 +26,7 @@ import (\n\"math\"\n\"os\"\n\"path\"\n+ \"path/filepath\"\n\"sync\"\n\"syscall\"\n@@ -99,24 +100,28 @@ type attachPoint struct {\n}\n// NewAttachPoint creates a new attacher that gives local file\n-// access to all files under 'prefix'.\n-func NewAttachPoint(prefix string, c Config) p9.Attacher {\n+// access to all files under 'prefix'. 
'prefix' must be an absolute path.\n+func NewAttachPoint(prefix string, c Config) (p9.Attacher, error) {\n+ // Sanity check the prefix.\n+ if !filepath.IsAbs(prefix) {\n+ return nil, fmt.Errorf(\"attach point prefix must be absolute %q\", prefix)\n+ }\nreturn &attachPoint{\nprefix: prefix,\nconf: c,\ndevices: make(map[uint64]uint8),\n- }\n+ }, nil\n}\n// Attach implements p9.Attacher.\nfunc (a *attachPoint) Attach() (p9.File, error) {\n- // Sanity check the prefix.\n- fi, err := os.Stat(a.prefix)\n+ // dirFD (1st argument) is ignored because 'prefix' is always absolute.\n+ stat, err := statAt(-1, a.prefix)\nif err != nil {\n- return nil, err\n+ return nil, fmt.Errorf(\"stat file %q, err: %v\", a.prefix, err)\n}\nmode := os.O_RDWR\n- if a.conf.ROMount || fi.IsDir() {\n+ if a.conf.ROMount || stat.Mode&syscall.S_IFDIR != 0 {\nmode = os.O_RDONLY\n}\n@@ -125,11 +130,6 @@ func (a *attachPoint) Attach() (p9.File, error) {\nif err != nil {\nreturn nil, fmt.Errorf(\"unable to open file %q, err: %v\", a.prefix, err)\n}\n- stat, err := stat(int(f.Fd()))\n- if err != nil {\n- f.Close()\n- return nil, fmt.Errorf(\"failed to stat file %q, err: %v\", a.prefix, err)\n- }\na.attachedMu.Lock()\ndefer a.attachedMu.Unlock()\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer_test.go", "new_path": "runsc/fsgofer/fsgofer_test.go", "diff": "@@ -80,7 +80,10 @@ func runCustom(t *testing.T, types []fileType, confs []Config, test func(*testin\n}\ndefer os.RemoveAll(path)\n- a := NewAttachPoint(path, c)\n+ a, err := NewAttachPoint(path, c)\n+ if err != nil {\n+ t.Fatalf(\"NewAttachPoint failed: %v\", err)\n+ }\nroot, err := a.Attach()\nif err != nil {\nt.Fatalf(\"Attach failed, err: %v\", err)\n@@ -107,7 +110,10 @@ func setup(ft fileType) (string, string, error) {\n}\n// First attach with writable configuration to setup tree.\n- a := NewAttachPoint(path, Config{})\n+ a, err := NewAttachPoint(path, Config{})\n+ if err != nil {\n+ return \"\", \"\", err\n+ }\nroot, err := a.Attach()\nif err != nil {\nreturn \"\", \"\", fmt.Errorf(\"Attach failed, err: %v\", err)\n@@ -556,7 +562,10 @@ func TestAttachFile(t *testing.T) {\nt.Fatalf(\"os.Create(%q) failed, err: %v\", path, err)\n}\n- a := NewAttachPoint(path, conf)\n+ a, err := NewAttachPoint(path, conf)\n+ if err != nil {\n+ t.Fatalf(\"NewAttachPoint failed: %v\", err)\n+ }\nroot, err := a.Attach()\nif err != nil {\nt.Fatalf(\"Attach failed, err: %v\", err)\n@@ -595,7 +604,10 @@ func TestDoubleAttachError(t *testing.T) {\nt.Fatalf(\"ioutil.TempDir() failed, err: %v\", err)\n}\ndefer os.RemoveAll(root)\n- a := NewAttachPoint(root, conf)\n+ a, err := NewAttachPoint(root, conf)\n+ if err != nil {\n+ t.Fatalf(\"NewAttachPoint failed: %v\", err)\n+ }\nif _, err := a.Attach(); err != nil {\nt.Fatalf(\"Attach failed: %v\", err)\n" } ]
Go
Apache License 2.0
google/gvisor
Use spec with clean paths for gofer Otherwise the gofer's attach point may be different from sandbox when there symlinks in the path. PiperOrigin-RevId: 219730492 Change-Id: Ia9c4c2d16228c6a1a9e790e0cb673fd881003fe1
259,891
01.11.2018 18:28:12
25,200
704b56a40d0a041a4e6f814c3dbb1f9ec15f9002
First crictl integration tests. More tests will come, but it's worth getting what's done so far reviewed.
[ { "change_type": "MODIFY", "old_path": "kokoro/run_tests.sh", "new_path": "kokoro/run_tests.sh", "diff": "@@ -80,7 +80,7 @@ installCrictl() (\nchmod +x ${shim_path}\nsudo -n -E mv ${shim_path} /usr/local/bin\n- # Configure containerd.\n+ # Configure containerd-shim.\nlocal shim_config_path=/etc/containerd\nlocal shim_config_tmp_path=/tmp/gvisor-containerd-shim.toml\nsudo -n -E mkdir -p ${shim_config_path}\n@@ -89,11 +89,14 @@ installCrictl() (\n[runsc_config]\ndebug = \"true\"\n- debug-log = \"/tmp/runsc-log/\"\n+ debug-log = \"/tmp/runsc-logs/\"\nstrace = \"true\"\nfile-access = \"shared\"\nEOF\nsudo mv ${shim_config_tmp_path} ${shim_config_path}\n+\n+ # Configure CNI.\n+ sudo -n -E env PATH=${PATH} ${GOPATH}/src/github.com/containerd/containerd/script/setup/install-cni\n)\n# Install containerd and crictl.\n@@ -128,7 +131,7 @@ if [[ ${exit_code} -eq 0 ]]; then\necho \"root_test executable not found\"\nexit 1\nfi\n- sudo -n -E RUNSC_RUNTIME=${runtime} ${root_test}\n+ sudo -n -E RUNSC_RUNTIME=${runtime} RUNSC_EXEC=/tmp/${runtime}/runsc ${root_test}\nexit_code=${?}\nfi\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/root/BUILD", "new_path": "runsc/test/root/BUILD", "diff": "@@ -14,6 +14,7 @@ go_test(\nsrcs = [\n\"cgroup_test.go\",\n\"chroot_test.go\",\n+ \"crictl_test.go\",\n],\nembed = [\":root\"],\ntags = [\n@@ -24,6 +25,7 @@ go_test(\n],\ndeps = [\n\"//runsc/specutils\",\n+ \"//runsc/test/root/testdata\",\n\"//runsc/test/testutil\",\n\"@com_github_syndtr_gocapability//capability:go_default_library\",\n],\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/root/chroot_test.go", "new_path": "runsc/test/root/chroot_test.go", "diff": "// limitations under the License.\n// Package root is used for tests that requires sysadmin privileges run. First,\n-// follow the setup instruction in runsc/test/README.md. To run these test:\n+// follow the setup instruction in runsc/test/README.md. 
To run these tests:\n//\n// bazel build //runsc/test/root:root_test\n// root_test=$(find -L ./bazel-bin/ -executable -type f -name root_test | grep __main__)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "runsc/test/root/crictl_test.go", "diff": "+// Copyright 2018 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package root\n+\n+import (\n+ \"bytes\"\n+ \"fmt\"\n+ \"io\"\n+ \"io/ioutil\"\n+ \"log\"\n+ \"net/http\"\n+ \"os\"\n+ \"os/exec\"\n+ \"path\"\n+ \"path/filepath\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"gvisor.googlesource.com/gvisor/runsc/specutils\"\n+ \"gvisor.googlesource.com/gvisor/runsc/test/root/testdata\"\n+ \"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n+)\n+\n+// Tests for crictl have to be run as root (rather than in a user namespace)\n+// because crictl creates named network namespaces in /var/run/netns/.\n+func TestCrictlSanity(t *testing.T) {\n+ // Setup containerd and crictl.\n+ crictl, cleanup, err := setup(t)\n+ if err != nil {\n+ t.Fatalf(\"failed to setup crictl: %v\", err)\n+ }\n+ defer cleanup()\n+ podID, contID, err := crictl.StartPodAndContainer(\"httpd\", testdata.Sandbox, testdata.Httpd)\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+\n+ // Look for the httpd page.\n+ if err = httpGet(crictl, podID, \"index.html\"); err != nil {\n+ t.Fatalf(\"failed to get page: %v\", err)\n+ }\n+\n+ // Stop everything.\n+ if err := crictl.StopPodAndContainer(podID, contID); err != nil {\n+ t.Fatal(err)\n+ }\n+}\n+func TestMountPaths(t *testing.T) {\n+ // Setup containerd and crictl.\n+ crictl, cleanup, err := setup(t)\n+ if err != nil {\n+ t.Fatalf(\"failed to setup crictl: %v\", err)\n+ }\n+ defer cleanup()\n+ podID, contID, err := crictl.StartPodAndContainer(\"httpd\", testdata.Sandbox, testdata.HttpdMountPaths)\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+\n+ // Look for the directory available at /test.\n+ if err = httpGet(crictl, podID, \"test\"); err != nil {\n+ t.Fatalf(\"failed to get page: %v\", err)\n+ }\n+\n+ // Stop everything.\n+ if err := crictl.StopPodAndContainer(podID, contID); err != nil {\n+ t.Fatal(err)\n+ }\n+}\n+\n+// setup sets up before a test. 
Specifically it:\n+// * Creates directories and a socket for containerd to utilize.\n+// * Runs containerd and waits for it to reach a \"ready\" state for testing.\n+// * Returns a cleanup function that should be called at the end of the test.\n+func setup(t *testing.T) (*testutil.Crictl, func(), error) {\n+ var cleanups []func()\n+ cleanupFunc := func() {\n+ for i := len(cleanups) - 1; i >= 0; i-- {\n+ cleanups[i]()\n+ }\n+ }\n+ cleanup := specutils.MakeCleanup(cleanupFunc)\n+ defer cleanup.Clean()\n+\n+ // Create temporary containerd root and state directories, and a socket\n+ // via which crictl and containerd communicate.\n+ containerdRoot, err := ioutil.TempDir(testutil.TmpDir(), \"containerd-root\")\n+ if err != nil {\n+ t.Fatalf(\"failed to create containerd root: %v\", err)\n+ }\n+ cleanups = append(cleanups, func() { os.RemoveAll(containerdRoot) })\n+ containerdState, err := ioutil.TempDir(testutil.TmpDir(), \"containerd-state\")\n+ if err != nil {\n+ t.Fatalf(\"failed to create containerd state: %v\", err)\n+ }\n+ cleanups = append(cleanups, func() { os.RemoveAll(containerdState) })\n+ sockAddr := filepath.Join(testutil.TmpDir(), \"containerd-test.sock\")\n+\n+ // Start containerd.\n+ config, err := testutil.WriteTmpFile(\"containerd-config\", testdata.ContainerdConfig(getRunsc()))\n+ if err != nil {\n+ t.Fatalf(\"failed to write containerd config\")\n+ }\n+ cleanups = append(cleanups, func() { os.RemoveAll(config) })\n+ containerd := exec.Command(getContainerd(),\n+ \"--config\", config,\n+ \"--log-level\", \"debug\",\n+ \"--root\", containerdRoot,\n+ \"--state\", containerdState,\n+ \"--address\", sockAddr)\n+ cleanups = append(cleanups, func() {\n+ if err := testutil.KillCommand(containerd); err != nil {\n+ log.Printf(\"error killing containerd: %v\", err)\n+ }\n+ })\n+ containerdStderr, err := containerd.StderrPipe()\n+ if err != nil {\n+ t.Fatalf(\"failed to get containerd stderr: %v\", err)\n+ }\n+ containerdStdout, err := containerd.StdoutPipe()\n+ if err != nil {\n+ t.Fatalf(\"failed to get containerd stdout: %v\", err)\n+ }\n+ if err := containerd.Start(); err != nil {\n+ t.Fatalf(\"failed running containerd: %v\", err)\n+ }\n+\n+ // Wait for containerd to boot. Then put all containerd output into a\n+ // buffer to be logged at the end of the test.\n+ testutil.WaitUntilRead(containerdStderr, \"Start streaming server\", nil, 10*time.Second)\n+ stdoutBuf := &bytes.Buffer{}\n+ stderrBuf := &bytes.Buffer{}\n+ go func() { io.Copy(stdoutBuf, containerdStdout) }()\n+ go func() { io.Copy(stderrBuf, containerdStderr) }()\n+ cleanups = append(cleanups, func() {\n+ t.Logf(\"containerd stdout: %s\", string(stdoutBuf.Bytes()))\n+ t.Logf(\"containerd stderr: %s\", string(stderrBuf.Bytes()))\n+ })\n+\n+ cleanup.Release()\n+ return testutil.NewCrictl(20*time.Second, sockAddr), cleanupFunc, nil\n+}\n+\n+// httpGet GETs the contents of a file served from a pod on port 80.\n+func httpGet(crictl *testutil.Crictl, podID, filePath string) error {\n+ // Get the IP of the httpd server.\n+ ip, err := crictl.PodIP(podID)\n+ if err != nil {\n+ return fmt.Errorf(\"failed to get IP from pod %q: %v\", podID, err)\n+ }\n+\n+ // GET the page. 
We may be waiting for the server to start, so retry\n+ // with a timeout.\n+ var resp *http.Response\n+ cb := func() error {\n+ r, err := http.Get(fmt.Sprintf(\"http://%s\", path.Join(ip, filePath)))\n+ resp = r\n+ return err\n+ }\n+ if err := testutil.Poll(cb, 20*time.Second); err != nil {\n+ return err\n+ }\n+ defer resp.Body.Close()\n+\n+ if resp.StatusCode != 200 {\n+ return fmt.Errorf(\"bad status returned: %d\", resp.StatusCode)\n+ }\n+ return nil\n+}\n+\n+func getContainerd() string {\n+ // Bazel doesn't pass PATH through, assume the location of containerd\n+ // unless specified by environment variable.\n+ c := os.Getenv(\"CONTAINERD_PATH\")\n+ if c == \"\" {\n+ return \"/usr/local/bin/containerd\"\n+ }\n+ return c\n+}\n+\n+func getRunsc() string {\n+ // Bazel doesn't pass PATH through, assume the location of runsc unless\n+ // specified by environment variable.\n+ c := os.Getenv(\"RUNSC_EXEC\")\n+ if c == \"\" {\n+ return \"/tmp/runsc-test/runsc\"\n+ }\n+ return c\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "runsc/test/root/testdata/BUILD", "diff": "+load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\")\n+\n+package(licenses = [\"notice\"]) # Apache 2.0\n+\n+go_library(\n+ name = \"testdata\",\n+ srcs = [\n+ \"containerd_config.go\",\n+ \"httpd.go\",\n+ \"httpd_mount_paths.go\",\n+ \"sandbox.go\",\n+ ],\n+ importpath = \"gvisor.googlesource.com/gvisor/runsc/test/root/testdata\",\n+ visibility = [\n+ \"//visibility:public\",\n+ ],\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "runsc/test/root/testdata/containerd_config.go", "diff": "+// Copyright 2018 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package testdata contains data required for root tests.\n+package testdata\n+\n+import \"fmt\"\n+\n+// containerdConfigTemplate is a .toml config for containerd. 
It contains a\n+// formatting verb so the runtime field can be set via fmt.Sprintf.\n+const containerdConfigTemplate = `\n+disabled_plugins = [\"restart\"]\n+[plugins.linux]\n+ runtime = \"%s\"\n+ runtime_root = \"/tmp/test-containerd/runsc\"\n+ shim = \"/usr/local/bin/gvisor-containerd-shim\"\n+ shim_debug = true\n+\n+[plugins.cri.containerd.runtimes.runsc]\n+ runtime_type = \"io.containerd.runtime.v1.linux\"\n+ runtime_engine = \"%s\"\n+`\n+\n+// ContainerdConfig returns a containerd config file with the specified\n+// runtime.\n+func ContainerdConfig(runtime string) string {\n+ return fmt.Sprintf(containerdConfigTemplate, runtime, runtime)\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "runsc/test/root/testdata/httpd.go", "diff": "+// Copyright 2018 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package testdata\n+\n+// Httpd is a JSON config for an httpd container.\n+const Httpd = `\n+{\n+ \"metadata\": {\n+ \"name\": \"httpd\"\n+ },\n+ \"image\":{\n+ \"image\": \"httpd\"\n+ },\n+ \"mounts\": [\n+ ],\n+ \"linux\": {\n+ },\n+ \"log_path\": \"httpd.log\"\n+}\n+`\n" }, { "change_type": "ADD", "old_path": null, "new_path": "runsc/test/root/testdata/httpd_mount_paths.go", "diff": "+// Copyright 2018 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package testdata\n+\n+// HttpdMountPaths is a JSON config for an httpd container with additional\n+// mounts.\n+const HttpdMountPaths = `\n+{\n+ \"metadata\": {\n+ \"name\": \"httpd\"\n+ },\n+ \"image\":{\n+ \"image\": \"httpd\"\n+ },\n+ \"mounts\": [\n+ {\n+ \"container_path\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n+ \"host_path\": \"/var/lib/kubelet/pods/82bae206-cdf5-11e8-b245-8cdcd43ac064/volumes/kubernetes.io~secret/default-token-2rpfx\",\n+ \"readonly\": true\n+ },\n+ {\n+ \"container_path\": \"/etc/hosts\",\n+ \"host_path\": \"/var/lib/kubelet/pods/82bae206-cdf5-11e8-b245-8cdcd43ac064/etc-hosts\",\n+ \"readonly\": false\n+ },\n+ {\n+ \"container_path\": \"/dev/termination-log\",\n+ \"host_path\": \"/var/lib/kubelet/pods/82bae206-cdf5-11e8-b245-8cdcd43ac064/containers/httpd/d1709580\",\n+ \"readonly\": false\n+ },\n+ {\n+ \"container_path\": \"/usr/local/apache2/htdocs/test\",\n+ \"host_path\": \"/var/lib/kubelet/pods/82bae206-cdf5-11e8-b245-8cdcd43ac064\",\n+ \"readonly\": true\n+ }\n+ ],\n+ \"linux\": {\n+ },\n+ \"log_path\": \"httpd.log\"\n+}\n+`\n" }, { "change_type": "ADD", "old_path": null, "new_path": 
"runsc/test/root/testdata/sandbox.go", "diff": "+// Copyright 2018 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package testdata\n+\n+// Sandbox is a default JSON config for a sandbox.\n+const Sandbox = `\n+{\n+ \"metadata\": {\n+ \"name\": \"default-sandbox\",\n+ \"namespace\": \"default\",\n+ \"attempt\": 1,\n+ \"uid\": \"hdishd83djaidwnduwk28bcsb\"\n+ },\n+ \"linux\": {\n+ },\n+ \"log_directory\": \"/tmp\"\n+}\n+`\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/testutil/BUILD", "new_path": "runsc/test/testutil/BUILD", "diff": "@@ -5,6 +5,7 @@ package(licenses = [\"notice\"]) # Apache 2.0\ngo_library(\nname = \"testutil\",\nsrcs = [\n+ \"crictl.go\",\n\"docker.go\",\n\"testutil.go\",\n\"testutil_race.go\",\n" }, { "change_type": "ADD", "old_path": null, "new_path": "runsc/test/testutil/crictl.go", "diff": "+// Copyright 2018 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package testutil\n+\n+import (\n+ \"encoding/json\"\n+ \"fmt\"\n+ \"os\"\n+ \"os/exec\"\n+ \"strings\"\n+ \"time\"\n+)\n+\n+const endpointPrefix = \"unix://\"\n+\n+// Crictl contains information required to run the crictl utility.\n+type Crictl struct {\n+ executable string\n+ timeout time.Duration\n+ imageEndpoint string\n+ runtimeEndpoint string\n+}\n+\n+// NewCrictl returns a Crictl configured with a timeout and an endpoint over\n+// which it will talk to containerd.\n+func NewCrictl(timeout time.Duration, endpoint string) *Crictl {\n+ // Bazel doesn't pass PATH through, assume the location of crictl\n+ // unless specified by environment variable.\n+ executable := os.Getenv(\"CRICTL_PATH\")\n+ if executable == \"\" {\n+ executable = \"/usr/local/bin/crictl\"\n+ }\n+ return &Crictl{\n+ executable: executable,\n+ timeout: timeout,\n+ imageEndpoint: endpointPrefix + endpoint,\n+ runtimeEndpoint: endpointPrefix + endpoint,\n+ }\n+}\n+\n+// Pull pulls an container image. It corresponds to `crictl pull`.\n+func (cc *Crictl) Pull(imageName string) error {\n+ _, err := cc.run(\"pull\", imageName)\n+ return err\n+}\n+\n+// RunPod creates a sandbox. It corresponds to `crictl runp`.\n+func (cc *Crictl) RunPod(sbSpecFile string) (string, error) {\n+ podID, err := cc.run(\"runp\", sbSpecFile)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"runp failed: %v\", err)\n+ }\n+ // Strip the trailing newline from crictl output.\n+ return strings.TrimSpace(podID), nil\n+}\n+\n+// Create creates a container within a sandbox. 
It corresponds to `crictl\n+// create`.\n+func (cc *Crictl) Create(podID, contSpecFile, sbSpecFile string) (string, error) {\n+ podID, err := cc.run(\"create\", podID, contSpecFile, sbSpecFile)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"create failed: %v\", err)\n+ }\n+ // Strip the trailing newline from crictl output.\n+ return strings.TrimSpace(podID), nil\n+}\n+\n+// Start starts a container. It corresponds to `crictl start`.\n+func (cc *Crictl) Start(contID string) (string, error) {\n+ output, err := cc.run(\"start\", contID)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"start failed: %v\", err)\n+ }\n+ return output, nil\n+}\n+\n+// Stop stops a container. It corresponds to `crictl stop`.\n+func (cc *Crictl) Stop(contID string) error {\n+ _, err := cc.run(\"stop\", contID)\n+ return err\n+}\n+\n+// Rm removes a container. It corresponds to `crictl rm`.\n+func (cc *Crictl) Rm(contID string) error {\n+ _, err := cc.run(\"rm\", contID)\n+ return err\n+}\n+\n+// StopPod stops a pod. It corresponds to `crictl stopp`.\n+func (cc *Crictl) StopPod(podID string) error {\n+ _, err := cc.run(\"stopp\", podID)\n+ return err\n+}\n+\n+// containsConfig is a minimal copy of\n+// https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/cri/runtime/v1alpha2/api.proto\n+// It only contains fields needed for testing.\n+type containerConfig struct {\n+ Status containerStatus\n+}\n+\n+type containerStatus struct {\n+ Network containerNetwork\n+}\n+\n+type containerNetwork struct {\n+ IP string\n+}\n+\n+// PodIP returns a pod's IP address.\n+func (cc *Crictl) PodIP(podID string) (string, error) {\n+ output, err := cc.run(\"inspectp\", podID)\n+ if err != nil {\n+ return \"\", err\n+ }\n+ conf := &containerConfig{}\n+ if err := json.Unmarshal([]byte(output), conf); err != nil {\n+ return \"\", fmt.Errorf(\"failed to unmarshal JSON: %v, %s\", err, output)\n+ }\n+ if conf.Status.Network.IP == \"\" {\n+ return \"\", fmt.Errorf(\"no IP found in config: %s\", output)\n+ }\n+ return conf.Status.Network.IP, nil\n+}\n+\n+// RmPod removes a container. It corresponds to `crictl rmp`.\n+func (cc *Crictl) RmPod(podID string) error {\n+ _, err := cc.run(\"rmp\", podID)\n+ return err\n+}\n+\n+// StartPodAndContainer pulls an image, then starts a sandbox and container in\n+// that sandbox. 
It returns the pod ID and container ID.\n+func (cc *Crictl) StartPodAndContainer(image, sbSpec, contSpec string) (string, string, error) {\n+ if err := cc.Pull(image); err != nil {\n+ return \"\", \"\", fmt.Errorf(\"failed to pull %s: %v\", image, err)\n+ }\n+\n+ // Write the specs to files that can be read by crictl.\n+ sbSpecFile, err := WriteTmpFile(\"sbSpec\", sbSpec)\n+ if err != nil {\n+ return \"\", \"\", fmt.Errorf(\"failed to write sandbox spec: %v\", err)\n+ }\n+ contSpecFile, err := WriteTmpFile(\"contSpec\", contSpec)\n+ if err != nil {\n+ return \"\", \"\", fmt.Errorf(\"failed to write container spec: %v\", err)\n+ }\n+\n+ podID, err := cc.RunPod(sbSpecFile)\n+ if err != nil {\n+ return \"\", \"\", err\n+ }\n+\n+ contID, err := cc.Create(podID, contSpecFile, sbSpecFile)\n+ if err != nil {\n+ return \"\", \"\", fmt.Errorf(\"failed to create container in pod %q: %v\", podID, err)\n+ }\n+\n+ if _, err := cc.Start(contID); err != nil {\n+ return \"\", \"\", fmt.Errorf(\"failed to start container %q in pod %q: %v\", contID, podID, err)\n+ }\n+\n+ return podID, contID, nil\n+}\n+\n+// StopPodAndContainer stops a container and pod.\n+func (cc *Crictl) StopPodAndContainer(podID, contID string) error {\n+ if err := cc.Stop(contID); err != nil {\n+ return fmt.Errorf(\"failed to stop container %q in pod %q: %v\", contID, podID, err)\n+ }\n+\n+ if err := cc.Rm(contID); err != nil {\n+ return fmt.Errorf(\"failed to remove container %q in pod %q: %v\", contID, podID, err)\n+ }\n+\n+ if err := cc.StopPod(podID); err != nil {\n+ return fmt.Errorf(\"failed to stop pod %q: %v\", podID, err)\n+ }\n+\n+ if err := cc.RmPod(podID); err != nil {\n+ return fmt.Errorf(\"failed to remove pod %q: %v\", podID, err)\n+ }\n+\n+ return nil\n+}\n+\n+// run runs crictl with the given args and returns an error if it takes longer\n+// than cc.Timeout to run.\n+func (cc *Crictl) run(args ...string) (string, error) {\n+ defaultArgs := []string{\n+ \"--image-endpoint\", cc.imageEndpoint,\n+ \"--runtime-endpoint\", cc.runtimeEndpoint,\n+ }\n+ cmd := exec.Command(cc.executable, append(defaultArgs, args...)...)\n+\n+ // Run the command with a timeout.\n+ done := make(chan string)\n+ errCh := make(chan error)\n+ go func() {\n+ output, err := cmd.CombinedOutput()\n+ if err != nil {\n+ errCh <- fmt.Errorf(\"error: \\\"%v\\\", output: %s\", err, string(output))\n+ }\n+ done <- string(output)\n+ }()\n+ select {\n+ case output := <-done:\n+ return output, nil\n+ case err := <-errCh:\n+ return \"\", err\n+ case <-time.After(cc.timeout):\n+ if err := KillCommand(cmd); err != nil {\n+ return \"\", fmt.Errorf(\"timed out, then couldn't kill process %+v: %v\", cmd, err)\n+ }\n+ return \"\", fmt.Errorf(\"timed out: %+v\", cmd)\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/testutil/testutil.go", "new_path": "runsc/test/testutil/testutil.go", "diff": "@@ -72,7 +72,7 @@ func FindFile(path string) (string, error) {\n}\n// The test root is demarcated by a path element called \"__main__\". Search for\n- // it backwards from the in the working directory.\n+ // it backwards from the working directory.\nroot := wd\nfor {\ndir, name := filepath.Split(root)\n@@ -242,7 +242,7 @@ func WaitForHTTP(port int, timeout time.Duration) error {\n// RunAsRoot ensures the test runs with CAP_SYS_ADMIN and CAP_SYS_CHROOT. If\n// needed it will create a new user namespace and re-execute the test as root\n-// inside of the namespace. This functionr returns when it's running as root. If\n+// inside of the namespace. 
This function returns when it's running as root. If\n// it needs to create another process, it will exit from there and not return.\nfunc RunAsRoot() {\nif specutils.HasCapabilities(capability.CAP_SYS_ADMIN, capability.CAP_SYS_CHROOT) {\n@@ -288,7 +288,7 @@ func RunAsRoot() {\nos.Exit(0)\n}\n-// StartReaper starts a gorouting that will reap all children processes created\n+// StartReaper starts a goroutine that will reap all children processes created\n// by the tests. Caller must call the returned function to stop it.\nfunc StartReaper() func() {\nch := make(chan os.Signal, 1)\n@@ -356,3 +356,32 @@ func WaitUntilRead(r io.Reader, want string, split bufio.SplitFunc, timeout time\nreturn nil\n}\n}\n+\n+// KillCommand kills the process running cmd unless it hasn't been started. It\n+// returns an error if it cannot kill the process unless the reason is that the\n+// process has already exited.\n+func KillCommand(cmd *exec.Cmd) error {\n+ if cmd.Process == nil {\n+ return nil\n+ }\n+ if err := cmd.Process.Kill(); err != nil {\n+ if !strings.Contains(err.Error(), \"process already finished\") {\n+ return fmt.Errorf(\"failed to kill process %v: %v\", cmd, err)\n+ }\n+ }\n+ return nil\n+}\n+\n+// WriteTmpFile writes text to a temporary file, closes the file, and returns\n+// the name of the file.\n+func WriteTmpFile(pattern, text string) (string, error) {\n+ file, err := ioutil.TempFile(TmpDir(), pattern)\n+ if err != nil {\n+ return \"\", err\n+ }\n+ defer file.Close()\n+ if _, err := file.Write([]byte(text)); err != nil {\n+ return \"\", err\n+ }\n+ return file.Name(), nil\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
First crictl integration tests. More tests will come, but it's worth getting what's done so far reviewed. PiperOrigin-RevId: 219734531 Change-Id: If15ca6e6855e3d1cc28c83b5f9c3a72cb65b2e59
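Editor's note: a hedged usage sketch of the helper API introduced above (NewCrictl, StartPodAndContainer, PodIP, and StopPodAndContainer, with signatures taken from the diff). The test name and socket path are hypothetical, and the sketch assumes a containerd instance is already serving on that socket:

```go
package root_test

import (
	"testing"
	"time"

	"gvisor.googlesource.com/gvisor/runsc/test/root/testdata"
	"gvisor.googlesource.com/gvisor/runsc/test/testutil"
)

func TestHttpdSmoke(t *testing.T) {
	crictl := testutil.NewCrictl(20*time.Second, "/tmp/containerd-test.sock") // hypothetical socket

	// Pull the image, then start a sandbox and an httpd container inside it.
	podID, contID, err := crictl.StartPodAndContainer("httpd", testdata.Sandbox, testdata.Httpd)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := crictl.StopPodAndContainer(podID, contID); err != nil {
			t.Error(err)
		}
	}()

	// The pod IP comes from `crictl inspectp` under the hood.
	ip, err := crictl.PodIP(podID)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("httpd pod is reachable at %s", ip)
}
```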
259,854
05.11.2018 17:22:41
28,800
95722dc4dd00b5efc182a05605e6e460383e618e
Use correct company name in copyright header. These files were added with the wrong name after all of the existing files were corrected.
[ { "change_type": "MODIFY", "old_path": "pkg/p9/buffer_test.go", "new_path": "pkg/p9/buffer_test.go", "diff": "-// Copyright 2018 Google Inc.\n+// Copyright 2018 Google LLC\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/p9test/p9test.go", "new_path": "pkg/p9/p9test/p9test.go", "diff": "-// Copyright 2018 Google Inc.\n+// Copyright 2018 Google LLC\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/path_tree.go", "new_path": "pkg/p9/path_tree.go", "diff": "-// Copyright 2018 Google Inc.\n+// Copyright 2018 Google LLC\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n" } ]
Go
Apache License 2.0
google/gvisor
Use correct company name in copyright header These files were added with the wrong name after all of the existing files were corrected. PiperOrigin-RevId: 220202068 Change-Id: Ia0d15233c1aa69330356a7cf16b5aa00d978e09c
259,992
05.11.2018 17:41:22
28,800
a467f092616122f1f718df2a375ba66e97997594
Log when external signal is received
[ { "change_type": "MODIFY", "old_path": "runsc/boot/controller.go", "new_path": "runsc/boot/controller.go", "diff": "@@ -445,6 +445,18 @@ const (\nDeliverToForegroundProcessGroup\n)\n+func (s SignalDeliveryMode) String() string {\n+ switch s {\n+ case DeliverToProcess:\n+ return \"Process\"\n+ case DeliverToAllProcesses:\n+ return \"All\"\n+ case DeliverToForegroundProcessGroup:\n+ return \"Foreground Process Group\"\n+ }\n+ return fmt.Sprintf(\"unknown signal delivery mode: %d\", s)\n+}\n+\n// SignalArgs are arguments to the Signal method.\ntype SignalArgs struct {\n// CID is the container ID.\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -336,6 +336,7 @@ func New(args Args) (*Loader, error) {\n// properly.\ndeliveryMode = DeliverToForegroundProcessGroup\n}\n+ log.Infof(\"Received external signal %d, mode: %v\", sig, deliveryMode)\nif err := l.signal(args.ID, 0, int32(sig), deliveryMode); err != nil {\nlog.Warningf(\"error sending signal %v to container %q: %v\", sig, args.ID, err)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Log when external signal is received PiperOrigin-RevId: 220204591 Change-Id: I21a9c6f5c12a376d18da5d10c1871837c4f49ad2
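Editor's note: a standalone sketch of why the String() method added above helps the log line. With a fmt.Stringer, the %v verb renders the mode name instead of a bare integer. The constant ordering is assumed from the switch arms in the diff, and the signal number is invented:

```go
package main

import "fmt"

type SignalDeliveryMode int

const (
	DeliverToProcess SignalDeliveryMode = iota // assumed ordering
	DeliverToAllProcesses
	DeliverToForegroundProcessGroup
)

func (s SignalDeliveryMode) String() string {
	switch s {
	case DeliverToProcess:
		return "Process"
	case DeliverToAllProcesses:
		return "All"
	case DeliverToForegroundProcessGroup:
		return "Foreground Process Group"
	}
	return fmt.Sprintf("unknown signal delivery mode: %d", int(s))
}

func main() {
	fmt.Printf("Received external signal %d, mode: %v\n", 15, DeliverToAllProcesses)
	// Prints: Received external signal 15, mode: All
}
```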
259,881
06.11.2018 13:11:56
28,800
6ae1c90b891fdefa47c9be943369a7e7bbd75907
Move bazelrc to new location. Bazel 0.18 moved the workspace bazelrc location from //tools/bazel.rc to //.bazelrc. The old location will be dropped by a future version of bazel. This bumps the minimum required version of bazel to 0.18. More context: https://groups.google.com/forum/#!msg/bazel-discuss/ycDacctX2vw/EGFxGLibAgAJ
[ { "change_type": "RENAME", "old_path": "tools/bazel.rc", "new_path": ".bazelrc", "diff": "" }, { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -255,7 +255,7 @@ gVisor currently requires x86\\_64 Linux to build.\nMake sure the following dependencies are installed:\n* [git][git]\n-* [Bazel][bazel]\n+* [Bazel][bazel] 0.18+\n* [Python][python]\n* [Docker version 17.09.0 or greater][docker]\n* Gold linker (e.g. `binutils-gold` package on Ubuntu)\n" } ]
Go
Apache License 2.0
google/gvisor
Move bazelrc to new location Bazel 0.18 moved the workspace bazelrc location from //tools/bazel.rc to //.bazelrc. The old location will be dropped by a future version of bazel. This bumps the minimum required version of bazel to 0.18. More context: https://groups.google.com/forum/#!msg/bazel-discuss/ycDacctX2vw/EGFxGLibAgAJ PiperOrigin-RevId: 220338084 Change-Id: Ib6fa83a4a0f89e8e898d67152c7bd429e0b9b21e
259,891
06.11.2018 16:17:16
28,800
a81111d5448346098af375de82aec44459239689
Fix problem where crictl tests would signal both error and done channels
[ { "change_type": "MODIFY", "old_path": "runsc/test/testutil/crictl.go", "new_path": "runsc/test/testutil/crictl.go", "diff": "@@ -212,6 +212,7 @@ func (cc *Crictl) run(args ...string) (string, error) {\noutput, err := cmd.CombinedOutput()\nif err != nil {\nerrCh <- fmt.Errorf(\"error: \\\"%v\\\", output: %s\", err, string(output))\n+ return\n}\ndone <- string(output)\n}()\n" } ]
Go
Apache License 2.0
google/gvisor
Fix problem where crictl tests would signal both error and done channels PiperOrigin-RevId: 220372291 Change-Id: I054ba56a23c402c7244b476d7d6fe72084942a0e
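Editor's note: a simplified, standalone model (not the repository code) of the goroutine-and-select pattern the fix touches. The worker must report on exactly one channel, so the early return after sending on errCh is what keeps a failed command from also trying to send on done:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// run models Crictl.run: one goroutine does the work, the caller selects on
// success, error, or timeout.
func run(fail bool) (string, error) {
	done := make(chan string)
	errCh := make(chan error)
	go func() {
		if fail {
			errCh <- errors.New("command failed")
			return // the fix: without this, the goroutine falls through to done <- ...
		}
		done <- "output"
	}()
	select {
	case out := <-done:
		return out, nil
	case err := <-errCh:
		return "", err
	case <-time.After(time.Second):
		return "", errors.New("timed out")
	}
}

func main() {
	fmt.Println(run(false))
	fmt.Println(run(true))
}
```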
259,992
07.11.2018 12:00:51
28,800
dce61075c03907a70362878d362b2b95ff06addf
Fix flaky TestCacheResolutionTimeout. Increase the timeout to prevent the entry from being found when there is a delay on the address resolution goroutine that hasn't yet marked the request as failed.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/linkaddrcache_test.go", "new_path": "pkg/tcpip/stack/linkaddrcache_test.go", "diff": "@@ -237,8 +237,8 @@ func TestCacheResolutionFailed(t *testing.T) {\n}\nfunc TestCacheResolutionTimeout(t *testing.T) {\n- resolverDelay := 50 * time.Millisecond\n- expiration := resolverDelay / 2\n+ resolverDelay := 500 * time.Millisecond\n+ expiration := resolverDelay / 10\nc := newLinkAddrCache(expiration, 1*time.Millisecond, 3)\nlinkRes := &testLinkAddressResolver{cache: c, delay: resolverDelay}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix flaky TestCacheResolutionTimeout Increase timeout to prevent the entry from being found when there is delay on the address resolution goroutine that doesn't mark the request as failed. PiperOrigin-RevId: 220504789 Change-Id: I7e44fd95d8624bd69962f862fbf5517a81395f2a
259,992
07.11.2018 13:32:26
28,800
c92b9b7086b89fd8e7f5913bf74d04761163e24b
Add more logging to controller.go
[ { "change_type": "MODIFY", "old_path": "runsc/boot/controller.go", "new_path": "runsc/boot/controller.go", "diff": "@@ -169,7 +169,7 @@ type containerManager struct {\n// StartRoot will start the root container process.\nfunc (cm *containerManager) StartRoot(cid *string, _ *struct{}) error {\n- log.Debugf(\"containerManager.StartRoot\")\n+ log.Debugf(\"containerManager.StartRoot %q\", *cid)\n// Tell the root container to start and wait for the result.\ncm.startChan <- struct{}{}\nif err := <-cm.startResultChan; err != nil {\n@@ -239,6 +239,7 @@ func (cm *containerManager) Start(args *StartArgs, _ *struct{}) error {\nerr := cm.l.startContainer(cm.l.k, args.Spec, args.Conf, args.CID, args.FilePayload.Files)\nif err != nil {\n+ log.Debugf(\"containerManager.Start failed %q: %+v\", args.CID, args)\nreturn err\n}\nlog.Debugf(\"Container %q started\", args.CID)\n@@ -259,6 +260,7 @@ func (cm *containerManager) ExecuteAsync(args *control.ExecArgs, pid *int32) err\nlog.Debugf(\"containerManager.ExecuteAsync: %+v\", args)\ntgid, err := cm.l.executeAsync(args)\nif err != nil {\n+ log.Debugf(\"containerManager.ExecuteAsync failed: %+v: %v\", args, err)\nreturn err\n}\n*pid = int32(tgid)\n@@ -277,6 +279,7 @@ func (cm *containerManager) Checkpoint(o *control.SaveOpts, _ *struct{}) error {\n// Pause suspends a container.\nfunc (cm *containerManager) Pause(_, _ *struct{}) error {\n+ log.Debugf(\"containerManager.Pause\")\ncm.l.k.Pause()\nreturn nil\n}\n@@ -398,6 +401,7 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\n// Resume unpauses a container.\nfunc (cm *containerManager) Resume(_, _ *struct{}) error {\n+ log.Debugf(\"containerManager.Resume\")\ncm.l.k.Unpause()\nreturn nil\n}\n@@ -405,7 +409,9 @@ func (cm *containerManager) Resume(_, _ *struct{}) error {\n// Wait waits for the init process in the given container.\nfunc (cm *containerManager) Wait(cid *string, waitStatus *uint32) error {\nlog.Debugf(\"containerManager.Wait\")\n- return cm.l.waitContainer(*cid, waitStatus)\n+ err := cm.l.waitContainer(*cid, waitStatus)\n+ log.Debugf(\"containerManager.Wait returned, waitStatus: %v: %v\", waitStatus, err)\n+ return err\n}\n// WaitPIDArgs are arguments to the WaitPID method.\n" } ]
Go
Apache License 2.0
google/gvisor
Add more logging to controller.go PiperOrigin-RevId: 220519632 Change-Id: Iaeec007fc1aa3f0b72569b288826d45f2534c4bf
259,992
07.11.2018 21:30:11
28,800
d12a0dd6b8afaca9fbb5fe60fb84a3ae0502261a
Fix test --race violation. SetupContainerInRoot was setting Config.RootDir unnecessarily and causing a --race violation in TestMultiContainerDestroyStarting.
[ { "change_type": "MODIFY", "old_path": "runsc/container/container_test.go", "new_path": "runsc/container/container_test.go", "diff": "@@ -1287,24 +1287,25 @@ func TestReadonlyMount(t *testing.T) {\n// TestAbbreviatedIDs checks that runsc supports using abbreviated container\n// IDs in place of full IDs.\nfunc TestAbbreviatedIDs(t *testing.T) {\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ conf := testutil.TestConfigWithRoot(rootDir)\n+\ncids := []string{\n\"foo-\" + testutil.UniqueContainerID(),\n\"bar-\" + testutil.UniqueContainerID(),\n\"baz-\" + testutil.UniqueContainerID(),\n}\n-\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- t.Fatalf(\"error creating root dir: %v\", err)\n- }\nfor _, cid := range cids {\nspec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n- conf := testutil.TestConfig()\n- bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n+ bundleDir, err := testutil.SetupBundleDir(spec)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n- defer os.RemoveAll(rootDir)\ndefer os.RemoveAll(bundleDir)\n// Create and start the container.\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/multi_container_test.go", "new_path": "runsc/container/multi_container_test.go", "diff": "@@ -63,6 +63,7 @@ func startContainers(conf *boot.Config, specs []*specs.Spec, ids []string) ([]*C\nif err != nil {\nreturn nil, nil, fmt.Errorf(\"error creating root dir: %v\", err)\n}\n+ conf.RootDir = rootDir\nvar containers []*Container\nvar bundles []string\n@@ -76,7 +77,7 @@ func startContainers(conf *boot.Config, specs []*specs.Spec, ids []string) ([]*C\nos.RemoveAll(rootDir)\n}\nfor i, spec := range specs {\n- bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n+ bundleDir, err := testutil.SetupBundleDir(spec)\nif err != nil {\ncleanup()\nreturn nil, nil, fmt.Errorf(\"error setting up container: %v\", err)\n@@ -617,16 +618,16 @@ func TestMultiContainerDestroyNotStarted(t *testing.T) {\nspecs, ids := createSpecs(\n[]string{\"/bin/sleep\", \"100\"},\n[]string{\"/bin/sleep\", \"100\"})\n- conf := testutil.TestConfig()\n-\nrootDir, err := testutil.SetupRootDir()\nif err != nil {\nt.Fatalf(\"error creating root dir: %v\", err)\n}\ndefer os.RemoveAll(rootDir)\n+ conf := testutil.TestConfigWithRoot(rootDir)\n+\n// Create and start root container.\n- rootBundleDir, err := testutil.SetupContainerInRoot(rootDir, specs[0], conf)\n+ rootBundleDir, err := testutil.SetupBundleDir(specs[0])\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -642,7 +643,7 @@ func TestMultiContainerDestroyNotStarted(t *testing.T) {\n}\n// Create and destroy sub-container.\n- bundleDir, err := testutil.SetupContainerInRoot(rootDir, specs[1], conf)\n+ bundleDir, err := testutil.SetupBundleDir(specs[1])\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -667,7 +668,6 @@ func TestMultiContainerDestroyStarting(t *testing.T) {\ncmds[i] = []string{\"/bin/sleep\", \"100\"}\n}\nspecs, ids := createSpecs(cmds...)\n- conf := testutil.TestConfig()\nrootDir, err := testutil.SetupRootDir()\nif err != nil {\n@@ -675,8 +675,10 @@ func TestMultiContainerDestroyStarting(t *testing.T) {\n}\ndefer os.RemoveAll(rootDir)\n+ conf := testutil.TestConfigWithRoot(rootDir)\n+\n// Create and start root container.\n- rootBundleDir, err := testutil.SetupContainerInRoot(rootDir, specs[0], conf)\n+ rootBundleDir, err := 
testutil.SetupBundleDir(specs[0])\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -697,7 +699,7 @@ func TestMultiContainerDestroyStarting(t *testing.T) {\ncontinue // skip root container\n}\n- bundleDir, err := testutil.SetupContainerInRoot(rootDir, specs[i], conf)\n+ bundleDir, err := testutil.SetupBundleDir(specs[i])\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/testutil/testutil.go", "new_path": "runsc/test/testutil/testutil.go", "diff": "@@ -104,7 +104,8 @@ func FindFile(path string) (string, error) {\nreturn matches[0], nil\n}\n-// TestConfig return the default configuration to use in tests.\n+// TestConfig returns the default configuration to use in tests. Note that\n+// 'RootDir' must be set by caller if required.\nfunc TestConfig() *boot.Config {\nreturn &boot.Config{\nDebug: true,\n@@ -117,6 +118,13 @@ func TestConfig() *boot.Config {\n}\n}\n+// TestConfigWithRoot returns the default configuration to use in tests.\n+func TestConfigWithRoot(rootDir string) *boot.Config {\n+ conf := TestConfig()\n+ conf.RootDir = rootDir\n+ return conf\n+}\n+\n// NewSpecWithArgs creates a simple spec with the given args suitable for use\n// in tests.\nfunc NewSpecWithArgs(args ...string) *specs.Spec {\n@@ -162,13 +170,13 @@ func SetupContainer(spec *specs.Spec, conf *boot.Config) (rootDir, bundleDir str\nif err != nil {\nreturn \"\", \"\", err\n}\n- bundleDir, err = SetupContainerInRoot(rootDir, spec, conf)\n+ conf.RootDir = rootDir\n+ bundleDir, err = SetupBundleDir(spec)\nreturn rootDir, bundleDir, err\n}\n-// SetupContainerInRoot creates a bundle for the container, generates a test\n-// config, and writes the spec to config.json in the bundle dir.\n-func SetupContainerInRoot(rootDir string, spec *specs.Spec, conf *boot.Config) (bundleDir string, err error) {\n+// SetupBundleDir creates a bundle dir and writes the spec to config.json.\n+func SetupBundleDir(spec *specs.Spec) (bundleDir string, err error) {\nbundleDir, err = ioutil.TempDir(TmpDir(), \"bundle\")\nif err != nil {\nreturn \"\", fmt.Errorf(\"error creating bundle dir: %v\", err)\n@@ -177,8 +185,6 @@ func SetupContainerInRoot(rootDir string, spec *specs.Spec, conf *boot.Config) (\nif err = writeSpec(bundleDir, spec); err != nil {\nreturn \"\", fmt.Errorf(\"error writing spec: %v\", err)\n}\n-\n- conf.RootDir = rootDir\nreturn bundleDir, nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix test --race violation SetupContainerInRoot was setting Config.RootDir unnecessarily and causing a --race violation in TestMultiContainerDestroyStarting. PiperOrigin-RevId: 220580073 Change-Id: Ie0b28c19846106c7458a92681b708ae70f87d25a
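Editor's note: a standalone illustration with invented names (not the repository code) of the sharing pattern behind the --race report: a config value read by concurrent containers should have its fields set once, up front, rather than rewritten per container while readers may already be running:

```go
package main

import (
	"fmt"
	"sync"
)

// Config stands in for boot.Config in this illustration.
type Config struct{ RootDir string }

func main() {
	conf := &Config{RootDir: "/tmp/test-root"} // set once, before any reader starts

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// Concurrent readers. Re-assigning conf.RootDir inside the loop,
			// as the old test helper effectively did, would race with these
			// reads and show up under `go test -race`.
			fmt.Printf("container %d uses root %s\n", id, conf.RootDir)
		}(i)
	}
	wg.Wait()
}
```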
259,992
07.11.2018 23:28:37
28,800
90e81b2e5c665b9fc149f97dcf15142c190260c6
Add test that volume can be mounted on top of a symlink
[ { "change_type": "MODIFY", "old_path": "runsc/test/root/crictl_test.go", "new_path": "runsc/test/root/crictl_test.go", "diff": "@@ -25,6 +25,7 @@ import (\n\"os/exec\"\n\"path\"\n\"path/filepath\"\n+ \"strings\"\n\"testing\"\n\"time\"\n@@ -79,6 +80,43 @@ func TestMountPaths(t *testing.T) {\nt.Fatal(err)\n}\n}\n+func TestMountOverSymlinks(t *testing.T) {\n+ // Setup containerd and crictl.\n+ crictl, cleanup, err := setup(t)\n+ if err != nil {\n+ t.Fatalf(\"failed to setup crictl: %v\", err)\n+ }\n+ defer cleanup()\n+ podID, contID, err := crictl.StartPodAndContainer(\"k8s.gcr.io/busybox\", testdata.Sandbox, testdata.MountOverSymlink)\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+\n+ out, err := crictl.Exec(contID, \"readlink\", \"/etc/resolv.conf\")\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+ if want := \"/tmp/resolv.conf\"; !strings.Contains(string(out), want) {\n+ t.Fatalf(\"/etc/resolv.conf is not pointing to %q: %q\", want, string(out))\n+ }\n+\n+ etc, err := crictl.Exec(contID, \"cat\", \"/etc/resolv.conf\")\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+ tmp, err := crictl.Exec(contID, \"cat\", \"/tmp/resolv.conf\")\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+ if tmp != etc {\n+ t.Fatalf(\"file content doesn't match:\\n\\t/etc/resolv.conf: %s\\n\\t/tmp/resolv.conf: %s\", string(etc), string(tmp))\n+ }\n+\n+ // Stop everything.\n+ if err := crictl.StopPodAndContainer(podID, contID); err != nil {\n+ t.Fatal(err)\n+ }\n+}\n// setup sets up before a test. Specifically it:\n// * Creates directories and a socket for containerd to utilize.\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/root/testdata/BUILD", "new_path": "runsc/test/root/testdata/BUILD", "diff": "@@ -5,6 +5,7 @@ package(licenses = [\"notice\"]) # Apache 2.0\ngo_library(\nname = \"testdata\",\nsrcs = [\n+ \"busybox.go\",\n\"containerd_config.go\",\n\"httpd.go\",\n\"httpd_mount_paths.go\",\n" }, { "change_type": "ADD", "old_path": null, "new_path": "runsc/test/root/testdata/busybox.go", "diff": "+// Copyright 2018 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package testdata\n+\n+// MountOverSymlink is a JSON config for a container that /etc/resolv.conf is a\n+// symlink to /tmp/resolv.conf.\n+var MountOverSymlink = `\n+{\n+ \"metadata\": {\n+ \"name\": \"busybox\"\n+ },\n+ \"image\": {\n+ \"image\": \"k8s.gcr.io/busybox\"\n+ },\n+ \"command\": [\n+ \"sleep\",\n+ \"1000\"\n+ ]\n+}\n+`\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/testutil/crictl.go", "new_path": "runsc/test/testutil/crictl.go", "diff": "@@ -92,6 +92,17 @@ func (cc *Crictl) Stop(contID string) error {\nreturn err\n}\n+// Exec execs a program inside a container. It corresponds to `crictl exec`.\n+func (cc *Crictl) Exec(contID string, args ...string) (string, error) {\n+ a := []string{\"exec\", contID}\n+ a = append(a, args...)\n+ output, err := cc.run(a...)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"exec failed: %v\", err)\n+ }\n+ return output, nil\n+}\n+\n// Rm removes a container. 
It corresponds to `crictl rm`.\nfunc (cc *Crictl) Rm(contID string) error {\n_, err := cc.run(\"rm\", contID)\n" } ]
Go
Apache License 2.0
google/gvisor
Add test that volume can be mounted on top of a symlink PiperOrigin-RevId: 220588094 Change-Id: I18915e892ceac86eac1f89ebcadffb4fdf8d0cf6
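The property the test above asserts can also be checked with a few lines of standard-library Go. This is only an illustrative sketch run against a filesystem directly rather than through the crictl harness; the paths are copied from the test.

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// The test expects /etc/resolv.conf inside the container to remain a
	// symlink pointing at /tmp/resolv.conf even after the volume mount.
	target, err := os.Readlink("/etc/resolv.conf")
	if err != nil {
		fmt.Println("readlink failed:", err)
		return
	}
	fmt.Println("/etc/resolv.conf ->", target)
}
```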
259,985
08.11.2018 11:08:41
28,800
5a0be6fa203273d1e4ab06a206eaffeca5724533
Create stubs for syscalls up to Linux 4.4. Create syscall stubs for missing syscalls up to Linux 4.4 and advertise a kernel version of 4.4.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/inode.go", "new_path": "pkg/sentry/fs/inode.go", "diff": "@@ -439,10 +439,7 @@ func (i *Inode) CheckOwnership(ctx context.Context) bool {\n// CheckCapability checks whether `ctx` has capability `cp` with respect to\n// operations on this Inode.\n//\n-// Compare Linux's kernel/capability.c:capable_wrt_inode_uidgid(). Note that\n-// this function didn't exist in Linux 3.11.10, but was added by upstream\n-// 23adbe12ef7d \"fs,userns: Change inode_capable to capable_wrt_inode_uidgid\"\n-// to fix local privilege escalation CVE-2014-4014.\n+// Compare Linux's kernel/capability.c:capable_wrt_inode_uidgid().\nfunc (i *Inode) CheckCapability(ctx context.Context, cp linux.Capability) bool {\nuattr, err := i.UnstableAttr(ctx)\nif err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/proc/README.md", "new_path": "pkg/sentry/fs/proc/README.md", "diff": "@@ -223,7 +223,7 @@ Number of seconds idle | Always zero\n```bash\n$ cat /proc/version\n-Linux version 3.11.10 #1 SMP Fri Nov 29 10:47:50 PST 2013\n+Linux version 4.4 #1 SMP Sun Jan 10 15:06:54 PST 2016\n```\n## Process-specific data\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/proc/fds.go", "new_path": "pkg/sentry/fs/proc/fds.go", "diff": "@@ -173,11 +173,6 @@ func (f *fdDir) Check(ctx context.Context, inode *fs.Inode, req fs.PermMask) boo\nif t := kernel.TaskFromContext(ctx); t != nil {\n// Allow access if the task trying to access it is in the\n// thread group corresponding to this directory.\n- //\n- // N.B. Technically, in Linux 3.11, this compares what would be\n- // the equivalent of task pointers. However, this was fixed\n- // later in 54708d2858e7 (\"proc: actually make\n- // proc_fd_permission() thread-friendly\").\nif f.t.ThreadGroup() == t.ThreadGroup() {\nreturn true\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/proc/task.go", "new_path": "pkg/sentry/fs/proc/task.go", "diff": "@@ -76,7 +76,7 @@ func newTaskDir(t *kernel.Task, msrc *fs.MountSource, pidns *kernel.PIDNamespace\n\"gid_map\": newGIDMap(t, msrc),\n// TODO: This is incorrect for /proc/[pid]/task/[tid]/io, i.e. if\n// showSubtasks is false:\n- // http://lxr.free-electrons.com/source/fs/proc/base.c?v=3.11#L2980\n+ // https://elixir.bootlin.com/linux/v4.4/source/fs/proc/base.c#L3154\n\"io\": newIO(t, msrc),\n\"maps\": newMaps(t, msrc),\n\"mountinfo\": seqfile.NewSeqFileInode(t, &mountInfoFile{t: t}, msrc),\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/auth/user_namespace.go", "new_path": "pkg/sentry/kernel/auth/user_namespace.go", "diff": "@@ -49,10 +49,7 @@ type UserNamespace struct {\ngidMapFromParent idMapSet\ngidMapToParent idMapSet\n- // TODO: Consider supporting disabling setgroups(2), which \"was\n- // added in Linux 3.19, but was backported to many earlier stable kernel\n- // series, because it addresses a security issue\" - user_namespaces(7). (It\n- // was not backported to 3.11.10, which we are currently imitating.)\n+ // TODO: Support disabling setgroups(2).\n}\n// NewRootUserNamespace returns a UserNamespace that is appropriate for a\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/version.go", "new_path": "pkg/sentry/kernel/version.go", "diff": "@@ -19,7 +19,7 @@ type Version struct {\n// Operating system name (e.g. \"Linux\").\nSysname string\n- // Operating system release (e.g. \"3.11.10-amd64\").\n+ // Operating system release (e.g. \"4.4-amd64\").\nRelease string\n// Operating system version. 
On Linux this takes the shape\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/linux64.go", "new_path": "pkg/sentry/syscalls/linux/linux64.go", "diff": "@@ -32,15 +32,19 @@ import (\nconst _AUDIT_ARCH_X86_64 = 0xc000003e\n// AMD64 is a table of Linux amd64 syscall API with the corresponding syscall\n-// numbers from Linux 3.11. The entries commented out are those syscalls we\n+// numbers from Linux 4.4. The entries commented out are those syscalls we\n// don't currently support.\nvar AMD64 = &kernel.SyscallTable{\nOS: abi.Linux,\nArch: arch.AMD64,\nVersion: kernel.Version{\n+ // Version 4.4 is chosen as a stable, longterm version of Linux, which\n+ // guides the interface provided by this syscall table. The build\n+ // version is that for a clean build with default kernel config, at 5\n+ // minutes after v4.4 was tagged.\nSysname: \"Linux\",\n- Release: \"3.11.10\",\n- Version: \"#1 SMP Fri Nov 29 10:47:50 PST 2013\",\n+ Release: \"4.4\",\n+ Version: \"#1 SMP Sun Jan 10 15:06:54 PST 2016\",\n},\nAuditNumber: _AUDIT_ARCH_X86_64,\nTable: map[uintptr]kernel.SyscallFn{\n@@ -358,9 +362,18 @@ var AMD64 = &kernel.SyscallTable{\n// 311: ProcessVmWritev, TODO may require cap_sys_ptrace\n312: syscalls.CapError(linux.CAP_SYS_PTRACE), // Kcmp, requires cap_sys_ptrace\n313: syscalls.CapError(linux.CAP_SYS_MODULE), // FinitModule, requires cap_sys_module\n- // \"Backports.\"\n+ // 314: SchedSetattr, TODO, we have no scheduler\n+ // 315: SchedGetattr, TODO, we have no scheduler\n+ // 316: Renameat2, TODO\n317: Seccomp,\n318: GetRandom,\n+ // 319: MemfdCreate, TODO\n+ 320: syscalls.CapError(linux.CAP_SYS_BOOT), // KexecFileLoad, infeasible to support\n+ 321: syscalls.CapError(linux.CAP_SYS_ADMIN), // Bpf, requires cap_sys_admin for all commands\n+ // 322: Execveat, TODO\n+ // 323: Userfaultfd, TODO\n+ // 324: Membarrier, TODO\n+ 325: syscalls.Error(nil), // Mlock2, TODO\n},\nEmulate: map[usermem.Addr]uintptr{\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_file.go", "new_path": "pkg/sentry/syscalls/linux/sys_file.go", "diff": "@@ -1140,9 +1140,6 @@ func mayLinkAt(t *kernel.Task, target *fs.Inode) error {\n// always enabled, and thus imposes the following restrictions on hard\n// links.\n- // Technically Linux is more restrictive in 3.11.10 (requires CAP_FOWNER in\n- // root user namespace); this is from the later f2ca379642d7 \"namei: permit\n- // linking with CAP_FOWNER in userns\".\nif target.CheckOwnership(t) {\n// fs/namei.c:may_linkat: \"Source inode owner (or CAP_FOWNER)\n// can hardlink all they like.\"\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_thread.go", "new_path": "pkg/sentry/syscalls/linux/sys_thread.go", "diff": "@@ -159,8 +159,8 @@ func clone(t *kernel.Task, flags int, stack usermem.Addr, parentTID usermem.Addr\n}\n// Clone implements linux syscall clone(2).\n-// sys_clone has so many flavors. We implement the default one in the\n-// current linux 3.11 x86_64:\n+// sys_clone has so many flavors. We implement the default one in linux 3.11\n+// x86_64:\n// sys_clone(clone_flags, newsp, parent_tidptr, child_tidptr, tls_val)\nfunc Clone(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\nflags := int(args[0].Int())\n" } ]
Go
Apache License 2.0
google/gvisor
Create stubs for syscalls up to Linux 4.4. Create syscall stubs for missing syscalls up to Linux 4.4 and advertise a kernel version of 4.4. PiperOrigin-RevId: 220667680 Change-Id: Idbdccde538faabf16debc22f492dd053a8af0ba7
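One quick way to observe the advertised version change is to call uname(2) from inside a sandbox. The sketch below is not taken from the repository; it uses only the standard syscall package and assumes Linux/amd64, where the Utsname fields are [65]int8 arrays.

```go
package main

import (
	"fmt"
	"syscall"
)

// cstr converts a NUL-terminated Utsname field to a Go string.
func cstr(f [65]int8) string {
	b := make([]byte, 0, len(f))
	for _, c := range f {
		if c == 0 {
			break
		}
		b = append(b, byte(c))
	}
	return string(b)
}

func main() {
	var uts syscall.Utsname
	if err := syscall.Uname(&uts); err != nil {
		fmt.Println("uname failed:", err)
		return
	}
	// After this change a sandboxed process should see release "4.4".
	fmt.Println("release:", cstr(uts.Release), "version:", cstr(uts.Version))
}
```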
259,992
09.11.2018 10:57:45
28,800
93e88760b0d0c9c6656f7773f68540b1853d169b
Add tests for multicontainer start/stop. Each container has its respective gofer. Test that the gofer can be shut down when a container stops and that it doesn't affect other containers.
[ { "change_type": "MODIFY", "old_path": "runsc/container/multi_container_test.go", "new_path": "runsc/container/multi_container_test.go", "diff": "@@ -732,3 +732,94 @@ func TestMultiContainerDestroyStarting(t *testing.T) {\n}\nwg.Wait()\n}\n+\n+// TestMultiContainerGoferStop tests that IO operations continue to work after\n+// containers have been stopped and gofers killed.\n+func TestMultiContainerGoferStop(t *testing.T) {\n+ app, err := testutil.FindFile(\"runsc/container/test_app\")\n+ if err != nil {\n+ t.Fatal(\"error finding test_app:\", err)\n+ }\n+\n+ // Setup containers. Root container just reaps children, while the others\n+ // perform some IOs. Children are executed in 3 batches of 10. Within the\n+ // batch there is overlap between containers starting and being destroyed. In\n+ // between batches all containers stop before starting another batch.\n+ cmds := [][]string{{app, \"reaper\"}}\n+ const batchSize = 10\n+ for i := 0; i < 3*batchSize; i++ {\n+ cmds = append(cmds, []string{\"sh\", \"-c\", \"find /bin -type f | head | xargs -I SRC cp SRC /tmp/output\"})\n+ }\n+ allSpecs, allIDs := createSpecs(cmds...)\n+\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ // Split up the specs and IDs.\n+ rootSpec := allSpecs[0]\n+ rootID := allIDs[0]\n+ childrenSpecs := allSpecs[1:]\n+ childrenIDs := allIDs[1:]\n+\n+ bundleDir, err := testutil.SetupBundleDir(rootSpec)\n+ if err != nil {\n+ t.Fatalf(\"error setting up bundle dir: %v\", err)\n+ }\n+ defer os.RemoveAll(bundleDir)\n+\n+ // Start root container.\n+ conf := testutil.TestConfigWithRoot(rootDir)\n+ root, err := Create(rootID, rootSpec, conf, bundleDir, \"\", \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating root container: %v\", err)\n+ }\n+ if err := root.Start(conf); err != nil {\n+ t.Fatalf(\"error starting root container: %v\", err)\n+ }\n+ defer root.Destroy()\n+\n+ // Run batches. Each batch starts containers in parallel, then wait and\n+ // destroy them before starting another batch.\n+ for i := 0; i < len(childrenSpecs); i += batchSize {\n+ t.Logf(\"Starting batch from %d to %d\", i, i+batchSize)\n+ specs := childrenSpecs[i : i+batchSize]\n+ ids := childrenIDs[i : i+batchSize]\n+\n+ var children []*Container\n+ for j, spec := range specs {\n+ bundleDir, err := testutil.SetupBundleDir(spec)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(bundleDir)\n+\n+ child, err := Create(ids[j], spec, conf, bundleDir, \"\", \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ children = append(children, child)\n+\n+ if err := child.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+\n+ // Give a small gap between containers.\n+ time.Sleep(50 * time.Millisecond)\n+ }\n+ for _, child := range children {\n+ ws, err := child.Wait()\n+ if err != nil {\n+ t.Fatalf(\"waiting for container: %v\", err)\n+ }\n+ if !ws.Exited() || ws.ExitStatus() != 0 {\n+ t.Fatalf(\"container failed, waitStatus: %x (%d)\", ws, ws.ExitStatus())\n+ }\n+ if err := child.Destroy(); err != nil {\n+ t.Fatalf(\"error destroying container: %v\", err)\n+ }\n+ }\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Add tests for multicontainer start/stop. Each container has its respective gofer. Test that the gofer can be shut down when a container stops and that it doesn't affect other containers. PiperOrigin-RevId: 220829898 Change-Id: I2a44a3cf2a88577e6ad1133afc622bbf4a5f6591
259,962
09.11.2018 14:37:42
28,800
33089561b1d53dada959a312ab69574cd6635b4b
Add an implementation of a SACK scoreboard as per RFC6675.
[ { "change_type": "MODIFY", "old_path": "WORKSPACE", "new_path": "WORKSPACE", "diff": "@@ -131,3 +131,9 @@ http_archive(\n\"https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz\",\n],\n)\n+\n+go_repository(\n+ name = \"com_github_google_btree\",\n+ importpath = \"github.com/google/btree\",\n+ commit = \"4030bb1f1f0c35b30ca7009e9ebd06849dd45306\",\n+)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/BUILD", "new_path": "pkg/tcpip/header/BUILD", "diff": "@@ -24,6 +24,7 @@ go_library(\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n\"//pkg/tcpip/seqnum\",\n+ \"@com_github_google_btree//:go_default_library\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/tcp.go", "new_path": "pkg/tcpip/header/tcp.go", "diff": "@@ -17,6 +17,7 @@ package header\nimport (\n\"encoding/binary\"\n+ \"github.com/google/btree\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum\"\n)\n@@ -131,6 +132,16 @@ type SACKBlock struct {\nEnd seqnum.Value\n}\n+// Less returns true if r.Start < b.Start.\n+func (r SACKBlock) Less(b btree.Item) bool {\n+ return r.Start.LessThan(b.(SACKBlock).Start)\n+}\n+\n+// Contains returns true if b is completely contained in r.\n+func (r SACKBlock) Contains(b SACKBlock) bool {\n+ return r.Start.LessThanEq(b.Start) && b.End.LessThanEq(r.End)\n+}\n+\n// TCPOptions are used to parse and cache the TCP segment options for a non\n// syn/syn-ack segment.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack.go", "new_path": "pkg/tcpip/stack/stack.go", "diff": "@@ -200,9 +200,17 @@ type TCPSenderState struct {\n// TCPSACKInfo holds TCP SACK related information for a given TCP endpoint.\ntype TCPSACKInfo struct {\n- // Blocks is the list of SACK block currently received by the\n- // TCP endpoint.\n+ // Blocks is the list of SACK Blocks that identify the out of order segments\n+ // held by a given TCP endpoint.\nBlocks []header.SACKBlock\n+\n+ // ReceivedBlocks are the SACK blocks received by this endpoint\n+ // from the peer endpoint.\n+ ReceivedBlocks []header.SACKBlock\n+\n+ // MaxSACKED is the highest sequence number that has been SACKED\n+ // by the peer.\n+ MaxSACKED seqnum.Value\n}\n// TCPEndpointState is a copy of the internal state of a TCP endpoint.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/BUILD", "new_path": "pkg/tcpip/transport/tcp/BUILD", "diff": "@@ -28,6 +28,7 @@ go_library(\n\"rcv.go\",\n\"reno.go\",\n\"sack.go\",\n+ \"sack_scoreboard.go\",\n\"segment.go\",\n\"segment_heap.go\",\n\"segment_queue.go\",\n@@ -50,14 +51,24 @@ go_library(\n\"//pkg/tcpip/stack\",\n\"//pkg/tmutex\",\n\"//pkg/waiter\",\n+ \"@com_github_google_btree//:go_default_library\",\n],\n)\n+filegroup(\n+ name = \"autogen\",\n+ srcs = [\n+ \"tcp_segment_list.go\",\n+ ],\n+ visibility = [\"//:sandbox\"],\n+)\n+\ngo_test(\nname = \"tcp_test\",\nsize = \"small\",\nsrcs = [\n\"dual_stack_test.go\",\n+ \"sack_scoreboard_test.go\",\n\"tcp_sack_test.go\",\n\"tcp_test.go\",\n\"tcp_timestamp_test.go\",\n@@ -79,11 +90,3 @@ go_test(\n\"//pkg/waiter\",\n],\n)\n-\n-filegroup(\n- name = \"autogen\",\n- srcs = [\n- \"tcp_segment_list.go\",\n- ],\n- visibility = [\"//:sandbox\"],\n-)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/tcpip/transport/tcp/sack_scoreboard.go", "diff": "+// Copyright 2018 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You 
may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package tcp\n+\n+import (\n+ \"fmt\"\n+ \"strings\"\n+\n+ \"github.com/google/btree\"\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip/header\"\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum\"\n+)\n+\n+// maxSACKBlocks is the maximum number of distinct SACKBlocks the scoreboard\n+// will track. Once there are 100 distinct blocks, new insertions will fail.\n+const maxSACKBlocks = 100\n+\n+// SACKScoreboard stores a set of disjoint SACK ranges.\n+type SACKScoreboard struct {\n+ smss uint16\n+ maxSACKED seqnum.Value\n+ sacked seqnum.Size\n+ ranges *btree.BTree\n+}\n+\n+// NewSACKScoreboard returns a new SACK Scoreboard.\n+func NewSACKScoreboard(smss uint16, iss seqnum.Value) *SACKScoreboard {\n+ return &SACKScoreboard{\n+ smss: smss,\n+ ranges: btree.New(2),\n+ maxSACKED: iss,\n+ }\n+}\n+\n+// Insert inserts/merges the provided SACKBlock into the scoreboard.\n+func (s *SACKScoreboard) Insert(r header.SACKBlock) {\n+ if s.ranges.Len() >= maxSACKBlocks {\n+ return\n+ }\n+\n+ // Check if we can merge the new range with a range before or after it.\n+ var toDelete []btree.Item\n+ if s.maxSACKED.LessThan(r.End - 1) {\n+ s.maxSACKED = r.End - 1\n+ }\n+ s.ranges.AscendGreaterOrEqual(r, func(i btree.Item) bool {\n+ if i == r {\n+ return true\n+ }\n+ sacked := i.(header.SACKBlock)\n+ // There is a hole between these two SACK blocks, so we can't\n+ // merge anymore.\n+ if r.End.LessThan(r.Start) {\n+ return false\n+ }\n+ // There is some overlap at this point, merge the blocks and\n+ // delete the other one.\n+ //\n+ // ----sS--------sE\n+ // r.S---------------rE\n+ // -------sE\n+ if sacked.End.LessThan(r.End) {\n+ // sacked is contained in the newly inserted range.\n+ // Delete this block.\n+ toDelete = append(toDelete, i)\n+ return true\n+ }\n+ // sacked covers a range past end of the newly inserted\n+ // block.\n+ r.End = sacked.End\n+ toDelete = append(toDelete, i)\n+ return true\n+ })\n+\n+ s.ranges.DescendLessOrEqual(r, func(i btree.Item) bool {\n+ if i == r {\n+ return true\n+ }\n+ sacked := i.(header.SACKBlock)\n+ // sA------sE\n+ // rA----rE\n+ if sacked.End.LessThan(r.Start) {\n+ return false\n+ }\n+ // The previous range extends into the current block. 
Merge it\n+ // into the newly inserted range and delete the other one.\n+ //\n+ // <-rA---rE----<---rE--->\n+ // sA--------------sE\n+ r.Start = sacked.Start\n+ // Extend r to cover sacked if sacked extends past r.\n+ if r.End.LessThan(sacked.End) {\n+ r.End = sacked.End\n+ }\n+ toDelete = append(toDelete, i)\n+ return true\n+ })\n+ for _, i := range toDelete {\n+ if sb := s.ranges.Delete(i); sb != nil {\n+ sb := i.(header.SACKBlock)\n+ s.sacked -= sb.Start.Size(sb.End)\n+ }\n+ }\n+\n+ replaced := s.ranges.ReplaceOrInsert(r)\n+ if replaced == nil {\n+ s.sacked += r.Start.Size(r.End)\n+ }\n+}\n+\n+// IsSACKED returns true if the a given range of sequence numbers denoted by r\n+// are already covered by SACK information in the scoreboard.\n+func (s *SACKScoreboard) IsSACKED(r header.SACKBlock) bool {\n+ found := false\n+ s.ranges.DescendLessOrEqual(r, func(i btree.Item) bool {\n+ sacked := i.(header.SACKBlock)\n+ if sacked.End.LessThan(r.Start) {\n+ return false\n+ }\n+ if sacked.Contains(r) {\n+ found = true\n+ return false\n+ }\n+ return true\n+ })\n+ return found\n+}\n+\n+// Dump prints the state of the scoreboard structure.\n+func (s *SACKScoreboard) String() string {\n+ var str strings.Builder\n+ str.WriteString(\"SACKScoreboard: {\")\n+ s.ranges.Ascend(func(i btree.Item) bool {\n+ str.WriteString(fmt.Sprintf(\"%v,\", i))\n+ return true\n+ })\n+ str.WriteString(\"}\\n\")\n+ return str.String()\n+}\n+\n+// Delete removes all SACK information prior to seq.\n+func (s *SACKScoreboard) Delete(seq seqnum.Value) {\n+ toDelete := []btree.Item{}\n+ r := header.SACKBlock{seq, seq.Add(1)}\n+ s.ranges.DescendLessOrEqual(r, func(i btree.Item) bool {\n+ if i == r {\n+ return true\n+ }\n+ sb := i.(header.SACKBlock)\n+ toDelete = append(toDelete, i)\n+ if sb.End.LessThanEq(seq) {\n+ s.sacked -= sb.Start.Size(sb.End)\n+ } else {\n+ newSB := header.SACKBlock{seq, sb.End}\n+ s.ranges.ReplaceOrInsert(newSB)\n+ s.sacked -= sb.Start.Size(seq)\n+ }\n+ return true\n+ })\n+ for _, i := range toDelete {\n+ s.ranges.Delete(i)\n+ }\n+}\n+\n+// Copy provides a copy of the SACK scoreboard.\n+func (s *SACKScoreboard) Copy() (sackBlocks []header.SACKBlock, maxSACKED seqnum.Value) {\n+ s.ranges.Ascend(func(i btree.Item) bool {\n+ sackBlocks = append(sackBlocks, i.(header.SACKBlock))\n+ return true\n+ })\n+ return sackBlocks, s.maxSACKED\n+}\n+\n+// IsLost implements the IsLost(SeqNum) operation defined in RFC 3517 section 4.\n+//\n+// This routine returns whether the given sequence number is considered to be\n+// lost. 
The routine returns true when either nDupAckThreshold discontiguous\n+// SACKed sequences have arrived above 'SeqNum' or (nDupAckThreshold * SMSS)\n+// bytes with sequence numbers greater than 'SeqNum' have been SACKed.\n+// Otherwise, the routine returns false.\n+func (s *SACKScoreboard) IsLost(r header.SACKBlock) bool {\n+ nDupSACK := 0\n+ nDupSACKBytes := seqnum.Size(0)\n+ isLost := false\n+ s.ranges.AscendGreaterOrEqual(r, func(i btree.Item) bool {\n+ sacked := i.(header.SACKBlock)\n+ if sacked.Contains(r) {\n+ return false\n+ }\n+ nDupSACKBytes += sacked.Start.Size(sacked.End)\n+ nDupSACK++\n+ if nDupSACK >= nDupAckThreshold || nDupSACKBytes >= seqnum.Size(nDupAckThreshold*s.smss) {\n+ isLost = true\n+ return false\n+ }\n+ return true\n+ })\n+ return isLost\n+}\n+\n+// Empty returns true if the SACK scoreboard has no entries, false otherwise.\n+func (s *SACKScoreboard) Empty() bool {\n+ return s.ranges.Len() == 0\n+}\n+\n+// Sacked returns the current number of bytes held in the SACK scoreboard.\n+func (s *SACKScoreboard) Sacked() seqnum.Size {\n+ return s.sacked\n+}\n+\n+// MaxSACKED returns the highest sequence number ever inserted in the SACK\n+// scoreboard.\n+func (s *SACKScoreboard) MaxSACKED() seqnum.Value {\n+ return s.maxSACKED\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/tcpip/transport/tcp/sack_scoreboard_test.go", "diff": "+// Copyright 2018 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package tcp_test\n+\n+import (\n+ \"testing\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip/header\"\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum\"\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp\"\n+)\n+\n+const smss = 1500\n+\n+func initScoreboard(blocks []header.SACKBlock, iss seqnum.Value) *tcp.SACKScoreboard {\n+ s := tcp.NewSACKScoreboard(smss, iss)\n+ for _, blk := range blocks {\n+ s.Insert(blk)\n+ }\n+ return s\n+}\n+\n+func TestSACKScoreboardIsSACKED(t *testing.T) {\n+ type blockTest struct {\n+ block header.SACKBlock\n+ sacked bool\n+ }\n+ testCases := []struct {\n+ comment string\n+ scoreboardBlocks []header.SACKBlock\n+ blockTests []blockTest\n+ iss seqnum.Value\n+ }{\n+ {\n+ \"Test holes and unsacked SACK blocks in SACKed ranges and insertion of overlapping SACK blocks\",\n+ []header.SACKBlock{{10, 20}, {10, 30}, {30, 40}, {41, 50}, {5, 10}, {1, 50}, {111, 120}, {101, 110}, {52, 120}},\n+ []blockTest{\n+ {header.SACKBlock{15, 21}, true},\n+ {header.SACKBlock{200, 201}, false},\n+ {header.SACKBlock{50, 51}, false},\n+ {header.SACKBlock{53, 120}, true},\n+ },\n+ 0,\n+ },\n+ {\n+ \"Test disjoint SACKBlocks\",\n+ []header.SACKBlock{{2288624809, 2288810057}, {2288811477, 2288838565}},\n+ []blockTest{\n+ {header.SACKBlock{2288624809, 2288810057}, true},\n+ {header.SACKBlock{2288811477, 2288838565}, true},\n+ {header.SACKBlock{2288810057, 2288811477}, false},\n+ },\n+ 2288624809,\n+ },\n+ {\n+ \"Test sequence number wrap around\",\n+ []header.SACKBlock{{4294254144, 225652}, {5340409, 
5350509}},\n+ []blockTest{\n+ {header.SACKBlock{4294254144, 4294254145}, true},\n+ {header.SACKBlock{4294254143, 4294254144}, false},\n+ {header.SACKBlock{4294254144, 1}, true},\n+ {header.SACKBlock{225652, 5350509}, false},\n+ {header.SACKBlock{5340409, 5350509}, true},\n+ {header.SACKBlock{5350509, 5350609}, false},\n+ },\n+ 4294254144,\n+ },\n+ }\n+ for _, tc := range testCases {\n+ sb := initScoreboard(tc.scoreboardBlocks, tc.iss)\n+ for _, blkTest := range tc.blockTests {\n+ if want, got := blkTest.sacked, sb.IsSACKED(blkTest.block); got != want {\n+ t.Errorf(\"%s: s.IsSACKED(%v) = %v, want %v\", tc.comment, blkTest.block, got, want)\n+ }\n+ }\n+ }\n+}\n+\n+func TestSACKScoreboardIsLost(t *testing.T) {\n+ s := tcp.NewSACKScoreboard(10, 0)\n+ s.Insert(header.SACKBlock{1, 50})\n+ s.Insert(header.SACKBlock{51, 100})\n+ s.Insert(header.SACKBlock{111, 120})\n+ s.Insert(header.SACKBlock{101, 110})\n+ s.Insert(header.SACKBlock{121, 151})\n+ testCases := []struct {\n+ block header.SACKBlock\n+ lost bool\n+ }{\n+ {header.SACKBlock{0, 1}, true},\n+ {header.SACKBlock{1, 2}, false},\n+ {header.SACKBlock{1, 45}, false},\n+ {header.SACKBlock{50, 51}, true},\n+ // This one should return true because there are > 3* 10 (smss)\n+ // bytes that have been sacked above this sequence number.\n+ {header.SACKBlock{119, 120}, true},\n+ {header.SACKBlock{120, 121}, true},\n+ {header.SACKBlock{125, 126}, false},\n+ }\n+ for _, tc := range testCases {\n+ if want, got := tc.lost, s.IsLost(tc.block); got != want {\n+ t.Errorf(\"s.IsLost(%v) = %v, want %v\", tc.block, got, want)\n+ }\n+ }\n+}\n+\n+func TestSACKScoreboardDelete(t *testing.T) {\n+ blocks := []header.SACKBlock{{4294254144, 225652}, {5340409, 5350509}}\n+ s := initScoreboard(blocks, 4294254143)\n+ s.Delete(5340408)\n+ if s.Empty() {\n+ t.Fatalf(\"s.Empty() = true, want false\")\n+ }\n+ if got, want := s.Sacked(), blocks[1].Start.Size(blocks[1].End); got != want {\n+ t.Fatalf(\"incorrect sacked bytes in scoreboard got: %v, want: %v\", got, want)\n+ }\n+ s.Delete(5340410)\n+ if s.Empty() {\n+ t.Fatal(\"s.Empty() = true, want false\")\n+ }\n+ newSB := header.SACKBlock{5340410, 5350509}\n+ if !s.IsSACKED(newSB) {\n+ t.Fatalf(\"s.IsSACKED(%v) = false, want true, scoreboard: %v\", newSB, s)\n+ }\n+ s.Delete(5350509)\n+ lastOctet := header.SACKBlock{5350508, 5350509}\n+ if s.IsSACKED(lastOctet) {\n+ t.Fatalf(\"s.IsSACKED(%v) = false, want true\", lastOctet)\n+ }\n+\n+ s.Delete(5350510)\n+ if !s.Empty() {\n+ t.Fatal(\"s.Empty() = false, want true\")\n+ }\n+ if got, want := s.Sacked(), seqnum.Size(0); got != want {\n+ t.Fatalf(\"incorrect sacked bytes in scoreboard got: %v, want: %v\", got, want)\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Add an implementation of a SACK scoreboard as per RFC6675. PiperOrigin-RevId: 220866996 Change-Id: I89d48215df57c00d6a6ec512fc18712a2ea9080b
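For orientation, here is a hedged sketch of how the exported scoreboard API introduced above could be driven from another package. The constructor and method signatures are taken from the diff, but the program itself and its sequence numbers are hypothetical.

```go
package main

import (
	"fmt"

	"gvisor.googlesource.com/gvisor/pkg/tcpip/header"
	"gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp"
)

func main() {
	// Scoreboard for an SMSS of 1500 bytes, initial sequence number 0.
	s := tcp.NewSACKScoreboard(1500, 0)

	// Record two disjoint SACKed ranges reported by the peer.
	s.Insert(header.SACKBlock{Start: 100, End: 200})
	s.Insert(header.SACKBlock{Start: 300, End: 400})

	// A range inside the first block is already SACKed; the hole between
	// the two blocks is not.
	fmt.Println(s.IsSACKED(header.SACKBlock{Start: 150, End: 160})) // true
	fmt.Println(s.IsSACKED(header.SACKBlock{Start: 200, End: 300})) // false
	fmt.Println(s.Sacked())                                         // 200 bytes
}
```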
259,992
09.11.2018 14:53:24
28,800
d97ccfa346d23d99dcbe634a10fa5d81b089100d
Close donated files if containerManager.Start() fails
[ { "change_type": "MODIFY", "old_path": "runsc/boot/controller.go", "new_path": "runsc/boot/controller.go", "diff": "@@ -213,6 +213,12 @@ type StartArgs struct {\nfunc (cm *containerManager) Start(args *StartArgs, _ *struct{}) error {\nlog.Debugf(\"containerManager.Start: %+v\", args)\n+ defer func() {\n+ for _, f := range args.FilePayload.Files {\n+ f.Close()\n+ }\n+ }()\n+\n// Validate arguments.\nif args == nil {\nreturn errors.New(\"start missing arguments\")\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -515,7 +515,8 @@ func (l *Loader) createContainer(cid string) error {\n}\n// startContainer starts a child container. It returns the thread group ID of\n-// the newly created process.\n+// the newly created process. Caller owns 'files' and may close them after\n+// this method returns.\nfunc (l *Loader) startContainer(k *kernel.Kernel, spec *specs.Spec, conf *Config, cid string, files []*os.File) error {\n// Create capabilities.\ncaps, err := specutils.Capabilities(spec.Process.Capabilities)\n@@ -553,7 +554,6 @@ func (l *Loader) startContainer(k *kernel.Kernel, spec *specs.Spec, conf *Config\nif err != nil {\nreturn fmt.Errorf(\"failed to dup file: %v\", err)\n}\n- f.Close()\nioFDs = append(ioFDs, fd)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Close donated files if containerManager.Start() fails PiperOrigin-RevId: 220869535 Change-Id: I9917e5daf02499f7aab6e2aa4051c54ff4461b9a
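The ownership convention adopted here (the callee dups any donated descriptor it wants to keep, while the caller closes the originals even on failure) looks roughly like the following standalone sketch. The helper name is hypothetical and only standard syscalls are used; it is not the actual boot controller code.

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

// useDonated stands in for containerManager.Start: it works on its own dup
// of each donated file and never closes the caller's descriptors.
func useDonated(files []*os.File) error {
	for _, f := range files {
		fd, err := syscall.Dup(int(f.Fd()))
		if err != nil {
			return fmt.Errorf("failed to dup file: %v", err)
		}
		// ... hand fd off to the sandbox here ...
		syscall.Close(fd)
	}
	return nil
}

func main() {
	f, err := os.Open("/dev/null")
	if err != nil {
		fmt.Println(err)
		return
	}
	files := []*os.File{f}
	// The caller owns the originals and closes them whether or not the
	// callee succeeded, mirroring the deferred close added above.
	defer func() {
		for _, f := range files {
			f.Close()
		}
	}()
	if err := useDonated(files); err != nil {
		fmt.Println(err)
	}
}
```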
259,854
13.11.2018 18:01:26
28,800
7f60294a7367ee62cc5e0bd21648a68184c4ca5e
Implement TCP_NODELAY and TCP_CORK. Previously, TCP_NODELAY was always enabled and we would lie about it being configurable. TCP_NODELAY is now disabled by default (to match Linux) in the socket layer so that non-gVisor users don't automatically start using this questionable optimization.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/epsocket/epsocket.go", "new_path": "pkg/sentry/socket/epsocket/epsocket.go", "diff": "@@ -157,7 +157,13 @@ type SocketOperations struct {\n}\n// New creates a new endpoint socket.\n-func New(t *kernel.Task, family int, skType transport.SockType, queue *waiter.Queue, endpoint tcpip.Endpoint) *fs.File {\n+func New(t *kernel.Task, family int, skType transport.SockType, queue *waiter.Queue, endpoint tcpip.Endpoint) (*fs.File, *syserr.Error) {\n+ if skType == transport.SockStream {\n+ if err := endpoint.SetSockOpt(tcpip.DelayOption(1)); err != nil {\n+ return nil, syserr.TranslateNetstackError(err)\n+ }\n+ }\n+\ndirent := socket.NewDirent(t, epsocketDevice)\ndefer dirent.DecRef()\nreturn fs.NewFile(t, dirent, fs.FileFlags{Read: true, Write: true}, &SocketOperations{\n@@ -165,7 +171,7 @@ func New(t *kernel.Task, family int, skType transport.SockType, queue *waiter.Qu\nfamily: family,\nEndpoint: endpoint,\nskType: skType,\n- })\n+ }), nil\n}\nvar sockAddrInetSize = int(binary.Size(linux.SockAddrInet{}))\n@@ -426,10 +432,10 @@ func (s *SocketOperations) blockingAccept(t *kernel.Task) (tcpip.Endpoint, *wait\n// tcpip.Endpoint.\nfunc (s *SocketOperations) Accept(t *kernel.Task, peerRequested bool, flags int, blocking bool) (kdefs.FD, interface{}, uint32, *syserr.Error) {\n// Issue the accept request to get the new endpoint.\n- ep, wq, err := s.Endpoint.Accept()\n- if err != nil {\n- if err != tcpip.ErrWouldBlock || !blocking {\n- return 0, nil, 0, syserr.TranslateNetstackError(err)\n+ ep, wq, terr := s.Endpoint.Accept()\n+ if terr != nil {\n+ if terr != tcpip.ErrWouldBlock || !blocking {\n+ return 0, nil, 0, syserr.TranslateNetstackError(terr)\n}\nvar err *syserr.Error\n@@ -439,7 +445,10 @@ func (s *SocketOperations) Accept(t *kernel.Task, peerRequested bool, flags int,\n}\n}\n- ns := New(t, s.family, s.skType, wq, ep)\n+ ns, err := New(t, s.family, s.skType, wq, ep)\n+ if err != nil {\n+ return 0, nil, 0, err\n+ }\ndefer ns.DecRef()\nif flags&linux.SOCK_NONBLOCK != 0 {\n@@ -632,7 +641,22 @@ func GetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, family int,\nreturn nil, syserr.ErrInvalidArgument\n}\n- var v tcpip.NoDelayOption\n+ var v tcpip.DelayOption\n+ if err := ep.GetSockOpt(&v); err != nil {\n+ return nil, syserr.TranslateNetstackError(err)\n+ }\n+\n+ if v == 0 {\n+ return int32(1), nil\n+ }\n+ return int32(0), nil\n+\n+ case syscall.TCP_CORK:\n+ if outLen < sizeOfInt32 {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ var v tcpip.CorkOption\nif err := ep.GetSockOpt(&v); err != nil {\nreturn nil, syserr.TranslateNetstackError(err)\n}\n@@ -748,7 +772,18 @@ func SetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, level int, n\n}\nv := usermem.ByteOrder.Uint32(optVal)\n- return syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.NoDelayOption(v)))\n+ var o tcpip.DelayOption\n+ if v == 0 {\n+ o = 1\n+ }\n+ return syserr.TranslateNetstackError(ep.SetSockOpt(o))\n+ case syscall.TCP_CORK:\n+ if len(optVal) < sizeOfInt32 {\n+ return syserr.ErrInvalidArgument\n+ }\n+\n+ v := usermem.ByteOrder.Uint32(optVal)\n+ return syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.CorkOption(v)))\n}\ncase syscall.SOL_IPV6:\nswitch name {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/epsocket/provider.go", "new_path": "pkg/sentry/socket/epsocket/provider.go", "diff": "@@ -88,7 +88,7 @@ func (p *provider) Socket(t *kernel.Task, stype transport.SockType, protocol int\nreturn nil, syserr.TranslateNetstackError(e)\n}\n- 
return New(t, p.family, stype, wq, ep), nil\n+ return New(t, p.family, stype, wq, ep)\n}\n// Pair just returns nil sockets (not supported).\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tcpip.go", "new_path": "pkg/tcpip/tcpip.go", "diff": "@@ -420,10 +420,14 @@ type ReceiveQueueSizeOption int\n// socket is to be restricted to sending and receiving IPv6 packets only.\ntype V6OnlyOption int\n-// NoDelayOption is used by SetSockOpt/GetSockOpt to specify if data should be\n+// DelayOption is used by SetSockOpt/GetSockOpt to specify if data should be\n// sent out immediately by the transport protocol. For TCP, it determines if the\n// Nagle algorithm is on or off.\n-type NoDelayOption int\n+type DelayOption int\n+\n+// CorkOption is used by SetSockOpt/GetSockOpt to specify if data should be\n+// held until segments are full by the TCP transport protocol.\n+type CorkOption int\n// ReuseAddressOption is used by SetSockOpt/GetSockOpt to specify whether Bind()\n// should allow reuse of local address.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -162,10 +162,19 @@ type endpoint struct {\n// sack holds TCP SACK related information for this endpoint.\nsack SACKInfo\n+ // delay enables Nagle's algorithm.\n+ //\n+ // delay is a boolean (0 is false) and must be accessed atomically.\n+ delay uint32\n+\n+ // cork holds back segments until full.\n+ //\n+ // cork is a boolean (0 is false) and must be accessed atomically.\n+ cork uint32\n+\n// The options below aren't implemented, but we remember the user\n// settings because applications expect to be able to set/query these\n// options.\n- noDelay bool\nreuseAddr bool\n// segmentQueue is used to hand received segments to the protocol\n@@ -276,7 +285,6 @@ func newEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, waite\nrcvBufSize: DefaultBufferSize,\nsndBufSize: DefaultBufferSize,\nsndMTU: int(math.MaxInt32),\n- noDelay: false,\nreuseAddr: true,\nkeepalive: keepalive{\n// Linux defaults.\n@@ -643,10 +651,24 @@ func (e *endpoint) zeroReceiveWindow(scale uint8) bool {\n// SetSockOpt sets a socket option.\nfunc (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\nswitch v := opt.(type) {\n- case tcpip.NoDelayOption:\n- e.mu.Lock()\n- e.noDelay = v != 0\n- e.mu.Unlock()\n+ case tcpip.DelayOption:\n+ if v == 0 {\n+ atomic.StoreUint32(&e.delay, 0)\n+ } else {\n+ atomic.StoreUint32(&e.delay, 1)\n+ }\n+ return nil\n+\n+ case tcpip.CorkOption:\n+ if v == 0 {\n+ atomic.StoreUint32(&e.cork, 0)\n+ } else {\n+ atomic.StoreUint32(&e.cork, 1)\n+ }\n+\n+ // Handle the corked data.\n+ e.sndWaker.Assert()\n+\nreturn nil\ncase tcpip.ReuseAddressOption:\n@@ -812,13 +834,16 @@ func (e *endpoint) GetSockOpt(opt interface{}) *tcpip.Error {\n*o = tcpip.ReceiveQueueSizeOption(v)\nreturn nil\n- case *tcpip.NoDelayOption:\n- e.mu.RLock()\n- v := e.noDelay\n- e.mu.RUnlock()\n+ case *tcpip.DelayOption:\n+ *o = 0\n+ if v := atomic.LoadUint32(&e.delay); v != 0 {\n+ *o = 1\n+ }\n+ return nil\n+ case *tcpip.CorkOption:\n*o = 0\n- if v {\n+ if v := atomic.LoadUint32(&e.cork); v != 0 {\n*o = 1\n}\nreturn nil\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/snd.go", "new_path": "pkg/tcpip/transport/tcp/snd.go", "diff": "@@ -17,6 +17,7 @@ package tcp\nimport (\n\"math\"\n\"sync\"\n+ \"sync/atomic\"\n\"time\"\n\"gvisor.googlesource.com/gvisor/pkg/sleep\"\n@@ -409,8 +410,6 @@ func (s *sender) sendData() {\n// We abuse the flags field to 
determine if we have already\n// assigned a sequence number to this segment.\nif seg.flags == 0 {\n- seg.sequenceNumber = s.sndNxt\n- seg.flags = flagAck | flagPsh\n// Merge segments if allowed.\nif seg.data.Size() != 0 {\navailable := int(seg.sequenceNumber.Size(end))\n@@ -418,8 +417,20 @@ func (s *sender) sendData() {\navailable = limit\n}\n+ // nextTooBig indicates that the next segment was too\n+ // large to entirely fit in the current segment. It would\n+ // be possible to split the next segment and merge the\n+ // portion that fits, but unexpectedly splitting segments\n+ // can have user visible side-effects which can break\n+ // applications. For example, RFC 7766 section 8 says\n+ // that the length and data of a DNS response should be\n+ // sent in the same TCP segment to avoid triggering bugs\n+ // in poorly written DNS implementations.\n+ var nextTooBig bool\n+\nfor next != nil && next.data.Size() != 0 {\nif seg.data.Size()+next.data.Size() > available {\n+ nextTooBig = true\nbreak\n}\n@@ -429,7 +440,32 @@ func (s *sender) sendData() {\ns.writeList.Remove(next)\nnext = next.Next()\n}\n+\n+ if !nextTooBig && seg.data.Size() < available {\n+ // Segment is not full.\n+ if s.outstanding > 0 && atomic.LoadUint32(&s.ep.delay) != 0 {\n+ // Nagle's algorithm. From Wikipedia:\n+ // Nagle's algorithm works by combining a number of\n+ // small outgoing messages and sending them all at\n+ // once. Specifically, as long as there is a sent\n+ // packet for which the sender has received no\n+ // acknowledgment, the sender should keep buffering\n+ // its output until it has a full packet's worth of\n+ // output, thus allowing output to be sent all at\n+ // once.\n+ break\n+ }\n+ if atomic.LoadUint32(&s.ep.cork) != 0 {\n+ // Hold back the segment until full.\n+ break\n+ }\n+ }\n}\n+\n+ // Assign flags. 
We don't do it above so that we can merge\n+ // additional data if Nagle holds the segment.\n+ seg.sequenceNumber = s.sndNxt\n+ seg.flags = flagAck | flagPsh\n}\nvar segEnd seqnum.Value\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/tcp_test.go", "new_path": "pkg/tcpip/transport/tcp/tcp_test.go", "diff": "@@ -1255,17 +1255,40 @@ func TestZeroScaledWindowReceive(t *testing.T) {\n}\nfunc TestSegmentMerging(t *testing.T) {\n+ tests := []struct {\n+ name string\n+ stop func(tcpip.Endpoint)\n+ resume func(tcpip.Endpoint)\n+ }{\n+ {\n+ \"stop work\",\n+ func(ep tcpip.Endpoint) {\n+ ep.(interface{ StopWork() }).StopWork()\n+ },\n+ func(ep tcpip.Endpoint) {\n+ ep.(interface{ ResumeWork() }).ResumeWork()\n+ },\n+ },\n+ {\n+ \"cork\",\n+ func(ep tcpip.Endpoint) {\n+ ep.SetSockOpt(tcpip.CorkOption(1))\n+ },\n+ func(ep tcpip.Endpoint) {\n+ ep.SetSockOpt(tcpip.CorkOption(0))\n+ },\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\nc := context.New(t, defaultMTU)\ndefer c.Cleanup()\nc.CreateConnected(789, 30000, nil)\n// Prevent the endpoint from processing packets.\n- worker := c.EP.(interface {\n- StopWork()\n- ResumeWork()\n- })\n- worker.StopWork()\n+ test.stop(c.EP)\nvar allData []byte\nfor i, data := range [][]byte{{1, 2, 3, 4}, {5, 6, 7}, {8, 9}, {10}, {11}} {\n@@ -1277,7 +1300,7 @@ func TestSegmentMerging(t *testing.T) {\n}\n// Let the endpoint process the segments that we just sent.\n- worker.ResumeWork()\n+ test.resume(c.EP)\n// Check that data is received.\nb := c.GetPacket()\n@@ -1304,6 +1327,56 @@ func TestSegmentMerging(t *testing.T) {\nAckNum: c.IRS.Add(1 + seqnum.Size(len(allData))),\nRcvWnd: 30000,\n})\n+ })\n+ }\n+}\n+\n+func TestDelay(t *testing.T) {\n+ c := context.New(t, defaultMTU)\n+ defer c.Cleanup()\n+\n+ c.CreateConnected(789, 30000, nil)\n+\n+ c.EP.SetSockOpt(tcpip.DelayOption(1))\n+\n+ var allData []byte\n+ for i, data := range [][]byte{{0}, {1, 2, 3, 4}, {5, 6, 7}, {8, 9}, {10}, {11}} {\n+ allData = append(allData, data...)\n+ view := buffer.NewViewFromBytes(data)\n+ if _, _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil {\n+ t.Fatalf(\"Write #%d failed: %v\", i+1, err)\n+ }\n+ }\n+\n+ seq := c.IRS.Add(1)\n+ for _, want := range [][]byte{allData[:1], allData[1:]} {\n+ // Check that data is received.\n+ b := c.GetPacket()\n+ checker.IPv4(t, b,\n+ checker.PayloadLen(len(want)+header.TCPMinimumSize),\n+ checker.TCP(\n+ checker.DstPort(context.TestPort),\n+ checker.SeqNum(uint32(seq)),\n+ checker.AckNum(790),\n+ checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),\n+ ),\n+ )\n+\n+ if got := b[header.IPv4MinimumSize+header.TCPMinimumSize:]; !bytes.Equal(got, want) {\n+ t.Fatalf(\"got data = %v, want = %v\", got, want)\n+ }\n+\n+ seq = seq.Add(seqnum.Size(len(want)))\n+ // Acknowledge the data.\n+ c.SendPacket(nil, &context.Headers{\n+ SrcPort: context.TestPort,\n+ DstPort: c.Port,\n+ Flags: header.TCPFlagAck,\n+ SeqNum: 790,\n+ AckNum: seq,\n+ RcvWnd: 30000,\n+ })\n+ }\n}\nfunc testBrokenUpWrite(t *testing.T, c *context.Context, maxPayload int) {\n" } ]
Go
Apache License 2.0
google/gvisor
Implement TCP_NODELAY and TCP_CORK. Previously, TCP_NODELAY was always enabled and we would lie about it being configurable. TCP_NODELAY is now disabled by default (to match Linux) in the socket layer so that non-gVisor users don't automatically start using this questionable optimization. PiperOrigin-RevId: 221368472 Change-Id: Ib0240f66d94455081f4e0ca94f09d9338b2c1356
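For reference, this is how an application toggles the two options on an ordinary Linux socket using the standard syscall package, not gVisor's internal tcpip API; the epsocket layer in the diff above translates these into DelayOption and CorkOption.

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, 0)
	if err != nil {
		fmt.Println("socket:", err)
		return
	}
	defer syscall.Close(fd)

	// TCP_NODELAY=1 disables Nagle's algorithm, so small writes go out
	// immediately instead of being coalesced.
	if err := syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_NODELAY, 1); err != nil {
		fmt.Println("TCP_NODELAY:", err)
	}

	// TCP_CORK=1 holds back partial segments until the cork is removed
	// (set back to 0) or the segment fills up.
	if err := syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_CORK, 1); err != nil {
		fmt.Println("TCP_CORK:", err)
	}
}
```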
259,854
14.11.2018 11:57:58
28,800
b5e91eaa52cfb9b43dff0f857938ca231068924d
Clean up tcp.sendData
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/buffer/view.go", "new_path": "pkg/tcpip/buffer/view.go", "diff": "@@ -146,7 +146,7 @@ func (vv VectorisedView) Views() []View {\n}\n// Append appends the views in a vectorised view to this vectorised view.\n-func (vv *VectorisedView) Append(vv2 *VectorisedView) {\n+func (vv *VectorisedView) Append(vv2 VectorisedView) {\nvv.views = append(vv.views, vv2.views...)\nvv.size += vv2.size\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/snd.go", "new_path": "pkg/tcpip/transport/tcp/snd.go", "diff": "@@ -404,8 +404,7 @@ func (s *sender) sendData() {\nseg := s.writeNext\nend := s.sndUna.Add(s.sndWnd)\nvar dataSent bool\n- for next := (*segment)(nil); seg != nil && s.outstanding < s.sndCwnd; seg = next {\n- next = seg.Next()\n+ for ; seg != nil && s.outstanding < s.sndCwnd; seg = seg.Next() {\n// We abuse the flags field to determine if we have already\n// assigned a sequence number to this segment.\n@@ -428,17 +427,16 @@ func (s *sender) sendData() {\n// in poorly written DNS implementations.\nvar nextTooBig bool\n- for next != nil && next.data.Size() != 0 {\n- if seg.data.Size()+next.data.Size() > available {\n+ for seg.Next() != nil && seg.Next().data.Size() != 0 {\n+ if seg.data.Size()+seg.Next().data.Size() > available {\nnextTooBig = true\nbreak\n}\n- seg.data.Append(&next.data)\n+ seg.data.Append(seg.Next().data)\n// Consume the segment that we just merged in.\n- s.writeList.Remove(next)\n- next = next.Next()\n+ s.writeList.Remove(seg.Next())\n}\nif !nextTooBig && seg.data.Size() < available {\n@@ -496,7 +494,6 @@ func (s *sender) sendData() {\nnSeg.data.TrimFront(available)\nnSeg.sequenceNumber.UpdateForward(seqnum.Size(available))\ns.writeList.InsertAfter(seg, nSeg)\n- next = nSeg\nseg.data.CapLength(available)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Clean up tcp.sendData PiperOrigin-RevId: 221484739 Change-Id: I44c71f79f99d0d00a2e70a7f06d7024a62a5de0a
259,854
15.11.2018 13:16:05
28,800
9d8e49d9505e0b2659be01ec49cdad1948134188
Process delayed packets when delay is disabled. Moving the wakeup logic into the disable blocks is an optimization.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -654,20 +654,24 @@ func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\ncase tcpip.DelayOption:\nif v == 0 {\natomic.StoreUint32(&e.delay, 0)\n+\n+ // Handle delayed data.\n+ e.sndWaker.Assert()\n} else {\natomic.StoreUint32(&e.delay, 1)\n}\n+\nreturn nil\ncase tcpip.CorkOption:\nif v == 0 {\natomic.StoreUint32(&e.cork, 0)\n- } else {\n- atomic.StoreUint32(&e.cork, 1)\n- }\n// Handle the corked data.\ne.sndWaker.Assert()\n+ } else {\n+ atomic.StoreUint32(&e.cork, 1)\n+ }\nreturn nil\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/tcp_test.go", "new_path": "pkg/tcpip/transport/tcp/tcp_test.go", "diff": "@@ -1379,6 +1379,76 @@ func TestDelay(t *testing.T) {\n}\n}\n+func TestUndelay(t *testing.T) {\n+ c := context.New(t, defaultMTU)\n+ defer c.Cleanup()\n+\n+ c.CreateConnected(789, 30000, nil)\n+\n+ c.EP.SetSockOpt(tcpip.DelayOption(1))\n+\n+ allData := [][]byte{{0}, {1, 2, 3}}\n+ for i, data := range allData {\n+ view := buffer.NewViewFromBytes(data)\n+ if _, _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil {\n+ t.Fatalf(\"Write #%d failed: %v\", i+1, err)\n+ }\n+ }\n+\n+ seq := c.IRS.Add(1)\n+\n+ // Check that data is received.\n+ first := c.GetPacket()\n+ checker.IPv4(t, first,\n+ checker.PayloadLen(len(allData[0])+header.TCPMinimumSize),\n+ checker.TCP(\n+ checker.DstPort(context.TestPort),\n+ checker.SeqNum(uint32(seq)),\n+ checker.AckNum(790),\n+ checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),\n+ ),\n+ )\n+\n+ if got, want := first[header.IPv4MinimumSize+header.TCPMinimumSize:], allData[0]; !bytes.Equal(got, want) {\n+ t.Fatalf(\"got first packet's data = %v, want = %v\", got, want)\n+ }\n+\n+ seq = seq.Add(seqnum.Size(len(allData[0])))\n+\n+ // Check that we don't get the second packet yet.\n+ c.CheckNoPacketTimeout(\"delayed second packet transmitted\", 100*time.Millisecond)\n+\n+ c.EP.SetSockOpt(tcpip.DelayOption(0))\n+\n+ // Check that data is received.\n+ second := c.GetPacket()\n+ checker.IPv4(t, second,\n+ checker.PayloadLen(len(allData[1])+header.TCPMinimumSize),\n+ checker.TCP(\n+ checker.DstPort(context.TestPort),\n+ checker.SeqNum(uint32(seq)),\n+ checker.AckNum(790),\n+ checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),\n+ ),\n+ )\n+\n+ if got, want := second[header.IPv4MinimumSize+header.TCPMinimumSize:], allData[1]; !bytes.Equal(got, want) {\n+ t.Fatalf(\"got second packet's data = %v, want = %v\", got, want)\n+ }\n+\n+ seq = seq.Add(seqnum.Size(len(allData[1])))\n+\n+ // Acknowledge the data.\n+ c.SendPacket(nil, &context.Headers{\n+ SrcPort: context.TestPort,\n+ DstPort: c.Port,\n+ Flags: header.TCPFlagAck,\n+ SeqNum: 790,\n+ AckNum: seq,\n+ RcvWnd: 30000,\n+ })\n+}\n+\nfunc testBrokenUpWrite(t *testing.T, c *context.Context, maxPayload int) {\npayloadMultiplier := 10\ndataLen := payloadMultiplier * maxPayload\n" } ]
Go
Apache License 2.0
google/gvisor
Process delayed packets when delay is disabled. Moving the wakeup logic into the disable blocks is an optimization. PiperOrigin-RevId: 221677028 Change-Id: Ib5a5a6d52cc77b4bbc5dedcad9ee1dbb3da98deb
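The user-visible effect mirrors ordinary Linux behaviour: clearing the delay (i.e. turning TCP_NODELAY on) should flush whatever Nagle's algorithm was holding back. Below is a host-side sketch using the standard net package against a hypothetical local listener; it illustrates the semantics, not the netstack internals.

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Hypothetical peer; point this at a real listener when running.
	conn, err := net.DialTimeout("tcp", "127.0.0.1:8080", time.Second)
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	tc := conn.(*net.TCPConn)

	tc.SetNoDelay(false) // Enable Nagle: small writes may be delayed.
	tc.Write([]byte("a"))
	tc.Write([]byte("b"))

	// Disabling the delay pushes out any held-back data right away, which
	// is the behaviour this change adds to netstack.
	tc.SetNoDelay(true)
}
```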
259,985
15.11.2018 15:13:52
28,800
f7aa9371247a3e7d8c490ac0fd4c4f3ff6de2017
Advertise vsyscall support via /proc/<pid>/maps. Also update test utilities for probing vsyscall support and add a metric to see if vsyscalls are actually used in sandboxes.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/BUILD", "new_path": "pkg/sentry/kernel/BUILD", "diff": "@@ -131,6 +131,7 @@ go_library(\n\"//pkg/cpuid\",\n\"//pkg/eventchannel\",\n\"//pkg/log\",\n+ \"//pkg/metric\",\n\"//pkg/refs\",\n\"//pkg/secio\",\n\"//pkg/sentry/arch\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_syscall.go", "new_path": "pkg/sentry/kernel/task_syscall.go", "diff": "@@ -21,6 +21,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/bits\"\n+ \"gvisor.googlesource.com/gvisor/pkg/metric\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/memmap\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n@@ -60,6 +61,8 @@ const (\nERESTART_RESTARTBLOCK = SyscallRestartErrno(516)\n)\n+var vsyscallCount = metric.MustCreateNewUint64Metric(\"/kernel/vsyscall_count\", false /* sync */, \"Number of times vsyscalls were invoked by the application\")\n+\n// Error implements error.Error.\nfunc (e SyscallRestartErrno) Error() string {\n// Descriptions are borrowed from strace.\n@@ -325,6 +328,8 @@ func (*runSyscallExit) execute(t *Task) taskRunState {\n// indicated by an execution fault at address addr. doVsyscall returns the\n// task's next run state.\nfunc (t *Task) doVsyscall(addr usermem.Addr, sysno uintptr) taskRunState {\n+ vsyscallCount.Increment()\n+\n// Grab the caller up front, to make sure there's a sensible stack.\ncaller := t.Arch().Native(uintptr(0))\nif _, err := t.CopyIn(usermem.Addr(t.Arch().Stack()), caller); err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/mm/proc_pid_maps.go", "new_path": "pkg/sentry/mm/proc_pid_maps.go", "diff": "@@ -53,6 +53,22 @@ func (mm *MemoryManager) ReadSeqFileData(ctx context.Context, handle seqfile.Seq\nHandle: &vmaAddr,\n})\n}\n+\n+ // We always emulate vsyscall, so advertise it here. Everything about a\n+ // vsyscall region is static, so just hard code the maps entry since we\n+ // don't have a real vma backing it. The vsyscall region is at the end of\n+ // the virtual address space so nothing should be mapped after it (if\n+ // something is really mapped in the tiny ~10 MiB segment afterwards, we'll\n+ // get the sorting on the maps file wrong at worst; but that's not possible\n+ // on any current platform).\n+ //\n+ // Artifically adjust the seqfile handle so we only output vsyscall entry once.\n+ if vsyscallEnd := usermem.Addr(0xffffffffff601000); start != vsyscallEnd {\n+ data = append(data, seqfile.SeqData{\n+ Buf: []byte(\"ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\\n\"),\n+ Handle: &vsyscallEnd,\n+ })\n+ }\nreturn data, 1\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Advertise vsyscall support via /proc/<pid>/maps. Also update test utilities for probing vsyscall support and add a metric to see if vsyscalls are actually used in sandboxes. PiperOrigin-RevId: 221698834 Change-Id: I57870ecc33ea8c864bd7437833f21aa1e8117477
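The new entry can be checked from inside a sandbox by scanning /proc/self/maps. The sketch below uses only the standard library.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("/proc/self/maps")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// After this change the last mapping should read:
	// ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]
	s := bufio.NewScanner(f)
	for s.Scan() {
		if strings.Contains(s.Text(), "[vsyscall]") {
			fmt.Println(s.Text())
		}
	}
}
```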
259,858
16.11.2018 12:16:37
28,800
bb9a2bb62ed37f9b29c7ab4418b8b90417d1b2a2
Update futex to use usermem abstractions. This eliminates the indirection that existed in task_futex.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/futex/BUILD", "new_path": "pkg/sentry/kernel/futex/BUILD", "diff": "@@ -36,7 +36,9 @@ go_library(\nimportpath = \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/futex\",\nvisibility = [\"//pkg/sentry:internal\"],\ndeps = [\n+ \"//pkg/abi/linux\",\n\"//pkg/sentry/memmap\",\n+ \"//pkg/sentry/usermem\",\n\"//pkg/syserror\",\n],\n)\n@@ -46,4 +48,5 @@ go_test(\nsize = \"small\",\nsrcs = [\"futex_test.go\"],\nembed = [\":futex\"],\n+ deps = [\"//pkg/sentry/usermem\"],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/futex/futex.go", "new_path": "pkg/sentry/kernel/futex/futex.go", "diff": "@@ -20,7 +20,9 @@ package futex\nimport (\n\"sync\"\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/memmap\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n\"gvisor.googlesource.com/gvisor/pkg/syserror\"\n)\n@@ -81,8 +83,8 @@ func (k *Key) clone() Key {\n}\n// Preconditions: k.Kind == KindPrivate or KindSharedPrivate.\n-func (k *Key) addr() uintptr {\n- return uintptr(k.Offset)\n+func (k *Key) addr() usermem.Addr {\n+ return usermem.Addr(k.Offset)\n}\n// matches returns true if a wakeup on k2 should wake a waiter waiting on k.\n@@ -91,23 +93,13 @@ func (k *Key) matches(k2 *Key) bool {\nreturn k.Kind == k2.Kind && k.Mappable == k2.Mappable && k.Offset == k2.Offset\n}\n-// Checker abstracts memory accesses. This is useful because the \"addresses\"\n-// used in this package may not be real addresses (they could be indices of an\n-// array, for example), or they could be mapped via some special mechanism.\n-//\n-// TODO: Replace this with usermem.IO.\n-type Checker interface {\n- // Check should validate that given address contains the given value.\n- // If it does not contain the value, syserror.EAGAIN must be returned.\n- // Any other error may be returned, which will be propagated.\n- Check(addr uintptr, val uint32) error\n-\n- // Op should atomically perform the operation encoded in op on the data\n- // pointed to by addr, then apply the comparison encoded in op to the\n- // original value at addr, returning the result.\n- // Note that op is an opaque operation whose behaviour is defined\n- // outside of the futex manager.\n- Op(addr uintptr, op uint32) (bool, error)\n+// Target abstracts memory accesses and keys.\n+type Target interface {\n+ // SwapUint32 gives access to usermem.SwapUint32.\n+ SwapUint32(addr usermem.Addr, new uint32) (uint32, error)\n+\n+ // CompareAndSwap gives access to usermem.CompareAndSwapUint32.\n+ CompareAndSwapUint32(addr usermem.Addr, old, new uint32) (uint32, error)\n// GetSharedKey returns a Key with kind KindSharedPrivate or\n// KindSharedMappable corresponding to the memory mapped at address addr.\n@@ -115,7 +107,84 @@ type Checker interface {\n// If GetSharedKey returns a Key with a non-nil MappingIdentity, a\n// reference is held on the MappingIdentity, which must be dropped by the\n// caller when the Key is no longer in use.\n- GetSharedKey(addr uintptr) (Key, error)\n+ GetSharedKey(addr usermem.Addr) (Key, error)\n+}\n+\n+// check performs a basic equality check on the given address.\n+func check(t Target, addr usermem.Addr, val uint32) error {\n+ prev, err := t.CompareAndSwapUint32(addr, val, val)\n+ if err != nil {\n+ return err\n+ }\n+ if prev != val {\n+ return syserror.EAGAIN\n+ }\n+ return nil\n+}\n+\n+// atomicOp performs a complex operation on the given address.\n+func atomicOp(t Target, addr usermem.Addr, opIn uint32) (bool, 
error) {\n+ opType := (opIn >> 28) & 0xf\n+ cmp := (opIn >> 24) & 0xf\n+ opArg := (opIn >> 12) & 0xfff\n+ cmpArg := opIn & 0xfff\n+\n+ if opType&linux.FUTEX_OP_OPARG_SHIFT != 0 {\n+ opArg = 1 << opArg\n+ opType &^= linux.FUTEX_OP_OPARG_SHIFT // Clear flag.\n+ }\n+\n+ var (\n+ oldVal uint32\n+ err error\n+ )\n+ if opType == linux.FUTEX_OP_SET {\n+ oldVal, err = t.SwapUint32(addr, opArg)\n+ } else {\n+ for {\n+ oldVal, err = t.CompareAndSwapUint32(addr, 0, 0)\n+ if err != nil {\n+ break\n+ }\n+ var newVal uint32\n+ switch opType {\n+ case linux.FUTEX_OP_ADD:\n+ newVal = oldVal + opArg\n+ case linux.FUTEX_OP_OR:\n+ newVal = oldVal | opArg\n+ case linux.FUTEX_OP_ANDN:\n+ newVal = oldVal &^ opArg\n+ case linux.FUTEX_OP_XOR:\n+ newVal = oldVal ^ opArg\n+ default:\n+ return false, syserror.ENOSYS\n+ }\n+ prev, err := t.CompareAndSwapUint32(addr, oldVal, newVal)\n+ if err != nil {\n+ break\n+ }\n+ if prev == oldVal {\n+ break // Success.\n+ }\n+ }\n+ }\n+\n+ switch cmp {\n+ case linux.FUTEX_OP_CMP_EQ:\n+ return oldVal == cmpArg, nil\n+ case linux.FUTEX_OP_CMP_NE:\n+ return oldVal != cmpArg, nil\n+ case linux.FUTEX_OP_CMP_LT:\n+ return oldVal < cmpArg, nil\n+ case linux.FUTEX_OP_CMP_LE:\n+ return oldVal <= cmpArg, nil\n+ case linux.FUTEX_OP_CMP_GT:\n+ return oldVal > cmpArg, nil\n+ case linux.FUTEX_OP_CMP_GE:\n+ return oldVal >= cmpArg, nil\n+ default:\n+ return false, syserror.ENOSYS\n+ }\n}\n// Waiter is the struct which gets enqueued into buckets for wake up routines\n@@ -243,7 +312,7 @@ const (\n)\n// getKey returns a Key representing address addr in c.\n-func getKey(c Checker, addr uintptr, private bool) (Key, error) {\n+func getKey(t Target, addr usermem.Addr, private bool) (Key, error) {\n// Ensure the address is aligned.\n// It must be a DWORD boundary.\nif addr&0x3 != 0 {\n@@ -252,11 +321,11 @@ func getKey(c Checker, addr uintptr, private bool) (Key, error) {\nif private {\nreturn Key{Kind: KindPrivate, Offset: uint64(addr)}, nil\n}\n- return c.GetSharedKey(addr)\n+ return t.GetSharedKey(addr)\n}\n// bucketIndexForAddr returns the index into Manager.buckets for addr.\n-func bucketIndexForAddr(addr uintptr) uintptr {\n+func bucketIndexForAddr(addr usermem.Addr) uintptr {\n// - The bottom 2 bits of addr must be 0, per getKey.\n//\n// - On amd64, the top 16 bits of addr (bits 48-63) must be equal to bit 47\n@@ -277,8 +346,8 @@ func bucketIndexForAddr(addr uintptr) uintptr {\n// is also why h1 and h2 are grouped separately; for \"(addr >> 2) + ... 
+\n// (addr >> 42)\" without any additional grouping, the compiler puts all 4\n// additions in the critical path.\n- h1 := (addr >> 2) + (addr >> 12) + (addr >> 22)\n- h2 := (addr >> 32) + (addr >> 42)\n+ h1 := uintptr(addr>>2) + uintptr(addr>>12) + uintptr(addr>>22)\n+ h2 := uintptr(addr>>32) + uintptr(addr>>42)\nreturn (h1 + h2) % bucketCount\n}\n@@ -363,9 +432,9 @@ func (m *Manager) lockBuckets(k1, k2 *Key) (*bucket, *bucket) {\n// Wake wakes up to n waiters matching the bitmask on the given addr.\n// The number of waiters woken is returned.\n-func (m *Manager) Wake(c Checker, addr uintptr, private bool, bitmask uint32, n int) (int, error) {\n+func (m *Manager) Wake(t Target, addr usermem.Addr, private bool, bitmask uint32, n int) (int, error) {\n// This function is very hot; avoid defer.\n- k, err := getKey(c, addr, private)\n+ k, err := getKey(t, addr, private)\nif err != nil {\nreturn 0, err\n}\n@@ -378,13 +447,13 @@ func (m *Manager) Wake(c Checker, addr uintptr, private bool, bitmask uint32, n\nreturn r, nil\n}\n-func (m *Manager) doRequeue(c Checker, addr, naddr uintptr, private bool, checkval bool, val uint32, nwake int, nreq int) (int, error) {\n- k1, err := getKey(c, addr, private)\n+func (m *Manager) doRequeue(t Target, addr, naddr usermem.Addr, private bool, checkval bool, val uint32, nwake int, nreq int) (int, error) {\n+ k1, err := getKey(t, addr, private)\nif err != nil {\nreturn 0, err\n}\ndefer k1.release()\n- k2, err := getKey(c, naddr, private)\n+ k2, err := getKey(t, naddr, private)\nif err != nil {\nreturn 0, err\n}\n@@ -397,7 +466,7 @@ func (m *Manager) doRequeue(c Checker, addr, naddr uintptr, private bool, checkv\n}\nif checkval {\n- if err := c.Check(addr, val); err != nil {\n+ if err := check(t, addr, val); err != nil {\nreturn 0, err\n}\n}\n@@ -413,28 +482,28 @@ func (m *Manager) doRequeue(c Checker, addr, naddr uintptr, private bool, checkv\n// Requeue wakes up to nwake waiters on the given addr, and unconditionally\n// requeues up to nreq waiters on naddr.\n-func (m *Manager) Requeue(c Checker, addr, naddr uintptr, private bool, nwake int, nreq int) (int, error) {\n- return m.doRequeue(c, addr, naddr, private, false, 0, nwake, nreq)\n+func (m *Manager) Requeue(t Target, addr, naddr usermem.Addr, private bool, nwake int, nreq int) (int, error) {\n+ return m.doRequeue(t, addr, naddr, private, false, 0, nwake, nreq)\n}\n-// RequeueCmp atomically checks that the addr contains val (via the Checker),\n+// RequeueCmp atomically checks that the addr contains val (via the Target),\n// wakes up to nwake waiters on addr and then unconditionally requeues nreq\n// waiters on naddr.\n-func (m *Manager) RequeueCmp(c Checker, addr, naddr uintptr, private bool, val uint32, nwake int, nreq int) (int, error) {\n- return m.doRequeue(c, addr, naddr, private, true, val, nwake, nreq)\n+func (m *Manager) RequeueCmp(t Target, addr, naddr usermem.Addr, private bool, val uint32, nwake int, nreq int) (int, error) {\n+ return m.doRequeue(t, addr, naddr, private, true, val, nwake, nreq)\n}\n// WakeOp atomically applies op to the memory address addr2, wakes up to nwake1\n// waiters unconditionally from addr1, and, based on the original value at addr2\n// and a comparison encoded in op, wakes up to nwake2 waiters from addr2.\n// It returns the total number of waiters woken.\n-func (m *Manager) WakeOp(c Checker, addr1, addr2 uintptr, private bool, nwake1 int, nwake2 int, op uint32) (int, error) {\n- k1, err := getKey(c, addr1, private)\n+func (m *Manager) WakeOp(t Target, addr1, addr2 
usermem.Addr, private bool, nwake1 int, nwake2 int, op uint32) (int, error) {\n+ k1, err := getKey(t, addr1, private)\nif err != nil {\nreturn 0, err\n}\ndefer k1.release()\n- k2, err := getKey(c, addr2, private)\n+ k2, err := getKey(t, addr2, private)\nif err != nil {\nreturn 0, err\n}\n@@ -447,7 +516,7 @@ func (m *Manager) WakeOp(c Checker, addr1, addr2 uintptr, private bool, nwake1 i\n}\ndone := 0\n- cond, err := c.Op(addr2, op)\n+ cond, err := atomicOp(t, addr2, op)\nif err != nil {\nreturn 0, err\n}\n@@ -468,8 +537,8 @@ func (m *Manager) WakeOp(c Checker, addr1, addr2 uintptr, private bool, nwake1 i\n// enqueues w to be woken by a send to w.C. If WaitPrepare returns nil, the\n// Waiter must be subsequently removed by calling WaitComplete, whether or not\n// a wakeup is received on w.C.\n-func (m *Manager) WaitPrepare(w *Waiter, c Checker, addr uintptr, private bool, val uint32, bitmask uint32) error {\n- k, err := getKey(c, addr, private)\n+func (m *Manager) WaitPrepare(w *Waiter, t Target, addr usermem.Addr, private bool, val uint32, bitmask uint32) error {\n+ k, err := getKey(t, addr, private)\nif err != nil {\nreturn err\n}\n@@ -487,7 +556,7 @@ func (m *Manager) WaitPrepare(w *Waiter, c Checker, addr uintptr, private bool,\n// This function is very hot; avoid defer.\n// Perform our atomic check.\n- if err := c.Check(addr, val); err != nil {\n+ if err := check(t, addr, val); err != nil {\nb.mu.Unlock()\nw.key.release()\nreturn err\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/futex/futex_test.go", "new_path": "pkg/sentry/kernel/futex/futex_test.go", "diff": "@@ -22,9 +22,11 @@ import (\n\"syscall\"\n\"testing\"\n\"unsafe\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n)\n-// testData implements the Checker interface, and allows us to\n+// testData implements the Target interface, and allows us to\n// treat the address passed for futex operations as an index in\n// a byte slice for testing simplicity.\ntype testData []byte\n@@ -35,18 +37,19 @@ func newTestData(size uint) testData {\nreturn make([]byte, size)\n}\n-func (t testData) Check(addr uintptr, val uint32) error {\n- if val != atomic.LoadUint32((*uint32)(unsafe.Pointer(&t[addr]))) {\n- return syscall.EAGAIN\n- }\n- return nil\n+func (t testData) SwapUint32(addr usermem.Addr, new uint32) (uint32, error) {\n+ val := atomic.SwapUint32((*uint32)(unsafe.Pointer(&t[addr])), new)\n+ return val, nil\n}\n-func (t testData) Op(addr uintptr, val uint32) (bool, error) {\n- return val == 0, nil\n+func (t testData) CompareAndSwapUint32(addr usermem.Addr, old, new uint32) (uint32, error) {\n+ if atomic.CompareAndSwapUint32((*uint32)(unsafe.Pointer(&t[addr])), old, new) {\n+ return old, nil\n+ }\n+ return atomic.LoadUint32((*uint32)(unsafe.Pointer(&t[addr]))), nil\n}\n-func (t testData) GetSharedKey(addr uintptr) (Key, error) {\n+func (t testData) GetSharedKey(addr usermem.Addr) (Key, error) {\nreturn Key{\nKind: KindSharedMappable,\nOffset: uint64(addr),\n@@ -60,9 +63,9 @@ func futexKind(private bool) string {\nreturn \"shared\"\n}\n-func newPreparedTestWaiter(t *testing.T, m *Manager, c Checker, addr uintptr, private bool, val uint32, bitmask uint32) *Waiter {\n+func newPreparedTestWaiter(t *testing.T, m *Manager, ta Target, addr usermem.Addr, private bool, val uint32, bitmask uint32) *Waiter {\nw := NewWaiter()\n- if err := m.WaitPrepare(w, c, addr, private, val, bitmask); err != nil {\n+ if err := m.WaitPrepare(w, ta, addr, private, val, bitmask); err != nil {\nt.Fatalf(\"WaitPrepare failed: %v\", 
err)\n}\nreturn w\n@@ -450,12 +453,12 @@ const (\n// Beyond being used as a Locker, this is a simple mechanism for\n// changing the underlying values for simpler tests.\ntype testMutex struct {\n- a uintptr\n+ a usermem.Addr\nd testData\nm *Manager\n}\n-func newTestMutex(addr uintptr, d testData, m *Manager) *testMutex {\n+func newTestMutex(addr usermem.Addr, d testData, m *Manager) *testMutex {\nreturn &testMutex{a: addr, d: d, m: m}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_exit.go", "new_path": "pkg/sentry/kernel/task_exit.go", "diff": "@@ -247,7 +247,7 @@ func (*runExitMain) execute(t *Task) taskRunState {\nt.tg.signalHandlers.mu.Unlock()\nif !signaled {\nif _, err := t.CopyOut(t.cleartid, ThreadID(0)); err == nil {\n- t.Futex().Wake(t.FutexChecker(), uintptr(t.cleartid), false, ^uint32(0), 1)\n+ t.Futex().Wake(t, t.cleartid, false, ^uint32(0), 1)\n}\n// If the CopyOut fails, there's nothing we can do.\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_futex.go", "new_path": "pkg/sentry/kernel/task_futex.go", "diff": "package kernel\nimport (\n- \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/futex\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n- \"gvisor.googlesource.com/gvisor/pkg/syserror\"\n)\n// Futex returns t's futex manager.\n@@ -29,120 +27,21 @@ func (t *Task) Futex() *futex.Manager {\nreturn t.tc.fu\n}\n-// FutexChecker returns a futex.Checker that interprets addresses in t's\n-// address space.\n-//\n-// Preconditions: All uses of the returned futex.Checker must be on the task\n-// goroutine.\n-func (t *Task) FutexChecker() futex.Checker {\n- return futexChecker{t}\n-}\n-\n-type futexChecker struct {\n- t *Task\n-}\n-\n-// Check implements futex.Checker.Check.\n-func (f futexChecker) Check(addr uintptr, val uint32) error {\n- // FIXME\n- in := f.t.CopyScratchBuffer(4)\n- _, err := f.t.CopyInBytes(usermem.Addr(addr), in)\n- if err != nil {\n- return err\n- }\n- nval := usermem.ByteOrder.Uint32(in)\n- if val != nval {\n- return syserror.EAGAIN\n- }\n- return nil\n-}\n-\n-func (f futexChecker) atomicOp(addr uintptr, op func(uint32) uint32) (uint32, error) {\n- // FIXME\n- in := f.t.CopyScratchBuffer(4)\n- _, err := f.t.CopyInBytes(usermem.Addr(addr), in)\n- if err != nil {\n- return 0, err\n- }\n- o := usermem.ByteOrder.Uint32(in)\n- mm := f.t.MemoryManager()\n- for {\n- n := op(o)\n- r, err := mm.CompareAndSwapUint32(f.t, usermem.Addr(addr), o, n, usermem.IOOpts{\n+// SwapUint32 implements futex.Target.SwapUint32.\n+func (t *Task) SwapUint32(addr usermem.Addr, new uint32) (uint32, error) {\n+ return t.MemoryManager().SwapUint32(t, addr, new, usermem.IOOpts{\nAddressSpaceActive: true,\n})\n- if err != nil {\n- return 0, err\n}\n- if r == o {\n- return o, nil\n- }\n- o = r\n- }\n-}\n-\n-// Op implements futex.Checker.Op, interpreting opIn consistently with Linux.\n-func (f futexChecker) Op(addr uintptr, opIn uint32) (bool, error) {\n- op := (opIn >> 28) & 0xf\n- cmp := (opIn >> 24) & 0xf\n- opArg := (opIn >> 12) & 0xfff\n- cmpArg := opIn & 0xfff\n-\n- if op&linux.FUTEX_OP_OPARG_SHIFT != 0 {\n- opArg = 1 << opArg\n- op &^= linux.FUTEX_OP_OPARG_SHIFT // clear flag\n- }\n-\n- var oldVal uint32\n- var err error\n- switch op {\n- case linux.FUTEX_OP_SET:\n- oldVal, err = f.t.MemoryManager().SwapUint32(f.t, usermem.Addr(addr), opArg, usermem.IOOpts{\n+// CompareAndSwapUint32 implemets futex.Target.CompareAndSwapUint32.\n+func (t *Task) CompareAndSwapUint32(addr 
usermem.Addr, old, new uint32) (uint32, error) {\n+ return t.MemoryManager().CompareAndSwapUint32(t, addr, old, new, usermem.IOOpts{\nAddressSpaceActive: true,\n})\n- case linux.FUTEX_OP_ADD:\n- oldVal, err = f.atomicOp(addr, func(a uint32) uint32 {\n- return a + opArg\n- })\n- case linux.FUTEX_OP_OR:\n- oldVal, err = f.atomicOp(addr, func(a uint32) uint32 {\n- return a | opArg\n- })\n- case linux.FUTEX_OP_ANDN:\n- oldVal, err = f.atomicOp(addr, func(a uint32) uint32 {\n- return a &^ opArg\n- })\n- case linux.FUTEX_OP_XOR:\n- oldVal, err = f.atomicOp(addr, func(a uint32) uint32 {\n- return a ^ opArg\n- })\n- default:\n- return false, syserror.ENOSYS\n- }\n- if err != nil {\n- return false, err\n- }\n-\n- switch cmp {\n- case linux.FUTEX_OP_CMP_EQ:\n- return oldVal == cmpArg, nil\n- case linux.FUTEX_OP_CMP_NE:\n- return oldVal != cmpArg, nil\n- case linux.FUTEX_OP_CMP_LT:\n- return oldVal < cmpArg, nil\n- case linux.FUTEX_OP_CMP_LE:\n- return oldVal <= cmpArg, nil\n- case linux.FUTEX_OP_CMP_GT:\n- return oldVal > cmpArg, nil\n- case linux.FUTEX_OP_CMP_GE:\n- return oldVal >= cmpArg, nil\n- default:\n- return false, syserror.ENOSYS\n- }\n}\n-// GetSharedKey implements futex.Checker.GetSharedKey.\n-func (f futexChecker) GetSharedKey(addr uintptr) (futex.Key, error) {\n- return f.t.MemoryManager().GetSharedFutexKey(f.t, usermem.Addr(addr))\n+// GetSharedKey implements futex.Target.GetSharedKey.\n+func (t *Task) GetSharedKey(addr usermem.Addr) (futex.Key, error) {\n+ return t.MemoryManager().GetSharedFutexKey(t, addr)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/mm/syscalls.go", "new_path": "pkg/sentry/mm/syscalls.go", "diff": "@@ -794,10 +794,9 @@ func (mm *MemoryManager) Sync(ctx context.Context, addr usermem.Addr, length uin\nreturn nil\n}\n-// GetSharedFutexKey is used by kernel.futexChecker.GetSharedKey to implement\n-// futex.Checker.GetSharedKey.\n+// GetSharedFutexKey is used by kernel.Task.GetSharedKey.\nfunc (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr usermem.Addr) (futex.Key, error) {\n- ar, ok := addr.ToRange(4) // sizeof(int32)\n+ ar, ok := addr.ToRange(4) // sizeof(int32).\nif !ok {\nreturn futex.Key{}, syserror.EFAULT\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_futex.go", "new_path": "pkg/sentry/syscalls/linux/sys_futex.go", "diff": "@@ -21,6 +21,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\nktime \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n\"gvisor.googlesource.com/gvisor/pkg/syserror\"\n)\n@@ -33,7 +34,6 @@ type futexWaitRestartBlock struct {\n// addr stored as uint64 since uintptr is not save-able.\naddr uint64\n-\nprivate bool\nval uint32\nmask uint32\n@@ -41,7 +41,7 @@ type futexWaitRestartBlock struct {\n// Restart implements kernel.SyscallRestartBlock.Restart.\nfunc (f *futexWaitRestartBlock) Restart(t *kernel.Task) (uintptr, error) {\n- return futexWaitDuration(t, f.duration, false, uintptr(f.addr), f.private, f.val, f.mask)\n+ return futexWaitDuration(t, f.duration, false, usermem.Addr(f.addr), f.private, f.val, f.mask)\n}\n// futexWaitAbsolute performs a FUTEX_WAIT_BITSET, blocking until the wait is\n@@ -51,9 +51,9 @@ func (f *futexWaitRestartBlock) Restart(t *kernel.Task) (uintptr, error) {\n//\n// If blocking is interrupted, the syscall is restarted with the original\n// arguments.\n-func futexWaitAbsolute(t *kernel.Task, clockRealtime bool, ts 
linux.Timespec, forever bool, addr uintptr, private bool, val, mask uint32) (uintptr, error) {\n+func futexWaitAbsolute(t *kernel.Task, clockRealtime bool, ts linux.Timespec, forever bool, addr usermem.Addr, private bool, val, mask uint32) (uintptr, error) {\nw := t.FutexWaiter()\n- err := t.Futex().WaitPrepare(w, t.FutexChecker(), addr, private, val, mask)\n+ err := t.Futex().WaitPrepare(w, t, addr, private, val, mask)\nif err != nil {\nreturn 0, err\n}\n@@ -87,9 +87,9 @@ func futexWaitAbsolute(t *kernel.Task, clockRealtime bool, ts linux.Timespec, fo\n// syscall. If forever is true, the syscall is restarted with the original\n// arguments. If forever is false, duration is a relative timeout and the\n// syscall is restarted with the remaining timeout.\n-func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, addr uintptr, private bool, val, mask uint32) (uintptr, error) {\n+func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, addr usermem.Addr, private bool, val, mask uint32) (uintptr, error) {\nw := t.FutexWaiter()\n- err := t.Futex().WaitPrepare(w, t.FutexChecker(), addr, private, val, mask)\n+ err := t.Futex().WaitPrepare(w, t, addr, private, val, mask)\nif err != nil {\nreturn 0, err\n}\n@@ -128,16 +128,14 @@ func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, add\n// It provides a method for a program to wait for a value at a given address to\n// change, and a method to wake up anyone waiting on a particular address.\nfunc Futex(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n- uaddr := args[0].Pointer()\n+ addr := args[0].Pointer()\nfutexOp := args[1].Int()\nval := int(args[2].Int())\nnreq := int(args[3].Int())\ntimeout := args[3].Pointer()\n- uaddr2 := args[4].Pointer()\n+ naddr := args[4].Pointer()\nval3 := args[5].Int()\n- addr := uintptr(uaddr)\n- naddr := uintptr(uaddr2)\ncmd := futexOp &^ (linux.FUTEX_PRIVATE_FLAG | linux.FUTEX_CLOCK_REALTIME)\nprivate := (futexOp & linux.FUTEX_PRIVATE_FLAG) != 0\nclockRealtime := (futexOp & linux.FUTEX_CLOCK_REALTIME) == linux.FUTEX_CLOCK_REALTIME\n@@ -188,23 +186,23 @@ func Futex(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nif mask == 0 {\nreturn 0, nil, syserror.EINVAL\n}\n- n, err := t.Futex().Wake(t.FutexChecker(), addr, private, mask, val)\n+ n, err := t.Futex().Wake(t, addr, private, mask, val)\nreturn uintptr(n), nil, err\ncase linux.FUTEX_REQUEUE:\n- n, err := t.Futex().Requeue(t.FutexChecker(), addr, naddr, private, val, nreq)\n+ n, err := t.Futex().Requeue(t, addr, naddr, private, val, nreq)\nreturn uintptr(n), nil, err\ncase linux.FUTEX_CMP_REQUEUE:\n// 'val3' contains the value to be checked at 'addr' and\n// 'val' is the number of waiters that should be woken up.\nnval := uint32(val3)\n- n, err := t.Futex().RequeueCmp(t.FutexChecker(), addr, naddr, private, nval, val, nreq)\n+ n, err := t.Futex().RequeueCmp(t, addr, naddr, private, nval, val, nreq)\nreturn uintptr(n), nil, err\ncase linux.FUTEX_WAKE_OP:\nop := uint32(val3)\n- n, err := t.Futex().WakeOp(t.FutexChecker(), addr, naddr, private, val, nreq, op)\n+ n, err := t.Futex().WakeOp(t, addr, naddr, private, val, nreq, op)\nreturn uintptr(n), nil, err\ncase linux.FUTEX_LOCK_PI, linux.FUTEX_UNLOCK_PI, linux.FUTEX_TRYLOCK_PI, linux.FUTEX_WAIT_REQUEUE_PI, linux.FUTEX_CMP_REQUEUE_PI:\n" } ]
Go
Apache License 2.0
google/gvisor
Update futex to use usermem abstractions. This eliminates the indirection that existed in task_futex. PiperOrigin-RevId: 221832498 Change-Id: Ifb4c926d493913aa6694e193deae91616a29f042
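The Target contract this change settles on reduces to three memory primitives plus shared-key derivation. A stripped-down, in-memory sketch of such a backing store — mirroring the test fixture in the diff, with Addr and Key as local stand-ins for usermem.Addr and futex.Key — could look like:

    package futexsketch

    import (
        "sync/atomic"
        "unsafe"
    )

    // Addr and Key stand in for usermem.Addr and futex.Key.
    type Addr uintptr

    type Key struct {
        Offset uint64
    }

    // memTarget backs futex words with a byte slice; addresses are indices
    // and must be 4-byte aligned, as getKey enforces in the diff.
    type memTarget []byte

    func (m memTarget) SwapUint32(addr Addr, new uint32) (uint32, error) {
        return atomic.SwapUint32((*uint32)(unsafe.Pointer(&m[addr])), new), nil
    }

    // CompareAndSwapUint32 returns the value it observed: old on success, the
    // current value on failure.
    func (m memTarget) CompareAndSwapUint32(addr Addr, old, new uint32) (uint32, error) {
        if atomic.CompareAndSwapUint32((*uint32)(unsafe.Pointer(&m[addr])), old, new) {
            return old, nil
        }
        return atomic.LoadUint32((*uint32)(unsafe.Pointer(&m[addr]))), nil
    }

    func (m memTarget) GetSharedKey(addr Addr) (Key, error) {
        return Key{Offset: uint64(addr)}, nil
    }

The detail worth noting is that CompareAndSwapUint32 reports the value it saw rather than a boolean; the retry loop in atomicOp relies on comparing that value against the expected one to decide whether to retry.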
259,992
16.11.2018 18:07:52
28,800
237f9c7a5e7078b46303f1262b77372a2f6a7f7b
Don't fail when destroyContainerFS is called more than once. This can happen when destroy is called multiple times or when a previous destroy failed and it is being called again.
[ { "change_type": "MODIFY", "old_path": "runsc/boot/fs.go", "new_path": "runsc/boot/fs.go", "diff": "@@ -673,10 +673,11 @@ func destroyContainerFS(ctx context.Context, cid string, k *kernel.Kernel) error\ndefer root.DecRef()\n// Do a best-effort unmount by flushing the refs and unmount\n- // with \"detach only = true\".\n+ // with \"detach only = true\". Unmount returns EINVAL when the mount point\n+ // doesn't exist, i.e. it has already been unmounted.\nlog.Debugf(\"Unmounting container submount %q\", root.BaseName())\nm.FlushDirentRefs()\n- if err := mns.Unmount(ctx, root, true /* detach only */); err != nil {\n+ if err := mns.Unmount(ctx, root, true /* detach only */); err != nil && err != syserror.EINVAL {\nreturn fmt.Errorf(\"error unmounting container submount %q: %v\", root.BaseName(), err)\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Don't fail when destroyContainerFS is called more than once This can happen when destroy is called multiple times or when destroy failed previously and is being called again. PiperOrigin-RevId: 221882034 Change-Id: I8d069af19cf66c4e2419bdf0d4b789c5def8d19e
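The fix above amounts to treating "already unmounted" as success so teardown can be retried safely. A minimal sketch of that idempotent-teardown shape — with errAlreadyGone standing in for the EINVAL sentinel and unmount for the actual teardown call — might be:

    package teardownsketch

    import (
        "errors"
        "fmt"
    )

    // errAlreadyGone plays the role of the EINVAL the diff tolerates.
    var errAlreadyGone = errors.New("mount point does not exist")

    // destroySubmount is idempotent: a missing mount point is not a failure,
    // so a repeated destroy (or a retry after a partial failure) succeeds.
    func destroySubmount(unmount func() error) error {
        if err := unmount(); err != nil && err != errAlreadyGone {
            return fmt.Errorf("error unmounting submount: %v", err)
        }
        return nil
    }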
259,992
19.11.2018 15:25:00
28,800
fadffa2ff831034ff63146abf408ff71462b9f43
Add unsupported syscall events for get/setsockopt
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/BUILD", "new_path": "pkg/abi/linux/BUILD", "diff": "@@ -43,6 +43,7 @@ go_library(\n\"shm.go\",\n\"signal.go\",\n\"socket.go\",\n+ \"tcp.go\",\n\"time.go\",\n\"timer.go\",\n\"tty.go\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/abi/linux/ip.go", "new_path": "pkg/abi/linux/ip.go", "diff": "@@ -42,3 +42,110 @@ const (\nIPPROTO_MPLS = 137\nIPPROTO_RAW = 255\n)\n+\n+// Socket options from uapi/linux/in.h\n+const (\n+ IP_TOS = 1\n+ IP_TTL = 2\n+ IP_HDRINCL = 3\n+ IP_OPTIONS = 4\n+ IP_ROUTER_ALERT = 5\n+ IP_RECVOPTS = 6\n+ IP_RETOPTS = 7\n+ IP_PKTINFO = 8\n+ IP_PKTOPTIONS = 9\n+ IP_MTU_DISCOVER = 10\n+ IP_RECVERR = 11\n+ IP_RECVTTL = 12\n+ IP_RECVTOS = 13\n+ IP_MTU = 14\n+ IP_FREEBIND = 15\n+ IP_IPSEC_POLICY = 16\n+ IP_XFRM_POLICY = 17\n+ IP_PASSSEC = 18\n+ IP_TRANSPARENT = 19\n+ IP_ORIGDSTADDR = 20\n+ IP_RECVORIGDSTADDR = IP_ORIGDSTADDR\n+ IP_MINTTL = 21\n+ IP_NODEFRAG = 22\n+ IP_CHECKSUM = 23\n+ IP_BIND_ADDRESS_NO_PORT = 24\n+ IP_RECVFRAGSIZE = 25\n+ IP_MULTICAST_IF = 32\n+ IP_MULTICAST_TTL = 33\n+ IP_MULTICAST_LOOP = 34\n+ IP_ADD_MEMBERSHIP = 35\n+ IP_DROP_MEMBERSHIP = 36\n+ IP_UNBLOCK_SOURCE = 37\n+ IP_BLOCK_SOURCE = 38\n+ IP_ADD_SOURCE_MEMBERSHIP = 39\n+ IP_DROP_SOURCE_MEMBERSHIP = 40\n+ IP_MSFILTER = 41\n+ MCAST_JOIN_GROUP = 42\n+ MCAST_BLOCK_SOURCE = 43\n+ MCAST_UNBLOCK_SOURCE = 44\n+ MCAST_LEAVE_GROUP = 45\n+ MCAST_JOIN_SOURCE_GROUP = 46\n+ MCAST_LEAVE_SOURCE_GROUP = 47\n+ MCAST_MSFILTER = 48\n+ IP_MULTICAST_ALL = 49\n+ IP_UNICAST_IF = 50\n+)\n+\n+// Socket options from uapi/linux/in6.h\n+const (\n+ IPV6_ADDRFORM = 1\n+ IPV6_2292PKTINFO = 2\n+ IPV6_2292HOPOPTS = 3\n+ IPV6_2292DSTOPTS = 4\n+ IPV6_2292RTHDR = 5\n+ IPV6_2292PKTOPTIONS = 6\n+ IPV6_CHECKSUM = 7\n+ IPV6_2292HOPLIMIT = 8\n+ IPV6_NEXTHOP = 9\n+ IPV6_FLOWINFO = 11\n+ IPV6_UNICAST_HOPS = 16\n+ IPV6_MULTICAST_IF = 17\n+ IPV6_MULTICAST_HOPS = 18\n+ IPV6_MULTICAST_LOOP = 19\n+ IPV6_ADD_MEMBERSHIP = 20\n+ IPV6_DROP_MEMBERSHIP = 21\n+ IPV6_ROUTER_ALERT = 22\n+ IPV6_MTU_DISCOVER = 23\n+ IPV6_MTU = 24\n+ IPV6_RECVERR = 25\n+ IPV6_V6ONLY = 26\n+ IPV6_JOIN_ANYCAST = 27\n+ IPV6_LEAVE_ANYCAST = 28\n+ IPV6_MULTICAST_ALL = 29\n+ IPV6_FLOWLABEL_MGR = 32\n+ IPV6_FLOWINFO_SEND = 33\n+ IPV6_IPSEC_POLICY = 34\n+ IPV6_XFRM_POLICY = 35\n+ IPV6_HDRINCL = 36\n+ IPV6_RECVPKTINFO = 49\n+ IPV6_PKTINFO = 50\n+ IPV6_RECVHOPLIMIT = 51\n+ IPV6_HOPLIMIT = 52\n+ IPV6_RECVHOPOPTS = 53\n+ IPV6_HOPOPTS = 54\n+ IPV6_RTHDRDSTOPTS = 55\n+ IPV6_RECVRTHDR = 56\n+ IPV6_RTHDR = 57\n+ IPV6_RECVDSTOPTS = 58\n+ IPV6_DSTOPTS = 59\n+ IPV6_RECVPATHMTU = 60\n+ IPV6_PATHMTU = 61\n+ IPV6_DONTFRAG = 62\n+ IPV6_RECVTCLASS = 66\n+ IPV6_TCLASS = 67\n+ IPV6_AUTOFLOWLABEL = 70\n+ IPV6_ADDR_PREFERENCES = 72\n+ IPV6_MINHOPCOUNT = 73\n+ IPV6_ORIGDSTADDR = 74\n+ IPV6_RECVORIGDSTADDR = IPV6_ORIGDSTADDR\n+ IPV6_TRANSPARENT = 75\n+ IPV6_UNICAST_IF = 76\n+ IPV6_RECVFRAGSIZE = 77\n+ IPV6_FREEBIND = 78\n+)\n" }, { "change_type": "MODIFY", "old_path": "pkg/abi/linux/netlink.go", "new_path": "pkg/abi/linux/netlink.go", "diff": "@@ -108,3 +108,17 @@ const NetlinkAttrHeaderSize = 4\n// NLA_ALIGNTO is the alignment of netlink attributes, from\n// uapi/linux/netlink.h.\nconst NLA_ALIGNTO = 4\n+\n+// Socket options, from uapi/linux/netlink.h.\n+const (\n+ NETLINK_ADD_MEMBERSHIP = 1\n+ NETLINK_DROP_MEMBERSHIP = 2\n+ NETLINK_PKTINFO = 3\n+ NETLINK_BROADCAST_ERROR = 4\n+ NETLINK_NO_ENOBUFS = 5\n+ NETLINK_LISTEN_ALL_NSID = 8\n+ NETLINK_LIST_MEMBERSHIPS = 9\n+ NETLINK_CAP_ACK = 10\n+ NETLINK_EXT_ACK = 11\n+ NETLINK_DUMP_STRICT_CHK = 12\n+)\n" }, 
{ "change_type": "MODIFY", "old_path": "pkg/abi/linux/socket.go", "new_path": "pkg/abi/linux/socket.go", "diff": "@@ -89,8 +89,18 @@ const (\nMSG_CMSG_CLOEXEC = 0x40000000\n)\n-// SOL_SOCKET is from socket.h\n-const SOL_SOCKET = 1\n+// Set/get socket option levels, from socket.h.\n+const (\n+ SOL_IP = 0\n+ SOL_SOCKET = 1\n+ SOL_TCP = 6\n+ SOL_UDP = 17\n+ SOL_IPV6 = 41\n+ SOL_ICMPV6 = 58\n+ SOL_RAW = 255\n+ SOL_PACKET = 263\n+ SOL_NETLINK = 270\n+)\n// Socket types, from linux/net.h.\nconst (\n@@ -122,22 +132,63 @@ const (\n// Socket options from socket.h.\nconst (\n+ SO_DEBUG = 1\n+ SO_REUSEADDR = 2\n+ SO_TYPE = 3\nSO_ERROR = 4\n+ SO_DONTROUTE = 5\n+ SO_BROADCAST = 6\n+ SO_SNDBUF = 7\n+ SO_RCVBUF = 8\nSO_KEEPALIVE = 9\n+ SO_OOBINLINE = 10\n+ SO_NO_CHECK = 11\n+ SO_PRIORITY = 12\nSO_LINGER = 13\n- SO_MARK = 36\n+ SO_BSDCOMPAT = 14\n+ SO_REUSEPORT = 15\nSO_PASSCRED = 16\nSO_PEERCRED = 17\n- SO_PEERNAME = 28\n- SO_PROTOCOL = 38\n- SO_RCVBUF = 8\n+ SO_RCVLOWAT = 18\n+ SO_SNDLOWAT = 19\nSO_RCVTIMEO = 20\n- SO_REUSEADDR = 2\n- SO_SNDBUF = 7\nSO_SNDTIMEO = 21\n+ SO_BINDTODEVICE = 25\n+ SO_ATTACH_FILTER = 26\n+ SO_DETACH_FILTER = 27\n+ SO_GET_FILTER = SO_ATTACH_FILTER\n+ SO_PEERNAME = 28\nSO_TIMESTAMP = 29\n+ SO_ACCEPTCONN = 30\n+ SO_PEERSEC = 31\n+ SO_SNDBUFFORCE = 32\n+ SO_RCVBUFFORCE = 33\n+ SO_PASSSEC = 34\nSO_TIMESTAMPNS = 35\n- SO_TYPE = 3\n+ SO_MARK = 36\n+ SO_TIMESTAMPING = 37\n+ SO_PROTOCOL = 38\n+ SO_DOMAIN = 39\n+ SO_RXQ_OVFL = 40\n+ SO_WIFI_STATUS = 41\n+ SO_PEEK_OFF = 42\n+ SO_NOFCS = 43\n+ SO_LOCK_FILTER = 44\n+ SO_SELECT_ERR_QUEUE = 45\n+ SO_BUSY_POLL = 46\n+ SO_MAX_PACING_RATE = 47\n+ SO_BPF_EXTENSIONS = 48\n+ SO_INCOMING_CPU = 49\n+ SO_ATTACH_BPF = 50\n+ SO_ATTACH_REUSEPORT_CBPF = 51\n+ SO_ATTACH_REUSEPORT_EBPF = 52\n+ SO_CNX_ADVICE = 53\n+ SO_MEMINFO = 55\n+ SO_INCOMING_NAPI_ID = 56\n+ SO_COOKIE = 57\n+ SO_PEERGROUPS = 59\n+ SO_ZEROCOPY = 60\n+ SO_TXTIME = 61\n)\n// SockAddrMax is the maximum size of a struct sockaddr, from\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/abi/linux/tcp.go", "diff": "+// Copyright 2018 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package linux\n+\n+// Socket options from uapi/linux/tcp.h.\n+const (\n+ TCP_NODELAY = 1\n+ TCP_MAXSEG = 2\n+ TCP_CORK = 3\n+ TCP_KEEPIDLE = 4\n+ TCP_KEEPINTVL = 5\n+ TCP_KEEPCNT = 6\n+ TCP_SYNCNT = 7\n+ TCP_LINGER2 = 8\n+ TCP_DEFER_ACCEPT = 9\n+ TCP_WINDOW_CLAMP = 10\n+ TCP_INFO = 11\n+ TCP_QUICKACK = 12\n+ TCP_CONGESTION = 13\n+ TCP_MD5SIG = 14\n+ TCP_THIN_LINEAR_TIMEOUTS = 16\n+ TCP_THIN_DUPACK = 17\n+ TCP_USER_TIMEOUT = 18\n+ TCP_REPAIR = 19\n+ TCP_REPAIR_QUEUE = 20\n+ TCP_QUEUE_SEQ = 21\n+ TCP_REPAIR_OPTIONS = 22\n+ TCP_FASTOPEN = 23\n+ TCP_TIMESTAMP = 24\n+ TCP_NOTSENT_LOWAT = 25\n+ TCP_CC_INFO = 26\n+ TCP_SAVE_SYN = 27\n+ TCP_SAVED_SYN = 28\n+ TCP_REPAIR_WINDOW = 29\n+ TCP_FASTOPEN_CONNECT = 30\n+ TCP_ULP = 31\n+ TCP_MD5SIG_EXT = 32\n+ TCP_FASTOPEN_KEY = 33\n+ TCP_FASTOPEN_NO_COOKIE = 34\n+ TCP_ZEROCOPY_RECEIVE = 35\n+ TCP_INQ = 36\n+)\n" }, { 
"change_type": "MODIFY", "old_path": "pkg/sentry/socket/epsocket/epsocket.go", "new_path": "pkg/sentry/socket/epsocket/epsocket.go", "diff": "@@ -515,6 +515,28 @@ func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name, outLen int) (\nfunc GetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, family int, skType transport.SockType, level, name, outLen int) (interface{}, *syserr.Error) {\nswitch level {\ncase linux.SOL_SOCKET:\n+ return getSockOptSocket(t, s, ep, family, skType, name, outLen)\n+\n+ case linux.SOL_TCP:\n+ return getSockOptTCP(t, ep, name, outLen)\n+\n+ case linux.SOL_IPV6:\n+ return getSockOptIPv6(t, ep, name, outLen)\n+\n+ case linux.SOL_IP,\n+ linux.SOL_UDP,\n+ linux.SOL_ICMPV6,\n+ linux.SOL_RAW,\n+ linux.SOL_PACKET:\n+\n+ t.Kernel().EmitUnimplementedEvent(t)\n+ }\n+\n+ return nil, syserr.ErrProtocolNotAvailable\n+}\n+\n+// getSockOptSocket implements GetSockOpt when level is SOL_SOCKET.\n+func getSockOptSocket(t *kernel.Task, s socket.Socket, ep commonEndpoint, family int, skType transport.SockType, name, outLen int) (interface{}, *syserr.Error) {\nswitch name {\ncase linux.SO_TYPE:\nif outLen < sizeOfInt32 {\n@@ -632,11 +654,17 @@ func GetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, family int,\n}\nreturn int32(v), nil\n+\n+ default:\n+ socket.GetSockOptEmitUnimplementedEvent(t, name)\n+ }\n+ return nil, syserr.ErrProtocolNotAvailable\n}\n- case syscall.SOL_TCP:\n+// getSockOptTCP implements GetSockOpt when level is SOL_TCP.\n+func getSockOptTCP(t *kernel.Task, ep commonEndpoint, name, outLen int) (interface{}, *syserr.Error) {\nswitch name {\n- case syscall.TCP_NODELAY:\n+ case linux.TCP_NODELAY:\nif outLen < sizeOfInt32 {\nreturn nil, syserr.ErrInvalidArgument\n}\n@@ -651,7 +679,7 @@ func GetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, family int,\n}\nreturn int32(0), nil\n- case syscall.TCP_CORK:\n+ case linux.TCP_CORK:\nif outLen < sizeOfInt32 {\nreturn nil, syserr.ErrInvalidArgument\n}\n@@ -663,7 +691,7 @@ func GetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, family int,\nreturn int32(v), nil\n- case syscall.TCP_INFO:\n+ case linux.TCP_INFO:\nvar v tcpip.TCPInfoOption\nif err := ep.GetSockOpt(&v); err != nil {\nreturn nil, syserr.TranslateNetstackError(err)\n@@ -680,11 +708,23 @@ func GetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, family int,\n}\nreturn ib, nil\n+\n+ case linux.TCP_CC_INFO,\n+ linux.TCP_NOTSENT_LOWAT,\n+ linux.TCP_ZEROCOPY_RECEIVE:\n+\n+ t.Kernel().EmitUnimplementedEvent(t)\n+\n+ default:\n+ emitUmplementedEventTCP(t, name)\n+ }\n+ return nil, syserr.ErrProtocolNotAvailable\n}\n- case syscall.SOL_IPV6:\n+// getSockOptIPv6 implements GetSockOpt when level is SOL_IPV6.\n+func getSockOptIPv6(t *kernel.Task, ep commonEndpoint, name, outLen int) (interface{}, *syserr.Error) {\nswitch name {\n- case syscall.IPV6_V6ONLY:\n+ case linux.IPV6_V6ONLY:\nif outLen < sizeOfInt32 {\nreturn nil, syserr.ErrInvalidArgument\n}\n@@ -695,9 +735,13 @@ func GetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, family int,\n}\nreturn int32(v), nil\n- }\n- }\n+ case linux.IPV6_PATHMTU:\n+ t.Kernel().EmitUnimplementedEvent(t)\n+\n+ default:\n+ emitUmplementedEventIPv6(t, name)\n+ }\nreturn nil, syserr.ErrProtocolNotAvailable\n}\n@@ -712,6 +756,31 @@ func (s *SocketOperations) SetSockOpt(t *kernel.Task, level int, name int, optVa\nfunc SetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, level int, name int, optVal []byte) *syserr.Error {\nswitch level {\ncase linux.SOL_SOCKET:\n+ 
return setSockOptSocket(t, s, ep, name, optVal)\n+\n+ case linux.SOL_TCP:\n+ return setSockOptTCP(t, ep, name, optVal)\n+\n+ case linux.SOL_IPV6:\n+ return setSockOptIPv6(t, ep, name, optVal)\n+\n+ case linux.SOL_IP:\n+ return setSockOptIP(t, ep, name, optVal)\n+\n+ case linux.SOL_UDP,\n+ linux.SOL_ICMPV6,\n+ linux.SOL_RAW,\n+ linux.SOL_PACKET:\n+\n+ t.Kernel().EmitUnimplementedEvent(t)\n+ }\n+\n+ // Default to the old behavior; hand off to network stack.\n+ return syserr.TranslateNetstackError(ep.SetSockOpt(struct{}{}))\n+}\n+\n+// setSockOptSocket implements SetSockOpt when level is SOL_SOCKET.\n+func setSockOptSocket(t *kernel.Task, s socket.Socket, ep commonEndpoint, name int, optVal []byte) *syserr.Error {\nswitch name {\ncase linux.SO_SNDBUF:\nif len(optVal) < sizeOfInt32 {\n@@ -762,11 +831,19 @@ func SetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, level int, n\nv := usermem.ByteOrder.Uint32(optVal)\nreturn syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.TimestampOption(v)))\n+\n+ default:\n+ socket.SetSockOptEmitUnimplementedEvent(t, name)\n}\n- case syscall.SOL_TCP:\n+ // Default to the old behavior; hand off to network stack.\n+ return syserr.TranslateNetstackError(ep.SetSockOpt(struct{}{}))\n+}\n+\n+// setSockOptTCP implements SetSockOpt when level is SOL_TCP.\n+func setSockOptTCP(t *kernel.Task, ep commonEndpoint, name int, optVal []byte) *syserr.Error {\nswitch name {\n- case syscall.TCP_NODELAY:\n+ case linux.TCP_NODELAY:\nif len(optVal) < sizeOfInt32 {\nreturn syserr.ErrInvalidArgument\n}\n@@ -777,44 +854,206 @@ func SetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, level int, n\no = 1\n}\nreturn syserr.TranslateNetstackError(ep.SetSockOpt(o))\n- case syscall.TCP_CORK:\n+\n+ case linux.TCP_CORK:\nif len(optVal) < sizeOfInt32 {\nreturn syserr.ErrInvalidArgument\n}\nv := usermem.ByteOrder.Uint32(optVal)\nreturn syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.CorkOption(v)))\n+\n+ case linux.TCP_REPAIR_OPTIONS:\n+ t.Kernel().EmitUnimplementedEvent(t)\n+\n+ default:\n+ emitUmplementedEventTCP(t, name)\n+ }\n+\n+ // Default to the old behavior; hand off to network stack.\n+ return syserr.TranslateNetstackError(ep.SetSockOpt(struct{}{}))\n}\n- case syscall.SOL_IPV6:\n+\n+// setSockOptIPv6 implements SetSockOpt when level is SOL_IPV6.\n+func setSockOptIPv6(t *kernel.Task, ep commonEndpoint, name int, optVal []byte) *syserr.Error {\nswitch name {\n- case syscall.IPV6_V6ONLY:\n+ case linux.IPV6_V6ONLY:\nif len(optVal) < sizeOfInt32 {\nreturn syserr.ErrInvalidArgument\n}\nv := usermem.ByteOrder.Uint32(optVal)\nreturn syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.V6OnlyOption(v)))\n+\n+ case linux.IPV6_ADD_MEMBERSHIP,\n+ linux.IPV6_DROP_MEMBERSHIP,\n+ linux.IPV6_IPSEC_POLICY,\n+ linux.IPV6_JOIN_ANYCAST,\n+ linux.IPV6_LEAVE_ANYCAST,\n+ linux.IPV6_PKTINFO,\n+ linux.IPV6_ROUTER_ALERT,\n+ linux.IPV6_XFRM_POLICY,\n+ linux.MCAST_BLOCK_SOURCE,\n+ linux.MCAST_JOIN_GROUP,\n+ linux.MCAST_JOIN_SOURCE_GROUP,\n+ linux.MCAST_LEAVE_GROUP,\n+ linux.MCAST_LEAVE_SOURCE_GROUP,\n+ linux.MCAST_UNBLOCK_SOURCE:\n+\n+ t.Kernel().EmitUnimplementedEvent(t)\n+\n+ default:\n+ emitUmplementedEventIPv6(t, name)\n}\n- case syscall.SOL_IP:\n- const (\n- _IP_MULTICAST_IF = 32\n- _IP_ADD_MEMBERSHIP = 35\n- _MCAST_JOIN_GROUP = 42\n- )\n+\n+ // Default to the old behavior; hand off to network stack.\n+ return syserr.TranslateNetstackError(ep.SetSockOpt(struct{}{}))\n+}\n+\n+// setSockOptIP implements SetSockOpt when level is SOL_IP.\n+func setSockOptIP(t *kernel.Task, ep commonEndpoint, 
name int, optVal []byte) *syserr.Error {\nswitch name {\n- case _IP_ADD_MEMBERSHIP, _MCAST_JOIN_GROUP, _IP_MULTICAST_IF:\n+ case linux.IP_ADD_MEMBERSHIP, linux.MCAST_JOIN_GROUP, linux.IP_MULTICAST_IF:\n// FIXME: Disallow IP-level multicast group options by\n// default. These will need to be supported by appropriately plumbing\n// the level through to the network stack (if at all). However, we\n// still allow setting TTL, and multicast-enable/disable type options.\n+ t.Kernel().EmitUnimplementedEvent(t)\nreturn syserr.ErrInvalidArgument\n- }\n+\n+ case linux.IP_ADD_SOURCE_MEMBERSHIP,\n+ linux.IP_BIND_ADDRESS_NO_PORT,\n+ linux.IP_BLOCK_SOURCE,\n+ linux.IP_CHECKSUM,\n+ linux.IP_DROP_MEMBERSHIP,\n+ linux.IP_DROP_SOURCE_MEMBERSHIP,\n+ linux.IP_FREEBIND,\n+ linux.IP_HDRINCL,\n+ linux.IP_IPSEC_POLICY,\n+ linux.IP_MINTTL,\n+ linux.IP_MSFILTER,\n+ linux.IP_MTU_DISCOVER,\n+ linux.IP_MULTICAST_ALL,\n+ linux.IP_MULTICAST_LOOP,\n+ linux.IP_MULTICAST_TTL,\n+ linux.IP_NODEFRAG,\n+ linux.IP_OPTIONS,\n+ linux.IP_PASSSEC,\n+ linux.IP_PKTINFO,\n+ linux.IP_RECVERR,\n+ linux.IP_RECVFRAGSIZE,\n+ linux.IP_RECVOPTS,\n+ linux.IP_RECVORIGDSTADDR,\n+ linux.IP_RECVTOS,\n+ linux.IP_RECVTTL,\n+ linux.IP_RETOPTS,\n+ linux.IP_TOS,\n+ linux.IP_TRANSPARENT,\n+ linux.IP_TTL,\n+ linux.IP_UNBLOCK_SOURCE,\n+ linux.IP_UNICAST_IF,\n+ linux.IP_XFRM_POLICY,\n+ linux.MCAST_BLOCK_SOURCE,\n+ linux.MCAST_JOIN_SOURCE_GROUP,\n+ linux.MCAST_LEAVE_GROUP,\n+ linux.MCAST_LEAVE_SOURCE_GROUP,\n+ linux.MCAST_MSFILTER,\n+ linux.MCAST_UNBLOCK_SOURCE:\n+\n+ t.Kernel().EmitUnimplementedEvent(t)\n}\n// Default to the old behavior; hand off to network stack.\nreturn syserr.TranslateNetstackError(ep.SetSockOpt(struct{}{}))\n}\n+// emitUmplementedEventTCP emits unimplemented event if name is valid. This\n+// function contains names that are common between Get and SetSockOpt when\n+// level is SOL_TCP.\n+func emitUmplementedEventTCP(t *kernel.Task, name int) {\n+ switch name {\n+ case linux.TCP_CONGESTION,\n+ linux.TCP_CORK,\n+ linux.TCP_DEFER_ACCEPT,\n+ linux.TCP_FASTOPEN,\n+ linux.TCP_FASTOPEN_CONNECT,\n+ linux.TCP_FASTOPEN_KEY,\n+ linux.TCP_FASTOPEN_NO_COOKIE,\n+ linux.TCP_INQ,\n+ linux.TCP_KEEPCNT,\n+ linux.TCP_KEEPIDLE,\n+ linux.TCP_KEEPINTVL,\n+ linux.TCP_LINGER2,\n+ linux.TCP_MAXSEG,\n+ linux.TCP_QUEUE_SEQ,\n+ linux.TCP_QUICKACK,\n+ linux.TCP_REPAIR,\n+ linux.TCP_REPAIR_QUEUE,\n+ linux.TCP_REPAIR_WINDOW,\n+ linux.TCP_SAVED_SYN,\n+ linux.TCP_SAVE_SYN,\n+ linux.TCP_SYNCNT,\n+ linux.TCP_THIN_DUPACK,\n+ linux.TCP_THIN_LINEAR_TIMEOUTS,\n+ linux.TCP_TIMESTAMP,\n+ linux.TCP_ULP,\n+ linux.TCP_USER_TIMEOUT,\n+ linux.TCP_WINDOW_CLAMP:\n+\n+ t.Kernel().EmitUnimplementedEvent(t)\n+ }\n+}\n+\n+// emitUmplementedEventIPv6 emits unimplemented event if name is valid. 
It\n+// contains names that are common between Get and SetSockOpt when level is\n+// SOL_IPV6.\n+func emitUmplementedEventIPv6(t *kernel.Task, name int) {\n+ switch name {\n+ case linux.IPV6_2292DSTOPTS,\n+ linux.IPV6_2292HOPLIMIT,\n+ linux.IPV6_2292HOPOPTS,\n+ linux.IPV6_2292PKTINFO,\n+ linux.IPV6_2292PKTOPTIONS,\n+ linux.IPV6_2292RTHDR,\n+ linux.IPV6_ADDR_PREFERENCES,\n+ linux.IPV6_AUTOFLOWLABEL,\n+ linux.IPV6_DONTFRAG,\n+ linux.IPV6_DSTOPTS,\n+ linux.IPV6_FLOWINFO,\n+ linux.IPV6_FLOWINFO_SEND,\n+ linux.IPV6_FLOWLABEL_MGR,\n+ linux.IPV6_FREEBIND,\n+ linux.IPV6_HOPOPTS,\n+ linux.IPV6_MINHOPCOUNT,\n+ linux.IPV6_MTU,\n+ linux.IPV6_MTU_DISCOVER,\n+ linux.IPV6_MULTICAST_ALL,\n+ linux.IPV6_MULTICAST_HOPS,\n+ linux.IPV6_MULTICAST_IF,\n+ linux.IPV6_MULTICAST_LOOP,\n+ linux.IPV6_RECVDSTOPTS,\n+ linux.IPV6_RECVERR,\n+ linux.IPV6_RECVFRAGSIZE,\n+ linux.IPV6_RECVHOPLIMIT,\n+ linux.IPV6_RECVHOPOPTS,\n+ linux.IPV6_RECVORIGDSTADDR,\n+ linux.IPV6_RECVPATHMTU,\n+ linux.IPV6_RECVPKTINFO,\n+ linux.IPV6_RECVRTHDR,\n+ linux.IPV6_RECVTCLASS,\n+ linux.IPV6_RTHDR,\n+ linux.IPV6_RTHDRDSTOPTS,\n+ linux.IPV6_TCLASS,\n+ linux.IPV6_TRANSPARENT,\n+ linux.IPV6_UNICAST_HOPS,\n+ linux.IPV6_UNICAST_IF,\n+ linux.MCAST_MSFILTER,\n+ linux.IPV6_ADDRFORM:\n+\n+ t.Kernel().EmitUnimplementedEvent(t)\n+ }\n+}\n+\n// isLinkLocal determines if the given IPv6 address is link-local. This is the\n// case when it has the fe80::/10 prefix. This check is used to determine when\n// the NICID is relevant for a given IPv6 address.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netlink/socket.go", "new_path": "pkg/sentry/socket/netlink/socket.go", "diff": "@@ -299,6 +299,21 @@ func (s *Socket) GetSockOpt(t *kernel.Task, level int, name int, outLen int) (in\n}\n// We don't have limit on receiving size.\nreturn math.MaxInt32, nil\n+\n+ default:\n+ socket.GetSockOptEmitUnimplementedEvent(t, name)\n+ }\n+ case linux.SOL_NETLINK:\n+ switch name {\n+ case linux.NETLINK_BROADCAST_ERROR,\n+ linux.NETLINK_CAP_ACK,\n+ linux.NETLINK_DUMP_STRICT_CHK,\n+ linux.NETLINK_EXT_ACK,\n+ linux.NETLINK_LIST_MEMBERSHIPS,\n+ linux.NETLINK_NO_ENOBUFS,\n+ linux.NETLINK_PKTINFO:\n+\n+ t.Kernel().EmitUnimplementedEvent(t)\n}\n}\n// TODO: other sockopts are not supported.\n@@ -329,7 +344,25 @@ func (s *Socket) SetSockOpt(t *kernel.Task, level int, name int, opt []byte) *sy\n// We don't have limit on receiving size. 
So just accept anything as\n// valid for compatibility.\nreturn nil\n+ default:\n+ socket.SetSockOptEmitUnimplementedEvent(t, name)\n}\n+\n+ case linux.SOL_NETLINK:\n+ switch name {\n+ case linux.NETLINK_ADD_MEMBERSHIP,\n+ linux.NETLINK_BROADCAST_ERROR,\n+ linux.NETLINK_CAP_ACK,\n+ linux.NETLINK_DROP_MEMBERSHIP,\n+ linux.NETLINK_DUMP_STRICT_CHK,\n+ linux.NETLINK_EXT_ACK,\n+ linux.NETLINK_LISTEN_ALL_NSID,\n+ linux.NETLINK_NO_ENOBUFS,\n+ linux.NETLINK_PKTINFO:\n+\n+ t.Kernel().EmitUnimplementedEvent(t)\n+ }\n+\n}\n// TODO: other sockopts are not supported.\nreturn syserr.ErrProtocolNotAvailable\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/socket.go", "new_path": "pkg/sentry/socket/socket.go", "diff": "@@ -213,3 +213,94 @@ func (rt *ReceiveTimeout) SetRecvTimeout(nanoseconds int64) {\nfunc (rt *ReceiveTimeout) RecvTimeout() int64 {\nreturn atomic.LoadInt64(&rt.ns)\n}\n+\n+// GetSockOptEmitUnimplementedEvent emits unimplemented event if name is valid.\n+// It contains names that are valid for GetSockOpt when level is SOL_SOCKET.\n+func GetSockOptEmitUnimplementedEvent(t *kernel.Task, name int) {\n+ switch name {\n+ case linux.SO_ACCEPTCONN,\n+ linux.SO_BPF_EXTENSIONS,\n+ linux.SO_COOKIE,\n+ linux.SO_DOMAIN,\n+ linux.SO_ERROR,\n+ linux.SO_GET_FILTER,\n+ linux.SO_INCOMING_NAPI_ID,\n+ linux.SO_MEMINFO,\n+ linux.SO_PEERCRED,\n+ linux.SO_PEERGROUPS,\n+ linux.SO_PEERNAME,\n+ linux.SO_PEERSEC,\n+ linux.SO_PROTOCOL,\n+ linux.SO_SNDLOWAT,\n+ linux.SO_TYPE:\n+\n+ t.Kernel().EmitUnimplementedEvent(t)\n+\n+ default:\n+ emitUnimplementedEvent(t, name)\n+ }\n+}\n+\n+// SetSockOptEmitUnimplementedEvent emits unimplemented event if name is valid.\n+// It contains names that are valid for SetSockOpt when level is SOL_SOCKET.\n+func SetSockOptEmitUnimplementedEvent(t *kernel.Task, name int) {\n+ switch name {\n+ case linux.SO_ATTACH_BPF,\n+ linux.SO_ATTACH_FILTER,\n+ linux.SO_ATTACH_REUSEPORT_CBPF,\n+ linux.SO_ATTACH_REUSEPORT_EBPF,\n+ linux.SO_CNX_ADVICE,\n+ linux.SO_DETACH_FILTER,\n+ linux.SO_RCVBUFFORCE,\n+ linux.SO_SNDBUFFORCE:\n+\n+ t.Kernel().EmitUnimplementedEvent(t)\n+\n+ default:\n+ emitUnimplementedEvent(t, name)\n+ }\n+}\n+\n+// emitUnimplementedEvent emits unimplemented event if name is valid. 
It\n+// contains names that are common between Get and SetSocketOpt when level is\n+// SOL_SOCKET.\n+func emitUnimplementedEvent(t *kernel.Task, name int) {\n+ switch name {\n+ case linux.SO_BINDTODEVICE,\n+ linux.SO_BROADCAST,\n+ linux.SO_BSDCOMPAT,\n+ linux.SO_BUSY_POLL,\n+ linux.SO_DEBUG,\n+ linux.SO_DONTROUTE,\n+ linux.SO_INCOMING_CPU,\n+ linux.SO_KEEPALIVE,\n+ linux.SO_LINGER,\n+ linux.SO_LOCK_FILTER,\n+ linux.SO_MARK,\n+ linux.SO_MAX_PACING_RATE,\n+ linux.SO_NOFCS,\n+ linux.SO_NO_CHECK,\n+ linux.SO_OOBINLINE,\n+ linux.SO_PASSCRED,\n+ linux.SO_PASSSEC,\n+ linux.SO_PEEK_OFF,\n+ linux.SO_PRIORITY,\n+ linux.SO_RCVBUF,\n+ linux.SO_RCVLOWAT,\n+ linux.SO_RCVTIMEO,\n+ linux.SO_REUSEADDR,\n+ linux.SO_REUSEPORT,\n+ linux.SO_RXQ_OVFL,\n+ linux.SO_SELECT_ERR_QUEUE,\n+ linux.SO_SNDBUF,\n+ linux.SO_SNDTIMEO,\n+ linux.SO_TIMESTAMP,\n+ linux.SO_TIMESTAMPING,\n+ linux.SO_TIMESTAMPNS,\n+ linux.SO_TXTIME,\n+ linux.SO_WIFI_STATUS,\n+ linux.SO_ZEROCOPY:\n+\n+ t.Kernel().EmitUnimplementedEvent(t)\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/compat.go", "new_path": "runsc/boot/compat.go", "diff": "@@ -89,10 +89,16 @@ func (c *compatEmitter) Emit(msg proto.Message) (hangup bool, err error) {\nif tr == nil {\nswitch sysnr {\ncase syscall.SYS_PRCTL, syscall.SYS_ARCH_PRCTL:\n- tr = newCmdTracker(0)\n+ // args: cmd, ...\n+ tr = newArgsTracker(0)\ncase syscall.SYS_IOCTL, syscall.SYS_EPOLL_CTL, syscall.SYS_SHMCTL:\n- tr = newCmdTracker(1)\n+ // args: fd, cmd, ...\n+ tr = newArgsTracker(1)\n+\n+ case syscall.SYS_GETSOCKOPT, syscall.SYS_SETSOCKOPT:\n+ // args: fd, level, name, ...\n+ tr = newArgsTracker(1, 2)\ndefault:\ntr = &onceTracker{}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/compat_amd64.go", "new_path": "runsc/boot/compat_amd64.go", "diff": "@@ -20,35 +20,58 @@ import (\nrpb \"gvisor.googlesource.com/gvisor/pkg/sentry/arch/registers_go_proto\"\n)\n-// cmdTracker reports only a single time for each different command argument in\n-// the syscall. 
It's used for generic syscalls like ioctl to report once per\n-// 'cmd'\n-type cmdTracker struct {\n- // argIdx is the syscall argument index where the command is located.\n- argIdx int\n- cmds map[uint32]struct{}\n+// reportLimit is the max number of events that should be reported per tracker.\n+const reportLimit = 100\n+\n+// argsTracker reports only once for each different combination of arguments.\n+// It's used for generic syscalls like ioctl to report once per 'cmd'.\n+type argsTracker struct {\n+ // argsIdx is the syscall arguments to use as unique ID.\n+ argsIdx []int\n+ reported map[string]struct{}\n+ count int\n}\n-func newCmdTracker(argIdx int) *cmdTracker {\n- return &cmdTracker{argIdx: argIdx, cmds: make(map[uint32]struct{})}\n+func newArgsTracker(argIdx ...int) *argsTracker {\n+ return &argsTracker{argsIdx: argIdx, reported: make(map[string]struct{})}\n}\n// cmd returns the command based on the syscall argument index.\n-func (c *cmdTracker) cmd(regs *rpb.AMD64Registers) uint32 {\n- switch c.argIdx {\n+func (a *argsTracker) key(regs *rpb.AMD64Registers) string {\n+ var rv string\n+ for _, idx := range a.argsIdx {\n+ rv += fmt.Sprintf(\"%d|\", argVal(idx, regs))\n+ }\n+ return rv\n+}\n+\n+func argVal(argIdx int, regs *rpb.AMD64Registers) uint32 {\n+ switch argIdx {\ncase 0:\nreturn uint32(regs.Rdi)\ncase 1:\nreturn uint32(regs.Rsi)\n+ case 2:\n+ return uint32(regs.Rdx)\n+ case 3:\n+ return uint32(regs.R10)\n+ case 4:\n+ return uint32(regs.R8)\n+ case 5:\n+ return uint32(regs.R9)\n}\n- panic(fmt.Sprintf(\"unsupported syscall argument index %d\", c.argIdx))\n+ panic(fmt.Sprintf(\"invalid syscall argument index %d\", argIdx))\n}\n-func (c *cmdTracker) shouldReport(regs *rpb.AMD64Registers) bool {\n- _, ok := c.cmds[c.cmd(regs)]\n+func (a *argsTracker) shouldReport(regs *rpb.AMD64Registers) bool {\n+ if a.count >= reportLimit {\n+ return false\n+ }\n+ _, ok := a.reported[a.key(regs)]\nreturn !ok\n}\n-func (c *cmdTracker) onReported(regs *rpb.AMD64Registers) {\n- c.cmds[c.cmd(regs)] = struct{}{}\n+func (a *argsTracker) onReported(regs *rpb.AMD64Registers) {\n+ a.count++\n+ a.reported[a.key(regs)] = struct{}{}\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/compat_test.go", "new_path": "runsc/boot/compat_test.go", "diff": "@@ -33,34 +33,53 @@ func TestOnceTracker(t *testing.T) {\n}\n}\n-func TestCmdTracker(t *testing.T) {\n+func TestArgsTracker(t *testing.T) {\nfor _, tc := range []struct {\nname string\n- idx int\n+ idx []int\nrdi1 uint64\nrdi2 uint64\nrsi1 uint64\nrsi2 uint64\nwant bool\n}{\n- {name: \"same rdi\", idx: 0, rdi1: 123, rdi2: 123, want: false},\n- {name: \"same rsi\", idx: 1, rsi1: 123, rsi2: 123, want: false},\n- {name: \"diff rdi\", idx: 0, rdi1: 123, rdi2: 321, want: true},\n- {name: \"diff rsi\", idx: 1, rsi1: 123, rsi2: 321, want: true},\n- {name: \"cmd is uint32\", idx: 0, rsi1: 0xdead00000123, rsi2: 0xbeef00000123, want: false},\n+ {name: \"same rdi\", idx: []int{0}, rdi1: 123, rdi2: 123, want: false},\n+ {name: \"same rsi\", idx: []int{1}, rsi1: 123, rsi2: 123, want: false},\n+ {name: \"diff rdi\", idx: []int{0}, rdi1: 123, rdi2: 321, want: true},\n+ {name: \"diff rsi\", idx: []int{1}, rsi1: 123, rsi2: 321, want: true},\n+ {name: \"cmd is uint32\", idx: []int{0}, rsi1: 0xdead00000123, rsi2: 0xbeef00000123, want: false},\n+ {name: \"same 2 args\", idx: []int{0, 1}, rsi1: 123, rdi1: 321, rsi2: 123, rdi2: 321, want: false},\n+ {name: \"diff 2 args\", idx: []int{0, 1}, rsi1: 123, rdi1: 321, rsi2: 789, rdi2: 987, want: true},\n} {\nt.Run(tc.name, func(t 
*testing.T) {\n- c := newCmdTracker(tc.idx)\n+ c := newArgsTracker(tc.idx...)\nregs := &rpb.AMD64Registers{Rdi: tc.rdi1, Rsi: tc.rsi1}\nif !c.shouldReport(regs) {\n- t.Error(\"first call to checkAndMark, got: false, want: true\")\n+ t.Error(\"first call to shouldReport, got: false, want: true\")\n}\nc.onReported(regs)\nregs.Rdi, regs.Rsi = tc.rdi2, tc.rsi2\nif got := c.shouldReport(regs); tc.want != got {\n- t.Errorf(\"after first call to checkAndMark, got: %t, want: %t\", got, tc.want)\n+ t.Errorf(\"second call to shouldReport, got: %t, want: %t\", got, tc.want)\n}\n})\n}\n}\n+\n+func TestArgsTrackerLimit(t *testing.T) {\n+ c := newArgsTracker(0, 1)\n+ for i := 0; i < reportLimit; i++ {\n+ regs := &rpb.AMD64Registers{Rdi: 123, Rsi: uint64(i)}\n+ if !c.shouldReport(regs) {\n+ t.Error(\"shouldReport before limit was reached, got: false, want: true\")\n+ }\n+ c.onReported(regs)\n+ }\n+\n+ // Should hit the count limit now.\n+ regs := &rpb.AMD64Registers{Rdi: 123, Rsi: 123456}\n+ if c.shouldReport(regs) {\n+ t.Error(\"shouldReport after limit was reached, got: true, want: false\")\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Add unsupported syscall events for get/setsockopt PiperOrigin-RevId: 222148953 Change-Id: I21500a9f08939c45314a6414e0824490a973e5aa
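The argsTracker introduced above is a small, reusable shape: report each distinct argument combination at most once, and stop reporting altogether after a cap. A sketch decoupled from the AMD64 register proto, keeping the same key construction and the reportLimit value from the diff, could be:

    package compatsketch

    import "fmt"

    // reportLimit caps how many distinct combinations one tracker reports.
    const reportLimit = 100

    // argsTracker reports a syscall once per distinct combination of the
    // chosen arguments (e.g. level and name for get/setsockopt).
    type argsTracker struct {
        reported map[string]struct{}
        count    int
    }

    func newArgsTracker() *argsTracker {
        return &argsTracker{reported: make(map[string]struct{})}
    }

    // key folds the tracked arguments into a map key; only the low 32 bits
    // are significant, as in the register-based version above.
    func key(args []uint64) string {
        k := ""
        for _, a := range args {
            k += fmt.Sprintf("%d|", uint32(a))
        }
        return k
    }

    func (t *argsTracker) shouldReport(args ...uint64) bool {
        if t.count >= reportLimit {
            return false
        }
        _, seen := t.reported[key(args)]
        return !seen
    }

    func (t *argsTracker) onReported(args ...uint64) {
        t.count++
        t.reported[key(args)] = struct{}{}
    }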
259,881
19.11.2018 18:02:50
28,800
03c1eb78b583ca3247f299889146675311727325
Reference upstream licenses. Include copyright notices and the referenced LICENSE file.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/safecopy/BUILD", "new_path": "pkg/sentry/platform/safecopy/BUILD", "diff": "load(\"//tools/go_stateify:defs.bzl\", \"go_library\", \"go_test\")\n-package(licenses = [\"notice\"]) # Apache 2.0\n+package(licenses = [\"notice\"]) # Apache 2.0, portions BSD, MIT\ngo_library(\nname = \"safecopy\",\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/sentry/platform/safecopy/LICENSE", "diff": "+Copyright (c) 2009 The Go Authors. All rights reserved.\n+\n+Redistribution and use in source and binary forms, with or without\n+modification, are permitted provided that the following conditions are\n+met:\n+\n+ * Redistributions of source code must retain the above copyright\n+notice, this list of conditions and the following disclaimer.\n+ * Redistributions in binary form must reproduce the above\n+copyright notice, this list of conditions and the following disclaimer\n+in the documentation and/or other materials provided with the\n+distribution.\n+ * Neither the name of Google Inc. nor the names of its\n+contributors may be used to endorse or promote products derived from\n+this software without specific prior written permission.\n+\n+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/safecopy/memclr_amd64.s", "new_path": "pkg/sentry/platform/safecopy/memclr_amd64.s", "diff": "-// Copyright 2018 Google LLC\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n+// Copyright 2014 The Go Authors. All rights reserved.\n+// Use of this source code is governed by a BSD-style\n+// license that can be found in the LICENSE file.\n#include \"textflag.h\"\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/sync/LICENSE", "diff": "+Copyright (c) 2009 The Go Authors. 
All rights reserved.\n+\n+Redistribution and use in source and binary forms, with or without\n+modification, are permitted provided that the following conditions are\n+met:\n+\n+ * Redistributions of source code must retain the above copyright\n+notice, this list of conditions and the following disclaimer.\n+ * Redistributions in binary form must reproduce the above\n+copyright notice, this list of conditions and the following disclaimer\n+in the documentation and/or other materials provided with the\n+distribution.\n+ * Neither the name of Google Inc. nor the names of its\n+contributors may be used to endorse or promote products derived from\n+this software without specific prior written permission.\n+\n+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n" } ]
Go
Apache License 2.0
google/gvisor
Reference upstream licenses Include copyright notices and the referenced LICENSE file. PiperOrigin-RevId: 222171321 Change-Id: I0cc0b167ca51b536d1087bf1c4742fdf1430bc2a
259,992
20.11.2018 15:07:12
28,800
8b314b0bf402da58f90ccaac852a880d375f0885
Fix recursive read lock taken on TaskSet. SyncSyscallFiltersToThreadGroup and Task.ThreadID() both acquired the TaskSet RWLock in R mode and could deadlock if a writer came in between.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/seccomp.go", "new_path": "pkg/sentry/kernel/seccomp.go", "diff": "@@ -179,20 +179,19 @@ func (t *Task) evaluateSyscallFilters(sysno int32, args arch.SyscallArguments, i\n// AppendSyscallFilter adds BPF program p as a system call filter.\n//\n// Preconditions: The caller must be running on the task goroutine.\n-func (t *Task) AppendSyscallFilter(p bpf.Program) error {\n+func (t *Task) AppendSyscallFilter(p bpf.Program, syncAll bool) error {\n+ // While syscallFilters are an atomic.Value we must take the mutex to prevent\n+ // our read-copy-update from happening while another task is syncing syscall\n+ // filters to us, this keeps the filters in a consistent state.\n+ t.tg.signalHandlers.mu.Lock()\n+ defer t.tg.signalHandlers.mu.Unlock()\n+\n// Cap the combined length of all syscall filters (plus a penalty of 4\n- // instructions per filter beyond the first) to\n- // maxSyscallFilterInstructions. (This restriction is inherited from\n- // Linux.)\n+ // instructions per filter beyond the first) to maxSyscallFilterInstructions.\n+ // This restriction is inherited from Linux.\ntotalLength := p.Length()\nvar newFilters []bpf.Program\n- // While syscallFilters are an atomic.Value we must take the mutex to\n- // prevent our read-copy-update from happening while another task\n- // is syncing syscall filters to us, this keeps the filters in a\n- // consistent state.\n- t.mu.Lock()\n- defer t.mu.Unlock()\nif sf := t.syscallFilters.Load(); sf != nil {\noldFilters := sf.([]bpf.Program)\nfor _, f := range oldFilters {\n@@ -207,31 +206,18 @@ func (t *Task) AppendSyscallFilter(p bpf.Program) error {\nnewFilters = append(newFilters, p)\nt.syscallFilters.Store(newFilters)\n- return nil\n-}\n-\n-// SyncSyscallFiltersToThreadGroup will copy this task's filters to all other\n-// threads in our thread group.\n-func (t *Task) SyncSyscallFiltersToThreadGroup() error {\n- f := t.syscallFilters.Load()\n-\n- t.tg.pidns.owner.mu.RLock()\n- defer t.tg.pidns.owner.mu.RUnlock()\n+ if syncAll {\n// Note: No new privs is always assumed to be set.\nfor ot := t.tg.tasks.Front(); ot != nil; ot = ot.Next() {\n- if ot.ThreadID() != t.ThreadID() {\n- // We must take the other task's mutex to prevent it from\n- // appending to its own syscall filters while we're syncing.\n- ot.mu.Lock()\n+ if ot != t {\nvar copiedFilters []bpf.Program\n- if f != nil {\n- copiedFilters = append(copiedFilters, f.([]bpf.Program)...)\n- }\n+ copiedFilters = append(copiedFilters, newFilters...)\not.syscallFilters.Store(copiedFilters)\n- ot.mu.Unlock()\n}\n}\n+ }\n+\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task.go", "new_path": "pkg/sentry/kernel/task.go", "diff": "@@ -392,7 +392,7 @@ type Task struct {\n// syscallFilters is all seccomp-bpf syscall filters applicable to the\n// task, in the order in which they were installed. The type of the atomic\n- // is []bpf.Program. Writing needs to be protected by mu.\n+ // is []bpf.Program. 
Writing needs to be protected by the signal mutex.\n//\n// syscallFilters is owned by the task goroutine.\nsyscallFilters atomic.Value `state:\".([]bpf.Program)\"`\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_seccomp.go", "new_path": "pkg/sentry/syscalls/linux/sys_seccomp.go", "diff": "@@ -68,12 +68,7 @@ func seccomp(t *kernel.Task, mode, flags uint64, addr usermem.Addr) error {\nreturn syscall.EINVAL\n}\n- err = t.AppendSyscallFilter(compiledFilter)\n- if err == nil && tsync {\n- // Now we must copy this seccomp program to all other threads.\n- err = t.SyncSyscallFiltersToThreadGroup()\n- }\n- return err\n+ return t.AppendSyscallFilter(compiledFilter, tsync)\n}\n// Seccomp implements linux syscall seccomp(2).\n" } ]
Go
Apache License 2.0
google/gvisor
Fix recursive read lock taken on TaskSet SyncSyscallFiltersToThreadGroup and Task.TheadID() both acquired TaskSet RWLock in R mode and could deadlock if a writer comes in between. PiperOrigin-RevId: 222313551 Change-Id: I4221057d8d46fec544cbfa55765c9a284fe7ebfa
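The fix keeps reads of the filter list lock-free while forcing every writer through one mutex, so an append and a thread-group sync can no longer interleave. A reduced sketch of that read-copy-update shape — Program standing in for bpf.Program, an ordinary sync.Mutex standing in for the signal-handlers mutex, and the thread-group walk omitted — might look like:

    package seccompsketch

    import (
        "sync"
        "sync/atomic"
    )

    // Program stands in for bpf.Program.
    type Program struct{ name string }

    type filterSet struct {
        mu      sync.Mutex   // serializes writers, like the signal mutex
        filters atomic.Value // holds []Program; read lock-free on the hot path
    }

    func (s *filterSet) load() []Program {
        if v := s.filters.Load(); v != nil {
            return v.([]Program)
        }
        return nil
    }

    func (s *filterSet) append(p Program) {
        s.mu.Lock()
        defer s.mu.Unlock()
        old := s.load()
        next := make([]Program, 0, len(old)+1)
        next = append(next, old...) // copy: readers may still hold the old slice
        next = append(next, p)
        s.filters.Store(next)
    }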
259,992
20.11.2018 17:23:14
28,800
5236b78242677612ac71b19cee85b3bf4cca4008
Dump stacks if the watchdog thread is stuck
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/watchdog/watchdog.go", "new_path": "pkg/sentry/watchdog/watchdog.go", "diff": "@@ -190,7 +190,24 @@ func (w *Watchdog) loop() {\n// runTurn runs a single pass over all tasks and reports anything it finds.\nfunc (w *Watchdog) runTurn() {\n- tasks := w.k.TaskSet().Root.Tasks()\n+ // Someone needs to watch the watchdog. The call below can get stuck if there\n+ // is a deadlock affecting root's PID namespace mutex. Run it in a goroutine\n+ // and report if it takes too long to return.\n+ var tasks []*kernel.Task\n+ done := make(chan struct{})\n+ go func() { // S/R-SAFE: watchdog is stopped and restarted during S/R.\n+ tasks = w.k.TaskSet().Root.Tasks()\n+ close(done)\n+ }()\n+\n+ select {\n+ case <-done:\n+ case <-time.After(w.taskTimeout):\n+ // Report if the watchdog is not making progress.\n+ // No one is wathching the watchdog watcher though.\n+ w.reportStuckWatchdog()\n+ <-done\n+ }\nnewOffenders := make(map[*kernel.Task]*offender)\nnewTaskFound := false\n@@ -245,7 +262,16 @@ func (w *Watchdog) report(offenders map[*kernel.Task]*offender, newTaskFound boo\nbuf.WriteString(fmt.Sprintf(\"\\tTask tid: %v (%#x), entered RunSys state %v ago.\\n\", tid, uint64(tid), now.Sub(o.lastUpdateTime)))\n}\nbuf.WriteString(\"Search for '(*Task).run(0x..., 0x<tid>)' in the stack dump to find the offending goroutine\")\n+ w.onStuckTask(newTaskFound, &buf)\n+}\n+\n+func (w *Watchdog) reportStuckWatchdog() {\n+ var buf bytes.Buffer\n+ buf.WriteString(\"Watchdog goroutine is stuck:\\n\")\n+ w.onStuckTask(true, &buf)\n+}\n+func (w *Watchdog) onStuckTask(newTaskFound bool, buf *bytes.Buffer) {\nswitch w.timeoutAction {\ncase LogWarning:\n// Dump stack only if a new task is detected or if it sometime has passed since\n" } ]
Go
Apache License 2.0
google/gvisor
Dumps stacks if watchdog thread is stuck PiperOrigin-RevId: 222332703 Change-Id: Id5c3cf79591c5d2949895b4e323e63c48c679820
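The core of the change is a "watch the watcher" idiom: run the call that might deadlock in its own goroutine and escalate if it does not return in time, instead of letting the watchdog itself hang silently. A generic sketch of that idiom, with call and onStuck as placeholders, could be:

    package watchdogsketch

    import "time"

    // callWithTimeout runs call in its own goroutine and invokes onStuck if
    // it has not returned within timeout — but still waits for it afterwards,
    // so the caller never proceeds with a half-finished call.
    func callWithTimeout(timeout time.Duration, call func(), onStuck func()) {
        done := make(chan struct{})
        go func() {
            call()
            close(done)
        }()
        select {
        case <-done:
        case <-time.After(timeout):
            onStuck()
            <-done // keep waiting; the report is the only escalation
        }
    }

The final receive on done matches the diff: even after reporting, the loop still waits for the task walk to finish before using its result.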
259,854
21.11.2018 18:09:22
28,800
1918563525662d6645ec921e61aa7e6da92af0dd
Make ToView non-allocating for VectorisedViews containing a single View
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/buffer/view.go", "new_path": "pkg/tcpip/buffer/view.go", "diff": "@@ -132,7 +132,13 @@ func (vv VectorisedView) Size() int {\n}\n// ToView returns a single view containing the content of the vectorised view.\n+//\n+// If the vectorised view contains a single view, that view will be returned\n+// directly.\nfunc (vv VectorisedView) ToView() View {\n+ if len(vv.views) == 1 {\n+ return vv.views[0]\n+ }\nu := make([]byte, 0, vv.size)\nfor _, v := range vv.views {\nu = append(u, v...)\n" } ]
Go
Apache License 2.0
google/gvisor
Make ToView non-allocating for single VectorizedViews containing a single View PiperOrigin-RevId: 222483471 Change-Id: I6720690b20167dd541fdfa5218eba7c9f7483347
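The optimization is a single-chunk fast path: when the vectorised view holds exactly one view, hand that view back instead of allocating and copying. A sketch of the same shape over plain byte slices might be:

    package buffersketch

    // flatten mirrors the fast path added to ToView: with exactly one chunk,
    // the original slice is returned as-is (no allocation, no copy);
    // otherwise the chunks are copied into a fresh slice.
    func flatten(views [][]byte, size int) []byte {
        if len(views) == 1 {
            return views[0]
        }
        out := make([]byte, 0, size)
        for _, v := range views {
            out = append(out, v...)
        }
        return out
    }

The trade-off, called out in the new doc comment, is that callers on the fast path now alias the underlying view rather than receiving a private copy.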
259,975
26.11.2018 09:49:53
28,800
b3b60ea29adf9415c9c7b98ba331dacd92f231b7
Implementation of preadv2 for Linux 4.4 support. RWF_HIPRI (a 4.6 flag) is accepted but silently passes the read call through. An offset of -1 falls back to readv.
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/file.go", "new_path": "pkg/abi/linux/file.go", "diff": "@@ -150,6 +150,13 @@ const (\nPermissionsMask = 0777\n)\n+// Values for preadv2/pwritev2.\n+const (\n+ RWF_HIPRI = 0x0001\n+ RWF_DSYNC = 0X0002\n+ RWF_SYNC = 0x0004\n+)\n+\n// Stat represents struct stat.\ntype Stat struct {\nDev uint64\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/linux64.go", "new_path": "pkg/sentry/syscalls/linux/linux64.go", "diff": "@@ -373,7 +373,10 @@ var AMD64 = &kernel.SyscallTable{\n// 322: Execveat, TODO\n// 323: Userfaultfd, TODO\n// 324: Membarrier, TODO\n+ // Syscalls after 325 are backports from 4.6.\n325: syscalls.Error(nil), // Mlock2, TODO\n+ 327: Preadv2,\n+ // 328: Pwritev2, // Pwritev2, TODO\n},\nEmulate: map[usermem.Addr]uintptr{\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_read.go", "new_path": "pkg/sentry/syscalls/linux/sys_read.go", "diff": "@@ -187,6 +187,68 @@ func Preadv(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\nreturn uintptr(n), nil, handleIOError(t, n != 0, err, kernel.ERESTARTSYS, \"preadv\", file)\n}\n+// Preadv2 implements linux syscall preadv2(2).\n+func Preadv2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n+ fd := kdefs.FD(args[0].Int())\n+ addr := args[1].Pointer()\n+ iovcnt := int(args[2].Int())\n+ offset := args[3].Int64()\n+ flags := int(args[4].Int())\n+\n+ validFlags := linux.RWF_HIPRI\n+\n+ file := t.FDMap().GetFile(fd)\n+ if file == nil {\n+ return 0, nil, syserror.EBADF\n+ }\n+ defer file.DecRef()\n+\n+ // Check that the offset is legitimate.\n+ if offset < -1 {\n+ return 0, nil, syserror.EINVAL\n+ }\n+\n+ // Is reading at an offset supported?\n+ if offset > -1 && !file.Flags().Pread {\n+ return 0, nil, syserror.ESPIPE\n+ }\n+\n+ // Check that the file is readable.\n+ if !file.Flags().Read {\n+ return 0, nil, syserror.EBADF\n+ }\n+\n+ // Check flags field.\n+ if flags != 0 {\n+ if flags&^validFlags != 0 {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ // RWF_HIPRI must be called on a file with O_DIRECT flag set.\n+ if flags&linux.RWF_HIPRI != 0 && !file.Flags().Direct {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ }\n+\n+ // Read the iovecs that specify the destination of the read.\n+ dst, err := t.IovecsIOSequence(addr, iovcnt, usermem.IOOpts{\n+ AddressSpaceActive: true,\n+ })\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+\n+ // If preadv2 is called with an offset of -1, readv is called.\n+ if offset == -1 {\n+ n, err := readv(t, file, dst)\n+ t.IOUsage().AccountReadSyscall(n)\n+ return uintptr(n), nil, handleIOError(t, n != 0, err, kernel.ERESTARTSYS, \"preadv2\", file)\n+ }\n+\n+ n, err := preadv(t, file, dst, offset)\n+ t.IOUsage().AccountReadSyscall(n)\n+ return uintptr(n), nil, handleIOError(t, n != 0, err, kernel.ERESTARTSYS, \"preadv2\", file)\n+}\n+\nfunc readv(t *kernel.Task, f *fs.File, dst usermem.IOSequence) (int64, error) {\nn, err := f.Readv(t, dst)\nif err != syserror.ErrWouldBlock || f.Flags().NonBlocking {\n" } ]
Go
Apache License 2.0
google/gvisor
Implementation of preadv2 for Linux 4.4 support Implement RWF_HIPRI (4.6) silently passes the read call. Implement -1 offset calls readv. PiperOrigin-RevId: 222840324 Change-Id: If9ddc1e8d086e1a632bdf5e00bae08205f95b6b0
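A rough sketch of the dispatch shape this preadv2 change describes, with made-up helper parameters standing in for the sentry's readv/preadv paths (not the actual kernel types): unknown flag bits are rejected, RWF_HIPRI is accepted but has no effect, and an offset of -1 degrades to an ordinary readv-style read.

package main

import (
    "errors"
    "fmt"
)

const rwfHIPRI = 0x0001 // only flag accepted in this sketch

var errInval = errors.New("EINVAL")

// preadv2 validates flags and offset, then dispatches to one of the two
// hypothetical read helpers supplied by the caller.
func preadv2(offset int64, flags int, readv func() (int, error), preadv func(int64) (int, error)) (int, error) {
    if offset < -1 {
        return 0, errInval
    }
    if flags&^rwfHIPRI != 0 {
        return 0, errInval // unknown flag bits
    }
    // RWF_HIPRI is accepted but changes nothing here.
    if offset == -1 {
        return readv() // read from the current file offset
    }
    return preadv(offset)
}

func main() {
    readv := func() (int, error) { return 1, nil }
    preadv := func(off int64) (int, error) { return int(off), nil }
    fmt.Println(preadv2(-1, rwfHIPRI, readv, preadv)) // readv path
    fmt.Println(preadv2(8, 0, readv, preadv))         // preadv path
    fmt.Println(preadv2(0, 0x8000, readv, preadv))    // rejected flag
}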
259,881
27.11.2018 09:24:17
28,800
071aeea9d3ff783b2946ef291b1c440aa9b21b88
Disable crictl tests. gvisor-containerd-shim installation is currently broken.
[ { "change_type": "MODIFY", "old_path": "kokoro/run_tests.sh", "new_path": "kokoro/run_tests.sh", "diff": "@@ -100,10 +100,11 @@ EOF\n)\n# Install containerd and crictl.\n-if [[ ${exit_code} -eq 0 ]]; then\n- installCrictl\n- exit_code=${?}\n-fi\n+# FIXME: gvisor-containerd-shim installation broken.\n+#if [[ ${exit_code} -eq 0 ]]; then\n+# installCrictl\n+# exit_code=${?}\n+#fi\n# Execute local tests that require docker.\nif [[ ${exit_code} -eq 0 ]]; then\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/root/crictl_test.go", "new_path": "runsc/test/root/crictl_test.go", "diff": "@@ -37,6 +37,9 @@ import (\n// Tests for crictl have to be run as root (rather than in a user namespace)\n// because crictl creates named network namespaces in /var/run/netns/.\nfunc TestCrictlSanity(t *testing.T) {\n+ // FIXME\n+ t.Skip(\"crictl installation broken\")\n+\n// Setup containerd and crictl.\ncrictl, cleanup, err := setup(t)\nif err != nil {\n@@ -59,6 +62,9 @@ func TestCrictlSanity(t *testing.T) {\n}\n}\nfunc TestMountPaths(t *testing.T) {\n+ // FIXME\n+ t.Skip(\"crictl installation broken\")\n+\n// Setup containerd and crictl.\ncrictl, cleanup, err := setup(t)\nif err != nil {\n@@ -81,6 +87,9 @@ func TestMountPaths(t *testing.T) {\n}\n}\nfunc TestMountOverSymlinks(t *testing.T) {\n+ // FIXME\n+ t.Skip(\"crictl installation broken\")\n+\n// Setup containerd and crictl.\ncrictl, cleanup, err := setup(t)\nif err != nil {\n" } ]
Go
Apache License 2.0
google/gvisor
Disable crictl tests gvisor-containerd-shim installation is currently broken. PiperOrigin-RevId: 223002877 Change-Id: I2b890c5bf602a96c475c3805f24852ead8593a35
259,974
27.11.2018 12:45:04
28,800
9e0f13237793897c805f75af163006049b37e784
Add procid support for arm64 platform
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/procid/BUILD", "new_path": "pkg/sentry/platform/procid/BUILD", "diff": "@@ -7,6 +7,7 @@ go_library(\nsrcs = [\n\"procid.go\",\n\"procid_amd64.s\",\n+ \"procid_arm64.s\",\n],\nimportpath = \"gvisor.googlesource.com/gvisor/pkg/sentry/platform/procid\",\nvisibility = [\"//pkg/sentry:internal\"],\n" } ]
Go
Apache License 2.0
google/gvisor
Add procid support for arm64 platform Change-Id: I7c3db8dfdf95a125d7384c1d67c3300dbb99a47e PiperOrigin-RevId: 223039923
260,013
27.11.2018 17:47:16
28,800
5bd02b224fd0eb81fc028644137a24d0bbf7dab5
Save shutdown flags first. With rpcinet, if shutdown flags are not saved before making the RPC, a race is possible where blocked threads are woken up before the flags have been persisted. This would mean that threads can block indefinitely in a recvmsg after a shutdown(SHUT_RD) has happened.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/rpcinet/socket.go", "new_path": "pkg/sentry/socket/rpcinet/socket.go", "diff": "@@ -354,6 +354,13 @@ func (s *socketOperations) Listen(t *kernel.Task, backlog int) *syserr.Error {\n// Shutdown implements socket.Socket.Shutdown.\nfunc (s *socketOperations) Shutdown(t *kernel.Task, how int) *syserr.Error {\n+ // We save the shutdown state because of strange differences on linux\n+ // related to recvs on blocking vs. non-blocking sockets after a SHUT_RD.\n+ // We need to emulate that behavior on the blocking side.\n+ // TODO: There is a possible race that can exist with loopback,\n+ // where data could possibly be lost.\n+ s.setShutdownFlags(how)\n+\nstack := t.NetworkContext().(*Stack)\nid, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_Shutdown{&pb.ShutdownRequest{Fd: s.fd, How: int64(how)}}}, false /* ignoreResult */)\n<-c\n@@ -362,10 +369,6 @@ func (s *socketOperations) Shutdown(t *kernel.Task, how int) *syserr.Error {\nreturn syserr.FromHost(syscall.Errno(e))\n}\n- // We save the shutdown state because of strange differences on linux\n- // related to recvs on blocking vs. non-blocking sockets after a SHUT_RD.\n- // We need to emulate that behavior on the blocking side.\n- s.setShutdownFlags(how)\nreturn nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Save shutdown flags first. With rpcinet if shutdown flags are not saved before making the rpc a race is possible where blocked threads are woken up before the flags have been persisted. This would mean that threads can block indefinitely in a recvmsg after a shutdown(SHUT_RD) has happened. PiperOrigin-RevId: 223089783 Change-Id: If595e7add12aece54bcdf668ab64c570910d061a
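A small sketch of the ordering this change argues for, with a stubbed callback standing in for the rpcinet call (the sock type and names here are made up): the shutdown flags must be published before the call that can wake blocked readers, so a woken reader never observes stale flags.

package main

import (
    "fmt"
    "sync"
)

type sock struct {
    mu            sync.Mutex
    shutdownFlags int
}

func (s *sock) setShutdownFlags(how int) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.shutdownFlags |= how
}

func (s *sock) flags() int {
    s.mu.Lock()
    defer s.mu.Unlock()
    return s.shutdownFlags
}

// shutdown records the flags first; only then does it issue the (stubbed)
// RPC that may cause blocked readers to wake up.
func (s *sock) shutdown(how int, rpc func()) {
    s.setShutdownFlags(how)
    rpc()
}

func main() {
    const shutRD = 1
    s := &sock{}
    s.shutdown(shutRD, func() {
        // A reader woken at this point already sees the flag.
        fmt.Println("flags visible at wakeup:", s.flags())
    })
}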
259,891
28.11.2018 10:09:22
28,800
7b86d36a63e9b281834fdb6c2db0840df992c57c
Fix crictl tests. gvisor-containerd-shim moved. It now has a stable URL that run_tests.sh always uses.
[ { "change_type": "MODIFY", "old_path": "kokoro/run_tests.sh", "new_path": "kokoro/run_tests.sh", "diff": "@@ -75,8 +75,10 @@ installCrictl() (\nsudo -n -E make install\n# Install gvisor-containerd-shim.\n+ local latest=/tmp/gvisor-containerd-shim-latest\nlocal shim_path=/tmp/gvisor-containerd-shim\n- wget https://storage.googleapis.com/cri-containerd-staging/gvisor-containerd-shim/gvisor-containerd-shim -O ${shim_path}\n+ wget https://storage.googleapis.com/cri-containerd-staging/gvisor-containerd-shim/latest -O ${latest}\n+ wget https://storage.googleapis.com/cri-containerd-staging/gvisor-containerd-shim/$(cat ${latest}) -O ${shim_path}\nchmod +x ${shim_path}\nsudo -n -E mv ${shim_path} /usr/local/bin\n@@ -100,11 +102,10 @@ EOF\n)\n# Install containerd and crictl.\n-# FIXME: gvisor-containerd-shim installation broken.\n-#if [[ ${exit_code} -eq 0 ]]; then\n-# installCrictl\n-# exit_code=${?}\n-#fi\n+if [[ ${exit_code} -eq 0 ]]; then\n+ installCrictl\n+ exit_code=${?}\n+fi\n# Execute local tests that require docker.\nif [[ ${exit_code} -eq 0 ]]; then\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/root/crictl_test.go", "new_path": "runsc/test/root/crictl_test.go", "diff": "@@ -37,9 +37,6 @@ import (\n// Tests for crictl have to be run as root (rather than in a user namespace)\n// because crictl creates named network namespaces in /var/run/netns/.\nfunc TestCrictlSanity(t *testing.T) {\n- // FIXME\n- t.Skip(\"crictl installation broken\")\n-\n// Setup containerd and crictl.\ncrictl, cleanup, err := setup(t)\nif err != nil {\n@@ -62,9 +59,6 @@ func TestCrictlSanity(t *testing.T) {\n}\n}\nfunc TestMountPaths(t *testing.T) {\n- // FIXME\n- t.Skip(\"crictl installation broken\")\n-\n// Setup containerd and crictl.\ncrictl, cleanup, err := setup(t)\nif err != nil {\n@@ -87,9 +81,6 @@ func TestMountPaths(t *testing.T) {\n}\n}\nfunc TestMountOverSymlinks(t *testing.T) {\n- // FIXME\n- t.Skip(\"crictl installation broken\")\n-\n// Setup containerd and crictl.\ncrictl, cleanup, err := setup(t)\nif err != nil {\n" } ]
Go
Apache License 2.0
google/gvisor
Fix crictl tests. gvisor-containerd-shim moved. It now has a stable URL that run_tests.sh always uses. PiperOrigin-RevId: 223188822 Change-Id: I5687c78289404da27becd8d5949371e580fdb360
259,884
29.11.2018 04:48:15
28,800
db0473b1bec1481e5a7807752b5eaa72c816c562
Add a new docs directory. refs Add a new 'docs' directory that can be built on in the future. Docs are divided into a 'user guide', 'contributor guide', and 'architecture guide'. This is currently a work in progress.
[ { "change_type": "ADD", "old_path": null, "new_path": "docs/README.md", "diff": "+# gVisor Documentation\n+\n+**This doc is a work in progress. For the definitive documentation please see\n+the [README](../README.md)**\n+\n+gVisor is a user-space kernel, written in Go, that implements a substantial\n+portion of the [Linux system call interface][linux-interface]. It provides an\n+additional layer of isolation between running applications and the host\n+operating system.\n+\n+gVisor includes an [Open Container Initiative (OCI)][oci] runtime called `runsc`\n+that makes it easy to work with existing container tooling. The `runsc` runtime\n+integrates with Docker and Kubernetes, making it simple to run sandboxed\n+containers.\n+\n+Check out the [gVisor Quick Start](user_guide/quick_start.md) to get started\n+using gVisor.\n+\n+gVisor takes a distinct approach to container sandboxing and makes a different\n+set of technical trade-offs compared to existing sandbox technologies, thus\n+providing new tools and ideas for the container security landscape.\n+\n+Check out [Why gVisor?](architecture_guide/why.md) for more on why we made\n+gVisor.\n+\n+## How this documentation is organized\n+\n+- The [Architecture Guide](architecture_guide/README.md) explains about\n+ gVisor's architecture & design philosophy. Start here if you would like to\n+ know more about how gVisor works and why it was created.\n+- The [User Guide](user_guide/README.md) contains info on how to use gVisor\n+ and integrate it into your application or platform.\n+- The [Contributer Guide](contributer_guide/README.md) includes documentation\n+ on how to build gVisor, run tests, and contribute to gVisor's development.\n+\n+[linux-interface]: https://en.wikipedia.org/wiki/Linux_kernel_interfaces\n+[oci]: https://www.opencontainers.org\n" }, { "change_type": "ADD", "old_path": null, "new_path": "docs/architecture_guide/README.md", "diff": "+# Architecture Guide\n" }, { "change_type": "ADD", "old_path": null, "new_path": "docs/architecture_guide/why.md", "diff": "+# Why gVisor?\n+\n+gVisor makes a different set of technical trade-offs compared to existing\n+sandbox technologies, thus providing new tools and ideas for the container\n+security landscape.\n+\n+As the developers of gVisor, we wanted an execution environment that was secure,\n+simple, and lightweight and were able to make trade offs in other areas. We were\n+not able to achieve that with existing solutions.\n" }, { "change_type": "ADD", "old_path": null, "new_path": "docs/contributor_guide/README.md", "diff": "+# Contributor Guide\n" }, { "change_type": "ADD", "old_path": null, "new_path": "docs/user_guide/README.md", "diff": "+# User Guide\n" }, { "change_type": "ADD", "old_path": null, "new_path": "docs/user_guide/docker.md", "diff": "+# Run gVisor with Docker\n+\n+## Configuring Docker\n+\n+Next, configure Docker to use `runsc` by adding a runtime entry to your Docker\n+configuration (`/etc/docker/daemon.json`). You may have to create this file if\n+it does not exist. 
Also, some Docker versions also require you to [specify the\n+`storage-driver` field][docker-storage-driver].\n+\n+In the end, the file should look something like:\n+\n+```\n+{\n+ \"runtimes\": {\n+ \"runsc\": {\n+ \"path\": \"/usr/local/bin/runsc\"\n+ }\n+ }\n+}\n+```\n+\n+You must restart the Docker daemon after making changes to this file, typically\n+this is done via:\n+\n+```\n+sudo systemctl restart docker\n+```\n+\n+## Running a container\n+\n+Now run your container in `runsc`:\n+\n+```\n+docker run --runtime=runsc hello-world\n+```\n+\n+You can also run a terminal to explore the container.\n+\n+```\n+docker run --runtime=runsc -it ubuntu /bin/bash\n+```\n" }, { "change_type": "ADD", "old_path": null, "new_path": "docs/user_guide/quick_start.md", "diff": "+# Quick Start\n+\n+This guide will quickly get you started running your first gVisor sandbox\n+container.\n+\n+Some requirements:\n+\n+- gVisor requires Linux x86\\_64 Linux 3.17+\n+- This guide requires Docker. Read the Docker documentation for how to install\n+ it on how to [install Docker](https://docs.docker.com/install/)\n+\n+## Install gVisor\n+\n+The easiest way to get `runsc` is from the\n+[latest nightly build][runsc-nightly]. After you download the binary, check it\n+against the SHA512 [checksum file][runsc-nightly-sha]. Older builds can be found\n+here:\n+`https://storage.googleapis.com/gvisor/releases/nightly/${yyyy-mm-dd}/runsc` and\n+`https://storage.googleapis.com/gvisor/releases/nightly/${yyyy-mm-dd}/runsc.sha512`\n+\n+**It is important to copy this binary to some place that is accessible to all\n+users, and make is executable to all users**, since `runsc` executes itself as\n+user `nobody` to avoid unnecessary privileges. The `/usr/local/bin` directory is\n+a good place to put the `runsc` binary.\n+\n+```\n+wget https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc\n+wget https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc.sha512\n+sha512sum -c runsc.sha512\n+chmod a+x runsc\n+sudo mv runsc /usr/local/bin\n+```\n+\n+## Run an OCI compatible container\n+\n+Now we will create an [OCI][oci] container bundle to run our container. First we\n+will create a root directory for our bundle.\n+\n+```\n+$ mkdir bundle\n+$ cd bundle\n+```\n+\n+Create a root file system for the container. We will use the Docker hello-world\n+image as the basis for our container.\n+\n+```\n+$ mkdir rootfs\n+$ docker export $(docker create hello-world) | tar -xf - -C rootfs\n+```\n+\n+Next, create an specification file called `config.json` that contains our\n+container specification. We will update the default command it runs to `/hello`\n+in the `hello-world` container.\n+\n+```\n+$ runsc spec\n+$ sed -i 's;\"sh\";\"/hello\";' config.json\n+```\n+\n+Finally run the container.\n+\n+```\n+$ sudo runsc run hello\n+```\n+\n+\\[TODO]:# Add some next steps\n+\n+[runsc-nightly-sha]: https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc.sha512\n+[runsc-nightly]: https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc\n+[oci]: https://www.opencontainers.org\n" } ]
Go
Apache License 2.0
google/gvisor
Add a new docs directory. refs #109 Add a new 'docs' directory for that can be built on in the future. Docs are divided into a 'user guide', 'contributor guide', and 'architecture guide'. This is currently a work in progress. PiperOrigin-RevId: 223326836 Change-Id: I78d08d6a89d686e92d3415d4269463e8e74bddee
259,854
29.11.2018 15:45:02
28,800
99fb1138696c520f8db7064320e14ec63c2458b6
Test that full segments will be sent when delay/cork is enabled
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/tcp_test.go", "new_path": "pkg/tcpip/transport/tcp/tcp_test.go", "diff": "@@ -1449,6 +1449,71 @@ func TestUndelay(t *testing.T) {\n})\n}\n+func TestMSSNotDelayed(t *testing.T) {\n+ tests := []struct {\n+ name string\n+ fn func(tcpip.Endpoint)\n+ }{\n+ {\"no-op\", func(tcpip.Endpoint) {}},\n+ {\"delay\", func(ep tcpip.Endpoint) { ep.SetSockOpt(tcpip.DelayOption(1)) }},\n+ {\"cork\", func(ep tcpip.Endpoint) { ep.SetSockOpt(tcpip.CorkOption(1)) }},\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ const maxPayload = 100\n+ c := context.New(t, defaultMTU)\n+ defer c.Cleanup()\n+\n+ c.CreateConnectedWithRawOptions(789, 30000, nil, []byte{\n+ header.TCPOptionMSS, 4, byte(maxPayload / 256), byte(maxPayload % 256),\n+ })\n+\n+ test.fn(c.EP)\n+\n+ allData := [][]byte{{0}, make([]byte, maxPayload), make([]byte, maxPayload)}\n+ for i, data := range allData {\n+ view := buffer.NewViewFromBytes(data)\n+ if _, _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil {\n+ t.Fatalf(\"Write #%d failed: %v\", i+1, err)\n+ }\n+ }\n+\n+ seq := c.IRS.Add(1)\n+\n+ for i, data := range allData {\n+ // Check that data is received.\n+ packet := c.GetPacket()\n+ checker.IPv4(t, packet,\n+ checker.PayloadLen(len(data)+header.TCPMinimumSize),\n+ checker.TCP(\n+ checker.DstPort(context.TestPort),\n+ checker.SeqNum(uint32(seq)),\n+ checker.AckNum(790),\n+ checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),\n+ ),\n+ )\n+\n+ if got, want := packet[header.IPv4MinimumSize+header.TCPMinimumSize:], data; !bytes.Equal(got, want) {\n+ t.Fatalf(\"got packet #%d's data = %v, want = %v\", i+1, got, want)\n+ }\n+\n+ seq = seq.Add(seqnum.Size(len(data)))\n+ }\n+\n+ // Acknowledge the data.\n+ c.SendPacket(nil, &context.Headers{\n+ SrcPort: context.TestPort,\n+ DstPort: c.Port,\n+ Flags: header.TCPFlagAck,\n+ SeqNum: 790,\n+ AckNum: seq,\n+ RcvWnd: 30000,\n+ })\n+ })\n+ }\n+}\n+\nfunc testBrokenUpWrite(t *testing.T, c *context.Context, maxPayload int) {\npayloadMultiplier := 10\ndataLen := payloadMultiplier * maxPayload\n" } ]
Go
Apache License 2.0
google/gvisor
Test that full segments will be sent when delay/cork is enabled PiperOrigin-RevId: 223425575 Change-Id: Idd777e04c69e6ffcbfb0bdbea828a8b8b42d7672
259,854
03.12.2018 17:02:28
28,800
5560615c531bc2a0108a4db1e9877f0397a69f8f
Return an int32 for netlink SO_RCVBUF. Untyped integer constants default to type int, and the binary package will panic if one tries to encode an int.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netlink/socket.go", "new_path": "pkg/sentry/socket/netlink/socket.go", "diff": "@@ -298,7 +298,7 @@ func (s *Socket) GetSockOpt(t *kernel.Task, level int, name int, outLen int) (in\nreturn nil, syserr.ErrInvalidArgument\n}\n// We don't have limit on receiving size.\n- return math.MaxInt32, nil\n+ return int32(math.MaxInt32), nil\ndefault:\nsocket.GetSockOptEmitUnimplementedEvent(t, name)\n" } ]
Go
Apache License 2.0
google/gvisor
Return an int32 for netlink SO_RCVBUF Untyped integer constants default to type int and the binary package will panic if one tries to encode an int. PiperOrigin-RevId: 223890001 Change-Id: Iccc3afd6d74bad24c35d764508e450fd317b76ec
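The Go detail behind this fix, shown with plain fmt rather than the sentry's binary package (which, per the message, only handles fixed-size types): an untyped constant such as math.MaxInt32 takes its default type int when stored in an interface, so an explicit int32 conversion is needed to keep the width fixed.

package main

import (
    "fmt"
    "math"
)

func main() {
    var asDefault interface{} = math.MaxInt32      // untyped constant defaults to int
    var asInt32 interface{} = int32(math.MaxInt32) // explicit conversion keeps a fixed width
    fmt.Printf("%T %T\n", asDefault, asInt32)      // prints: int int32
}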
259,948
04.12.2018 13:13:13
28,800
ad8f293e1af99f3c04d1020bb51b46c0dba60e45
sentry: save copy of tcp segment's delivered views to avoid in-struct pointers.
[ { "change_type": "MODIFY", "old_path": "pkg/state/encode.go", "new_path": "pkg/state/encode.go", "diff": "@@ -119,9 +119,10 @@ func (es *encodeState) register(obj reflect.Value) uint64 {\nif size := typ.Size(); size > 0 {\nr := addrRange{addr, addr + size}\nif !es.values.IsEmptyRange(r) {\n- panic(fmt.Errorf(\"overlapping objects: [new object] %#v [existing object] %#v\", obj.Interface(), es.values.FindSegment(addr).Value().Elem().Interface()))\n+ old := es.values.LowerBoundSegment(addr).Value().Interface().(recoverable)\n+ panic(fmt.Errorf(\"overlapping objects: [new object] %#v [existing object path] %s\", obj.Interface(), old.path()))\n}\n- es.values.Add(r, obj)\n+ es.values.Add(r, reflect.ValueOf(es.recoverable.copy()))\n}\n} else {\n// Push back the map itself; when maps are encoded from the\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/segment.go", "new_path": "pkg/tcpip/transport/tcp/segment.go", "diff": "@@ -46,7 +46,7 @@ type segment struct {\ndata buffer.VectorisedView `state:\".(buffer.VectorisedView)\"`\n// views is used as buffer for data when its length is large\n// enough to store a VectorisedView.\n- views [8]buffer.View\n+ views [8]buffer.View `state:\"nosave\"`\n// viewToDeliver keeps track of the next View that should be\n// delivered by the Read endpoint.\nviewToDeliver int\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/segment_state.go", "new_path": "pkg/tcpip/transport/tcp/segment_state.go", "diff": "@@ -22,7 +22,16 @@ import (\nfunc (s *segment) saveData() buffer.VectorisedView {\n// We cannot save s.data directly as s.data.views may alias to s.views,\n// which is not allowed by state framework (in-struct pointer).\n- return s.data.Clone(nil)\n+ v := make([]buffer.View, len(s.data.Views()))\n+ // For views already delivered, we cannot save them directly as they may\n+ // have already been sliced and saved elsewhere (e.g., readViews).\n+ for i := 0; i < s.viewToDeliver; i++ {\n+ v[i] = append([]byte(nil), s.data.Views()[i]...)\n+ }\n+ for i := s.viewToDeliver; i < len(v); i++ {\n+ v[i] = s.data.Views()[i]\n+ }\n+ return buffer.NewVectorisedView(s.data.Size(), v)\n}\n// loadData is invoked by stateify.\n" } ]
Go
Apache License 2.0
google/gvisor
sentry: save copy of tcp segment's delivered views to avoid in-struct pointers. PiperOrigin-RevId: 224033238 Change-Id: Ie5b1854b29340843b02c123766d290a8738d7631
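The copying rule from saveData above, reduced to plain byte slices (copyPrefix is a hypothetical helper, not part of the tcp package): entries that may already be aliased elsewhere are deep-copied, while the untouched tail is shared as-is.

package main

import "fmt"

// copyPrefix returns a new slice-of-slices in which the first 'delivered'
// entries are deep-copied (they may alias data saved elsewhere) and the
// remaining entries are reused directly.
func copyPrefix(views [][]byte, delivered int) [][]byte {
    out := make([][]byte, len(views))
    for i := 0; i < delivered; i++ {
        out[i] = append([]byte(nil), views[i]...)
    }
    for i := delivered; i < len(views); i++ {
        out[i] = views[i]
    }
    return out
}

func main() {
    views := [][]byte{[]byte("already delivered"), []byte("pending")}
    saved := copyPrefix(views, 1)
    // The delivered view was copied; the pending one still shares storage.
    fmt.Println(&views[0][0] != &saved[0][0], &views[1][0] == &saved[1][0]) // true true
}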
259,854
04.12.2018 13:14:22
28,800
8cbd6153a69ae370a3c5d3795952a1de00f7436b
Fix available calculation when merging TCP segments
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/snd.go", "new_path": "pkg/tcpip/transport/tcp/snd.go", "diff": "@@ -411,7 +411,7 @@ func (s *sender) sendData() {\nif seg.flags == 0 {\n// Merge segments if allowed.\nif seg.data.Size() != 0 {\n- available := int(seg.sequenceNumber.Size(end))\n+ available := int(s.sndNxt.Size(end))\nif available > limit {\navailable = limit\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix available calculation when merging TCP segments PiperOrigin-RevId: 224033418 Change-Id: I780be973e8be68ac93e8c9e7a100002e912f40d2
260,013
04.12.2018 14:27:46
28,800
5a6a1eb420620c3d41a9db4ddf7ac7b163310f09
Enforce name length restriction on paths. NAME_MAX must be enforced per component.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/dirent.go", "new_path": "pkg/sentry/fs/dirent.go", "diff": "@@ -458,6 +458,12 @@ func (d *Dirent) walk(ctx context.Context, root *Dirent, name string, walkMayUnl\nif !IsDir(d.Inode.StableAttr) {\nreturn nil, syscall.ENOTDIR\n}\n+\n+ // The component must be less than NAME_MAX.\n+ if len(name) > linux.NAME_MAX {\n+ return nil, syscall.ENAMETOOLONG\n+ }\n+\nif name == \"\" || name == \".\" {\nd.IncRef()\nreturn d, nil\n" } ]
Go
Apache License 2.0
google/gvisor
Enforce name length restriction on paths. NAME_LENGTH must be enforced per component. PiperOrigin-RevId: 224046749 Change-Id: Iba8105b00d951f2509dc768af58e4110dafbe1c9
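A standalone sketch of the rule being enforced here (checkComponents and the 255-byte constant are assumptions matching the usual Linux NAME_MAX, not code from the diff): the limit applies to each path component, not to the path as a whole.

package main

import (
    "fmt"
    "strings"
    "syscall"
)

const nameMax = 255 // Linux NAME_MAX

// checkComponents enforces NAME_MAX on every component of the path; the
// overall path may be much longer (that is PATH_MAX's job, not NAME_MAX's).
func checkComponents(path string) error {
    for _, c := range strings.Split(path, "/") {
        if len(c) > nameMax {
            return syscall.ENAMETOOLONG
        }
    }
    return nil
}

func main() {
    long := strings.Repeat("a", 300)
    fmt.Println(checkComponents("/usr/local/bin"))      // <nil>
    fmt.Println(checkComponents("/tmp/" + long + "/x")) // file name too long
}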
259,948
04.12.2018 14:29:56
28,800
adafc08d7cee594ea94abefbedf67ea315922550
sentry: save / restore netstack procfs configuration.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/proc/BUILD", "new_path": "pkg/sentry/fs/proc/BUILD", "diff": "@@ -20,6 +20,7 @@ go_library(\n\"stat.go\",\n\"sys.go\",\n\"sys_net.go\",\n+ \"sys_net_state.go\",\n\"task.go\",\n\"uid_gid_map.go\",\n\"uptime.go\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/proc/net.go", "new_path": "pkg/sentry/fs/proc/net.go", "diff": "@@ -54,6 +54,8 @@ func (p *proc) newNetDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\n}\n// ifinet6 implements seqfile.SeqSource for /proc/net/if_inet6.\n+//\n+// +stateify savable\ntype ifinet6 struct {\ns inet.Stack\n}\n@@ -108,6 +110,8 @@ func (n *ifinet6) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]s\n}\n// netDev implements seqfile.SeqSource for /proc/net/dev.\n+//\n+// +stateify savable\ntype netDev struct {\ns inet.Stack\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/proc/sys_net.go", "new_path": "pkg/sentry/fs/proc/sys_net.go", "diff": "@@ -36,7 +36,7 @@ const (\n// +stateify savable\ntype tcpMem struct {\nramfs.Entry\n- s inet.Stack\n+ s inet.Stack `state:\"wait\"`\nsize inet.TCPBufferSize\ndir tcpMemDir\n}\n@@ -81,30 +81,33 @@ func (m *tcpMem) DeprecatedPwritev(ctx context.Context, src usermem.IOSequence,\nbuf := []int32{int32(m.size.Min), int32(m.size.Default), int32(m.size.Max)}\nn, cperr := usermem.CopyInt32StringsInVec(ctx, src.IO, src.Addrs, buf, src.Opts)\n- size := inet.TCPBufferSize{\n+ m.size = inet.TCPBufferSize{\nMin: int(buf[0]),\nDefault: int(buf[1]),\nMax: int(buf[2]),\n}\n- var err error\n+ if err := m.writeSize(); err != nil {\n+ return n, err\n+ }\n+ return n, cperr\n+}\n+\n+func (m *tcpMem) writeSize() error {\nswitch m.dir {\ncase tcpRMem:\n- err = m.s.SetTCPReceiveBufferSize(size)\n+ return m.s.SetTCPReceiveBufferSize(m.size)\ncase tcpWMem:\n- err = m.s.SetTCPSendBufferSize(size)\n+ return m.s.SetTCPSendBufferSize(m.size)\ndefault:\npanic(fmt.Sprintf(\"unknown tcpMem.dir: %v\", m.dir))\n}\n- if err != nil {\n- return n, err\n- }\n- return n, cperr\n}\n// +stateify savable\ntype tcpSack struct {\nramfs.Entry\n- s inet.Stack\n+ s inet.Stack `state:\"wait\"`\n+ enabled *bool\n}\nfunc newTCPSackInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *fs.Inode {\n@@ -124,13 +127,16 @@ func (s *tcpSack) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence,\nreturn 0, io.EOF\n}\n+ if s.enabled == nil {\nsack, err := s.s.TCPSACKEnabled()\nif err != nil {\nreturn 0, err\n}\n+ s.enabled = &sack\n+ }\nval := \"0\\n\"\n- if sack {\n+ if *s.enabled {\n// Technically, this is not quite compatible with Linux. Linux\n// stores these as an integer, so if you write \"2\" into\n// tcp_sack, you should get 2 back. 
Tough luck.\n@@ -157,7 +163,11 @@ func (s *tcpSack) DeprecatedPwritev(ctx context.Context, src usermem.IOSequence,\nif err != nil {\nreturn n, err\n}\n- return n, s.s.SetTCPSACKEnabled(v != 0)\n+ if s.enabled == nil {\n+ s.enabled = new(bool)\n+ }\n+ *s.enabled = v != 0\n+ return n, s.s.SetTCPSACKEnabled(*s.enabled)\n}\nfunc (p *proc) newSysNetCore(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *fs.Inode {\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/sentry/fs/proc/sys_net_state.go", "diff": "+// Copyright 2018 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package proc\n+\n+import \"fmt\"\n+\n+// afterLoad is invoked by stateify.\n+func (m *tcpMem) afterLoad() {\n+ if err := m.writeSize(); err != nil {\n+ panic(fmt.Sprintf(\"failed to write previous TCP send / receive buffer sizes [%v]: %v\", m.size, err))\n+ }\n+}\n+\n+// afterLoad is invoked by stateify.\n+func (s *tcpSack) afterLoad() {\n+ if s.enabled != nil {\n+ if err := s.s.SetTCPSACKEnabled(*s.enabled); err != nil {\n+ panic(fmt.Sprintf(\"failed to set previous TCP sack configuration [%v]: %v\", *s.enabled, err))\n+ }\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
sentry: save / restore netstack procfs configuration. PiperOrigin-RevId: 224047120 Change-Id: Ia6cb17fa978595cd73857b6178c4bdba401e185e
260,013
04.12.2018 14:33:34
28,800
2cab0e82ad8c1e38392b8c35aaa65d1121a9e2b2
Linkat(2) should sanity check flags.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_file.go", "new_path": "pkg/sentry/syscalls/linux/sys_file.go", "diff": "@@ -1251,6 +1251,12 @@ func Linkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\n// AT_SYMLINK_FOLLOW can be specified in flags to cause oldpath to be\n// dereferenced if it is a symbolic link.\nflags := args[4].Int()\n+\n+ // Sanity check flags.\n+ if flags&^(linux.AT_SYMLINK_FOLLOW|linux.AT_EMPTY_PATH) != 0 {\n+ return 0, nil, syserror.EINVAL\n+ }\n+\nresolve := flags&linux.AT_SYMLINK_FOLLOW == linux.AT_SYMLINK_FOLLOW\nallowEmpty := flags&linux.AT_EMPTY_PATH == linux.AT_EMPTY_PATH\n" } ]
Go
Apache License 2.0
google/gvisor
Linkat(2) should sanity check flags. PiperOrigin-RevId: 224047765 Change-Id: I6f3c75b33c32bf8f8910ea3fab35406d7d672d87
260,013
04.12.2018 18:14:17
28,800
ffcbda0c8bd772c9019977775daf1d86891c3f28
Partial writes should loop in rpcinet. FileOperations.Write should return ErrWouldBlock to allow the upper layer to loop, and sendmsg should continue writing where it left off on a partial write.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/rpcinet/socket.go", "new_path": "pkg/sentry/socket/rpcinet/socket.go", "diff": "@@ -212,6 +212,11 @@ func (s *socketOperations) Write(ctx context.Context, _ *fs.File, src usermem.IO\n}\nn, err := rpcWrite(t, &pb.SyscallRequest_Write{&pb.WriteRequest{Fd: s.fd, Data: v}})\n+ if n > 0 && n < uint32(src.NumBytes()) {\n+ // The FileOperations.Write interface expects us to return ErrWouldBlock in\n+ // the event of a partial write.\n+ return int64(n), syserror.ErrWouldBlock\n+ }\nreturn int64(n), err.ToError()\n}\n@@ -735,19 +740,24 @@ func (s *socketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []\n// TODO: this needs to change to map directly to a SendMsg syscall\n// in the RPC.\n- req := &pb.SyscallRequest_Sendmsg{&pb.SendmsgRequest{\n+ totalWritten := 0\n+ n, err := rpcSendMsg(t, &pb.SyscallRequest_Sendmsg{&pb.SendmsgRequest{\nFd: uint32(s.fd),\nData: v,\nAddress: to,\nMore: flags&linux.MSG_MORE != 0,\nEndOfRecord: flags&linux.MSG_EOR != 0,\n- }}\n+ }})\n- n, err := rpcSendMsg(t, req)\nif err != syserr.ErrWouldBlock && err != syserr.ErrTryAgain || flags&linux.MSG_DONTWAIT != 0 {\nreturn int(n), err\n}\n+ if n > 0 {\n+ totalWritten += int(n)\n+ v.TrimFront(int(n))\n+ }\n+\n// We'll have to block. Register for notification and keep trying to\n// send all the data.\ne, ch := waiter.NewChannelEntry(nil)\n@@ -755,13 +765,30 @@ func (s *socketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []\ndefer s.EventUnregister(&e)\nfor {\n- n, err := rpcSendMsg(t, req)\n+ n, err := rpcSendMsg(t, &pb.SyscallRequest_Sendmsg{&pb.SendmsgRequest{\n+ Fd: uint32(s.fd),\n+ Data: v,\n+ Address: to,\n+ More: flags&linux.MSG_MORE != 0,\n+ EndOfRecord: flags&linux.MSG_EOR != 0,\n+ }})\n+\n+ if n > 0 {\n+ totalWritten += int(n)\n+ v.TrimFront(int(n))\n+\n+ if err == nil && totalWritten < int(src.NumBytes()) {\n+ continue\n+ }\n+ }\n+\nif err != syserr.ErrWouldBlock && err != syserr.ErrTryAgain {\n- return int(n), err\n+ // We eat the error in this situation.\n+ return int(totalWritten), nil\n}\nif err := t.Block(ch); err != nil {\n- return 0, syserr.FromError(err)\n+ return int(totalWritten), syserr.FromError(err)\n}\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Partial writes should loop in rpcinet. FileOperations.Write should return ErrWouldBlock to allow the upper layer to loop and sendmsg should continue writing where it left off on a partial write. PiperOrigin-RevId: 224081631 Change-Id: Ic61f6943ea6b7abbd82e4279decea215347eac48
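The retry shape this change describes, boiled down to a generic helper (writeAll and errWouldBlock are made-up names; the real code also blocks on a waiter between attempts): on a short write the accepted prefix is trimmed off and the remainder is resubmitted until everything has been sent.

package main

import (
    "errors"
    "fmt"
)

var errWouldBlock = errors.New("would block")

// writeAll keeps calling write, trimming off whatever was accepted, until the
// whole buffer has been sent. A real implementation would wait when write
// reports errWouldBlock instead of spinning.
func writeAll(write func([]byte) (int, error), data []byte) (int, error) {
    total := 0
    for len(data) > 0 {
        n, err := write(data)
        total += n
        data = data[n:]
        if err != nil && err != errWouldBlock {
            return total, err
        }
    }
    return total, nil
}

func main() {
    // A writer that accepts at most 3 bytes per call.
    write := func(b []byte) (int, error) {
        if len(b) > 3 {
            return 3, errWouldBlock
        }
        return len(b), nil
    }
    fmt.Println(writeAll(write, []byte("partial writes"))) // 14 <nil>
}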
259,881
04.12.2018 18:52:56
28,800
076f107643fafab30a0d45dd5af49b8bd4b574b9
Remove initRegs arg from clone. It is always the same as t.initRegs.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ptrace/ptrace_unsafe.go", "new_path": "pkg/sentry/platform/ptrace/ptrace_unsafe.go", "diff": "@@ -130,8 +130,8 @@ func (t *thread) getSignalInfo(si *arch.SignalInfo) error {\n// call attach on it.\n//\n// Precondition: the OS thread must be locked and own t.\n-func (t *thread) clone(initRegs *syscall.PtraceRegs) (*thread, error) {\n- r, ok := usermem.Addr(initRegs.Rsp).RoundUp()\n+func (t *thread) clone() (*thread, error) {\n+ r, ok := usermem.Addr(t.initRegs.Rsp).RoundUp()\nif !ok {\nreturn nil, syscall.EINVAL\n}\n@@ -153,7 +153,7 @@ func (t *thread) clone(initRegs *syscall.PtraceRegs) (*thread, error) {\narch.SyscallArgument{},\n// We use these registers initially, but really they\n// could be anything. We're going to stop immediately.\n- arch.SyscallArgument{Value: uintptr(unsafe.Pointer(initRegs))})\n+ arch.SyscallArgument{Value: uintptr(unsafe.Pointer(&t.initRegs))})\nif err != nil {\nreturn nil, err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ptrace/subprocess.go", "new_path": "pkg/sentry/platform/ptrace/subprocess.go", "diff": "@@ -160,7 +160,7 @@ func newSubprocess(create func() (*thread, error)) (*subprocess, error) {\n// Wait for requests to create threads.\nfor r := range requests {\n- t, err := firstThread.clone(&firstThread.initRegs)\n+ t, err := firstThread.clone()\nif err != nil {\n// Should not happen: not recoverable.\npanic(fmt.Sprintf(\"error initializing first thread: %v\", err))\n" } ]
Go
Apache License 2.0
google/gvisor
Remove initRegs arg from clone It is always the same as t.initRegs. PiperOrigin-RevId: 224085550 Change-Id: I5cc4ddc3b481d4748c3c43f6f4bb50da1dbac694
260,006
04.12.2018 19:08:13
28,800
fab029c50b445e06ba770c9ccd7d6d0a06e15057
Remove incorrect code and improve testing of Stack.GetMainNICAddress. This removes code that should never have made it in in the first place, but did so due to incomplete testing. With the new tests the original code fails and the new code passes.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/nic.go", "new_path": "pkg/tcpip/stack/nic.go", "diff": "@@ -119,16 +119,6 @@ func (n *NIC) getMainNICAddress(protocol tcpip.NetworkProtocolNumber) (tcpip.Add\n}\n- // If no primary endpoints then check for other endpoints.\n- if r == nil {\n- for _, ref := range n.endpoints {\n- if ref.holdsInsertRef && ref.tryIncRef() {\n- r = ref\n- break\n- }\n- }\n- }\n-\nif r == nil {\nreturn \"\", tcpip.Subnet{}, tcpip.ErrNoLinkAddress\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack_test.go", "new_path": "pkg/tcpip/stack/stack_test.go", "diff": "package stack_test\nimport (\n+ \"bytes\"\n+ \"fmt\"\n\"math\"\n\"strings\"\n\"testing\"\n@@ -787,7 +789,79 @@ func TestSubnetAddRemove(t *testing.T) {\n}\n}\n-func TestGetMainNICAddress(t *testing.T) {\n+func TestGetMainNICAddressAddPrimaryNonPrimary(t *testing.T) {\n+ for _, addrLen := range []int{4, 16} {\n+ t.Run(fmt.Sprintf(\"addrLen=%d\", addrLen), func(t *testing.T) {\n+ for canBe := 0; canBe < 3; canBe++ {\n+ t.Run(fmt.Sprintf(\"canBe=%d\", canBe), func(t *testing.T) {\n+ for never := 0; never < 3; never++ {\n+ t.Run(fmt.Sprintf(\"never=%d\", never), func(t *testing.T) {\n+ s := stack.New([]string{\"fakeNet\"}, nil, stack.Options{})\n+ id, _ := channel.New(10, defaultMTU, \"\")\n+ if err := s.CreateNIC(1, id); err != nil {\n+ t.Fatalf(\"CreateNIC failed: %v\", err)\n+ }\n+ // Insert <canBe> primary and <never> never-primary addresses.\n+ // Each one will add a network endpoint to the NIC.\n+ primaryAddrAdded := make(map[tcpip.Address]tcpip.Subnet)\n+ for i := 0; i < canBe+never; i++ {\n+ var behavior stack.PrimaryEndpointBehavior\n+ if i < canBe {\n+ behavior = stack.CanBePrimaryEndpoint\n+ } else {\n+ behavior = stack.NeverPrimaryEndpoint\n+ }\n+ // Add an address and in case of a primary one also add a\n+ // subnet.\n+ address := tcpip.Address(bytes.Repeat([]byte{byte(i)}, addrLen))\n+ if err := s.AddAddressWithOptions(1, fakeNetNumber, address, behavior); err != nil {\n+ t.Fatalf(\"AddAddressWithOptions failed: %v\", err)\n+ }\n+ if behavior == stack.CanBePrimaryEndpoint {\n+ mask := tcpip.AddressMask(strings.Repeat(\"\\xff\", len(address)))\n+ subnet, err := tcpip.NewSubnet(address, mask)\n+ if err != nil {\n+ t.Fatalf(\"NewSubnet failed: %v\", err)\n+ }\n+ if err := s.AddSubnet(1, fakeNetNumber, subnet); err != nil {\n+ t.Fatalf(\"AddSubnet failed: %v\", err)\n+ }\n+ // Remember the address/subnet.\n+ primaryAddrAdded[address] = subnet\n+ }\n+ }\n+ // Check that GetMainNICAddress returns an address if at least\n+ // one primary address was added. In that case make sure the\n+ // address/subnet matches what we added.\n+ if len(primaryAddrAdded) == 0 {\n+ // No primary addresses present, expect an error.\n+ if _, _, err := s.GetMainNICAddress(1, fakeNetNumber); err != tcpip.ErrNoLinkAddress {\n+ t.Fatalf(\"got s.GetMainNICAddress(...) 
= %v, wanted = %v\", err, tcpip.ErrNoLinkAddress)\n+ }\n+ } else {\n+ // At least one primary address was added, expect a valid\n+ // address and subnet.\n+ gotAddress, gotSubnet, err := s.GetMainNICAddress(1, fakeNetNumber)\n+ if err != nil {\n+ t.Fatalf(\"GetMainNICAddress failed: %v\", err)\n+ }\n+ expectedSubnet, ok := primaryAddrAdded[gotAddress]\n+ if !ok {\n+ t.Fatalf(\"GetMainNICAddress: got address = %v, wanted any in {%v}\", gotAddress, primaryAddrAdded)\n+ }\n+ if expectedSubnet != gotSubnet {\n+ t.Fatalf(\"GetMainNICAddress: got subnet = %v, wanted %v\", gotSubnet, expectedSubnet)\n+ }\n+ }\n+ })\n+ }\n+ })\n+ }\n+ })\n+ }\n+}\n+\n+func TestGetMainNICAddressAddRemove(t *testing.T) {\ns := stack.New([]string{\"fakeNet\"}, nil, stack.Options{})\nid, _ := channel.New(10, defaultMTU, \"\")\nif err := s.CreateNIC(1, id); err != nil {\n" } ]
Go
Apache License 2.0
google/gvisor
Remove incorrect code and improve testing of Stack.GetMainNICAddress This removes code that should have never made it in in the first place, but did so due to incomplete testing. With the new tests the original code fails, the new code passes. PiperOrigin-RevId: 224086966 Change-Id: I646fef76977f4528f3705f497b95fad6b3ec32bc
259,975
05.12.2018 10:52:44
28,800
06131fe749e3715534f9d551528d89048ae1398b
Check for CAP_SYS_RESOURCE in prctl(PR_SET_MM, ...). If sys_prctl is called with PR_SET_MM without CAP_SYS_RESOURCE, the syscall should return failure with errno set to EPERM. See the prctl(2) man page.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_prctl.go", "new_path": "pkg/sentry/syscalls/linux/sys_prctl.go", "diff": "@@ -87,6 +87,10 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n}\ncase linux.PR_SET_MM:\n+ if !t.HasCapability(linux.CAP_SYS_RESOURCE) {\n+ return 0, nil, syscall.EPERM\n+ }\n+\nswitch args[1].Int() {\ncase linux.PR_SET_MM_EXE_FILE:\nfd := kdefs.FD(args[2].Int())\n" } ]
Go
Apache License 2.0
google/gvisor
Check for CAP_SYS_RESOURCE in prctl(PR_SET_MM, ...) If sys_prctl is called with PR_SET_MM without CAP_SYS_RESOURCE, the syscall should return failure with errno set to EPERM. See: http://man7.org/linux/man-pages/man2/prctl.2.html PiperOrigin-RevId: 224182874 Change-Id: I630d1dd44af8b444dd16e8e58a0764a0cf1ad9a3
259,881
05.12.2018 12:45:35
28,800
592f5bdc675ae2933919b649b45551c6781c7876
Add context to mount errors. This makes it more obvious why a mount failed.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/overlay.go", "new_path": "pkg/sentry/fs/overlay.go", "diff": "@@ -95,10 +95,10 @@ func isXattrOverlay(name string) bool {\n// - lower must not have dynamic file/directory content.\nfunc NewOverlayRoot(ctx context.Context, upper *Inode, lower *Inode, flags MountSourceFlags) (*Inode, error) {\nif !IsDir(upper.StableAttr) {\n- return nil, fmt.Errorf(\"upper Inode is not a directory\")\n+ return nil, fmt.Errorf(\"upper Inode is a %v, not a directory\", upper.StableAttr.Type)\n}\nif !IsDir(lower.StableAttr) {\n- return nil, fmt.Errorf(\"lower Inode is not a directory\")\n+ return nil, fmt.Errorf(\"lower Inode is a %v, not a directory\", lower.StableAttr.Type)\n}\nif upper.overlay != nil {\nreturn nil, fmt.Errorf(\"cannot nest overlay in upper file of another overlay\")\n" } ]
Go
Apache License 2.0
google/gvisor
Add context to mount errors This makes it more obvious why a mount failed. PiperOrigin-RevId: 224203880 Change-Id: I7961774a7b6fdbb5493a791f8b3815c49b8f7631
259,948
05.12.2018 13:50:14
28,800
fda4557e3dc19c72f857b107a52359723cd37216
sentry: skip waiting for undrain for netstack TCP endpoints in error state.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/connect.go", "new_path": "pkg/tcpip/transport/tcp/connect.go", "diff": "@@ -976,25 +976,35 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error {\ne.mu.Unlock()\n}\nif n&notifyClose != 0 && closeTimer == nil {\n- // Reset the connection 3 seconds after the\n- // endpoint has been closed.\n+ // Reset the connection 3 seconds after\n+ // the endpoint has been closed.\n+ //\n+ // The timer could fire in background\n+ // when the endpoint is drained. That's\n+ // OK as the loop here will not honor\n+ // the firing until the undrain arrives.\ncloseTimer = time.AfterFunc(3*time.Second, func() {\ncloseWaker.Assert()\n})\n}\n+ if n&notifyKeepaliveChanged != 0 {\n+ // The timer could fire in background\n+ // when the endpoint is drained. That's\n+ // OK. See above.\n+ e.resetKeepaliveTimer(true)\n+ }\n+\nif n&notifyDrain != 0 {\nfor !e.segmentQueue.empty() {\nif err := e.handleSegments(); err != nil {\nreturn err\n}\n}\n+ if e.state != stateError {\nclose(e.drainDone)\n<-e.undrain\n}\n-\n- if n&notifyKeepaliveChanged != 0 {\n- e.resetKeepaliveTimer(true)\n}\nreturn nil\n" } ]
Go
Apache License 2.0
google/gvisor
sentry: skip waiting for undrain for netstack TCP endpoints in error state. PiperOrigin-RevId: 224214981 Change-Id: I4c1dd5b1c856f7a4f9866a5dda44a5297e92486a
259,885
05.12.2018 14:26:24
28,800
23438b36327524ba3e71b6416d71863fb4dfa166
Update MM.usageAS when mremap copies or moves a mapping.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/mm/syscalls.go", "new_path": "pkg/sentry/mm/syscalls.go", "diff": "@@ -463,6 +463,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi\nvma.id.IncRef()\n}\nmm.vmas.Add(newAR, vma)\n+ mm.usageAS += uint64(newAR.Length())\nreturn newAR.Start, nil\n}\n@@ -479,14 +480,13 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi\n// oldAR, so calling RemoveMapping could cause us to miss an invalidation\n// overlapping oldAR.\n//\n- // Call vseg.Value() (rather than vseg.ValuePtr()) first to make a copy of\n- // the vma.\n+ // Call vseg.Value() (rather than vseg.ValuePtr()) to make a copy of the\n+ // vma.\nvseg = mm.vmas.Isolate(vseg, oldAR)\nvma := vseg.Value()\nmm.vmas.Remove(vseg)\n-\n- // Insert the new vma, transferring the reference on vma.id.\nmm.vmas.Add(newAR, vma)\n+ mm.usageAS = mm.usageAS - uint64(oldAR.Length()) + uint64(newAR.Length())\n// Move pmas. This is technically optional for non-private pmas, which\n// could just go through memmap.Mappable.Translate again, but it's required\n" } ]
Go
Apache License 2.0
google/gvisor
Update MM.usageAS when mremap copies or moves a mapping. PiperOrigin-RevId: 224221509 Change-Id: I7aaea74629227d682786d3e435737364921249bf
259,881
05.12.2018 14:31:07
28,800
9f64e64a6ee1fe44a05ed57893785fa9064125e1
Enforce directory accessibility before delete Walk. By Walking before checking that the directory is writable and executable, MayDelete may return the Walk error (e.g., ENOENT), which would normally be masked by a permission error (EACCES).
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/dirent.go", "new_path": "pkg/sentry/fs/dirent.go", "diff": "@@ -1461,6 +1461,10 @@ func checkSticky(ctx context.Context, dir *Dirent, victim *Dirent) error {\n//\n// Compare Linux kernel fs/namei.c:may_delete.\nfunc MayDelete(ctx context.Context, root, dir *Dirent, name string) error {\n+ if err := dir.Inode.CheckPermission(ctx, PermMask{Write: true, Execute: true}); err != nil {\n+ return err\n+ }\n+\nvictim, err := dir.Walk(ctx, root, name)\nif err != nil {\nreturn err\n@@ -1470,11 +1474,11 @@ func MayDelete(ctx context.Context, root, dir *Dirent, name string) error {\nreturn mayDelete(ctx, dir, victim)\n}\n+// mayDelete determines whether `victim`, a child of `dir`, can be deleted or\n+// renamed by `ctx`.\n+//\n+// Preconditions: `dir` is writable and executable by `ctx`.\nfunc mayDelete(ctx context.Context, dir, victim *Dirent) error {\n- if err := dir.Inode.CheckPermission(ctx, PermMask{Write: true, Execute: true}); err != nil {\n- return err\n- }\n-\nif err := checkSticky(ctx, dir, victim); err != nil {\nreturn err\n}\n@@ -1512,6 +1516,15 @@ func Rename(ctx context.Context, root *Dirent, oldParent *Dirent, oldName string\nreturn syscall.ENOENT\n}\n+ // Do we have general permission to remove from oldParent and\n+ // create/replace in newParent?\n+ if err := oldParent.Inode.CheckPermission(ctx, PermMask{Write: true, Execute: true}); err != nil {\n+ return err\n+ }\n+ if err := newParent.Inode.CheckPermission(ctx, PermMask{Write: true, Execute: true}); err != nil {\n+ return err\n+ }\n+\n// renamed is the dirent that will be renamed to something else.\nrenamed, err := oldParent.walk(ctx, root, oldName, false /* may unlock */)\nif err != nil {\n@@ -1549,10 +1562,7 @@ func Rename(ctx context.Context, root *Dirent, oldParent *Dirent, oldName string\nreturn err\n}\n- // Make sure we can create a new child in the new parent.\n- if err := newParent.Inode.CheckPermission(ctx, PermMask{Write: true, Execute: true}); err != nil {\n- return err\n- }\n+ // newName doesn't exist; simply create it below.\n} else {\n// Check constraints on the dirent being replaced.\n@@ -1560,7 +1570,7 @@ func Rename(ctx context.Context, root *Dirent, oldParent *Dirent, oldName string\n// across the Rename, so must call DecRef manually (no defer).\n// Check that we can delete replaced.\n- if err := mayDelete(ctx, oldParent, renamed); err != nil {\n+ if err := mayDelete(ctx, newParent, replaced); err != nil {\nreplaced.DecRef()\nreturn err\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Enforce directory accessibility before delete Walk By Walking before checking that the directory is writable and executable, MayDelete may return the Walk error (e.g., ENOENT) which would normally be masked by a permission error (EACCES). PiperOrigin-RevId: 224222453 Change-Id: I108a7f730e6bdaa7f277eaddb776267c00805475
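A toy illustration of the error-precedence point above, using invented types rather than anything from the fs package: checking the parent directory's permissions before the lookup means a caller without write access gets EACCES even for a name that does not exist, instead of leaking ENOENT.

package main

import (
    "errors"
    "fmt"
)

var (
    errAccess = errors.New("EACCES")
    errNoEnt  = errors.New("ENOENT")
)

type dir struct {
    writable bool
    entries  map[string]bool
}

// mayDelete checks permissions on the parent first and only then looks up
// the victim, so the permission error takes precedence over "not found".
func mayDelete(d *dir, name string) error {
    if !d.writable {
        return errAccess
    }
    if !d.entries[name] {
        return errNoEnt
    }
    return nil
}

func main() {
    d := &dir{writable: false}
    fmt.Println(mayDelete(d, "missing")) // EACCES, not ENOENT
}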
259,948
05.12.2018 15:01:41
28,800
7f35daddd2cabef2e7ffb6899e1a54ff8c0475c6
sentry: support save / restore of TCP bind socket after shutdown.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint_state.go", "new_path": "pkg/tcpip/transport/tcp/endpoint_state.go", "diff": "@@ -92,8 +92,8 @@ func (e *endpoint) beforeSave() {\npanic(\"endpoint still has waiters upon save\")\n}\n- if !((e.state == stateBound || e.state == stateListen) == e.isPortReserved) {\n- panic(\"endpoint port must and must only be reserved in bound or listen state\")\n+ if e.state != stateClosed && !((e.state == stateBound || e.state == stateListen) == e.isPortReserved) {\n+ panic(\"endpoints which are not in the closed state must have a reserved port IFF they are in bound or listen state\")\n}\n}\n@@ -102,16 +102,22 @@ func (e *endpoint) saveAcceptedChan() []*endpoint {\nif e.acceptedChan == nil {\nreturn nil\n}\n- close(e.acceptedChan)\nacceptedEndpoints := make([]*endpoint, len(e.acceptedChan), cap(e.acceptedChan))\n- i := 0\n- for ep := range e.acceptedChan {\n+ for i := 0; i < len(acceptedEndpoints); i++ {\n+ select {\n+ case ep := <-e.acceptedChan:\nacceptedEndpoints[i] = ep\n- i++\n- }\n- if i != len(acceptedEndpoints) {\n+ default:\npanic(\"endpoint acceptedChan buffer got consumed by background context\")\n}\n+ }\n+ for i := 0; i < len(acceptedEndpoints); i++ {\n+ select {\n+ case e.acceptedChan <- acceptedEndpoints[i]:\n+ default:\n+ panic(\"endpoint acceptedChan buffer got populated by background context\")\n+ }\n+ }\nreturn acceptedEndpoints\n}\n@@ -235,7 +241,20 @@ func (e *endpoint) afterLoad() {\nbind()\ntcpip.AsyncLoading.Done()\n}()\n- case stateClosed, stateError:\n+ case stateClosed:\n+ if e.isPortReserved {\n+ tcpip.AsyncLoading.Add(1)\n+ go func() {\n+ connectedLoading.Wait()\n+ listenLoading.Wait()\n+ connectingLoading.Wait()\n+ bind()\n+ e.state = stateClosed\n+ tcpip.AsyncLoading.Done()\n+ }()\n+ }\n+ fallthrough\n+ case stateError:\ntcpip.DeleteDanglingEndpoint(e)\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
sentry: support save / restore of TCP bind socket after shutdown. PiperOrigin-RevId: 224227677 Change-Id: I08b0e0c0574170556269900653e5bcf9e9e5c9c9
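The saveAcceptedChan change above replaces close-and-drain with a drain-and-refill; a generic version of that pattern over a chan int (purely illustrative, not the endpoint code) looks like this. It only works because, at save time, nothing else is allowed to touch the channel.

package main

import "fmt"

// snapshot copies the buffered contents of ch without closing it, by pulling
// every element out and pushing it back in the same order. It panics if some
// other goroutine consumes or fills the channel concurrently, mirroring the
// invariant the endpoint code relies on during save.
func snapshot(ch chan int) []int {
    n := len(ch)
    out := make([]int, 0, n)
    for i := 0; i < n; i++ {
        select {
        case v := <-ch:
            out = append(out, v)
        default:
            panic("channel drained by someone else")
        }
    }
    for _, v := range out {
        select {
        case ch <- v:
        default:
            panic("channel filled by someone else")
        }
    }
    return out
}

func main() {
    ch := make(chan int, 4)
    ch <- 1
    ch <- 2
    fmt.Println(snapshot(ch), len(ch)) // [1 2] 2
}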
259,975
06.12.2018 09:25:57
28,800
4d8c7ae869a4e9bf60c7ea9aff79a0bee551fbc9
Fixing O_TRUNC behavior to match Linux.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/flags.go", "new_path": "pkg/sentry/syscalls/linux/flags.go", "diff": "@@ -22,6 +22,9 @@ import (\n// flagsToPermissions returns a Permissions object from Linux flags.\n// This includes truncate permission if O_TRUNC is set in the mask.\nfunc flagsToPermissions(mask uint) (p fs.PermMask) {\n+ if mask&linux.O_TRUNC != 0 {\n+ p.Write = true\n+ }\nswitch mask & linux.O_ACCMODE {\ncase linux.O_WRONLY:\np.Write = true\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_file.go", "new_path": "pkg/sentry/syscalls/linux/sys_file.go", "diff": "@@ -170,7 +170,7 @@ func openAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint) (fd u\nif dirPath {\nreturn syserror.ENOTDIR\n}\n- if fileFlags.Write && flags&linux.O_TRUNC != 0 {\n+ if flags&linux.O_TRUNC != 0 {\nif err := d.Inode.Truncate(t, d, 0); err != nil {\nreturn err\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Fixing O_TRUNC behavior to match Linux. PiperOrigin-RevId: 224351139 Change-Id: I9453bd75e5a8d38db406bb47fdc01038ac60922e
259,985
06.12.2018 11:14:57
28,800
685eaf119ffa6c44c4dcaec0e083bbdc0271231a
Add counters for memory events. Also ensure an event is emitted at startup.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/memevent/BUILD", "new_path": "pkg/sentry/kernel/memevent/BUILD", "diff": "@@ -12,6 +12,7 @@ go_library(\n\":memory_events_go_proto\",\n\"//pkg/eventchannel\",\n\"//pkg/log\",\n+ \"//pkg/metric\",\n\"//pkg/sentry/kernel\",\n\"//pkg/sentry/usage\",\n],\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/memevent/memory_events.go", "new_path": "pkg/sentry/kernel/memevent/memory_events.go", "diff": "@@ -22,11 +22,15 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/eventchannel\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n+ \"gvisor.googlesource.com/gvisor/pkg/metric\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\npb \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/memevent/memory_events_go_proto\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usage\"\n)\n+var totalTicks = metric.MustCreateNewUint64Metric(\"/memory_events/ticks\", false /*sync*/, \"Total number of memory event periods that have elapsed since startup.\")\n+var totalEvents = metric.MustCreateNewUint64Metric(\"/memory_events/events\", false /*sync*/, \"Total number of memory events emitted.\")\n+\n// MemoryEvents describes the configuration for the global memory event emitter.\ntype MemoryEvents struct {\nk *kernel.Kernel\n@@ -71,6 +75,10 @@ func (m *MemoryEvents) Start() {\nfunc (m *MemoryEvents) run() {\nm.done.Add(1)\n+ // Emit the first event immediately on startup.\n+ totalTicks.Increment()\n+ m.emit()\n+\nticker := time.NewTicker(m.period)\ndefer ticker.Stop()\n@@ -80,6 +88,7 @@ func (m *MemoryEvents) run() {\nm.done.Done()\nreturn\ncase <-ticker.C:\n+ totalTicks.Increment()\nm.emit()\n}\n}\n@@ -94,6 +103,7 @@ func (m *MemoryEvents) emit() {\nsnapshot, _ := usage.MemoryAccounting.Copy()\ntotal := totalPlatform + snapshot.Mapped\n+ totalEvents.Increment()\neventchannel.Emit(&pb.MemoryUsageEvent{\nMapped: snapshot.Mapped,\nTotal: total,\n" } ]
Go
Apache License 2.0
google/gvisor
Add counters for memory events. Also ensure an event is emitted at startup. PiperOrigin-RevId: 224372065 Change-Id: I5f642b6d6b13c6468ee8f794effe285fcbbf29cf
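The "emit once at startup, then on every tick" shape from the change above, as a tiny standalone loop; the stop channel is an assumption for the sketch, since the real code uses its own done/stop plumbing.

package main

import (
    "fmt"
    "time"
)

// run emits immediately, then once per period until stop is closed.
func run(period time.Duration, emit func(), stop <-chan struct{}) {
    emit() // first event right away, not one period later
    t := time.NewTicker(period)
    defer t.Stop()
    for {
        select {
        case <-t.C:
            emit()
        case <-stop:
            return
        }
    }
}

func main() {
    stop := make(chan struct{})
    count := 0
    go func() {
        time.Sleep(35 * time.Millisecond)
        close(stop)
    }()
    run(10*time.Millisecond, func() { count++; fmt.Println("emit", count) }, stop)
}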
259,881
06.12.2018 11:42:23
28,800
666db00c262c7d6d6359fbaba28e344d015a7823
Convert ValueSet to a map. Unlike FlagSet, order doesn't matter here, so it can simply be a map.
[ { "change_type": "MODIFY", "old_path": "pkg/abi/flag.go", "new_path": "pkg/abi/flag.go", "diff": "@@ -46,30 +46,25 @@ func (s FlagSet) Parse(val uint64) string {\nreturn strings.Join(flags, \"|\")\n}\n-// ValueSet is a slice of syscall values and their name. Parse will replace\n-// values that exactly match an entry with its name.\n-type ValueSet []struct {\n- Value uint64\n- Name string\n-}\n+// ValueSet is a map of syscall values to their name. Parse will use the name\n+// or the value if unknown.\n+type ValueSet map[uint64]string\n// Parse returns the name of the value associated with `val`. Unknown values\n// are converted to hex.\n-func (e ValueSet) Parse(val uint64) string {\n- for _, f := range e {\n- if val == f.Value {\n- return f.Name\n- }\n+func (s ValueSet) Parse(val uint64) string {\n+ if v, ok := s[val]; ok {\n+ return v\n}\nreturn fmt.Sprintf(\"%#x\", val)\n}\n// ParseName returns the flag value associated with 'name'. Returns false\n// if no value is found.\n-func (e ValueSet) ParseName(name string) (uint64, bool) {\n- for _, f := range e {\n- if name == f.Name {\n- return f.Value, true\n+func (s ValueSet) ParseName(name string) (uint64, bool) {\n+ for k, v := range s {\n+ if v == name {\n+ return k, true\n}\n}\nreturn math.MaxUint64, false\n" }, { "change_type": "MODIFY", "old_path": "pkg/abi/linux/file.go", "new_path": "pkg/abi/linux/file.go", "diff": "@@ -223,32 +223,11 @@ var modeExtraBits = abi.FlagSet{\n}\nvar fileType = abi.ValueSet{\n- {\n- Value: ModeSocket,\n- Name: \"S_IFSOCK\",\n- },\n- {\n- Value: ModeSymlink,\n- Name: \"S_IFLINK\",\n- },\n- {\n- Value: ModeRegular,\n- Name: \"S_IFREG\",\n- },\n- {\n- Value: ModeBlockDevice,\n- Name: \"S_IFBLK\",\n- },\n- {\n- Value: ModeDirectory,\n- Name: \"S_IFDIR\",\n- },\n- {\n- Value: ModeCharacterDevice,\n- Name: \"S_IFCHR\",\n- },\n- {\n- Value: ModeNamedPipe,\n- Name: \"S_IFIFO\",\n- },\n+ ModeSocket: \"S_IFSOCK\",\n+ ModeSymlink: \"S_IFLINK\",\n+ ModeRegular: \"S_IFREG\",\n+ ModeBlockDevice: \"S_IFBLK\",\n+ ModeDirectory: \"S_IFDIR\",\n+ ModeCharacterDevice: \"S_IFCHR\",\n+ ModeNamedPipe: \"S_IFIFO\",\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/strace/futex.go", "new_path": "pkg/sentry/strace/futex.go", "diff": "@@ -21,58 +21,19 @@ import (\n// FutexCmd are the possible futex(2) commands.\nvar FutexCmd = abi.ValueSet{\n- {\n- Value: linux.FUTEX_WAIT,\n- Name: \"FUTEX_WAIT\",\n- },\n- {\n- Value: linux.FUTEX_WAKE,\n- Name: \"FUTEX_WAKE\",\n- },\n- {\n- Value: linux.FUTEX_FD,\n- Name: \"FUTEX_FD\",\n- },\n- {\n- Value: linux.FUTEX_REQUEUE,\n- Name: \"FUTEX_REQUEUE\",\n- },\n- {\n- Value: linux.FUTEX_CMP_REQUEUE,\n- Name: \"FUTEX_CMP_REQUEUE\",\n- },\n- {\n- Value: linux.FUTEX_WAKE_OP,\n- Name: \"FUTEX_WAKE_OP\",\n- },\n- {\n- Value: linux.FUTEX_LOCK_PI,\n- Name: \"FUTEX_LOCK_PI\",\n- },\n- {\n- Value: linux.FUTEX_UNLOCK_PI,\n- Name: \"FUTEX_UNLOCK_PI\",\n- },\n- {\n- Value: linux.FUTEX_TRYLOCK_PI,\n- Name: \"FUTEX_TRYLOCK_PI\",\n- },\n- {\n- Value: linux.FUTEX_WAIT_BITSET,\n- Name: \"FUTEX_WAIT_BITSET\",\n- },\n- {\n- Value: linux.FUTEX_WAKE_BITSET,\n- Name: \"FUTEX_WAKE_BITSET\",\n- },\n- {\n- Value: linux.FUTEX_WAIT_REQUEUE_PI,\n- Name: \"FUTEX_WAIT_REQUEUE_PI\",\n- },\n- {\n- Value: linux.FUTEX_CMP_REQUEUE_PI,\n- Name: \"FUTEX_CMP_REQUEUE_PI\",\n- },\n+ linux.FUTEX_WAIT: \"FUTEX_WAIT\",\n+ linux.FUTEX_WAKE: \"FUTEX_WAKE\",\n+ linux.FUTEX_FD: \"FUTEX_FD\",\n+ linux.FUTEX_REQUEUE: \"FUTEX_REQUEUE\",\n+ linux.FUTEX_CMP_REQUEUE: \"FUTEX_CMP_REQUEUE\",\n+ linux.FUTEX_WAKE_OP: \"FUTEX_WAKE_OP\",\n+ 
linux.FUTEX_LOCK_PI: \"FUTEX_LOCK_PI\",\n+ linux.FUTEX_UNLOCK_PI: \"FUTEX_UNLOCK_PI\",\n+ linux.FUTEX_TRYLOCK_PI: \"FUTEX_TRYLOCK_PI\",\n+ linux.FUTEX_WAIT_BITSET: \"FUTEX_WAIT_BITSET\",\n+ linux.FUTEX_WAKE_BITSET: \"FUTEX_WAKE_BITSET\",\n+ linux.FUTEX_WAIT_REQUEUE_PI: \"FUTEX_WAIT_REQUEUE_PI\",\n+ linux.FUTEX_CMP_REQUEUE_PI: \"FUTEX_CMP_REQUEUE_PI\",\n}\nfunc futex(op uint64) string {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/strace/open.go", "new_path": "pkg/sentry/strace/open.go", "diff": "@@ -22,18 +22,9 @@ import (\n// OpenMode represents the mode to open(2) a file.\nvar OpenMode = abi.ValueSet{\n- {\n- Value: syscall.O_RDWR,\n- Name: \"O_RDWR\",\n- },\n- {\n- Value: syscall.O_WRONLY,\n- Name: \"O_WRONLY\",\n- },\n- {\n- Value: syscall.O_RDONLY,\n- Name: \"O_RDONLY\",\n- },\n+ syscall.O_RDWR: \"O_RDWR\",\n+ syscall.O_WRONLY: \"O_WRONLY\",\n+ syscall.O_RDONLY: \"O_RDONLY\",\n}\n// OpenFlagSet is the set of open(2) flags.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/strace/ptrace.go", "new_path": "pkg/sentry/strace/ptrace.go", "diff": "@@ -21,156 +21,42 @@ import (\n// PtraceRequestSet are the possible ptrace(2) requests.\nvar PtraceRequestSet = abi.ValueSet{\n- {\n- Value: linux.PTRACE_TRACEME,\n- Name: \"PTRACE_TRACEME\",\n- },\n- {\n- Value: linux.PTRACE_PEEKTEXT,\n- Name: \"PTRACE_PEEKTEXT\",\n- },\n- {\n- Value: linux.PTRACE_PEEKDATA,\n- Name: \"PTRACE_PEEKDATA\",\n- },\n- {\n- Value: linux.PTRACE_PEEKUSR,\n- Name: \"PTRACE_PEEKUSR\",\n- },\n- {\n- Value: linux.PTRACE_POKETEXT,\n- Name: \"PTRACE_POKETEXT\",\n- },\n- {\n- Value: linux.PTRACE_POKEDATA,\n- Name: \"PTRACE_POKEDATA\",\n- },\n- {\n- Value: linux.PTRACE_POKEUSR,\n- Name: \"PTRACE_POKEUSR\",\n- },\n- {\n- Value: linux.PTRACE_CONT,\n- Name: \"PTRACE_CONT\",\n- },\n- {\n- Value: linux.PTRACE_KILL,\n- Name: \"PTRACE_KILL\",\n- },\n- {\n- Value: linux.PTRACE_SINGLESTEP,\n- Name: \"PTRACE_SINGLESTEP\",\n- },\n- {\n- Value: linux.PTRACE_ATTACH,\n- Name: \"PTRACE_ATTACH\",\n- },\n- {\n- Value: linux.PTRACE_DETACH,\n- Name: \"PTRACE_DETACH\",\n- },\n- {\n- Value: linux.PTRACE_SYSCALL,\n- Name: \"PTRACE_SYSCALL\",\n- },\n- {\n- Value: linux.PTRACE_SETOPTIONS,\n- Name: \"PTRACE_SETOPTIONS\",\n- },\n- {\n- Value: linux.PTRACE_GETEVENTMSG,\n- Name: \"PTRACE_GETEVENTMSG\",\n- },\n- {\n- Value: linux.PTRACE_GETSIGINFO,\n- Name: \"PTRACE_GETSIGINFO\",\n- },\n- {\n- Value: linux.PTRACE_SETSIGINFO,\n- Name: \"PTRACE_SETSIGINFO\",\n- },\n- {\n- Value: linux.PTRACE_GETREGSET,\n- Name: \"PTRACE_GETREGSET\",\n- },\n- {\n- Value: linux.PTRACE_SETREGSET,\n- Name: \"PTRACE_SETREGSET\",\n- },\n- {\n- Value: linux.PTRACE_SEIZE,\n- Name: \"PTRACE_SEIZE\",\n- },\n- {\n- Value: linux.PTRACE_INTERRUPT,\n- Name: \"PTRACE_INTERRUPT\",\n- },\n- {\n- Value: linux.PTRACE_LISTEN,\n- Name: \"PTRACE_LISTEN\",\n- },\n- {\n- Value: linux.PTRACE_PEEKSIGINFO,\n- Name: \"PTRACE_PEEKSIGINFO\",\n- },\n- {\n- Value: linux.PTRACE_GETSIGMASK,\n- Name: \"PTRACE_GETSIGMASK\",\n- },\n- {\n- Value: linux.PTRACE_SETSIGMASK,\n- Name: \"PTRACE_SETSIGMASK\",\n- },\n- {\n- Value: linux.PTRACE_GETREGS,\n- Name: \"PTRACE_GETREGS\",\n- },\n- {\n- Value: linux.PTRACE_SETREGS,\n- Name: \"PTRACE_SETREGS\",\n- },\n- {\n- Value: linux.PTRACE_GETFPREGS,\n- Name: \"PTRACE_GETFPREGS\",\n- },\n- {\n- Value: linux.PTRACE_SETFPREGS,\n- Name: \"PTRACE_SETFPREGS\",\n- },\n- {\n- Value: linux.PTRACE_GETFPXREGS,\n- Name: \"PTRACE_GETFPXREGS\",\n- },\n- {\n- Value: linux.PTRACE_SETFPXREGS,\n- Name: \"PTRACE_SETFPXREGS\",\n- },\n- {\n- Value: 
linux.PTRACE_OLDSETOPTIONS,\n- Name: \"PTRACE_OLDSETOPTIONS\",\n- },\n- {\n- Value: linux.PTRACE_GET_THREAD_AREA,\n- Name: \"PTRACE_GET_THREAD_AREA\",\n- },\n- {\n- Value: linux.PTRACE_SET_THREAD_AREA,\n- Name: \"PTRACE_SET_THREAD_AREA\",\n- },\n- {\n- Value: linux.PTRACE_ARCH_PRCTL,\n- Name: \"PTRACE_ARCH_PRCTL\",\n- },\n- {\n- Value: linux.PTRACE_SYSEMU,\n- Name: \"PTRACE_SYSEMU\",\n- },\n- {\n- Value: linux.PTRACE_SYSEMU_SINGLESTEP,\n- Name: \"PTRACE_SYSEMU_SINGLESTEP\",\n- },\n- {\n- Value: linux.PTRACE_SINGLEBLOCK,\n- Name: \"PTRACE_SINGLEBLOCK\",\n- },\n+ linux.PTRACE_TRACEME: \"PTRACE_TRACEME\",\n+ linux.PTRACE_PEEKTEXT: \"PTRACE_PEEKTEXT\",\n+ linux.PTRACE_PEEKDATA: \"PTRACE_PEEKDATA\",\n+ linux.PTRACE_PEEKUSR: \"PTRACE_PEEKUSR\",\n+ linux.PTRACE_POKETEXT: \"PTRACE_POKETEXT\",\n+ linux.PTRACE_POKEDATA: \"PTRACE_POKEDATA\",\n+ linux.PTRACE_POKEUSR: \"PTRACE_POKEUSR\",\n+ linux.PTRACE_CONT: \"PTRACE_CONT\",\n+ linux.PTRACE_KILL: \"PTRACE_KILL\",\n+ linux.PTRACE_SINGLESTEP: \"PTRACE_SINGLESTEP\",\n+ linux.PTRACE_ATTACH: \"PTRACE_ATTACH\",\n+ linux.PTRACE_DETACH: \"PTRACE_DETACH\",\n+ linux.PTRACE_SYSCALL: \"PTRACE_SYSCALL\",\n+ linux.PTRACE_SETOPTIONS: \"PTRACE_SETOPTIONS\",\n+ linux.PTRACE_GETEVENTMSG: \"PTRACE_GETEVENTMSG\",\n+ linux.PTRACE_GETSIGINFO: \"PTRACE_GETSIGINFO\",\n+ linux.PTRACE_SETSIGINFO: \"PTRACE_SETSIGINFO\",\n+ linux.PTRACE_GETREGSET: \"PTRACE_GETREGSET\",\n+ linux.PTRACE_SETREGSET: \"PTRACE_SETREGSET\",\n+ linux.PTRACE_SEIZE: \"PTRACE_SEIZE\",\n+ linux.PTRACE_INTERRUPT: \"PTRACE_INTERRUPT\",\n+ linux.PTRACE_LISTEN: \"PTRACE_LISTEN\",\n+ linux.PTRACE_PEEKSIGINFO: \"PTRACE_PEEKSIGINFO\",\n+ linux.PTRACE_GETSIGMASK: \"PTRACE_GETSIGMASK\",\n+ linux.PTRACE_SETSIGMASK: \"PTRACE_SETSIGMASK\",\n+ linux.PTRACE_GETREGS: \"PTRACE_GETREGS\",\n+ linux.PTRACE_SETREGS: \"PTRACE_SETREGS\",\n+ linux.PTRACE_GETFPREGS: \"PTRACE_GETFPREGS\",\n+ linux.PTRACE_SETFPREGS: \"PTRACE_SETFPREGS\",\n+ linux.PTRACE_GETFPXREGS: \"PTRACE_GETFPXREGS\",\n+ linux.PTRACE_SETFPXREGS: \"PTRACE_SETFPXREGS\",\n+ linux.PTRACE_OLDSETOPTIONS: \"PTRACE_OLDSETOPTIONS\",\n+ linux.PTRACE_GET_THREAD_AREA: \"PTRACE_GET_THREAD_AREA\",\n+ linux.PTRACE_SET_THREAD_AREA: \"PTRACE_SET_THREAD_AREA\",\n+ linux.PTRACE_ARCH_PRCTL: \"PTRACE_ARCH_PRCTL\",\n+ linux.PTRACE_SYSEMU: \"PTRACE_SYSEMU\",\n+ linux.PTRACE_SYSEMU_SINGLESTEP: \"PTRACE_SYSEMU_SINGLESTEP\",\n+ linux.PTRACE_SINGLEBLOCK: \"PTRACE_SINGLEBLOCK\",\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/strace/socket.go", "new_path": "pkg/sentry/strace/socket.go", "diff": "@@ -31,202 +31,58 @@ import (\n// SocketFamily are the possible socket(2) families.\nvar SocketFamily = abi.ValueSet{\n- {\n- Value: linux.AF_UNSPEC,\n- Name: \"AF_UNSPEC\",\n- },\n- {\n- Value: linux.AF_UNIX,\n- Name: \"AF_UNIX\",\n- },\n- {\n- Value: linux.AF_INET,\n- Name: \"AF_INET\",\n- },\n- {\n- Value: linux.AF_AX25,\n- Name: \"AF_AX25\",\n- },\n- {\n- Value: linux.AF_IPX,\n- Name: \"AF_IPX\",\n- },\n- {\n- Value: linux.AF_APPLETALK,\n- Name: \"AF_APPLETALK\",\n- },\n- {\n- Value: linux.AF_NETROM,\n- Name: \"AF_NETROM\",\n- },\n- {\n- Value: linux.AF_BRIDGE,\n- Name: \"AF_BRIDGE\",\n- },\n- {\n- Value: linux.AF_ATMPVC,\n- Name: \"AF_ATMPVC\",\n- },\n- {\n- Value: linux.AF_X25,\n- Name: \"AF_X25\",\n- },\n- {\n- Value: linux.AF_INET6,\n- Name: \"AF_INET6\",\n- },\n- {\n- Value: linux.AF_ROSE,\n- Name: \"AF_ROSE\",\n- },\n- {\n- Value: linux.AF_DECnet,\n- Name: \"AF_DECnet\",\n- },\n- {\n- Value: linux.AF_NETBEUI,\n- Name: \"AF_NETBEUI\",\n- },\n- {\n- Value: linux.AF_SECURITY,\n- 
Name: \"AF_SECURITY\",\n- },\n- {\n- Value: linux.AF_KEY,\n- Name: \"AF_KEY\",\n- },\n- {\n- Value: linux.AF_NETLINK,\n- Name: \"AF_NETLINK\",\n- },\n- {\n- Value: linux.AF_PACKET,\n- Name: \"AF_PACKET\",\n- },\n- {\n- Value: linux.AF_ASH,\n- Name: \"AF_ASH\",\n- },\n- {\n- Value: linux.AF_ECONET,\n- Name: \"AF_ECONET\",\n- },\n- {\n- Value: linux.AF_ATMSVC,\n- Name: \"AF_ATMSVC\",\n- },\n- {\n- Value: linux.AF_RDS,\n- Name: \"AF_RDS\",\n- },\n- {\n- Value: linux.AF_SNA,\n- Name: \"AF_SNA\",\n- },\n- {\n- Value: linux.AF_IRDA,\n- Name: \"AF_IRDA\",\n- },\n- {\n- Value: linux.AF_PPPOX,\n- Name: \"AF_PPPOX\",\n- },\n- {\n- Value: linux.AF_WANPIPE,\n- Name: \"AF_WANPIPE\",\n- },\n- {\n- Value: linux.AF_LLC,\n- Name: \"AF_LLC\",\n- },\n- {\n- Value: linux.AF_IB,\n- Name: \"AF_IB\",\n- },\n- {\n- Value: linux.AF_MPLS,\n- Name: \"AF_MPLS\",\n- },\n- {\n- Value: linux.AF_CAN,\n- Name: \"AF_CAN\",\n- },\n- {\n- Value: linux.AF_TIPC,\n- Name: \"AF_TIPC\",\n- },\n- {\n- Value: linux.AF_BLUETOOTH,\n- Name: \"AF_BLUETOOTH\",\n- },\n- {\n- Value: linux.AF_IUCV,\n- Name: \"AF_IUCV\",\n- },\n- {\n- Value: linux.AF_RXRPC,\n- Name: \"AF_RXRPC\",\n- },\n- {\n- Value: linux.AF_ISDN,\n- Name: \"AF_ISDN\",\n- },\n- {\n- Value: linux.AF_PHONET,\n- Name: \"AF_PHONET\",\n- },\n- {\n- Value: linux.AF_IEEE802154,\n- Name: \"AF_IEEE802154\",\n- },\n- {\n- Value: linux.AF_CAIF,\n- Name: \"AF_CAIF\",\n- },\n- {\n- Value: linux.AF_ALG,\n- Name: \"AF_ALG\",\n- },\n- {\n- Value: linux.AF_NFC,\n- Name: \"AF_NFC\",\n- },\n- {\n- Value: linux.AF_VSOCK,\n- Name: \"AF_VSOCK\",\n- },\n+ linux.AF_UNSPEC: \"AF_UNSPEC\",\n+ linux.AF_UNIX: \"AF_UNIX\",\n+ linux.AF_INET: \"AF_INET\",\n+ linux.AF_AX25: \"AF_AX25\",\n+ linux.AF_IPX: \"AF_IPX\",\n+ linux.AF_APPLETALK: \"AF_APPLETALK\",\n+ linux.AF_NETROM: \"AF_NETROM\",\n+ linux.AF_BRIDGE: \"AF_BRIDGE\",\n+ linux.AF_ATMPVC: \"AF_ATMPVC\",\n+ linux.AF_X25: \"AF_X25\",\n+ linux.AF_INET6: \"AF_INET6\",\n+ linux.AF_ROSE: \"AF_ROSE\",\n+ linux.AF_DECnet: \"AF_DECnet\",\n+ linux.AF_NETBEUI: \"AF_NETBEUI\",\n+ linux.AF_SECURITY: \"AF_SECURITY\",\n+ linux.AF_KEY: \"AF_KEY\",\n+ linux.AF_NETLINK: \"AF_NETLINK\",\n+ linux.AF_PACKET: \"AF_PACKET\",\n+ linux.AF_ASH: \"AF_ASH\",\n+ linux.AF_ECONET: \"AF_ECONET\",\n+ linux.AF_ATMSVC: \"AF_ATMSVC\",\n+ linux.AF_RDS: \"AF_RDS\",\n+ linux.AF_SNA: \"AF_SNA\",\n+ linux.AF_IRDA: \"AF_IRDA\",\n+ linux.AF_PPPOX: \"AF_PPPOX\",\n+ linux.AF_WANPIPE: \"AF_WANPIPE\",\n+ linux.AF_LLC: \"AF_LLC\",\n+ linux.AF_IB: \"AF_IB\",\n+ linux.AF_MPLS: \"AF_MPLS\",\n+ linux.AF_CAN: \"AF_CAN\",\n+ linux.AF_TIPC: \"AF_TIPC\",\n+ linux.AF_BLUETOOTH: \"AF_BLUETOOTH\",\n+ linux.AF_IUCV: \"AF_IUCV\",\n+ linux.AF_RXRPC: \"AF_RXRPC\",\n+ linux.AF_ISDN: \"AF_ISDN\",\n+ linux.AF_PHONET: \"AF_PHONET\",\n+ linux.AF_IEEE802154: \"AF_IEEE802154\",\n+ linux.AF_CAIF: \"AF_CAIF\",\n+ linux.AF_ALG: \"AF_ALG\",\n+ linux.AF_NFC: \"AF_NFC\",\n+ linux.AF_VSOCK: \"AF_VSOCK\",\n}\n// SocketType are the possible socket(2) types.\nvar SocketType = abi.ValueSet{\n- {\n- Value: linux.SOCK_STREAM,\n- Name: \"SOCK_STREAM\",\n- },\n- {\n- Value: linux.SOCK_DGRAM,\n- Name: \"SOCK_DGRAM\",\n- },\n- {\n- Value: linux.SOCK_RAW,\n- Name: \"SOCK_RAW\",\n- },\n- {\n- Value: linux.SOCK_RDM,\n- Name: \"SOCK_RDM\",\n- },\n- {\n- Value: linux.SOCK_SEQPACKET,\n- Name: \"SOCK_SEQPACKET\",\n- },\n- {\n- Value: linux.SOCK_DCCP,\n- Name: \"SOCK_DCCP\",\n- },\n- {\n- Value: linux.SOCK_PACKET,\n- Name: \"SOCK_PACKET\",\n- },\n+ linux.SOCK_STREAM: \"SOCK_STREAM\",\n+ linux.SOCK_DGRAM: \"SOCK_DGRAM\",\n+ linux.SOCK_RAW: 
\"SOCK_RAW\",\n+ linux.SOCK_RDM: \"SOCK_RDM\",\n+ linux.SOCK_SEQPACKET: \"SOCK_SEQPACKET\",\n+ linux.SOCK_DCCP: \"SOCK_DCCP\",\n+ linux.SOCK_PACKET: \"SOCK_PACKET\",\n}\n// SocketFlagSet are the possible socket(2) flags.\n@@ -243,106 +99,31 @@ var SocketFlagSet = abi.FlagSet{\n// ipProtocol are the possible socket(2) types for INET and INET6 sockets.\nvar ipProtocol = abi.ValueSet{\n- {\n- Value: linux.IPPROTO_IP,\n- Name: \"IPPROTO_IP\",\n- },\n- {\n- Value: linux.IPPROTO_ICMP,\n- Name: \"IPPROTO_ICMP\",\n- },\n- {\n- Value: linux.IPPROTO_IGMP,\n- Name: \"IPPROTO_IGMP\",\n- },\n- {\n- Value: linux.IPPROTO_IPIP,\n- Name: \"IPPROTO_IPIP\",\n- },\n- {\n- Value: linux.IPPROTO_TCP,\n- Name: \"IPPROTO_TCP\",\n- },\n- {\n- Value: linux.IPPROTO_EGP,\n- Name: \"IPPROTO_EGP\",\n- },\n- {\n- Value: linux.IPPROTO_PUP,\n- Name: \"IPPROTO_PUP\",\n- },\n- {\n- Value: linux.IPPROTO_UDP,\n- Name: \"IPPROTO_UDP\",\n- },\n- {\n- Value: linux.IPPROTO_IDP,\n- Name: \"IPPROTO_IDP\",\n- },\n- {\n- Value: linux.IPPROTO_TP,\n- Name: \"IPPROTO_TP\",\n- },\n- {\n- Value: linux.IPPROTO_DCCP,\n- Name: \"IPPROTO_DCCP\",\n- },\n- {\n- Value: linux.IPPROTO_IPV6,\n- Name: \"IPPROTO_IPV6\",\n- },\n- {\n- Value: linux.IPPROTO_RSVP,\n- Name: \"IPPROTO_RSVP\",\n- },\n- {\n- Value: linux.IPPROTO_GRE,\n- Name: \"IPPROTO_GRE\",\n- },\n- {\n- Value: linux.IPPROTO_ESP,\n- Name: \"IPPROTO_ESP\",\n- },\n- {\n- Value: linux.IPPROTO_AH,\n- Name: \"IPPROTO_AH\",\n- },\n- {\n- Value: linux.IPPROTO_MTP,\n- Name: \"IPPROTO_MTP\",\n- },\n- {\n- Value: linux.IPPROTO_BEETPH,\n- Name: \"IPPROTO_BEETPH\",\n- },\n- {\n- Value: linux.IPPROTO_ENCAP,\n- Name: \"IPPROTO_ENCAP\",\n- },\n- {\n- Value: linux.IPPROTO_PIM,\n- Name: \"IPPROTO_PIM\",\n- },\n- {\n- Value: linux.IPPROTO_COMP,\n- Name: \"IPPROTO_COMP\",\n- },\n- {\n- Value: linux.IPPROTO_SCTP,\n- Name: \"IPPROTO_SCTP\",\n- },\n- {\n- Value: linux.IPPROTO_UDPLITE,\n- Name: \"IPPROTO_UDPLITE\",\n- },\n- {\n- Value: linux.IPPROTO_MPLS,\n- Name: \"IPPROTO_MPLS\",\n- },\n- {\n- Value: linux.IPPROTO_RAW,\n- Name: \"IPPROTO_RAW\",\n- },\n+ linux.IPPROTO_IP: \"IPPROTO_IP\",\n+ linux.IPPROTO_ICMP: \"IPPROTO_ICMP\",\n+ linux.IPPROTO_IGMP: \"IPPROTO_IGMP\",\n+ linux.IPPROTO_IPIP: \"IPPROTO_IPIP\",\n+ linux.IPPROTO_TCP: \"IPPROTO_TCP\",\n+ linux.IPPROTO_EGP: \"IPPROTO_EGP\",\n+ linux.IPPROTO_PUP: \"IPPROTO_PUP\",\n+ linux.IPPROTO_UDP: \"IPPROTO_UDP\",\n+ linux.IPPROTO_IDP: \"IPPROTO_IDP\",\n+ linux.IPPROTO_TP: \"IPPROTO_TP\",\n+ linux.IPPROTO_DCCP: \"IPPROTO_DCCP\",\n+ linux.IPPROTO_IPV6: \"IPPROTO_IPV6\",\n+ linux.IPPROTO_RSVP: \"IPPROTO_RSVP\",\n+ linux.IPPROTO_GRE: \"IPPROTO_GRE\",\n+ linux.IPPROTO_ESP: \"IPPROTO_ESP\",\n+ linux.IPPROTO_AH: \"IPPROTO_AH\",\n+ linux.IPPROTO_MTP: \"IPPROTO_MTP\",\n+ linux.IPPROTO_BEETPH: \"IPPROTO_BEETPH\",\n+ linux.IPPROTO_ENCAP: \"IPPROTO_ENCAP\",\n+ linux.IPPROTO_PIM: \"IPPROTO_PIM\",\n+ linux.IPPROTO_COMP: \"IPPROTO_COMP\",\n+ linux.IPPROTO_SCTP: \"IPPROTO_SCTP\",\n+ linux.IPPROTO_UDPLITE: \"IPPROTO_UDPLITE\",\n+ linux.IPPROTO_MPLS: \"IPPROTO_MPLS\",\n+ linux.IPPROTO_RAW: \"IPPROTO_RAW\",\n}\n// SocketProtocol are the possible socket(2) protocols for each protocol family.\n@@ -350,90 +131,27 @@ var SocketProtocol = map[int32]abi.ValueSet{\nlinux.AF_INET: ipProtocol,\nlinux.AF_INET6: ipProtocol,\nlinux.AF_NETLINK: {\n- {\n- Value: linux.NETLINK_ROUTE,\n- Name: \"NETLINK_ROUTE\",\n- },\n- {\n- Value: linux.NETLINK_UNUSED,\n- Name: \"NETLINK_UNUSED\",\n- },\n- {\n- Value: linux.NETLINK_USERSOCK,\n- Name: \"NETLINK_USERSOCK\",\n- },\n- {\n- Value: 
linux.NETLINK_FIREWALL,\n- Name: \"NETLINK_FIREWALL\",\n- },\n- {\n- Value: linux.NETLINK_SOCK_DIAG,\n- Name: \"NETLINK_SOCK_DIAG\",\n- },\n- {\n- Value: linux.NETLINK_NFLOG,\n- Name: \"NETLINK_NFLOG\",\n- },\n- {\n- Value: linux.NETLINK_XFRM,\n- Name: \"NETLINK_XFRM\",\n- },\n- {\n- Value: linux.NETLINK_SELINUX,\n- Name: \"NETLINK_SELINUX\",\n- },\n- {\n- Value: linux.NETLINK_ISCSI,\n- Name: \"NETLINK_ISCSI\",\n- },\n- {\n- Value: linux.NETLINK_AUDIT,\n- Name: \"NETLINK_AUDIT\",\n- },\n- {\n- Value: linux.NETLINK_FIB_LOOKUP,\n- Name: \"NETLINK_FIB_LOOKUP\",\n- },\n- {\n- Value: linux.NETLINK_CONNECTOR,\n- Name: \"NETLINK_CONNECTOR\",\n- },\n- {\n- Value: linux.NETLINK_NETFILTER,\n- Name: \"NETLINK_NETFILTER\",\n- },\n- {\n- Value: linux.NETLINK_IP6_FW,\n- Name: \"NETLINK_IP6_FW\",\n- },\n- {\n- Value: linux.NETLINK_DNRTMSG,\n- Name: \"NETLINK_DNRTMSG\",\n- },\n- {\n- Value: linux.NETLINK_KOBJECT_UEVENT,\n- Name: \"NETLINK_KOBJECT_UEVENT\",\n- },\n- {\n- Value: linux.NETLINK_GENERIC,\n- Name: \"NETLINK_GENERIC\",\n- },\n- {\n- Value: linux.NETLINK_SCSITRANSPORT,\n- Name: \"NETLINK_SCSITRANSPORT\",\n- },\n- {\n- Value: linux.NETLINK_ECRYPTFS,\n- Name: \"NETLINK_ECRYPTFS\",\n- },\n- {\n- Value: linux.NETLINK_RDMA,\n- Name: \"NETLINK_RDMA\",\n- },\n- {\n- Value: linux.NETLINK_CRYPTO,\n- Name: \"NETLINK_CRYPTO\",\n- },\n+ linux.NETLINK_ROUTE: \"NETLINK_ROUTE\",\n+ linux.NETLINK_UNUSED: \"NETLINK_UNUSED\",\n+ linux.NETLINK_USERSOCK: \"NETLINK_USERSOCK\",\n+ linux.NETLINK_FIREWALL: \"NETLINK_FIREWALL\",\n+ linux.NETLINK_SOCK_DIAG: \"NETLINK_SOCK_DIAG\",\n+ linux.NETLINK_NFLOG: \"NETLINK_NFLOG\",\n+ linux.NETLINK_XFRM: \"NETLINK_XFRM\",\n+ linux.NETLINK_SELINUX: \"NETLINK_SELINUX\",\n+ linux.NETLINK_ISCSI: \"NETLINK_ISCSI\",\n+ linux.NETLINK_AUDIT: \"NETLINK_AUDIT\",\n+ linux.NETLINK_FIB_LOOKUP: \"NETLINK_FIB_LOOKUP\",\n+ linux.NETLINK_CONNECTOR: \"NETLINK_CONNECTOR\",\n+ linux.NETLINK_NETFILTER: \"NETLINK_NETFILTER\",\n+ linux.NETLINK_IP6_FW: \"NETLINK_IP6_FW\",\n+ linux.NETLINK_DNRTMSG: \"NETLINK_DNRTMSG\",\n+ linux.NETLINK_KOBJECT_UEVENT: \"NETLINK_KOBJECT_UEVENT\",\n+ linux.NETLINK_GENERIC: \"NETLINK_GENERIC\",\n+ linux.NETLINK_SCSITRANSPORT: \"NETLINK_SCSITRANSPORT\",\n+ linux.NETLINK_ECRYPTFS: \"NETLINK_ECRYPTFS\",\n+ linux.NETLINK_RDMA: \"NETLINK_RDMA\",\n+ linux.NETLINK_CRYPTO: \"NETLINK_CRYPTO\",\n},\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/strace/strace.go", "new_path": "pkg/sentry/strace/strace.go", "diff": "@@ -50,18 +50,9 @@ var EventMaximumSize uint\n// ItimerTypes are the possible itimer types.\nvar ItimerTypes = abi.ValueSet{\n- {\n- Value: linux.ITIMER_REAL,\n- Name: \"ITIMER_REAL\",\n- },\n- {\n- Value: linux.ITIMER_VIRTUAL,\n- Name: \"ITIMER_VIRTUAL\",\n- },\n- {\n- Value: linux.ITIMER_PROF,\n- Name: \"ITIMER_PROF\",\n- },\n+ linux.ITIMER_REAL: \"ITIMER_REAL\",\n+ linux.ITIMER_VIRTUAL: \"ITIMER_VIRTUAL\",\n+ linux.ITIMER_PROF: \"ITIMER_PROF\",\n}\nfunc iovecs(t *kernel.Task, addr usermem.Addr, iovcnt int, printContent bool, maxBytes uint64) string {\n" } ]
Go
Apache License 2.0
google/gvisor
Convert ValueSet to a map Unlike FlagSet, order doesn't matter here, so it can simply be a map. PiperOrigin-RevId: 224377910 Change-Id: I15810c698a7f02d8614bf09b59583ab73cba0514
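For context on the record above, the map form keeps Parse's hex fallback for unknown values. Below is a small self-contained Go sketch of that behavior; the ValueSet type is re-declared locally so the sketch compiles on its own, and the sample open-mode values are ordinary Linux constants written out by hand — this is illustrative, not an import of the repository package.

```go
package main

import "fmt"

// ValueSet mirrors the post-change definition from the diff: value -> name.
type ValueSet map[uint64]string

// Parse returns the known name for val, or the value formatted as hex.
func (s ValueSet) Parse(val uint64) string {
	if v, ok := s[val]; ok {
		return v
	}
	return fmt.Sprintf("%#x", val)
}

func main() {
	openMode := ValueSet{0: "O_RDONLY", 1: "O_WRONLY", 2: "O_RDWR"}
	fmt.Println(openMode.Parse(2))  // O_RDWR
	fmt.Println(openMode.Parse(99)) // 0x63 — unknown values fall back to hex
}
```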
259,853
06.12.2018 15:26:58
28,800
1b1a42ba6dc7953db742959a54fd19124348f3fc
A sandbox process should wait until it has not been moved into cgroups
[ { "change_type": "MODIFY", "old_path": "runsc/cmd/boot.go", "new_path": "runsc/cmd/boot.go", "diff": "@@ -69,6 +69,9 @@ type Boot struct {\n// userLogFD is the file descriptor to write user logs to.\nuserLogFD int\n+\n+ // startSyncFD is the file descriptor to synchronize runsc and sandbox.\n+ startSyncFD int\n}\n// Name implements subcommands.Command.Name.\n@@ -99,12 +102,13 @@ func (b *Boot) SetFlags(f *flag.FlagSet) {\nf.IntVar(&b.cpuNum, \"cpu-num\", 0, \"number of CPUs to create inside the sandbox\")\nf.Uint64Var(&b.totalMem, \"total-memory\", 0, \"sets the initial amount of total memory to report back to the container\")\nf.IntVar(&b.userLogFD, \"user-log-fd\", 0, \"file descriptor to write user logs to. 0 means no logging.\")\n+ f.IntVar(&b.startSyncFD, \"start-sync-fd\", -1, \"required FD to used to synchronize sandbox startup\")\n}\n// Execute implements subcommands.Command.Execute. It starts a sandbox in a\n// waiting state.\nfunc (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {\n- if b.specFD == -1 || b.controllerFD == -1 || f.NArg() != 1 {\n+ if b.specFD == -1 || b.controllerFD == -1 || b.startSyncFD == -1 || f.NArg() != 1 {\nf.Usage()\nreturn subcommands.ExitUsageError\n}\n@@ -155,6 +159,14 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\npanic(\"setCapsAndCallSelf must never return success\")\n}\n+ // Wait until this process has been moved into cgroups.\n+ startSyncFile := os.NewFile(uintptr(b.startSyncFD), \"start-sync file\")\n+ defer startSyncFile.Close()\n+ buf := make([]byte, 1)\n+ if r, err := startSyncFile.Read(buf); err != nil || r != 1 {\n+ Fatalf(\"Unable to read from the start-sync descriptor: %v\", err)\n+ }\n+\n// Create the loader.\nbootArgs := boot.Args{\nID: f.Arg(0),\n@@ -173,9 +185,19 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nif err != nil {\nFatalf(\"error creating loader: %v\", err)\n}\n+\n// Fatalf exits the process and doesn't run defers. 
'l' must be destroyed\n// explicitly!\n+ // Notify the parent process the controller has been created.\n+ if w, err := startSyncFile.Write(buf); err != nil || w != 1 {\n+ l.Destroy()\n+ Fatalf(\"Unable to write into the start-sync descriptor: %v\", err)\n+ }\n+ // startSyncFile is closed here to be sure that starting with this point\n+ // the runsc process will not write anything into it.\n+ startSyncFile.Close()\n+\n// Notify other processes the loader has been created.\nl.NotifyLoaderCreated()\n" }, { "change_type": "MODIFY", "old_path": "runsc/sandbox/sandbox.go", "new_path": "runsc/sandbox/sandbox.go", "diff": "@@ -60,7 +60,7 @@ type Sandbox struct {\n// is running in.\nChroot string `json:\"chroot\"`\n- // Ccroup has the cgroup configuration for the sandbox.\n+ // Cgroup has the cgroup configuration for the sandbox.\nCgroup *cgroup.Cgroup `json:\"cgroup\"`\n}\n@@ -69,7 +69,7 @@ type Sandbox struct {\nfunc Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, userLog string, ioFiles []*os.File) (*Sandbox, error) {\ns := &Sandbox{ID: id}\n// The Cleanup object cleans up partially created sandboxes when an error occurs.\n- // Any errors occuring during cleanup itself are ignored.\n+ // Any errors occurring during cleanup itself are ignored.\nc := specutils.MakeCleanup(func() { _ = s.destroy() })\ndefer c.Clean()\n@@ -82,13 +82,25 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo\n}\n}\n- // Create the sandbox process.\n- if err := s.createSandboxProcess(spec, conf, bundleDir, consoleSocket, userLog, ioFiles); err != nil {\n- return nil, err\n+ // Create a socket pair to synchronize runsc and sandbox processes.\n+ // It is used for the following:\n+ // * to notify the sandbox process when it has been moved into cgroups.\n+ // * to wait for the controller socket.\n+ fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_SEQPACKET, 0)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"error creating a start-sync socket pair %q: %v\", s.ID, err)\n}\n+ startSyncFile := os.NewFile(uintptr(fds[0]), \"start-sync socket\")\n+ defer startSyncFile.Close()\n- // Wait for the control server to come up (or timeout).\n- if err := s.waitForCreated(20 * time.Second); err != nil {\n+ sandboxSyncFile := os.NewFile(uintptr(fds[1]), \"sandbox start-sync socket\")\n+\n+ // Create the sandbox process.\n+ err = s.createSandboxProcess(spec, conf, bundleDir, consoleSocket, userLog, ioFiles, sandboxSyncFile)\n+ // sandboxSyncFile has to be closed to be able to detect\n+ // when the sandbox process exits unexpectedly.\n+ sandboxSyncFile.Close()\n+ if err != nil {\nreturn nil, err\n}\n@@ -98,6 +110,24 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo\n}\n}\n+ b := make([]byte, 1)\n+ // Notify the sandbox process it has been moved into cgroups.\n+ if l, err := startSyncFile.Write(b); err != nil || l != 1 {\n+ return nil, fmt.Errorf(\"error writing into the start-sync descriptor: %v\", err)\n+ }\n+ // Wait until the sandbox process has initialized the controller socket.\n+ if l, err := startSyncFile.Read(b); err != nil || l != 1 {\n+ return nil, fmt.Errorf(\"error reading from the start-sync descriptor: %v\", err)\n+ }\n+ // startSyncFile is closed here to be sure that starting with this point\n+ // the sandbox process will not write anything into it.\n+ startSyncFile.Close()\n+\n+ // Wait for the control server to come up.\n+ if err := s.waitForCreated(); err != nil {\n+ return nil, err\n+ }\n+\nc.Release()\nreturn s, 
nil\n}\n@@ -282,7 +312,7 @@ func (s *Sandbox) connError(err error) error {\n// createSandboxProcess starts the sandbox as a subprocess by running the \"boot\"\n// command, passing in the bundle dir.\n-func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, userLog string, ioFiles []*os.File) error {\n+func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, userLog string, ioFiles []*os.File, startSyncFile *os.File) error {\n// nextFD is used to get unused FDs that we can pass to the sandbox. It\n// starts at 3 because 0, 1, and 2 are taken by stdin/out/err.\nnextFD := 3\n@@ -346,6 +376,10 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\ncmd.Args = append(cmd.Args, \"--spec-fd=\"+strconv.Itoa(nextFD))\nnextFD++\n+ cmd.ExtraFiles = append(cmd.ExtraFiles, startSyncFile)\n+ cmd.Args = append(cmd.Args, \"--start-sync-fd=\"+strconv.Itoa(nextFD))\n+ nextFD++\n+\n// If there is a gofer, sends all socket ends to the sandbox.\nfor _, f := range ioFiles {\ndefer f.Close()\n@@ -581,21 +615,8 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\n// waitForCreated waits for the sandbox subprocess control server to be\n// running and for the loader to have been created, at which point the sandbox\n// is in Created state.\n-func (s *Sandbox) waitForCreated(timeout time.Duration) error {\n+func (s *Sandbox) waitForCreated() error {\nlog.Debugf(\"Waiting for sandbox %q creation\", s.ID)\n-\n- ready := func() (bool, error) {\n- c, err := client.ConnectTo(boot.ControlSocketAddr(s.ID))\n- if err != nil {\n- return false, nil\n- }\n- // It's alive!\n- c.Close()\n- return true, nil\n- }\n- if err := specutils.WaitForReady(s.Pid, timeout, ready); err != nil {\n- return fmt.Errorf(\"unexpected error waiting for sandbox %q, err: %v\", s.ID, err)\n- }\nconn, err := s.sandboxConnect()\nif err != nil {\nreturn err\n" } ]
Go
Apache License 2.0
google/gvisor
A sandbox process should wait until it has not been moved into cgroups PiperOrigin-RevId: 224418900 Change-Id: I53cf4d7c1c70117875b6920f8fd3d58a3b1497e9
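The commit above replaces a timeout-based wait with a one-byte handshake over a socketpair: runsc passes one end to the separately exec'd boot process via an inherited descriptor, writes a byte once cgroup setup is done, then waits for the acknowledgement. The sketch below shows only that handshake pattern; a goroutine stands in for the sandbox process so the example stays self-contained, and the names are illustrative rather than runsc code.

```go
package main

import (
	"log"
	"os"
	"syscall"
)

func main() {
	fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_SEQPACKET, 0)
	if err != nil {
		log.Fatal(err)
	}
	parent := os.NewFile(uintptr(fds[0]), "start-sync parent")
	sandbox := os.NewFile(uintptr(fds[1]), "start-sync sandbox")

	done := make(chan struct{})
	go func() {
		defer close(done)
		buf := make([]byte, 1)
		// "Sandbox" side: block until the parent signals that setup
		// (e.g. moving this process into cgroups) has finished.
		if _, err := sandbox.Read(buf); err != nil {
			log.Print("sandbox read: ", err)
			return
		}
		// ...initialize the controller here...
		// Acknowledge back so the parent knows the controller exists.
		sandbox.Write(buf)
	}()

	// "Parent" side: do the setup, release the sandbox, wait for the ack.
	parent.Write([]byte{0})
	ack := make([]byte, 1)
	parent.Read(ack)
	<-done
	log.Println("start-sync handshake complete")
}
```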
259,948
07.12.2018 17:03:06
28,800
9984138abee51d6145469f9298bfeb8a98589709
sentry: turn "dynamically-created" procfs files into static creation.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/proc/proc.go", "new_path": "pkg/sentry/fs/proc/proc.go", "diff": "@@ -85,8 +85,6 @@ func New(ctx context.Context, msrc *fs.MountSource) (*fs.Inode, error) {\np := &proc{k: k, pidns: pidns}\np.InitDir(ctx, map[string]*fs.Inode{\n- // Note that these are just the static members. There are\n- // dynamic members populated in Readdir and Lookup below.\n\"filesystems\": seqfile.NewSeqFileInode(ctx, &filesystemsData{}, msrc),\n\"loadavg\": seqfile.NewSeqFileInode(ctx, &loadavgData{}, msrc),\n\"meminfo\": seqfile.NewSeqFileInode(ctx, &meminfoData{k}, msrc),\n@@ -96,12 +94,23 @@ func New(ctx context.Context, msrc *fs.MountSource) (*fs.Inode, error) {\n}, fs.RootOwner, fs.FilePermsFromMode(0555))\np.AddChild(ctx, \"cpuinfo\", p.newCPUInfo(ctx, msrc))\n+ // If we're using rpcinet we will let it manage /proc/net.\n+ if _, ok := p.k.NetworkStack().(*rpcinet.Stack); ok {\n+ p.AddChild(ctx, \"net\", newRPCInetProcNet(ctx, msrc))\n+ } else {\n+ p.AddChild(ctx, \"net\", p.newNetDir(ctx, msrc))\n+ }\n+ p.AddChild(ctx, \"self\", p.newSelf(ctx, msrc))\n+ p.AddChild(ctx, \"sys\", p.newSysDir(ctx, msrc))\n+ p.AddChild(ctx, \"thread-self\", p.newThreadSelf(ctx, msrc))\np.AddChild(ctx, \"uptime\", p.newUptime(ctx, msrc))\nreturn newFile(p, msrc, fs.SpecialDirectory, nil), nil\n}\n// self is a magical link.\n+//\n+// +stateify savable\ntype self struct {\nramfs.Symlink\n@@ -146,6 +155,8 @@ func (s *self) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {\n}\n// threadSelf is more magical than \"self\" link.\n+//\n+// +stateify savable\ntype threadSelf struct {\nramfs.Symlink\n@@ -169,29 +180,11 @@ func (s *threadSelf) Readlink(ctx context.Context, inode *fs.Inode) (string, err\n// Lookup loads an Inode at name into a Dirent.\nfunc (p *proc) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dirent, error) {\n- // Is it one of the static ones?\ndirent, walkErr := p.Dir.Lookup(ctx, dir, name)\nif walkErr == nil {\nreturn dirent, nil\n}\n- // Is it a dynamic element?\n- nfs := map[string]func() *fs.Inode{\n- \"net\": func() *fs.Inode {\n- // If we're using rpcinet we will let it manage /proc/net.\n- if _, ok := p.k.NetworkStack().(*rpcinet.Stack); ok {\n- return newRPCInetProcNet(ctx, dir.MountSource)\n- }\n- return p.newNetDir(ctx, dir.MountSource)\n- },\n- \"self\": func() *fs.Inode { return p.newSelf(ctx, dir.MountSource) },\n- \"sys\": func() *fs.Inode { return p.newSysDir(ctx, dir.MountSource) },\n- \"thread-self\": func() *fs.Inode { return p.newThreadSelf(ctx, dir.MountSource) },\n- }\n- if nf, ok := nfs[name]; ok {\n- return fs.NewDirent(nf(), name), nil\n- }\n-\n// Try to lookup a corresponding task.\ntid, err := strconv.ParseUint(name, 10, 64)\nif err != nil {\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/controller.go", "new_path": "runsc/boot/controller.go", "diff": "@@ -30,6 +30,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/state\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/time\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/watchdog\"\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip/stack\"\n\"gvisor.googlesource.com/gvisor/pkg/urpc\"\n)\n@@ -356,6 +357,9 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\nif err != nil {\nreturn fmt.Errorf(\"failed to create network: %v\", err)\n}\n+ if eps, ok := networkStack.(*epsocket.Stack); ok {\n+ stack.StackFromEnv = eps.Stack // FIXME\n+ }\ninfo, err := o.FilePayload.Files[0].Stat()\nif err != nil {\nreturn err\n" } ]
Go
Apache License 2.0
google/gvisor
sentry: turn "dynamically-created" procfs files into static creation. PiperOrigin-RevId: 224600982 Change-Id: I547253528e24fb0bb318fc9d2632cb80504acb34
259,992
08.12.2018 09:20:55
28,800
b89f9909d77c9b41adf15c032bf588c99fc4b7eb
Update K8s support Add pointers to Minikube and gvisor-containerd-shim.
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -191,9 +191,9 @@ chmod a+x runsc\nsudo mv runsc /usr/local/bin\n```\n-### Configuring Docker\n+### Running with Docker\n-Next, configure Docker to use `runsc` by adding a runtime entry to your Docker\n+To use gVisor with Docker you must add `runsc` as a runtime to your Docker\nconfiguration (`/etc/docker/daemon.json`). You may have to create this file if\nit does not exist. Also, some Docker versions also require you to\n[specify the `storage-driver` field][docker-storage-driver].\n@@ -229,20 +229,16 @@ Terminal support works too:\ndocker run --runtime=runsc -it ubuntu /bin/bash\n```\n-### Kubernetes Support (Experimental)\n+### Running with Kubernetes\n-gVisor can run sandboxed containers in a Kubernetes cluster with cri-o, although\n-this is not recommended for production environments yet. Follow\n-[these instructions][cri-o-k8s] to run [cri-o][cri-o] on a node in a Kubernetes\n-cluster. Build `runsc` and put it on the node, and set it as the\n-`runtime_untrusted_workload` in `/etc/crio/crio.conf`.\n+gVisor can run sandboxed containers in a Kubernetes cluster with Minikube. After\n+the gVisor addon is enabled, pods with `io.kubernetes.cri.untrusted-workload`\n+set to true will execute with `runsc`. Follow [these instructions][minikube] to\n+enable gVisor addon.\n-Any Pod without the `io.kubernetes.cri-o.TrustedSandbox` annotation (or with the\n-annotation set to false) will be run with `runsc`.\n-\n-Currently, gVisor only supports Pods with a single container (not counting the\n-ever-present pause container). Support for multiple containers within a single\n-Pod is coming soon.\n+You can also setup Kubernetes node to use `gvisor-containerd-shim`. Pods with\n+`io.kubernetes.cri.untrusted-workload` annotation will execute with `runsc`. You\n+can find instructions [here][gvisor-containerd-shim].\n## Advanced Usage\n@@ -444,14 +440,14 @@ See [Contributing.md](CONTRIBUTING.md).\n[bazel]: https://bazel.build\n[bug]: https://github.com/google/gvisor/issues\n[checkpoint-restore]: https://gvisor.googlesource.com/gvisor/+/master/g3doc/checkpoint_restore.md\n-[cri-o-k8s]: https://github.com/kubernetes-incubator/cri-o/blob/master/kubernetes.md\n-[cri-o]: https://github.com/kubernetes-incubator/cri-o\n[docker-storage-driver]: https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-storage-driver\n[docker]: https://www.docker.com\n[git]: https://git-scm.com\n+[gvisor-containerd-shim]: https://github.com/google/gvisor-containerd-shim\n[gvisor-security-list]: https://groups.google.com/forum/#!forum/gvisor-security\n[gvisor-users-list]: https://groups.google.com/forum/#!forum/gvisor-users\n[kvm]: https://www.linux-kvm.org\n+[minikube]: https://github.com/kubernetes/minikube/blob/master/deploy/addons/gvisor/README.md\n[netstack]: https://github.com/google/netstack\n[oci]: https://www.opencontainers.org\n[python]: https://python.org\n" } ]
Go
Apache License 2.0
google/gvisor
Update K8s support Add pointers to Minikube and gvisor-containerd-shim. PiperOrigin-RevId: 224654334 Change-Id: Icefefbe531e901fe4807ba81904de8b01baf8a15
259,854
09.12.2018 00:49:37
28,800
25b8424d754bd659a0f976f82f7c8846dc2a194f
Stub out TCP_QUICKACK
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/epsocket/epsocket.go", "new_path": "pkg/sentry/socket/epsocket/epsocket.go", "diff": "@@ -698,6 +698,18 @@ func getSockOptTCP(t *kernel.Task, ep commonEndpoint, name, outLen int) (interfa\nreturn int32(v), nil\n+ case linux.TCP_QUICKACK:\n+ if outLen < sizeOfInt32 {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ var v tcpip.QuickAckOption\n+ if err := ep.GetSockOpt(&v); err != nil {\n+ return nil, syserr.TranslateNetstackError(err)\n+ }\n+\n+ return int32(v), nil\n+\ncase linux.TCP_INFO:\nvar v tcpip.TCPInfoOption\nif err := ep.GetSockOpt(&v); err != nil {\n@@ -870,6 +882,14 @@ func setSockOptTCP(t *kernel.Task, ep commonEndpoint, name int, optVal []byte) *\nv := usermem.ByteOrder.Uint32(optVal)\nreturn syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.CorkOption(v)))\n+ case linux.TCP_QUICKACK:\n+ if len(optVal) < sizeOfInt32 {\n+ return syserr.ErrInvalidArgument\n+ }\n+\n+ v := usermem.ByteOrder.Uint32(optVal)\n+ return syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.QuickAckOption(v)))\n+\ncase linux.TCP_REPAIR_OPTIONS:\nt.Kernel().EmitUnimplementedEvent(t)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tcpip.go", "new_path": "pkg/tcpip/tcpip.go", "diff": "@@ -436,6 +436,9 @@ type CorkOption int\n// should allow reuse of local address.\ntype ReuseAddressOption int\n+// QuickAckOption is stubbed out in SetSockOpt/GetSockOpt.\n+type QuickAckOption int\n+\n// PasscredOption is used by SetSockOpt/GetSockOpt to specify whether\n// SCM_CREDENTIALS socket control messages are enabled.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -177,6 +177,12 @@ type endpoint struct {\n// options.\nreuseAddr bool\n+ // slowAck holds the negated state of quick ack. It is stubbed out and\n+ // does nothing.\n+ //\n+ // slowAck is a boolean (0 is false) and must be accessed atomically.\n+ slowAck uint32\n+\n// segmentQueue is used to hand received segments to the protocol\n// goroutine. Segments are queued as long as the queue is not full,\n// and dropped when it is.\n@@ -677,6 +683,15 @@ func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\ne.mu.Unlock()\nreturn nil\n+ case tcpip.QuickAckOption:\n+ if v == 0 {\n+ atomic.StoreUint32(&e.slowAck, 1)\n+ } else {\n+ atomic.StoreUint32(&e.slowAck, 0)\n+ }\n+\n+ return nil\n+\ncase tcpip.ReceiveBufferSizeOption:\n// Make sure the receive buffer size is within the min and max\n// allowed.\n@@ -859,6 +874,13 @@ func (e *endpoint) GetSockOpt(opt interface{}) *tcpip.Error {\n}\nreturn nil\n+ case *tcpip.QuickAckOption:\n+ *o = 1\n+ if v := atomic.LoadUint32(&e.slowAck); v != 0 {\n+ *o = 0\n+ }\n+ return nil\n+\ncase *tcpip.V6OnlyOption:\n// We only recognize this option on v6 endpoints.\nif e.netProto != header.IPv6ProtocolNumber {\n" } ]
Go
Apache License 2.0
google/gvisor
Stub out TCP_QUICKACK PiperOrigin-RevId: 224696233 Change-Id: I45c425d9e32adee5dcce29ca7439a06567b26014
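For reference, this is how an application would normally toggle the option that the commit stubs out — a hedged Go sketch using only the standard library on Linux. The dialed address is a placeholder, and per the diff above gVisor accepts and remembers the value after this change but does not yet alter ACK scheduling.

```go
package main

import (
	"log"
	"net"
	"syscall"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	raw, err := conn.(*net.TCPConn).SyscallConn()
	if err != nil {
		log.Fatal(err)
	}
	// Request immediate ACKs on the underlying socket.
	err = raw.Control(func(fd uintptr) {
		if e := syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, syscall.TCP_QUICKACK, 1); e != nil {
			log.Print("setsockopt TCP_QUICKACK: ", e)
		}
	})
	if err != nil {
		log.Fatal(err)
	}
}
```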
259,881
10.12.2018 12:36:27
28,800
99d595869332f817de8f570fae184658c513a43c
Validate FS_BASE in Task.Clone arch_prctl already verified that the new FS_BASE was canonical, but Task.Clone did not. Centralize these checks in the arch packages. Failure to validate could cause an error in PTRACE_SET_REGS when we try to switch to the app.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/arch/arch.go", "new_path": "pkg/sentry/arch/arch.go", "diff": "@@ -115,6 +115,12 @@ type Context interface {\n// SetStack sets the current stack pointer.\nSetStack(value uintptr)\n+ // TLS returns the current TLS pointer.\n+ TLS() uintptr\n+\n+ // SetTLS sets the current TLS pointer. Returns false if value is invalid.\n+ SetTLS(value uintptr) bool\n+\n// SetRSEQInterruptedIP sets the register that contains the old IP when a\n// restartable sequence is interrupted.\nSetRSEQInterruptedIP(value uintptr)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/arch/arch_amd64.go", "new_path": "pkg/sentry/arch/arch_amd64.go", "diff": "@@ -158,6 +158,22 @@ func (c *context64) SetStack(value uintptr) {\nc.Regs.Rsp = uint64(value)\n}\n+// TLS returns the current TLS pointer.\n+func (c *context64) TLS() uintptr {\n+ return uintptr(c.Regs.Fs_base)\n+}\n+\n+// SetTLS sets the current TLS pointer. Returns false if value is invalid.\n+func (c *context64) SetTLS(value uintptr) bool {\n+ if !isValidSegmentBase(uint64(value)) {\n+ return false\n+ }\n+\n+ c.Regs.Fs = 0\n+ c.Regs.Fs_base = uint64(value)\n+ return true\n+}\n+\n// SetRSEQInterruptedIP implements Context.SetRSEQInterruptedIP.\nfunc (c *context64) SetRSEQInterruptedIP(value uintptr) {\nc.Regs.R10 = uint64(value)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/arch/arch_x86.go", "new_path": "pkg/sentry/arch/arch_x86.go", "diff": "@@ -353,10 +353,10 @@ func (s *State) PtraceSetRegs(src io.Reader) (int, error) {\nif !isUserSegmentSelector(regs.Ss) {\nreturn 0, syscall.EIO\n}\n- if regs.Fs_base >= uint64(maxAddr64) {\n+ if !isValidSegmentBase(regs.Fs_base) {\nreturn 0, syscall.EIO\n}\n- if regs.Gs_base >= uint64(maxAddr64) {\n+ if !isValidSegmentBase(regs.Gs_base) {\nreturn 0, syscall.EIO\n}\n// CS and SS are validated, but changes to them are otherwise silently\n@@ -389,6 +389,12 @@ func isUserSegmentSelector(reg uint64) bool {\nreturn reg&3 == 3\n}\n+// isValidSegmentBase returns true if the given segment base specifies a\n+// canonical user address.\n+func isValidSegmentBase(reg uint64) bool {\n+ return reg < uint64(maxAddr64)\n+}\n+\n// ptraceFPRegsSize is the size in bytes of Linux's user_i387_struct, the type\n// manipulated by PTRACE_GETFPREGS and PTRACE_SETFPREGS on x86. 
Equivalently,\n// ptraceFPRegsSize is the size in bytes of the x86 FXSAVE area.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_clone.go", "new_path": "pkg/sentry/kernel/task_clone.go", "diff": "@@ -210,7 +210,9 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\ntc.Arch.SetStack(uintptr(opts.Stack))\n}\nif opts.SetTLS {\n- tc.Arch.StateData().Regs.Fs_base = uint64(opts.TLS)\n+ if !tc.Arch.SetTLS(uintptr(opts.TLS)) {\n+ return 0, nil, syserror.EPERM\n+ }\n}\nvar fsc *FSContext\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ptrace/subprocess.go", "new_path": "pkg/sentry/platform/ptrace/subprocess.go", "diff": "@@ -480,10 +480,10 @@ func (s *subprocess) switchToApp(c *context, ac arch.Context) bool {\n// Set registers.\nif err := t.setRegs(regs); err != nil {\n- panic(fmt.Sprintf(\"ptrace set regs failed: %v\", err))\n+ panic(fmt.Sprintf(\"ptrace set regs (%+v) failed: %v\", regs, err))\n}\nif err := t.setFPRegs(fpState, uint64(fpLen), useXsave); err != nil {\n- panic(fmt.Sprintf(\"ptrace set fpregs failed: %v\", err))\n+ panic(fmt.Sprintf(\"ptrace set fpregs (%+v) failed: %v\", fpState, err))\n}\nfor {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_tls.go", "new_path": "pkg/sentry/syscalls/linux/sys_tls.go", "diff": "@@ -22,7 +22,6 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n)\n// ArchPrctl implements linux syscall arch_prctl(2).\n@@ -31,19 +30,17 @@ func ArchPrctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys\nswitch args[0].Int() {\ncase linux.ARCH_GET_FS:\naddr := args[1].Pointer()\n- _, err := t.CopyOut(addr, &t.Arch().StateData().Regs.Fs_base)\n+ fsbase := t.Arch().TLS()\n+ _, err := t.CopyOut(addr, uint64(fsbase))\nif err != nil {\nreturn 0, nil, err\n}\ncase linux.ARCH_SET_FS:\nfsbase := args[1].Uint64()\n- if _, ok := t.MemoryManager().CheckIORange(usermem.Addr(fsbase), 0); !ok {\n+ if !t.Arch().SetTLS(uintptr(fsbase)) {\nreturn 0, nil, syscall.EPERM\n}\n- regs := &t.Arch().StateData().Regs\n- regs.Fs = 0\n- regs.Fs_base = fsbase\ncase linux.ARCH_GET_GS, linux.ARCH_SET_GS:\nt.Kernel().EmitUnimplementedEvent(t)\n" } ]
Go
Apache License 2.0
google/gvisor
Validate FS_BASE in Task.Clone arch_prctl already verified that the new FS_BASE was canonical, but Task.Clone did not. Centralize these checks in the arch packages. Failure to validate could cause an error in PTRACE_SET_REGS when we try to switch to the app. PiperOrigin-RevId: 224862398 Change-Id: Iefe63b3f9aa6c4810326b8936e501be3ec407f14
259,985
10.12.2018 12:47:20
28,800
fc297702511edef4760c4f7a1d89cc6f02347d50
Add type safety to shm ids and keys.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/shm/shm.go", "new_path": "pkg/sentry/kernel/shm/shm.go", "diff": "@@ -51,6 +51,12 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/syserror\"\n)\n+// Key represents a shm segment key. Analogous to a file name.\n+type Key int32\n+\n+// ID represents the opaque handle for a shm segment. Analogous to an fd.\n+type ID int32\n+\n// Registry tracks all shared memory segments in an IPC namespace. The registry\n// provides the mechanisms for creating and finding segments, and reporting\n// global shm parameters.\n@@ -63,33 +69,33 @@ type Registry struct {\nmu sync.Mutex `state:\"nosave\"`\n// shms maps segment ids to segments. Protected by mu.\n- shms map[int32]*Shm\n+ shms map[ID]*Shm\n// Sum of the sizes of all existing segments rounded up to page size, in\n// units of page size. Protected by mu.\ntotalPages uint64\n// lastIDUsed is protected by mu.\n- lastIDUsed int32\n+ lastIDUsed ID\n}\n// NewRegistry creates a new shm registry.\nfunc NewRegistry(userNS *auth.UserNamespace) *Registry {\nreturn &Registry{\nuserNS: userNS,\n- shms: make(map[int32]*Shm),\n+ shms: make(map[ID]*Shm),\n}\n}\n// FindByID looks up a segment given an ID.\n-func (r *Registry) FindByID(id int32) *Shm {\n+func (r *Registry) FindByID(id ID) *Shm {\nr.mu.Lock()\ndefer r.mu.Unlock()\nreturn r.shms[id]\n}\n// Precondition: Caller must hold r.mu.\n-func (r *Registry) findByKey(key int32) *Shm {\n+func (r *Registry) findByKey(key Key) *Shm {\nfor _, v := range r.shms {\nif v.key == key {\nreturn v\n@@ -100,7 +106,7 @@ func (r *Registry) findByKey(key int32) *Shm {\n// FindOrCreate looks up or creates a segment in the registry. It's functionally\n// analogous to open(2).\n-func (r *Registry) FindOrCreate(ctx context.Context, pid, key int32, size uint64, mode linux.FileMode, private, create, exclusive bool) (*Shm, error) {\n+func (r *Registry) FindOrCreate(ctx context.Context, pid int32, key Key, size uint64, mode linux.FileMode, private, create, exclusive bool) (*Shm, error) {\nif (create || private) && (size < linux.SHMMIN || size > linux.SHMMAX) {\n// \"A new segment was to be created and size is less than SHMMIN or\n// greater than SHMMAX.\" - man shmget(2)\n@@ -178,7 +184,7 @@ func (r *Registry) FindOrCreate(ctx context.Context, pid, key int32, size uint64\n}\n// newShm creates a new segment in the registry.\n-func (r *Registry) newShm(ctx context.Context, pid, key int32, creator fs.FileOwner, perms fs.FilePermissions, size uint64) (*Shm, error) {\n+func (r *Registry) newShm(ctx context.Context, pid int32, key Key, creator fs.FileOwner, perms fs.FilePermissions, size uint64) (*Shm, error) {\np := platform.FromContext(ctx)\nif p == nil {\npanic(fmt.Sprintf(\"context.Context %T lacks non-nil value for key %T\", ctx, platform.CtxPlatform))\n@@ -289,7 +295,7 @@ type Shm struct {\nregistry *Registry\n// ID is the kernel identifier for this segment. Immutable.\n- ID int32\n+ ID ID\n// creator is the user that created the segment. 
Immutable.\ncreator fs.FileOwner\n@@ -309,7 +315,7 @@ type Shm struct {\nfr platform.FileRange\n// key is the public identifier for this segment.\n- key int32\n+ key Key\n// mu protects all fields below.\nmu sync.Mutex `state:\"nosave\"`\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_shm.go", "new_path": "pkg/sentry/syscalls/linux/sys_shm.go", "diff": "@@ -24,7 +24,7 @@ import (\n// Shmget implements shmget(2).\nfunc Shmget(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n- key := args[0].Int()\n+ key := shm.Key(args[0].Int())\nsize := uint64(args[1].SizeT())\nflag := args[2].Int()\n@@ -43,7 +43,7 @@ func Shmget(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\n}\n// findSegment retrives a shm segment by the given id.\n-func findSegment(t *kernel.Task, id int32) (*shm.Shm, error) {\n+func findSegment(t *kernel.Task, id shm.ID) (*shm.Shm, error) {\nr := t.IPCNamespace().ShmRegistry()\nsegment := r.FindByID(id)\nif segment == nil {\n@@ -55,7 +55,7 @@ func findSegment(t *kernel.Task, id int32) (*shm.Shm, error) {\n// Shmat implements shmat(2).\nfunc Shmat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n- id := args[0].Int()\n+ id := shm.ID(args[0].Int())\naddr := args[1].Pointer()\nflag := args[2].Int()\n@@ -86,7 +86,7 @@ func Shmdt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n// Shmctl implements shmctl(2).\nfunc Shmctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n- id := args[0].Int()\n+ id := shm.ID(args[0].Int())\ncmd := args[1].Int()\nbuf := args[2].Pointer()\n" } ]
Go
Apache License 2.0
google/gvisor
Add type safety to shm ids and keys. PiperOrigin-RevId: 224864380 Change-Id: I49542279ad56bf15ba462d3de1ef2b157b31830a
259,854
10.12.2018 17:55:45
28,800
5d87d8865f8771c00b84717d40f27f8f93dda7ca
Implement MSG_WAITALL MSG_WAITALL requests that recv family calls do not perform short reads. It only has an effect for SOCK_STREAM sockets, other types ignore it.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/socket.go", "new_path": "pkg/sentry/fs/host/socket.go", "diff": "@@ -169,7 +169,7 @@ func NewSocketWithDirent(ctx context.Context, d *fs.Dirent, f *fd.FD, flags fs.F\nep := transport.NewExternal(e.stype, uniqueid.GlobalProviderFromContext(ctx), &q, e, e)\n- return unixsocket.NewWithDirent(ctx, d, ep, flags), nil\n+ return unixsocket.NewWithDirent(ctx, d, ep, e.stype != transport.SockStream, flags), nil\n}\n// newSocket allocates a new unix socket with host endpoint.\n@@ -201,7 +201,7 @@ func newSocket(ctx context.Context, orgfd int, saveable bool) (*fs.File, error)\nep := transport.NewExternal(e.stype, uniqueid.GlobalProviderFromContext(ctx), &q, e, e)\n- return unixsocket.New(ctx, ep), nil\n+ return unixsocket.New(ctx, ep, e.stype != transport.SockStream), nil\n}\n// Send implements transport.ConnectedEndpoint.Send.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/epsocket/epsocket.go", "new_path": "pkg/sentry/socket/epsocket/epsocket.go", "diff": "@@ -1300,6 +1300,8 @@ func (s *SocketOperations) nonBlockingRead(ctx context.Context, dst usermem.IOSe\nfunc (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, senderAddr interface{}, senderAddrLen uint32, controlMessages socket.ControlMessages, err *syserr.Error) {\ntrunc := flags&linux.MSG_TRUNC != 0\npeek := flags&linux.MSG_PEEK != 0\n+ dontWait := flags&linux.MSG_DONTWAIT != 0\n+ waitAll := flags&linux.MSG_WAITALL != 0\nif senderRequested && !s.isPacketBased() {\n// Stream sockets ignore the sender address.\nsenderRequested = false\n@@ -1311,10 +1313,19 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nreturn 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n}\n- if err != syserr.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 {\n+ if err != nil && (err != syserr.ErrWouldBlock || dontWait) {\n+ // Read failed and we should not retry.\n+ return 0, nil, 0, socket.ControlMessages{}, err\n+ }\n+\n+ if err == nil && (dontWait || !waitAll || s.isPacketBased() || int64(n) >= dst.NumBytes()) {\n+ // We got all the data we need.\nreturn\n}\n+ // Don't overwrite any data we received.\n+ dst = dst.DropFirst(n)\n+\n// We'll have to block. Register for notifications and keep trying to\n// send all the data.\ne, ch := waiter.NewChannelEntry(nil)\n@@ -1322,10 +1333,23 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\ndefer s.EventUnregister(&e)\nfor {\n- n, senderAddr, senderAddrLen, controlMessages, err = s.nonBlockingRead(t, dst, peek, trunc, senderRequested)\n- if err != syserr.ErrWouldBlock {\n+ var rn int\n+ rn, senderAddr, senderAddrLen, controlMessages, err = s.nonBlockingRead(t, dst, peek, trunc, senderRequested)\n+ n += rn\n+ if err != nil && err != syserr.ErrWouldBlock {\n+ // Always stop on errors other than would block as we generally\n+ // won't be able to get any more data. 
Eat the error if we got\n+ // any data.\n+ if n > 0 {\n+ err = nil\n+ }\n+ return\n+ }\n+ if err == nil && (s.isPacketBased() || !waitAll || int64(rn) >= dst.NumBytes()) {\n+ // We got all the data we need.\nreturn\n}\n+ dst = dst.DropFirst(rn)\nif err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {\nif err == syserror.ETIMEDOUT {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/unix.go", "new_path": "pkg/sentry/socket/unix/unix.go", "diff": "@@ -53,19 +53,21 @@ type SocketOperations struct {\nfsutil.NoopFlush `state:\"nosave\"`\nfsutil.NoMMap `state:\"nosave\"`\nep transport.Endpoint\n+ isPacket bool\n}\n// New creates a new unix socket.\n-func New(ctx context.Context, endpoint transport.Endpoint) *fs.File {\n+func New(ctx context.Context, endpoint transport.Endpoint, isPacket bool) *fs.File {\ndirent := socket.NewDirent(ctx, unixSocketDevice)\ndefer dirent.DecRef()\n- return NewWithDirent(ctx, dirent, endpoint, fs.FileFlags{Read: true, Write: true})\n+ return NewWithDirent(ctx, dirent, endpoint, isPacket, fs.FileFlags{Read: true, Write: true})\n}\n// NewWithDirent creates a new unix socket using an existing dirent.\n-func NewWithDirent(ctx context.Context, d *fs.Dirent, ep transport.Endpoint, flags fs.FileFlags) *fs.File {\n+func NewWithDirent(ctx context.Context, d *fs.Dirent, ep transport.Endpoint, isPacket bool, flags fs.FileFlags) *fs.File {\nreturn fs.NewFile(ctx, d, flags, &SocketOperations{\nep: ep,\n+ isPacket: isPacket,\n})\n}\n@@ -188,7 +190,7 @@ func (s *SocketOperations) Accept(t *kernel.Task, peerRequested bool, flags int,\n}\n}\n- ns := New(t, ep)\n+ ns := New(t, ep, s.isPacket)\ndefer ns.DecRef()\nif flags&linux.SOCK_NONBLOCK != 0 {\n@@ -471,6 +473,8 @@ func (s *SocketOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOS\nfunc (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, senderAddr interface{}, senderAddrLen uint32, controlMessages socket.ControlMessages, err *syserr.Error) {\ntrunc := flags&linux.MSG_TRUNC != 0\npeek := flags&linux.MSG_PEEK != 0\n+ dontWait := flags&linux.MSG_DONTWAIT != 0\n+ waitAll := flags&linux.MSG_WAITALL != 0\n// Calculate the number of FDs for which we have space and if we are\n// requesting credentials.\n@@ -497,7 +501,8 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nif senderRequested {\nr.From = &tcpip.FullAddress{}\n}\n- if n, err := dst.CopyOutFrom(t, &r); err != syserror.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 {\n+ var total int64\n+ if n, err := dst.CopyOutFrom(t, &r); err != syserror.ErrWouldBlock || dontWait {\nvar from interface{}\nvar fromLen uint32\nif r.From != nil {\n@@ -506,9 +511,15 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nif trunc {\nn = int64(r.MsgSize)\n}\n+ if err != nil || dontWait || !waitAll || s.isPacket || n >= dst.NumBytes() {\nreturn int(n), from, fromLen, socket.ControlMessages{Unix: r.Control}, syserr.FromError(err)\n}\n+ // Don't overwrite any data we received.\n+ dst = dst.DropFirst64(n)\n+ total += n\n+ }\n+\n// We'll have to block. 
Register for notification and keep trying to\n// send all the data.\ne, ch := waiter.NewChannelEntry(nil)\n@@ -525,7 +536,13 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nif trunc {\nn = int64(r.MsgSize)\n}\n- return int(n), from, fromLen, socket.ControlMessages{Unix: r.Control}, syserr.FromError(err)\n+ total += n\n+ if err != nil || !waitAll || s.isPacket || n >= dst.NumBytes() {\n+ return int(total), from, fromLen, socket.ControlMessages{Unix: r.Control}, syserr.FromError(err)\n+ }\n+\n+ // Don't overwrite any data we received.\n+ dst = dst.DropFirst64(n)\n}\nif err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {\n@@ -549,16 +566,21 @@ func (*provider) Socket(t *kernel.Task, stype transport.SockType, protocol int)\n// Create the endpoint and socket.\nvar ep transport.Endpoint\n+ var isPacket bool\nswitch stype {\ncase linux.SOCK_DGRAM:\n+ isPacket = true\nep = transport.NewConnectionless()\n- case linux.SOCK_STREAM, linux.SOCK_SEQPACKET:\n+ case linux.SOCK_SEQPACKET:\n+ isPacket = true\n+ fallthrough\n+ case linux.SOCK_STREAM:\nep = transport.NewConnectioned(stype, t.Kernel())\ndefault:\nreturn nil, syserr.ErrInvalidArgument\n}\n- return New(t, ep), nil\n+ return New(t, ep, isPacket), nil\n}\n// Pair creates a new pair of AF_UNIX connected sockets.\n@@ -568,16 +590,19 @@ func (*provider) Pair(t *kernel.Task, stype transport.SockType, protocol int) (*\nreturn nil, nil, syserr.ErrInvalidArgument\n}\n+ var isPacket bool\nswitch stype {\n- case linux.SOCK_STREAM, linux.SOCK_DGRAM, linux.SOCK_SEQPACKET:\n+ case linux.SOCK_STREAM:\n+ case linux.SOCK_DGRAM, linux.SOCK_SEQPACKET:\n+ isPacket = true\ndefault:\nreturn nil, nil, syserr.ErrInvalidArgument\n}\n// Create the endpoints and sockets.\nep1, ep2 := transport.NewPair(stype, t.Kernel())\n- s1 := New(t, ep1)\n- s2 := New(t, ep2)\n+ s1 := New(t, ep1, isPacket)\n+ s2 := New(t, ep2, isPacket)\nreturn s1, s2, nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_socket.go", "new_path": "pkg/sentry/syscalls/linux/sys_socket.go", "diff": "@@ -602,7 +602,7 @@ func RecvMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca\n}\n// Reject flags that we don't handle yet.\n- if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_PEEK|linux.MSG_TRUNC|linux.MSG_CTRUNC|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE) != 0 {\n+ if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_PEEK|linux.MSG_TRUNC|linux.MSG_CTRUNC|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE|linux.MSG_WAITALL) != 0 {\nreturn 0, nil, syscall.EINVAL\n}\n@@ -635,7 +635,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc\n}\n// Reject flags that we don't handle yet.\n- if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_TRUNC|linux.MSG_CTRUNC|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE) != 0 {\n+ if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_TRUNC|linux.MSG_CTRUNC|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE|linux.MSG_WAITALL) != 0 {\nreturn 0, nil, syscall.EINVAL\n}\n@@ -791,7 +791,7 @@ func recvFrom(t *kernel.Task, fd kdefs.FD, bufPtr usermem.Addr, bufLen uint64, f\n}\n// Reject flags that we don't handle yet.\n- if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_PEEK|linux.MSG_TRUNC|linux.MSG_CTRUNC|linux.MSG_CONFIRM) != 0 {\n+ if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_PEEK|linux.MSG_TRUNC|linux.MSG_CTRUNC|linux.MSG_CONFIRM|linux.MSG_WAITALL) != 0 {\nreturn 0, syscall.EINVAL\n}\n" }, { "change_type": 
"MODIFY", "old_path": "test/syscalls/linux/socket_generic.cc", "new_path": "test/syscalls/linux/socket_generic.cc", "diff": "@@ -383,8 +383,6 @@ TEST_P(AllSocketPairTest, RecvmsgTimeoutOneSecondSucceeds) {\n}\nTEST_P(AllSocketPairTest, RecvWaitAll) {\n- SKIP_IF(IsRunningOnGvisor()); // FIXME: Support MSG_WAITALL.\n-\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nchar sent_data[100];\n@@ -399,5 +397,14 @@ TEST_P(AllSocketPairTest, RecvWaitAll) {\nSyscallSucceedsWithValue(sizeof(sent_data)));\n}\n+TEST_P(AllSocketPairTest, RecvWaitAllDontWait) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ char data[100] = {};\n+ ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), data, sizeof(data),\n+ MSG_WAITALL | MSG_DONTWAIT),\n+ SyscallFailsWithErrno(EAGAIN));\n+}\n+\n} // namespace testing\n} // namespace gvisor\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_non_stream_blocking.cc", "new_path": "test/syscalls/linux/socket_non_stream_blocking.cc", "diff": "@@ -31,8 +31,6 @@ namespace gvisor {\nnamespace testing {\nTEST_P(BlockingNonStreamSocketPairTest, RecvLessThanBufferWaitAll) {\n- SKIP_IF(IsRunningOnGvisor()); // FIXME: Support MSG_WAITALL.\n-\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nchar sent_data[100];\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_stream_blocking.cc", "new_path": "test/syscalls/linux/socket_stream_blocking.cc", "diff": "@@ -99,8 +99,6 @@ TEST_P(BlockingStreamSocketPairTest, RecvLessThanBuffer) {\n}\nTEST_P(BlockingStreamSocketPairTest, RecvLessThanBufferWaitAll) {\n- SKIP_IF(IsRunningOnGvisor()); // FIXME: Support MSG_WAITALL.\n-\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nchar sent_data[100];\n" } ]
Go
Apache License 2.0
google/gvisor
Implement MSG_WAITALL MSG_WAITALL requests that recv family calls do not perform short reads. It only has an effect for SOCK_STREAM sockets, other types ignore it. PiperOrigin-RevId: 224918540 Change-Id: Id97fbf972f1f7cbd4e08eec0138f8cbdf1c94fe7
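A small sketch of the semantics described above, using host sockets through Go's syscall package; the payload and buffer size are illustrative. On a stream socket the MSG_WAITALL read returns only after the whole buffer is filled, rather than after the first short write.

```go
package main

import (
	"log"
	"syscall"
)

func main() {
	fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	// Writer side: two short writes that together fill the reader's buffer.
	go func() {
		syscall.Write(fds[0], []byte("abcd"))
		syscall.Write(fds[0], []byte("efgh"))
	}()

	buf := make([]byte, 8)
	// Without MSG_WAITALL this could legally return after the first 4 bytes;
	// with it, the call blocks until all 8 bytes have arrived.
	n, _, err := syscall.Recvfrom(fds[1], buf, syscall.MSG_WAITALL)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("received %d bytes: %q", n, buf[:n])
}
```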
259,974
10.12.2018 21:34:08
28,800
52fe3b87a415006a4ef96548e33a7153b14ac28d
Add safecopy support for arm64 platform.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/safecopy/BUILD", "new_path": "pkg/sentry/platform/safecopy/BUILD", "diff": "@@ -6,11 +6,15 @@ go_library(\nname = \"safecopy\",\nsrcs = [\n\"atomic_amd64.s\",\n+ \"atomic_arm64.s\",\n\"memclr_amd64.s\",\n+ \"memclr_arm64.s\",\n\"memcpy_amd64.s\",\n+ \"memcpy_arm64.s\",\n\"safecopy.go\",\n\"safecopy_unsafe.go\",\n\"sighandler_amd64.s\",\n+ \"sighandler_arm64.s\",\n],\nimportpath = \"gvisor.googlesource.com/gvisor/pkg/sentry/platform/safecopy\",\nvisibility = [\"//pkg/sentry:internal\"],\n" } ]
Go
Apache License 2.0
google/gvisor
Add safecopy support for arm64 platform. Signed-off-by: Haibo Xu <[email protected]> Change-Id: I565214581eeb44045169da7f44d45a489082ac3a PiperOrigin-RevId: 224938170
259,884
10.12.2018 22:56:19
28,800
93ac04a6c32003597b3c639480126f8048988b72
Minor wording update to Kubernetes support section of the README Updated the README to correct some wording and clarify a bit that containerd CRI runtime is needed.
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -236,7 +236,8 @@ the gVisor addon is enabled, pods with `io.kubernetes.cri.untrusted-workload`\nset to true will execute with `runsc`. Follow [these instructions][minikube] to\nenable gVisor addon.\n-You can also setup Kubernetes node to use `gvisor-containerd-shim`. Pods with\n+You can also setup Kubernetes nodes to run pods in gvisor using the `containerd`\n+CRI runtime and the `gvisor-containerd-shim`. Pods with the\n`io.kubernetes.cri.untrusted-workload` annotation will execute with `runsc`. You\ncan find instructions [here][gvisor-containerd-shim].\n" } ]
Go
Apache License 2.0
google/gvisor
Minor wording update to Kubernetes support section of the README Updated the README to correct some wording and clarify a bit that containerd CRI runtime is needed. PiperOrigin-RevId: 224944753 Change-Id: I7b9c527500f99571aca7ef73058472ae9b3d5371
259,881
10.12.2018 23:58:20
28,800
77a443269a81bc81d1d0b4ac46406745e882be88
Fix test tag argument typo The argument is --test_tag_filters, not --test_tag_filter. Also switch to ... instead of :*, as it doesn't require special shell quoting to avoid * expansion.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/README.md", "new_path": "test/syscalls/README.md", "diff": "@@ -39,13 +39,13 @@ tag:\n```bash\n# Run all tests in native environment:\n-$ bazel test --test_tag_filter=native //test/syscalls:*\n+$ bazel test --test_tag_filters=native //test/syscalls/...\n# Run all tests in runsc with ptrace:\n-$ bazel test --test_tag_filter=runsc_ptrace //test/syscalls:*\n+$ bazel test --test_tag_filters=runsc_ptrace //test/syscalls/...\n# Run all tests in runsc with kvm:\n-$ bazel test --test_tag_filter=runsc_kvm //test/syscalls:*\n+$ bazel test --test_tag_filters=runsc_kvm //test/syscalls/...\n```\nYou can also run all the tests on every platform. (Warning, this may take a\n@@ -53,7 +53,7 @@ while to run.)\n```bash\n# Run all tests on every platform:\n-$ bazel test //test/syscalls:*\n+$ bazel test //test/syscalls/...\n```\n## Writing new tests\n" } ]
Go
Apache License 2.0
google/gvisor
Fix test tag argument typo

The argument is --test_tag_filters, not --test_tag_filter. Also switch to ... instead of :*, as it doesn't require special shell quoting to avoid * expansion.

PiperOrigin-RevId: 224949618
Change-Id: I45dd6acbaeae29f2cc0baa977b086b5c037c6a88
259,956
11.12.2018 11:39:17
28,800
5934fad1d781f13d04184c7585014a98a3b86958
Remove unused envv variable from two funcs.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/loader/interpreter.go", "new_path": "pkg/sentry/loader/interpreter.go", "diff": "@@ -37,7 +37,7 @@ const (\n)\n// parseInterpreterScript returns the interpreter path and argv.\n-func parseInterpreterScript(ctx context.Context, filename string, f *fs.File, argv, envv []string) (newpath string, newargv []string, err error) {\n+func parseInterpreterScript(ctx context.Context, filename string, f *fs.File, argv []string) (newpath string, newargv []string, err error) {\nline := make([]byte, interpMaxLineLength)\nn, err := readFull(ctx, f, usermem.BytesIOSequence(line), 0)\n// Short read is OK.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/loader/loader.go", "new_path": "pkg/sentry/loader/loader.go", "diff": "@@ -136,7 +136,7 @@ const (\n// * arch.Context matching the binary arch\n// * fs.Dirent of the binary file\n// * Possibly updated argv\n-func loadPath(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, root, wd *fs.Dirent, remainingTraversals *uint, fs *cpuid.FeatureSet, filename string, argv, envv []string) (loadedELF, arch.Context, *fs.Dirent, []string, error) {\n+func loadPath(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, root, wd *fs.Dirent, remainingTraversals *uint, fs *cpuid.FeatureSet, filename string, argv []string) (loadedELF, arch.Context, *fs.Dirent, []string, error) {\nfor i := 0; i < maxLoaderAttempts; i++ {\nd, f, err := openPath(ctx, mounts, root, wd, remainingTraversals, filename)\nif err != nil {\n@@ -172,7 +172,7 @@ func loadPath(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespac\nd.IncRef()\nreturn loaded, ac, d, argv, err\ncase bytes.Equal(hdr[:2], []byte(interpreterScriptMagic)):\n- newpath, newargv, err := parseInterpreterScript(ctx, filename, f, argv, envv)\n+ newpath, newargv, err := parseInterpreterScript(ctx, filename, f, argv)\nif err != nil {\nctx.Infof(\"Error loading interpreter script: %v\", err)\nreturn loadedELF{}, nil, nil, nil, err\n@@ -198,7 +198,7 @@ func loadPath(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespac\n// * Load is called on the Task goroutine.\nfunc Load(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals *uint, fs *cpuid.FeatureSet, filename string, argv, envv []string, extraAuxv []arch.AuxEntry, vdso *VDSO) (abi.OS, arch.Context, string, error) {\n// Load the binary itself.\n- loaded, ac, d, argv, err := loadPath(ctx, m, mounts, root, wd, maxTraversals, fs, filename, argv, envv)\n+ loaded, ac, d, argv, err := loadPath(ctx, m, mounts, root, wd, maxTraversals, fs, filename, argv)\nif err != nil {\nctx.Infof(\"Failed to load %s: %v\", filename, err)\nreturn 0, nil, \"\", err\n" } ]
Go
Apache License 2.0
google/gvisor
Remove unused envv variable from two funcs. PiperOrigin-RevId: 225041520 Change-Id: Ib1afc693e592d308d60db82022c5b7743fd3c646
259,881
11.12.2018 15:32:23
28,800
2b6df6a2049e839e39717f90c1760f3d410c98f1
Format unshare flags

unshare actually takes a subset of clone flags, but has no unique flags, so formatting as clone flags is close enough.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/strace/linux64.go", "new_path": "pkg/sentry/strace/linux64.go", "diff": "@@ -289,7 +289,7 @@ var linuxAMD64 = SyscallMap{\n269: makeSyscallInfo(\"faccessat\", Hex, Path, Oct, Hex),\n270: makeSyscallInfo(\"pselect6\", Hex, Hex, Hex, Hex, Hex, Hex),\n271: makeSyscallInfo(\"ppoll\", Hex, Hex, Timespec, SigSet, Hex),\n- 272: makeSyscallInfo(\"unshare\", Hex),\n+ 272: makeSyscallInfo(\"unshare\", CloneFlags),\n273: makeSyscallInfo(\"set_robust_list\", Hex, Hex),\n274: makeSyscallInfo(\"get_robust_list\", Hex, Hex, Hex),\n275: makeSyscallInfo(\"splice\", Hex, Hex, Hex, Hex, Hex, Hex),\n" } ]
Go
Apache License 2.0
google/gvisor
Format unshare flags

unshare actually takes a subset of clone flags, but has no unique flags, so formatting as clone flags is close enough.

PiperOrigin-RevId: 225082774
Change-Id: I5b580f18607c7785f323e37809094115520a17c0
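As a hedged aside (not part of the commit): the reason the clone-flag formatter fits is that unshare(2) is invoked with CLONE_* constants. A minimal Go sketch, assuming golang.org/x/sys/unix and CAP_SYS_ADMIN for the namespaces involved:

    // Hypothetical sketch: unshare(2) takes CLONE_* flags, so strace's
    // clone-flag decoder can format its argument. Most namespace flags
    // require CAP_SYS_ADMIN, so expect EPERM when run unprivileged.
    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Move this process into new UTS and mount namespaces.
        err := unix.Unshare(unix.CLONE_NEWUTS | unix.CLONE_NEWNS)
        fmt.Println(err) // nil as root; typically EPERM otherwise
    }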
259,881
11.12.2018 16:11:53
28,800
24c1158b9c21f7f8b7126e810d623a518422052e
Add "trace signal" option

This option is effectively equivalent to -panic-signal, except that the sandbox does not die after logging the traceback.
[ { "change_type": "MODIFY", "old_path": "runsc/boot/config.go", "new_path": "runsc/boot/config.go", "diff": "@@ -198,10 +198,17 @@ type Config struct {\n// WatchdogAction sets what action the watchdog takes when triggered.\nWatchdogAction watchdog.Action\n- // PanicSignal register signal handling that panics. Usually set to\n+ // PanicSignal registers signal handling that panics. Usually set to\n// SIGUSR2(12) to troubleshoot hangs. -1 disables it.\n+ //\n+ // PanicSignal takes precedence over TraceSignal.\nPanicSignal int\n+ // TraceSignal registers signal handling that logs a traceback of all\n+ // goroutines. Usually set to SIGUSR2(12) to troubleshoot hangs. -1\n+ // disables it.\n+ TraceSignal int\n+\n// TestOnlyAllowRunAsCurrentUserWithoutChroot should only be used in\n// tests. It allows runsc to start the sandbox process as the current\n// user, and without chrooting the sandbox process. This can be\n@@ -228,5 +235,6 @@ func (c *Config) ToFlags() []string {\n\"--strace-log-size=\" + strconv.Itoa(int(c.StraceLogSize)),\n\"--watchdog-action=\" + c.WatchdogAction.String(),\n\"--panic-signal=\" + strconv.Itoa(c.PanicSignal),\n+ \"--trace-signal=\" + strconv.Itoa(c.TraceSignal),\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -324,10 +324,14 @@ func New(args Args) (*Loader, error) {\n// Handle signals by forwarding them to the root container process\n// (except for panic signal, which should cause a panic).\nl.startSignalForwarding = sighandling.PrepareHandler(func(sig linux.Signal) {\n- // Panic signal should cause a panic.\n+ // Tracing signals should cause their respective actions.\nif args.Conf.PanicSignal != -1 && sig == linux.Signal(args.Conf.PanicSignal) {\npanic(\"Signal-induced panic\")\n}\n+ if args.Conf.TraceSignal != -1 && sig == linux.Signal(args.Conf.TraceSignal) {\n+ log.TracebackAll(\"Signal-induced traceback\")\n+ return\n+ }\n// Otherwise forward to root container.\ndeliveryMode := DeliverToProcess\n" }, { "change_type": "MODIFY", "old_path": "runsc/main.go", "new_path": "runsc/main.go", "diff": "@@ -62,7 +62,8 @@ var (\nfileAccess = flag.String(\"file-access\", \"exclusive\", \"specifies which filesystem to use for the root mount: exclusive (default), shared. Volume mounts are always shared.\")\noverlay = flag.Bool(\"overlay\", false, \"wrap filesystem mounts with writable overlay. All modifications are stored in memory inside the sandbox.\")\nwatchdogAction = flag.String(\"watchdog-action\", \"log\", \"sets what action the watchdog takes when triggered: log (default), panic.\")\n- panicSignal = flag.Int(\"panic-signal\", -1, \"register signal handling that panics. Usually set to SIGUSR2(12) to troubleshoot hangs. -1 disables it.\")\n+ panicSignal = flag.Int(\"panic-signal\", -1, \"register signal handling that panics. Usually set to SIGUSR2(12) to troubleshoot hangs. -1 disables it. This takes precendence over -trace-signal.\")\n+ traceSignal = flag.Int(\"trace-signal\", -1, \"register signal handling that logs a traceback of all goroutines. Usually set to SIGUSR2(12) to troubleshoot hangs. -1 disables it.\")\n)\n// gitRevision is set during linking.\n@@ -144,6 +145,7 @@ func main() {\nStraceLogSize: *straceLogSize,\nWatchdogAction: wa,\nPanicSignal: *panicSignal,\n+ TraceSignal: *traceSignal,\n}\nif len(*straceSyscalls) != 0 {\nconf.StraceSyscalls = strings.Split(*straceSyscalls, \",\")\n" } ]
Go
Apache License 2.0
google/gvisor
Add "trace signal" option

This option is effectively equivalent to -panic-signal, except that the sandbox does not die after logging the traceback.

PiperOrigin-RevId: 225089593
Change-Id: Ifb1c411210110b6104613f404334bd02175e484e
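For context, a minimal Go sketch of the mechanism such a flag relies on. gVisor's log.TracebackAll is internal, so this uses runtime.Stack directly, which is an assumption about the equivalent behavior rather than the sentry's actual code: on SIGUSR2, dump the stacks of all goroutines and keep running instead of panicking.

    // Hypothetical sketch: log all goroutine stacks on SIGUSR2 without dying.
    package main

    import (
        "os"
        "os/signal"
        "runtime"
        "syscall"
    )

    func main() {
        ch := make(chan os.Signal, 1)
        signal.Notify(ch, syscall.SIGUSR2)
        go func() {
            for range ch {
                buf := make([]byte, 1<<20)
                n := runtime.Stack(buf, true) // true: include all goroutines
                os.Stderr.Write(buf[:n])
            }
        }()
        select {} // block; `kill -USR2 <pid>` triggers a traceback
    }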
259,885
11.12.2018 17:04:42
28,800
a2c868a098fcb51dcdf629045c5f5c0f68c2766f
Add rvalue ref-qualified PosixErrorOr<T>::ValueOrDie() overloads. This allows ValueOrDie() to be called on PosixErrorOr rvalues (e.g. temporaries) holding move-only types without extraneous std::move()s.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/itimer.cc", "new_path": "test/syscalls/linux/itimer.cc", "diff": "@@ -126,7 +126,7 @@ SignalTestResult ItimerSignalTest(int id, clock_t main_clock,\nsa.sa_handler = &SignalTestSignalHandler;\nsa.sa_flags = SA_RESTART;\nsigemptyset(&sa.sa_mask);\n- auto sigaction_cleanup = std::move(ScopedSigaction(signal, sa).ValueOrDie());\n+ auto sigaction_cleanup = ScopedSigaction(signal, sa).ValueOrDie();\nint socketfds[2];\nTEST_PCHECK(socketpair(AF_UNIX, SOCK_STREAM, 0, socketfds) == 0);\n@@ -167,7 +167,7 @@ SignalTestResult ItimerSignalTest(int id, clock_t main_clock,\nstruct itimerval timer = {};\ntimer.it_value = absl::ToTimeval(kPeriod);\ntimer.it_interval = absl::ToTimeval(kPeriod);\n- auto cleanup_itimer = std::move(ScopedItimer(id, timer).ValueOrDie());\n+ auto cleanup_itimer = ScopedItimer(id, timer).ValueOrDie();\n// Unblock th1.\n//\n" }, { "change_type": "MODIFY", "old_path": "test/util/posix_error.h", "new_path": "test/util/posix_error.h", "diff": "@@ -103,8 +103,10 @@ class ABSL_MUST_USE_RESULT PosixErrorOr {\nbool ok() const;\n// Returns a reference to our current value, or CHECK-fails if !this->ok().\n- const T& ValueOrDie() const;\n- T& ValueOrDie();\n+ const T& ValueOrDie() const&;\n+ T& ValueOrDie() &;\n+ const T&& ValueOrDie() const&&;\n+ T&& ValueOrDie() &&;\n// Ignores any errors. This method does nothing except potentially suppress\n// complaints from any tools that are checking that errors are not dropped on\n@@ -179,17 +181,29 @@ bool PosixErrorOr<T>::ok() const {\n}\ntemplate <typename T>\n-const T& PosixErrorOr<T>::ValueOrDie() const {\n+const T& PosixErrorOr<T>::ValueOrDie() const& {\nTEST_CHECK(absl::holds_alternative<T>(value_));\nreturn absl::get<T>(value_);\n}\ntemplate <typename T>\n-T& PosixErrorOr<T>::ValueOrDie() {\n+T& PosixErrorOr<T>::ValueOrDie() & {\nTEST_CHECK(absl::holds_alternative<T>(value_));\nreturn absl::get<T>(value_);\n}\n+template <typename T>\n+const T&& PosixErrorOr<T>::ValueOrDie() const&& {\n+ TEST_CHECK(absl::holds_alternative<T>(value_));\n+ return std::move(absl::get<T>(value_));\n+}\n+\n+template <typename T>\n+T&& PosixErrorOr<T>::ValueOrDie() && {\n+ TEST_CHECK(absl::holds_alternative<T>(value_));\n+ return std::move(absl::get<T>(value_));\n+}\n+\nextern ::std::ostream& operator<<(::std::ostream& os, const PosixError& e);\ntemplate <typename T>\n@@ -399,7 +413,7 @@ IsPosixErrorOkAndHolds(InnerMatcher&& inner_matcher) {\nif (!posixerroror.ok()) { \\\nreturn (posixerroror.error()); \\\n} \\\n- lhs = std::move(posixerroror.ValueOrDie())\n+ lhs = std::move(posixerroror).ValueOrDie()\n#define EXPECT_NO_ERRNO(expression) \\\nEXPECT_THAT(expression, IsPosixErrorOkMatcher())\n@@ -419,7 +433,7 @@ IsPosixErrorOkAndHolds(InnerMatcher&& inner_matcher) {\n({ \\\nauto _expr_result = (expr); \\\nASSERT_NO_ERRNO(_expr_result); \\\n- std::move(_expr_result.ValueOrDie()); \\\n+ std::move(_expr_result).ValueOrDie(); \\\n})\n} // namespace testing\n" }, { "change_type": "MODIFY", "old_path": "test/util/proc_util.h", "new_path": "test/util/proc_util.h", "diff": "@@ -115,7 +115,7 @@ MATCHER_P(ContainsMappings, mappings,\nreturn false;\n}\n- auto maps = std::move(maps_or.ValueOrDie());\n+ auto maps = std::move(maps_or).ValueOrDie();\n// Does maps contain all elements in mappings? The comparator ignores\n// the major, minor, and inode fields.\n" } ]
Go
Apache License 2.0
google/gvisor
Add rvalue ref-qualified PosixErrorOr<T>::ValueOrDie() overloads. This allows ValueOrDie() to be called on PosixErrorOr rvalues (e.g. temporaries) holding move-only types without extraneous std::move()s. PiperOrigin-RevId: 225098036 Change-Id: I662862e4f3562141f941845fc6e197edb27ce29b
259,985
12.12.2018 13:09:10
28,800
75e39eaa74c65b6f7cfb95addb6ac0cbcc7d951a
Pass information about map writableness to filesystems. This is necessary to implement file seals for memfds.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/binder/binder.go", "new_path": "pkg/sentry/fs/binder/binder.go", "diff": "@@ -302,7 +302,7 @@ func (bp *Proc) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArgum\n}\n// AddMapping implements memmap.Mappable.AddMapping.\n-func (bp *Proc) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) error {\n+func (bp *Proc) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, _ bool) error {\nbp.mu.Lock()\ndefer bp.mu.Unlock()\nif bp.mapped.Length() != 0 {\n@@ -320,12 +320,12 @@ func (bp *Proc) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar userm\n}\n// RemoveMapping implements memmap.Mappable.RemoveMapping.\n-func (bp *Proc) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) {\n+func (*Proc) RemoveMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, uint64, bool) {\n// Nothing to do. Notably, we don't free bp.mapped to allow another mmap.\n}\n// CopyMapping implements memmap.Mappable.CopyMapping.\n-func (bp *Proc) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error {\n+func (bp *Proc) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, _ bool) error {\n// Nothing to do. Notably, this is one case where CopyMapping isn't\n// equivalent to AddMapping, as AddMapping would return EBUSY.\nreturn nil\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/copy_up.go", "new_path": "pkg/sentry/fs/copy_up.go", "diff": "@@ -270,13 +270,13 @@ func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {\nfor seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {\nadded := make(memmap.MappingsOfRange)\nfor m := range seg.Value() {\n- if err := upperMappable.AddMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start()); err != nil {\n+ if err := upperMappable.AddMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable); err != nil {\nfor m := range added {\n- upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start())\n+ upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)\n}\nfor mr, mappings := range allAdded {\nfor m := range mappings {\n- upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, mr.Start)\n+ upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, mr.Start, m.Writable)\n}\n}\nreturn err\n@@ -301,7 +301,7 @@ func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {\nif lowerMappable != nil {\nfor seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {\nfor m := range seg.Value() {\n- lowerMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start())\n+ lowerMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/fsutil/inode_cached.go", "new_path": "pkg/sentry/fs/fsutil/inode_cached.go", "diff": "@@ -686,10 +686,10 @@ func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error\n}\n// AddMapping implements memmap.Mappable.AddMapping.\n-func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) error {\n+func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset 
uint64, writable bool) error {\n// Hot path. Avoid defers.\nc.mapsMu.Lock()\n- mapped := c.mappings.AddMapping(ms, ar, offset)\n+ mapped := c.mappings.AddMapping(ms, ar, offset, writable)\n// Do this unconditionally since whether we have c.backingFile.FD() >= 0\n// can change across save/restore.\nfor _, r := range mapped {\n@@ -705,10 +705,10 @@ func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.Mappi\n}\n// RemoveMapping implements memmap.Mappable.RemoveMapping.\n-func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) {\n+func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {\n// Hot path. Avoid defers.\nc.mapsMu.Lock()\n- unmapped := c.mappings.RemoveMapping(ms, ar, offset)\n+ unmapped := c.mappings.RemoveMapping(ms, ar, offset, writable)\nfor _, r := range unmapped {\nc.hostFileMapper.DecRefOn(r)\n}\n@@ -739,8 +739,8 @@ func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Ma\n}\n// CopyMapping implements memmap.Mappable.CopyMapping.\n-func (c *CachingInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error {\n- return c.AddMapping(ctx, ms, dstAR, offset)\n+func (c *CachingInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {\n+ return c.AddMapping(ctx, ms, dstAR, offset, writable)\n}\n// Translate implements memmap.Mappable.Translate.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/fsutil/inode_cached_test.go", "new_path": "pkg/sentry/fs/fsutil/inode_cached_test.go", "diff": "@@ -305,7 +305,7 @@ func TestRead(t *testing.T) {\n// be cached.\nvar ms noopMappingSpace\nar := usermem.AddrRange{usermem.PageSize, 2 * usermem.PageSize}\n- if err := iops.AddMapping(ctx, ms, ar, usermem.PageSize); err != nil {\n+ if err := iops.AddMapping(ctx, ms, ar, usermem.PageSize, true); err != nil {\nt.Fatalf(\"AddMapping got %v, want nil\", err)\n}\nmr := memmap.MappableRange{usermem.PageSize, 2 * usermem.PageSize}\n@@ -334,7 +334,7 @@ func TestRead(t *testing.T) {\n// Delete the memory mapping and expect it to cause the cached page to be\n// uncached.\n- iops.RemoveMapping(ctx, ms, ar, usermem.PageSize)\n+ iops.RemoveMapping(ctx, ms, ar, usermem.PageSize, true)\nif cached := iops.cache.Span(); cached != 0 {\nt.Fatalf(\"Span got %d, want 0\", cached)\n}\n@@ -363,10 +363,10 @@ func TestWrite(t *testing.T) {\n// Translate to force them to be cached.\nvar ms noopMappingSpace\nar := usermem.AddrRange{usermem.PageSize, 3 * usermem.PageSize}\n- if err := iops.AddMapping(ctx, ms, ar, usermem.PageSize); err != nil {\n+ if err := iops.AddMapping(ctx, ms, ar, usermem.PageSize, true); err != nil {\nt.Fatalf(\"AddMapping got %v, want nil\", err)\n}\n- defer iops.RemoveMapping(ctx, ms, ar, usermem.PageSize)\n+ defer iops.RemoveMapping(ctx, ms, ar, usermem.PageSize, true)\nmr := memmap.MappableRange{usermem.PageSize, 3 * usermem.PageSize}\nif _, err := iops.Translate(ctx, mr, mr, usermem.Read); err != nil {\nt.Fatalf(\"Translate got %v, want nil\", err)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/overlay.go", "new_path": "pkg/sentry/fs/overlay.go", "diff": "@@ -259,32 +259,32 @@ func (o *overlayEntry) isMappableLocked() bool {\n}\n// AddMapping implements memmap.Mappable.AddMapping.\n-func (o *overlayEntry) AddMapping(ctx 
context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) error {\n+func (o *overlayEntry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {\no.mapsMu.Lock()\ndefer o.mapsMu.Unlock()\n- if err := o.inodeLocked().Mappable().AddMapping(ctx, ms, ar, offset); err != nil {\n+ if err := o.inodeLocked().Mappable().AddMapping(ctx, ms, ar, offset, writable); err != nil {\nreturn err\n}\n- o.mappings.AddMapping(ms, ar, offset)\n+ o.mappings.AddMapping(ms, ar, offset, writable)\nreturn nil\n}\n// RemoveMapping implements memmap.Mappable.RemoveMapping.\n-func (o *overlayEntry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) {\n+func (o *overlayEntry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {\no.mapsMu.Lock()\ndefer o.mapsMu.Unlock()\n- o.inodeLocked().Mappable().RemoveMapping(ctx, ms, ar, offset)\n- o.mappings.RemoveMapping(ms, ar, offset)\n+ o.inodeLocked().Mappable().RemoveMapping(ctx, ms, ar, offset, writable)\n+ o.mappings.RemoveMapping(ms, ar, offset, writable)\n}\n// CopyMapping implements memmap.Mappable.CopyMapping.\n-func (o *overlayEntry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error {\n+func (o *overlayEntry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {\no.mapsMu.Lock()\ndefer o.mapsMu.Unlock()\n- if err := o.inodeLocked().Mappable().CopyMapping(ctx, ms, srcAR, dstAR, offset); err != nil {\n+ if err := o.inodeLocked().Mappable().CopyMapping(ctx, ms, srcAR, dstAR, offset, writable); err != nil {\nreturn err\n}\n- o.mappings.AddMapping(ms, dstAR, offset)\n+ o.mappings.AddMapping(ms, dstAR, offset, writable)\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/tmpfs/inode_file.go", "new_path": "pkg/sentry/fs/tmpfs/inode_file.go", "diff": "@@ -426,23 +426,23 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)\n}\n// AddMapping implements memmap.Mappable.AddMapping.\n-func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) error {\n+func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {\nf.mapsMu.Lock()\ndefer f.mapsMu.Unlock()\n- f.mappings.AddMapping(ms, ar, offset)\n+ f.mappings.AddMapping(ms, ar, offset, writable)\nreturn nil\n}\n// RemoveMapping implements memmap.Mappable.RemoveMapping.\n-func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) {\n+func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {\nf.mapsMu.Lock()\ndefer f.mapsMu.Unlock()\n- f.mappings.RemoveMapping(ms, ar, offset)\n+ f.mappings.RemoveMapping(ms, ar, offset, writable)\n}\n// CopyMapping implements memmap.Mappable.CopyMapping.\n-func (f *fileInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error {\n- return f.AddMapping(ctx, ms, dstAR, offset)\n+func (f *fileInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {\n+ return f.AddMapping(ctx, ms, dstAR, 
offset, writable)\n}\n// Translate implements memmap.Mappable.Translate.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/shm/shm.go", "new_path": "pkg/sentry/kernel/shm/shm.go", "diff": "@@ -375,7 +375,7 @@ func (s *Shm) Msync(context.Context, memmap.MappableRange) error {\n}\n// AddMapping implements memmap.Mappable.AddMapping.\n-func (s *Shm) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) error {\n+func (s *Shm) AddMapping(ctx context.Context, _ memmap.MappingSpace, _ usermem.AddrRange, _ uint64, _ bool) error {\ns.mu.Lock()\ndefer s.mu.Unlock()\ns.attachTime = ktime.NowFromContext(ctx)\n@@ -390,7 +390,7 @@ func (s *Shm) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem\n}\n// RemoveMapping implements memmap.Mappable.RemoveMapping.\n-func (s *Shm) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) {\n+func (s *Shm) RemoveMapping(ctx context.Context, _ memmap.MappingSpace, _ usermem.AddrRange, _ uint64, _ bool) {\ns.mu.Lock()\ndefer s.mu.Unlock()\n// TODO: RemoveMapping may be called during task exit, when ctx\n@@ -411,7 +411,7 @@ func (s *Shm) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar user\n}\n// CopyMapping implements memmap.Mappable.CopyMapping.\n-func (s *Shm) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error {\n+func (*Shm) CopyMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, usermem.AddrRange, uint64, bool) error {\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/memmap/mapping_set.go", "new_path": "pkg/sentry/memmap/mapping_set.go", "diff": "@@ -40,6 +40,7 @@ type MappingsOfRange map[MappingOfRange]struct{}\ntype MappingOfRange struct {\nMappingSpace MappingSpace\nAddrRange usermem.AddrRange\n+ Writable bool\n}\nfunc (r MappingOfRange) invalidate(opts InvalidateOpts) {\n@@ -92,6 +93,7 @@ func (mappingSetFunctions) Merge(r1 MappableRange, val1 MappingsOfRange, r2 Mapp\nStart: k1.AddrRange.End,\nEnd: k1.AddrRange.End + usermem.Addr(r2.Length()),\n},\n+ Writable: k1.Writable,\n}\nif _, ok := val2[k2]; !ok {\nreturn nil, false\n@@ -104,6 +106,7 @@ func (mappingSetFunctions) Merge(r1 MappableRange, val1 MappingsOfRange, r2 Mapp\nStart: k1.AddrRange.Start,\nEnd: k2.AddrRange.End,\n},\n+ Writable: k1.Writable,\n}] = struct{}{}\n}\n@@ -129,6 +132,7 @@ func (mappingSetFunctions) Split(r MappableRange, val MappingsOfRange, split uin\nStart: k.AddrRange.Start,\nEnd: k.AddrRange.Start + offset,\n},\n+ Writable: k.Writable,\n}\nm1[k1] = struct{}{}\n@@ -138,6 +142,7 @@ func (mappingSetFunctions) Split(r MappableRange, val MappingsOfRange, split uin\nStart: k.AddrRange.Start + offset,\nEnd: k.AddrRange.End,\n},\n+ Writable: k.Writable,\n}\nm2[k2] = struct{}{}\n}\n@@ -152,7 +157,7 @@ func (mappingSetFunctions) Split(r MappableRange, val MappingsOfRange, split uin\n// indicating that ms maps addresses [0x4000, 0x6000) to MappableRange [0x0,\n// 0x2000). 
Then for subsetRange = [0x1000, 0x2000), subsetMapping returns a\n// MappingOfRange for which AddrRange = [0x5000, 0x6000).\n-func subsetMapping(wholeRange, subsetRange MappableRange, ms MappingSpace, addr usermem.Addr) MappingOfRange {\n+func subsetMapping(wholeRange, subsetRange MappableRange, ms MappingSpace, addr usermem.Addr, writable bool) MappingOfRange {\nif !wholeRange.IsSupersetOf(subsetRange) {\npanic(fmt.Sprintf(\"%v is not a superset of %v\", wholeRange, subsetRange))\n}\n@@ -165,6 +170,7 @@ func subsetMapping(wholeRange, subsetRange MappableRange, ms MappingSpace, addr\nStart: start,\nEnd: start + usermem.Addr(subsetRange.Length()),\n},\n+ Writable: writable,\n}\n}\n@@ -172,7 +178,7 @@ func subsetMapping(wholeRange, subsetRange MappableRange, ms MappingSpace, addr\n// previously had no mappings.\n//\n// Preconditions: As for Mappable.AddMapping.\n-func (s *MappingSet) AddMapping(ms MappingSpace, ar usermem.AddrRange, offset uint64) []MappableRange {\n+func (s *MappingSet) AddMapping(ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) []MappableRange {\nmr := MappableRange{offset, offset + uint64(ar.Length())}\nvar mapped []MappableRange\nseg, gap := s.Find(mr.Start)\n@@ -180,7 +186,7 @@ func (s *MappingSet) AddMapping(ms MappingSpace, ar usermem.AddrRange, offset ui\nswitch {\ncase seg.Ok() && seg.Start() < mr.End:\nseg = s.Isolate(seg, mr)\n- seg.Value()[subsetMapping(mr, seg.Range(), ms, ar.Start)] = struct{}{}\n+ seg.Value()[subsetMapping(mr, seg.Range(), ms, ar.Start, writable)] = struct{}{}\nseg, gap = seg.NextNonEmpty()\ncase gap.Ok() && gap.Start() < mr.End:\n@@ -199,7 +205,7 @@ func (s *MappingSet) AddMapping(ms MappingSpace, ar usermem.AddrRange, offset ui\n// MappableRanges that now have no mappings.\n//\n// Preconditions: As for Mappable.RemoveMapping.\n-func (s *MappingSet) RemoveMapping(ms MappingSpace, ar usermem.AddrRange, offset uint64) []MappableRange {\n+func (s *MappingSet) RemoveMapping(ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) []MappableRange {\nmr := MappableRange{offset, offset + uint64(ar.Length())}\nvar unmapped []MappableRange\n@@ -213,7 +219,7 @@ func (s *MappingSet) RemoveMapping(ms MappingSpace, ar usermem.AddrRange, offset\n// Remove this part of the mapping.\nmappings := seg.Value()\n- delete(mappings, subsetMapping(mr, seg.Range(), ms, ar.Start))\n+ delete(mappings, subsetMapping(mr, seg.Range(), ms, ar.Start, writable))\nif len(mappings) == 0 {\nunmapped = append(unmapped, seg.Range())\n@@ -231,7 +237,7 @@ func (s *MappingSet) Invalidate(mr MappableRange, opts InvalidateOpts) {\nfor seg := s.LowerBoundSegment(mr.Start); seg.Ok() && seg.Start() < mr.End; seg = seg.NextSegment() {\nsegMR := seg.Range()\nfor m := range seg.Value() {\n- region := subsetMapping(segMR, segMR.Intersect(mr), m.MappingSpace, m.AddrRange.Start)\n+ region := subsetMapping(segMR, segMR.Intersect(mr), m.MappingSpace, m.AddrRange.Start, m.Writable)\nregion.invalidate(opts)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/memmap/mapping_set_test.go", "new_path": "pkg/sentry/memmap/mapping_set_test.go", "diff": "@@ -40,7 +40,7 @@ func TestAddRemoveMapping(t *testing.T) {\nset := MappingSet{}\nms := &testMappingSpace{}\n- mapped := set.AddMapping(ms, usermem.AddrRange{0x10000, 0x12000}, 0x1000)\n+ mapped := set.AddMapping(ms, usermem.AddrRange{0x10000, 0x12000}, 0x1000, true)\nif got, want := mapped, []MappableRange{{0x1000, 0x3000}}; !reflect.DeepEqual(got, want) {\nt.Errorf(\"AddMapping: got %+v, wanted %+v\", got, 
want)\n}\n@@ -49,7 +49,7 @@ func TestAddRemoveMapping(t *testing.T) {\n// [0x10000, 0x12000) => [0x1000, 0x3000)\nt.Log(&set)\n- mapped = set.AddMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000)\n+ mapped = set.AddMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000, true)\nif len(mapped) != 0 {\nt.Errorf(\"AddMapping: got %+v, wanted []\", mapped)\n}\n@@ -59,7 +59,7 @@ func TestAddRemoveMapping(t *testing.T) {\n// [0x11000, 0x12000) and [0x20000, 0x21000) => [0x2000, 0x3000)\nt.Log(&set)\n- mapped = set.AddMapping(ms, usermem.AddrRange{0x30000, 0x31000}, 0x4000)\n+ mapped = set.AddMapping(ms, usermem.AddrRange{0x30000, 0x31000}, 0x4000, true)\nif got, want := mapped, []MappableRange{{0x4000, 0x5000}}; !reflect.DeepEqual(got, want) {\nt.Errorf(\"AddMapping: got %+v, wanted %+v\", got, want)\n}\n@@ -70,7 +70,7 @@ func TestAddRemoveMapping(t *testing.T) {\n// [0x30000, 0x31000) => [0x4000, 0x5000)\nt.Log(&set)\n- mapped = set.AddMapping(ms, usermem.AddrRange{0x12000, 0x15000}, 0x3000)\n+ mapped = set.AddMapping(ms, usermem.AddrRange{0x12000, 0x15000}, 0x3000, true)\nif got, want := mapped, []MappableRange{{0x3000, 0x4000}, {0x5000, 0x6000}}; !reflect.DeepEqual(got, want) {\nt.Errorf(\"AddMapping: got %+v, wanted %+v\", got, want)\n}\n@@ -83,7 +83,7 @@ func TestAddRemoveMapping(t *testing.T) {\n// [0x14000, 0x15000) => [0x5000, 0x6000)\nt.Log(&set)\n- unmapped := set.RemoveMapping(ms, usermem.AddrRange{0x10000, 0x11000}, 0x1000)\n+ unmapped := set.RemoveMapping(ms, usermem.AddrRange{0x10000, 0x11000}, 0x1000, true)\nif got, want := unmapped, []MappableRange{{0x1000, 0x2000}}; !reflect.DeepEqual(got, want) {\nt.Errorf(\"RemoveMapping: got %+v, wanted %+v\", got, want)\n}\n@@ -95,7 +95,7 @@ func TestAddRemoveMapping(t *testing.T) {\n// [0x14000, 0x15000) => [0x5000, 0x6000)\nt.Log(&set)\n- unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000)\n+ unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000, true)\nif len(unmapped) != 0 {\nt.Errorf(\"RemoveMapping: got %+v, wanted []\", unmapped)\n}\n@@ -106,7 +106,7 @@ func TestAddRemoveMapping(t *testing.T) {\n// [0x14000, 0x15000) => [0x5000, 0x6000)\nt.Log(&set)\n- unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x11000, 0x15000}, 0x2000)\n+ unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x11000, 0x15000}, 0x2000, true)\nif got, want := unmapped, []MappableRange{{0x2000, 0x4000}, {0x5000, 0x6000}}; !reflect.DeepEqual(got, want) {\nt.Errorf(\"RemoveMapping: got %+v, wanted %+v\", got, want)\n}\n@@ -115,7 +115,7 @@ func TestAddRemoveMapping(t *testing.T) {\n// [0x30000, 0x31000) => [0x4000, 0x5000)\nt.Log(&set)\n- unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x30000, 0x31000}, 0x4000)\n+ unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x30000, 0x31000}, 0x4000, true)\nif got, want := unmapped, []MappableRange{{0x4000, 0x5000}}; !reflect.DeepEqual(got, want) {\nt.Errorf(\"RemoveMapping: got %+v, wanted %+v\", got, want)\n}\n@@ -125,7 +125,7 @@ func TestInvalidateWholeMapping(t *testing.T) {\nset := MappingSet{}\nms := &testMappingSpace{}\n- set.AddMapping(ms, usermem.AddrRange{0x10000, 0x11000}, 0)\n+ set.AddMapping(ms, usermem.AddrRange{0x10000, 0x11000}, 0, true)\n// Mappings:\n// [0x10000, 0x11000) => [0, 0x1000)\nt.Log(&set)\n@@ -139,7 +139,7 @@ func TestInvalidatePartialMapping(t *testing.T) {\nset := MappingSet{}\nms := &testMappingSpace{}\n- set.AddMapping(ms, usermem.AddrRange{0x10000, 0x13000}, 0)\n+ set.AddMapping(ms, usermem.AddrRange{0x10000, 0x13000}, 0, true)\n// 
Mappings:\n// [0x10000, 0x13000) => [0, 0x3000)\nt.Log(&set)\n@@ -153,8 +153,8 @@ func TestInvalidateMultipleMappings(t *testing.T) {\nset := MappingSet{}\nms := &testMappingSpace{}\n- set.AddMapping(ms, usermem.AddrRange{0x10000, 0x11000}, 0)\n- set.AddMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000)\n+ set.AddMapping(ms, usermem.AddrRange{0x10000, 0x11000}, 0, true)\n+ set.AddMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000, true)\n// Mappings:\n// [0x10000, 0x11000) => [0, 0x1000)\n// [0x12000, 0x13000) => [0x2000, 0x3000)\n@@ -170,8 +170,8 @@ func TestInvalidateOverlappingMappings(t *testing.T) {\nms1 := &testMappingSpace{}\nms2 := &testMappingSpace{}\n- set.AddMapping(ms1, usermem.AddrRange{0x10000, 0x12000}, 0)\n- set.AddMapping(ms2, usermem.AddrRange{0x20000, 0x22000}, 0x1000)\n+ set.AddMapping(ms1, usermem.AddrRange{0x10000, 0x12000}, 0, true)\n+ set.AddMapping(ms2, usermem.AddrRange{0x20000, 0x22000}, 0x1000, true)\n// Mappings:\n// ms1:[0x10000, 0x12000) => [0, 0x2000)\n// ms2:[0x11000, 0x13000) => [0x1000, 0x3000)\n@@ -184,3 +184,77 @@ func TestInvalidateOverlappingMappings(t *testing.T) {\nt.Errorf(\"Invalidate: ms1: got %+v, wanted %+v\", got, want)\n}\n}\n+\n+func TestMixedWritableMappings(t *testing.T) {\n+ set := MappingSet{}\n+ ms := &testMappingSpace{}\n+\n+ mapped := set.AddMapping(ms, usermem.AddrRange{0x10000, 0x12000}, 0x1000, true)\n+ if got, want := mapped, []MappableRange{{0x1000, 0x3000}}; !reflect.DeepEqual(got, want) {\n+ t.Errorf(\"AddMapping: got %+v, wanted %+v\", got, want)\n+ }\n+\n+ // Mappings:\n+ // [0x10000, 0x12000) writable => [0x1000, 0x3000)\n+ t.Log(&set)\n+\n+ mapped = set.AddMapping(ms, usermem.AddrRange{0x20000, 0x22000}, 0x2000, false)\n+ if got, want := mapped, []MappableRange{{0x3000, 0x4000}}; !reflect.DeepEqual(got, want) {\n+ t.Errorf(\"AddMapping: got %+v, wanted %+v\", got, want)\n+ }\n+\n+ // Mappings:\n+ // [0x10000, 0x11000) writable => [0x1000, 0x2000)\n+ // [0x11000, 0x12000) writable and [0x20000, 0x21000) readonly => [0x2000, 0x3000)\n+ // [0x21000, 0x22000) readonly => [0x3000, 0x4000)\n+ t.Log(&set)\n+\n+ // Unmap should fail because we specified the readonly map address range, but\n+ // asked to unmap a writable segment.\n+ unmapped := set.RemoveMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000, true)\n+ if len(unmapped) != 0 {\n+ t.Errorf(\"RemoveMapping: got %+v, wanted []\", unmapped)\n+ }\n+\n+ // Readonly mapping removed, but writable mapping still exists in the range,\n+ // so no mappable range fully unmapped.\n+ unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000, false)\n+ if len(unmapped) != 0 {\n+ t.Errorf(\"RemoveMapping: got %+v, wanted []\", unmapped)\n+ }\n+\n+ // Mappings:\n+ // [0x10000, 0x12000) writable => [0x1000, 0x3000)\n+ // [0x21000, 0x22000) readonly => [0x3000, 0x4000)\n+ t.Log(&set)\n+\n+ unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x11000, 0x12000}, 0x2000, true)\n+ if got, want := unmapped, []MappableRange{{0x2000, 0x3000}}; !reflect.DeepEqual(got, want) {\n+ t.Errorf(\"RemoveMapping: got %+v, wanted %+v\", got, want)\n+ }\n+\n+ // Mappings:\n+ // [0x10000, 0x12000) writable => [0x1000, 0x3000)\n+ // [0x21000, 0x22000) readonly => [0x3000, 0x4000)\n+ t.Log(&set)\n+\n+ // Unmap should fail since writable bit doesn't match.\n+ unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x10000, 0x12000}, 0x1000, false)\n+ if len(unmapped) != 0 {\n+ t.Errorf(\"RemoveMapping: got %+v, wanted []\", unmapped)\n+ }\n+\n+ unmapped = set.RemoveMapping(ms, 
usermem.AddrRange{0x10000, 0x12000}, 0x1000, true)\n+ if got, want := unmapped, []MappableRange{{0x1000, 0x2000}}; !reflect.DeepEqual(got, want) {\n+ t.Errorf(\"RemoveMapping: got %+v, wanted %+v\", got, want)\n+ }\n+\n+ // Mappings:\n+ // [0x21000, 0x22000) readonly => [0x3000, 0x4000)\n+ t.Log(&set)\n+\n+ unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x21000, 0x22000}, 0x3000, false)\n+ if got, want := unmapped, []MappableRange{{0x3000, 0x4000}}; !reflect.DeepEqual(got, want) {\n+ t.Errorf(\"RemoveMapping: got %+v, wanted %+v\", got, want)\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/memmap/memmap.go", "new_path": "pkg/sentry/memmap/memmap.go", "diff": "@@ -36,16 +36,22 @@ type Mappable interface {\n// AddMapping notifies the Mappable of a mapping from addresses ar in ms to\n// offsets [offset, offset+ar.Length()) in this Mappable.\n//\n+ // The writable flag indicates whether the backing data for a Mappable can\n+ // be modified through the mapping. Effectively, this means a shared mapping\n+ // where Translate may be called with at.Write == true. This is a property\n+ // established at mapping creation and must remain constant throughout the\n+ // lifetime of the mapping.\n+ //\n// Preconditions: offset+ar.Length() does not overflow.\n- AddMapping(ctx context.Context, ms MappingSpace, ar usermem.AddrRange, offset uint64) error\n+ AddMapping(ctx context.Context, ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error\n// RemoveMapping notifies the Mappable of the removal of a mapping from\n// addresses ar in ms to offsets [offset, offset+ar.Length()) in this\n// Mappable.\n//\n// Preconditions: offset+ar.Length() does not overflow. The removed mapping\n- // must exist.\n- RemoveMapping(ctx context.Context, ms MappingSpace, ar usermem.AddrRange, offset uint64)\n+ // must exist. writable must match the corresponding call to AddMapping.\n+ RemoveMapping(ctx context.Context, ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool)\n// CopyMapping notifies the Mappable of an attempt to copy a mapping in ms\n// from srcAR to dstAR. For most Mappables, this is equivalent to\n@@ -56,8 +62,9 @@ type Mappable interface {\n// MappingSpace; it is analogous to Linux's vm_operations_struct::mremap.\n//\n// Preconditions: offset+srcAR.Length() and offset+dstAR.Length() do not\n- // overflow. The mapping at srcAR must exist.\n- CopyMapping(ctx context.Context, ms MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error\n+ // overflow. The mapping at srcAR must exist. 
writable must match the\n+ // corresponding call to AddMapping.\n+ CopyMapping(ctx context.Context, ms MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error\n// Translate returns the Mappable's current mappings for at least the range\n// of offsets specified by required, and at most the range of offsets\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/mm/aio_context.go", "new_path": "pkg/sentry/mm/aio_context.go", "diff": "@@ -244,7 +244,7 @@ func (m *aioMappable) Msync(ctx context.Context, mr memmap.MappableRange) error\n}\n// AddMapping implements memmap.Mappable.AddMapping.\n-func (m *aioMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) error {\n+func (m *aioMappable) AddMapping(_ context.Context, _ memmap.MappingSpace, ar usermem.AddrRange, offset uint64, _ bool) error {\n// Don't allow mappings to be expanded (in Linux, fs/aio.c:aio_ring_mmap()\n// sets VM_DONTEXPAND).\nif offset != 0 || uint64(ar.Length()) != aioRingBufferSize {\n@@ -254,11 +254,11 @@ func (m *aioMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar\n}\n// RemoveMapping implements memmap.Mappable.RemoveMapping.\n-func (m *aioMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) {\n+func (m *aioMappable) RemoveMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, uint64, bool) {\n}\n// CopyMapping implements memmap.Mappable.CopyMapping.\n-func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error {\n+func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, _ bool) error {\n// Don't allow mappings to be expanded (in Linux, fs/aio.c:aio_ring_mmap()\n// sets VM_DONTEXPAND).\nif offset != 0 || uint64(dstAR.Length()) != aioRingBufferSize {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/mm/lifecycle.go", "new_path": "pkg/sentry/mm/lifecycle.go", "diff": "@@ -81,7 +81,7 @@ func (mm *MemoryManager) Fork(ctx context.Context) (*MemoryManager, error) {\nvmaAR := srcvseg.Range()\n// Inform the Mappable, if any, of the new mapping.\nif vma.mappable != nil {\n- if err := vma.mappable.AddMapping(ctx, mm2, vmaAR, vma.off); err != nil {\n+ if err := vma.mappable.AddMapping(ctx, mm2, vmaAR, vma.off, vma.isMappableAsWritable()); err != nil {\nmm2.removeVMAsLocked(ctx, mm2.applicationAddrRange())\nreturn nil, err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/mm/mm.go", "new_path": "pkg/sentry/mm/mm.go", "diff": "@@ -349,6 +349,10 @@ func (v *vma) loadRealPerms(b int) {\n}\n}\n+func (v *vma) isMappableAsWritable() bool {\n+ return !v.private && v.maxPerms.Write\n+}\n+\n// pma represents a platform mapping area.\n//\n// +stateify savable\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/mm/special_mappable.go", "new_path": "pkg/sentry/mm/special_mappable.go", "diff": "@@ -76,16 +76,16 @@ func (m *SpecialMappable) Msync(ctx context.Context, mr memmap.MappableRange) er\n}\n// AddMapping implements memmap.Mappable.AddMapping.\n-func (m *SpecialMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) error {\n+func (*SpecialMappable) AddMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, uint64, bool) error {\nreturn nil\n}\n// RemoveMapping implements memmap.Mappable.RemoveMapping.\n-func (m *SpecialMappable) RemoveMapping(ctx context.Context, ms 
memmap.MappingSpace, ar usermem.AddrRange, offset uint64) {\n+func (*SpecialMappable) RemoveMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, uint64, bool) {\n}\n// CopyMapping implements memmap.Mappable.CopyMapping.\n-func (m *SpecialMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error {\n+func (*SpecialMappable) CopyMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, usermem.AddrRange, uint64, bool) error {\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/mm/syscalls.go", "new_path": "pkg/sentry/mm/syscalls.go", "diff": "@@ -443,7 +443,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi\nreturn 0, syserror.EINVAL\n}\n// Inform the Mappable, if any, of the new mapping.\n- if err := vma.mappable.CopyMapping(ctx, mm, oldAR, newAR, vseg.mappableOffsetAt(oldAR.Start)); err != nil {\n+ if err := vma.mappable.CopyMapping(ctx, mm, oldAR, newAR, vseg.mappableOffsetAt(oldAR.Start), vma.isMappableAsWritable()); err != nil {\nreturn 0, err\n}\n}\n@@ -498,7 +498,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi\n// Now that pmas have been moved to newAR, we can notify vma.mappable that\n// oldAR is no longer mapped.\nif vma.mappable != nil {\n- vma.mappable.RemoveMapping(ctx, mm, oldAR, vma.off)\n+ vma.mappable.RemoveMapping(ctx, mm, oldAR, vma.off, vma.isMappableAsWritable())\n}\nreturn newAR.Start, nil\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/mm/vma.go", "new_path": "pkg/sentry/mm/vma.go", "diff": "@@ -65,7 +65,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp\n// Inform the Mappable, if any, of the new mapping.\nif opts.Mappable != nil {\n- if err := opts.Mappable.AddMapping(ctx, mm, ar, opts.Offset); err != nil {\n+ if err := opts.Mappable.AddMapping(ctx, mm, ar, opts.Offset, !opts.Private && opts.MaxPerms.Write); err != nil {\nreturn vmaIterator{}, usermem.AddrRange{}, err\n}\n}\n@@ -332,7 +332,7 @@ func (mm *MemoryManager) removeVMAsLocked(ctx context.Context, ar usermem.AddrRa\nvmaAR := vseg.Range()\nvma := vseg.ValuePtr()\nif vma.mappable != nil {\n- vma.mappable.RemoveMapping(ctx, mm, vmaAR, vma.off)\n+ vma.mappable.RemoveMapping(ctx, mm, vmaAR, vma.off, vma.isMappableAsWritable())\n}\nif vma.id != nil {\nvma.id.DecRef()\n" } ]
Go
Apache License 2.0
google/gvisor
Pass information about map writableness to filesystems. This is necessary to implement file seals for memfds. PiperOrigin-RevId: 225239394 Change-Id: Ib3f1ab31385afc4b24e96cd81a05ef1bebbcbb70
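To show the user-visible feature this plumbing is aimed at, here is a hedged Go sketch (not gVisor code): memfd write seals can only be added once no writable shared mappings remain, which is exactly the writableness information filesystems now receive. Assumes Linux and golang.org/x/sys/unix.

    // Hypothetical sketch of memfd file seals; F_ADD_SEALS(F_SEAL_WRITE)
    // fails with EBUSY while a shared writable mapping of the fd exists.
    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        fd, err := unix.MemfdCreate("demo", unix.MFD_ALLOW_SEALING)
        if err != nil {
            panic(err)
        }
        defer unix.Close(fd)

        if err := unix.Ftruncate(fd, 4096); err != nil {
            panic(err)
        }

        // Forbid any future writes (and writable mappings) of the file.
        if _, err := unix.FcntlInt(uintptr(fd), unix.F_ADD_SEALS, unix.F_SEAL_WRITE); err != nil {
            panic(err)
        }

        _, err = unix.Write(fd, []byte("x"))
        fmt.Println(err) // expect EPERM once F_SEAL_WRITE is set
    }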
259,985
12.12.2018 13:17:46
28,800
f93c288dd70846f335239e2d0cb351135a756f51
Fix a data race on Shm.key.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/shm/shm.go", "new_path": "pkg/sentry/kernel/shm/shm.go", "diff": "@@ -66,16 +66,21 @@ type Registry struct {\n// userNS owns the IPC namespace this registry belong to. Immutable.\nuserNS *auth.UserNamespace\n+ // mu protects all fields below.\nmu sync.Mutex `state:\"nosave\"`\n- // shms maps segment ids to segments. Protected by mu.\n+ // shms maps segment ids to segments.\nshms map[ID]*Shm\n+ // keysToShms maps segment keys to segments.\n+ keysToShms map[Key]*Shm\n+\n// Sum of the sizes of all existing segments rounded up to page size, in\n- // units of page size. Protected by mu.\n+ // units of page size.\ntotalPages uint64\n- // lastIDUsed is protected by mu.\n+ // ID assigned to the last created segment. Used to quickly find the next\n+ // unused ID.\nlastIDUsed ID\n}\n@@ -84,6 +89,7 @@ func NewRegistry(userNS *auth.UserNamespace) *Registry {\nreturn &Registry{\nuserNS: userNS,\nshms: make(map[ID]*Shm),\n+ keysToShms: make(map[Key]*Shm),\n}\n}\n@@ -94,14 +100,20 @@ func (r *Registry) FindByID(id ID) *Shm {\nreturn r.shms[id]\n}\n-// Precondition: Caller must hold r.mu.\n-func (r *Registry) findByKey(key Key) *Shm {\n- for _, v := range r.shms {\n- if v.key == key {\n- return v\n- }\n+// dissociateKey removes the association between a segment and its key,\n+// preventing it from being discovered in the registry. This doesn't necessarily\n+// mean the segment is about to be destroyed. This is analogous to unlinking a\n+// file; the segment can still be used by a process already referencing it, but\n+// cannot be discovered by a new process.\n+func (r *Registry) dissociateKey(s *Shm) {\n+ r.mu.Lock()\n+ defer r.mu.Unlock()\n+ s.mu.Lock()\n+ defer s.mu.Unlock()\n+ if s.key != linux.IPC_PRIVATE {\n+ delete(r.keysToShms, s.key)\n+ s.key = linux.IPC_PRIVATE\n}\n- return nil\n}\n// FindOrCreate looks up or creates a segment in the registry. It's functionally\n@@ -127,7 +139,7 @@ func (r *Registry) FindOrCreate(ctx context.Context, pid int32, key Key, size ui\nif !private {\n// Look up an existing segment.\n- if shm := r.findByKey(key); shm != nil {\n+ if shm := r.keysToShms[key]; shm != nil {\nshm.mu.Lock()\ndefer shm.mu.Unlock()\n@@ -184,6 +196,8 @@ func (r *Registry) FindOrCreate(ctx context.Context, pid int32, key Key, size ui\n}\n// newShm creates a new segment in the registry.\n+//\n+// Precondition: Caller must hold r.mu.\nfunc (r *Registry) newShm(ctx context.Context, pid int32, key Key, creator fs.FileOwner, perms fs.FilePermissions, size uint64) (*Shm, error) {\np := platform.FromContext(ctx)\nif p == nil {\n@@ -219,8 +233,10 @@ func (r *Registry) newShm(ctx context.Context, pid int32, key Key, creator fs.Fi\n}\nif r.shms[id] == nil {\nr.lastIDUsed = id\n- r.shms[id] = shm\n+\nshm.ID = id\n+ r.shms[id] = shm\n+ r.keysToShms[key] = shm\nr.totalPages += effectiveSize / usermem.PageSize\n@@ -258,13 +274,20 @@ func (r *Registry) ShmInfo() *linux.ShmInfo {\n}\n}\n-// remove unregisters a segment from this registry, preventing it from being\n-// discovered in the future. 
Caller is responsible for ensuring s is destroyed.\n+// remove deletes a segment from this registry, deaccounting the memory used by\n+// the segment.\n//\n-// Precondition: To preserve lock ordering, caller must not hold s.mu.\n+// Precondition: Must follow a call to r.dissociateKey(s).\nfunc (r *Registry) remove(s *Shm) {\nr.mu.Lock()\ndefer r.mu.Unlock()\n+ s.mu.Lock()\n+ defer s.mu.Unlock()\n+\n+ if s.key != linux.IPC_PRIVATE {\n+ panic(fmt.Sprintf(\"Attempted to remove shm segment %+v from the registry whose key is still associated\", s))\n+ }\n+\ndelete(r.shms, s.ID)\nr.totalPages -= s.effectiveSize / usermem.PageSize\n}\n@@ -314,12 +337,12 @@ type Shm struct {\n// segment. Immutable.\nfr platform.FileRange\n- // key is the public identifier for this segment.\n- key Key\n-\n// mu protects all fields below.\nmu sync.Mutex `state:\"nosave\"`\n+ // key is the public identifier for this segment.\n+ key Key\n+\n// perms is the access permissions for the segment.\nperms fs.FilePermissions\n@@ -342,12 +365,14 @@ type Shm struct {\n// pendingDestruction indicates the segment was marked as destroyed through\n// shmctl(IPC_RMID). When marked as destroyed, the segment will not be found\n// in the registry and can no longer be attached. When the last user\n- // detaches from the segment, it is destroyed. Protected by mu.\n+ // detaches from the segment, it is destroyed.\npendingDestruction bool\n}\n// MappedName implements memmap.MappingIdentity.MappedName.\nfunc (s *Shm) MappedName(ctx context.Context) string {\n+ s.mu.Lock()\n+ defer s.mu.Unlock()\nreturn fmt.Sprintf(\"SYSV%08d\", s.key)\n}\n@@ -364,6 +389,8 @@ func (s *Shm) InodeID() uint64 {\n}\n// DecRef overrides refs.RefCount.DecRef with a destructor.\n+//\n+// Precondition: Caller must not hold s.mu.\nfunc (s *Shm) DecRef() {\ns.DecRefWithDestructor(s.destroy)\n}\n@@ -572,28 +599,30 @@ func (s *Shm) Set(ctx context.Context, ds *linux.ShmidDS) error {\n}\nfunc (s *Shm) destroy() {\n- s.registry.remove(s)\ns.p.Memory().DecRef(s.fr)\n+ s.registry.remove(s)\n}\n-// MarkDestroyed marks a shm for destruction. The shm is actually destroyed once\n-// it has no references. See shmctl(IPC_RMID).\n+// MarkDestroyed marks a segment for destruction. The segment is actually\n+// destroyed once it has no references. MarkDestroyed may be called multiple\n+// times, and is safe to call after a segment has already been destroyed. See\n+// shmctl(IPC_RMID).\nfunc (s *Shm) MarkDestroyed() {\n- s.mu.Lock()\n- defer s.mu.Unlock()\n-\n- // Prevent the segment from being found in the registry.\n- s.key = linux.IPC_PRIVATE\n+ s.registry.dissociateKey(s)\n+ s.mu.Lock()\n// Only drop the segment's self-reference once, when destruction is\n- // requested. Otherwise, repeated calls shmctl(IPC_RMID) would force a\n+ // requested. Otherwise, repeated calls to shmctl(IPC_RMID) would force a\n// segment to be destroyed prematurely, potentially with active maps to the\n// segment's address range. Remaining references are dropped when the\n// segment is detached or unmaped.\nif !s.pendingDestruction {\ns.pendingDestruction = true\n+ s.mu.Unlock() // Must release s.mu before calling s.DecRef.\ns.DecRef()\n+ return\n}\n+ s.mu.Unlock()\n}\n// checkOwnership verifies whether a segment may be accessed by ctx as an\n" } ]
Go
Apache License 2.0
google/gvisor
Fix a data race on Shm.key. PiperOrigin-RevId: 225240907 Change-Id: Ie568ce3cd643f3e4a0eaa0444f4ed589dcf6031f
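A hedged, much-reduced Go illustration of the bug class fixed here (not the sentry code itself): a field read by one goroutine and written by another without holding the mutex is a data race that `go run -race` reports; the fix has the same shape as the commit's, taking mu in both accessors.

    // Hypothetical reduction: key must only be touched with mu held.
    package main

    import (
        "fmt"
        "sync"
    )

    type segment struct {
        mu  sync.Mutex
        key int // protected by mu; reading it without mu races with MarkDestroyed
    }

    func (s *segment) MappedName() string {
        s.mu.Lock()
        defer s.mu.Unlock()
        return fmt.Sprintf("SYSV%08d", s.key)
    }

    func (s *segment) MarkDestroyed() {
        s.mu.Lock()
        s.key = 0 // analogous to resetting the key to IPC_PRIVATE
        s.mu.Unlock()
    }

    func main() {
        s := &segment{key: 42}
        done := make(chan struct{})
        go func() { s.MarkDestroyed(); close(done) }()
        fmt.Println(s.MappedName())
        <-done
    }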
259,854
12.12.2018 18:40:04
28,800
f484b6d4c2f1dea6169ed50a5d3d1809e8f007be
Flush socket test description

This ensures that we know what type of socket caused a timeout.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_test_util.h", "new_path": "test/syscalls/linux/socket_test_util.h", "diff": "@@ -379,6 +379,7 @@ class SocketPairTest : public ::testing::TestWithParam<SocketPairKind> {\nSocketPairTest() {\n// gUnit uses printf, so so will we.\nprintf(\"Testing with %s\\n\", GetParam().description.c_str());\n+ fflush(stdout);\n}\nPosixErrorOr<std::unique_ptr<SocketPair>> NewSocketPair() const {\n" } ]
Go
Apache License 2.0
google/gvisor
Flush socket test description

This ensures that we know what type of socket caused a timeout.

PiperOrigin-RevId: 225294255
Change-Id: I9033bd0f3791d3b5714aa08d111cf58a3014d252
259,854
13.12.2018 13:01:56
28,800
6253d32cc932e76608be5c57a4870b3d61464487
transport/tcp: remove unused error return values
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/accept.go", "new_path": "pkg/tcpip/transport/tcp/accept.go", "diff": "@@ -245,11 +245,7 @@ func (l *listenContext) createEndpointAndPerformHandshake(s *segment, opts *head\n}\n// Perform the 3-way handshake.\n- h, err := newHandshake(ep, l.rcvWnd)\n- if err != nil {\n- ep.Close()\n- return nil, err\n- }\n+ h := newHandshake(ep, l.rcvWnd)\nh.resetToSynRcvd(cookie, irs, opts)\nif err := h.execute(); err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/connect.go", "new_path": "pkg/tcpip/transport/tcp/connect.go", "diff": "@@ -86,18 +86,15 @@ type handshake struct {\nrcvWndScale int\n}\n-func newHandshake(ep *endpoint, rcvWnd seqnum.Size) (handshake, *tcpip.Error) {\n+func newHandshake(ep *endpoint, rcvWnd seqnum.Size) handshake {\nh := handshake{\nep: ep,\nactive: true,\nrcvWnd: rcvWnd,\nrcvWndScale: FindWndScale(rcvWnd),\n}\n- if err := h.resetState(); err != nil {\n- return handshake{}, err\n- }\n-\n- return h, nil\n+ h.resetState()\n+ return h\n}\n// FindWndScale determines the window scale to use for the given maximum window\n@@ -119,7 +116,7 @@ func FindWndScale(wnd seqnum.Size) int {\n// resetState resets the state of the handshake object such that it becomes\n// ready for a new 3-way handshake.\n-func (h *handshake) resetState() *tcpip.Error {\n+func (h *handshake) resetState() {\nb := make([]byte, 4)\nif _, err := rand.Read(b); err != nil {\npanic(err)\n@@ -130,8 +127,6 @@ func (h *handshake) resetState() *tcpip.Error {\nh.ackNum = 0\nh.mss = 0\nh.iss = seqnum.Value(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)\n-\n- return nil\n}\n// effectiveRcvWndScale returns the effective receive window scale to be used.\n@@ -269,9 +264,7 @@ func (h *handshake) synRcvdState(s *segment) *tcpip.Error {\nreturn tcpip.ErrInvalidEndpointState\n}\n- if err := h.resetState(); err != nil {\n- return err\n- }\n+ h.resetState()\nsynOpts := header.TCPSynOptions{\nWS: h.rcvWndScale,\nTS: h.ep.sendTSOk,\n@@ -868,11 +861,8 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error {\n// This is an active connection, so we must initiate the 3-way\n// handshake, and then inform potential waiters about its\n// completion.\n- h, err := newHandshake(e, seqnum.Size(e.receiveBufferAvailable()))\n- if err == nil {\n- err = h.execute()\n- }\n- if err != nil {\n+ h := newHandshake(e, seqnum.Size(e.receiveBufferAvailable()))\n+ if err := h.execute(); err != nil {\ne.lastErrorMu.Lock()\ne.lastError = err\ne.lastErrorMu.Unlock()\n" } ]
Go
Apache License 2.0
google/gvisor
transport/tcp: remove unused error return values PiperOrigin-RevId: 225421480 Change-Id: I1e9259b0b7e8490164e830b73338a615129c7f0e
259,854
13.12.2018 13:19:39
28,800
4659f7ed1a63f031b5450d065684ef6c32d35f01
Fix WAITALL and RCVTIMEO interaction
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/epsocket/epsocket.go", "new_path": "pkg/sentry/socket/epsocket/epsocket.go", "diff": "@@ -1352,6 +1352,9 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\ndst = dst.DropFirst(rn)\nif err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {\n+ if n > 0 {\n+ return n, senderAddr, senderAddrLen, controlMessages, nil\n+ }\nif err == syserror.ETIMEDOUT {\nreturn 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/socket.go", "new_path": "pkg/sentry/socket/socket.go", "diff": "@@ -86,6 +86,8 @@ type Socket interface {\n//\n// senderAddrLen is the address length to be returned to the application,\n// not necessarily the actual length of the address.\n+ //\n+ // If err != nil, the recv was not successful.\nRecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, senderAddr interface{}, senderAddrLen uint32, controlMessages ControlMessages, err *syserr.Error)\n// SendMsg implements the sendmsg(2) linux syscall. SendMsg does not take\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/unix.go", "new_path": "pkg/sentry/socket/unix/unix.go", "diff": "@@ -538,6 +538,9 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\n}\ntotal += n\nif err != nil || !waitAll || s.isPacket || n >= dst.NumBytes() {\n+ if total > 0 {\n+ err = nil\n+ }\nreturn int(total), from, fromLen, socket.ControlMessages{Unix: r.Control}, syserr.FromError(err)\n}\n@@ -546,10 +549,13 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\n}\nif err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {\n+ if total > 0 {\n+ err = nil\n+ }\nif err == syserror.ETIMEDOUT {\n- return 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n+ return int(total), nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n}\n- return 0, nil, 0, socket.ControlMessages{}, syserr.FromError(err)\n+ return int(total), nil, 0, socket.ControlMessages{}, syserr.FromError(err)\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_generic.cc", "new_path": "test/syscalls/linux/socket_generic.cc", "diff": "@@ -395,6 +395,8 @@ TEST_P(AllSocketPairTest, RecvWaitAll) {\nASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,\nsizeof(received_data), MSG_WAITALL),\nSyscallSucceedsWithValue(sizeof(sent_data)));\n+\n+ EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));\n}\nTEST_P(AllSocketPairTest, RecvWaitAllDontWait) {\n@@ -406,5 +408,29 @@ TEST_P(AllSocketPairTest, RecvWaitAllDontWait) {\nSyscallFailsWithErrno(EAGAIN));\n}\n+TEST_P(AllSocketPairTest, RecvTimeoutWaitAll) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ struct timeval tv {\n+ .tv_sec = 0, .tv_usec = 200000 // 200ms\n+ };\n+ EXPECT_THAT(setsockopt(sockets->second_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv,\n+ sizeof(tv)),\n+ SyscallSucceeds());\n+\n+ char sent_data[100];\n+ RandomizeBuffer(sent_data, sizeof(sent_data));\n+\n+ ASSERT_THAT(write(sockets->first_fd(), sent_data, sizeof(sent_data)),\n+ SyscallSucceedsWithValue(sizeof(sent_data)));\n+\n+ char received_data[sizeof(sent_data) * 2] = {};\n+ ASSERT_THAT(RetryEINTR(recv)(sockets->second_fd(), received_data,\n+ sizeof(received_data), MSG_WAITALL),\n+ SyscallSucceedsWithValue(sizeof(sent_data)));\n+\n+ EXPECT_EQ(0, 
memcmp(sent_data, received_data, sizeof(sent_data)));\n+}\n+\n} // namespace testing\n} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Fix WAITALL and RCVTIMEO interaction PiperOrigin-RevId: 225424296 Change-Id: I60fcc2b859339dca9963cb32227a287e719ab765
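The MSG_WAITALL/SO_RCVTIMEO behavior covered by the record above can be exercised from user space roughly as in the sketch below. This is an illustrative sketch against the plain Linux socket API via golang.org/x/sys/unix, not code from the repository; the 100-byte payload and 200ms timeout are arbitrary choices that simply mirror the RecvTimeoutWaitAll test added in the diff.

package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	// Connected pair of stream sockets standing in for the test's socket pair.
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	// 200ms receive timeout on the reading end, as in the added test.
	tv := unix.NsecToTimeval(int64(200 * time.Millisecond))
	if err := unix.SetsockoptTimeval(fds[1], unix.SOL_SOCKET, unix.SO_RCVTIMEO, &tv); err != nil {
		panic(err)
	}

	// Write 100 bytes, then ask for 200 with MSG_WAITALL. Linux (and, per the
	// change above, gVisor) returns the 100 bytes already queued once the
	// timeout fires, rather than reporting EAGAIN for data that was consumed.
	if _, err := unix.Write(fds[0], make([]byte, 100)); err != nil {
		panic(err)
	}
	buf := make([]byte, 200)
	n, _, err := unix.Recvfrom(fds[1], buf, unix.MSG_WAITALL)
	fmt.Println(n, err) // expected: 100 <nil>
}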
259,884
14.12.2018 13:21:21
0
3a7894df044c149113c97edd273e57703c32534b
Fix sandbox.json instructions for containerd 1.1 Annotations aren't part of the PodSandboxMetadata but rather part of the PodSandboxConfig object. crictl's parsing logic seems to ignore extraneous fields so it silently fails to create pods using the untrusted workload runtime. See:
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -78,10 +78,10 @@ $ cat > sandbox.json << EOL\n\"name\": \"nginx-sandbox\",\n\"namespace\": \"default\",\n\"attempt\": 1,\n- \"uid\": \"hdishd83djaidwnduwk28bcsb\",\n+ \"uid\": \"hdishd83djaidwnduwk28bcsb\"\n+ },\n\"annotations\": {\n\"io.kubernetes.cri.untrusted-workload\": \"true\"\n- }\n},\n\"linux\": {\n},\n" } ]
Go
Apache License 2.0
google/gvisor
Fix sandbox.json instructions for containerd 1.1 Annotations aren't part of the PodSandboxMetadata but rather part of the PodSandboxConfig object. crictl's parsing logic seems to ignore extraneous fields so it silently fails to create pods using the untrusted workload runtime. See: https://github.com/kubernetes-sigs/cri-tools/blob/v1.13.0/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2/api.pb.go#L775
259,854
14.12.2018 16:12:51
28,800
e1dcf92ec5cf7d9bf58fb322f46f6ae2d98699d2
Implement SO_SNDTIMEO
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/BUILD", "new_path": "pkg/sentry/fs/host/BUILD", "diff": "@@ -71,6 +71,7 @@ go_test(\n\"//pkg/sentry/context\",\n\"//pkg/sentry/context/contexttest\",\n\"//pkg/sentry/fs\",\n+ \"//pkg/sentry/kernel/time\",\n\"//pkg/sentry/socket\",\n\"//pkg/sentry/socket/unix/transport\",\n\"//pkg/sentry/usermem\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/socket_test.go", "new_path": "pkg/sentry/fs/host/socket_test.go", "diff": "@@ -21,6 +21,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/fd\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest\"\n+ ktime \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/socket\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/socket/unix/transport\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n@@ -142,7 +143,7 @@ func TestSocketSendMsgLen0(t *testing.T) {\ndefer sfile.DecRef()\ns := sfile.FileOperations.(socket.Socket)\n- n, terr := s.SendMsg(nil, usermem.BytesIOSequence(nil), []byte{}, 0, socket.ControlMessages{})\n+ n, terr := s.SendMsg(nil, usermem.BytesIOSequence(nil), []byte{}, 0, false, ktime.Time{}, socket.ControlMessages{})\nif n != 0 {\nt.Fatalf(\"socket sendmsg() failed: %v wrote: %d\", terr, n)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/epsocket/epsocket.go", "new_path": "pkg/sentry/socket/epsocket/epsocket.go", "diff": "@@ -30,6 +30,7 @@ import (\n\"strings\"\n\"sync\"\n\"syscall\"\n+ \"time\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/binary\"\n@@ -137,12 +138,12 @@ type commonEndpoint interface {\n//\n// +stateify savable\ntype SocketOperations struct {\n- socket.ReceiveTimeout\nfsutil.PipeSeek `state:\"nosave\"`\nfsutil.NotDirReaddir `state:\"nosave\"`\nfsutil.NoFsync `state:\"nosave\"`\nfsutil.NoopFlush `state:\"nosave\"`\nfsutil.NoMMap `state:\"nosave\"`\n+ socket.SendReceiveTimeout\n*waiter.Queue\nfamily int\n@@ -643,7 +644,16 @@ func getSockOptSocket(t *kernel.Task, s socket.Socket, ep commonEndpoint, family\n}\nreturn syscall.Linger{}, nil\n+ case linux.SO_SNDTIMEO:\n+ // TODO: Linux allows shorter lengths for partial results.\n+ if outLen < linux.SizeOfTimeval {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ return linux.NsecToTimeval(s.SendTimeout()), nil\n+\ncase linux.SO_RCVTIMEO:\n+ // TODO: Linux allows shorter lengths for partial results.\nif outLen < linux.SizeOfTimeval {\nreturn nil, syserr.ErrInvalidArgument\n}\n@@ -833,6 +843,19 @@ func setSockOptSocket(t *kernel.Task, s socket.Socket, ep commonEndpoint, name i\nv := usermem.ByteOrder.Uint32(optVal)\nreturn syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.PasscredOption(v)))\n+ case linux.SO_SNDTIMEO:\n+ if len(optVal) < linux.SizeOfTimeval {\n+ return syserr.ErrInvalidArgument\n+ }\n+\n+ var v linux.Timeval\n+ binary.Unmarshal(optVal[:linux.SizeOfTimeval], usermem.ByteOrder, &v)\n+ if v.Usec < 0 || v.Usec >= int64(time.Second/time.Microsecond) {\n+ return syserr.ErrDomain\n+ }\n+ s.SetSendTimeout(v.ToNsecCapped())\n+ return nil\n+\ncase linux.SO_RCVTIMEO:\nif len(optVal) < linux.SizeOfTimeval {\nreturn syserr.ErrInvalidArgument\n@@ -840,6 +863,9 @@ func setSockOptSocket(t *kernel.Task, s socket.Socket, ep commonEndpoint, name i\nvar v linux.Timeval\nbinary.Unmarshal(optVal[:linux.SizeOfTimeval], usermem.ByteOrder, &v)\n+ if v.Usec < 0 || v.Usec >= int64(time.Second/time.Microsecond) {\n+ return syserr.ErrDomain\n+ 
}\ns.SetRecvTimeout(v.ToNsecCapped())\nreturn nil\n@@ -1365,7 +1391,7 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\n// SendMsg implements the linux syscall sendmsg(2) for sockets backed by\n// tcpip.Endpoint.\n-func (s *SocketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, controlMessages socket.ControlMessages) (int, *syserr.Error) {\n+func (s *SocketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, haveDeadline bool, deadline ktime.Time, controlMessages socket.ControlMessages) (int, *syserr.Error) {\n// Reject Unix control messages.\nif !controlMessages.Unix.Empty() {\nreturn 0, syserr.ErrInvalidArgument\n@@ -1431,7 +1457,10 @@ func (s *SocketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []\nreturn int(total), nil\n}\n- if err := t.Block(ch); err != nil {\n+ if err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {\n+ if err == syserror.ETIMEDOUT {\n+ return int(total), syserr.ErrTryAgain\n+ }\n// handleIOError will consume errors from t.Block if needed.\nreturn int(total), syserr.FromError(err)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/hostinet/socket.go", "new_path": "pkg/sentry/socket/hostinet/socket.go", "diff": "@@ -46,12 +46,12 @@ const (\n// socketOperations implements fs.FileOperations and socket.Socket for a socket\n// implemented using a host socket.\ntype socketOperations struct {\n- socket.ReceiveTimeout\nfsutil.PipeSeek `state:\"nosave\"`\nfsutil.NotDirReaddir `state:\"nosave\"`\nfsutil.NoFsync `state:\"nosave\"`\nfsutil.NoopFlush `state:\"nosave\"`\nfsutil.NoMMap `state:\"nosave\"`\n+ socket.SendReceiveTimeout\nfd int // must be O_NONBLOCK\nqueue waiter.Queue\n@@ -418,7 +418,7 @@ func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\n}\n// SendMsg implements socket.Socket.SendMsg.\n-func (s *socketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, controlMessages socket.ControlMessages) (int, *syserr.Error) {\n+func (s *socketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, haveDeadline bool, deadline ktime.Time, controlMessages socket.ControlMessages) (int, *syserr.Error) {\n// Whitelist flags.\nif flags&^(syscall.MSG_DONTWAIT|syscall.MSG_EOR|syscall.MSG_FASTOPEN|syscall.MSG_MORE|syscall.MSG_NOSIGNAL) != 0 {\nreturn 0, syserr.ErrInvalidArgument\n@@ -468,7 +468,10 @@ func (s *socketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []\npanic(fmt.Sprintf(\"CopyInTo: got (%d, %v), wanted (0, %v)\", n, err, err))\n}\nif ch != nil {\n- if err = t.Block(ch); err != nil {\n+ if err = t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {\n+ if err == syserror.ETIMEDOUT {\n+ err = syserror.ErrWouldBlock\n+ }\nbreak\n}\n} else {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netlink/socket.go", "new_path": "pkg/sentry/socket/netlink/socket.go", "diff": "@@ -65,12 +65,12 @@ var netlinkSocketDevice = device.NewAnonDevice()\n//\n// +stateify savable\ntype Socket struct {\n- socket.ReceiveTimeout\nfsutil.PipeSeek `state:\"nosave\"`\nfsutil.NotDirReaddir `state:\"nosave\"`\nfsutil.NoFsync `state:\"nosave\"`\nfsutil.NoopFlush `state:\"nosave\"`\nfsutil.NoMMap `state:\"nosave\"`\n+ socket.SendReceiveTimeout\n// ports provides netlink port allocation.\nports *port.Manager\n@@ -593,7 +593,7 @@ func (s *Socket) sendMsg(ctx context.Context, src usermem.IOSequence, to []byte,\n}\n// SendMsg implements 
socket.Socket.SendMsg.\n-func (s *Socket) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, controlMessages socket.ControlMessages) (int, *syserr.Error) {\n+func (s *Socket) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, haveDeadline bool, deadline ktime.Time, controlMessages socket.ControlMessages) (int, *syserr.Error) {\nreturn s.sendMsg(t, src, to, flags, controlMessages)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/rpcinet/socket.go", "new_path": "pkg/sentry/socket/rpcinet/socket.go", "diff": "@@ -17,6 +17,7 @@ package rpcinet\nimport (\n\"sync/atomic\"\n\"syscall\"\n+ \"time\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/binary\"\n@@ -44,12 +45,12 @@ import (\n// socketOperations implements fs.FileOperations and socket.Socket for a socket\n// implemented using a host socket.\ntype socketOperations struct {\n- socket.ReceiveTimeout\nfsutil.PipeSeek `state:\"nosave\"`\nfsutil.NotDirReaddir `state:\"nosave\"`\nfsutil.NoFsync `state:\"nosave\"`\nfsutil.NoopFlush `state:\"nosave\"`\nfsutil.NoMMap `state:\"nosave\"`\n+ socket.SendReceiveTimeout\nfd uint32 // must be O_NONBLOCK\nwq *waiter.Queue\n@@ -379,7 +380,8 @@ func (s *socketOperations) Shutdown(t *kernel.Task, how int) *syserr.Error {\n// GetSockOpt implements socket.Socket.GetSockOpt.\nfunc (s *socketOperations) GetSockOpt(t *kernel.Task, level int, name int, outLen int) (interface{}, *syserr.Error) {\n- // SO_RCVTIMEO is special because blocking is performed within the sentry.\n+ // SO_RCVTIMEO and SO_SNDTIMEO are special because blocking is performed\n+ // within the sentry.\nif level == linux.SOL_SOCKET && name == linux.SO_RCVTIMEO {\nif outLen < linux.SizeOfTimeval {\nreturn nil, syserr.ErrInvalidArgument\n@@ -387,6 +389,13 @@ func (s *socketOperations) GetSockOpt(t *kernel.Task, level int, name int, outLe\nreturn linux.NsecToTimeval(s.RecvTimeout()), nil\n}\n+ if level == linux.SOL_SOCKET && name == linux.SO_SNDTIMEO {\n+ if outLen < linux.SizeOfTimeval {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ return linux.NsecToTimeval(s.SendTimeout()), nil\n+ }\nstack := t.NetworkContext().(*Stack)\nid, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_GetSockOpt{&pb.GetSockOptRequest{Fd: s.fd, Level: int64(level), Name: int64(name), Length: uint32(outLen)}}}, false /* ignoreResult */)\n@@ -403,8 +412,9 @@ func (s *socketOperations) GetSockOpt(t *kernel.Task, level int, name int, outLe\n// SetSockOpt implements socket.Socket.SetSockOpt.\nfunc (s *socketOperations) SetSockOpt(t *kernel.Task, level int, name int, opt []byte) *syserr.Error {\n// Because blocking actually happens within the sentry we need to inspect\n- // this socket option to determine if it's a SO_RCVTIMEO, and if so, we will\n- // save it and use it as the deadline for recv(2) related syscalls.\n+ // this socket option to determine if it's a SO_RCVTIMEO or SO_SNDTIMEO,\n+ // and if so, we will save it and use it as the deadline for recv(2)\n+ // or send(2) related syscalls.\nif level == linux.SOL_SOCKET && name == linux.SO_RCVTIMEO {\nif len(opt) < linux.SizeOfTimeval {\nreturn syserr.ErrInvalidArgument\n@@ -412,9 +422,25 @@ func (s *socketOperations) SetSockOpt(t *kernel.Task, level int, name int, opt [\nvar v linux.Timeval\nbinary.Unmarshal(opt[:linux.SizeOfTimeval], usermem.ByteOrder, &v)\n+ if v.Usec < 0 || v.Usec >= int64(time.Second/time.Microsecond) {\n+ return syserr.ErrDomain\n+ }\ns.SetRecvTimeout(v.ToNsecCapped())\nreturn 
nil\n}\n+ if level == linux.SOL_SOCKET && name == linux.SO_SNDTIMEO {\n+ if len(opt) < linux.SizeOfTimeval {\n+ return syserr.ErrInvalidArgument\n+ }\n+\n+ var v linux.Timeval\n+ binary.Unmarshal(opt[:linux.SizeOfTimeval], usermem.ByteOrder, &v)\n+ if v.Usec < 0 || v.Usec >= int64(time.Second/time.Microsecond) {\n+ return syserr.ErrDomain\n+ }\n+ s.SetSendTimeout(v.ToNsecCapped())\n+ return nil\n+ }\nstack := t.NetworkContext().(*Stack)\nid, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_SetSockOpt{&pb.SetSockOptRequest{Fd: s.fd, Level: int64(level), Name: int64(name), Opt: opt}}}, false /* ignoreResult */)\n@@ -720,7 +746,7 @@ func rpcSendMsg(t *kernel.Task, req *pb.SyscallRequest_Sendmsg) (uint32, *syserr\n}\n// SendMsg implements socket.Socket.SendMsg.\n-func (s *socketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, controlMessages socket.ControlMessages) (int, *syserr.Error) {\n+func (s *socketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, haveDeadline bool, deadline ktime.Time, controlMessages socket.ControlMessages) (int, *syserr.Error) {\n// Whitelist flags.\nif flags&^(syscall.MSG_DONTWAIT|syscall.MSG_EOR|syscall.MSG_FASTOPEN|syscall.MSG_MORE|syscall.MSG_NOSIGNAL) != 0 {\nreturn 0, syserr.ErrInvalidArgument\n@@ -787,7 +813,10 @@ func (s *socketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []\nreturn int(totalWritten), nil\n}\n- if err := t.Block(ch); err != nil {\n+ if err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {\n+ if err == syserror.ETIMEDOUT {\n+ return int(totalWritten), syserr.ErrTryAgain\n+ }\nreturn int(totalWritten), syserr.FromError(err)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/socket.go", "new_path": "pkg/sentry/socket/socket.go", "diff": "@@ -94,15 +94,23 @@ type Socket interface {\n// ownership of the ControlMessage on error.\n//\n// If n > 0, err will either be nil or an error from t.Block.\n- SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, controlMessages ControlMessages) (n int, err *syserr.Error)\n+ SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, haveDeadline bool, deadline ktime.Time, controlMessages ControlMessages) (n int, err *syserr.Error)\n// SetRecvTimeout sets the timeout (in ns) for recv operations. Zero means\n- // no timeout.\n+ // no timeout, and negative means DONTWAIT.\nSetRecvTimeout(nanoseconds int64)\n// RecvTimeout gets the current timeout (in ns) for recv operations. Zero\n- // means no timeout.\n+ // means no timeout, and negative means DONTWAIT.\nRecvTimeout() int64\n+\n+ // SetSendTimeout sets the timeout (in ns) for send operations. Zero means\n+ // no timeout, and negative means DONTWAIT.\n+ SetSendTimeout(nanoseconds int64)\n+\n+ // SendTimeout gets the current timeout (in ns) for send operations. 
Zero\n+ // means no timeout, and negative means DONTWAIT.\n+ SendTimeout() int64\n}\n// Provider is the interface implemented by providers of sockets for specific\n@@ -192,30 +200,45 @@ func NewDirent(ctx context.Context, d *device.Device) *fs.Dirent {\nreturn fs.NewDirent(inode, fmt.Sprintf(\"socket:[%d]\", ino))\n}\n-// ReceiveTimeout stores a timeout for receive calls.\n+// SendReceiveTimeout stores timeouts for send and receive calls.\n//\n// It is meant to be embedded into Socket implementations to help satisfy the\n// interface.\n//\n-// Care must be taken when copying ReceiveTimeout as it contains atomic\n+// Care must be taken when copying SendReceiveTimeout as it contains atomic\n// variables.\n//\n// +stateify savable\n-type ReceiveTimeout struct {\n- // ns is length of the timeout in nanoseconds.\n+type SendReceiveTimeout struct {\n+ // send is length of the send timeout in nanoseconds.\n+ //\n+ // send must be accessed atomically.\n+ send int64\n+\n+ // recv is length of the receive timeout in nanoseconds.\n//\n- // ns must be accessed atomically.\n- ns int64\n+ // recv must be accessed atomically.\n+ recv int64\n}\n// SetRecvTimeout implements Socket.SetRecvTimeout.\n-func (rt *ReceiveTimeout) SetRecvTimeout(nanoseconds int64) {\n- atomic.StoreInt64(&rt.ns, nanoseconds)\n+func (to *SendReceiveTimeout) SetRecvTimeout(nanoseconds int64) {\n+ atomic.StoreInt64(&to.recv, nanoseconds)\n}\n// RecvTimeout implements Socket.RecvTimeout.\n-func (rt *ReceiveTimeout) RecvTimeout() int64 {\n- return atomic.LoadInt64(&rt.ns)\n+func (to *SendReceiveTimeout) RecvTimeout() int64 {\n+ return atomic.LoadInt64(&to.recv)\n+}\n+\n+// SetSendTimeout implements Socket.SetSendTimeout.\n+func (to *SendReceiveTimeout) SetSendTimeout(nanoseconds int64) {\n+ atomic.StoreInt64(&to.send, nanoseconds)\n+}\n+\n+// SendTimeout implements Socket.SendTimeout.\n+func (to *SendReceiveTimeout) SendTimeout() int64 {\n+ return atomic.LoadInt64(&to.send)\n}\n// GetSockOptEmitUnimplementedEvent emits unimplemented event if name is valid.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/unix.go", "new_path": "pkg/sentry/socket/unix/unix.go", "diff": "@@ -45,13 +45,14 @@ import (\n//\n// +stateify savable\ntype SocketOperations struct {\n- refs.AtomicRefCount\n- socket.ReceiveTimeout\nfsutil.PipeSeek `state:\"nosave\"`\nfsutil.NotDirReaddir `state:\"nosave\"`\nfsutil.NoFsync `state:\"nosave\"`\nfsutil.NoopFlush `state:\"nosave\"`\nfsutil.NoMMap `state:\"nosave\"`\n+ refs.AtomicRefCount\n+ socket.SendReceiveTimeout\n+\nep transport.Endpoint\nisPacket bool\n}\n@@ -367,7 +368,7 @@ func (s *SocketOperations) Write(ctx context.Context, _ *fs.File, src usermem.IO\n// SendMsg implements the linux syscall sendmsg(2) for unix sockets backed by\n// a transport.Endpoint.\n-func (s *SocketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, controlMessages socket.ControlMessages) (int, *syserr.Error) {\n+func (s *SocketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, haveDeadline bool, deadline ktime.Time, controlMessages socket.ControlMessages) (int, *syserr.Error) {\nw := EndpointWriter{\nEndpoint: s.ep,\nControl: controlMessages.Unix,\n@@ -404,7 +405,10 @@ func (s *SocketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []\nbreak\n}\n- if err := t.Block(ch); err != nil {\n+ if err = t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {\n+ if err == syserror.ETIMEDOUT {\n+ err = syserror.ErrWouldBlock\n+ }\nbreak\n}\n}\n" }, { 
"change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_socket.go", "new_path": "pkg/sentry/syscalls/linux/sys_socket.go", "diff": "@@ -612,9 +612,11 @@ func RecvMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca\nvar haveDeadline bool\nvar deadline ktime.Time\n- if dl := s.RecvTimeout(); dl != 0 {\n+ if dl := s.RecvTimeout(); dl > 0 {\ndeadline = t.Kernel().MonotonicClock().Now().Add(time.Duration(dl) * time.Nanosecond)\nhaveDeadline = true\n+ } else if dl < 0 {\n+ flags |= linux.MSG_DONTWAIT\n}\nn, err := recvSingleMsg(t, s, msgPtr, flags, haveDeadline, deadline)\n@@ -671,10 +673,11 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc\n}\nif !haveDeadline {\n- dl := s.RecvTimeout()\n- if dl != 0 {\n+ if dl := s.RecvTimeout(); dl > 0 {\ndeadline = t.Kernel().MonotonicClock().Now().Add(time.Duration(dl) * time.Nanosecond)\nhaveDeadline = true\n+ } else if dl < 0 {\n+ flags |= linux.MSG_DONTWAIT\n}\n}\n@@ -821,10 +824,11 @@ func recvFrom(t *kernel.Task, fd kdefs.FD, bufPtr usermem.Addr, bufLen uint64, f\nvar haveDeadline bool\nvar deadline ktime.Time\n-\n- if dl := s.RecvTimeout(); dl != 0 {\n+ if dl := s.RecvTimeout(); dl > 0 {\ndeadline = t.Kernel().MonotonicClock().Now().Add(time.Duration(dl) * time.Nanosecond)\nhaveDeadline = true\n+ } else if dl < 0 {\n+ flags |= linux.MSG_DONTWAIT\n}\nn, sender, senderLen, cm, e := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, nameLenPtr != 0, 0)\n@@ -1001,8 +1005,17 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme\nreturn 0, err\n}\n+ var haveDeadline bool\n+ var deadline ktime.Time\n+ if dl := s.SendTimeout(); dl > 0 {\n+ deadline = t.Kernel().MonotonicClock().Now().Add(time.Duration(dl) * time.Nanosecond)\n+ haveDeadline = true\n+ } else if dl < 0 {\n+ flags |= linux.MSG_DONTWAIT\n+ }\n+\n// Call the syscall implementation.\n- n, e := s.SendMsg(t, src, to, int(flags), socket.ControlMessages{Unix: controlMessages})\n+ n, e := s.SendMsg(t, src, to, int(flags), haveDeadline, deadline, socket.ControlMessages{Unix: controlMessages})\nerr = handleIOError(t, n != 0, e.ToError(), kernel.ERESTARTSYS, \"sendmsg\", file)\nif err != nil {\ncontrolMessages.Release()\n@@ -1052,8 +1065,17 @@ func sendTo(t *kernel.Task, fd kdefs.FD, bufPtr usermem.Addr, bufLen uint64, fla\nreturn 0, err\n}\n+ var haveDeadline bool\n+ var deadline ktime.Time\n+ if dl := s.SendTimeout(); dl > 0 {\n+ deadline = t.Kernel().MonotonicClock().Now().Add(time.Duration(dl) * time.Nanosecond)\n+ haveDeadline = true\n+ } else if dl < 0 {\n+ flags |= linux.MSG_DONTWAIT\n+ }\n+\n// Call the syscall implementation.\n- n, e := s.SendMsg(t, src, to, int(flags), socket.ControlMessages{Unix: control.New(t, s, nil)})\n+ n, e := s.SendMsg(t, src, to, int(flags), haveDeadline, deadline, socket.ControlMessages{Unix: control.New(t, s, nil)})\nreturn uintptr(n), handleIOError(t, n != 0, e.ToError(), kernel.ERESTARTSYS, \"sendto\", file)\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_generic.cc", "new_path": "test/syscalls/linux/socket_generic.cc", "diff": "@@ -332,6 +332,35 @@ TEST_P(AllSocketPairTest, RecvmsgTimeoutSucceeds) {\nSyscallFailsWithErrno(EAGAIN));\n}\n+TEST_P(AllSocketPairTest, SendTimeoutAllowsSend) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ struct timeval tv {\n+ .tv_sec = 0, .tv_usec = 10\n+ };\n+ EXPECT_THAT(\n+ setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),\n+ SyscallSucceeds());\n+\n+ char buf[20] = 
{};\n+ ASSERT_THAT(RetryEINTR(send)(sockets->first_fd(), buf, sizeof(buf), 0),\n+ SyscallSucceedsWithValue(sizeof(buf)));\n+}\n+\n+TEST_P(AllSocketPairTest, SendmsgTimeoutAllowsSend) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ struct timeval tv {\n+ .tv_sec = 0, .tv_usec = 10\n+ };\n+ EXPECT_THAT(\n+ setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),\n+ SyscallSucceeds());\n+\n+ char buf[20] = {};\n+ ASSERT_NO_FATAL_FAILURE(SendNullCmsg(sockets->first_fd(), buf, sizeof(buf)));\n+}\n+\nTEST_P(AllSocketPairTest, SoRcvTimeoIsSet) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n@@ -382,6 +411,87 @@ TEST_P(AllSocketPairTest, RecvmsgTimeoutOneSecondSucceeds) {\nSyscallFailsWithErrno(EAGAIN));\n}\n+TEST_P(AllSocketPairTest, RecvTimeoutUsecTooLarge) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ struct timeval tv {\n+ .tv_sec = 0, .tv_usec = 2000000 // 2 seconds.\n+ };\n+ EXPECT_THAT(\n+ setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),\n+ SyscallFailsWithErrno(EDOM));\n+}\n+\n+TEST_P(AllSocketPairTest, SendTimeoutUsecTooLarge) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ struct timeval tv {\n+ .tv_sec = 0, .tv_usec = 2000000 // 2 seconds.\n+ };\n+ EXPECT_THAT(\n+ setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),\n+ SyscallFailsWithErrno(EDOM));\n+}\n+\n+TEST_P(AllSocketPairTest, RecvTimeoutUsecNeg) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ struct timeval tv {\n+ .tv_sec = 0, .tv_usec = -1\n+ };\n+ EXPECT_THAT(\n+ setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),\n+ SyscallFailsWithErrno(EDOM));\n+}\n+\n+TEST_P(AllSocketPairTest, SendTimeoutUsecNeg) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ struct timeval tv {\n+ .tv_sec = 0, .tv_usec = -1\n+ };\n+ EXPECT_THAT(\n+ setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),\n+ SyscallFailsWithErrno(EDOM));\n+}\n+\n+TEST_P(AllSocketPairTest, RecvTimeoutNegSec) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ struct timeval tv {\n+ .tv_sec = -1, .tv_usec = 0\n+ };\n+ EXPECT_THAT(\n+ setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),\n+ SyscallSucceeds());\n+\n+ char buf[20] = {};\n+ EXPECT_THAT(RetryEINTR(recv)(sockets->first_fd(), buf, sizeof(buf), 0),\n+ SyscallFailsWithErrno(EAGAIN));\n+}\n+\n+TEST_P(AllSocketPairTest, RecvmsgTimeoutNegSec) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ struct timeval tv {\n+ .tv_sec = -1, .tv_usec = 0\n+ };\n+ EXPECT_THAT(\n+ setsockopt(sockets->first_fd(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),\n+ SyscallSucceeds());\n+\n+ struct msghdr msg = {};\n+ char buf[20] = {};\n+ struct iovec iov;\n+ iov.iov_base = buf;\n+ iov.iov_len = sizeof(buf);\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+\n+ EXPECT_THAT(RetryEINTR(recvmsg)(sockets->first_fd(), &msg, 0),\n+ SyscallFailsWithErrno(EAGAIN));\n+}\n+\nTEST_P(AllSocketPairTest, RecvWaitAll) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_stream_blocking.cc", "new_path": "test/syscalls/linux/socket_stream_blocking.cc", "diff": "@@ -125,5 +125,27 @@ TEST_P(BlockingStreamSocketPairTest, RecvLessThanBufferWaitAll) {\nEXPECT_GE(after - before, kDuration);\n}\n+TEST_P(BlockingStreamSocketPairTest, SendTimeout) {\n+ auto sockets = 
ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ struct timeval tv {\n+ .tv_sec = 0, .tv_usec = 10\n+ };\n+ EXPECT_THAT(\n+ setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),\n+ SyscallSucceeds());\n+\n+ char buf[100] = {};\n+ for (;;) {\n+ int ret;\n+ ASSERT_THAT(\n+ ret = RetryEINTR(send)(sockets->first_fd(), buf, sizeof(buf), 0),\n+ ::testing::AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(EAGAIN)));\n+ if (ret == -1) {\n+ break;\n+ }\n+ }\n+}\n+\n} // namespace testing\n} // namespace gvisor\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_unix_non_stream.cc", "new_path": "test/syscalls/linux/socket_unix_non_stream.cc", "diff": "@@ -225,5 +225,27 @@ TEST_P(UnixNonStreamSocketPairTest, FragmentedRecvMsg) {\nEXPECT_EQ(0, memcmp(write_buf.data(), ptr, buffer_size));\n}\n+TEST_P(UnixNonStreamSocketPairTest, SendTimeout) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ struct timeval tv {\n+ .tv_sec = 0, .tv_usec = 10\n+ };\n+ EXPECT_THAT(\n+ setsockopt(sockets->first_fd(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),\n+ SyscallSucceeds());\n+\n+ char buf[100] = {};\n+ for (;;) {\n+ int ret;\n+ ASSERT_THAT(\n+ ret = RetryEINTR(send)(sockets->first_fd(), buf, sizeof(buf), 0),\n+ ::testing::AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(EAGAIN)));\n+ if (ret == -1) {\n+ break;\n+ }\n+ }\n+}\n+\n} // namespace testing\n} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Implement SO_SNDTIMEO PiperOrigin-RevId: 225620490 Change-Id: Ia726107b3f58093a5f881634f90b071b32d2c269
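To illustrate the option introduced in the record above, the sketch below sets SO_SNDTIMEO on one end of a socket pair whose peer never reads, so a blocking send eventually fails with EAGAIN instead of hanging. It is a user-space sketch under stated assumptions (golang.org/x/sys/unix against a host Linux kernel, write(2) in place of the tests' send(2)), not code from the repository; the 10µs timeout and 100-byte buffer mirror the BlockingStreamSocketPairTest SendTimeout test in the diff.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	// Tiny send timeout on the writing end; the peer (fds[1]) never reads.
	tv := unix.Timeval{Sec: 0, Usec: 10}
	if err := unix.SetsockoptTimeval(fds[0], unix.SOL_SOCKET, unix.SO_SNDTIMEO, &tv); err != nil {
		panic(err)
	}

	// Keep writing until the socket buffers fill up. Without SO_SNDTIMEO the
	// write would block indefinitely; with it, the call fails with EAGAIN.
	buf := make([]byte, 100)
	for {
		if _, err := unix.Write(fds[0], buf); err != nil {
			fmt.Println(err) // expected: resource temporarily unavailable (EAGAIN)
			break
		}
	}
}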