author (int64, 658 to 755k) | date (stringlengths, 19 to 19) | timezone (int64, -46,800 to 43.2k) | hash (stringlengths, 40 to 40) | message (stringlengths, 5 to 490) | mods (list) | language (stringclasses, 20 values) | license (stringclasses, 3 values) | repo (stringlengths, 5 to 68) | original_message (stringlengths, 12 to 491)
---|---|---|---|---|---|---|---|---|---|
259,992 | 17.05.2018 23:21:47 | 25,200 | a1e5862f3c7b0a3baabee2311d3d519d322a0168 | Move postgres to list of supported images | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -370,6 +370,7 @@ The following applications/images have been tested:\n* mysql\n* node\n* php\n+* postgres\n* prometheus\n* python\n* redis\n@@ -385,8 +386,6 @@ The following applications have been tested and may not yet work:\n#2](https://github.com/google/gvisor/issues/2).\n* nginx: Requires `ioctl(FIOASYNC)`, but see workaround in [bug\n#1](https://github.com/google/gvisor/issues/1).\n-* postgres: Requires SysV shared memory support. See [bug\n- #3](https://github.com/google/gvisor/issues/3).\n### Will my container work with gVisor?\n"
}
] | Go | Apache License 2.0 | google/gvisor | Move postgres to list of supported images
PiperOrigin-RevId: 197104043
Change-Id: I377c0727ebf0c44361ed221e1b197787825bfb7b |
259,858 | 21.05.2018 16:48:41 | 25,200 | 61b0b19497e9ac417de5a600e6ff06d52db4268f | Dramatically improve handling of KVM vCPU pool.
Especially in situations with small numbers of vCPUs, the existing
system resulted in excessive thrashing. Now, execution contexts
co-ordinate as smoothly as they can to share a small number of cores. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/context.go",
"new_path": "pkg/sentry/platform/kvm/context.go",
"diff": "@@ -41,10 +41,7 @@ func (c *context) Switch(as platform.AddressSpace, ac arch.Context, _ int32) (*a\nfp := (*byte)(ac.FloatingPointData())\n// Grab a vCPU.\n- cpu, err := c.machine.Get()\n- if err != nil {\n- return nil, usermem.NoAccess, err\n- }\n+ cpu := c.machine.Get()\n// Enable interrupts (i.e. calls to vCPU.Notify).\nif !c.interrupt.Enable(cpu) {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm_test.go",
"new_path": "pkg/sentry/platform/kvm/kvm_test.go",
"diff": "@@ -59,10 +59,7 @@ func kvmTest(t testHarness, setup func(*KVM), fn func(*vCPU) bool) {\n}\n}()\nfor {\n- c, err = k.machine.Get()\n- if err != nil {\n- t.Fatalf(\"error getting vCPU: %v\", err)\n- }\n+ c = k.machine.Get()\nif !fn(c) {\nbreak\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine.go",
"new_path": "pkg/sentry/platform/kvm/machine.go",
"diff": "@@ -48,6 +48,9 @@ type machine struct {\n// mu protects vCPUs.\nmu sync.Mutex\n+ // available is notified when vCPUs are available.\n+ available sync.Cond\n+\n// vCPUs are the machine vCPUs.\n//\n// This is eventually keyed by system TID, but is initially indexed by\n@@ -118,6 +121,7 @@ func newMachine(vm int, vCPUs int) (*machine, error) {\nfd: vm,\nvCPUs: make(map[uint64]*vCPU),\n}\n+ m.available.L = &m.mu\nif vCPUs > _KVM_NR_VCPUS {\n// Hard cap at KVM's limit.\nvCPUs = _KVM_NR_VCPUS\n@@ -284,25 +288,21 @@ func (m *machine) Destroy() {\n}\n// Get gets an available vCPU.\n-func (m *machine) Get() (*vCPU, error) {\n+func (m *machine) Get() *vCPU {\nruntime.LockOSThread()\ntid := procid.Current()\nm.mu.Lock()\n- for {\n// Check for an exact match.\nif c := m.vCPUs[tid]; c != nil {\nc.lock()\nm.mu.Unlock()\n- return c, nil\n+ return c\n}\n+ for {\n// Scan for an available vCPU.\nfor origTID, c := range m.vCPUs {\n- // We can only steal a vCPU that is the vCPUReady\n- // state. That is, it must not be heading to user mode\n- // with some other thread, have a waiter registered, or\n- // be in guest mode already.\nif atomic.CompareAndSwapUint32(&c.state, vCPUReady, vCPUUser) {\ndelete(m.vCPUs, origTID)\nm.vCPUs[tid] = c\n@@ -313,24 +313,44 @@ func (m *machine) Get() (*vCPU, error) {\n// may be stale.\nc.loadSegments()\natomic.StoreUint64(&c.tid, tid)\n- return c, nil\n+ return c\n}\n}\n- // Everything is already in guest mode.\n- //\n- // We hold the pool lock here, so we should be able to kick\n- // something out of kernel mode and have it bounce into host\n- // mode when it tries to grab the vCPU again.\n- for _, c := range m.vCPUs {\n- c.BounceToHost()\n+ // Scan for something not in user mode.\n+ for origTID, c := range m.vCPUs {\n+ if !atomic.CompareAndSwapUint32(&c.state, vCPUGuest, vCPUGuest|vCPUWaiter) {\n+ continue\n+ }\n+\n+ // The vCPU is not be able to transition to\n+ // vCPUGuest|vCPUUser or to vCPUUser because that\n+ // transition requires holding the machine mutex, as we\n+ // do now. There is no path to register a waiter on\n+ // just the vCPUReady state.\n+ for {\n+ c.waitUntilNot(vCPUGuest | vCPUWaiter)\n+ if atomic.CompareAndSwapUint32(&c.state, vCPUReady, vCPUUser) {\n+ break\n+ }\n+ }\n+\n+ // Steal the vCPU.\n+ delete(m.vCPUs, origTID)\n+ m.vCPUs[tid] = c\n+ m.mu.Unlock()\n+\n+ // See above.\n+ c.loadSegments()\n+ atomic.StoreUint64(&c.tid, tid)\n+ return c\n}\n- // Give other threads an opportunity to run. We don't yield the\n- // pool lock above, so if they try to regrab the lock we will\n- // serialize at this point. This is extreme, but we don't\n- // expect to exhaust all vCPUs frequently.\n- yield()\n+ // Everything is executing in user mode. Wait until something\n+ // is available. Note that signaling the condition variable\n+ // will have the extra effect of kicking the vCPUs out of guest\n+ // mode if that's where they were.\n+ m.available.Wait()\n}\n}\n@@ -338,6 +358,7 @@ func (m *machine) Get() (*vCPU, error) {\nfunc (m *machine) Put(c *vCPU) {\nc.unlock()\nruntime.UnlockOSThread()\n+ m.available.Signal()\n}\n// lock marks the vCPU as in user mode.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Dramatically improve handling of KVM vCPU pool.
Especially in situations with small numbers of vCPUs, the existing
system resulted in excessive thrashing. Now, execution contexts
co-ordinate as smoothly as they can to share a small number of cores.
PiperOrigin-RevId: 197483323
Change-Id: I0afc0c5363ea9386994355baf3904bf5fe08c56c |
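
A note on the pattern in the machine.go diff above: the busy yield loop is replaced by a `sync.Cond` tied to the pool mutex, so a context that finds every vCPU busy sleeps until `Put` signals availability. Below is a minimal, self-contained Go sketch of that Get/Put shape, using a plain `int` resource and hypothetical names rather than gVisor's actual `machine`/`vCPU` types.

```go
package main

import (
	"fmt"
	"sync"
)

// pool hands out a fixed set of resources; Get blocks until Put returns
// one. A plain int stands in for a vCPU here.
type pool struct {
	mu        sync.Mutex
	available sync.Cond // signalled whenever a resource is returned
	free      []int
}

func newPool(n int) *pool {
	p := &pool{}
	p.available.L = &p.mu
	for i := 0; i < n; i++ {
		p.free = append(p.free, i)
	}
	return p
}

// Get blocks until a resource is free, then claims it.
func (p *pool) Get() int {
	p.mu.Lock()
	defer p.mu.Unlock()
	for len(p.free) == 0 {
		p.available.Wait() // releases mu while sleeping
	}
	r := p.free[len(p.free)-1]
	p.free = p.free[:len(p.free)-1]
	return r
}

// Put returns a resource and wakes one waiter.
func (p *pool) Put(r int) {
	p.mu.Lock()
	p.free = append(p.free, r)
	p.mu.Unlock()
	p.available.Signal()
}

func main() {
	p := newPool(2)
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			r := p.Get()
			fmt.Printf("worker %d got resource %d\n", id, r)
			p.Put(r)
		}(i)
	}
	wg.Wait()
}
```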
259,992 | 21.05.2018 17:47:13 | 25,200 | ed2b86a54942dfd245e9f872e6da52d9bde0da6d | Fix test failure when user can't mount temp dir | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -434,6 +434,17 @@ func TestCapabilities(t *testing.T) {\nType: \"bind\",\n})\n+ // Capability below is needed to mount TempDir above in case the user doesn't\n+ // have access to all parents that lead to TempDir.\n+ caps := []string{\"CAP_DAC_OVERRIDE\"}\n+ spec.Process.Capabilities = &specs.LinuxCapabilities{\n+ Bounding: caps,\n+ Effective: caps,\n+ Inheritable: caps,\n+ Permitted: caps,\n+ Ambient: caps,\n+ }\n+\nrootDir, bundleDir, conf, err := setupContainer(spec)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix test failure when user can't mount temp dir
PiperOrigin-RevId: 197491098
Change-Id: Ifb75bd4e4f41b84256b6d7afc4b157f6ce3839f3 |
259,854 | 22.05.2018 11:53:42 | 25,200 | 3a6070dc9882d43b00bd66b23492daa422435c7c | Clarify that syserr.New must only be called during init | [
{
"change_type": "MODIFY",
"old_path": "pkg/syserr/syserr.go",
"new_path": "pkg/syserr/syserr.go",
"diff": "@@ -31,6 +31,8 @@ type Error struct {\n}\n// New creates a new Error and adds a translation for it.\n+//\n+// New must only be called at init.\nfunc New(message string, linuxTranslation *linux.Errno) *Error {\nerr := &Error{message}\nlinuxABITranslations[err] = linuxTranslation\n"
}
] | Go | Apache License 2.0 | google/gvisor | Clarify that syserr.New must only be called during init
PiperOrigin-RevId: 197599402
Change-Id: I23eb0336195ab0d3e5fb49c0c57fc9e0715a9b75 |
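
The constraint documented above follows from how `New` works: it writes to a package-level translation map, which is only safe to do before concurrent readers exist, that is, during package initialization. A simplified sketch of that register-at-init pattern (illustrative types, not the real `syserr` package):

```go
package main

import "fmt"

// Error mirrors the shape of a syserr-style error: a message plus an
// errno translation kept in a package-level map.
type Error struct{ msg string }

var translations = map[*Error]int{}

// newError registers a translation. Like syserr.New, it should only be
// called from package-level var initialization, so the map is fully
// populated before any goroutine reads it.
func newError(msg string, errno int) *Error {
	e := &Error{msg}
	translations[e] = errno
	return e
}

// Registered at init time, as the added comment requires.
var errWouldBlock = newError("operation would block", 11) // 11 = EAGAIN on Linux

func main() {
	fmt.Println(errWouldBlock.msg, "->", translations[errWouldBlock])
}
```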
259,891 | 22.05.2018 13:46:37 | 25,200 | 705605f9011cfbd58f407ca84bc4c2d8cf39d80b | sentry: Add simple SIOCGIFFLAGS support (IFF_RUNNING and IFF_PROMIS).
Establishes a way of communicating interface flags between netstack and
epsocket. More flags can be added over time. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/epsocket/epsocket.go",
"new_path": "pkg/sentry/socket/epsocket/epsocket.go",
"diff": "@@ -48,12 +48,15 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/syserror\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer\"\n+ nstack \"gvisor.googlesource.com/gvisor/pkg/tcpip/stack\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix\"\n\"gvisor.googlesource.com/gvisor/pkg/waiter\"\n)\nconst sizeOfInt32 int = 4\n+var errStackType = syserr.New(\"expected but did not receive an epsocket.Stack\", linux.EINVAL)\n+\n// ntohs converts a 16-bit number from network byte order to host byte order. It\n// assumes that the host is little endian.\nfunc ntohs(v uint16) uint16 {\n@@ -1177,9 +1180,11 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe\nusermem.ByteOrder.PutUint16(ifr.Data[:2], uint16(n))\ncase syscall.SIOCGIFFLAGS:\n- // TODO: Implement. For now, return only that the\n- // device is up so that ifconfig prints it.\n- usermem.ByteOrder.PutUint16(ifr.Data[:2], linux.IFF_UP)\n+ f, err := interfaceStatusFlags(stack, iface.Name)\n+ if err != nil {\n+ return err\n+ }\n+ usermem.ByteOrder.PutUint16(ifr.Data[:2], f)\ncase syscall.SIOCGIFADDR:\n// Copy the IPv4 address out.\n@@ -1288,3 +1293,49 @@ func ifconfIoctl(ctx context.Context, io usermem.IO, ifc *linux.IFConf) error {\n}\nreturn nil\n}\n+\n+// interfaceStatusFlags returns status flags for an interface in the stack.\n+// Flag values and meanings are described in greater detail in netdevice(7) in\n+// the SIOCGIFFLAGS section.\n+func interfaceStatusFlags(stack inet.Stack, name string) (uint16, *syserr.Error) {\n+ // epsocket should only ever be passed an epsocket.Stack.\n+ epstack, ok := stack.(*Stack)\n+ if !ok {\n+ return 0, errStackType\n+ }\n+\n+ // Find the NIC corresponding to this interface.\n+ var (\n+ nicid tcpip.NICID\n+ info nstack.NICInfo\n+ found bool\n+ )\n+ ns := epstack.Stack\n+ for nicid, info = range ns.NICInfo() {\n+ if info.Name == name {\n+ found = true\n+ break\n+ }\n+ }\n+ if !found {\n+ return 0, syserr.ErrNoDevice\n+ }\n+\n+ // Set flags based on NIC state.\n+ nicFlags, err := ns.NICFlags(nicid)\n+ if err != nil {\n+ return 0, syserr.TranslateNetstackError(err)\n+ }\n+\n+ var retFlags uint16\n+ if nicFlags.Up {\n+ retFlags |= linux.IFF_UP\n+ }\n+ if nicFlags.Running {\n+ retFlags |= linux.IFF_RUNNING\n+ }\n+ if nicFlags.Promiscuous {\n+ retFlags |= linux.IFF_PROMISC\n+ }\n+ return retFlags, nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/channel/channel.go",
"new_path": "pkg/tcpip/link/channel/channel.go",
"diff": "@@ -67,6 +67,11 @@ func (e *Endpoint) Attach(dispatcher stack.NetworkDispatcher) {\ne.dispatcher = dispatcher\n}\n+// IsAttached implements stack.LinkEndpoint.IsAttached.\n+func (e *Endpoint) IsAttached() bool {\n+ return e.dispatcher != nil\n+}\n+\n// MTU implements stack.LinkEndpoint.MTU. It returns the value initialized\n// during construction.\nfunc (e *Endpoint) MTU() uint32 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint.go",
"diff": "@@ -48,6 +48,7 @@ type endpoint struct {\nvv *buffer.VectorisedView\niovecs []syscall.Iovec\nviews []buffer.View\n+ attached bool\n}\n// Options specify the details about the fd-based endpoint to be created.\n@@ -96,9 +97,15 @@ func New(opts *Options) tcpip.LinkEndpointID {\n// Attach launches the goroutine that reads packets from the file descriptor and\n// dispatches them via the provided dispatcher.\nfunc (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) {\n+ e.attached = true\ngo e.dispatchLoop(dispatcher) // S/R-FIXME\n}\n+// IsAttached implements stack.LinkEndpoint.IsAttached.\n+func (e *endpoint) IsAttached() bool {\n+ return e.attached\n+}\n+\n// MTU implements stack.LinkEndpoint.MTU. It returns the value initialized\n// during construction.\nfunc (e *endpoint) MTU() uint32 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/loopback/loopback.go",
"new_path": "pkg/tcpip/link/loopback/loopback.go",
"diff": "@@ -32,6 +32,11 @@ func (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) {\ne.dispatcher = dispatcher\n}\n+// IsAttached implements stack.LinkEndpoint.IsAttached.\n+func (e *endpoint) IsAttached() bool {\n+ return e.dispatcher != nil\n+}\n+\n// MTU implements stack.LinkEndpoint.MTU. It returns a constant that matches the\n// linux loopback interface.\nfunc (*endpoint) MTU() uint32 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem.go",
"diff": "@@ -137,6 +137,13 @@ func (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) {\ne.mu.Unlock()\n}\n+// IsAttached implements stack.LinkEndpoint.IsAttached.\n+func (e *endpoint) IsAttached() bool {\n+ e.mu.Lock()\n+ defer e.mu.Unlock()\n+ return e.workerStarted\n+}\n+\n// MTU implements stack.LinkEndpoint.MTU. It returns the value initialized\n// during construction.\nfunc (e *endpoint) MTU() uint32 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sniffer/sniffer.go",
"new_path": "pkg/tcpip/link/sniffer/sniffer.go",
"diff": "@@ -143,6 +143,11 @@ func (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) {\ne.lower.Attach(e)\n}\n+// IsAttached implements stack.LinkEndpoint.IsAttached.\n+func (e *endpoint) IsAttached() bool {\n+ return e.dispatcher != nil\n+}\n+\n// MTU implements stack.LinkEndpoint.MTU. It just forwards the request to the\n// lower endpoint.\nfunc (e *endpoint) MTU() uint32 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/waitable/waitable.go",
"new_path": "pkg/tcpip/link/waitable/waitable.go",
"diff": "@@ -58,6 +58,11 @@ func (e *Endpoint) Attach(dispatcher stack.NetworkDispatcher) {\ne.lower.Attach(e)\n}\n+// IsAttached implements stack.LinkEndpoint.IsAttached.\n+func (e *Endpoint) IsAttached() bool {\n+ return e.dispatcher != nil\n+}\n+\n// MTU implements stack.LinkEndpoint.MTU. It just forwards the request to the\n// lower endpoint.\nfunc (e *Endpoint) MTU() uint32 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/waitable/waitable_test.go",
"new_path": "pkg/tcpip/link/waitable/waitable_test.go",
"diff": "@@ -34,6 +34,11 @@ func (e *countedEndpoint) Attach(dispatcher stack.NetworkDispatcher) {\ne.dispatcher = dispatcher\n}\n+// IsAttached implements stack.LinkEndpoint.IsAttached.\n+func (e *countedEndpoint) IsAttached() bool {\n+ return e.dispatcher != nil\n+}\n+\nfunc (e *countedEndpoint) MTU() uint32 {\nreturn e.mtu\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ip_test.go",
"new_path": "pkg/tcpip/network/ip_test.go",
"diff": "@@ -90,6 +90,11 @@ func (t *testObject) DeliverTransportControlPacket(local, remote tcpip.Address,\n// Attach is only implemented to satisfy the LinkEndpoint interface.\nfunc (*testObject) Attach(stack.NetworkDispatcher) {}\n+// IsAttached implements stack.LinkEndpoint.IsAttached.\n+func (*testObject) IsAttached() bool {\n+ return true\n+}\n+\n// MTU implements stack.LinkEndpoint.MTU. It just returns a constant that\n// matches the linux loopback MTU.\nfunc (*testObject) MTU() uint32 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/registration.go",
"new_path": "pkg/tcpip/stack/registration.go",
"diff": "@@ -224,6 +224,10 @@ type LinkEndpoint interface {\n// Attach attaches the data link layer endpoint to the network-layer\n// dispatcher of the stack.\nAttach(dispatcher NetworkDispatcher)\n+\n+ // IsAttached returns whether a NetworkDispatcher is attached to the\n+ // endpoint.\n+ IsAttached() bool\n}\n// A LinkAddressResolver is an extension to a NetworkProtocol that\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -541,6 +541,39 @@ func (s *Stack) NICInfo() map[tcpip.NICID]NICInfo {\nreturn nics\n}\n+// NICStateFlags holds information about the state of an NIC.\n+type NICStateFlags struct {\n+ // Up indicates whether the interface is running.\n+ Up bool\n+\n+ // Running indicates whether resources are allocated.\n+ Running bool\n+\n+ // Promiscuous indicates whether the interface is in promiscuous mode.\n+ Promiscuous bool\n+}\n+\n+// NICFlags returns flags about the state of the NIC. It returns an error if\n+// the NIC corresponding to id cannot be found.\n+func (s *Stack) NICFlags(id tcpip.NICID) (NICStateFlags, *tcpip.Error) {\n+ s.mu.RLock()\n+ defer s.mu.RUnlock()\n+\n+ nic := s.nics[id]\n+ if nic == nil {\n+ return NICStateFlags{}, tcpip.ErrUnknownNICID\n+ }\n+\n+ ret := NICStateFlags{\n+ // Netstack interfaces are always up.\n+ Up: true,\n+\n+ Running: nic.linkEP.IsAttached(),\n+ Promiscuous: nic.promiscuous,\n+ }\n+ return ret, nil\n+}\n+\n// AddAddress adds a new network-layer address to the specified NIC.\nfunc (s *Stack) AddAddress(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) *tcpip.Error {\ns.mu.RLock()\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: Add simple SIOCGIFFLAGS support (IFF_RUNNING and IFF_PROMIS).
Establishes a way of communicating interface flags between netstack and
epsocket. More flags can be added over time.
PiperOrigin-RevId: 197616669
Change-Id: I230448c5fb5b7d2e8d69b41a451eb4e1096a0e30 |
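
For reference, the `interfaceStatusFlags` helper added above folds NIC state into the `IFF_*` bitmask that `SIOCGIFFLAGS` reports. A rough standalone sketch of that mapping, with a stand-in `nicState` struct instead of gVisor's `stack.NICStateFlags` (the flag values are the standard Linux ones from `<net/if.h>`):

```go
package main

import "fmt"

// Standard Linux interface flag bits (from <net/if.h>).
const (
	iffUp      uint16 = 0x1
	iffRunning uint16 = 0x40
	iffPromisc uint16 = 0x100
)

// nicState is a stand-in for the NICStateFlags struct added above.
type nicState struct {
	Up          bool
	Running     bool
	Promiscuous bool
}

// statusFlags folds NIC state into a SIOCGIFFLAGS-style bitmask, the
// same mapping interfaceStatusFlags performs in the diff.
func statusFlags(s nicState) uint16 {
	var f uint16
	if s.Up {
		f |= iffUp
	}
	if s.Running {
		f |= iffRunning
	}
	if s.Promiscuous {
		f |= iffPromisc
	}
	return f
}

func main() {
	fmt.Printf("flags=%#x\n", statusFlags(nicState{Up: true, Running: true}))
}
```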
260,013 | 22.05.2018 15:51:55 | 25,200 | 257ab8de93312295d475638498c57e4de77a4b02 | When sending a RST the acceptable ACK window shouldn't change.
Today when we transmit a RST it's happening during the time-wait
flow. Because a FIN is allowed to advance the acceptable ACK window
we're incorrectly doing that for a RST. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/snd.go",
"new_path": "pkg/tcpip/transport/tcp/snd.go",
"diff": "@@ -342,15 +342,17 @@ func (s *sender) sendData() {\ns.ep.mu.Lock()\n// We're sending a FIN by default\nfl := flagFin\n+ segEnd = seg.sequenceNumber\nif (s.ep.shutdownFlags&tcpip.ShutdownRead) != 0 && rcvBufUsed > 0 {\n// If there is unread data we must send a RST.\n// For more information see RFC 2525 section 2.17.\nfl = flagRst\n+ } else {\n+ segEnd = seg.sequenceNumber.Add(1)\n}\n+\ns.ep.mu.Unlock()\nseg.flags |= uint8(fl)\n-\n- segEnd = seg.sequenceNumber.Add(1)\n} else {\n// We're sending a non-FIN segment.\nif !seg.sequenceNumber.LessThan(end) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | When sending a RST the acceptable ACK window shouldn't change.
Today when we transmit a RST it's happening during the time-wait
flow. Because a FIN is allowed to advance the acceptable ACK window
we're incorrectly doing that for a RST.
PiperOrigin-RevId: 197637565
Change-Id: I080190b06bd0225326cd68c1fbf37bd3fdbd414e |
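
The key fact behind the snd.go change above is that a FIN occupies one unit of sequence space while an RST does not, so only the FIN path may advance `segEnd`. A toy sketch of that rule, assuming nothing beyond standard RFC 793 behavior:

```go
package main

import "fmt"

type seqnum uint32

// segmentEnd returns the sequence number that follows a control
// segment: a FIN consumes one unit of sequence space, an RST does not.
func segmentEnd(start seqnum, fin, rst bool) seqnum {
	if rst {
		return start
	}
	if fin {
		return start + 1
	}
	return start
}

func main() {
	fmt.Println(segmentEnd(790, true, false))  // 791: FIN consumes a sequence number
	fmt.Println(segmentEnd(790, false, true))  // 790: RST must not
}
```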
259,992 | 22.05.2018 16:35:58 | 25,200 | 51c95c270be3e0c3867c1bc93cc454b32b276721 | Remove offset check to match with Linux implementation. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_file.go",
"new_path": "pkg/sentry/syscalls/linux/sys_file.go",
"diff": "@@ -870,11 +870,11 @@ const (\n// This implementation currently ignores the provided advice.\nfunc Fadvise64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\nfd := kdefs.FD(args[0].Int())\n- offset := args[1].Int64()\nlength := args[2].Int64()\nadvice := args[3].Int()\n- if offset < 0 || length < 0 {\n+ // Note: offset is allowed to be negative.\n+ if length < 0 {\nreturn 0, nil, syserror.EINVAL\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove offset check to match with Linux implementation.
PiperOrigin-RevId: 197644246
Change-Id: I63eb0a58889e69fbc4af2af8232f6fa1c399d43f |
259,854 | 23.05.2018 14:27:52 | 25,200 | 02ad0dc3d9909fb93dd41d7f8976be4fc54a99d5 | Fix typo in TCP transport | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/connect.go",
"new_path": "pkg/tcpip/transport/tcp/connect.go",
"diff": "@@ -818,7 +818,7 @@ func (e *endpoint) protocolMainLoop(passive bool) *tcpip.Error {\nvar closeWaker sleep.Waker\ndefer func() {\n- // e.mu is expected to be hold upon entering this section.\n+ // e.mu is expected to be held upon entering this section.\ne.waiterQueue.Notify(waiter.EventIn | waiter.EventOut)\ne.completeWorkerLocked()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix typo in TCP transport
PiperOrigin-RevId: 197789418
Change-Id: I86b1574c8d3b8b321348d9b101ffaef7aa15f722 |
260,013 | 23.05.2018 15:00:59 | 25,200 | 7996ae7ccf284718fc98f5ba34c94b044b858ec2 | Adding test case for RST acceptable ack panic | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"new_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"diff": "@@ -400,7 +400,20 @@ func TestRstOnCloseWithUnreadData(t *testing.T) {\nchecker.TCP(\nchecker.DstPort(context.TestPort),\nchecker.TCPFlags(header.TCPFlagAck|header.TCPFlagRst),\n+ // We shouldn't consume a sequence number on RST.\n+ checker.SeqNum(uint32(c.IRS)+1),\n))\n+\n+ // This final should be ignored because an ACK on a reset doesn't\n+ // mean anything.\n+ c.SendPacket(nil, &context.Headers{\n+ SrcPort: context.TestPort,\n+ DstPort: c.Port,\n+ Flags: header.TCPFlagAck,\n+ SeqNum: seqnum.Value(790 + len(data)),\n+ AckNum: c.IRS.Add(seqnum.Size(2)),\n+ RcvWnd: 30000,\n+ })\n}\nfunc TestFullWindowReceive(t *testing.T) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Adding test case for RST acceptable ack panic
PiperOrigin-RevId: 197795613
Change-Id: I759dd04995d900cba6b984649fa48bbc880946d6 |
259,992 | 24.05.2018 14:27:05 | 25,200 | e48f7078761b00552ac74068c184ee4fb90fe9aa | Configure sandbox as superuser
Container user might not have enough privilege to walk directories and
mount filesystems. Instead, create superuser to perform these steps of
the configuration. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -51,21 +51,30 @@ func (f *fdDispenser) empty() bool {\nreturn len(f.fds) == 0\n}\n-// createMountNamespace creates a mount manager containing the root filesystem\n-// and all mounts.\n-func createMountNamespace(ctx context.Context, spec *specs.Spec, conf *Config, ioFDs []int) (*fs.MountNamespace, error) {\n+// createMountNamespace creates a mount namespace containing the root filesystem\n+// and all mounts. 'rootCtx' is used to walk directories to find mount points.\n+func createMountNamespace(userCtx context.Context, rootCtx context.Context, spec *specs.Spec, conf *Config, ioFDs []int) (*fs.MountNamespace, error) {\nfds := &fdDispenser{fds: ioFDs}\n-\n- // Create the MountNamespace from the root.\n- rootInode, err := createRootMount(ctx, spec, conf, fds)\n+ rootInode, err := createRootMount(rootCtx, spec, conf, fds)\nif err != nil {\n- return nil, fmt.Errorf(\"failed to create root overlay: %v\", err)\n+ return nil, fmt.Errorf(\"failed to create root mount: %v\", err)\n}\n- mns, err := fs.NewMountNamespace(ctx, rootInode)\n+ mns, err := fs.NewMountNamespace(userCtx, rootInode)\nif err != nil {\n- return nil, fmt.Errorf(\"failed to construct MountNamespace: %v\", err)\n+ return nil, fmt.Errorf(\"failed to create root mount namespace: %v\", err)\n+ }\n+ if err := configureMounts(rootCtx, spec, conf, mns, fds); err != nil {\n+ return nil, fmt.Errorf(\"failed to configure mounts: %v\", err)\n+ }\n+ if !fds.empty() {\n+ return nil, fmt.Errorf(\"not all mount points were consumed, remaining: %v\", fds)\n+ }\n+ return mns, nil\n}\n+// configureMounts iterates over Spec.Mounts and mounts them in the specified\n+// mount namespace.\n+func configureMounts(ctx context.Context, spec *specs.Spec, conf *Config, mns *fs.MountNamespace, fds *fdDispenser) error {\n// Keep track of whether proc, sys, and tmp were mounted.\nvar procMounted, sysMounted, tmpMounted bool\n@@ -88,7 +97,7 @@ func createMountNamespace(ctx context.Context, spec *specs.Spec, conf *Config, i\n}\nif err := mountSubmount(ctx, spec, conf, mns, fds, m); err != nil {\n- return nil, err\n+ return err\n}\n}\n@@ -97,7 +106,7 @@ func createMountNamespace(ctx context.Context, spec *specs.Spec, conf *Config, i\nType: \"devtmpfs\",\nDestination: \"/dev\",\n}); err != nil {\n- return nil, err\n+ return err\n}\n// Mount proc and sys even if the user did not ask for it, as the spec\n@@ -107,7 +116,7 @@ func createMountNamespace(ctx context.Context, spec *specs.Spec, conf *Config, i\nType: \"proc\",\nDestination: \"/proc\",\n}); err != nil {\n- return nil, err\n+ return err\n}\n}\nif !sysMounted {\n@@ -115,7 +124,7 @@ func createMountNamespace(ctx context.Context, spec *specs.Spec, conf *Config, i\nType: \"sysfs\",\nDestination: \"/sys\",\n}); err != nil {\n- return nil, err\n+ return err\n}\n}\n@@ -127,15 +136,11 @@ func createMountNamespace(ctx context.Context, spec *specs.Spec, conf *Config, i\nType: \"tmpfs\",\nDestination: \"/tmp\",\n}); err != nil {\n- return nil, err\n+ return err\n}\n}\n- if !fds.empty() {\n- return nil, fmt.Errorf(\"not all mount points were consumed, remaining: %v\", fds)\n- }\n-\n- return mns, nil\n+ return nil\n}\n// createRootMount creates the root filesystem.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -137,9 +137,6 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\nextraKGIDs,\ncaps,\nauth.NewRootUserNamespace())\n- if err != nil {\n- return nil, fmt.Errorf(\"error creating credentials: %v\", err)\n- }\n// Create user namespace.\n// TODO: Not clear what domain name should be here. It is\n@@ -159,22 +156,6 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\nreturn nil, fmt.Errorf(\"error getting executable path: %v\", err)\n}\n- // Create the process arguments.\n- procArgs := kernel.CreateProcessArgs{\n- Filename: exec,\n- Argv: spec.Process.Args,\n- Envv: spec.Process.Env,\n- WorkingDirectory: spec.Process.Cwd,\n- Credentials: creds,\n- // Creating the FDMap requires that we have kernel.Kernel.fdMapUids, so\n- // it must wait until we have a Kernel.\n- Umask: uint(syscall.Umask(0)),\n- Limits: ls,\n- MaxSymlinkTraversals: linux.MaxSymlinkTraversals,\n- UTSNamespace: utsns,\n- IPCNamespace: ipcns,\n- }\n-\n// Create an empty network stack because the network namespace may be empty at\n// this point. Netns is configured before Run() is called. Netstack is\n// configured using a control uRPC message. Host network is configured inside\n@@ -219,14 +200,39 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\nreturn nil, fmt.Errorf(\"error creating control server: %v\", err)\n}\n+ // Create the process arguments.\n+ procArgs := kernel.CreateProcessArgs{\n+ Filename: exec,\n+ Argv: spec.Process.Args,\n+ Envv: spec.Process.Env,\n+ WorkingDirectory: spec.Process.Cwd,\n+ Credentials: creds,\n+ // Creating the FDMap requires that we have kernel.Kernel.fdMapUids, so\n+ // it must wait until we have a Kernel.\n+ Umask: uint(syscall.Umask(0)),\n+ Limits: ls,\n+ MaxSymlinkTraversals: linux.MaxSymlinkTraversals,\n+ UTSNamespace: utsns,\n+ IPCNamespace: ipcns,\n+ }\nctx := procArgs.NewContext(k)\n+ // Use root user to configure mounts. The current user might not have\n+ // permission to do so.\n+ rootProcArgs := kernel.CreateProcessArgs{\n+ WorkingDirectory: \"/\",\n+ Credentials: auth.NewRootCredentials(creds.UserNamespace),\n+ Umask: uint(syscall.Umask(0022)),\n+ MaxSymlinkTraversals: linux.MaxSymlinkTraversals,\n+ }\n+ rootCtx := rootProcArgs.NewContext(k)\n+\n// Create the virtual filesystem.\n- mm, err := createMountNamespace(ctx, spec, conf, ioFDs)\n+ mns, err := createMountNamespace(ctx, rootCtx, spec, conf, ioFDs)\nif err != nil {\nreturn nil, fmt.Errorf(\"error creating mounts: %v\", err)\n}\n- k.SetRootMountNamespace(mm)\n+ k.SetRootMountNamespace(mns)\n// Create the FD map, which will set stdin, stdout, and stderr. If console\n// is true, then ioctl calls will be passed through to the host fd.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader_test.go",
"new_path": "runsc/boot/loader_test.go",
"diff": "@@ -239,7 +239,7 @@ func TestCreateMountNamespace(t *testing.T) {\nfor _, tc := range testCases {\nctx := contexttest.Context(t)\n- mm, err := createMountNamespace(ctx, &tc.spec, conf, nil)\n+ mm, err := createMountNamespace(ctx, ctx, &tc.spec, conf, nil)\nif err != nil {\nt.Fatalf(\"createMountNamespace test case %q failed: %v\", tc.name, err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -20,6 +20,7 @@ import (\n\"io\"\n\"io/ioutil\"\n\"os\"\n+ \"path\"\n\"path/filepath\"\n\"reflect\"\n\"strings\"\n@@ -132,6 +133,34 @@ func waitForProcessList(s *container.Container, expected []*control.Process) err\nreturn fmt.Errorf(\"container got process list: %s, want: %s\", procListToString(got), procListToString(expected))\n}\n+// procListsEqual is used to check whether 2 Process lists are equal for all\n+// implemented fields.\n+func procListsEqual(got, want []*control.Process) bool {\n+ if len(got) != len(want) {\n+ return false\n+ }\n+ for i := range got {\n+ pd1 := got[i]\n+ pd2 := want[i]\n+ // Zero out unimplemented and timing dependant fields.\n+ pd1.Time, pd2.Time = \"\", \"\"\n+ pd1.STime, pd2.STime = \"\", \"\"\n+ pd1.C, pd2.C = 0, 0\n+ if *pd1 != *pd2 {\n+ return false\n+ }\n+ }\n+ return true\n+}\n+\n+func procListToString(pl []*control.Process) string {\n+ strs := make([]string, 0, len(pl))\n+ for _, p := range pl {\n+ strs = append(strs, fmt.Sprintf(\"%+v\", p))\n+ }\n+ return fmt.Sprintf(\"[%s]\", strings.Join(strs, \",\"))\n+}\n+\n// TestLifecycle tests the basic Create/Start/Signal/Destroy container lifecycle.\n// It verifies after each step that the container can be loaded from disk, and\n// has the correct status.\n@@ -434,17 +463,6 @@ func TestCapabilities(t *testing.T) {\nType: \"bind\",\n})\n- // Capability below is needed to mount TempDir above in case the user doesn't\n- // have access to all parents that lead to TempDir.\n- caps := []string{\"CAP_DAC_OVERRIDE\"}\n- spec.Process.Capabilities = &specs.LinuxCapabilities{\n- Bounding: caps,\n- Effective: caps,\n- Inheritable: caps,\n- Permitted: caps,\n- Ambient: caps,\n- }\n-\nrootDir, bundleDir, conf, err := setupContainer(spec)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n@@ -621,32 +639,54 @@ func TestSpecUnsupported(t *testing.T) {\n}\n}\n-// procListsEqual is used to check whether 2 Process lists are equal for all\n-// implemented fields.\n-func procListsEqual(got, want []*control.Process) bool {\n- if len(got) != len(want) {\n- return false\n- }\n- for i := range got {\n- pd1 := got[i]\n- pd2 := want[i]\n- // Zero out unimplemented and timing dependant fields.\n- pd1.Time, pd2.Time = \"\", \"\"\n- pd1.STime, pd2.STime = \"\", \"\"\n- pd1.C, pd2.C = 0, 0\n- if *pd1 != *pd2 {\n- return false\n+// TestRunNonRoot checks that sandbox can be configured when running as\n+// non-priviledged user.\n+func TestRunNonRoot(t *testing.T) {\n+ spec := newSpecWithArgs(\"/bin/true\")\n+ spec.Process.User.UID = 343\n+ spec.Process.User.GID = 2401\n+\n+ // User that container runs as can't list '$TMP/blocked' and would fail to\n+ // mount it.\n+ dir := path.Join(os.TempDir(), \"blocked\")\n+ if err := os.Mkdir(dir, 0700); err != nil {\n+ t.Fatalf(\"os.MkDir(%q) failed: %v\", dir, err)\n}\n+ dir = path.Join(dir, \"test\")\n+ if err := os.Mkdir(dir, 0755); err != nil {\n+ t.Fatalf(\"os.MkDir(%q) failed: %v\", dir, err)\n}\n- return true\n+\n+ // We generate files in the host temporary directory.\n+ spec.Mounts = append(spec.Mounts, specs.Mount{\n+ Destination: dir,\n+ Source: dir,\n+ Type: \"bind\",\n+ })\n+\n+ rootDir, bundleDir, conf, err := setupContainer(spec)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n}\n+ defer os.RemoveAll(rootDir)\n+ defer os.RemoveAll(bundleDir)\n-func procListToString(pl []*control.Process) string {\n- strs := make([]string, 0, len(pl))\n- for _, p := range pl {\n- strs = append(strs, fmt.Sprintf(\"%+v\", p))\n+ // Create, start and 
wait for the container.\n+ s, err := container.Create(uniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer s.Destroy()\n+ if err := s.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+ ws, err := s.Wait()\n+ if err != nil {\n+ t.Errorf(\"error waiting on container: %v\", err)\n+ }\n+ if !ws.Exited() || ws.ExitStatus() != 0 {\n+ t.Errorf(\"container failed, waitStatus: %v\", ws)\n}\n- return fmt.Sprintf(\"[%s]\", strings.Join(strs, \",\"))\n}\n// TestMain acts like runsc if it is called with the \"boot\" argument, otherwise\n"
}
] | Go | Apache License 2.0 | google/gvisor | Configure sandbox as superuser
Container user might not have enough privilege to walk directories and
mount filesystems. Instead, create superuser to perform these steps of
the configuration.
PiperOrigin-RevId: 197953667
Change-Id: I643650ab654e665408e2af1b8e2f2aa12d58d4fb |
260,013 | 24.05.2018 15:17:42 | 25,200 | 7f62e9c32ea6af19ccd92107252fd869e6ef1005 | rpcinet connect doesn't handle all errnos correctly.
These were causing non-blocking related errnos to be returned to
the sentry when they were created as blocking FDs internally. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/socket.go",
"new_path": "pkg/sentry/socket/rpcinet/socket.go",
"diff": "@@ -213,10 +213,10 @@ func (s *socketOperations) Connect(t *kernel.Task, sockaddr []byte, blocking boo\n// Register for notification when the endpoint becomes writable, then\n// initiate the connection.\ne, ch := waiter.NewChannelEntry(nil)\n- s.EventRegister(&e, waiter.EventOut)\n+ s.EventRegister(&e, waiter.EventOut|waiter.EventIn|waiter.EventHUp)\ndefer s.EventUnregister(&e)\n-\n- if err := rpcConnect(t, s.fd, sockaddr); err != syserr.ErrConnectStarted && err != syserr.ErrAlreadyConnecting {\n+ for {\n+ if err := rpcConnect(t, s.fd, sockaddr); err == nil || err != syserr.ErrInProgress && err != syserr.ErrAlreadyInProgress {\nreturn err\n}\n@@ -225,9 +225,7 @@ func (s *socketOperations) Connect(t *kernel.Task, sockaddr []byte, blocking boo\nif err := t.Block(ch); err != nil {\nreturn syserr.FromError(err)\n}\n-\n- // Call Connect() again after blocking to find connect's result.\n- return rpcConnect(t, s.fd, sockaddr)\n+ }\n}\nfunc rpcAccept(t *kernel.Task, fd uint32, peer bool) (*pb.AcceptResponse_ResultPayload, *syserr.Error) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | rpcinet connect doesn't handle all errnos correctly.
These were causing non-blocking related errnos to be returned to
the sentry when they were created as blocking FDs internally.
PiperOrigin-RevId: 197962932
Change-Id: I3f843535ff87ebf4cb5827e9f3d26abfb79461b0 |
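
The shape of the fix above is a retry loop: attempt the connect, and as long as the result is an "in progress" style error, block on the registered waiter and try again. A simplified, self-contained sketch with hypothetical `tryConnect`/`wait` callbacks standing in for `rpcConnect` and `t.Block(ch)`:

```go
package main

import (
	"errors"
	"fmt"
)

var errInProgress = errors.New("operation in progress")

// connectBlocking retries a connect attempt, blocking on wait between
// attempts, until the result is anything other than an "in progress"
// style error.
func connectBlocking(tryConnect, wait func() error) error {
	for {
		err := tryConnect()
		if err == nil || !errors.Is(err, errInProgress) {
			return err
		}
		if werr := wait(); werr != nil {
			return werr
		}
	}
}

func main() {
	attempts := 0
	try := func() error {
		attempts++
		if attempts < 3 {
			return errInProgress // still connecting
		}
		return nil // connected
	}
	wait := func() error { return nil } // pretend the waiter fired
	fmt.Println(connectBlocking(try, wait), "after", attempts, "attempts")
}
```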
260,013 | 24.05.2018 15:45:55 | 25,200 | a8b90a7158d4197428639c912d97f3bdbaf63f5a | Poll should wake up on ECONNREFUSED with no mask.
Today poll will not wake up on an ECONNREFUSED if no poll mask
is specified, which is equivalent to POLLHUP | POLLERR which are
implicitly added during the poll syscall. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/connect.go",
"new_path": "pkg/tcpip/transport/tcp/connect.go",
"diff": "@@ -819,7 +819,8 @@ func (e *endpoint) protocolMainLoop(passive bool) *tcpip.Error {\ndefer func() {\n// e.mu is expected to be held upon entering this section.\n- e.waiterQueue.Notify(waiter.EventIn | waiter.EventOut)\n+ // When the protocol loop exits we should wake up our waiters.\n+ e.waiterQueue.Notify(waiter.EventHUp | waiter.EventErr | waiter.EventIn | waiter.EventOut)\ne.completeWorkerLocked()\nif e.snd != nil {\n@@ -880,9 +881,6 @@ func (e *endpoint) protocolMainLoop(passive bool) *tcpip.Error {\ne.waiterQueue.Notify(waiter.EventOut)\n- // When the protocol loop exits we should wake up our waiters with EventHUp.\n- defer e.waiterQueue.Notify(waiter.EventHUp)\n-\n// Set up the functions that will be called when the main protocol loop\n// wakes up.\nfuncs := []struct {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/waiter/waiter.go",
"new_path": "pkg/waiter/waiter.go",
"diff": "@@ -174,7 +174,7 @@ func (q *Queue) Notify(mask EventMask) {\nq.mu.RLock()\nfor it := q.list.Front(); it != nil; it = it.Next() {\ne := it.(*Entry)\n- if (mask & e.mask) != 0 {\n+ if mask&e.mask != 0 {\ne.Callback.Callback(e)\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Poll should wake up on ECONNREFUSED with no mask.
Today poll will not wake up on an ECONNREFUSED if no poll mask
is specified, which is equivalent to POLLHUP | POLLERR which are
implicitly added during the poll syscall.
PiperOrigin-RevId: 197967183
Change-Id: I668d0730c33701228913f2d0843b48491b642efb |
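
Context for the change above: `poll(2)` always reports `POLLHUP` and `POLLERR` even when the caller did not ask for them, so a refused connection only wakes a poller if the endpoint notifies with those bits. A small illustrative sketch of mask-based notification (the constants are epoll-style example values, not gVisor's definitions):

```go
package main

import "fmt"

type eventMask uint16

// Example values only; they mirror the epoll-style layout.
const (
	eventIn  eventMask = 0x01
	eventOut eventMask = 0x04
	eventErr eventMask = 0x08
	eventHUp eventMask = 0x10
)

// entry is a registered waiter; notify wakes it only when the notified
// mask overlaps its registered mask, like waiter.Queue.Notify.
type entry struct {
	name string
	mask eventMask
}

func notify(entries []entry, mask eventMask) {
	for _, e := range entries {
		if mask&e.mask != 0 {
			fmt.Printf("woke %s (mask %#x)\n", e.name, mask)
		}
	}
}

func main() {
	// poll(2) adds POLLHUP|POLLERR implicitly, so even a poller with an
	// "empty" events field is effectively registered for these bits.
	poller := entry{name: "poller", mask: eventErr | eventHUp}

	// Before the fix: only EventIn|EventOut were notified, so nothing woke.
	notify([]entry{poller}, eventIn|eventOut)

	// After the fix: EventHUp|EventErr are included and the poller wakes.
	notify([]entry{poller}, eventHUp|eventErr|eventIn|eventOut)
}
```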
259,992 | 29.05.2018 17:57:26 | 25,200 | 812e83d3bbb99d4fa1ece4712a1ac85e84fe6ec3 | Supress error when deleting non-existing container with --force
This addresses the first issue reported in CRI-O expects runsc to
return success to delete when --force is used with a non-existing container. | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/BUILD",
"new_path": "runsc/cmd/BUILD",
"diff": "@@ -44,13 +44,17 @@ go_library(\ngo_test(\nname = \"cmd_test\",\nsize = \"small\",\n- srcs = [\"exec_test.go\"],\n+ srcs = [\n+ \"delete_test.go\",\n+ \"exec_test.go\",\n+ ],\nembed = [\":cmd\"],\ndeps = [\n\"//pkg/abi/linux\",\n\"//pkg/sentry/control\",\n\"//pkg/sentry/kernel/auth\",\n\"//pkg/urpc\",\n+ \"//runsc/boot\",\n\"@com_github_google_go-cmp//cmp:go_default_library\",\n\"@com_github_google_go-cmp//cmp/cmpopts:go_default_library\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/delete.go",
"new_path": "runsc/cmd/delete.go",
"diff": "package cmd\nimport (\n+ \"fmt\"\n+ \"os\"\n+\n\"context\"\n\"flag\"\n\"github.com/google/subcommands\"\n+ \"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/runsc/boot\"\n\"gvisor.googlesource.com/gvisor/runsc/container\"\n)\n@@ -56,19 +60,28 @@ func (d *Delete) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}\n}\nconf := args[0].(*boot.Config)\n+ if err := d.execute(f.Args(), conf); err != nil {\n+ Fatalf(\"%v\", err)\n+ }\n+ return subcommands.ExitSuccess\n+}\n- for i := 0; i < f.NArg(); i++ {\n- id := f.Arg(i)\n+func (d *Delete) execute(ids []string, conf *boot.Config) error {\n+ for _, id := range ids {\nc, err := container.Load(conf.RootDir, id)\nif err != nil {\n- Fatalf(\"error loading container %q: %v\", id, err)\n+ if os.IsNotExist(err) && d.force {\n+ log.Warningf(\"couldn't find container %q: %v\", id, err)\n+ return nil\n+ }\n+ return fmt.Errorf(\"error loading container %q: %v\", id, err)\n}\nif !d.force && (c.Status == container.Running) {\n- Fatalf(\"cannot stop running container without --force flag\")\n+ return fmt.Errorf(\"cannot stop running container without --force flag\")\n}\nif err := c.Destroy(); err != nil {\n- Fatalf(\"error destroying container: %v\", err)\n+ return fmt.Errorf(\"error destroying container: %v\", err)\n}\n}\n- return subcommands.ExitSuccess\n+ return nil\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/cmd/delete_test.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package cmd\n+\n+import (\n+ \"io/ioutil\"\n+ \"testing\"\n+\n+ \"gvisor.googlesource.com/gvisor/runsc/boot\"\n+)\n+\n+func TestNotFound(t *testing.T) {\n+ ids := []string{\"123\"}\n+ dir, err := ioutil.TempDir(\"\", \"metadata\")\n+ if err != nil {\n+ t.Fatalf(\"error creating dir: %v\", err)\n+ }\n+ conf := &boot.Config{RootDir: dir}\n+\n+ d := Delete{}\n+ if err := d.execute(ids, conf); err == nil {\n+ t.Error(\"Deleting non-existend container should have failed\")\n+ }\n+\n+ d = Delete{force: true}\n+ if err := d.execute(ids, conf); err != nil {\n+ t.Errorf(\"Deleting non-existend container with --force should NOT have failed: %v\", err)\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -93,21 +93,19 @@ type Container struct {\n}\n// Load loads a container with the given id from a metadata file.\n+// Returns ErrNotExist if container doesn't exits.\nfunc Load(rootDir, id string) (*Container, error) {\nlog.Debugf(\"Load container %q %q\", rootDir, id)\nif err := validateID(id); err != nil {\nreturn nil, err\n}\n- cRoot := filepath.Join(rootDir, id)\n- if !exists(cRoot) {\n- return nil, fmt.Errorf(\"container with id %q does not exist\", id)\n- }\n- metaFile := filepath.Join(cRoot, metadataFilename)\n- if !exists(metaFile) {\n- return nil, fmt.Errorf(\"container with id %q does not have metadata file %q\", id, metaFile)\n- }\n+ metaFile := filepath.Join(rootDir, id, metadataFilename)\nmetaBytes, err := ioutil.ReadFile(metaFile)\nif err != nil {\n+ if os.IsNotExist(err) {\n+ // Preserve error so that callers can distinguish 'not found' errors.\n+ return nil, err\n+ }\nreturn nil, fmt.Errorf(\"error reading container metadata file %q: %v\", metaFile, err)\n}\nvar c Container\n@@ -161,8 +159,10 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo\n}\ncontainerRoot := filepath.Join(conf.RootDir, id)\n- if exists(containerRoot) {\n+ if _, err := os.Stat(containerRoot); err == nil {\nreturn nil, fmt.Errorf(\"container with id %q already exists: %q\", id, containerRoot)\n+ } else if !os.IsNotExist(err) {\n+ return nil, fmt.Errorf(\"error looking for existing container in %q: %v\", containerRoot, err)\n}\nc := &Container{\n@@ -328,11 +328,6 @@ func (c *Container) Destroy() error {\nreturn err\n}\n- // Then destroy all the metadata.\n- if err := os.RemoveAll(c.Root); err != nil {\n- log.Warningf(\"Failed to delete container root directory %q, err: %v\", c.Root, err)\n- }\n-\n// \"If any poststop hook fails, the runtime MUST log a warning, but the\n// remaining hooks and lifecycle continue as if the hook had succeeded\".\nif c.Spec.Hooks != nil && (c.Status == Created || c.Status == Running) {\n@@ -372,13 +367,3 @@ func (c *Container) save() error {\n}\nreturn nil\n}\n-\n-// exists returns true if the given file exists.\n-func exists(f string) bool {\n- if _, err := os.Stat(f); err == nil {\n- return true\n- } else if !os.IsNotExist(err) {\n- log.Warningf(\"error checking for file %q: %v\", f, err)\n- }\n- return false\n-}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Supress error when deleting non-existing container with --force
This addresses the first issue reported in #59. CRI-O expects runsc to
return success to delete when --force is used with a non-existing container.
PiperOrigin-RevId: 198487418
Change-Id: If7660e8fdab1eb29549d0a7a45ea82e20a1d4f4a |
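
The behavior above hinges on distinguishing "metadata file does not exist" from other load failures, and suppressing only the former when `--force` is passed. A small sketch of that decision, using an illustrative metadata path rather than runsc's real layout:

```go
package main

import (
	"fmt"
	"os"
)

// deleteContainer mirrors the Delete.execute logic above: a missing
// metadata file is tolerated only when force is set.
func deleteContainer(metaFile string, force bool) error {
	if _, err := os.ReadFile(metaFile); err != nil {
		if os.IsNotExist(err) && force {
			fmt.Printf("couldn't find container metadata %q, ignoring\n", metaFile)
			return nil
		}
		return fmt.Errorf("error loading container: %v", err)
	}
	// ... destroy the container here ...
	return nil
}

func main() {
	fmt.Println(deleteContainer("/nonexistent/meta.json", false)) // error
	fmt.Println(deleteContainer("/nonexistent/meta.json", true))  // <nil>
}
```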
259,992 | 31.05.2018 10:53:08 | 25,200 | 3547c4886773acec0ac562104f055528ed490b75 | Add SHA512 file to nightly build | [
{
"change_type": "MODIFY",
"old_path": "kokoro/gcp_ubuntu/release-nightly.cfg",
"new_path": "kokoro/gcp_ubuntu/release-nightly.cfg",
"diff": "build_file: \"repo/kokoro/gcp_ubuntu/run_build.sh\"\naction {\n- # Upload only the runsc binary. It may be in multiple paths, so we must use\n- # the wildcard.\n+ # Upload runsc binary and its checksum. It may be in multiple paths, so we\n+ # must use the wildcard.\ndefine_artifacts {\nregex: \"**/runsc\"\n+ regex: \"**/runsc.sha512\"\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "kokoro/gcp_ubuntu/run_build.sh",
"new_path": "kokoro/gcp_ubuntu/run_build.sh",
"diff": "@@ -37,4 +37,6 @@ latest_dir=\"${KOKORO_ARTIFACTS_DIR}\"/latest\ntoday_dir=\"${KOKORO_ARTIFACTS_DIR}\"/\"$(date -Idate)\"\nmkdir -p \"${latest_dir}\" \"${today_dir}\"\ncp bazel-bin/runsc/linux_amd64_pure_stripped/runsc \"${latest_dir}\"\n+sha512sum \"${latest_dir}\"/runsc | awk '{print $1 \" runsc\"}' > \"${latest_dir}\"/runsc.sha512\ncp bazel-bin/runsc/linux_amd64_pure_stripped/runsc \"${today_dir}\"\n+sha512sum \"${today_dir}\"/runsc | awk '{print $1} \" runsc\"' > \"${today_dir}\"/runsc.sha512\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add SHA512 file to nightly build
PiperOrigin-RevId: 198745666
Change-Id: I38d4163cd65f1236b09ce4f6481197a9a9fd29f2 |
259,992 | 01.06.2018 10:08:40 | 25,200 | 65dadc00297d946e86b2e95b0279fb6dc94542dd | Ignores IPv6 addresses when configuring network
Closes | [
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/network.go",
"new_path": "runsc/sandbox/network.go",
"diff": "@@ -188,14 +188,14 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string) error {\ncontinue\n}\n- ifaddrs, err := iface.Addrs()\n+ allAddrs, err := iface.Addrs()\nif err != nil {\nreturn fmt.Errorf(\"error fetching interface addresses for %q: %v\", iface.Name, err)\n}\n// We build our own loopback devices.\nif iface.Flags&net.FlagLoopback != 0 {\n- links, err := loopbackLinks(iface, ifaddrs)\n+ links, err := loopbackLinks(iface, allAddrs)\nif err != nil {\nreturn fmt.Errorf(\"error getting loopback routes and links for iface %q: %v\", iface.Name, err)\n}\n@@ -203,6 +203,24 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string) error {\ncontinue\n}\n+ // Keep only IPv4 addresses.\n+ var ip4addrs []*net.IPNet\n+ for _, ifaddr := range allAddrs {\n+ ipNet, ok := ifaddr.(*net.IPNet)\n+ if !ok {\n+ return fmt.Errorf(\"address is not IPNet: %+v\", ifaddr)\n+ }\n+ if ipNet.IP.To4() == nil {\n+ log.Warningf(\"IPv6 is not supported, skipping: %v\", ipNet)\n+ continue\n+ }\n+ ip4addrs = append(ip4addrs, ipNet)\n+ }\n+ if len(ip4addrs) == 0 {\n+ log.Warningf(\"No IPv4 address found for interface %q, skipping\", iface.Name)\n+ continue\n+ }\n+\n// Get the link for the interface.\nifaceLink, err := netlink.LinkByName(iface.Name)\nif err != nil {\n@@ -250,16 +268,12 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string) error {\n// Collect the addresses for the interface, enable forwarding,\n// and remove them from the host.\n- for _, ifaddr := range ifaddrs {\n- ipNet, ok := ifaddr.(*net.IPNet)\n- if !ok {\n- return fmt.Errorf(\"address is not IPNet: %t %+v\", ifaddr, ifaddr)\n- }\n- link.Addresses = append(link.Addresses, ipNet.IP)\n+ for _, addr := range ip4addrs {\n+ link.Addresses = append(link.Addresses, addr.IP)\n// Steal IP address from NIC.\n- if err := removeAddress(ifaceLink, ipNet.String()); err != nil {\n- return fmt.Errorf(\"error removing address %v from device %q: %v\", iface.Name, ipNet, err)\n+ if err := removeAddress(ifaceLink, addr.String()); err != nil {\n+ return fmt.Errorf(\"error removing address %v from device %q: %v\", iface.Name, addr, err)\n}\n}\n@@ -280,7 +294,7 @@ func loopbackLinks(iface net.Interface, addrs []net.Addr) ([]boot.LoopbackLink,\nfor _, addr := range addrs {\nipNet, ok := addr.(*net.IPNet)\nif !ok {\n- return nil, fmt.Errorf(\"address is not IPNet: %t %+v\", addr, addr)\n+ return nil, fmt.Errorf(\"address is not IPNet: %+v\", addr)\n}\nlinks = append(links, boot.LoopbackLink{\nName: iface.Name,\n@@ -314,21 +328,25 @@ func routesForIface(iface net.Interface) ([]boot.Route, *boot.Route, error) {\nif r.Gw == nil {\nreturn nil, nil, fmt.Errorf(\"default route with no gateway %q: %+v\", iface.Name, r)\n}\n+ if r.Gw.To4() == nil {\n+ log.Warningf(\"IPv6 is not supported, skipping default route: %v\", r)\n+ continue\n+ }\nif def != nil {\nreturn nil, nil, fmt.Errorf(\"more than one default route found %q, def: %+v, route: %+v\", iface.Name, def, r)\n}\n- emptyAddr := net.IPv6zero\n- if r.Gw.To4() != nil {\n- emptyAddr = net.IPv4zero\n- }\n// Create a catch all route to the gateway.\ndef = &boot.Route{\n- Destination: emptyAddr,\n- Mask: net.IPMask(emptyAddr),\n+ Destination: net.IPv4zero,\n+ Mask: net.IPMask(net.IPv4zero),\nGateway: r.Gw,\n}\ncontinue\n}\n+ if r.Dst.IP.To4() == nil {\n+ log.Warningf(\"IPv6 is not supported, skipping route: %v\", r)\n+ continue\n+ }\nroutes = append(routes, boot.Route{\nDestination: r.Dst.IP.Mask(r.Dst.Mask),\nMask: r.Dst.Mask,\n"
}
] | Go | Apache License 2.0 | google/gvisor | Ignores IPv6 addresses when configuring network
Closes #60
PiperOrigin-RevId: 198887885
Change-Id: I9bf990ee3fde9259836e57d67257bef5b85c6008 |
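
The filtering added above keys off `IP.To4()`: addresses whose `To4()` is nil are IPv6 (or otherwise unsupported) and get skipped. A standalone sketch of the same filter applied to the standard library's interface addresses:

```go
package main

import (
	"fmt"
	"net"
)

// ipv4Only keeps only IPv4 addresses, using the same To4() test the
// network.go diff above applies before configuring a link.
func ipv4Only(addrs []net.Addr) ([]*net.IPNet, error) {
	var out []*net.IPNet
	for _, a := range addrs {
		ipNet, ok := a.(*net.IPNet)
		if !ok {
			return nil, fmt.Errorf("address is not IPNet: %+v", a)
		}
		if ipNet.IP.To4() == nil {
			continue // IPv6 (or other), skip it
		}
		out = append(out, ipNet)
	}
	return out, nil
}

func main() {
	ifaces, err := net.Interfaces()
	if err != nil {
		panic(err)
	}
	for _, iface := range ifaces {
		addrs, err := iface.Addrs()
		if err != nil {
			continue
		}
		if v4, err := ipv4Only(addrs); err == nil {
			fmt.Printf("%s: %v\n", iface.Name, v4)
		}
	}
}
```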
260,013 | 01.06.2018 14:58:46 | 25,200 | 0212f222c74b9f88c5c74d920127e47e942dc376 | Fix refcount bug in rpcinet socketOperations.Accept. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/socket.go",
"new_path": "pkg/sentry/socket/rpcinet/socket.go",
"diff": "@@ -277,8 +277,10 @@ func (s *socketOperations) Accept(t *kernel.Task, peerRequested bool, flags int,\nfile := fs.NewFile(t, dirent, fs.FileFlags{Read: true, Write: true, NonBlocking: flags&linux.SOCK_NONBLOCK != 0}, &socketOperations{\nwq: &wq,\nfd: payload.Fd,\n+ rpcConn: s.rpcConn,\nnotifier: s.notifier,\n})\n+ defer file.DecRef()\nfdFlags := kernel.FDFlags{\nCloseOnExec: flags&linux.SOCK_CLOEXEC != 0,\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix refcount bug in rpcinet socketOperations.Accept.
PiperOrigin-RevId: 198931222
Change-Id: I69ee12318e87b9a6a4a94b18a9bf0ae4e39d7eaf |
259,992 | 02.06.2018 15:21:42 | 25,200 | 43dd424f424832415486ba354a2b8d2343ea2d4a | Add SHA512 pointer to README | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -170,8 +170,9 @@ binaries).\n### Download a Nightly Build\n-The easiest way to get `runsc` is from a the latest nightly build.\n-[here][runsc-nightly].\n+The easiest way to get `runsc` is from the\n+[latest nightly build][runsc-nightly]. After you download the binary, check it\n+against the SHA512 [checksum file][runsc-nightly-sha].\n**It is important to copy this binary to some place that is accessible to all\nusers**, since `runsc` executes itself as user `nobody` to avoid unnecessary\n@@ -180,6 +181,8 @@ privileges. The `/usr/local/bin` directory is a good choice.\n```\nwget https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc\n+wget https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc.sha512\n+sha512sum -c runsc.sha512\nchmod +x runsc\nsudo mv runsc /usr/local/bin\n```\n@@ -436,17 +439,18 @@ See [Contributing.md](CONTRIBUTING.md).\n[apparmor]: https://wiki.ubuntu.com/AppArmor\n[bazel]: https://bazel.build\n[bug]: https://github.com/google/gvisor/issues\n-[cri-o]: https://github.com/kubernetes-incubator/cri-o\n[cri-o-k8s]: https://github.com/kubernetes-incubator/cri-o/blob/master/kubernetes.md\n-[docker]: https://www.docker.com\n+[cri-o]: https://github.com/kubernetes-incubator/cri-o\n[docker-storage-driver]: https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-storage-driver\n+[docker]: https://www.docker.com\n[git]: https://git-scm.com\n-[gvisor-users-list]: https://groups.google.com/forum/#!forum/gvisor-users\n[gvisor-security-list]: https://groups.google.com/forum/#!forum/gvisor-security\n+[gvisor-users-list]: https://groups.google.com/forum/#!forum/gvisor-users\n[kvm]: https://www.linux-kvm.org\n[netstack]: https://github.com/google/netstack\n[oci]: https://www.opencontainers.org\n[python]: https://python.org\n+[runsc-nightly-sha]: https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc.sha512\n[runsc-nightly]: https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc\n[sandbox]: https://en.wikipedia.org/wiki/Sandbox_(computer_security)\n[seccomp]: https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add SHA512 pointer to README
PiperOrigin-RevId: 199008198
Change-Id: I6d1a0107ae1b11f160b42a2cabaf1fb8ce419edf |
259,992 | 04.06.2018 10:27:33 | 25,200 | 0929bdee3450aa6bc2393227127fe72405b82e4a | Fix checksum file for today's build | [
{
"change_type": "MODIFY",
"old_path": "kokoro/gcp_ubuntu/run_build.sh",
"new_path": "kokoro/gcp_ubuntu/run_build.sh",
"diff": "@@ -39,4 +39,4 @@ mkdir -p \"${latest_dir}\" \"${today_dir}\"\ncp bazel-bin/runsc/linux_amd64_pure_stripped/runsc \"${latest_dir}\"\nsha512sum \"${latest_dir}\"/runsc | awk '{print $1 \" runsc\"}' > \"${latest_dir}\"/runsc.sha512\ncp bazel-bin/runsc/linux_amd64_pure_stripped/runsc \"${today_dir}\"\n-sha512sum \"${today_dir}\"/runsc | awk '{print $1} \" runsc\"' > \"${today_dir}\"/runsc.sha512\n+sha512sum \"${today_dir}\"/runsc | awk '{print $1 \" runsc\"}' > \"${today_dir}\"/runsc.sha512\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix checksum file for today's build
PiperOrigin-RevId: 199153448
Change-Id: Ic1f0456191080117a8586f77dd2fb44dc53754ca |
259,992 | 04.06.2018 11:51:27 | 25,200 | 55a37ceef1e33cc72236db6e95f159963ddf40bd | Fix leaky FD
9P socket was being created without CLOEXEC and was being inherited
by the children. This would prevent the gofer from detecting that the
sandbox had exited, because the socket would not be closed. | [
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/BUILD",
"new_path": "runsc/sandbox/BUILD",
"diff": "package(licenses = [\"notice\"]) # Apache 2.0\n-load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\")\n+load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\", \"go_test\")\ngo_library(\nname = \"sandbox\",\n@@ -28,3 +28,17 @@ go_library(\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n+\n+go_test(\n+ name = \"sandbox_test\",\n+ size = \"small\",\n+ srcs = [\"sandbox_test.go\"],\n+ data = [\n+ \"//runsc\",\n+ ],\n+ embed = [\":sandbox\"],\n+ deps = [\n+ \"//pkg/log\",\n+ \"//runsc/test/testutil\",\n+ ],\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -195,7 +195,7 @@ func (s *Sandbox) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundle\ngoferEnds := make([]*os.File, 0, mountCount)\nfor i := 0; i < mountCount; i++ {\n// Create socket that connects the sandbox and gofer.\n- fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)\n+ fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)\nif err != nil {\nreturn nil, err\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/sandbox/sandbox_test.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package sandbox\n+\n+import (\n+ \"os\"\n+ \"testing\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/log\"\n+ \"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n+)\n+\n+func init() {\n+ log.SetLevel(log.Debug)\n+ if err := testutil.ConfigureExePath(); err != nil {\n+ panic(err.Error())\n+ }\n+}\n+\n+func TestGoferExits(t *testing.T) {\n+ spec := testutil.NewSpecWithArgs(\"/bin/sleep\", \"10000\")\n+ rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ defer os.RemoveAll(bundleDir)\n+\n+ // Create, start and wait for the container.\n+ s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer s.Destroy()\n+ if err := s.Start(\"123\", spec, conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+\n+ sandboxProc, err := os.FindProcess(s.Pid)\n+ if err != nil {\n+ t.Fatalf(\"error finding sandbox process: %v\", err)\n+ }\n+ gofer, err := os.FindProcess(s.GoferPid)\n+ if err != nil {\n+ t.Fatalf(\"error finding sandbox process: %v\", err)\n+ }\n+\n+ // Kill sandbox and expect gofer to exit on its own.\n+ if err := sandboxProc.Kill(); err != nil {\n+ t.Fatalf(\"error killing sandbox process: %v\", err)\n+ }\n+ if _, err := sandboxProc.Wait(); err != nil {\n+ t.Fatalf(\"error waiting for sandbox process: %v\", err)\n+ }\n+\n+ if _, err := gofer.Wait(); err != nil {\n+ t.Fatalf(\"error waiting for gofer process: %v\", err)\n+ }\n+ if s.IsRunning() {\n+ t.Errorf(\"Sandbox shouldn't be running, sandbox: %+v\", s)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix leaky FD
The 9P socket was being created without CLOEXEC and was being inherited
by child processes. This would prevent the gofer from detecting that the
sandbox had exited, because the socket would not be closed.
PiperOrigin-RevId: 199168959
Change-Id: I3ee1a07cbe7331b0aeb1cf2b697e728ce24f85a7 |
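A minimal Go sketch of the close-on-exec socketpair pattern the fix above relies on; the helper name and error wrapping are assumptions for illustration and the snippet is not part of the runsc sources.

    package example

    import (
        "fmt"
        "syscall"
    )

    // newCloexecSocketPair creates a connected AF_UNIX pair whose descriptors
    // are marked close-on-exec, so processes spawned later do not inherit them
    // and cannot keep the 9P connection alive after the parent exits.
    func newCloexecSocketPair() ([2]int, error) {
        fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)
        if err != nil {
            return [2]int{}, fmt.Errorf("socketpair failed: %v", err)
        }
        return fds, nil
    }

Setting SOCK_CLOEXEC at creation time leaves no window in which a fork/exec between socketpair(2) and a later fcntl(2) could leak the descriptors.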
259,992 | 04.06.2018 12:13:33 | 25,200 | 78ccd1298e1386d9c5e0eb10d328ecb16b28ea02 | Return 'running' if gofer is still alive
Containerd will start deleting the container and rootfs after the
container is stopped. However, if the gofer is still running, rootfs
cleanup will fail because the device is busy.
This CL makes sure that the gofer is not running when the container
state is stopped.
Change from: | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -186,6 +186,8 @@ func TestLifecycle(t *testing.T) {\n// ourselves.\np, _ := os.FindProcess(s.Sandbox.Pid)\np.Wait()\n+ g, _ := os.FindProcess(s.Sandbox.GoferPid)\n+ g.Wait()\n// Load the container from disk and check the status.\ns, err = container.Load(rootDir, id)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -440,14 +440,28 @@ func (s *Sandbox) Signal(cid string, sig syscall.Signal) error {\nreturn nil\n}\n-// IsRunning returns true iff the sandbox process is running.\n+// IsRunning returns true if the sandbox or gofer process is running.\nfunc (s *Sandbox) IsRunning() bool {\n+ if s.Pid != 0 {\n// Send a signal 0 to the sandbox process.\n- if err := killProcess(s.Pid, 0); err != nil {\n- return false\n+ if err := killProcess(s.Pid, 0); err == nil {\n+ return true\n}\n+ }\n+ if s.GoferPid != 0 {\n+ // Send a signal 0 to the gofer process.\n+ if err := killProcess(s.GoferPid, 0); err == nil {\n+ log.Warningf(\"Found orphan gofer process, pid: %d\", s.GoferPid)\n+ // Attempt to kill gofer if it's orphan.\n+ killProcess(s.GoferPid, unix.SIGKILL)\n+\n+ // Don't wait for gofer to die. Return 'running' and hope gofer is dead\n+ // next time around.\nreturn true\n}\n+ }\n+ return false\n+}\n// killProcess sends a signal to the host process (i.e. a sandbox or gofer\n// process). Sandbox.Signal should be used to send a signal to a process\n"
}
] | Go | Apache License 2.0 | google/gvisor | Return 'running' if gofer is still alive
Containerd will start deleting the container and rootfs after the
container is stopped. However, if the gofer is still running, rootfs
cleanup will fail because the device is busy.
This CL makes sure that the gofer is not running when the container
state is stopped.
Change from: [email protected]
PiperOrigin-RevId: 199172668
Change-Id: I9d874eec3ecf74fd9c8edd7f62d9f998edef66fe |
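A short Go sketch of the liveness probe used above; the function is an assumed standalone helper, not the actual sandbox code.

    package example

    import "syscall"

    // processAlive reports whether pid refers to a live process. Signal 0
    // makes the kernel perform the existence and permission checks without
    // delivering any signal to the target.
    func processAlive(pid int) bool {
        if pid == 0 {
            return false
        }
        return syscall.Kill(pid, 0) == nil
    }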
259,992 | 04.06.2018 12:30:47 | 25,200 | 6c585b8eb69362db9af5ed150763096874832b86 | Create destination mount dir if it doesn't exist | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -288,11 +288,21 @@ func mountSubmount(ctx context.Context, spec *specs.Spec, conf *Config, mns *fs.\nif useOverlay {\nlog.Debugf(\"Adding overlay on top of mount %q\", m.Destination)\n- if inode, err = addOverlay(ctx, conf, inode, m.Type, mf); err != nil {\n+ inode, err = addOverlay(ctx, conf, inode, m.Type, mf)\n+ if err != nil {\nreturn err\n}\n}\n+ // Create destination in case it doesn't exist. This is required, in addition\n+ // to 'addSubmountOverlay', in case there are symlinks to create directories\n+ // in the right location, e.g.\n+ // mount: /var/run/secrets, may be created in '/run/secrets' if\n+ // '/var/run' => '/var'.\n+ if err := mkdirAll(ctx, mns, m.Destination); err != nil {\n+ return err\n+ }\n+\nroot := mns.Root()\ndefer root.DecRef()\ndirent, err := mns.FindInode(ctx, root, nil, m.Destination, linux.MaxSymlinkTraversals)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -92,6 +92,35 @@ func procListToString(pl []*control.Process) string {\nreturn fmt.Sprintf(\"[%s]\", strings.Join(strs, \",\"))\n}\n+// run starts the sandbox and waits for it to exit, checking that the\n+// application succeeded.\n+func run(spec *specs.Spec) error {\n+ rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ if err != nil {\n+ return fmt.Errorf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ defer os.RemoveAll(bundleDir)\n+\n+ // Create, start and wait for the container.\n+ s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ if err != nil {\n+ return fmt.Errorf(\"error creating container: %v\", err)\n+ }\n+ defer s.Destroy()\n+ if err := s.Start(conf); err != nil {\n+ return fmt.Errorf(\"error starting container: %v\", err)\n+ }\n+ ws, err := s.Wait()\n+ if err != nil {\n+ return fmt.Errorf(\"error waiting on container: %v\", err)\n+ }\n+ if !ws.Exited() || ws.ExitStatus() != 0 {\n+ return fmt.Errorf(\"container failed, waitStatus: %v\", ws)\n+ }\n+ return nil\n+}\n+\n// TestLifecycle tests the basic Create/Start/Signal/Destroy container lifecycle.\n// It verifies after each step that the container can be loaded from disk, and\n// has the correct status.\n@@ -600,27 +629,34 @@ func TestRunNonRoot(t *testing.T) {\nType: \"bind\",\n})\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n- if err != nil {\n- t.Fatalf(\"error setting up container: %v\", err)\n+ if err := run(spec); err != nil {\n+ t.Fatalf(\"error running sadbox: %v\", err)\n}\n- defer os.RemoveAll(rootDir)\n- defer os.RemoveAll(bundleDir)\n-\n- // Create, start and wait for the container.\n- s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n- if err != nil {\n- t.Fatalf(\"error creating container: %v\", err)\n}\n- defer s.Destroy()\n- if err := s.Start(conf); err != nil {\n- t.Fatalf(\"error starting container: %v\", err)\n+\n+// TestMountNewDir check that runsc will create destination directory if it\n+// doesn't exit.\n+func TestMountNewDir(t *testing.T) {\n+ srcDir := path.Join(os.TempDir(), \"src\", \"newdir\", \"anotherdir\")\n+ if err := os.MkdirAll(srcDir, 0755); err != nil {\n+ t.Fatalf(\"os.MkDir(%q) failed: %v\", srcDir, err)\n}\n- ws, err := s.Wait()\n- if err != nil {\n- t.Errorf(\"error waiting on container: %v\", err)\n+\n+ // Attempt to remove dir to ensure it doesn't exist.\n+ mountDir := path.Join(os.TempDir(), \"newdir\")\n+ if err := os.RemoveAll(mountDir); err != nil {\n+ t.Fatalf(\"os.RemoveAll(%q) failed: %v\", mountDir, err)\n}\n- if !ws.Exited() || ws.ExitStatus() != 0 {\n- t.Errorf(\"container failed, waitStatus: %v\", ws)\n+ mountDir = path.Join(mountDir, \"anotherdir\")\n+\n+ spec := testutil.NewSpecWithArgs(\"/bin/ls\", mountDir)\n+ spec.Mounts = append(spec.Mounts, specs.Mount{\n+ Destination: mountDir,\n+ Source: srcDir,\n+ Type: \"bind\",\n+ })\n+\n+ if err := run(spec); err != nil {\n+ t.Fatalf(\"error running sadbox: %v\", err)\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Create destination mount dir if it doesn't exist
PiperOrigin-RevId: 199175296
Change-Id: I694ad1cfa65572c92f77f22421fdcac818f44630 |
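A hedged host-level sketch of the "create the destination before mounting" pattern; the real change creates the directory inside the sentry's own mount namespace, so the helper below is only an analogy with assumed names.

    package example

    import (
        "fmt"
        "os"
        "syscall"
    )

    // bindMount ensures the destination directory exists and then bind-mounts
    // src onto it, so a mount such as /var/run/secrets succeeds even when the
    // destination path was never created.
    func bindMount(src, dst string) error {
        if err := os.MkdirAll(dst, 0755); err != nil {
            return fmt.Errorf("creating mount point %q: %v", dst, err)
        }
        if err := syscall.Mount(src, dst, "", syscall.MS_BIND, ""); err != nil {
            return fmt.Errorf("bind mounting %q on %q: %v", src, dst, err)
        }
        return nil
    }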
259,992 | 04.06.2018 18:04:05 | 25,200 | 19a0e83b50fbcfd89927baedbb1f1fd14dc448ca | Make fsgofer attach more strict
Refuse to mount paths with "." and ".." in the path to prevent
a compromised Sentry from mounting "../../secrets". Only allow
Attach to be called once per mount point. | [
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer.go",
"new_path": "runsc/fsgofer/fsgofer.go",
"diff": "@@ -26,7 +26,6 @@ import (\n\"math\"\n\"os\"\n\"path\"\n- \"path/filepath\"\n\"strings\"\n\"sync\"\n\"syscall\"\n@@ -83,6 +82,9 @@ type Config struct {\ntype attachPoint struct {\nprefix string\nconf Config\n+\n+ mu sync.Mutex\n+ attached bool\n}\n// NewAttachPoint creates a new attacher that gives local file\n@@ -93,19 +95,22 @@ func NewAttachPoint(prefix string, c Config) p9.Attacher {\n// Attach implements p9.Attacher.\nfunc (a *attachPoint) Attach(appPath string) (p9.File, error) {\n+ // Only proceed if 'appPath' is valid.\nif !path.IsAbs(appPath) {\nreturn nil, fmt.Errorf(\"invalid path %q\", appPath)\n}\n+ if path.Clean(appPath) != appPath {\n+ return nil, fmt.Errorf(\"invalid path %q\", appPath)\n+ }\n- root := filepath.Join(a.prefix, appPath)\n+ root := path.Join(a.prefix, appPath)\nfi, err := os.Stat(root)\nif err != nil {\nreturn nil, err\n}\n-\n- mode := syscall.O_RDWR\n+ mode := os.O_RDWR\nif a.conf.ROMount || fi.IsDir() {\n- mode = syscall.O_RDONLY\n+ mode = os.O_RDONLY\n}\nf, err := os.OpenFile(root, mode|openFlags, 0)\n@@ -114,8 +119,18 @@ func (a *attachPoint) Attach(appPath string) (p9.File, error) {\n}\nstat, err := stat(int(f.Fd()))\nif err != nil {\n+ f.Close()\nreturn nil, fmt.Errorf(\"failed to stat file %q, err: %v\", root, err)\n}\n+\n+ a.mu.Lock()\n+ defer a.mu.Unlock()\n+ if a.attached {\n+ f.Close()\n+ return nil, fmt.Errorf(\"attach point already attached, prefix: %s\", a.prefix)\n+ }\n+ a.attached = true\n+\nreturn newLocalFile(a.conf, f, root, stat)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer_test.go",
"new_path": "runsc/fsgofer/fsgofer_test.go",
"diff": "@@ -19,6 +19,7 @@ import (\n\"io/ioutil\"\n\"os\"\n\"path\"\n+ \"strings\"\n\"syscall\"\n\"testing\"\n@@ -622,3 +623,45 @@ func TestAttachFile(t *testing.T) {\nt.Fatalf(\"ReadAt() wrong data, got: %s, expected: %s\", string(rBuf), \"foobar\")\n}\n}\n+\n+func TestAttachError(t *testing.T) {\n+ conf := Config{ROMount: false}\n+ root, err := ioutil.TempDir(\"\", \"root-\")\n+ if err != nil {\n+ t.Fatalf(\"ioutil.TempDir() failed, err: %v\", err)\n+ }\n+ defer os.RemoveAll(root)\n+ a := NewAttachPoint(root, conf)\n+\n+ c := path.Join(root, \"test\")\n+ if err := os.Mkdir(c, 0700); err != nil {\n+ t.Fatalf(\"os.Create(%q) failed, err: %v\", c, err)\n+ }\n+\n+ for _, p := range []string{\"test\", \"/test/../\", \"/test/./\", \"/test//\"} {\n+ _, err := a.Attach(p)\n+ if err == nil {\n+ t.Fatalf(\"Attach(%q) should have failed\", p)\n+ }\n+ if want := \"invalid path\"; !strings.Contains(err.Error(), want) {\n+ t.Fatalf(\"Attach(%q) wrong error, got: %v, wanted: %v\", p, err, want)\n+ }\n+ }\n+}\n+\n+func TestDoubleAttachError(t *testing.T) {\n+ conf := Config{ROMount: false}\n+ root, err := ioutil.TempDir(\"\", \"root-\")\n+ if err != nil {\n+ t.Fatalf(\"ioutil.TempDir() failed, err: %v\", err)\n+ }\n+ defer os.RemoveAll(root)\n+ a := NewAttachPoint(root, conf)\n+\n+ if _, err := a.Attach(\"/\"); err != nil {\n+ t.Fatalf(\"Attach(%q) failed: %v\", \"/\", err)\n+ }\n+ if _, err := a.Attach(\"/\"); err == nil {\n+ t.Fatalf(\"Attach(%q) should have failed\", \"test\")\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make fsgofer attach more strict
Refuse to mount paths with "." and ".." in the path to prevent
a compromised Sentry from mounting "../../secrets". Only allow
Attach to be called once per mount point.
PiperOrigin-RevId: 199225929
Change-Id: I2a3eb7ea0b23f22eb8dde2e383e32563ec003bd5 |
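A reduced Go sketch of the two checks introduced above, pulled out of the p9 plumbing; the field layout follows the diff but the method name is an assumption.

    package example

    import (
        "fmt"
        "path"
        "sync"
    )

    type attachPoint struct {
        prefix string

        mu       sync.Mutex
        attached bool
    }

    // checkAttach rejects anything that is not a clean absolute path (so
    // "../../secrets" cannot escape the prefix) and allows only one successful
    // attach per attach point.
    func (a *attachPoint) checkAttach(appPath string) error {
        if !path.IsAbs(appPath) || path.Clean(appPath) != appPath {
            return fmt.Errorf("invalid path %q", appPath)
        }
        a.mu.Lock()
        defer a.mu.Unlock()
        if a.attached {
            return fmt.Errorf("attach point already attached, prefix: %s", a.prefix)
        }
        a.attached = true
        return nil
    }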
260,013 | 05.06.2018 15:43:55 | 25,200 | ff7b4a156f95a587b5df4de89a22c200fceabb96 | Add support for rpcinet owned procfs files.
This change will add support for /proc/sys/net and /proc/net, which will
be managed and owned by rpcinet. This will allow these inodes to be forwarded
as RPCs. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/BUILD",
"new_path": "pkg/sentry/fs/proc/BUILD",
"diff": "@@ -44,6 +44,7 @@ go_library(\n\"net.go\",\n\"proc.go\",\n\"proc_state.go\",\n+ \"rpcinet_proc.go\",\n\"stat.go\",\n\"sys.go\",\n\"sys_net.go\",\n@@ -70,6 +71,7 @@ go_library(\n\"//pkg/sentry/kernel/kdefs\",\n\"//pkg/sentry/kernel/time\",\n\"//pkg/sentry/mm\",\n+ \"//pkg/sentry/socket/rpcinet\",\n\"//pkg/sentry/usage\",\n\"//pkg/sentry/usermem\",\n\"//pkg/state\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/net.go",
"new_path": "pkg/sentry/fs/proc/net.go",
"diff": "@@ -32,6 +32,23 @@ func (p *proc) newNetDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\nif s := p.k.NetworkStack(); s != nil && s.SupportsIPv6() {\nd.AddChild(ctx, \"dev\", seqfile.NewSeqFileInode(ctx, &netDev{s: s}, msrc))\nd.AddChild(ctx, \"if_inet6\", seqfile.NewSeqFileInode(ctx, &ifinet6{s: s}, msrc))\n+\n+ // The following files are simple stubs until they are implemented in\n+ // netstack, if the file contains a header the stub is just the header\n+ // otherwise it is an empty file.\n+ d.AddChild(ctx, \"arp\", p.newStubProcFSFile(ctx, msrc, []byte(\"IP address HW type Flags HW address Mask Device\")))\n+ d.AddChild(ctx, \"ipv6_route\", p.newStubProcFSFile(ctx, msrc, []byte(\"\")))\n+ d.AddChild(ctx, \"netlink\", p.newStubProcFSFile(ctx, msrc, []byte(\"sk Eth Pid Groups Rmem Wmem Dump Locks Drops Inode\")))\n+ d.AddChild(ctx, \"netstat\", p.newStubProcFSFile(ctx, msrc, []byte(\"TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed EmbryonicRsts PruneCalled RcvPruned OfoPruned OutOfWindowIcmps LockDroppedIcmps ArpFilter TW TWRecycled TWKilled PAWSPassive PAWSActive PAWSEstab DelayedACKs DelayedACKLocked DelayedACKLost ListenOverflows ListenDrops TCPPrequeued TCPDirectCopyFromBacklog TCPDirectCopyFromPrequeue TCPPrequeueDropped TCPHPHits TCPHPHitsToUser TCPPureAcks TCPHPAcks TCPRenoRecovery TCPSackRecovery TCPSACKReneging TCPFACKReorder TCPSACKReorder TCPRenoReorder TCPTSReorder TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo TCPLostRetransmit TCPRenoFailures TCPSackFailures TCPLossFailures TCPFastRetrans TCPForwardRetrans TCPSlowStartRetrans TCPTimeouts TCPLossProbes TCPLossProbeRecovery TCPRenoRecoveryFail TCPSackRecoveryFail TCPSchedulerFailed TCPRcvCollapsed TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv TCPDSACKOfoRecv TCPAbortOnData TCPAbortOnClose TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger TCPAbortFailed TCPMemoryPressures TCPSACKDiscard TCPDSACKIgnoredOld TCPDSACKIgnoredNoUndo TCPSpuriousRTOs TCPMD5NotFound TCPMD5Unexpected TCPMD5Failure TCPSackShifted TCPSackMerged TCPSackShiftFallback TCPBacklogDrop TCPMinTTLDrop TCPDeferAcceptDrop IPReversePathFilter TCPTimeWaitOverflow TCPReqQFullDoCookies TCPReqQFullDrop TCPRetransFail TCPRcvCoalesce TCPOFOQueue TCPOFODrop TCPOFOMerge TCPChallengeACK TCPSYNChallenge TCPFastOpenActive TCPFastOpenActiveFail TCPFastOpenPassive TCPFastOpenPassiveFail TCPFastOpenListenOverflow TCPFastOpenCookieReqd TCPSpuriousRtxHostQueues BusyPollRxPackets TCPAutoCorking TCPFromZeroWindowAdv TCPToZeroWindowAdv TCPWantZeroWindowAdv TCPSynRetrans TCPOrigDataSent TCPHystartTrainDetect TCPHystartTrainCwnd TCPHystartDelayDetect TCPHystartDelayCwnd TCPACKSkippedSynRecv TCPACKSkippedPAWS TCPACKSkippedSeq TCPACKSkippedFinWait2 TCPACKSkippedTimeWait TCPACKSkippedChallenge TCPWinProbe TCPKeepAlive TCPMTUPFail TCPMTUPSuccess\")))\n+ d.AddChild(ctx, \"packet\", p.newStubProcFSFile(ctx, msrc, []byte(\"sk RefCnt Type Proto Iface R Rmem User Inode\")))\n+ d.AddChild(ctx, \"protocols\", p.newStubProcFSFile(ctx, msrc, []byte(\"protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\")))\n+ d.AddChild(ctx, \"psched\", p.newStubProcFSFile(ctx, msrc, []byte(\"\")))\n+ d.AddChild(ctx, \"ptype\", p.newStubProcFSFile(ctx, msrc, []byte(\"Type Device Function\")))\n+ d.AddChild(ctx, \"route\", p.newStubProcFSFile(ctx, msrc, []byte(\"Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT\")))\n+ d.AddChild(ctx, \"tcp\", p.newStubProcFSFile(ctx, msrc, []byte(\" sl local_address 
rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode\")))\n+ d.AddChild(ctx, \"tcp6\", p.newStubProcFSFile(ctx, msrc, []byte(\" sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode\")))\n+ d.AddChild(ctx, \"udp\", p.newStubProcFSFile(ctx, msrc, []byte(\" sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops\")))\n+ d.AddChild(ctx, \"udp6\", p.newStubProcFSFile(ctx, msrc, []byte(\" sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode\")))\n}\nreturn newFile(d, msrc, fs.SpecialDirectory, nil)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/proc.go",
"new_path": "pkg/sentry/fs/proc/proc.go",
"diff": "@@ -17,6 +17,7 @@ package proc\nimport (\n\"fmt\"\n+ \"io\"\n\"sort\"\n\"strconv\"\n@@ -26,6 +27,9 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n+ \"gvisor.googlesource.com/gvisor/pkg/syserror\"\n)\n// proc is a root proc node.\n@@ -40,6 +44,30 @@ type proc struct {\npidns *kernel.PIDNamespace\n}\n+// stubProcFSFile is a file type that can be used to return file contents\n+// which are constant. This file is not writable and will always have mode\n+// 0444.\n+type stubProcFSFile struct {\n+ ramfs.Entry\n+\n+ // contents are the immutable file contents that will always be returned.\n+ contents []byte\n+}\n+\n+// DeprecatedPreadv implements fs.InodeOperations.DeprecatedPreadv.\n+func (s *stubProcFSFile) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) {\n+ if offset < 0 {\n+ return 0, syserror.EINVAL\n+ }\n+\n+ if offset >= int64(len(s.contents)) {\n+ return 0, io.EOF\n+ }\n+\n+ n, err := dst.CopyOut(ctx, s.contents[offset:])\n+ return int64(n), err\n+}\n+\n// New returns the root node of a partial simple procfs.\nfunc New(ctx context.Context, msrc *fs.MountSource) (*fs.Inode, error) {\nk := kernel.KernelFromContext(ctx)\n@@ -83,6 +111,15 @@ func (p *proc) newSelf(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\nreturn newFile(s, msrc, fs.Symlink, nil)\n}\n+// newStubProcFsFile returns a procfs file with constant contents.\n+func (p *proc) newStubProcFSFile(ctx context.Context, msrc *fs.MountSource, c []byte) *fs.Inode {\n+ u := &stubProcFSFile{\n+ contents: c,\n+ }\n+ u.InitEntry(ctx, fs.RootOwner, fs.FilePermsFromMode(0444))\n+ return newFile(u, msrc, fs.SpecialFile, nil)\n+}\n+\n// Readlink implements fs.InodeOperations.Readlink.\nfunc (s *self) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {\nif t := kernel.TaskFromContext(ctx); t != nil {\n@@ -107,7 +144,13 @@ func (p *proc) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dire\n// Is it a dynamic element?\nnfs := map[string]func() *fs.Inode{\n- \"net\": func() *fs.Inode { return p.newNetDir(ctx, dir.MountSource) },\n+ \"net\": func() *fs.Inode {\n+ // If we're using rpcinet we will let it manage /proc/net.\n+ if _, ok := p.k.NetworkStack().(*rpcinet.Stack); ok {\n+ return newRPCInetProcNet(ctx, dir.MountSource)\n+ }\n+ return p.newNetDir(ctx, dir.MountSource)\n+ },\n\"self\": func() *fs.Inode { return p.newSelf(ctx, dir.MountSource) },\n\"sys\": func() *fs.Inode { return p.newSysDir(ctx, dir.MountSource) },\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fs/proc/rpcinet_proc.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package proc\n+\n+import (\n+ \"io\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n+)\n+\n+// rpcinetFile implments fs.InodeOperations as RPCs.\n+type rpcinetFile struct {\n+ ramfs.Entry\n+\n+ // filepath is the full path of this rpcinetFile.\n+ filepath string\n+\n+ k *kernel.Kernel\n+}\n+\n+// DeprecatedPreadv implements fs.InodeOperations.DeprecatedPreadv.\n+// This method can panic if an rpcinetFile was created without an rpcinet\n+// stack.\n+func (r rpcinetFile) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) {\n+ s, ok := r.k.NetworkStack().(*rpcinet.Stack)\n+ if !ok {\n+ panic(\"Network stack is not a rpcinet.\")\n+ }\n+\n+ contents, se := s.RPCReadFile(r.filepath)\n+ if se != nil || offset >= int64(len(contents)) {\n+ return 0, io.EOF\n+ }\n+\n+ n, err := dst.CopyOut(ctx, contents[offset:])\n+ return int64(n), err\n+}\n+\n+// Truncate implements fs.InodeOperations.Truncate.\n+func (r rpcinetFile) Truncate(context.Context, *fs.Inode, int64) error {\n+ return nil\n+}\n+\n+// DeprecatedPwritev implements fs.InodeOperations.DeprecatedPwritev.\n+// This method can panic if an rpcinetFile was created without an rpcinet\n+// stack.\n+func (r rpcinetFile) DeprecatedPwritev(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {\n+ s, ok := r.k.NetworkStack().(*rpcinet.Stack)\n+ if !ok {\n+ panic(\"Network stack is not a rpcinet.\")\n+ }\n+\n+ if src.NumBytes() == 0 {\n+ return 0, nil\n+ }\n+\n+ b := make([]byte, src.NumBytes(), src.NumBytes())\n+ n, err := src.CopyIn(ctx, b)\n+ if err != nil {\n+ return int64(n), err\n+ }\n+\n+ written, se := s.RPCWriteFile(r.filepath, b)\n+ return int64(written), se.ToError()\n+}\n+\n+func newRPCProcFSFile(ctx context.Context, msrc *fs.MountSource, filepath string, mode linux.FileMode) *fs.Inode {\n+ f := &rpcinetFile{\n+ filepath: filepath,\n+ k: kernel.KernelFromContext(ctx),\n+ }\n+ f.InitEntry(ctx, fs.RootOwner, fs.FilePermsFromMode(mode))\n+\n+ fi := newFile(f, msrc, fs.SpecialFile, nil)\n+ return fi\n+}\n+\n+// newRPCInetProcNet will build an inode for /proc/net.\n+func newRPCInetProcNet(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\n+ d := &ramfs.Dir{}\n+ d.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555))\n+\n+ // Add all the files we want to forward for /proc/net.\n+ d.AddChild(ctx, \"arp\", newRPCProcFSFile(ctx, msrc, \"/proc/net/arp\", 0444))\n+ d.AddChild(ctx, \"dev\", newRPCProcFSFile(ctx, msrc, \"/proc/net/dev\", 0444))\n+ d.AddChild(ctx, \"if_inet6\", newRPCProcFSFile(ctx, msrc, 
\"/proc/net/if_inet6\", 0444))\n+ d.AddChild(ctx, \"ipv6_route\", newRPCProcFSFile(ctx, msrc, \"/proc/net/ipv6_route\", 0444))\n+ d.AddChild(ctx, \"netlink\", newRPCProcFSFile(ctx, msrc, \"/proc/net/netlink\", 0444))\n+ d.AddChild(ctx, \"netstat\", newRPCProcFSFile(ctx, msrc, \"/proc/net/netstat\", 0444))\n+ d.AddChild(ctx, \"packet\", newRPCProcFSFile(ctx, msrc, \"/proc/net/packet\", 0444))\n+ d.AddChild(ctx, \"protocols\", newRPCProcFSFile(ctx, msrc, \"/proc/net/protocols\", 0444))\n+ d.AddChild(ctx, \"psched\", newRPCProcFSFile(ctx, msrc, \"/proc/net/psched\", 0444))\n+ d.AddChild(ctx, \"ptype\", newRPCProcFSFile(ctx, msrc, \"/proc/net/ptype\", 0444))\n+ d.AddChild(ctx, \"route\", newRPCProcFSFile(ctx, msrc, \"/proc/net/route\", 0444))\n+ d.AddChild(ctx, \"tcp\", newRPCProcFSFile(ctx, msrc, \"/proc/net/tcp\", 0444))\n+ d.AddChild(ctx, \"tcp6\", newRPCProcFSFile(ctx, msrc, \"/proc/net/tcp6\", 0444))\n+ d.AddChild(ctx, \"udp\", newRPCProcFSFile(ctx, msrc, \"/proc/net/udp\", 0444))\n+ d.AddChild(ctx, \"udp6\", newRPCProcFSFile(ctx, msrc, \"/proc/net/udp6\", 0444))\n+\n+ return newFile(d, msrc, fs.SpecialDirectory, nil)\n+}\n+\n+// newRPCInetProcSysNet will build an inode for /proc/sys/net.\n+func newRPCInetProcSysNet(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\n+ d := &ramfs.Dir{}\n+ d.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555))\n+ d.AddChild(ctx, \"ipv4\", newRPCInetSysNetIPv4Dir(ctx, msrc))\n+ d.AddChild(ctx, \"core\", newRPCInetSysNetCore(ctx, msrc))\n+\n+ return newFile(d, msrc, fs.SpecialDirectory, nil)\n+}\n+\n+// newRPCInetSysNetCore builds the /proc/sys/net/core directory.\n+func newRPCInetSysNetCore(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\n+ d := &ramfs.Dir{}\n+ d.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555))\n+\n+ // Add all the files we want to forward over RPC for /proc/sys/net/core\n+ d.AddChild(ctx, \"default_qdisc\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/core/default_qdisc\", 0444))\n+ d.AddChild(ctx, \"message_burst\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/core/message_burst\", 0444))\n+ d.AddChild(ctx, \"message_cost\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/core/message_cost\", 0444))\n+ d.AddChild(ctx, \"optmem_max\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/core/optmem_max\", 0444))\n+ d.AddChild(ctx, \"rmem_default\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/core/rmem_default\", 0444))\n+ d.AddChild(ctx, \"rmem_max\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/core/rmem_max\", 0444))\n+ d.AddChild(ctx, \"somaxconn\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/core/somaxconn\", 0444))\n+ d.AddChild(ctx, \"wmem_default\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/core/wmem_default\", 0444))\n+ d.AddChild(ctx, \"wmem_max\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/core/wmem_max\", 0444))\n+\n+ return newFile(d, msrc, fs.SpecialDirectory, nil)\n+}\n+\n+// newRPCInetSysNetIPv4Dir builds the /proc/sys/net/ipv4 directory.\n+func newRPCInetSysNetIPv4Dir(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\n+ d := &ramfs.Dir{}\n+ d.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555))\n+\n+ // Add all the files we want to forward over RPC for /proc/sys/net/ipv4.\n+ d.AddChild(ctx, \"ip_local_port_range\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/ip_local_port_range\", 0444))\n+ d.AddChild(ctx, \"ip_local_reserved_ports\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/ip_local_reserved_ports\", 0444))\n+ d.AddChild(ctx, \"ipfrag_time\", newRPCProcFSFile(ctx, msrc, 
\"/proc/sys/net/ipv4/ipfrag_time\", 0444))\n+ d.AddChild(ctx, \"ip_nonlocal_bind\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/ip_nonlocal_bind\", 0444))\n+ d.AddChild(ctx, \"ip_no_pmtu_disc\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/ip_no_pmtu_disc\", 0444))\n+\n+ d.AddChild(ctx, \"tcp_allowed_congestion_control\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_allowed_congestion_control\", 0444))\n+ d.AddChild(ctx, \"tcp_available_congestion_control\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_available_congestion_control\", 0444))\n+ d.AddChild(ctx, \"tcp_base_mss\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_base_mss\", 0444))\n+ d.AddChild(ctx, \"tcp_congestion_control\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_congestion_control\", 0644))\n+ d.AddChild(ctx, \"tcp_dsack\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_dsack\", 0644))\n+ d.AddChild(ctx, \"tcp_early_retrans\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_early_retrans\", 0644))\n+ d.AddChild(ctx, \"tcp_fack\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_fack\", 0644))\n+ d.AddChild(ctx, \"tcp_fastopen\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_fastopen\", 0644))\n+ d.AddChild(ctx, \"tcp_fastopen_key\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_fastopen_key\", 0444))\n+ d.AddChild(ctx, \"tcp_fin_timeout\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_fin_timeout\", 0644))\n+ d.AddChild(ctx, \"tcp_invalid_ratelimit\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_invalid_ratelimit\", 0444))\n+ d.AddChild(ctx, \"tcp_keepalive_intvl\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_keepalive_intvl\", 0644))\n+ d.AddChild(ctx, \"tcp_keepalive_probes\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_keepalive_probes\", 0644))\n+ d.AddChild(ctx, \"tcp_keepalive_time\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_keepalive_time\", 0644))\n+ d.AddChild(ctx, \"tcp_mem\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_mem\", 0444))\n+ d.AddChild(ctx, \"tcp_mtu_probing\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_mtu_probing\", 0644))\n+ d.AddChild(ctx, \"tcp_no_metrics_save\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_no_metrics_save\", 0444))\n+ d.AddChild(ctx, \"tcp_probe_interval\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_probe_interval\", 0444))\n+ d.AddChild(ctx, \"tcp_probe_threshold\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_probe_threshold\", 0444))\n+ d.AddChild(ctx, \"tcp_retries1\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_retries1\", 0644))\n+ d.AddChild(ctx, \"tcp_retries2\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_retries2\", 0644))\n+ d.AddChild(ctx, \"tcp_rfc1337\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_rfc1337\", 0444))\n+ d.AddChild(ctx, \"tcp_rmem\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_rmem\", 0444))\n+ d.AddChild(ctx, \"tcp_sack\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_sack\", 0644))\n+ d.AddChild(ctx, \"tcp_slow_start_after_idle\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_slow_start_after_idle\", 0644))\n+ d.AddChild(ctx, \"tcp_synack_retries\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_synack_retries\", 0644))\n+ d.AddChild(ctx, \"tcp_syn_retries\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_syn_retries\", 0644))\n+ d.AddChild(ctx, \"tcp_timestamps\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_timestamps\", 
0644))\n+ d.AddChild(ctx, \"tcp_wmem\", newRPCProcFSFile(ctx, msrc, \"/proc/sys/net/ipv4/tcp_wmem\", 0444))\n+\n+ return newFile(d, msrc, fs.SpecialDirectory, nil)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/sys.go",
"new_path": "pkg/sentry/fs/proc/sys.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n)\n@@ -112,6 +113,13 @@ func (p *proc) newSysDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\nd.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555))\nd.AddChild(ctx, \"kernel\", p.newKernelDir(ctx, msrc))\nd.AddChild(ctx, \"vm\", p.newVMDir(ctx, msrc))\n+\n+ // If we're using rpcinet we will let it manage /proc/sys/net.\n+ if _, ok := p.k.NetworkStack().(*rpcinet.Stack); ok {\n+ d.AddChild(ctx, \"net\", newRPCInetProcSysNet(ctx, msrc))\n+ } else {\nd.AddChild(ctx, \"net\", p.newSysNetDir(ctx, msrc))\n+ }\n+\nreturn newFile(d, msrc, fs.SpecialDirectory, nil)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/sys_net.go",
"new_path": "pkg/sentry/fs/proc/sys_net.go",
"diff": "@@ -158,7 +158,28 @@ func (s *tcpSack) DeprecatedPwritev(ctx context.Context, src usermem.IOSequence,\nreturn n, s.s.SetTCPSACKEnabled(v != 0)\n}\n-func newSysNetIPv4Dir(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *fs.Inode {\n+func (p *proc) newSysNetCore(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *fs.Inode {\n+ d := &ramfs.Dir{}\n+ d.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555))\n+\n+ // The following files are simple stubs until they are implemented in\n+ // netstack, most of these files are configuration related. We use the\n+ // value closest to the actual netstack behavior or any empty file,\n+ // all of these files will have mode 0444 (read-only for all users).\n+ d.AddChild(ctx, \"default_qdisc\", p.newStubProcFSFile(ctx, msrc, []byte(\"pfifo_fast\")))\n+ d.AddChild(ctx, \"message_burst\", p.newStubProcFSFile(ctx, msrc, []byte(\"10\")))\n+ d.AddChild(ctx, \"message_cost\", p.newStubProcFSFile(ctx, msrc, []byte(\"5\")))\n+ d.AddChild(ctx, \"optmem_max\", p.newStubProcFSFile(ctx, msrc, []byte(\"0\")))\n+ d.AddChild(ctx, \"rmem_default\", p.newStubProcFSFile(ctx, msrc, []byte(\"212992\")))\n+ d.AddChild(ctx, \"rmem_max\", p.newStubProcFSFile(ctx, msrc, []byte(\"212992\")))\n+ d.AddChild(ctx, \"somaxconn\", p.newStubProcFSFile(ctx, msrc, []byte(\"128\")))\n+ d.AddChild(ctx, \"wmem_default\", p.newStubProcFSFile(ctx, msrc, []byte(\"212992\")))\n+ d.AddChild(ctx, \"wmem_max\", p.newStubProcFSFile(ctx, msrc, []byte(\"212992\")))\n+\n+ return newFile(d, msrc, fs.SpecialDirectory, nil)\n+}\n+\n+func (p *proc) newSysNetIPv4Dir(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *fs.Inode {\nd := &ramfs.Dir{}\nd.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555))\n@@ -175,6 +196,46 @@ func newSysNetIPv4Dir(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *\n// Add tcp_sack.\nd.AddChild(ctx, \"tcp_sack\", newTCPSackInode(ctx, msrc, s))\n+ // The following files are simple stubs until they are implemented in\n+ // netstack, most of these files are configuration related. 
We use the\n+ // value closest to the actual netstack behavior or any empty file,\n+ // all of these files will have mode 0444 (read-only for all users).\n+ d.AddChild(ctx, \"ip_local_port_range\", p.newStubProcFSFile(ctx, msrc, []byte(\"16000 65535\")))\n+ d.AddChild(ctx, \"ip_local_reserved_ports\", p.newStubProcFSFile(ctx, msrc, []byte(\"\")))\n+ d.AddChild(ctx, \"ipfrag_time\", p.newStubProcFSFile(ctx, msrc, []byte(\"30\")))\n+ d.AddChild(ctx, \"ip_nonlocal_bind\", p.newStubProcFSFile(ctx, msrc, []byte(\"0\")))\n+ d.AddChild(ctx, \"ip_no_pmtu_disc\", p.newStubProcFSFile(ctx, msrc, []byte(\"1\")))\n+\n+ // tcp_allowed_congestion_control tell the user what they are able to do as an\n+ // unprivledged process so we leave it empty.\n+ d.AddChild(ctx, \"tcp_allowed_congestion_control\", p.newStubProcFSFile(ctx, msrc, []byte(\"\")))\n+ d.AddChild(ctx, \"tcp_available_congestion_control\", p.newStubProcFSFile(ctx, msrc, []byte(\"reno\")))\n+ d.AddChild(ctx, \"tcp_congestion_control\", p.newStubProcFSFile(ctx, msrc, []byte(\"reno\")))\n+\n+ // Many of the following stub files are features netstack doesn't support\n+ // and are therefore \"0\" for disabled.\n+ d.AddChild(ctx, \"tcp_base_mss\", p.newStubProcFSFile(ctx, msrc, []byte(\"1280\")))\n+ d.AddChild(ctx, \"tcp_dsack\", p.newStubProcFSFile(ctx, msrc, []byte(\"0\")))\n+ d.AddChild(ctx, \"tcp_early_retrans\", p.newStubProcFSFile(ctx, msrc, []byte(\"0\")))\n+ d.AddChild(ctx, \"tcp_fack\", p.newStubProcFSFile(ctx, msrc, []byte(\"0\")))\n+ d.AddChild(ctx, \"tcp_fastopen\", p.newStubProcFSFile(ctx, msrc, []byte(\"0\")))\n+ d.AddChild(ctx, \"tcp_fastopen_key\", p.newStubProcFSFile(ctx, msrc, []byte(\"\")))\n+ d.AddChild(ctx, \"tcp_invalid_ratelimit\", p.newStubProcFSFile(ctx, msrc, []byte(\"0\")))\n+ d.AddChild(ctx, \"tcp_keepalive_intvl\", p.newStubProcFSFile(ctx, msrc, []byte(\"0\")))\n+ d.AddChild(ctx, \"tcp_keepalive_probes\", p.newStubProcFSFile(ctx, msrc, []byte(\"0\")))\n+ d.AddChild(ctx, \"tcp_keepalive_time\", p.newStubProcFSFile(ctx, msrc, []byte(\"7200\")))\n+ d.AddChild(ctx, \"tcp_mtu_probing\", p.newStubProcFSFile(ctx, msrc, []byte(\"0\")))\n+ d.AddChild(ctx, \"tcp_no_metrics_save\", p.newStubProcFSFile(ctx, msrc, []byte(\"1\")))\n+ d.AddChild(ctx, \"tcp_probe_interval\", p.newStubProcFSFile(ctx, msrc, []byte(\"0\")))\n+ d.AddChild(ctx, \"tcp_probe_threshold\", p.newStubProcFSFile(ctx, msrc, []byte(\"0\")))\n+ d.AddChild(ctx, \"tcp_retries1\", p.newStubProcFSFile(ctx, msrc, []byte(\"3\")))\n+ d.AddChild(ctx, \"tcp_retries2\", p.newStubProcFSFile(ctx, msrc, []byte(\"15\")))\n+ d.AddChild(ctx, \"tcp_rfc1337\", p.newStubProcFSFile(ctx, msrc, []byte(\"1\")))\n+ d.AddChild(ctx, \"tcp_slow_start_after_idle\", p.newStubProcFSFile(ctx, msrc, []byte(\"1\")))\n+ d.AddChild(ctx, \"tcp_synack_retries\", p.newStubProcFSFile(ctx, msrc, []byte(\"5\")))\n+ d.AddChild(ctx, \"tcp_syn_retries\", p.newStubProcFSFile(ctx, msrc, []byte(\"3\")))\n+ d.AddChild(ctx, \"tcp_timestamps\", p.newStubProcFSFile(ctx, msrc, []byte(\"1\")))\n+\nreturn newFile(d, msrc, fs.SpecialDirectory, nil)\n}\n@@ -182,7 +243,8 @@ func (p *proc) newSysNetDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode\nd := &ramfs.Dir{}\nd.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555))\nif s := p.k.NetworkStack(); s != nil {\n- d.AddChild(ctx, \"ipv4\", newSysNetIPv4Dir(ctx, msrc, s))\n+ d.AddChild(ctx, \"ipv4\", p.newSysNetIPv4Dir(ctx, msrc, s))\n+ d.AddChild(ctx, \"core\", p.newSysNetCore(ctx, msrc, s))\n}\nreturn newFile(d, msrc, fs.SpecialDirectory, nil)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/conn/conn.go",
"new_path": "pkg/sentry/socket/rpcinet/conn/conn.go",
"diff": "@@ -147,6 +147,26 @@ func (c *RPCConnection) RPCReadFile(path string) ([]byte, *syserr.Error) {\nreturn res.(*pb.ReadFileResponse_Data).Data, nil\n}\n+// RPCWriteFile will execute the WriteFile helper RPC method which avoids the\n+// common pattern of open(2), write(2), write(2), close(2) by doing all\n+// operations as a single RPC.\n+func (c *RPCConnection) RPCWriteFile(path string, data []byte) (int64, *syserr.Error) {\n+ req := &pb.SyscallRequest_WriteFile{&pb.WriteFileRequest{\n+ Path: path,\n+ Content: data,\n+ }}\n+\n+ id, ch := c.NewRequest(pb.SyscallRequest{Args: req}, false /* ignoreResult */)\n+ <-ch\n+\n+ res := c.Request(id).Result.(*pb.SyscallResponse_WriteFile).WriteFile\n+ if e := res.ErrorNumber; e != 0 {\n+ return int64(res.Written), syserr.FromHost(syscall.Errno(e))\n+ }\n+\n+ return int64(res.Written), nil\n+}\n+\n// Request retrieves the request corresponding to the given request ID.\n//\n// The channel returned by NewRequest must have been closed before Request can\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/stack.go",
"new_path": "pkg/sentry/socket/rpcinet/stack.go",
"diff": "@@ -16,50 +16,24 @@ package rpcinet\nimport (\n\"fmt\"\n- \"strings\"\n\"syscall\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/inet\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/socket/hostinet\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet/conn\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet/notifier\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n- \"gvisor.googlesource.com/gvisor/pkg/syserror\"\n+ \"gvisor.googlesource.com/gvisor/pkg/syserr\"\n\"gvisor.googlesource.com/gvisor/pkg/unet\"\n)\n// Stack implements inet.Stack for RPC backed sockets.\ntype Stack struct {\n- // We intentionally do not allow these values to be changed to remain\n- // consistent with the other networking stacks.\ninterfaces map[int32]inet.Interface\ninterfaceAddrs map[int32][]inet.InterfaceAddr\n- supportsIPv6 bool\n- tcpRecvBufSize inet.TCPBufferSize\n- tcpSendBufSize inet.TCPBufferSize\n- tcpSACKEnabled bool\nrpcConn *conn.RPCConnection\nnotifier *notifier.Notifier\n}\n-func readTCPBufferSizeFile(conn *conn.RPCConnection, filename string) (inet.TCPBufferSize, error) {\n- contents, se := conn.RPCReadFile(filename)\n- if se != nil {\n- return inet.TCPBufferSize{}, fmt.Errorf(\"failed to read %s: %v\", filename, se)\n- }\n- ioseq := usermem.BytesIOSequence(contents)\n- fields := make([]int32, 3)\n- if n, err := usermem.CopyInt32StringsInVec(context.Background(), ioseq.IO, ioseq.Addrs, fields, ioseq.Opts); n != ioseq.NumBytes() || err != nil {\n- return inet.TCPBufferSize{}, fmt.Errorf(\"failed to parse %s (%q): got %v after %d/%d bytes\", filename, contents, err, n, ioseq.NumBytes())\n- }\n- return inet.TCPBufferSize{\n- Min: int(fields[0]),\n- Default: int(fields[1]),\n- Max: int(fields[2]),\n- }, nil\n-}\n-\n// NewStack returns a Stack containing the current state of the host network\n// stack.\nfunc NewStack(fd int32) (*Stack, error) {\n@@ -80,31 +54,6 @@ func NewStack(fd int32) (*Stack, error) {\nreturn nil, e\n}\n- // Load the configuration values from procfs.\n- tcpRMem, e := readTCPBufferSizeFile(stack.rpcConn, \"/proc/sys/net/ipv4/tcp_rmem\")\n- if e != nil {\n- return nil, e\n- }\n- stack.tcpRecvBufSize = tcpRMem\n-\n- tcpWMem, e := readTCPBufferSizeFile(stack.rpcConn, \"/proc/sys/net/ipv4/tcp_wmem\")\n- if e != nil {\n- return nil, e\n- }\n- stack.tcpSendBufSize = tcpWMem\n-\n- ipv6, se := stack.rpcConn.RPCReadFile(\"/proc/net/if_inet6\")\n- if len(string(ipv6)) > 0 {\n- stack.supportsIPv6 = true\n- }\n-\n- sackFile := \"/proc/sys/net/ipv4/tcp_sack\"\n- sack, se := stack.rpcConn.RPCReadFile(sackFile)\n- if se != nil {\n- return nil, fmt.Errorf(\"failed to read %s: %v\", sackFile, se)\n- }\n- stack.tcpSACKEnabled = strings.TrimSpace(string(sack)) != \"0\"\n-\nlinks, err := stack.DoNetlinkRouteRequest(syscall.RTM_GETLINK)\nif err != nil {\nreturn nil, fmt.Errorf(\"RTM_GETLINK failed: %v\", err)\n@@ -123,6 +72,21 @@ func NewStack(fd int32) (*Stack, error) {\nreturn stack, nil\n}\n+// RPCReadFile will execute the ReadFile helper RPC method which avoids the\n+// common pattern of open(2), read(2), close(2) by doing all three operations\n+// as a single RPC. 
It will read the entire file or return EFBIG if the file\n+// was too large.\n+func (s *Stack) RPCReadFile(path string) ([]byte, *syserr.Error) {\n+ return s.rpcConn.RPCReadFile(path)\n+}\n+\n+// RPCWriteFile will execute the WriteFile helper RPC method which avoids the\n+// common pattern of open(2), write(2), write(2), close(2) by doing all\n+// operations as a single RPC.\n+func (s *Stack) RPCWriteFile(path string, data []byte) (int64, *syserr.Error) {\n+ return s.rpcConn.RPCWriteFile(path, data)\n+}\n+\n// Interfaces implements inet.Stack.Interfaces.\nfunc (s *Stack) Interfaces() map[int32]inet.Interface {\nreturn s.interfaces\n@@ -135,41 +99,37 @@ func (s *Stack) InterfaceAddrs() map[int32][]inet.InterfaceAddr {\n// SupportsIPv6 implements inet.Stack.SupportsIPv6.\nfunc (s *Stack) SupportsIPv6() bool {\n- return s.supportsIPv6\n+ panic(\"rpcinet handles procfs directly this method should not be called\")\n}\n// TCPReceiveBufferSize implements inet.Stack.TCPReceiveBufferSize.\nfunc (s *Stack) TCPReceiveBufferSize() (inet.TCPBufferSize, error) {\n- return s.tcpRecvBufSize, nil\n+ panic(\"rpcinet handles procfs directly this method should not be called\")\n}\n// SetTCPReceiveBufferSize implements inet.Stack.SetTCPReceiveBufferSize.\nfunc (s *Stack) SetTCPReceiveBufferSize(size inet.TCPBufferSize) error {\n- // To keep all the supported stacks consistent we don't allow changing this\n- // value even though it would be possible via an RPC.\n- return syserror.EACCES\n+ panic(\"rpcinet handles procfs directly this method should not be called\")\n+\n}\n// TCPSendBufferSize implements inet.Stack.TCPSendBufferSize.\nfunc (s *Stack) TCPSendBufferSize() (inet.TCPBufferSize, error) {\n- return s.tcpSendBufSize, nil\n+ panic(\"rpcinet handles procfs directly this method should not be called\")\n+\n}\n// SetTCPSendBufferSize implements inet.Stack.SetTCPSendBufferSize.\nfunc (s *Stack) SetTCPSendBufferSize(size inet.TCPBufferSize) error {\n- // To keep all the supported stacks consistent we don't allow changing this\n- // value even though it would be possible via an RPC.\n- return syserror.EACCES\n+ panic(\"rpcinet handles procfs directly this method should not be called\")\n}\n// TCPSACKEnabled implements inet.Stack.TCPSACKEnabled.\nfunc (s *Stack) TCPSACKEnabled() (bool, error) {\n- return s.tcpSACKEnabled, nil\n+ panic(\"rpcinet handles procfs directly this method should not be called\")\n}\n// SetTCPSACKEnabled implements inet.Stack.SetTCPSACKEnabled.\nfunc (s *Stack) SetTCPSACKEnabled(enabled bool) error {\n- // To keep all the supported stacks consistent we don't allow changing this\n- // value even though it would be possible via an RPC.\n- return syserror.EACCES\n+ panic(\"rpcinet handles procfs directly this method should not be called\")\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add support for rpcinet owned procfs files.
This change will add support for /proc/sys/net and /proc/net, which will
be managed and owned by rpcinet. This will allow these inodes to be forwarded
as RPCs.
PiperOrigin-RevId: 199370799
Change-Id: I2c876005d98fe55dd126145163bee5a645458ce4 |
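A small Go sketch of the read-only stub file shape used for the /proc/net and /proc/sys/net entries above, stripped of the sentry fs interfaces; the type and method names are assumptions for illustration.

    package example

    import (
        "io"
        "syscall"
    )

    // stubFile serves constant contents: negative offsets are rejected, reads
    // past the end return EOF, and everything else copies from the fixed byte
    // slice, matching the stub procfs file semantics in the change.
    type stubFile struct {
        contents []byte
    }

    func (s *stubFile) read(p []byte, off int64) (int, error) {
        if off < 0 {
            return 0, syscall.EINVAL
        }
        if off >= int64(len(s.contents)) {
            return 0, io.EOF
        }
        return copy(p, s.contents[off:]), nil
    }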
260,013 | 06.06.2018 15:52:29 | 25,200 | 79fef54eb1b9e941e2c910f90b65f3cfe94e18c4 | Add support for rpcinet ioctl(2).
This change will add support for ioctls that have previously
been supported by netstack.
LINE_LENGTH_IGNORE | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/socket.go",
"new_path": "pkg/sentry/socket/rpcinet/socket.go",
"diff": "@@ -56,6 +56,10 @@ type socketOperations struct {\n// Verify that we actually implement socket.Socket.\nvar _ = socket.Socket(&socketOperations{})\n+const (\n+ sizeOfIfReq = 40\n+)\n+\n// New creates a new RPC socket.\nfunc newSocketFile(ctx context.Context, stack *Stack, family int, skType int, protocol int) (*fs.File, *syserr.Error) {\nid, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_Socket{&pb.SocketRequest{Family: int64(family), Type: int64(skType | syscall.SOCK_NONBLOCK), Protocol: int64(protocol)}}}, false /* ignoreResult */)\n@@ -290,9 +294,13 @@ func (s *socketOperations) Accept(t *kernel.Task, peerRequested bool, flags int,\nreturn 0, nil, 0, syserr.FromError(err)\n}\n+ if peerRequested {\nreturn fd, payload.Address.Address, payload.Address.Length, nil\n}\n+ return fd, nil, 0, nil\n+}\n+\n// Bind implements socket.Socket.Bind.\nfunc (s *socketOperations) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error {\nstack := t.NetworkContext().(*Stack)\n@@ -385,11 +393,62 @@ func (s *socketOperations) GetSockName(t *kernel.Task) (interface{}, uint32, *sy\nreturn addr.Address, addr.Length, nil\n}\n+func rpcIoctl(t *kernel.Task, fd, cmd uint32, arg []byte) ([]byte, error) {\n+ stack := t.NetworkContext().(*Stack)\n+\n+ id, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_Ioctl{&pb.IOCtlRequest{Fd: fd, Cmd: cmd, Arg: arg}}}, false /* ignoreResult */)\n+ <-c\n+\n+ res := stack.rpcConn.Request(id).Result.(*pb.SyscallResponse_Ioctl).Ioctl.Result\n+ if e, ok := res.(*pb.IOCtlResponse_ErrorNumber); ok {\n+ return nil, syscall.Errno(e.ErrorNumber)\n+ }\n+\n+ return res.(*pb.IOCtlResponse_Value).Value, nil\n+}\n+\n// Ioctl implements fs.FileOperations.Ioctl.\nfunc (s *socketOperations) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\n+ t := ctx.(*kernel.Task)\n+\n+ cmd := uint32(args[1].Int())\n+ arg := args[2].Pointer()\n+\n+ var buf []byte\n+ switch cmd {\n+ // The following ioctls take 4 byte argument parameters.\n+ case syscall.TIOCINQ, syscall.TIOCOUTQ:\n+ buf = make([]byte, 4)\n+ // The following ioctls have args which are sizeof(struct ifreq).\n+ case syscall.SIOCGIFINDEX, syscall.SIOCGIFNETMASK, syscall.SIOCGIFHWADDR, syscall.SIOCGIFNAME, syscall.SIOCGIFFLAGS:\n+ buf = make([]byte, sizeOfIfReq)\n+ default:\nreturn 0, syserror.ENOTTY\n}\n+ _, err := io.CopyIn(ctx, arg, buf, usermem.IOOpts{\n+ AddressSpaceActive: true,\n+ })\n+\n+ if err != nil {\n+ return 0, err\n+ }\n+\n+ v, err := rpcIoctl(t, s.fd, cmd, buf)\n+ if err != nil {\n+ return 0, err\n+ }\n+\n+ if len(v) != len(buf) {\n+ return 0, syserror.EINVAL\n+ }\n+\n+ _, err = io.CopyOut(ctx, arg, v, usermem.IOOpts{\n+ AddressSpaceActive: true,\n+ })\n+ return 0, err\n+}\n+\nfunc rpcRecvMsg(t *kernel.Task, req *pb.SyscallRequest_Recvmsg) (*pb.RecvmsgResponse_ResultPayload, *syserr.Error) {\ns := t.NetworkContext().(*Stack)\nid, c := s.rpcConn.NewRequest(pb.SyscallRequest{Args: req}, false /* ignoreResult */)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/syscall_rpc.proto",
"new_path": "pkg/sentry/socket/rpcinet/syscall_rpc.proto",
"diff": "@@ -8,7 +8,7 @@ package syscall_rpc;\nmessage SendmsgRequest {\nuint32 fd = 1;\n- bytes data = 2;\n+ bytes data = 2 [ctype = CORD];\nbytes address = 3;\nbool more = 4;\nbool end_of_record = 5;\n@@ -24,13 +24,13 @@ message SendmsgResponse {\nmessage IOCtlRequest {\nuint32 fd = 1;\nuint32 cmd = 2;\n- uint64 arg = 3;\n+ bytes arg = 3;\n}\nmessage IOCtlResponse {\noneof result {\nuint32 error_number = 1;\n- uint64 value = 2;\n+ bytes value = 2;\n}\n}\n@@ -63,7 +63,7 @@ message ReadRequest {\nmessage ReadResponse {\noneof result {\nuint32 error_number = 1;\n- bytes data = 2;\n+ bytes data = 2 [ctype = CORD];\n}\n}\n@@ -74,13 +74,13 @@ message ReadFileRequest {\nmessage ReadFileResponse {\noneof result {\nuint32 error_number = 1;\n- bytes data = 2;\n+ bytes data = 2 [ctype = CORD];\n}\n}\nmessage WriteRequest {\nuint32 fd = 1;\n- bytes data = 2;\n+ bytes data = 2 [ctype = CORD];\n}\nmessage WriteResponse {\n@@ -107,7 +107,7 @@ message AddressResponse {\nmessage RecvmsgResponse {\nmessage ResultPayload {\n- bytes data = 1;\n+ bytes data = 1 [ctype = CORD];\nAddressResponse address = 2;\nuint32 length = 3;\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add support for rpcinet ioctl(2).
This change will add support for ioctls that have previously
been supported by netstack.
LINE_LENGTH_IGNORE
PiperOrigin-RevId: 199544114
Change-Id: I3769202c19502c3b7d05e06ea9552acfd9255893 |
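A standalone Go sketch of the argument-size dispatch above; the buffer sizes follow the diff, while the helper function and its error are assumptions.

    package example

    import (
        "fmt"
        "syscall"
    )

    // sizeOfIfReq matches sizeof(struct ifreq) on linux/amd64.
    const sizeOfIfReq = 40

    // ioctlArgSize returns how many bytes must be copied in from (and back out
    // to) the caller before an ioctl is forwarded over the RPC connection.
    func ioctlArgSize(cmd uint32) (int, error) {
        switch cmd {
        case syscall.TIOCINQ, syscall.TIOCOUTQ:
            return 4, nil // int-sized arguments
        case syscall.SIOCGIFINDEX, syscall.SIOCGIFNETMASK, syscall.SIOCGIFHWADDR,
            syscall.SIOCGIFNAME, syscall.SIOCGIFFLAGS:
            return sizeOfIfReq, nil // struct ifreq-sized arguments
        default:
            return 0, fmt.Errorf("unsupported ioctl %#x", cmd)
        }
    }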
259,891 | 06.06.2018 16:12:58 | 25,200 | 206e90d057211f2ac53174907b2ff04801f9a481 | runsc: Support abbreviated container IDs.
Just a UI/usability addition. It's a lot easier to type "60" than
"60185c721d7e10c00489f1fa210ee0d35c594873d6376b457fb1815e4fdbfc2c". | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/delete_test.go",
"new_path": "runsc/cmd/delete_test.go",
"diff": "@@ -31,11 +31,11 @@ func TestNotFound(t *testing.T) {\nd := Delete{}\nif err := d.execute(ids, conf); err == nil {\n- t.Error(\"Deleting non-existend container should have failed\")\n+ t.Error(\"Deleting non-existent container should have failed\")\n}\nd = Delete{force: true}\nif err := d.execute(ids, conf); err != nil {\n- t.Errorf(\"Deleting non-existend container with --force should NOT have failed: %v\", err)\n+ t.Errorf(\"Deleting non-existent container with --force should NOT have failed: %v\", err)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"path/filepath\"\n\"regexp\"\n\"strconv\"\n+ \"strings\"\n\"syscall\"\n\"time\"\n@@ -92,14 +93,22 @@ type Container struct {\nSandbox *sandbox.Sandbox `json:\"sandbox\"`\n}\n-// Load loads a container with the given id from a metadata file.\n+// Load loads a container with the given id from a metadata file. id may be an\n+// abbreviation of the full container id, in which case Load loads the\n+// container to which id unambiguously refers to.\n// Returns ErrNotExist if container doesn't exits.\nfunc Load(rootDir, id string) (*Container, error) {\nlog.Debugf(\"Load container %q %q\", rootDir, id)\nif err := validateID(id); err != nil {\nreturn nil, err\n}\n- metaFile := filepath.Join(rootDir, id, metadataFilename)\n+\n+ cRoot, err := findContainerRoot(rootDir, id)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ metaFile := filepath.Join(cRoot, metadataFilename)\nmetaBytes, err := ioutil.ReadFile(metaFile)\nif err != nil {\nif os.IsNotExist(err) {\n@@ -133,6 +142,36 @@ func Load(rootDir, id string) (*Container, error) {\nreturn &c, nil\n}\n+func findContainerRoot(rootDir, partialID string) (string, error) {\n+ // Check whether the id fully specifies an existing container.\n+ cRoot := filepath.Join(rootDir, partialID)\n+ if _, err := os.Stat(cRoot); err == nil {\n+ return cRoot, nil\n+ }\n+\n+ // Now see whether id could be an abbreviation of exactly 1 of the\n+ // container ids. If id is ambigious (it could match more than 1\n+ // container), it is an error.\n+ cRoot = \"\"\n+ ids, err := List(rootDir)\n+ if err != nil {\n+ return \"\", err\n+ }\n+ for _, id := range ids {\n+ if strings.HasPrefix(id, partialID) {\n+ if cRoot != \"\" {\n+ return \"\", fmt.Errorf(\"id %q is ambiguous and could refer to multiple containers: %q, %q\", partialID, cRoot, id)\n+ }\n+ cRoot = id\n+ }\n+ }\n+ if cRoot == \"\" {\n+ return \"\", os.ErrNotExist\n+ }\n+ log.Debugf(\"abbreviated id %q resolves to full id %q\", partialID, cRoot)\n+ return filepath.Join(rootDir, cRoot), nil\n+}\n+\n// List returns all container ids in the given root directory.\nfunc List(rootDir string) ([]string, error) {\nlog.Debugf(\"List containers %q\", rootDir)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -634,7 +634,7 @@ func TestRunNonRoot(t *testing.T) {\n}\n}\n-// TestMountNewDir check that runsc will create destination directory if it\n+// TestMountNewDir checks that runsc will create destination directory if it\n// doesn't exit.\nfunc TestMountNewDir(t *testing.T) {\nsrcDir := path.Join(os.TempDir(), \"src\", \"newdir\", \"anotherdir\")\n@@ -660,3 +660,60 @@ func TestMountNewDir(t *testing.T) {\nt.Fatalf(\"error running sadbox: %v\", err)\n}\n}\n+\n+// TestAbbreviatedIDs checks that runsc supports using abbreviated container\n+// IDs in place of full IDs.\n+func TestAbbreviatedIDs(t *testing.T) {\n+ cids := []string{\n+ \"foo-\" + testutil.UniqueContainerID(),\n+ \"bar-\" + testutil.UniqueContainerID(),\n+ \"baz-\" + testutil.UniqueContainerID(),\n+ }\n+\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ for _, cid := range cids {\n+ spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n+ bundleDir, conf, err := testutil.SetupContainerInRoot(rootDir, spec)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ defer os.RemoveAll(bundleDir)\n+\n+ // Create and start the container.\n+ cont, err := container.Create(cid, spec, conf, bundleDir, \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer cont.Destroy()\n+ }\n+\n+ // These should all be unambigious.\n+ unambiguous := map[string]string{\n+ \"f\": cids[0],\n+ cids[0]: cids[0],\n+ \"bar\": cids[1],\n+ cids[1]: cids[1],\n+ \"baz\": cids[2],\n+ cids[2]: cids[2],\n+ }\n+ for shortid, longid := range unambiguous {\n+ if _, err := container.Load(rootDir, shortid); err != nil {\n+ t.Errorf(\"%q should resolve to %q: %v\", shortid, longid, err)\n+ }\n+ }\n+\n+ // These should be ambiguous.\n+ ambiguous := []string{\n+ \"b\",\n+ \"ba\",\n+ }\n+ for _, shortid := range ambiguous {\n+ if s, err := container.Load(rootDir, shortid); err == nil {\n+ t.Errorf(\"%q should be ambiguous, but resolved to %q\", shortid, s.ID)\n+ }\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/testutil.go",
"new_path": "runsc/test/testutil/testutil.go",
"diff": "@@ -84,21 +84,36 @@ func NewSpecWithArgs(args ...string) *specs.Spec {\nreturn spec\n}\n+// SetupRootDir creates a root directory for containers.\n+func SetupRootDir() (string, error) {\n+ rootDir, err := ioutil.TempDir(\"\", \"containers\")\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"error creating root dir: %v\", err)\n+ }\n+ return rootDir, nil\n+}\n+\n// SetupContainer creates a bundle and root dir for the container, generates a\n// test config, and writes the spec to config.json in the bundle dir.\nfunc SetupContainer(spec *specs.Spec) (rootDir, bundleDir string, conf *boot.Config, err error) {\n- rootDir, err = ioutil.TempDir(\"\", \"containers\")\n+ rootDir, err = SetupRootDir()\nif err != nil {\n- return \"\", \"\", nil, fmt.Errorf(\"error creating root dir: %v\", err)\n+ return \"\", \"\", nil, err\n+ }\n+ bundleDir, conf, err = SetupContainerInRoot(rootDir, spec)\n+ return rootDir, bundleDir, conf, err\n}\n+// SetupContainerInRoot creates a bundle for the container, generates a test\n+// config, and writes the spec to config.json in the bundle dir.\n+func SetupContainerInRoot(rootDir string, spec *specs.Spec) (bundleDir string, conf *boot.Config, err error) {\nbundleDir, err = ioutil.TempDir(\"\", \"bundle\")\nif err != nil {\n- return \"\", \"\", nil, fmt.Errorf(\"error creating bundle dir: %v\", err)\n+ return \"\", nil, fmt.Errorf(\"error creating bundle dir: %v\", err)\n}\nif err = writeSpec(bundleDir, spec); err != nil {\n- return \"\", \"\", nil, fmt.Errorf(\"error writing spec: %v\", err)\n+ return \"\", nil, fmt.Errorf(\"error writing spec: %v\", err)\n}\nconf = &boot.Config{\n@@ -110,7 +125,7 @@ func SetupContainer(spec *specs.Spec) (rootDir, bundleDir string, conf *boot.Con\nTestModeNoFlags: true,\n}\n- return rootDir, bundleDir, conf, nil\n+ return bundleDir, conf, nil\n}\n// writeSpec writes the spec to disk in the given directory.\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: Support abbreviated container IDs.
Just a UI/usability addition. It's a lot easier to type "60" than
"60185c721d7e10c00489f1fa210ee0d35c594873d6376b457fb1815e4fdbfc2c".
PiperOrigin-RevId: 199547932
Change-Id: I19011b5061a88aba48a9ad7f8cf954a6782de854 |
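Note on the abbreviated-ID commit above: the test it adds relies on a simple rule — a prefix resolves only when it matches exactly one stored container ID. Below is a minimal, self-contained Go sketch of that matching rule; resolveID is a made-up name for illustration and is not part of the runsc API.

package main

import (
	"fmt"
	"strings"
)

// resolveID returns the single container ID that starts with prefix. It fails
// when nothing matches or when the prefix is ambiguous, mirroring the
// unambiguous/ambiguous cases exercised by TestAbbreviatedIDs.
func resolveID(prefix string, ids []string) (string, error) {
	var matches []string
	for _, id := range ids {
		if strings.HasPrefix(id, prefix) {
			matches = append(matches, id)
		}
	}
	switch len(matches) {
	case 0:
		return "", fmt.Errorf("no container with ID prefix %q", prefix)
	case 1:
		return matches[0], nil
	default:
		return "", fmt.Errorf("prefix %q is ambiguous: %v", prefix, matches)
	}
}

func main() {
	ids := []string{"foo-1234", "bar-5678", "baz-9abc"}
	fmt.Println(resolveID("f", ids))  // resolves to foo-1234
	fmt.Println(resolveID("ba", ids)) // error: ambiguous between bar-... and baz-...
}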
260,013 | 07.06.2018 11:36:26 | 25,200 | 7e9893eeb500ab56dcab80471300df50c12288ae | Add missing rpcinet ioctls. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/socket.go",
"new_path": "pkg/sentry/socket/rpcinet/socket.go",
"diff": "@@ -417,10 +417,22 @@ func (s *socketOperations) Ioctl(ctx context.Context, io usermem.IO, args arch.S\nvar buf []byte\nswitch cmd {\n// The following ioctls take 4 byte argument parameters.\n- case syscall.TIOCINQ, syscall.TIOCOUTQ:\n+ case syscall.TIOCINQ,\n+ syscall.TIOCOUTQ:\nbuf = make([]byte, 4)\n// The following ioctls have args which are sizeof(struct ifreq).\n- case syscall.SIOCGIFINDEX, syscall.SIOCGIFNETMASK, syscall.SIOCGIFHWADDR, syscall.SIOCGIFNAME, syscall.SIOCGIFFLAGS:\n+ case syscall.SIOCGIFADDR,\n+ syscall.SIOCGIFBRDADDR,\n+ syscall.SIOCGIFDSTADDR,\n+ syscall.SIOCGIFFLAGS,\n+ syscall.SIOCGIFHWADDR,\n+ syscall.SIOCGIFINDEX,\n+ syscall.SIOCGIFMAP,\n+ syscall.SIOCGIFMETRIC,\n+ syscall.SIOCGIFMTU,\n+ syscall.SIOCGIFNAME,\n+ syscall.SIOCGIFNETMASK,\n+ syscall.SIOCGIFTXQLEN:\nbuf = make([]byte, sizeOfIfReq)\ndefault:\nreturn 0, syserror.ENOTTY\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add missing rpcinet ioctls.
PiperOrigin-RevId: 199669120
Change-Id: I0be88cdbba29760f967e9a5bb4144ca62c1ed7aa |
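Note on the ioctl commit above: the buffer sizes come from the kernel ABI — TIOCINQ/TIOCOUTQ return a 32-bit count, while the SIOCGIF* family reads and writes a struct ifreq (a 16-byte interface name plus a 24-byte union, 40 bytes on linux/amd64). The sketch below issues one such request directly from Go against a host socket; the ifreq layout is a hand-written stand-in and assumes linux/amd64.

package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

// ifreq mirrors struct ifreq on linux/amd64: a 16-byte interface name
// followed by a 24-byte union, 40 bytes in total.
type ifreq struct {
	Name [16]byte
	Data [24]byte
}

func main() {
	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fd)

	var req ifreq
	copy(req.Name[:], "lo")
	// SIOCGIFMTU fills the first 4 bytes of the union with the interface MTU.
	if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd),
		uintptr(syscall.SIOCGIFMTU), uintptr(unsafe.Pointer(&req))); errno != 0 {
		panic(errno)
	}
	mtu := *(*int32)(unsafe.Pointer(&req.Data[0]))
	fmt.Println("MTU of lo:", mtu)
}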
260,013 | 07.06.2018 15:09:27 | 25,200 | 5c37097e34a513845d77bb8b7240f0074aa1c1e9 | rpcinet should not block in read(2) rpcs. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/socket.go",
"new_path": "pkg/sentry/socket/rpcinet/socket.go",
"diff": "@@ -145,31 +145,10 @@ func (s *socketOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOS\nn, e := dst.CopyOut(ctx, res.Data)\nreturn int64(n), e\n}\n- if se != syserr.ErrWouldBlock {\n- return 0, se.ToError()\n- }\n- // We'll have to block. Register for notifications and read again when ready.\n- e, ch := waiter.NewChannelEntry(nil)\n- s.EventRegister(&e, waiter.EventIn)\n- defer s.EventUnregister(&e)\n-\n- for {\n- res, se := rpcRead(ctx.(*kernel.Task), req)\n- if se == nil {\n- n, e := dst.CopyOut(ctx, res.Data)\n- return int64(n), e\n- }\n- if se != syserr.ErrWouldBlock {\nreturn 0, se.ToError()\n}\n- if err := ctx.(*kernel.Task).Block(ch); err != nil {\n- return 0, err\n- }\n- }\n-}\n-\nfunc rpcWrite(t *kernel.Task, req *pb.SyscallRequest_Write) (uint32, *syserr.Error) {\ns := t.NetworkContext().(*Stack)\nid, c := s.rpcConn.NewRequest(pb.SyscallRequest{Args: req}, false /* ignoreResult */)\n"
}
] | Go | Apache License 2.0 | google/gvisor | rpcinet should not block in read(2) rpcs.
PiperOrigin-RevId: 199703609
Change-Id: I8153b0396b22a230a68d4b69c46652a5545f7630 |
259,992 | 08.06.2018 09:58:29 | 25,200 | 5c51bc51e43a0f1d1f06ae490b0d352d1b483766 | Drop capabilities not needed by Gofer | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/BUILD",
"new_path": "runsc/boot/BUILD",
"diff": "@@ -5,7 +5,6 @@ load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\", \"go_test\")\ngo_library(\nname = \"boot\",\nsrcs = [\n- \"capability.go\",\n\"config.go\",\n\"controller.go\",\n\"events.go\",\n@@ -72,7 +71,6 @@ go_library(\n\"//runsc/boot/filter\",\n\"//runsc/specutils\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n- \"@com_github_syndtr_gocapability//capability:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/BUILD",
"new_path": "runsc/cmd/BUILD",
"diff": "@@ -6,6 +6,7 @@ go_library(\nname = \"cmd\",\nsrcs = [\n\"boot.go\",\n+ \"capability.go\",\n\"checkpoint.go\",\n\"cmd.go\",\n\"create.go\",\n@@ -39,6 +40,7 @@ go_library(\n\"//runsc/specutils\",\n\"@com_github_google_subcommands//:go_default_library\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n+ \"@com_github_syndtr_gocapability//capability:go_default_library\",\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/boot.go",
"new_path": "runsc/cmd/boot.go",
"diff": "@@ -16,7 +16,6 @@ package cmd\nimport (\n\"os\"\n- \"runtime\"\n\"runtime/debug\"\n\"strings\"\n\"syscall\"\n@@ -24,7 +23,6 @@ import (\n\"context\"\n\"flag\"\n\"github.com/google/subcommands\"\n- specs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/runsc/boot\"\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n@@ -106,8 +104,26 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nwaitStatus := args[1].(*syscall.WaitStatus)\nif b.applyCaps {\n- setCapsAndCallSelf(conf, spec)\n- Fatalf(\"setCapsAndCallSelf must never return\")\n+ caps := spec.Process.Capabilities\n+ if conf.Platform == boot.PlatformPtrace {\n+ // Ptrace platform requires extra capabilities.\n+ const c = \"CAP_SYS_PTRACE\"\n+ caps.Bounding = append(caps.Bounding, c)\n+ caps.Effective = append(caps.Effective, c)\n+ caps.Permitted = append(caps.Permitted, c)\n+ }\n+\n+ // Remove --apply-caps arg to call myself.\n+ var args []string\n+ for _, arg := range os.Args {\n+ if !strings.Contains(arg, \"apply-caps\") {\n+ args = append(args, arg)\n+ }\n+ }\n+ if err := setCapsAndCallSelf(spec, args, caps); err != nil {\n+ Fatalf(\"%v\", err)\n+ }\n+ panic(\"setCapsAndCallSelf must never return success\")\n}\n// Create the loader.\n@@ -130,32 +146,3 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n*waitStatus = syscall.WaitStatus(ws.Status())\nreturn subcommands.ExitSuccess\n}\n-\n-// setCapsAndCallSelf sets capabilities to the current thread and then execve's\n-// itself again with the same arguments except '--apply-caps' to restart the\n-// whole process with the desired capabilities.\n-func setCapsAndCallSelf(conf *boot.Config, spec *specs.Spec) {\n- // Keep thread locked while capabilities are changed.\n- runtime.LockOSThread()\n- defer runtime.UnlockOSThread()\n-\n- if err := boot.ApplyCaps(conf, spec.Process.Capabilities); err != nil {\n- Fatalf(\"ApplyCaps, err: %v\", err)\n- }\n- binPath, err := specutils.BinPath()\n- if err != nil {\n- Fatalf(\"%v\", err)\n- }\n-\n- // Remove --apply-caps arg to call myself.\n- var args []string\n- for _, arg := range os.Args {\n- if !strings.Contains(arg, \"apply-caps\") {\n- args = append(args, arg)\n- }\n- }\n-\n- log.Infof(\"Execve 'boot' again, bye!\")\n- log.Infof(\"%s %v\", binPath, args)\n- syscall.Exec(binPath, args, []string{})\n-}\n"
},
{
"change_type": "RENAME",
"old_path": "runsc/boot/capability.go",
"new_path": "runsc/cmd/capability.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package boot\n+package cmd\nimport (\n\"fmt\"\n@@ -20,51 +20,72 @@ import (\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"github.com/syndtr/gocapability/capability\"\n+ \"gvisor.googlesource.com/gvisor/pkg/log\"\n)\n-// ApplyCaps applies the capabilities in the spec to the current thread.\n+// applyCaps applies the capabilities in the spec to the current thread.\n//\n// Note that it must be called with current thread locked.\n-func ApplyCaps(conf *Config, caps *specs.LinuxCapabilities) error {\n+func applyCaps(caps *specs.LinuxCapabilities) error {\nsetter, err := capability.NewPid2(os.Getpid())\nif err != nil {\nreturn err\n}\n+ if err := setter.Load(); err != nil {\n+ return err\n+ }\n- bounding, err := capsFromNames(caps.Bounding)\n+ bounding, err := trimCaps(caps.Bounding, setter)\nif err != nil {\nreturn err\n}\n- effective, err := capsFromNames(caps.Effective)\n+ setter.Set(capability.BOUNDS, bounding...)\n+\n+ effective, err := trimCaps(caps.Effective, setter)\nif err != nil {\nreturn err\n}\n- permitted, err := capsFromNames(caps.Permitted)\n+ setter.Set(capability.EFFECTIVE, effective...)\n+\n+ permitted, err := trimCaps(caps.Permitted, setter)\nif err != nil {\nreturn err\n}\n- inheritable, err := capsFromNames(caps.Inheritable)\n+ setter.Set(capability.PERMITTED, permitted...)\n+\n+ inheritable, err := trimCaps(caps.Inheritable, setter)\nif err != nil {\nreturn err\n}\n- ambient, err := capsFromNames(caps.Ambient)\n+ setter.Set(capability.INHERITABLE, inheritable...)\n+\n+ ambient, err := trimCaps(caps.Ambient, setter)\nif err != nil {\nreturn err\n}\n+ setter.Set(capability.AMBIENT, ambient...)\n- // Ptrace platform requires extra capabilities.\n- if conf.Platform == PlatformPtrace {\n- bounding = append(bounding, capability.CAP_SYS_PTRACE)\n- effective = append(effective, capability.CAP_SYS_PTRACE)\n- permitted = append(permitted, capability.CAP_SYS_PTRACE)\n+ return setter.Apply(capability.CAPS | capability.BOUNDS | capability.AMBS)\n}\n- setter.Set(capability.BOUNDS, bounding...)\n- setter.Set(capability.PERMITTED, permitted...)\n- setter.Set(capability.INHERITABLE, inheritable...)\n- setter.Set(capability.EFFECTIVE, effective...)\n- setter.Set(capability.AMBIENT, ambient...)\n- return setter.Apply(capability.CAPS | capability.BOUNDS | capability.AMBS)\n+func trimCaps(names []string, setter capability.Capabilities) ([]capability.Cap, error) {\n+ wantedCaps, err := capsFromNames(names)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ // Trim down capabilities that aren't possible to acquire.\n+ var caps []capability.Cap\n+ for _, c := range wantedCaps {\n+ // Capability rules are more complicated than this, but this catches most\n+ // problems with tests running with non-priviledged user.\n+ if setter.Get(capability.PERMITTED, c) {\n+ caps = append(caps, c)\n+ } else {\n+ log.Warningf(\"Capability %q is not permitted, dropping it.\", c)\n+ }\n+ }\n+ return caps, nil\n}\nfunc capsFromNames(names []string) ([]capability.Cap, error) {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/cmd.go",
"new_path": "runsc/cmd/cmd.go",
"diff": "@@ -18,9 +18,13 @@ package cmd\nimport (\n\"fmt\"\n\"os\"\n+ \"runtime\"\n\"strconv\"\n+ \"syscall\"\n+ specs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n+ \"gvisor.googlesource.com/gvisor/runsc/specutils\"\n)\n// Fatalf logs to stderr and exits with a failure status code.\n@@ -64,3 +68,25 @@ func (i *intFlags) Set(s string) error {\n*i = append(*i, fd)\nreturn nil\n}\n+\n+// setCapsAndCallSelf sets capabilities to the current thread and then execve's\n+// itself again with the arguments specified in 'args' to restart the process\n+// with the desired capabilities.\n+func setCapsAndCallSelf(spec *specs.Spec, args []string, caps *specs.LinuxCapabilities) error {\n+ // Keep thread locked while capabilities are changed.\n+ runtime.LockOSThread()\n+ defer runtime.UnlockOSThread()\n+\n+ if err := applyCaps(caps); err != nil {\n+ return fmt.Errorf(\"applyCaps() failed: %v\", err)\n+ }\n+ binPath, err := specutils.BinPath()\n+ if err != nil {\n+ return err\n+ }\n+\n+ log.Infof(\"Capabilities applied: %+v\", caps)\n+ log.Infof(\"Execve %q again, bye!\", binPath)\n+ syscall.Exec(binPath, args, []string{})\n+ panic(\"unreachable\")\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/gofer.go",
"new_path": "runsc/cmd/gofer.go",
"diff": "package cmd\nimport (\n+ \"os\"\n\"sync\"\n\"context\"\n\"flag\"\n\"github.com/google/subcommands\"\n+ specs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/pkg/p9\"\n\"gvisor.googlesource.com/gvisor/pkg/unet\"\n@@ -32,6 +34,7 @@ import (\ntype Gofer struct {\nbundleDir string\nioFDs intFlags\n+ applyCaps bool\n}\n// Name implements subcommands.Command.\n@@ -53,6 +56,7 @@ func (*Gofer) Usage() string {\nfunc (g *Gofer) SetFlags(f *flag.FlagSet) {\nf.StringVar(&g.bundleDir, \"bundle\", \"\", \"path to the root of the bundle directory, defaults to the current directory\")\nf.Var(&g.ioFDs, \"io-fds\", \"list of FDs to connect 9P servers. They must follow this order: root first, then mounts as defined in the spec\")\n+ f.BoolVar(&g.applyCaps, \"apply-caps\", true, \"if true, apply capabilities to restrict what the Gofer process can do\")\n}\n// Execute implements subcommands.Command.\n@@ -66,6 +70,32 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nif err != nil {\nFatalf(\"error reading spec: %v\", err)\n}\n+\n+ if g.applyCaps {\n+ // Minimal set of capabilities needed by the Gofer to operate on files.\n+ caps := []string{\n+ \"CAP_CHOWN\",\n+ \"CAP_DAC_OVERRIDE\",\n+ \"CAP_DAC_READ_SEARCH\",\n+ \"CAP_FOWNER\",\n+ \"CAP_FSETID\",\n+ }\n+ lc := &specs.LinuxCapabilities{\n+ Bounding: caps,\n+ Effective: caps,\n+ Permitted: caps,\n+ }\n+\n+ // Disable caps when calling myself again.\n+ // Note: minimal argument handling for the default case to keep it simple.\n+ args := os.Args\n+ args = append(args, \"--apply-caps=false\")\n+ if err := setCapsAndCallSelf(spec, args, lc); err != nil {\n+ Fatalf(\"Unable to apply caps: %v\", err)\n+ }\n+ panic(\"unreachable\")\n+ }\n+\nspecutils.LogSpec(spec)\n// Start with root mount, then add any other addition mount as needed.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -295,23 +295,23 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\n// process. IPC and UTS namespaces from the host are not used as they\n// are virtualized inside the sandbox. Be paranoid and run inside an empty\n// namespace for these.\n- log.Infof(\"Sandbox will be started in empty IPC and UTS namespaces\")\n+ log.Infof(\"Sandbox will be started in new IPC and UTS namespaces\")\nnss := []specs.LinuxNamespace{\n{Type: specs.IPCNamespace},\n{Type: specs.UTSNamespace},\n}\nif conf.Platform == boot.PlatformPtrace {\n- // TODO: Also set an empty PID namespace so that we limit\n+ // TODO: Also set a new PID namespace so that we limit\n// access to other host processes.\nlog.Infof(\"Sandbox will be started in the current PID namespace\")\n} else {\n- log.Infof(\"Sandbox will be started in empty PID namespace\")\n+ log.Infof(\"Sandbox will be started in a new PID namespace\")\nnss = append(nss, specs.LinuxNamespace{Type: specs.PIDNamespace})\n}\nif conf.FileAccess == boot.FileAccessProxy {\n- log.Infof(\"Sandbox will be started in empty mount namespace\")\n+ log.Infof(\"Sandbox will be started in new mount namespace\")\nnss = append(nss, specs.LinuxNamespace{Type: specs.MountNamespace})\n} else {\nlog.Infof(\"Sandbox will be started in the current mount namespace\")\n@@ -324,7 +324,7 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\nlog.Infof(\"Sandbox will be started in the container's network namespace: %+v\", ns)\nnss = append(nss, ns)\n} else {\n- log.Infof(\"Sandbox will be started in empty network namespace\")\n+ log.Infof(\"Sandbox will be started in new network namespace\")\nnss = append(nss, specs.LinuxNamespace{Type: specs.NetworkNamespace})\n}\n@@ -347,7 +347,7 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\ncmd.Args = append(cmd.Args, \"--apply-caps=true\")\n} else {\n- log.Infof(\"Sandbox will be started in empty user namespace\")\n+ log.Infof(\"Sandbox will be started in new user namespace\")\nnss = append(nss, specs.LinuxNamespace{Type: specs.UserNamespace})\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Drop capabilities not needed by Gofer
PiperOrigin-RevId: 199808391
Change-Id: Ib37a4fb6193dc85c1f93bc16769d6aa41854b9d4 |
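Note on the capability-dropping commit above: it uses the gocapability library to keep only a small file-access capability set for the gofer. A rough standalone sketch of the same idea follows; it is not the runsc code path, and it assumes the process starts with enough privilege (effectively root) to shrink its own bounding set.

package main

import (
	"log"
	"os"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	// The file-related capabilities kept by the gofer in the commit above.
	keep := []capability.Cap{
		capability.CAP_CHOWN,
		capability.CAP_DAC_OVERRIDE,
		capability.CAP_DAC_READ_SEARCH,
		capability.CAP_FOWNER,
		capability.CAP_FSETID,
	}

	caps, err := capability.NewPid2(os.Getpid())
	if err != nil {
		log.Fatalf("NewPid2: %v", err)
	}
	// The in-memory set starts empty (Load is deliberately not called), so
	// only the listed capabilities survive the Apply below.
	caps.Set(capability.CAPS|capability.BOUNDS, keep...)
	if err := caps.Apply(capability.CAPS | capability.BOUNDS); err != nil {
		log.Fatalf("Apply: %v", err)
	}
	log.Print("capabilities restricted to the minimal gofer set")
}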
259,962 | 08.06.2018 11:45:30 | 25,200 | de8dba205f66a07c793619a3896f2376b41a4b55 | Add a protocol option to set congestion control algorithm.
Also adds support to query available congestion control algorithms. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/protocol.go",
"new_path": "pkg/tcpip/transport/tcp/protocol.go",
"diff": "package tcp\nimport (\n+ \"strings\"\n\"sync\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n@@ -58,11 +59,21 @@ type ReceiveBufferSizeOption struct {\nMax int\n}\n+// CongestionControlOption sets the current congestion control algorithm.\n+type CongestionControlOption string\n+\n+// AvailableCongestionControlOption returns the supported congestion control\n+// algorithms.\n+type AvailableCongestionControlOption string\n+\ntype protocol struct {\nmu sync.Mutex\nsackEnabled bool\nsendBufferSize SendBufferSizeOption\nrecvBufferSize ReceiveBufferSizeOption\n+ congestionControl string\n+ availableCongestionControl []string\n+ allowedCongestionControl []string\n}\n// Number returns the tcp protocol number.\n@@ -151,6 +162,16 @@ func (p *protocol) SetOption(option interface{}) *tcpip.Error {\np.mu.Unlock()\nreturn nil\n+ case CongestionControlOption:\n+ for _, c := range p.availableCongestionControl {\n+ if string(v) == c {\n+ p.mu.Lock()\n+ p.congestionControl = string(v)\n+ p.mu.Unlock()\n+ return nil\n+ }\n+ }\n+ return tcpip.ErrInvalidOptionValue\ndefault:\nreturn tcpip.ErrUnknownProtocolOption\n}\n@@ -176,7 +197,16 @@ func (p *protocol) Option(option interface{}) *tcpip.Error {\n*v = p.recvBufferSize\np.mu.Unlock()\nreturn nil\n-\n+ case *CongestionControlOption:\n+ p.mu.Lock()\n+ *v = CongestionControlOption(p.congestionControl)\n+ p.mu.Unlock()\n+ return nil\n+ case *AvailableCongestionControlOption:\n+ p.mu.Lock()\n+ *v = AvailableCongestionControlOption(strings.Join(p.availableCongestionControl, \" \"))\n+ p.mu.Unlock()\n+ return nil\ndefault:\nreturn tcpip.ErrUnknownProtocolOption\n}\n@@ -187,6 +217,8 @@ func init() {\nreturn &protocol{\nsendBufferSize: SendBufferSizeOption{minBufferSize, DefaultBufferSize, maxBufferSize},\nrecvBufferSize: ReceiveBufferSizeOption{minBufferSize, DefaultBufferSize, maxBufferSize},\n+ congestionControl: \"reno\",\n+ availableCongestionControl: []string{\"reno\"},\n}\n})\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"new_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"diff": "@@ -2770,3 +2770,71 @@ func TestTCPEndpointProbe(t *testing.T) {\nt.Fatalf(\"TCP Probe function was not called\")\n}\n}\n+\n+func TestSetCongestionControl(t *testing.T) {\n+ testCases := []struct {\n+ cc tcp.CongestionControlOption\n+ mustPass bool\n+ }{\n+ {\"reno\", true},\n+ {\"cubic\", false},\n+ }\n+ for _, tc := range testCases {\n+ t.Run(fmt.Sprintf(\"SetTransportProtocolOption(.., %v)\", tc.cc), func(t *testing.T) {\n+ c := context.New(t, 1500)\n+ defer c.Cleanup()\n+\n+ s := c.Stack()\n+\n+ if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tc.cc); err != nil && tc.mustPass {\n+ t.Fatalf(\"s.SetTransportProtocolOption(%v, %v) = %v, want not-nil\", tcp.ProtocolNumber, tc.cc, err)\n+ }\n+\n+ var cc tcp.CongestionControlOption\n+ if err := s.TransportProtocolOption(tcp.ProtocolNumber, &cc); err != nil {\n+ t.Fatalf(\"s.TransportProtocolOption(%v, %v) = %v\", tcp.ProtocolNumber, &cc, err)\n+ }\n+ if got, want := cc, tcp.CongestionControlOption(\"reno\"); got != want {\n+ t.Fatalf(\"unexpected value for congestion control got: %v, want: %v\", got, want)\n+ }\n+ })\n+ }\n+}\n+\n+func TestAvailableCongestionControl(t *testing.T) {\n+ c := context.New(t, 1500)\n+ defer c.Cleanup()\n+\n+ s := c.Stack()\n+\n+ // Query permitted congestion control algorithms.\n+ var aCC tcp.AvailableCongestionControlOption\n+ if err := s.TransportProtocolOption(tcp.ProtocolNumber, &aCC); err != nil {\n+ t.Fatalf(\"s.TransportProtocolOption(%v, %v) = %v\", tcp.ProtocolNumber, &aCC, err)\n+ }\n+ if got, want := aCC, tcp.AvailableCongestionControlOption(\"reno\"); got != want {\n+ t.Fatalf(\"unexpected value for AvailableCongestionControlOption: got: %v, want: %v\", got, want)\n+ }\n+}\n+\n+func TestSetAvailableCongestionControl(t *testing.T) {\n+ c := context.New(t, 1500)\n+ defer c.Cleanup()\n+\n+ s := c.Stack()\n+\n+ // Setting AvailableCongestionControlOption should fail.\n+ aCC := tcp.AvailableCongestionControlOption(\"xyz\")\n+ if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &aCC); err == nil {\n+ t.Fatalf(\"s.TransportProtocolOption(%v, %v) = nil, want non-nil\", tcp.ProtocolNumber, &aCC)\n+ }\n+\n+ // Verify that we still get the expected list of congestion control options.\n+ var cc tcp.CongestionControlOption\n+ if err := s.TransportProtocolOption(tcp.ProtocolNumber, &cc); err != nil {\n+ t.Fatalf(\"s.TransportProtocolOption(%v, %v) = %v\", tcp.ProtocolNumber, &cc, err)\n+ }\n+ if got, want := cc, tcp.CongestionControlOption(\"reno\"); got != want {\n+ t.Fatalf(\"unexpected value for congestion control got: %v, want: %v\", got, want)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add a protocol option to set congestion control algorithm.
Also adds support to query available congestion control algorithms.
PiperOrigin-RevId: 199826897
Change-Id: I2b338b709820ee9cf58bb56d83aa7b1a39f4eab2 |
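For reference, the new protocol options added above are exercised the same way the commit's tests do: the algorithm is set with SetTransportProtocolOption and the active and available values are read back with TransportProtocolOption. The sketch below assumes an already-constructed *stack.Stack and is not a complete program.

package example

import (
	"fmt"

	"gvisor.googlesource.com/gvisor/pkg/tcpip/stack"
	"gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp"
)

// configureCongestionControl selects "reno" (the only algorithm registered by
// default) and prints what the stack reports back.
func configureCongestionControl(s *stack.Stack) error {
	if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.CongestionControlOption("reno")); err != nil {
		return fmt.Errorf("SetTransportProtocolOption: %v", err)
	}

	var cc tcp.CongestionControlOption
	if err := s.TransportProtocolOption(tcp.ProtocolNumber, &cc); err != nil {
		return fmt.Errorf("TransportProtocolOption: %v", err)
	}
	var avail tcp.AvailableCongestionControlOption
	if err := s.TransportProtocolOption(tcp.ProtocolNumber, &avail); err != nil {
		return fmt.Errorf("TransportProtocolOption: %v", err)
	}

	fmt.Printf("congestion control: %q (available: %q)\n", cc, avail)
	return nil
}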
259,858 | 08.06.2018 15:00:29 | 25,200 | 6728f09910bd9f7633f277fafe6945cfaa2abf42 | Fix sigaltstack semantics.
Walking off the bottom of the sigaltstack, for example with recursive faults,
results in forced signal delivery, not resetting the stack or pushing signal
stack to whatever happens to lie below the signal stack. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/arch/arch.go",
"new_path": "pkg/sentry/arch/arch.go",
"diff": "@@ -158,7 +158,7 @@ type Context interface {\n// rt is true if SignalRestore is being entered from rt_sigreturn and\n// false if SignalRestore is being entered from sigreturn.\n// SignalRestore returns the thread's new signal mask.\n- SignalRestore(st *Stack, rt bool) (linux.SignalSet, error)\n+ SignalRestore(st *Stack, rt bool) (linux.SignalSet, SignalStack, error)\n// CPUIDEmulate emulates a CPUID instruction according to current register state.\nCPUIDEmulate(l log.Logger)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/arch/signal_amd64.go",
"new_path": "pkg/sentry/arch/signal_amd64.go",
"diff": "@@ -377,6 +377,14 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt\nsp = frameBottom + usermem.Addr(frameSize)\nst.Bottom = sp\n+ // Prior to proceeding, figure out if the frame will exhaust the range\n+ // for the signal stack. This is not allowed, and should immediately\n+ // force signal delivery (reverting to the default handler).\n+ if act.IsOnStack() && alt.IsEnabled() && !alt.Contains(frameBottom) {\n+ return syscall.EFAULT\n+ }\n+\n+ // Adjust the code.\ninfo.FixSignalCodeForUser()\n// Set up the stack frame.\n@@ -422,15 +430,15 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt\n// SignalRestore implements Context.SignalRestore. (Compare to Linux's\n// arch/x86/kernel/signal.c:sys_rt_sigreturn().)\n-func (c *context64) SignalRestore(st *Stack, rt bool) (linux.SignalSet, error) {\n+func (c *context64) SignalRestore(st *Stack, rt bool) (linux.SignalSet, SignalStack, error) {\n// Copy out the stack frame.\nvar uc UContext64\nif _, err := st.Pop(&uc); err != nil {\n- return 0, err\n+ return 0, SignalStack{}, err\n}\nvar info SignalInfo\nif _, err := st.Pop(&info); err != nil {\n- return 0, err\n+ return 0, SignalStack{}, err\n}\n// Restore registers.\n@@ -472,5 +480,5 @@ func (c *context64) SignalRestore(st *Stack, rt bool) (linux.SignalSet, error) {\nlog.Infof(\"sigreturn unable to restore application fpstate\")\n}\n- return uc.Sigset, nil\n+ return uc.Sigset, uc.Stack, nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/arch/signal_stack.go",
"new_path": "pkg/sentry/arch/signal_stack.go",
"diff": "@@ -39,12 +39,19 @@ func (s SignalStack) Top() usermem.Addr {\nreturn usermem.Addr(s.Addr + s.Size)\n}\n-// SetOnStack marks this signal stack as in use. (This is only called on copies\n-// sent to user applications, so there's no corresponding ClearOnStack.)\n+// SetOnStack marks this signal stack as in use.\n+//\n+// Note that there is no corresponding ClearOnStack, and that this should only\n+// be called on copies that are serialized to userspace.\nfunc (s *SignalStack) SetOnStack() {\ns.Flags |= SignalStackFlagOnStack\n}\n+// Contains checks if the stack pointer is within this stack.\n+func (s *SignalStack) Contains(sp usermem.Addr) bool {\n+ return usermem.Addr(s.Addr) < sp && sp <= usermem.Addr(s.Addr+s.Size)\n+}\n+\n// NativeSignalStack is a type that is equivalent to stack_t in the guest\n// architecture.\ntype NativeSignalStack interface {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_signals.go",
"new_path": "pkg/sentry/kernel/task_signals.go",
"diff": "@@ -212,7 +212,9 @@ func (t *Task) deliverSignal(info *arch.SignalInfo, act arch.SignalAct) taskRunS\n// Try to deliver the signal to the user-configured handler.\nt.Debugf(\"Signal %d: delivering to handler\", info.Signo)\nif err := t.deliverSignalToHandler(info, act); err != nil {\n- t.Warningf(\"Failed to deliver signal %+v to user handler: %v\", info, err)\n+ // This is not a warning, it can occur during normal operation.\n+ t.Debugf(\"Failed to deliver signal %+v to user handler: %v\", info, err)\n+\n// Send a forced SIGSEGV. If the signal that couldn't be delivered\n// was a SIGSEGV, force the handler to SIG_DFL.\nt.forceSignal(linux.SIGSEGV, linux.Signal(info.Signo) == linux.SIGSEGV /* unconditional */)\n@@ -241,7 +243,7 @@ func (t *Task) deliverSignalToHandler(info *arch.SignalInfo, act arch.SignalAct)\nalt := t.signalStack\nif act.IsOnStack() && alt.IsEnabled() {\nalt.SetOnStack()\n- if !t.OnSignalStack(alt) {\n+ if !alt.Contains(sp) {\nsp = usermem.Addr(alt.Top())\n}\n}\n@@ -275,18 +277,20 @@ var ctrlResume = &SyscallControl{ignoreReturn: true}\n// rt is true).\nfunc (t *Task) SignalReturn(rt bool) (*SyscallControl, error) {\nst := t.Stack()\n- sigset, err := t.Arch().SignalRestore(st, rt)\n+ sigset, alt, err := t.Arch().SignalRestore(st, rt)\nif err != nil {\nreturn nil, err\n}\n+ // Attempt to record the given signal stack. Note that we silently\n+ // ignore failures here, as does Linux. Only an EFAULT may be\n+ // generated, but SignalRestore has already deserialized the entire\n+ // frame successfully.\n+ t.SetSignalStack(alt)\n+\n// Restore our signal mask. SIGKILL and SIGSTOP should not be blocked.\nt.SetSignalMask(sigset &^ UnblockableSignals)\n- // TODO: sys_rt_sigreturn also calls restore_altstack from\n- // uc.stack, allowing the signal handler to implicitly mutate the signal\n- // stack.\n-\nreturn ctrlResume, nil\n}\n@@ -624,23 +628,41 @@ func (t *Task) SetSavedSignalMask(mask linux.SignalSet) {\n// SignalStack returns the task-private signal stack.\nfunc (t *Task) SignalStack() arch.SignalStack {\n- return t.signalStack\n+ alt := t.signalStack\n+ if t.onSignalStack(alt) {\n+ alt.Flags |= arch.SignalStackFlagOnStack\n+ }\n+ return alt\n}\n-// OnSignalStack returns true if, when the task resumes running, it will run on\n-// the task-private signal stack.\n-func (t *Task) OnSignalStack(s arch.SignalStack) bool {\n+// onSignalStack returns true if the task is executing on the given signal stack.\n+func (t *Task) onSignalStack(alt arch.SignalStack) bool {\nsp := usermem.Addr(t.Arch().Stack())\n- return usermem.Addr(s.Addr) <= sp && sp < usermem.Addr(s.Addr+s.Size)\n+ return alt.Contains(sp)\n+}\n+\n+// SetSignalStack sets the task-private signal stack.\n+//\n+// This value may not be changed if the task is currently executing on the\n+// signal stack, i.e. if t.onSignalStack returns true. In this case, this\n+// function will return false. 
Otherwise, true is returned.\n+func (t *Task) SetSignalStack(alt arch.SignalStack) bool {\n+ // Check that we're not executing on the stack.\n+ if t.onSignalStack(t.signalStack) {\n+ return false\n}\n-// SetSignalStack sets the task-private signal stack and clears the\n-// SignalStackFlagDisable, since we have a signal stack.\n-func (t *Task) SetSignalStack(alt arch.SignalStack) error {\n+ if alt.Flags&arch.SignalStackFlagDisable != 0 {\n+ // Don't record anything beyond the flags.\n+ t.signalStack = arch.SignalStack{\n+ Flags: arch.SignalStackFlagDisable,\n+ }\n+ } else {\n// Mask out irrelevant parts: only disable matters.\nalt.Flags &= arch.SignalStackFlagDisable\nt.signalStack = alt\n- return nil\n+ }\n+ return true\n}\n// SetSignalAct atomically sets the thread group's signal action for signal sig\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_signal.go",
"new_path": "pkg/sentry/syscalls/linux/sys_signal.go",
"diff": "@@ -315,25 +315,23 @@ func Sigaltstack(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S\nsetaddr := args[0].Pointer()\noldaddr := args[1].Pointer()\n- if oldaddr != 0 {\nalt := t.SignalStack()\n- if t.OnSignalStack(alt) {\n- alt.Flags |= arch.SignalStackFlagOnStack\n- }\n+ if oldaddr != 0 {\nif err := t.CopyOutSignalStack(oldaddr, &alt); err != nil {\nreturn 0, nil, err\n}\n}\nif setaddr != 0 {\n- if t.OnSignalStack(t.SignalStack()) {\n- return 0, nil, syserror.EPERM\n- }\nalt, err := t.CopyInSignalStack(setaddr)\nif err != nil {\nreturn 0, nil, err\n}\n- if err := t.SetSignalStack(alt); err != nil {\n- return 0, nil, err\n+ // The signal stack cannot be changed if the task is currently\n+ // on the stack. This is enforced at the lowest level because\n+ // these semantics apply to changing the signal stack via a\n+ // ucontext during a signal handler.\n+ if !t.SetSignalStack(alt) {\n+ return 0, nil, syserror.EPERM\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix sigaltstack semantics.
Walking off the bottom of the sigaltstack, for example with recursive faults,
results in forced signal delivery, not resetting the stack or pushing signal
stack to whatever happens to lie below the signal stack.
PiperOrigin-RevId: 199856085
Change-Id: I0004d2523f0df35d18714de2685b3eaa147837e0 |
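Note on the sigaltstack commit above: the Contains check it introduces treats the alternate stack as the range (Addr, Addr+Size] — the bottom address is excluded and the top included, which matches a descending stack pointer. A tiny self-contained sketch of that rule, using stand-in types rather than the sentry's own:

package example

// signalStack is a stand-in for arch.SignalStack.
type signalStack struct {
	Addr, Size uint64
}

// contains mirrors the new SignalStack.Contains: Addr < sp && sp <= Addr+Size.
func (s signalStack) contains(sp uint64) bool {
	return s.Addr < sp && sp <= s.Addr+s.Size
}

// boundaryCases shows where the endpoints fall.
func boundaryCases() []bool {
	alt := signalStack{Addr: 0x70000000, Size: 0x2000}
	return []bool{
		alt.contains(0x70000000),          // false: the bottom itself is off-stack
		alt.contains(0x70000000 + 0x1000), // true: inside the range
		alt.contains(0x70000000 + 0x2000), // true: the top is included
		alt.contains(0x70000000 + 0x2001), // false: beyond the top
	}
}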
260,013 | 08.06.2018 15:57:33 | 25,200 | 2fbd1cf57cb06c5f0165a2d0e9225eed242a41f5 | Add checks for short CopyOut in rpcinet | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/socket.go",
"new_path": "pkg/sentry/socket/rpcinet/socket.go",
"diff": "@@ -465,7 +465,10 @@ func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nres, err := rpcRecvMsg(t, req)\nif err == nil {\n- _, e := dst.CopyOut(t, res.Data)\n+ n, e := dst.CopyOut(t, res.Data)\n+ if e == nil && n != len(res.Data) {\n+ panic(\"CopyOut failed to copy full buffer\")\n+ }\nreturn int(res.Length), res.Address.GetAddress(), res.Address.GetLength(), socket.ControlMessages{}, syserr.FromError(e)\n}\nif err != syserr.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 {\n@@ -481,7 +484,10 @@ func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nfor {\nres, err := rpcRecvMsg(t, req)\nif err == nil {\n- _, e := dst.CopyOut(t, res.Data)\n+ n, e := dst.CopyOut(t, res.Data)\n+ if e == nil && n != len(res.Data) {\n+ panic(\"CopyOut failed to copy full buffer\")\n+ }\nreturn int(res.Length), res.Address.GetAddress(), res.Address.GetLength(), socket.ControlMessages{}, syserr.FromError(e)\n}\nif err != syserr.ErrWouldBlock {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add checks for short CopyOut in rpcinet
PiperOrigin-RevId: 199864753
Change-Id: Ibace6a1fdf99ee6ce368ac12c390aa8a02dbdfb7 |
259,858 | 08.06.2018 17:50:55 | 25,200 | c0ab059e7b904197f52ade879711d7fb02ffa8c0 | Fix kernel flags handling and add missing vectors. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine_amd64.go",
"new_path": "pkg/sentry/platform/kvm/machine_amd64.go",
"diff": "@@ -150,13 +150,20 @@ func (c *vCPU) fault(signal int32) (*arch.SignalInfo, usermem.AccessType, error)\n// the code provided here. We need to re-execute.\nreturn nil, usermem.NoAccess, platform.ErrContextInterrupt\n}\n- info := &arch.SignalInfo{Signo: signal}\n+ info := &arch.SignalInfo{\n+ Signo: signal,\n+ }\ninfo.SetAddr(uint64(faultAddr))\naccessType := usermem.AccessType{\nRead: code&(1<<1) == 0,\nWrite: code&(1<<1) != 0,\nExecute: code&(1<<4) != 0,\n}\n+ if !accessType.Write && !accessType.Execute {\n+ info.Code = 1 // SEGV_MAPERR.\n+ } else {\n+ info.Code = 2 // SEGV_ACCERR.\n+ }\nreturn info, accessType, platform.ErrContextSignal\n}\n@@ -191,30 +198,55 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts) (*arch.SignalInfo, user\nreturn c.fault(int32(syscall.SIGSEGV))\ncase ring0.Debug, ring0.Breakpoint:\n- info := &arch.SignalInfo{Signo: int32(syscall.SIGTRAP)}\n+ info := &arch.SignalInfo{\n+ Signo: int32(syscall.SIGTRAP),\n+ Code: 1, // TRAP_BRKPT (breakpoint).\n+ }\n+ info.SetAddr(switchOpts.Registers.Rip) // Include address.\nreturn info, usermem.AccessType{}, platform.ErrContextSignal\ncase ring0.GeneralProtectionFault:\n- if !ring0.IsCanonical(switchOpts.Registers.Rip) {\n- // If the RIP is non-canonical, it's a SEGV.\n- info := &arch.SignalInfo{Signo: int32(syscall.SIGSEGV)}\n- return info, usermem.AccessType{}, platform.ErrContextSignal\n+ info := &arch.SignalInfo{\n+ Signo: int32(syscall.SIGSEGV),\n+ Code: arch.SignalInfoKernel,\n}\n- // Otherwise, we deliver a SIGBUS.\n- info := &arch.SignalInfo{Signo: int32(syscall.SIGBUS)}\n+ info.SetAddr(switchOpts.Registers.Rip) // Include address.\nreturn info, usermem.AccessType{}, platform.ErrContextSignal\ncase ring0.InvalidOpcode:\n- info := &arch.SignalInfo{Signo: int32(syscall.SIGILL)}\n+ info := &arch.SignalInfo{\n+ Signo: int32(syscall.SIGILL),\n+ Code: 1, // ILL_ILLOPC (illegal opcode).\n+ }\n+ info.SetAddr(switchOpts.Registers.Rip) // Include address.\n+ return info, usermem.AccessType{}, platform.ErrContextSignal\n+\n+ case ring0.DivideByZero:\n+ info := &arch.SignalInfo{\n+ Signo: int32(syscall.SIGFPE),\n+ Code: 1, // FPE_INTDIV (divide by zero).\n+ }\n+ info.SetAddr(switchOpts.Registers.Rip) // Include address.\nreturn info, usermem.AccessType{}, platform.ErrContextSignal\ncase ring0.X87FloatingPointException:\n- info := &arch.SignalInfo{Signo: int32(syscall.SIGFPE)}\n+ info := &arch.SignalInfo{\n+ Signo: int32(syscall.SIGFPE),\n+ Code: 7, // FPE_FLTINV (invalid operation).\n+ }\n+ info.SetAddr(switchOpts.Registers.Rip) // Include address.\nreturn info, usermem.AccessType{}, platform.ErrContextSignal\ncase ring0.Vector(bounce):\nreturn nil, usermem.NoAccess, platform.ErrContextInterrupt\n+ case ring0.AlignmentCheck:\n+ info := &arch.SignalInfo{\n+ Signo: int32(syscall.SIGBUS),\n+ Code: 2, // BUS_ADRERR (physical address does not exist).\n+ }\n+ return info, usermem.NoAccess, platform.ErrContextSignal\n+\ncase ring0.NMI:\n// An NMI is generated only when a fault is not servicable by\n// KVM itself, so we think some mapping is writeable but it's\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/kernel_amd64.go",
"new_path": "pkg/sentry/platform/ring0/kernel_amd64.go",
"diff": "@@ -20,20 +20,6 @@ import (\n\"encoding/binary\"\n)\n-const (\n- // KernelFlagsSet should always be set in the kernel.\n- KernelFlagsSet = _RFLAGS_RESERVED\n-\n- // UserFlagsSet are always set in userspace.\n- UserFlagsSet = _RFLAGS_RESERVED | _RFLAGS_IF\n-\n- // KernelFlagsClear should always be clear in the kernel.\n- KernelFlagsClear = _RFLAGS_IF | _RFLAGS_NT | _RFLAGS_IOPL\n-\n- // UserFlagsClear are always cleared in userspace.\n- UserFlagsClear = _RFLAGS_NT | _RFLAGS_IOPL\n-)\n-\n// init initializes architecture-specific state.\nfunc (k *Kernel) init(opts KernelOpts) {\n// Save the root page tables.\n@@ -85,6 +71,9 @@ func (c *CPU) init() {\nc.registers.Ss = uint64(Kdata)\nc.registers.Fs = uint64(Kdata)\nc.registers.Gs = uint64(Kdata)\n+\n+ // Set mandatory flags.\n+ c.registers.Eflags = KernelFlagsSet\n}\n// StackTop returns the kernel's stack address.\n@@ -119,7 +108,7 @@ func (c *CPU) TSS() (uint64, uint16, *SegmentDescriptor) {\n//\n//go:nosplit\nfunc (c *CPU) CR0() uint64 {\n- return _CR0_PE | _CR0_PG | _CR0_ET\n+ return _CR0_PE | _CR0_PG | _CR0_AM | _CR0_ET\n}\n// CR4 returns the CPU's CR4 value.\n@@ -240,7 +229,7 @@ func start(c *CPU) {\n// Set the syscall target.\nwrmsr(_MSR_LSTAR, kernelFunc(sysenter))\n- wrmsr(_MSR_SYSCALL_MASK, _RFLAGS_STEP|_RFLAGS_IF|_RFLAGS_DF|_RFLAGS_IOPL|_RFLAGS_AC|_RFLAGS_NT)\n+ wrmsr(_MSR_SYSCALL_MASK, KernelFlagsClear|_RFLAGS_DF)\n// NOTE: This depends on having the 64-bit segments immediately\n// following the 32-bit user segments. This is simply the way the\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/offsets_amd64.go",
"new_path": "pkg/sentry/platform/ring0/offsets_amd64.go",
"diff": "@@ -39,6 +39,7 @@ func Emit(w io.Writer) {\nfmt.Fprintf(w, \"\\n// Bits.\\n\")\nfmt.Fprintf(w, \"#define _RFLAGS_IF 0x%02x\\n\", _RFLAGS_IF)\n+ fmt.Fprintf(w, \"#define _KERNEL_FLAGS 0x%02x\\n\", KernelFlagsSet)\nfmt.Fprintf(w, \"\\n// Vectors.\\n\")\nfmt.Fprintf(w, \"#define DivideByZero 0x%02x\\n\", DivideByZero)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/x86.go",
"new_path": "pkg/sentry/platform/ring0/x86.go",
"diff": "@@ -24,6 +24,7 @@ import (\nconst (\n_CR0_PE = 1 << 0\n_CR0_ET = 1 << 4\n+ _CR0_AM = 1 << 18\n_CR0_PG = 1 << 31\n_CR4_PSE = 1 << 4\n@@ -55,6 +56,20 @@ const (\n_MSR_SYSCALL_MASK = 0xc0000084\n)\n+const (\n+ // KernelFlagsSet should always be set in the kernel.\n+ KernelFlagsSet = _RFLAGS_RESERVED\n+\n+ // UserFlagsSet are always set in userspace.\n+ UserFlagsSet = _RFLAGS_RESERVED | _RFLAGS_IF\n+\n+ // KernelFlagsClear should always be clear in the kernel.\n+ KernelFlagsClear = _RFLAGS_STEP | _RFLAGS_IF | _RFLAGS_IOPL | _RFLAGS_AC | _RFLAGS_NT\n+\n+ // UserFlagsClear are always cleared in userspace.\n+ UserFlagsClear = _RFLAGS_NT | _RFLAGS_IOPL\n+)\n+\n// Vector is an exception vector.\ntype Vector uintptr\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix kernel flags handling and add missing vectors.
PiperOrigin-RevId: 199877174
Change-Id: I9d19ea301608c2b989df0a6123abb1e779427853 |
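Note on the KVM fault handling above: it decodes the x86 page-fault error code (bit 1 set for writes, bit 4 for instruction fetches) and picks a si_code of SEGV_MAPERR for plain read faults and SEGV_ACCERR otherwise. A simplified stand-alone sketch of that decoding, with illustrative stand-in types:

package example

// Signal codes delivered with SIGSEGV.
const (
	segvMaperr = 1 // SEGV_MAPERR: address not mapped
	segvAccerr = 2 // SEGV_ACCERR: permissions violation
)

type accessType struct {
	Read, Write, Execute bool
}

// decodeFault follows the same bit tests as the KVM fault path above.
func decodeFault(code uint64) (accessType, int32) {
	at := accessType{
		Read:    code&(1<<1) == 0,
		Write:   code&(1<<1) != 0,
		Execute: code&(1<<4) != 0,
	}
	if !at.Write && !at.Execute {
		return at, segvMaperr
	}
	return at, segvAccerr
}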
259,891 | 11.06.2018 11:08:51 | 25,200 | 032b0398a5a664c345c4868d5527846a1b6848db | Sentry: split tty.queue into its own file.
Minor refactor. line_discipline.go was home to 2 large structs (lineDiscipline
and queue), and queue is now large enough IMO to get its own file.
Also moves queue locks into the queue struct, making locking simpler. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/tty/BUILD",
"new_path": "pkg/sentry/fs/tty/BUILD",
"diff": "@@ -11,6 +11,7 @@ go_stateify(\n\"inode.go\",\n\"line_discipline.go\",\n\"master.go\",\n+ \"queue.go\",\n\"slave.go\",\n\"terminal.go\",\n],\n@@ -26,6 +27,7 @@ go_library(\n\"inode.go\",\n\"line_discipline.go\",\n\"master.go\",\n+ \"queue.go\",\n\"slave.go\",\n\"terminal.go\",\n\"tty_state.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/tty/line_discipline.go",
"new_path": "pkg/sentry/fs/tty/line_discipline.go",
"diff": "@@ -23,7 +23,6 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n- \"gvisor.googlesource.com/gvisor/pkg/syserror\"\n\"gvisor.googlesource.com/gvisor/pkg/waiter\"\n)\n@@ -38,97 +37,8 @@ const (\nnonCanonMaxBytes = canonMaxBytes - 1\nspacesPerTab = 8\n-\n- // transformInputStateifyKey is used to save and restore queues.\n- transformInputStateifyKey = \"transformInput\"\n-\n- // transformOutputStateifyKey is used to save and restore queues.\n- transformOutputStateifyKey = \"transformOutput\"\n)\n-// transformer is a helper interface to make it easier to stateify queue.\n-type transformer interface {\n- // transform functions require queue's mutex to be held.\n- transform(*lineDiscipline, *queue, []byte) int\n-}\n-\n-// queue represents one of the input or output queues between a pty master and\n-// slave. Bytes written to a queue are added to the read buffer until it is\n-// full, at which point they are written to the wait buffer. Bytes are\n-// processed (i.e. undergo termios transformations) as they are added to the\n-// read buffer. The read buffer is readable when its length is nonzero and\n-// readable is true.\n-type queue struct {\n- waiter.Queue `state:\"nosave\"`\n-\n- // readBuf is buffer of data ready to be read when readable is true.\n- // This data has been processed.\n- readBuf bytes.Buffer `state:\".([]byte)\"`\n-\n- // waitBuf contains data that can't fit into readBuf. It is put here\n- // until it can be loaded into the read buffer. waitBuf contains data\n- // that hasn't been processed.\n- waitBuf bytes.Buffer `state:\".([]byte)\"`\n-\n- // readable indicates whether the read buffer can be read from. In\n- // canonical mode, there can be an unterminated line in the read buffer,\n- // so readable must be checked.\n- readable bool\n-\n- // transform is the the queue's function for transforming bytes\n- // entering the queue. 
For example, transform might convert all '\\r's\n- // entering the queue to '\\n's.\n- transformer\n-}\n-\n-// saveReadBuf is invoked by stateify.\n-func (q *queue) saveReadBuf() []byte {\n- return append([]byte(nil), q.readBuf.Bytes()...)\n-}\n-\n-// loadReadBuf is invoked by stateify.\n-func (q *queue) loadReadBuf(b []byte) {\n- q.readBuf.Write(b)\n-}\n-\n-// saveWaitBuf is invoked by stateify.\n-func (q *queue) saveWaitBuf() []byte {\n- return append([]byte(nil), q.waitBuf.Bytes()...)\n-}\n-\n-// loadWaitBuf is invoked by stateify.\n-func (q *queue) loadWaitBuf(b []byte) {\n- q.waitBuf.Write(b)\n-}\n-\n-// readReadiness returns whether q is ready to be read from.\n-func (q *queue) readReadiness(t *linux.KernelTermios) waiter.EventMask {\n- if q.readBuf.Len() > 0 && q.readable {\n- return waiter.EventIn\n- }\n- return waiter.EventMask(0)\n-}\n-\n-// writeReadiness returns whether q is ready to be written to.\n-func (q *queue) writeReadiness(t *linux.KernelTermios) waiter.EventMask {\n- // Like Linux, we don't impose a maximum size on what can be enqueued.\n- return waiter.EventOut\n-}\n-\n-// readableSize writes the number of readable bytes to userspace.\n-func (q *queue) readableSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n- var size int32\n- if q.readable {\n- size = int32(q.readBuf.Len())\n- }\n-\n- _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), size, usermem.IOOpts{\n- AddressSpaceActive: true,\n- })\n- return err\n-\n-}\n-\n// lineDiscipline dictates how input and output are handled between the\n// pseudoterminal (pty) master and slave. It can be configured to alter I/O,\n// modify control characters (e.g. Ctrl-C for SIGINT), etc. The following man\n@@ -160,18 +70,12 @@ func (q *queue) readableSize(ctx context.Context, io usermem.IO, args arch.Sysca\n//\n// Lock order:\n// termiosMu\n-// inMu\n-// outMu\n+// inQueue.mu\n+// outQueue.mu\ntype lineDiscipline struct {\n- // inMu protects inQueue.\n- inMu sync.Mutex `state:\"nosave\"`\n-\n// inQueue is the input queue of the terminal.\ninQueue queue\n- // outMu protects outQueue.\n- outMu sync.Mutex `state:\"nosave\"`\n-\n// outQueue is the output queue of the terminal.\noutQueue queue\n@@ -209,8 +113,6 @@ func (l *lineDiscipline) getTermios(ctx context.Context, io usermem.IO, args arc\nfunc (l *lineDiscipline) setTermios(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\nl.termiosMu.Lock()\ndefer l.termiosMu.Unlock()\n- l.inMu.Lock()\n- defer l.inMu.Unlock()\noldCanonEnabled := l.termios.LEnabled(linux.ICANON)\n// We must copy a Termios struct, not KernelTermios.\nvar t linux.Termios\n@@ -223,17 +125,13 @@ func (l *lineDiscipline) setTermios(ctx context.Context, io usermem.IO, args arc\n// buffer to its read buffer. 
Anything already in the read buffer is\n// now readable.\nif oldCanonEnabled && !l.termios.LEnabled(linux.ICANON) {\n- l.pushWaitBuf(&l.inQueue)\n+ l.inQueue.pushWaitBuf(l)\n}\nreturn 0, err\n}\nfunc (l *lineDiscipline) masterReadiness() waiter.EventMask {\n- l.inMu.Lock()\n- defer l.inMu.Unlock()\n- l.outMu.Lock()\n- defer l.outMu.Unlock()\n// We don't have to lock a termios because the default master termios\n// is immutable.\nreturn l.inQueue.writeReadiness(&linux.MasterTermios) | l.outQueue.readReadiness(&linux.MasterTermios)\n@@ -242,156 +140,49 @@ func (l *lineDiscipline) masterReadiness() waiter.EventMask {\nfunc (l *lineDiscipline) slaveReadiness() waiter.EventMask {\nl.termiosMu.RLock()\ndefer l.termiosMu.RUnlock()\n- l.inMu.Lock()\n- defer l.inMu.Unlock()\n- l.outMu.Lock()\n- defer l.outMu.Unlock()\nreturn l.outQueue.writeReadiness(&l.termios) | l.inQueue.readReadiness(&l.termios)\n}\nfunc (l *lineDiscipline) inputQueueReadSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n- l.inMu.Lock()\n- defer l.inMu.Unlock()\nreturn l.inQueue.readableSize(ctx, io, args)\n}\nfunc (l *lineDiscipline) inputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) {\nl.termiosMu.RLock()\ndefer l.termiosMu.RUnlock()\n- l.inMu.Lock()\n- defer l.inMu.Unlock()\n- return l.queueRead(ctx, dst, &l.inQueue)\n+ return l.inQueue.read(ctx, dst, l)\n}\nfunc (l *lineDiscipline) inputQueueWrite(ctx context.Context, src usermem.IOSequence) (int64, error) {\nl.termiosMu.RLock()\ndefer l.termiosMu.RUnlock()\n- l.inMu.Lock()\n- defer l.inMu.Unlock()\n- return l.queueWrite(ctx, src, &l.inQueue)\n+ return l.inQueue.write(ctx, src, l)\n}\nfunc (l *lineDiscipline) outputQueueReadSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n- l.outMu.Lock()\n- defer l.outMu.Unlock()\nreturn l.outQueue.readableSize(ctx, io, args)\n}\nfunc (l *lineDiscipline) outputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) {\nl.termiosMu.RLock()\ndefer l.termiosMu.RUnlock()\n- l.outMu.Lock()\n- defer l.outMu.Unlock()\n- return l.queueRead(ctx, dst, &l.outQueue)\n+ return l.outQueue.read(ctx, dst, l)\n}\nfunc (l *lineDiscipline) outputQueueWrite(ctx context.Context, src usermem.IOSequence) (int64, error) {\nl.termiosMu.RLock()\ndefer l.termiosMu.RUnlock()\n- l.outMu.Lock()\n- defer l.outMu.Unlock()\n- return l.queueWrite(ctx, src, &l.outQueue)\n-}\n-\n-// queueRead reads from q to userspace.\n-//\n-// Preconditions:\n-// * l.termiosMu must be held for reading.\n-// * q's lock must be held.\n-func (l *lineDiscipline) queueRead(ctx context.Context, dst usermem.IOSequence, q *queue) (int64, error) {\n- if !q.readable {\n- return 0, syserror.ErrWouldBlock\n- }\n-\n- // Read out from the read buffer.\n- n := canonMaxBytes\n- if n > int(dst.NumBytes()) {\n- n = int(dst.NumBytes())\n- }\n- if n > q.readBuf.Len() {\n- n = q.readBuf.Len()\n- }\n- n, err := dst.Writer(ctx).Write(q.readBuf.Bytes()[:n])\n- if err != nil {\n- return 0, err\n- }\n- // Discard bytes read out.\n- q.readBuf.Next(n)\n-\n- // If we read everything, this queue is no longer readable.\n- if q.readBuf.Len() == 0 {\n- q.readable = false\n- }\n-\n- // Move data from the queue's wait buffer to its read buffer.\n- l.pushWaitBuf(q)\n-\n- // If state changed, notify any waiters. 
If nothing was available to\n- // read, let the caller know we could block.\n- if n > 0 {\n- q.Notify(waiter.EventOut)\n- } else {\n- return 0, syserror.ErrWouldBlock\n- }\n- return int64(n), nil\n-}\n-\n-// queueWrite writes to q from userspace.\n-//\n-// Preconditions:\n-// * l.termiosMu must be held for reading.\n-// * q's lock must be held.\n-func (l *lineDiscipline) queueWrite(ctx context.Context, src usermem.IOSequence, q *queue) (int64, error) {\n- // TODO: Use CopyInTo/safemem to avoid extra copying.\n- // Copy in the bytes to write from user-space.\n- b := make([]byte, src.NumBytes())\n- n, err := src.CopyIn(ctx, b)\n- if err != nil {\n- return 0, err\n- }\n- b = b[:n]\n- return l.queueWriteBytes(b, q)\n+ return l.outQueue.write(ctx, src, l)\n}\n-// queueWriteBytes writes to q from b.\n-//\n-// Precondition:\n-// * l.termiosMu must be held for reading.\n-// * q's lock must be held.\n-func (l *lineDiscipline) queueWriteBytes(b []byte, q *queue) (int64, error) {\n- // Write as much as possible to the read buffer.\n- n := q.transform(l, q, b)\n-\n- // Write remaining data to the wait buffer.\n- nWaiting, _ := q.waitBuf.Write(b[n:])\n-\n- // If state changed, notify any waiters. If we were unable to write\n- // anything, let the caller know we could block.\n- if n > 0 {\n- q.Notify(waiter.EventIn)\n- } else if nWaiting == 0 {\n- return 0, syserror.ErrWouldBlock\n- }\n- return int64(n + nWaiting), nil\n-}\n-\n-// pushWaitBuf fills the queue's read buffer with data from the wait buffer.\n-//\n-// Precondition:\n-// * l.termiosMu must be held for reading.\n-// * l.inMu must be held.\n-func (l *lineDiscipline) pushWaitBuf(q *queue) {\n- // Remove bytes from the wait buffer and move them to the read buffer.\n- n := q.transform(l, q, q.waitBuf.Bytes())\n- q.waitBuf.Next(n)\n-\n- // If state changed, notify any waiters.\n- if n > 0 {\n- q.Notify(waiter.EventIn)\n- }\n+// transformer is a helper interface to make it easier to stateify queue.\n+type transformer interface {\n+ // transform functions require queue's mutex to be held.\n+ transform(*lineDiscipline, *queue, []byte) int\n}\n-// outputQueueTransformer implements transformer.\n+// outputQueueTransformer implements transformer. It performs line discipline\n+// transformations on the output queue.\ntype outputQueueTransformer struct{}\n// transform does output processing for one end of the pty. See\n@@ -399,7 +190,7 @@ type outputQueueTransformer struct{}\n//\n// Precondition:\n// * l.termiosMu must be held for reading.\n-// * q's mutex must be held.\n+// * q.mu must be held.\nfunc (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte) int {\n// transformOutput is effectively always in noncanonical mode, as the\n// master termios never has ICANON set.\n@@ -461,7 +252,8 @@ func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte\nreturn ret\n}\n-// inputQueueTransformer implements transformer.\n+// inputQueueTransformer implements transformer. It performs line discipline\n+// transformations on the input queue.\ntype inputQueueTransformer struct{}\n// transform does input processing for one end of the pty. 
Characters read are\n@@ -471,7 +263,7 @@ type inputQueueTransformer struct{}\n//\n// Precondition:\n// * l.termiosMu must be held for reading.\n-// * q's mutex must be held.\n+// * q.mu must be held.\nfunc (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte) int {\n// If there's a line waiting to be read in canonical mode, don't write\n// anything else to the read buffer.\n@@ -528,11 +320,7 @@ func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte)\nq.readBuf.WriteRune(c)\n// Anything written to the readBuf will have to be echoed.\nif l.termios.LEnabled(linux.ECHO) {\n- // We can't defer Unlock here because we may\n- // Lock/Unlock l.outMu multiple times in this loop.\n- l.outMu.Lock()\n- l.queueWriteBytes(cBytes, &l.outQueue)\n- l.outMu.Unlock()\n+ l.outQueue.writeBytes(cBytes, l)\n}\n// If we finish a line, make it available for reading.\n@@ -553,6 +341,10 @@ func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte)\n// shouldDiscard returns whether c should be discarded. In canonical mode, if\n// too many bytes are enqueued, we keep reading input and discarding it until\n// we find a terminating character. Signal/echo processing still occurs.\n+//\n+// Precondition:\n+// * l.termiosMu must be held for reading.\n+// * q.mu must be held.\nfunc (l *lineDiscipline) shouldDiscard(q *queue, c rune) bool {\nreturn l.termios.LEnabled(linux.ICANON) && q.readBuf.Len()+utf8.RuneLen(c) >= canonMaxBytes && !l.termios.IsTerminating(c)\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fs/tty/queue.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package tty\n+\n+import (\n+ \"bytes\"\n+ \"sync\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n+ \"gvisor.googlesource.com/gvisor/pkg/syserror\"\n+ \"gvisor.googlesource.com/gvisor/pkg/waiter\"\n+)\n+\n+// queue represents one of the input or output queues between a pty master and\n+// slave. Bytes written to a queue are added to the read buffer until it is\n+// full, at which point they are written to the wait buffer. Bytes are\n+// processed (i.e. undergo termios transformations) as they are added to the\n+// read buffer. The read buffer is readable when its length is nonzero and\n+// readable is true.\n+type queue struct {\n+ // mu protects everything in queue.\n+ mu sync.Mutex `state:\"nosave\"`\n+\n+ waiter.Queue `state:\"nosave\"`\n+\n+ // readBuf is buffer of data ready to be read when readable is true.\n+ // This data has been processed.\n+ readBuf bytes.Buffer `state:\".([]byte)\"`\n+\n+ // waitBuf contains data that can't fit into readBuf. It is put here\n+ // until it can be loaded into the read buffer. waitBuf contains data\n+ // that hasn't been processed.\n+ waitBuf bytes.Buffer `state:\".([]byte)\"`\n+\n+ // readable indicates whether the read buffer can be read from. In\n+ // canonical mode, there can be an unterminated line in the read buffer,\n+ // so readable must be checked.\n+ readable bool\n+\n+ // transform is the the queue's function for transforming bytes\n+ // entering the queue. 
For example, transform might convert all '\\r's\n+ // entering the queue to '\\n's.\n+ transformer\n+}\n+\n+// saveReadBuf is invoked by stateify.\n+func (q *queue) saveReadBuf() []byte {\n+ return append([]byte(nil), q.readBuf.Bytes()...)\n+}\n+\n+// loadReadBuf is invoked by stateify.\n+func (q *queue) loadReadBuf(b []byte) {\n+ q.readBuf.Write(b)\n+}\n+\n+// saveWaitBuf is invoked by stateify.\n+func (q *queue) saveWaitBuf() []byte {\n+ return append([]byte(nil), q.waitBuf.Bytes()...)\n+}\n+\n+// loadWaitBuf is invoked by stateify.\n+func (q *queue) loadWaitBuf(b []byte) {\n+ q.waitBuf.Write(b)\n+}\n+\n+// readReadiness returns whether q is ready to be read from.\n+func (q *queue) readReadiness(t *linux.KernelTermios) waiter.EventMask {\n+ q.mu.Lock()\n+ defer q.mu.Unlock()\n+ if q.readBuf.Len() > 0 && q.readable {\n+ return waiter.EventIn\n+ }\n+ return waiter.EventMask(0)\n+}\n+\n+// writeReadiness returns whether q is ready to be written to.\n+func (q *queue) writeReadiness(t *linux.KernelTermios) waiter.EventMask {\n+ // Like Linux, we don't impose a maximum size on what can be enqueued.\n+ return waiter.EventOut\n+}\n+\n+// readableSize writes the number of readable bytes to userspace.\n+func (q *queue) readableSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n+ q.mu.Lock()\n+ defer q.mu.Unlock()\n+ var size int32\n+ if q.readable {\n+ size = int32(q.readBuf.Len())\n+ }\n+\n+ _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), size, usermem.IOOpts{\n+ AddressSpaceActive: true,\n+ })\n+ return err\n+\n+}\n+\n+// read reads from q to userspace.\n+//\n+// Preconditions:\n+// * l.termiosMu must be held for reading.\n+func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipline) (int64, error) {\n+ q.mu.Lock()\n+ defer q.mu.Unlock()\n+ if !q.readable {\n+ return 0, syserror.ErrWouldBlock\n+ }\n+\n+ // Read out from the read buffer.\n+ n := canonMaxBytes\n+ if n > int(dst.NumBytes()) {\n+ n = int(dst.NumBytes())\n+ }\n+ if n > q.readBuf.Len() {\n+ n = q.readBuf.Len()\n+ }\n+ n, err := dst.Writer(ctx).Write(q.readBuf.Bytes()[:n])\n+ if err != nil {\n+ return 0, err\n+ }\n+ // Discard bytes read out.\n+ q.readBuf.Next(n)\n+\n+ // If we read everything, this queue is no longer readable.\n+ if q.readBuf.Len() == 0 {\n+ q.readable = false\n+ }\n+\n+ // Move data from the queue's wait buffer to its read buffer.\n+ q.pushWaitBufLocked(l)\n+\n+ // If state changed, notify any waiters. 
If nothing was available to\n+ // read, let the caller know we could block.\n+ if n > 0 {\n+ q.Notify(waiter.EventOut)\n+ } else {\n+ return 0, syserror.ErrWouldBlock\n+ }\n+ return int64(n), nil\n+}\n+\n+// write writes to q from userspace.\n+//\n+// Preconditions:\n+// * l.termiosMu must be held for reading.\n+func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscipline) (int64, error) {\n+ // TODO: Use CopyInTo/safemem to avoid extra copying.\n+ // Copy in the bytes to write from user-space.\n+ b := make([]byte, src.NumBytes())\n+ n, err := src.CopyIn(ctx, b)\n+ if err != nil {\n+ return 0, err\n+ }\n+ b = b[:n]\n+ return q.writeBytes(b, l)\n+}\n+\n+// writeBytes writes to q from b.\n+//\n+// Preconditions:\n+// * l.termiosMu must be held for reading.\n+func (q *queue) writeBytes(b []byte, l *lineDiscipline) (int64, error) {\n+ q.mu.Lock()\n+ defer q.mu.Unlock()\n+ // Write as much as possible to the read buffer.\n+ n := q.transform(l, q, b)\n+\n+ // Write remaining data to the wait buffer.\n+ nWaiting, _ := q.waitBuf.Write(b[n:])\n+\n+ // If state changed, notify any waiters. If we were unable to write\n+ // anything, let the caller know we could block.\n+ if n > 0 {\n+ q.Notify(waiter.EventIn)\n+ } else if nWaiting == 0 {\n+ return 0, syserror.ErrWouldBlock\n+ }\n+ return int64(n + nWaiting), nil\n+}\n+\n+// pushWaitBuf fills the queue's read buffer with data from the wait buffer.\n+//\n+// Preconditions:\n+// * l.termiosMu must be held for reading.\n+func (q *queue) pushWaitBuf(l *lineDiscipline) {\n+ q.mu.Lock()\n+ defer q.mu.Unlock()\n+ q.pushWaitBufLocked(l)\n+}\n+\n+// Preconditions:\n+// * l.termiosMu must be held for reading.\n+// * q.mu must be locked.\n+func (q *queue) pushWaitBufLocked(l *lineDiscipline) {\n+ // Remove bytes from the wait buffer and move them to the read buffer.\n+ n := q.transform(l, q, q.waitBuf.Bytes())\n+ q.waitBuf.Next(n)\n+\n+ // If state changed, notify any waiters.\n+ if n > 0 {\n+ q.Notify(waiter.EventIn)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Sentry: split tty.queue into its own file.
Minor refactor. line_discipline.go was home to 2 large structs (lineDiscipline
and queue), and queue is now large enough IMO to get its own file.
Also moves queue locks into the queue struct, making locking simpler.
PiperOrigin-RevId: 200080301
Change-Id: Ia75a0e9b3d9ac8d7e5a0f0099a54e1f5b8bdea34 |
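The refactor above leaves `queue` with its own mutex, so buffer operations lock locally instead of relying on a line-discipline-wide lock. Below is a minimal sketch of that "struct owns its lock" shape; the names (`byteQueue`, `push`, `pop`) are illustrative stand-ins, not the gVisor tty code.

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

// byteQueue is a tiny stand-in for tty.queue: the buffer and the mutex that
// guards it live in the same struct, so every method can lock locally.
type byteQueue struct {
	mu       sync.Mutex
	readBuf  bytes.Buffer
	readable bool
}

func (q *byteQueue) push(b []byte) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.readBuf.Write(b)
	q.readable = q.readBuf.Len() > 0
}

func (q *byteQueue) pop(max int) []byte {
	q.mu.Lock()
	defer q.mu.Unlock()
	out := make([]byte, max)
	n, _ := q.readBuf.Read(out)
	q.readable = q.readBuf.Len() > 0
	return out[:n]
}

func main() {
	var q byteQueue
	q.push([]byte("hello"))
	out := q.pop(3)
	fmt.Printf("%s readable=%v\n", out, q.readable) // hel readable=true
}
```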
259,992 | 11.06.2018 13:34:27 | 25,200 | 7260363751915d21538c13b08b5bb6a48d0f4f8e | Add O_TRUNC handling in openat | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_file.go",
"new_path": "pkg/sentry/syscalls/linux/sys_file.go",
"diff": "@@ -147,21 +147,25 @@ func openAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint) (fd u\n}\nfileFlags := linuxToFlags(flags)\n- isDir := fs.IsDir(d.Inode.StableAttr)\n-\n+ if fs.IsDir(d.Inode.StableAttr) {\n+ // Don't allow directories to be opened writable.\n+ if fileFlags.Write {\n+ return syserror.EISDIR\n+ }\n+ } else {\n// If O_DIRECTORY is set, but the file is not a directory, then fail.\n- if fileFlags.Directory && !isDir {\n+ if fileFlags.Directory {\nreturn syserror.ENOTDIR\n}\n-\n// If it's a directory, then make sure.\n- if dirPath && !isDir {\n+ if dirPath {\nreturn syserror.ENOTDIR\n}\n-\n- // Don't allow directories to be opened writable.\n- if isDir && fileFlags.Write {\n- return syserror.EISDIR\n+ if fileFlags.Write && flags&syscall.O_TRUNC != 0 {\n+ if err := d.Inode.Truncate(t, d, 0); err != nil {\n+ return err\n+ }\n+ }\n}\nfile, err := d.Inode.GetFile(t, d, fileFlags)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add O_TRUNC handling in openat
PiperOrigin-RevId: 200103677
Change-Id: I3efb565c30c64d35f8fd7b5c05ed78dcc2990c51 |
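For reference, this is the userspace-visible behavior the change implements inside the sentry: opening an existing regular file for writing with `O_TRUNC` truncates it during `open(2)`. The sketch below is a plain host-side demo (the `/tmp` path is arbitrary), not gVisor code.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

func main() {
	path := "/tmp/otrunc-demo.txt" // demo path; nothing special about it
	if err := ioutil.WriteFile(path, []byte("hello world"), 0644); err != nil {
		panic(err)
	}

	// O_TRUNC with write access truncates the file as part of open(2);
	// the diff above performs the equivalent Truncate call in openAt.
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0)
	if err != nil {
		panic(err)
	}
	f.Close()

	fi, err := os.Stat(path)
	if err != nil {
		panic(err)
	}
	fmt.Println("size after O_TRUNC open:", fi.Size()) // 0
}
```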
260,013 | 11.06.2018 15:33:07 | 25,200 | 0412f17e06670fb1f1d1d85ddd73bbadde40c087 | rpcinet is treating EAGAIN and EWOULDBLOCK as different errnos. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/socket.go",
"new_path": "pkg/sentry/socket/rpcinet/socket.go",
"diff": "@@ -228,7 +228,7 @@ func (s *socketOperations) Accept(t *kernel.Task, peerRequested bool, flags int,\npayload, se := rpcAccept(t, s.fd, peerRequested)\n// Check if we need to block.\n- if blocking && se == syserr.ErrWouldBlock {\n+ if blocking && se == syserr.ErrTryAgain {\n// Register for notifications.\ne, ch := waiter.NewChannelEntry(nil)\ns.EventRegister(&e, waiter.EventIn)\n@@ -237,7 +237,7 @@ func (s *socketOperations) Accept(t *kernel.Task, peerRequested bool, flags int,\n// Try to accept the connection again; if it fails, then wait until we\n// get a notification.\nfor {\n- if payload, se = rpcAccept(t, s.fd, peerRequested); se != syserr.ErrWouldBlock {\n+ if payload, se = rpcAccept(t, s.fd, peerRequested); se != syserr.ErrTryAgain {\nbreak\n}\n@@ -471,7 +471,7 @@ func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\n}\nreturn int(res.Length), res.Address.GetAddress(), res.Address.GetLength(), socket.ControlMessages{}, syserr.FromError(e)\n}\n- if err != syserr.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 {\n+ if err != syserr.ErrWouldBlock && err != syserr.ErrTryAgain || flags&linux.MSG_DONTWAIT != 0 {\nreturn 0, nil, 0, socket.ControlMessages{}, err\n}\n@@ -490,7 +490,7 @@ func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\n}\nreturn int(res.Length), res.Address.GetAddress(), res.Address.GetLength(), socket.ControlMessages{}, syserr.FromError(e)\n}\n- if err != syserr.ErrWouldBlock {\n+ if err != syserr.ErrWouldBlock && err != syserr.ErrTryAgain {\nreturn 0, nil, 0, socket.ControlMessages{}, err\n}\n@@ -546,7 +546,7 @@ func (s *socketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []\n}}\nn, err := rpcSendMsg(t, req)\n- if err != syserr.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 {\n+ if err != syserr.ErrWouldBlock && err != syserr.ErrTryAgain || flags&linux.MSG_DONTWAIT != 0 {\nreturn int(n), err\n}\n@@ -558,7 +558,7 @@ func (s *socketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []\nfor {\nn, err := rpcSendMsg(t, req)\n- if err != syserr.ErrWouldBlock {\n+ if err != syserr.ErrWouldBlock && err != syserr.ErrTryAgain {\nreturn int(n), err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | rpcinet is treating EAGAIN and EWOULDBLOCK as different errnos.
PiperOrigin-RevId: 200124614
Change-Id: I38a7b083f1464a2a586fe24db648e624c455fec5 |
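The reason the two error names must be treated as one condition is that on Linux `EAGAIN` and `EWOULDBLOCK` are the same errno, so the host side of the RPC may surface either. A quick host check:

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	// On Linux, EAGAIN and EWOULDBLOCK share errno 11, so retry logic
	// that matches only one of them silently misses the other.
	fmt.Println(syscall.EAGAIN == syscall.EWOULDBLOCK)         // true
	fmt.Println(int(syscall.EAGAIN), int(syscall.EWOULDBLOCK)) // 11 11
}
```

Inside the sentry the same condition can show up as either `syserr.ErrWouldBlock` or `syserr.ErrTryAgain`, which is why the checks in the diff match both.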
259,992 | 11.06.2018 16:44:56 | 25,200 | ea4a468fbaacd55597ce89e3eabd2bb42746427b | Set CLOEXEC option to sockets
hostinet/socket.go: the Sentry doesn't spawn new processes, but it doesn't hurt to protect the socket from leaking.
unet/unet.go: should be setting closing on exec. The FD is explicitly donated to children when needed. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/socket.go",
"new_path": "pkg/sentry/socket/hostinet/socket.go",
"diff": "@@ -193,7 +193,7 @@ func (s *socketOperations) Accept(t *kernel.Task, peerRequested bool, flags int,\n// Conservatively ignore all flags specified by the application and add\n// SOCK_NONBLOCK since socketOperations requires it.\n- fd, syscallErr := accept4(s.fd, peerAddrPtr, peerAddrlenPtr, syscall.SOCK_NONBLOCK)\n+ fd, syscallErr := accept4(s.fd, peerAddrPtr, peerAddrlenPtr, syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC)\nif blocking {\nvar ch chan struct{}\nfor syscallErr == syserror.ErrWouldBlock {\n@@ -207,7 +207,7 @@ func (s *socketOperations) Accept(t *kernel.Task, peerRequested bool, flags int,\ns.EventRegister(&e, waiter.EventIn)\ndefer s.EventUnregister(&e)\n}\n- fd, syscallErr = accept4(s.fd, peerAddrPtr, peerAddrlenPtr, syscall.SOCK_NONBLOCK)\n+ fd, syscallErr = accept4(s.fd, peerAddrPtr, peerAddrlenPtr, syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC)\n}\n}\n@@ -545,7 +545,7 @@ func (p *socketProvider) Socket(t *kernel.Task, stypeflags unix.SockType, protoc\n// Conservatively ignore all flags specified by the application and add\n// SOCK_NONBLOCK since socketOperations requires it. Pass a protocol of 0\n// to simplify the syscall filters, since 0 and IPPROTO_* are equivalent.\n- fd, err := syscall.Socket(p.family, stype|syscall.SOCK_NONBLOCK, 0)\n+ fd, err := syscall.Socket(p.family, stype|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0)\nif err != nil {\nreturn nil, syserr.FromError(err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/unet/unet.go",
"new_path": "pkg/unet/unet.go",
"diff": "@@ -201,7 +201,7 @@ func (s *Socket) enterFD() (int, bool) {\n// SocketPair creates a pair of connected sockets.\nfunc SocketPair(packet bool) (*Socket, *Socket, error) {\n// Make a new pair.\n- fds, err := syscall.Socketpair(syscall.AF_UNIX, socketType(packet), 0)\n+ fds, err := syscall.Socketpair(syscall.AF_UNIX, socketType(packet)|syscall.SOCK_CLOEXEC, 0)\nif err != nil {\nreturn nil, nil, err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Set CLOEXEC option to sockets
hostinet/socket.go: the Sentry doesn't spawn new processes, but it doesn't hurt to protect the socket from leaking.
unet/unet.go: should be setting closing on exec. The FD is explicitly donated to children when needed.
PiperOrigin-RevId: 200135682
Change-Id: Ia8a45ced1e00a19420c8611b12e7a8ee770f89cb |
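The flag being added is the standard atomic close-on-exec bit for `socket(2)`, `accept4(2)`, and `socketpair(2)`. A small host-side sketch (not gVisor code) showing its effect on the new descriptor:

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	// SOCK_CLOEXEC marks the descriptor close-on-exec at creation time,
	// so there is no window in which a concurrent fork+exec could leak it.
	fd, err := syscall.Socket(syscall.AF_UNIX,
		syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fd)

	// FD_CLOEXEC should now be reported by F_GETFD.
	flags, _, errno := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), syscall.F_GETFD, 0)
	if errno != 0 {
		panic(errno)
	}
	fmt.Println("FD_CLOEXEC set:", flags&syscall.FD_CLOEXEC != 0) // true
}
```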
259,858 | 11.06.2018 17:56:18 | 25,200 | 09b0a9c320bd777bc52384bd0ec91ecfc61e481d | Handle all exception vectors. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine_amd64.go",
"new_path": "pkg/sentry/platform/kvm/machine_amd64.go",
"diff": "@@ -137,6 +137,18 @@ func (c *vCPU) initArchState() error {\nreturn c.setSystemTime()\n}\n+// nonCanonical generates a canonical address return.\n+//\n+//go:nosplit\n+func nonCanonical(addr uint64, signal int32) (*arch.SignalInfo, usermem.AccessType, error) {\n+ info := &arch.SignalInfo{\n+ Signo: signal,\n+ Code: arch.SignalInfoKernel,\n+ }\n+ info.SetAddr(addr) // Include address.\n+ return info, usermem.NoAccess, platform.ErrContextSignal\n+}\n+\n// fault generates an appropriate fault return.\n//\n//go:nosplit\n@@ -169,6 +181,17 @@ func (c *vCPU) fault(signal int32) (*arch.SignalInfo, usermem.AccessType, error)\n// SwitchToUser unpacks architectural-details.\nfunc (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts) (*arch.SignalInfo, usermem.AccessType, error) {\n+ // Check for canonical addresses.\n+ if regs := switchOpts.Registers; !ring0.IsCanonical(regs.Rip) {\n+ return nonCanonical(regs.Rip, int32(syscall.SIGSEGV))\n+ } else if !ring0.IsCanonical(regs.Rsp) {\n+ return nonCanonical(regs.Rsp, int32(syscall.SIGBUS))\n+ } else if !ring0.IsCanonical(regs.Fs_base) {\n+ return nonCanonical(regs.Fs_base, int32(syscall.SIGBUS))\n+ } else if !ring0.IsCanonical(regs.Gs_base) {\n+ return nonCanonical(regs.Gs_base, int32(syscall.SIGBUS))\n+ }\n+\n// Assign PCIDs.\nif c.PCIDs != nil {\nvar requireFlushPCID bool // Force a flush?\n@@ -205,7 +228,11 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts) (*arch.SignalInfo, user\ninfo.SetAddr(switchOpts.Registers.Rip) // Include address.\nreturn info, usermem.AccessType{}, platform.ErrContextSignal\n- case ring0.GeneralProtectionFault:\n+ case ring0.GeneralProtectionFault,\n+ ring0.SegmentNotPresent,\n+ ring0.BoundRangeExceeded,\n+ ring0.InvalidTSS,\n+ ring0.StackSegmentFault:\ninfo := &arch.SignalInfo{\nSigno: int32(syscall.SIGSEGV),\nCode: arch.SignalInfoKernel,\n@@ -229,7 +256,16 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts) (*arch.SignalInfo, user\ninfo.SetAddr(switchOpts.Registers.Rip) // Include address.\nreturn info, usermem.AccessType{}, platform.ErrContextSignal\n- case ring0.X87FloatingPointException:\n+ case ring0.Overflow:\n+ info := &arch.SignalInfo{\n+ Signo: int32(syscall.SIGFPE),\n+ Code: 1, // FPE_INTOVF (integer overflow).\n+ }\n+ info.SetAddr(switchOpts.Registers.Rip) // Include address.\n+ return info, usermem.AccessType{}, platform.ErrContextSignal\n+\n+ case ring0.X87FloatingPointException,\n+ ring0.SIMDFloatingPointException:\ninfo := &arch.SignalInfo{\nSigno: int32(syscall.SIGFPE),\nCode: 7, // FPE_FLTINV (invalid operation).\n@@ -237,7 +273,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts) (*arch.SignalInfo, user\ninfo.SetAddr(switchOpts.Registers.Rip) // Include address.\nreturn info, usermem.AccessType{}, platform.ErrContextSignal\n- case ring0.Vector(bounce):\n+ case ring0.Vector(bounce): // ring0.VirtualizationException\nreturn nil, usermem.NoAccess, platform.ErrContextInterrupt\ncase ring0.AlignmentCheck:\n@@ -255,6 +291,12 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts) (*arch.SignalInfo, user\n// directly into the instance.\nreturn c.fault(int32(syscall.SIGBUS))\n+ case ring0.DeviceNotAvailable,\n+ ring0.DoubleFault,\n+ ring0.CoprocessorSegmentOverrun,\n+ ring0.MachineCheck,\n+ ring0.SecurityException:\n+ fallthrough\ndefault:\npanic(fmt.Sprintf(\"unexpected vector: 0x%x\", vector))\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/kernel_amd64.go",
"new_path": "pkg/sentry/platform/ring0/kernel_amd64.go",
"diff": "@@ -161,18 +161,17 @@ func IsCanonical(addr uint64) bool {\n// Also note that this function transitively depends on the compiler generating\n// code that uses IP-relative addressing inside of absolute addresses. That's\n// the case for amd64, but may not be the case for other architectures.\n+//\n+// Precondition: the Rip, Rsp, Fs and Gs registers must be canonical.\n+\n//\n//go:nosplit\nfunc (c *CPU) SwitchToUser(switchOpts SwitchOpts) (vector Vector) {\n- // Check for canonical addresses.\n- regs := switchOpts.Registers\n- if !IsCanonical(regs.Rip) || !IsCanonical(regs.Rsp) || !IsCanonical(regs.Fs_base) || !IsCanonical(regs.Gs_base) {\n- return GeneralProtectionFault\n- }\nuserCR3 := switchOpts.PageTables.CR3(!switchOpts.Flush, switchOpts.UserPCID)\nkernelCR3 := c.kernel.PageTables.CR3(true, switchOpts.KernelPCID)\n// Sanitize registers.\n+ regs := switchOpts.Registers\nregs.Eflags &= ^uint64(UserFlagsClear)\nregs.Eflags |= UserFlagsSet\nregs.Cs = uint64(Ucode64) // Required for iret.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Handle all exception vectors.
PiperOrigin-RevId: 200144655
Change-Id: I5a753c74b75007b7714d6fe34aa0d2e845dc5c41 |
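The new `nonCanonical` path exists because x86-64 rejects non-canonical virtual addresses: with 48-bit addressing, bits 63..47 must all be copies of bit 47. A standalone sketch of that predicate (illustrative; not the `ring0.IsCanonical` source):

```go
package main

import "fmt"

// isCanonical reports whether addr is canonical under 48-bit virtual
// addressing: the top 17 bits (63..47) must be all zeros or all ones.
func isCanonical(addr uint64) bool {
	upper := addr >> 47
	return upper == 0 || upper == 0x1ffff
}

func main() {
	fmt.Println(isCanonical(0x00007fffffffffff)) // true: top of the lower half
	fmt.Println(isCanonical(0xffff800000000000)) // true: bottom of the upper half
	fmt.Println(isCanonical(0x0000800000000000)) // false: inside the hole
}
```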
259,858 | 11.06.2018 18:16:13 | 25,200 | 41f766893ab804cd2d3ccfd782d97c022e987f79 | Minor ring0 interface cleanup.
Remove unused methods.
Provide declaration for asm function. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine.go",
"new_path": "pkg/sentry/platform/kvm/machine.go",
"diff": "@@ -40,7 +40,7 @@ type machine struct {\nnextSlot uint32\n// kernel is the set of global structures.\n- kernel *ring0.Kernel\n+ kernel ring0.Kernel\n// mappingCache is used for mapPhysical.\nmappingCache sync.Map\n@@ -135,7 +135,7 @@ func newMachine(vm int, vCPUs int) (*machine, error) {\n// issues when you've got > n active threads.)\nvCPUs = n\n}\n- m.kernel = ring0.New(ring0.KernelOpts{\n+ m.kernel.Init(ring0.KernelOpts{\nPageTables: pagetables.New(newAllocator()),\n})\n@@ -158,7 +158,7 @@ func newMachine(vm int, vCPUs int) (*machine, error) {\nfd: int(fd),\nmachine: m,\n}\n- c.CPU.Init(m.kernel)\n+ c.CPU.Init(&m.kernel)\nc.CPU.KernelSyscall = bluepillSyscall\nc.CPU.KernelException = bluepillException\nm.vCPUs[uint64(-id)] = c // See above.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/kernel.go",
"new_path": "pkg/sentry/platform/ring0/kernel.go",
"diff": "package ring0\n-// New creates a new kernel.\n+// Init initializes a new kernel.\n//\n// N.B. that constraints on KernelOpts must be satisfied.\n//\n-// Init must have been called.\n-func New(opts KernelOpts) *Kernel {\n- k := new(Kernel)\n+//go:nosplit\n+func (k *Kernel) Init(opts KernelOpts) {\nk.init(opts)\n- return k\n-}\n-\n-// NewCPU creates a new CPU associated with this Kernel.\n-//\n-// Note that execution of the new CPU must begin at Start, with constraints as\n-// documented. Initialization is not completed by this method alone.\n-//\n-// See also Init.\n-func (k *Kernel) NewCPU() *CPU {\n- c := new(CPU)\n- c.Init(k)\n- return c\n}\n// Halt halts execution.\n@@ -56,8 +42,7 @@ func defaultSyscall() { Halt() }\n//go:nosplit\nfunc defaultException(Vector) { Halt() }\n-// Init allows the initialization of a CPU from a kernel without allocation.\n-// The same constraints as NewCPU apply.\n+// Init initializes a new CPU.\n//\n// Init allows embedding in other objects.\nfunc (c *CPU) Init(k *Kernel) {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/lib_amd64.go",
"new_path": "pkg/sentry/platform/ring0/lib_amd64.go",
"diff": "@@ -64,6 +64,9 @@ func wrgsmsr(addr uintptr)\n// writeCR3 writes the CR3 value.\nfunc writeCR3(phys uintptr)\n+// readCR3 reads the current CR3 value.\n+func readCR3() uintptr\n+\n// readCR2 reads the current CR2 value.\nfunc readCR2() uintptr\n"
}
] | Go | Apache License 2.0 | google/gvisor | Minor ring0 interface cleanup.
- Remove unused methods.
- Provide declaration for asm function.
PiperOrigin-RevId: 200146850
Change-Id: Ic455c96ffe0d2e78ef15f824eb65d7de705b054a |
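The `New` → `Init` change is the usual Go pattern for letting a caller embed the value and initialize it in place, saving a separate allocation. A generic sketch of the shape the cleanup moves to, with made-up fields:

```go
package main

import "fmt"

// Kernel stands in for ring0.Kernel: it can be initialized in place.
type Kernel struct {
	pageTables string
}

func (k *Kernel) Init(pageTables string) {
	k.pageTables = pageTables
}

// machine embeds the Kernel by value, so newMachine performs one
// allocation instead of two.
type machine struct {
	kernel Kernel
}

func newMachine() *machine {
	m := &machine{}
	m.kernel.Init("pt0")
	return m
}

func main() {
	fmt.Println(newMachine().kernel.pageTables)
}
```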
259,992 | 12.06.2018 10:24:56 | 25,200 | 48335318a23f4f536c395e602c0cd338c4c4e890 | Enable debug logging in tests
Unit tests call runsc directly now, so all command line arguments
are valid. On the other hand, enabling debug in the test binary
doesn't affect runsc. It needs to be set in the config. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/config.go",
"new_path": "runsc/boot/config.go",
"diff": "@@ -176,18 +176,10 @@ type Config struct {\n// DisableSeccomp indicates whether seccomp syscall filters should be\n// disabled. Pardon the double negation, but default to enabled is important.\nDisableSeccomp bool\n-\n- // TestModeNoFlags indicates that the ToFlags method should return\n- // empty. This should only be used in tests, since the test runner does\n- // not know about all the flags.\n- TestModeNoFlags bool\n}\n// ToFlags returns a slice of flags that correspond to the given Config.\nfunc (c *Config) ToFlags() []string {\n- if c.TestModeNoFlags {\n- return nil\n- }\nreturn []string{\n\"--root=\" + c.RootDir,\n\"--debug=\" + strconv.FormatBool(c.Debug),\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/testutil.go",
"new_path": "runsc/test/testutil/testutil.go",
"diff": "@@ -117,12 +117,12 @@ func SetupContainerInRoot(rootDir string, spec *specs.Spec) (bundleDir string, c\n}\nconf = &boot.Config{\n- RootDir: rootDir,\n+ Debug: true,\n+ LogFormat: \"text\",\n+ LogPackets: true,\nNetwork: boot.NetworkNone,\n- // Don't add flags when calling subprocesses, since the test\n- // runner does not know about all the flags. We control the\n- // Config in the subprocess anyways, so it does not matter.\n- TestModeNoFlags: true,\n+ RootDir: rootDir,\n+ Strace: true,\n}\nreturn bundleDir, conf, nil\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable debug logging in tests
Unit tests call runsc directly now, so all command line arguments
are valid. On the other hand, enabling debug in the test binary
doesn't affect runsc. It needs to be set in the config.
PiperOrigin-RevId: 200237706
Change-Id: I0b5922db17f887f58192dbc2f8dd2fd058b76ec7 |
259,891 | 12.06.2018 11:02:35 | 25,200 | 2dc9cd7bf73d971a37fa22b52a70961f27f6c970 | runsc: enable terminals in the sandbox.
runsc now mounts the devpts filesystem, so you get a real terminal using
ssh+sshd. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/BUILD",
"new_path": "runsc/boot/BUILD",
"diff": "@@ -35,6 +35,7 @@ go_library(\n\"//pkg/sentry/fs/ramfs\",\n\"//pkg/sentry/fs/sys\",\n\"//pkg/sentry/fs/tmpfs\",\n+ \"//pkg/sentry/fs/tty\",\n\"//pkg/sentry/inet\",\n\"//pkg/sentry/kernel\",\n\"//pkg/sentry/kernel/auth\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -27,6 +27,7 @@ import (\n_ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc\"\n_ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs/sys\"\n_ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs/tmpfs\"\n+ _ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs/tty\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n@@ -109,6 +110,14 @@ func configureMounts(ctx context.Context, spec *specs.Spec, conf *Config, mns *f\nreturn err\n}\n+ // Always mount /dev/pts.\n+ if err := mountSubmount(ctx, spec, conf, mns, nil, specs.Mount{\n+ Type: \"devpts\",\n+ Destination: \"/dev/pts\",\n+ }); err != nil {\n+ return err\n+ }\n+\n// Mount proc and sys even if the user did not ask for it, as the spec\n// says we SHOULD.\nif !procMounted {\n@@ -214,7 +223,7 @@ func mountSubmount(ctx context.Context, spec *specs.Spec, conf *Config, mns *fs.\nvar fsName string\nvar useOverlay bool\nswitch m.Type {\n- case \"proc\", \"sysfs\", \"devtmpfs\":\n+ case \"devpts\", \"devtmpfs\", \"proc\", \"sysfs\":\nfsName = m.Type\ncase \"none\":\nfsName = \"sysfs\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: enable terminals in the sandbox.
runsc now mounts the devpts filesystem, so you get a real terminal using
ssh+sshd.
PiperOrigin-RevId: 200244830
Change-Id: If577c805ad0138fda13103210fa47178d8ac6605 |
259,885 | 12.06.2018 12:37:06 | 25,200 | 7a10df454b1c12b207f479cdda7338fff2875d5f | Drop MMapOpts.MappingIdentity reference in loader.mapSegment. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/elf.go",
"new_path": "pkg/sentry/loader/elf.go",
"diff": "@@ -271,6 +271,11 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f *fs.File, phdr *elf.\nPerms: prot,\nMaxPerms: usermem.AnyAccess,\n}\n+ defer func() {\n+ if mopts.MappingIdentity != nil {\n+ mopts.MappingIdentity.DecRef()\n+ }\n+ }()\nif err := f.ConfigureMMap(ctx, &mopts); err != nil {\nctx.Infof(\"File is not memory-mappable: %v\", err)\nreturn err\n"
}
] | Go | Apache License 2.0 | google/gvisor | Drop MMapOpts.MappingIdentity reference in loader.mapSegment.
PiperOrigin-RevId: 200261995
Change-Id: I7e460b18ceab2c23096bdeb7416159d6e774aaf7 |
260,013 | 12.06.2018 16:15:21 | 25,200 | c2b3f04d1c7b5d376a3fa305fc5e309e9ec81d99 | Rpcinet doesn't handle SO_RCVTIMEO properly. | [
Rpcinet already inherits socket.ReceiveTimeout; however, it's
never set on setsockopt(2). The value is currently forwarded
as an RPC and ignored as all sockets will be non-blocking
on the RPC side. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/socket.go",
"new_path": "pkg/sentry/socket/rpcinet/socket.go",
"diff": "@@ -18,6 +18,7 @@ import (\n\"syscall\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n+ \"gvisor.googlesource.com/gvisor/pkg/binary\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n@@ -318,6 +319,15 @@ func (s *socketOperations) Shutdown(t *kernel.Task, how int) *syserr.Error {\n// GetSockOpt implements socket.Socket.GetSockOpt.\nfunc (s *socketOperations) GetSockOpt(t *kernel.Task, level int, name int, outLen int) (interface{}, *syserr.Error) {\n+ // SO_RCVTIMEO is special because blocking is performed within the sentry.\n+ if level == linux.SOL_SOCKET && name == linux.SO_RCVTIMEO {\n+ if outLen < linux.SizeOfTimeval {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ return linux.NsecToTimeval(s.RecvTimeout()), nil\n+ }\n+\nstack := t.NetworkContext().(*Stack)\nid, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_GetSockOpt{&pb.GetSockOptRequest{Fd: s.fd, Level: int64(level), Name: int64(name), Length: uint32(outLen)}}}, false /* ignoreResult */)\n<-c\n@@ -332,6 +342,20 @@ func (s *socketOperations) GetSockOpt(t *kernel.Task, level int, name int, outLe\n// SetSockOpt implements socket.Socket.SetSockOpt.\nfunc (s *socketOperations) SetSockOpt(t *kernel.Task, level int, name int, opt []byte) *syserr.Error {\n+ // Because blocking actually happens within the sentry we need to inspect\n+ // this socket option to determine if it's a SO_RCVTIMEO, and if so, we will\n+ // save it and use it as the deadline for recv(2) related syscalls.\n+ if level == linux.SOL_SOCKET && name == linux.SO_RCVTIMEO {\n+ if len(opt) < linux.SizeOfTimeval {\n+ return syserr.ErrInvalidArgument\n+ }\n+\n+ var v linux.Timeval\n+ binary.Unmarshal(opt[:linux.SizeOfTimeval], usermem.ByteOrder, &v)\n+ s.SetRecvTimeout(v.ToNsecCapped())\n+ return nil\n+ }\n+\nstack := t.NetworkContext().(*Stack)\nid, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_SetSockOpt{&pb.SetSockOptRequest{Fd: s.fd, Level: int64(level), Name: int64(name), Opt: opt}}}, false /* ignoreResult */)\n<-c\n"
}
] | Go | Apache License 2.0 | google/gvisor | Rpcinet doesn't handle SO_RCVTIMEO properly.
Rpcinet already inherits socket.ReceiveTimeout; however, it's
never set on setsockopt(2). The value is currently forwarded
as an RPC and ignored as all sockets will be non-blocking
on the RPC side.
PiperOrigin-RevId: 200299260
Change-Id: I6c610ea22c808ff6420c63759dccfaeab17959dd |
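For context, this is the userspace side of the option being intercepted: `SO_RCVTIMEO` turns a blocking receive into one that fails with `EAGAIN`/`EWOULDBLOCK` once the timeout expires. A host-side sketch (not gVisor code):

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fd)

	// A 500ms receive timeout. After this change, rpcinet keeps the value
	// inside the sentry and applies it as a deadline to blocking
	// recv(2)-family syscalls instead of forwarding it over RPC.
	tv := syscall.Timeval{Sec: 0, Usec: 500000}
	if err := syscall.SetsockoptTimeval(fd, syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, &tv); err != nil {
		panic(err)
	}

	buf := make([]byte, 1)
	_, _, err = syscall.Recvfrom(fd, buf, 0)
	fmt.Println("recvfrom returned:", err) // EAGAIN after ~500ms
}
```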
259,854 | 12.06.2018 17:03:31 | 25,200 | ba426f7782d35f971820a0193cfda58485b92cad | Fix reference leak for negative dirents | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dirent.go",
"new_path": "pkg/sentry/fs/dirent.go",
"diff": "@@ -1257,6 +1257,15 @@ func (d *Dirent) destroy() {\n// Drop all weak references.\nfor _, w := range d.children {\n+ if c := w.Get(); c != nil {\n+ if c.(*Dirent).IsNegative() {\n+ // The parent holds both weak and strong refs in the case of\n+ // negative dirents.\n+ c.DecRef()\n+ }\n+ // Drop the reference we just acquired in WeakRef.Get.\n+ c.DecRef()\n+ }\nw.Drop()\n}\nd.children = nil\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix reference leak for negative dirents
PiperOrigin-RevId: 200306715
Change-Id: I7c80059c77ebd3d9a5d7d48b05c8e7a597f10850 |
259,885 | 13.06.2018 10:03:06 | 25,200 | 55b905845650efc9a0a23066f8ffd25ce2565bbc | Log filemem state when panicking due to invalid refcount. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/filemem/filemem.go",
"new_path": "pkg/sentry/platform/filemem/filemem.go",
"diff": "@@ -156,17 +156,6 @@ type usageInfo struct {\nrefs uint64\n}\n-func (u *usageInfo) incRef() {\n- u.refs++\n-}\n-\n-func (u *usageInfo) decRef() {\n- if u.refs == 0 {\n- panic(\"DecRef at 0 refs!\")\n- }\n- u.refs--\n-}\n-\nconst (\nchunkShift = 24\nchunkSize = 1 << chunkShift // 16 MB\n@@ -506,7 +495,7 @@ func (f *FileMem) IncRef(fr platform.FileRange) {\ndefer f.mu.Unlock()\ngap := f.usage.ApplyContiguous(fr, func(seg usageIterator) {\n- seg.ValuePtr().incRef()\n+ seg.ValuePtr().refs++\n})\nif gap.Ok() {\npanic(fmt.Sprintf(\"IncRef(%v): attempted to IncRef on unallocated pages %v:\\n%v\", fr, gap.Range(), &f.usage))\n@@ -527,7 +516,10 @@ func (f *FileMem) DecRef(fr platform.FileRange) {\nfor seg := f.usage.FindSegment(fr.Start); seg.Ok() && seg.Start() < fr.End; seg = seg.NextSegment() {\nseg = f.usage.Isolate(seg, fr)\nval := seg.ValuePtr()\n- val.decRef()\n+ if val.refs == 0 {\n+ panic(fmt.Sprintf(\"DecRef(%v): 0 existing references on %v:\\n%v\", fr, seg.Range(), &f.usage))\n+ }\n+ val.refs--\nif val.refs == 0 {\nfreed = true\n// Reclassify memory as System, until it's freed by the reclaim\n"
}
] | Go | Apache License 2.0 | google/gvisor | Log filemem state when panicking due to invalid refcount.
PiperOrigin-RevId: 200408305
Change-Id: I676ee49ec77697105723577928c7f82088cd378e |
259,948 | 13.06.2018 10:13:23 | 25,200 | 686093669eb094eb585009b08175a70928849134 | sentry: do not treat all save errors as state file errors. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/state/state.go",
"new_path": "pkg/sentry/state/state.go",
"diff": "@@ -78,10 +78,7 @@ func (opts SaveOpts) Save(k *kernel.Kernel, w *watchdog.Watchdog) error {\n// Save the kernel.\nerr = k.SaveTo(wc)\nif closeErr := wc.Close(); err == nil && closeErr != nil {\n- err = closeErr\n- }\n- if err != nil {\n- err = ErrStateFile{err}\n+ err = ErrStateFile{closeErr}\n}\n}\nopts.Callback(err)\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: do not treat all save errors as state file errors.
PiperOrigin-RevId: 200410220
Change-Id: I6a8745e33be949e335719083501f18b24f6ba471 |
259,858 | 13.06.2018 13:04:36 | 25,200 | 7b7b199ed0e282c42a753b1dc2ee16fe15aaa6d3 | Deflake kvm_test. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/BUILD",
"new_path": "pkg/sentry/platform/kvm/BUILD",
"diff": "@@ -71,7 +71,6 @@ go_library(\ngo_test(\nname = \"kvm_test\",\n- size = \"small\",\nsrcs = [\n\"kvm_test.go\",\n\"virtual_map_test.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm_test.go",
"new_path": "pkg/sentry/platform/kvm/kvm_test.go",
"diff": "@@ -157,7 +157,9 @@ func TestApplicationSyscall(t *testing.T) {\nFloatingPointState: dummyFPState,\nPageTables: pt,\nFullRestore: true,\n- }); err != nil {\n+ }); err == platform.ErrContextInterrupt {\n+ return true // Retry.\n+ } else if err != nil {\nt.Errorf(\"application syscall with full restore failed: %v\", err)\n}\nreturn false\n@@ -167,7 +169,9 @@ func TestApplicationSyscall(t *testing.T) {\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err != nil {\n+ }); err == platform.ErrContextInterrupt {\n+ return true // Retry.\n+ } else if err != nil {\nt.Errorf(\"application syscall with partial restore failed: %v\", err)\n}\nreturn false\n@@ -182,7 +186,9 @@ func TestApplicationFault(t *testing.T) {\nFloatingPointState: dummyFPState,\nPageTables: pt,\nFullRestore: true,\n- }); err != platform.ErrContextSignal || (si != nil && si.Signo != int32(syscall.SIGSEGV)) {\n+ }); err == platform.ErrContextInterrupt {\n+ return true // Retry.\n+ } else if err != platform.ErrContextSignal || (si != nil && si.Signo != int32(syscall.SIGSEGV)) {\nt.Errorf(\"application fault with full restore got (%v, %v), expected (%v, SIGSEGV)\", err, si, platform.ErrContextSignal)\n}\nreturn false\n@@ -193,7 +199,9 @@ func TestApplicationFault(t *testing.T) {\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err != platform.ErrContextSignal || (si != nil && si.Signo != int32(syscall.SIGSEGV)) {\n+ }); err == platform.ErrContextInterrupt {\n+ return true // Retry.\n+ } else if err != platform.ErrContextSignal || (si != nil && si.Signo != int32(syscall.SIGSEGV)) {\nt.Errorf(\"application fault with partial restore got (%v, %v), expected (%v, SIGSEGV)\", err, si, platform.ErrContextSignal)\n}\nreturn false\n@@ -203,16 +211,21 @@ func TestApplicationFault(t *testing.T) {\nfunc TestRegistersSyscall(t *testing.T) {\napplicationTest(t, true, testutil.TwiddleRegsSyscall, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\ntestutil.SetTestRegs(regs) // Fill values for all registers.\n+ for {\nif _, _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err != nil {\n+ }); err == platform.ErrContextInterrupt {\n+ continue // Retry.\n+ } else if err != nil {\nt.Errorf(\"application register check with partial restore got unexpected error: %v\", err)\n}\nif err := testutil.CheckTestRegs(regs, false); err != nil {\nt.Errorf(\"application register check with partial restore failed: %v\", err)\n}\n+ break // Done.\n+ }\nreturn false\n})\n}\n@@ -220,17 +233,22 @@ func TestRegistersSyscall(t *testing.T) {\nfunc TestRegistersFault(t *testing.T) {\napplicationTest(t, true, testutil.TwiddleRegsFault, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\ntestutil.SetTestRegs(regs) // Fill values for all registers.\n+ for {\nif si, _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\nFullRestore: true,\n- }); err != platform.ErrContextSignal || si.Signo != int32(syscall.SIGSEGV) {\n+ }); err == platform.ErrContextInterrupt {\n+ continue // Retry.\n+ } else if err != platform.ErrContextSignal || si.Signo != int32(syscall.SIGSEGV) {\nt.Errorf(\"application register check with full restore got unexpected error: %v\", err)\n}\nif err := testutil.CheckTestRegs(regs, true); err != nil {\nt.Errorf(\"application register check with full restore failed: %v\", err)\n}\n+ break // Done.\n+ }\nreturn false\n})\n}\n@@ 
-238,17 +256,22 @@ func TestRegistersFault(t *testing.T) {\nfunc TestSegments(t *testing.T) {\napplicationTest(t, true, testutil.TwiddleSegments, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\ntestutil.SetTestSegments(regs)\n+ for {\nif _, _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\nFullRestore: true,\n- }); err != nil {\n+ }); err == platform.ErrContextInterrupt {\n+ continue // Retry.\n+ } else if err != nil {\nt.Errorf(\"application segment check with full restore got unexpected error: %v\", err)\n}\nif err := testutil.CheckTestSegments(regs); err != nil {\nt.Errorf(\"application segment check with full restore failed: %v\", err)\n}\n+ break // Done.\n+ }\nreturn false\n})\n}\n@@ -323,23 +346,33 @@ func TestInvalidate(t *testing.T) {\nvar data uintptr // Used below.\napplicationTest(t, true, testutil.Touch, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {\ntestutil.SetTouchTarget(regs, &data) // Read legitimate value.\n+ for {\nif _, _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err != nil {\n+ }); err == platform.ErrContextInterrupt {\n+ continue // Retry.\n+ } else if err != nil {\nt.Errorf(\"application partial restore: got %v, wanted nil\", err)\n}\n+ break // Done.\n+ }\n// Unmap the page containing data & invalidate.\npt.Unmap(usermem.Addr(reflect.ValueOf(&data).Pointer() & ^uintptr(usermem.PageSize-1)), usermem.PageSize)\n+ for {\nif _, _, err := c.SwitchToUser(ring0.SwitchOpts{\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\nFlush: true,\n- }); err != platform.ErrContextSignal {\n+ }); err == platform.ErrContextInterrupt {\n+ continue // Retry.\n+ } else if err != platform.ErrContextSignal {\nt.Errorf(\"application partial restore: got %v, wanted %v\", err, platform.ErrContextSignal)\n}\n+ break // Success.\n+ }\nreturn false\n})\n}\n@@ -355,7 +388,9 @@ func TestEmptyAddressSpace(t *testing.T) {\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); !IsFault(err, si) {\n+ }); err == platform.ErrContextInterrupt {\n+ return true // Retry.\n+ } else if !IsFault(err, si) {\nt.Errorf(\"first fault with partial restore failed got %v\", err)\nt.Logf(\"registers: %#v\", ®s)\n}\n@@ -367,7 +402,9 @@ func TestEmptyAddressSpace(t *testing.T) {\nFloatingPointState: dummyFPState,\nPageTables: pt,\nFullRestore: true,\n- }); !IsFault(err, si) {\n+ }); err == platform.ErrContextInterrupt {\n+ return true // Retry.\n+ } else if !IsFault(err, si) {\nt.Errorf(\"first fault with full restore failed got %v\", err)\nt.Logf(\"registers: %#v\", ®s)\n}\n@@ -422,11 +459,10 @@ func BenchmarkApplicationSyscall(b *testing.B) {\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err != nil {\n- if err == platform.ErrContextInterrupt {\n+ }); err == platform.ErrContextInterrupt {\na++\nreturn true // Ignore.\n- }\n+ } else if err != nil {\nb.Fatalf(\"benchmark failed: %v\", err)\n}\ni++\n@@ -459,11 +495,10 @@ func BenchmarkWorldSwitchToUserRoundtrip(b *testing.B) {\nRegisters: regs,\nFloatingPointState: dummyFPState,\nPageTables: pt,\n- }); err != nil {\n- if err == platform.ErrContextInterrupt {\n+ }); err == platform.ErrContextInterrupt {\na++\nreturn true // Ignore.\n- }\n+ } else if err != nil {\nb.Fatalf(\"benchmark failed: %v\", err)\n}\n// This will intentionally cause the world switch. 
By executing\n@@ -474,6 +509,6 @@ func BenchmarkWorldSwitchToUserRoundtrip(b *testing.B) {\nreturn i < b.N\n})\nif a != 0 {\n- b.Logf(\"EAGAIN occurred %d times (in %d iterations).\", a, a+i)\n+ b.Logf(\"ErrContextInterrupt occurred %d times (in %d iterations).\", a, a+i)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/virtual_map_test.go",
"new_path": "pkg/sentry/platform/kvm/virtual_map_test.go",
"diff": "@@ -23,13 +23,15 @@ import (\ntype checker struct {\nok bool\n+ accessType usermem.AccessType\n}\n-func (c *checker) Contains(addr uintptr) func(virtualRegion) {\n+func (c *checker) Containing(addr uintptr) func(virtualRegion) {\nc.ok = false // Reset for below calls.\nreturn func(vr virtualRegion) {\nif vr.virtual <= addr && addr < vr.virtual+vr.length {\nc.ok = true\n+ c.accessType = vr.accessType\n}\n}\n}\n@@ -38,7 +40,7 @@ func TestParseMaps(t *testing.T) {\nc := new(checker)\n// Simple test.\n- if err := applyVirtualRegions(c.Contains(0)); err != nil {\n+ if err := applyVirtualRegions(c.Containing(0)); err != nil {\nt.Fatalf(\"unexpected error: %v\", err)\n}\n@@ -52,7 +54,7 @@ func TestParseMaps(t *testing.T) {\n}\n// Re-parse maps.\n- if err := applyVirtualRegions(c.Contains(addr)); err != nil {\n+ if err := applyVirtualRegions(c.Containing(addr)); err != nil {\nsyscall.RawSyscall(syscall.SYS_MUNMAP, addr, usermem.PageSize, 0)\nt.Fatalf(\"unexpected error: %v\", err)\n}\n@@ -63,16 +65,29 @@ func TestParseMaps(t *testing.T) {\nt.Fatalf(\"updated map does not contain 0x%08x, expected true\", addr)\n}\n- // Unmap the region.\n- syscall.RawSyscall(syscall.SYS_MUNMAP, addr, usermem.PageSize, 0)\n+ // Map the region as PROT_NONE.\n+ newAddr, _, errno := syscall.RawSyscall6(\n+ syscall.SYS_MMAP, addr, usermem.PageSize,\n+ syscall.PROT_NONE,\n+ syscall.MAP_ANONYMOUS|syscall.MAP_FIXED|syscall.MAP_PRIVATE, 0, 0)\n+ if errno != 0 {\n+ t.Fatalf(\"unexpected map error: %v\", errno)\n+ }\n+ if newAddr != addr {\n+ t.Fatalf(\"unable to remap address: got 0x%08x, wanted 0x%08x\", newAddr, addr)\n+ }\n// Re-parse maps.\n- if err := applyVirtualRegions(c.Contains(addr)); err != nil {\n+ if err := applyVirtualRegions(c.Containing(addr)); err != nil {\nt.Fatalf(\"unexpected error: %v\", err)\n}\n-\n- // Assert that it once again does _not_ contain the region.\n- if c.ok {\n- t.Fatalf(\"final map does contain 0x%08x, expected false\", addr)\n+ if !c.ok {\n+ t.Fatalf(\"final map does not contain 0x%08x, expected true\", addr)\n+ }\n+ if c.accessType.Read || c.accessType.Write || c.accessType.Execute {\n+ t.Fatalf(\"final map has incorrect permissions for 0x%08x\", addr)\n}\n+\n+ // Unmap the region.\n+ syscall.RawSyscall(syscall.SYS_MUNMAP, addr, usermem.PageSize, 0)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Deflake kvm_test.
PiperOrigin-RevId: 200439846
Change-Id: I9970fe0716cb02f0f41b754891d55db7e0729f56 |
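The edits above converge on one pattern: retry the switch while it reports an interrupt, and treat anything else as the real result. A generic sketch of that loop, with a made-up sentinel standing in for `platform.ErrContextInterrupt`:

```go
package main

import (
	"errors"
	"fmt"
)

// errInterrupted stands in for platform.ErrContextInterrupt; it signals a
// spurious wakeup rather than a real failure.
var errInterrupted = errors.New("context interrupted")

// runRetrying keeps invoking op until it returns something other than the
// interrupt sentinel, mirroring the test loops in the diff.
func runRetrying(op func() error) error {
	for {
		if err := op(); err != errInterrupted {
			return err
		}
		// Interrupted: retry.
	}
}

func main() {
	calls := 0
	err := runRetrying(func() error {
		calls++
		if calls < 3 {
			return errInterrupted
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
```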
260,013 | 13.06.2018 16:20:30 | 25,200 | 1170039e788db368615451a0a1f5cfccb1d28d41 | Fix missing returns in rpcinet. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/socket.go",
"new_path": "pkg/sentry/socket/rpcinet/socket.go",
"diff": "@@ -288,7 +288,7 @@ func (s *socketOperations) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error {\n<-c\nif e := stack.rpcConn.Request(id).Result.(*pb.SyscallResponse_Bind).Bind.ErrorNumber; e != 0 {\n- syserr.FromHost(syscall.Errno(e))\n+ return syserr.FromHost(syscall.Errno(e))\n}\nreturn nil\n}\n@@ -300,7 +300,7 @@ func (s *socketOperations) Listen(t *kernel.Task, backlog int) *syserr.Error {\n<-c\nif e := stack.rpcConn.Request(id).Result.(*pb.SyscallResponse_Listen).Listen.ErrorNumber; e != 0 {\n- syserr.FromHost(syscall.Errno(e))\n+ return syserr.FromHost(syscall.Errno(e))\n}\nreturn nil\n}\n@@ -361,7 +361,7 @@ func (s *socketOperations) SetSockOpt(t *kernel.Task, level int, name int, opt [\n<-c\nif e := stack.rpcConn.Request(id).Result.(*pb.SyscallResponse_SetSockOpt).SetSockOpt.ErrorNumber; e != 0 {\n- syserr.FromHost(syscall.Errno(e))\n+ return syserr.FromHost(syscall.Errno(e))\n}\nreturn nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix missing returns in rpcinet.
PiperOrigin-RevId: 200472634
Change-Id: I3f0fb9e3b2f8616e6aa1569188258f330bf1ed31 |
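The bug class here is an error value that is constructed and then dropped because the `return` is missing, so callers observe success. A distilled version (not the rpcinet code); linters in the errcheck family will usually flag the discarded value:

```go
package main

import (
	"errors"
	"fmt"
)

func buggy(fail bool) error {
	if fail {
		errors.New("bind failed") // value silently discarded
	}
	return nil
}

func fixed(fail bool) error {
	if fail {
		return errors.New("bind failed")
	}
	return nil
}

func main() {
	fmt.Println(buggy(true)) // <nil> — the failure is lost
	fmt.Println(fixed(true)) // bind failed
}
```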
259,854 | 13.06.2018 20:00:00 | 25,200 | f5d0c59f5c736f5f7fceb566e134f41b03229c22 | Fix reference leak in VDSO validation | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dirent.go",
"new_path": "pkg/sentry/fs/dirent.go",
"diff": "@@ -213,7 +213,12 @@ func NewDirent(inode *Inode, name string) *Dirent {\n// NewTransientDirent creates a transient Dirent that shouldn't actually be\n// visible to users.\n+//\n+// An Inode is required.\nfunc NewTransientDirent(inode *Inode) *Dirent {\n+ if inode == nil {\n+ panic(\"an inode is required\")\n+ }\nreturn newDirent(inode, \"transient\")\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/BUILD",
"new_path": "pkg/sentry/loader/BUILD",
"diff": "@@ -43,6 +43,7 @@ go_library(\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/context\",\n\"//pkg/sentry/fs\",\n+ \"//pkg/sentry/fs/anon\",\n\"//pkg/sentry/fs/fsutil\",\n\"//pkg/sentry/limits\",\n\"//pkg/sentry/memmap\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/vdso.go",
"new_path": "pkg/sentry/loader/vdso.go",
"diff": "@@ -20,10 +20,12 @@ import (\n\"io\"\n\"gvisor.googlesource.com/gvisor/pkg/abi\"\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs/anon\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/memmap\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/mm\"\n@@ -63,8 +65,23 @@ func (f *fileContext) Value(key interface{}) interface{} {\n}\n}\n+// newByteReaderFile creates a fake file to read data from.\nfunc newByteReaderFile(data []byte) *fs.File {\n- dirent := fs.NewTransientDirent(nil)\n+ // Create a fake inode.\n+ inode := fs.NewInode(fsutil.NewSimpleInodeOperations(fsutil.InodeSimpleAttributes{\n+ FSType: linux.ANON_INODE_FS_MAGIC,\n+ }), fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{}), fs.StableAttr{\n+ Type: fs.Anonymous,\n+ DeviceID: anon.PseudoDevice.DeviceID(),\n+ InodeID: anon.PseudoDevice.NextIno(),\n+ BlockSize: usermem.PageSize,\n+ })\n+\n+ // Use the fake inode to create a fake dirent.\n+ dirent := fs.NewTransientDirent(inode)\n+ defer dirent.DecRef()\n+\n+ // Use the fake dirent to make a fake file.\nflags := fs.FileFlags{Read: true, Pread: true}\nreturn fs.NewFile(&fileContext{Context: context.Background()}, dirent, flags, &byteReader{\ndata: data,\n@@ -202,6 +219,7 @@ func PrepareVDSO(p platform.Platform) (*VDSO, error) {\n// First make sure the VDSO is valid. vdsoFile does not use ctx, so a\n// nil context can be passed.\ninfo, err := validateVDSO(nil, vdsoFile, uint64(len(vdsoBin)))\n+ vdsoFile.DecRef()\nif err != nil {\nreturn nil, err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix reference leak in VDSO validation
PiperOrigin-RevId: 200496070
Change-Id: I33adb717c44e5b4bcadece882be3ab1ee3920556 |
259,881 | 14.06.2018 10:10:09 | 25,200 | d71f5ef6885b9c241018308944e4b2e4b4857029 | Add nanosleep filter for Go 1.11 support
golang.org/cl/108538 replaces pselect6 with nanosleep in runtime.usleep. Update
the filters accordingly. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/BUILD",
"new_path": "runsc/boot/filter/BUILD",
"diff": "@@ -6,6 +6,8 @@ go_library(\nname = \"filter\",\nsrcs = [\n\"config.go\",\n+ \"config_go110.go\",\n+ \"config_go111.go\",\n\"extra_filters.go\",\n\"extra_filters_msan.go\",\n\"extra_filters_race.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/config.go",
"new_path": "runsc/boot/filter/config.go",
"diff": "@@ -61,7 +61,6 @@ var allowedSyscalls = seccomp.SyscallRules{\nsyscall.SYS_NEWFSTATAT: {},\nsyscall.SYS_POLL: {},\nsyscall.SYS_PREAD64: {},\n- syscall.SYS_PSELECT6: {},\nsyscall.SYS_PWRITE64: {},\nsyscall.SYS_READ: {},\nsyscall.SYS_READLINKAT: {},\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/filter/config_go110.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build !go1.11\n+\n+package filter\n+\n+import (\n+ \"syscall\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/seccomp\"\n+)\n+\n+// TODO: Remove this file and merge config_go111.go back into\n+// config.go once we no longer build with Go 1.10.\n+\n+func init() {\n+ allowedSyscalls[syscall.SYS_PSELECT6] = []seccomp.Rule{}\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/filter/config_go111.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build go1.11\n+\n+package filter\n+\n+import (\n+ \"syscall\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/seccomp\"\n+)\n+\n+func init() {\n+ allowedSyscalls[syscall.SYS_NANOSLEEP] = []seccomp.Rule{}\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add nanosleep filter for Go 1.11 support
golang.org/cl/108538 replaces pselect6 with nanosleep in runtime.usleep. Update
the filters accordingly.
PiperOrigin-RevId: 200574612
Change-Id: Ifb2296fcb3781518fc047aabbbffedb9ae488cd7 |
259,885 | 14.06.2018 11:34:15 | 25,200 | 657db692b2241d89a324acc246b3c5230d8bd6ac | Ignore expiration count in kernelCPUClockListener.Notify. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -960,7 +960,13 @@ type kernelCPUClockListener struct {\n// Notify implements ktime.TimerListener.Notify.\nfunc (l kernelCPUClockListener) Notify(exp uint64) {\n- atomic.AddUint64(&l.k.cpuClock, exp)\n+ // Only increment cpuClock by 1 regardless of the number of expirations.\n+ // This approximately compensates for cases where thread throttling or bad\n+ // Go runtime scheduling prevents the cpuClockTicker goroutine, and\n+ // presumably task goroutines as well, from executing for a long period of\n+ // time. It's also necessary to prevent CPU clocks from seeing large\n+ // discontinuous jumps.\n+ atomic.AddUint64(&l.k.cpuClock, 1)\n}\n// Destroy implements ktime.TimerListener.Destroy.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Ignore expiration count in kernelCPUClockListener.Notify.
PiperOrigin-RevId: 200590832
Change-Id: I35b817ecccc9414a742dee4815dfc67d0c7d0496 |
259,858 | 15.06.2018 01:21:08 | 25,200 | 1eb1bf8670e85bccd8df04ee8452d327b9891518 | Update contributing guidelines with an example.
Fixes #69 | [
{
"change_type": "MODIFY",
"old_path": "CONTRIBUTING.md",
"new_path": "CONTRIBUTING.md",
"diff": "@@ -56,17 +56,55 @@ Rules:\n### Code reviews\n-All changes must be submitted via [Gerrit](https://gvisor-review.googlesource.com).\n+All changes must be submitted via [Gerrit][gerrit].\nAll submissions, including submissions by project members, require review.\n-Please see these [instructions](https://gvisor-review.googlesource.com/Documentation/).\n+\n+To submit a patch, first clone the canonical repository.\n+\n+```\n+git clone https://gvisor.googlesource.com/gvisor\n+```\n+\n+From within the cloned directory, install the commit hooks (optional, but if\n+you don't you will need to generate Change-Ids manually in your commits).\n+\n+```\n+curl -Lo `git rev-parse --git-dir`/hooks/commit-msg https://gerrit-review.googlesource.com/tools/hooks/commit-msg\n+chmod +x `git rev-parse --git-dir`/hooks/commit-msg\n+```\n+\n+Edit the source and generate commits as you normally would. While making\n+changes, remember to organize commits logically. Changes are not reviewed per\n+branch (as with a pull request), they are reviewed per commit.\n+\n+Before posting a new patch, you will need to generate an appropriate\n+authentication cookie. Visit the [repository][repo] and click the\n+\"Generate Password\" link at the top of the page for instructions.\n+\n+To post a patch for review, push to a special \"for\" reference.\n+\n+```\n+git push origin HEAD:refs/for/master\n+```\n+\n+A change link will be generated for the commit, and a team member will review\n+your change request, provide feedback (and submit when appropriate). To address\n+feedback, you may be required to amend your commit and repush (don't change\n+the Commit-Id in the commit message). This will generate a new version of\n+the change.\n+\n+When approved, the change will be submitted by a team member and automatically\n+merged into the repository.\n### The small print\nContributions made by corporations are covered by a different agreement than\nthe one above, the\n-[Software Grant and Corporate Contributor License Agreement]\n-(https://cla.developers.google.com/about/google-corporate).\n+[Software Grant and Corporate Contributor License Agreement][gccla].\n[gcla]: https://cla.developers.google.com/about/google-individual\n+[gccla]: https://cla.developers.google.com/about/google-corporate\n+[gerrit]: https://gvisor-review.googlesource.com\n[gostyle]: https://github.com/golang/go/wiki/CodeReviewComments\n+[repo]: https://gvisor.googlesource.com\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update contributing guidelines with an example.
Fixes #69
PiperOrigin-RevId: 200683809
Change-Id: I1312ebb3775d5f9088e9108359c19e2dedbb7b70 |
259,992 | 15.06.2018 09:17:08 | 25,200 | 119a302ceb070243cc2d3d3b4dcf5f4d57809479 | Implement /proc/thread-self
Closes #68 | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/proc.go",
"new_path": "pkg/sentry/fs/proc/proc.go",
"diff": "@@ -111,6 +111,13 @@ func (p *proc) newSelf(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\nreturn newFile(s, msrc, fs.Symlink, nil)\n}\n+// newThreadSelf returns a new \"threadSelf\" node.\n+func (p *proc) newThreadSelf(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\n+ s := &threadSelf{pidns: p.pidns}\n+ s.InitSymlink(ctx, fs.RootOwner, \"\")\n+ return newFile(s, msrc, fs.Symlink, nil)\n+}\n+\n// newStubProcFsFile returns a procfs file with constant contents.\nfunc (p *proc) newStubProcFSFile(ctx context.Context, msrc *fs.MountSource, c []byte) *fs.Inode {\nu := &stubProcFSFile{\n@@ -134,6 +141,28 @@ func (s *self) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {\nreturn \"\", ramfs.ErrInvalidOp\n}\n+// threadSelf is more magical than \"self\" link.\n+type threadSelf struct {\n+ ramfs.Symlink\n+\n+ pidns *kernel.PIDNamespace\n+}\n+\n+// Readlink implements fs.InodeOperations.Readlink.\n+func (s *threadSelf) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {\n+ if t := kernel.TaskFromContext(ctx); t != nil {\n+ tgid := s.pidns.IDOfThreadGroup(t.ThreadGroup())\n+ tid := s.pidns.IDOfTask(t)\n+ if tid == 0 || tgid == 0 {\n+ return \"\", ramfs.ErrNotFound\n+ }\n+ return fmt.Sprintf(\"%d/task/%d\", tgid, tid), nil\n+ }\n+\n+ // Who is reading this link?\n+ return \"\", ramfs.ErrInvalidOp\n+}\n+\n// Lookup loads an Inode at name into a Dirent.\nfunc (p *proc) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dirent, error) {\n// Is it one of the static ones?\n@@ -153,6 +182,7 @@ func (p *proc) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dire\n},\n\"self\": func() *fs.Inode { return p.newSelf(ctx, dir.MountSource) },\n\"sys\": func() *fs.Inode { return p.newSysDir(ctx, dir.MountSource) },\n+ \"thread-self\": func() *fs.Inode { return p.newThreadSelf(ctx, dir.MountSource) },\n}\nif nf, ok := nfs[name]; ok {\nreturn fs.NewDirent(nf(), name), nil\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement /proc/thread-self
Closes #68
PiperOrigin-RevId: 200725401
Change-Id: I4827009b8aee89d22887c3af67291ccf7058d420 |
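The symlink resolves per thread, which is why the readlink needs both the tgid and the tid. On a reasonably recent Linux host the same behavior can be observed directly (pinning the goroutine so the readlink stays on one OS thread); this is a host-side demo, not sentry code:

```go
package main

import (
	"fmt"
	"os"
	"runtime"
)

func main() {
	// /proc/thread-self resolves to "<tgid>/task/<tid>" for the calling
	// thread, so the result depends on which OS thread does the readlink.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	target, err := os.Readlink("/proc/thread-self")
	if err != nil {
		panic(err)
	}
	fmt.Println("thread-self ->", target) // e.g. "1234/task/1237"
}
```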
259,992 | 15.06.2018 09:17:40 | 25,200 | ef5dd4df9b65fb98d952b83baa736c14b2627fe7 | Set kernel.applicationCores to the number of processors on the host
The right number to use is the number of processors assigned to the cgroup. But until
we make the sandbox join the respective cgroup, just use the number of processors on
the host.
Closes #65, closes #66 | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -18,6 +18,7 @@ package boot\nimport (\n\"fmt\"\n\"math/rand\"\n+ \"runtime\"\n\"sync/atomic\"\n\"syscall\"\ngtime \"time\"\n@@ -171,7 +172,8 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\nTimekeeper: tk,\nRootUserNamespace: creds.UserNamespace,\nNetworkStack: networkStack,\n- ApplicationCores: 8,\n+ // TODO: use number of logical processors from cgroups.\n+ ApplicationCores: uint(runtime.NumCPU()),\nVdso: vdso,\nRootUTSNamespace: utsns,\nRootIPCNamespace: ipcns,\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader_test.go",
"new_path": "runsc/boot/loader_test.go",
"diff": "package boot\nimport (\n+ \"fmt\"\n\"io/ioutil\"\n+ \"math/rand\"\n\"os\"\n\"sync\"\n\"testing\"\n@@ -29,6 +31,7 @@ import (\nfunc init() {\nlog.SetLevel(log.Debug)\n+ rand.Seed(time.Now().UnixNano())\n}\n// testSpec returns a simple spec that can be used in tests.\n@@ -46,7 +49,7 @@ func testSpec() *specs.Spec {\n}\nfunc createLoader() (*Loader, error) {\n- fd, err := server.CreateSocket(ControlSocketAddr(\"123\"))\n+ fd, err := server.CreateSocket(ControlSocketAddr(fmt.Sprintf(\"%010d\", rand.Int())[:10]))\nif err != nil {\nreturn nil, err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Set kernel.applicationCores to the number of processors on the host
The right number to use is the number of processors assigned to the cgroup. But until
we make the sandbox join the respective cgroup, just use the number of processors on
the host.
Closes #65, closes #66
PiperOrigin-RevId: 200725483
Change-Id: I34a566b1a872e26c66f56fa6e3100f42aaf802b1 |
259,858 | 15.06.2018 09:29:19 | 25,200 | b31ac4e1dfc0eef688e2d8e85df965292690726e | Use notify explicitly on unlock path.
There are circumstances under which the redpill call will not generate
the appropriate action and notification. Replace this call with an
explicit notification, which is guaranteed to transition as well as
perform the futex wake. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine.go",
"new_path": "pkg/sentry/platform/kvm/machine.go",
"diff": "@@ -397,7 +397,7 @@ func (c *vCPU) unlock() {\ncase vCPUUser | vCPUGuest | vCPUWaiter:\n// Force a transition: this must trigger a notification when we\n// return from guest mode.\n- redpill()\n+ c.notify()\ncase vCPUUser | vCPUWaiter:\n// Waiting for the lock to be released; the responsibility is\n// on us to notify the waiter and clear the associated bit.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use notify explicitly on unlock path.
There are circumstances under which the redpill call will not generate
the appropriate action and notification. Replace this call with an
explicit notification, which is guaranteed to transition as well as
perform the futex wake.
PiperOrigin-RevId: 200726934
Change-Id: Ie19e008a6007692dd7335a31a8b59f0af6e54aaa |
260,013 | 15.06.2018 12:54:38 | 25,200 | fa6db05e0ce828f2500651ca1226babbbf5edc80 | FIFOs should support O_TRUNC as a no-op. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/node.go",
"new_path": "pkg/sentry/kernel/pipe/node.go",
"diff": "@@ -162,6 +162,18 @@ func (i *inodeOperations) waitFor(wakeupChan *chan struct{}, sleeper amutex.Slee\n}\n}\n+// Truncate implements fs.InodeOperations.Truncate\n+//\n+// This method is required to override the default i.InodeOperations.Truncate\n+// which may return ErrInvalidOperation, this allows open related\n+// syscalls to set the O_TRUNC flag without returning an error by\n+// calling Truncate directly during openat. The ftruncate and truncate\n+// system calls will check that the file is an actual file and return\n+// EINVAL because it's a PIPE, making this behavior consistent with linux.\n+func (i *inodeOperations) Truncate(context.Context, *fs.Inode, int64) error {\n+ return nil\n+}\n+\n// newHandleLocked signals a new pipe reader or writer depending on where\n// 'wakeupChan' points. This unblocks any corresponding reader or writer\n// waiting for the other end of the channel to be opened, see Fifo.waitFor.\n"
}
] | Go | Apache License 2.0 | google/gvisor | FIFOs should support O_TRUNC as a no-op.
PiperOrigin-RevId: 200759323
Change-Id: I683b2edcc2188304c4ca563e46af457e23625905 |
259,948 | 15.06.2018 13:37:21 | 25,200 | fc8ca72a32bb4cb348ece3033c84696ea3502068 | sentry: do not start delivering external signal immediately. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/sighandling/sighandling.go",
"new_path": "pkg/sentry/sighandling/sighandling.go",
"diff": "@@ -29,23 +29,31 @@ import (\n// numSignals is the number of normal (non-realtime) signals on Linux.\nconst numSignals = 32\n-// forwardSignals listens for incoming signals and delivers them to k. It stops\n-// when the stop channel is closed.\n-func forwardSignals(k *kernel.Kernel, sigchans []chan os.Signal, stop chan struct{}) {\n+// forwardSignals listens for incoming signals and delivers them to k. It starts\n+// when the start channel is closed and stops when the stop channel is closed.\n+func forwardSignals(k *kernel.Kernel, sigchans []chan os.Signal, start, stop chan struct{}) {\n// Build a select case.\n- sc := []reflect.SelectCase{{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(stop)}}\n+ sc := []reflect.SelectCase{{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(start)}}\nfor _, sigchan := range sigchans {\nsc = append(sc, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(sigchan)})\n}\n+ started := false\nfor {\n// Wait for a notification.\nindex, _, ok := reflect.Select(sc)\n- // Was it the stop channel?\n+ // Was it the start / stop channel?\nif index == 0 {\nif !ok {\n+ if started {\n+ // stop channel\nbreak\n+ } else {\n+ // start channel\n+ started = true\n+ sc[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(stop)}\n+ }\n}\ncontinue\n}\n@@ -57,18 +65,18 @@ func forwardSignals(k *kernel.Kernel, sigchans []chan os.Signal, stop chan struc\n// Otherwise, it was a signal on channel N. Index 0 represents the stop\n// channel, so index N represents the channel for signal N.\n- if !k.SendExternalSignal(&arch.SignalInfo{Signo: int32(index)}, \"sentry\") {\n+ if !started || !k.SendExternalSignal(&arch.SignalInfo{Signo: int32(index)}, \"sentry\") {\n// Kernel is not ready to receive signals.\n//\n// Kill ourselves if this signal would have killed the\n- // process before StartForwarding was called. i.e., all\n+ // process before PrepareForwarding was called. i.e., all\n// _SigKill signals; see Go\n// src/runtime/sigtab_linux_generic.go.\n//\n// Otherwise ignore the signal.\n//\n// TODO: Convert Go's runtime.raise from\n- // tkill to tgkill so StartForwarding doesn't need to\n+ // tkill to tgkill so PrepareForwarding doesn't need to\n// be called until after filter installation.\nswitch linux.Signal(index) {\ncase linux.SIGHUP, linux.SIGINT, linux.SIGTERM:\n@@ -84,9 +92,11 @@ func forwardSignals(k *kernel.Kernel, sigchans []chan os.Signal, stop chan struc\n}\n}\n-// StartForwarding ensures that synchronous signals are forwarded to k and\n-// returns a callback that stops signal forwarding.\n-func StartForwarding(k *kernel.Kernel) func() {\n+// PrepareForwarding ensures that synchronous signals are forwarded to k and\n+// returns a callback that starts signal delivery, which itself returns a\n+// callback that stops signal forwarding.\n+func PrepareForwarding(k *kernel.Kernel) func() func() {\n+ start := make(chan struct{})\nstop := make(chan struct{})\n// Register individual channels. One channel per standard signal is\n@@ -109,8 +119,18 @@ func StartForwarding(k *kernel.Kernel) func() {\nsignal.Notify(sigchan, syscall.Signal(sig))\n}\n// Start up our listener.\n- go forwardSignals(k, sigchans, stop) // S/R-SAFE: synchronized by Kernel.extMu\n+ go forwardSignals(k, sigchans, start, stop) // S/R-SAFE: synchronized by Kernel.extMu\n- // ... 
shouldn't this wait until the forwardSignals goroutine returns?\n- return func() { close(stop) }\n+ return func() func() {\n+ close(start)\n+ return func() {\n+ close(stop)\n+ }\n+ }\n+}\n+\n+// StartForwarding ensures that synchronous signals are forwarded to k and\n+// returns a callback that stops signal forwarding.\n+func StartForwarding(k *kernel.Kernel) func() {\n+ return PrepareForwarding(k)()\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: do not start delivering external signal immediately.
PiperOrigin-RevId: 200765756
Change-Id: Ie4266f32e4e977df3925eb29f3fbb756e0337606 |
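The start/stop split above is easiest to see in isolation. Below is a minimal, self-contained sketch of the same gating pattern: a forwarding goroutine ignores events until a start channel is closed and exits when a stop channel is closed. The names and the event type are illustrative, not the sentry's API.

package main

import (
	"fmt"
	"time"
)

// forward ignores events until start is closed, then prints them until stop is closed.
func forward(events <-chan int, start, stop <-chan struct{}) {
	started := false
	for {
		select {
		case <-start:
			// A closed channel is always ready; nil it so this case only fires once.
			started = true
			start = nil
		case <-stop:
			return
		case ev := <-events:
			if !started {
				continue // delivery has not begun yet; drop the event
			}
			fmt.Println("delivering event", ev)
		}
	}
}

func main() {
	events := make(chan int, 4)
	start := make(chan struct{})
	stop := make(chan struct{})
	go forward(events, start, stop)

	close(start)                      // open the gate
	time.Sleep(50 * time.Millisecond) // crude: give the goroutine time to observe it
	events <- 2                       // forwarded
	time.Sleep(50 * time.Millisecond)
	close(stop) // shut the forwarder down
}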
259,891 | 15.06.2018 14:07:00 | 25,200 | 437890dc4b6987a64ac98766c752ce64091757dc | runsc: Make gofer logs show up in test output. | [
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -209,6 +209,8 @@ func (s *Sandbox) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundle\n}\ncmd := exec.Command(binPath, args...)\n+ cmd.Stdout = os.Stdout\n+ cmd.Stderr = os.Stderr\ncmd.ExtraFiles = goferEnds\n// Setup any uid/gid mappings, and create or join the configured user\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: Make gofer logs show up in test output.
PiperOrigin-RevId: 200770591
Change-Id: Ifc096d88615b63135210d93c2b4cee2eaecf1eee |
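The fix itself is just two lines of plumbing; a generic sketch of the same exec.Command wiring is below (the command being run is arbitrary).

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("echo", "hello from the child")
	// Without these two assignments the child's output is discarded; with
	// them it shows up in the parent's (e.g. a test runner's) output.
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("child failed: %v", err)
	}
}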
259,881 | 15.06.2018 15:35:09 | 25,200 | bd2d1aaa16474202b1a2c1edbf62e6782fa2dc36 | Replace crypto/rand with internal rand package | [
{
"change_type": "MODIFY",
"old_path": "pkg/dhcp/BUILD",
"new_path": "pkg/dhcp/BUILD",
"diff": "@@ -11,6 +11,7 @@ go_library(\n],\nimportpath = \"gvisor.googlesource.com/gvisor/pkg/dhcp\",\ndeps = [\n+ \"//pkg/rand\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/network/ipv4\",\n\"//pkg/tcpip/stack\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/dhcp/client.go",
"new_path": "pkg/dhcp/client.go",
"diff": "@@ -7,12 +7,12 @@ package dhcp\nimport (\n\"bytes\"\n\"context\"\n- \"crypto/rand\"\n\"fmt\"\n\"log\"\n\"sync\"\n\"time\"\n+ \"gvisor.googlesource.com/gvisor/pkg/rand\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/stack\"\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/rand/BUILD",
"diff": "+package(licenses = [\"notice\"]) # Apache 2.0\n+\n+load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\")\n+\n+go_library(\n+ name = \"rand\",\n+ srcs = [\"rand.go\"],\n+ importpath = \"gvisor.googlesource.com/gvisor/pkg/rand\",\n+ visibility = [\"//:sandbox\"],\n+ deps = [\"@org_golang_x_sys//unix:go_default_library\"],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/rand/rand.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package rand implements a cryptographically secure pseudorandom number\n+// generator.\n+package rand\n+\n+import (\n+ \"io\"\n+\n+ \"golang.org/x/sys/unix\"\n+)\n+\n+// reader implements an io.Reader that returns pseudorandom bytes.\n+type reader struct{}\n+\n+// Read implements io.Reader.Read.\n+func (reader) Read(p []byte) (int, error) {\n+ return unix.Getrandom(p, 0)\n+}\n+\n+// Reader is the default reader.\n+var Reader io.Reader = reader{}\n+\n+// Read reads from the default reader.\n+func Read(b []byte) (int, error) {\n+ return io.ReadFull(Reader, b)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dev/BUILD",
"new_path": "pkg/sentry/fs/dev/BUILD",
"diff": "@@ -33,6 +33,7 @@ go_library(\n\"//pkg/abi/linux\",\n\"//pkg/amutex\",\n\"//pkg/log\",\n+ \"//pkg/rand\",\n\"//pkg/sentry/context\",\n\"//pkg/sentry/device\",\n\"//pkg/sentry/fs\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dev/random.go",
"new_path": "pkg/sentry/fs/dev/random.go",
"diff": "package dev\nimport (\n- \"crypto/rand\"\n-\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n+ \"gvisor.googlesource.com/gvisor/pkg/rand\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs\"\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/BUILD",
"new_path": "pkg/sentry/loader/BUILD",
"diff": "@@ -39,6 +39,7 @@ go_library(\n\"//pkg/binary\",\n\"//pkg/cpuid\",\n\"//pkg/log\",\n+ \"//pkg/rand\",\n\"//pkg/refs\",\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/context\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/loader.go",
"new_path": "pkg/sentry/loader/loader.go",
"diff": "@@ -17,13 +17,13 @@ package loader\nimport (\n\"bytes\"\n- \"crypto/rand\"\n\"io\"\n\"path\"\n\"gvisor.googlesource.com/gvisor/pkg/abi\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/cpuid\"\n+ \"gvisor.googlesource.com/gvisor/pkg/rand\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/BUILD",
"new_path": "pkg/sentry/syscalls/linux/BUILD",
"diff": "@@ -70,6 +70,7 @@ go_library(\n\"//pkg/eventchannel\",\n\"//pkg/log\",\n\"//pkg/metric\",\n+ \"//pkg/rand\",\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/context\",\n\"//pkg/sentry/fs\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_random.go",
"new_path": "pkg/sentry/syscalls/linux/sys_random.go",
"diff": "package linux\nimport (\n- \"crypto/rand\"\n\"io\"\n\"math\"\n+ \"gvisor.googlesource.com/gvisor/pkg/rand\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/safemem\"\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/hash/BUILD",
"new_path": "pkg/tcpip/network/hash/BUILD",
"diff": "@@ -7,5 +7,8 @@ go_library(\nsrcs = [\"hash.go\"],\nimportpath = \"gvisor.googlesource.com/gvisor/pkg/tcpip/network/hash\",\nvisibility = [\"//visibility:public\"],\n- deps = [\"//pkg/tcpip/header\"],\n+ deps = [\n+ \"//pkg/rand\",\n+ \"//pkg/tcpip/header\",\n+ ],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/hash/hash.go",
"new_path": "pkg/tcpip/network/hash/hash.go",
"diff": "package hash\nimport (\n- \"crypto/rand\"\n\"encoding/binary\"\n+ \"gvisor.googlesource.com/gvisor/pkg/rand\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/header\"\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/BUILD",
"new_path": "pkg/tcpip/transport/tcp/BUILD",
"diff": "@@ -51,6 +51,7 @@ go_library(\nimportpath = \"gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp\",\nvisibility = [\"//visibility:public\"],\ndeps = [\n+ \"//pkg/rand\",\n\"//pkg/sleep\",\n\"//pkg/state\",\n\"//pkg/tcpip\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/accept.go",
"new_path": "pkg/tcpip/transport/tcp/accept.go",
"diff": "package tcp\nimport (\n- \"crypto/rand\"\n\"crypto/sha1\"\n\"encoding/binary\"\n\"hash\"\n@@ -13,6 +12,7 @@ import (\n\"sync\"\n\"time\"\n+ \"gvisor.googlesource.com/gvisor/pkg/rand\"\n\"gvisor.googlesource.com/gvisor/pkg/sleep\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/header\"\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/connect.go",
"new_path": "pkg/tcpip/transport/tcp/connect.go",
"diff": "package tcp\nimport (\n- \"crypto/rand\"\n\"sync\"\n\"sync/atomic\"\n\"time\"\n+ \"gvisor.googlesource.com/gvisor/pkg/rand\"\n\"gvisor.googlesource.com/gvisor/pkg/sleep\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer\"\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "package tcp\nimport (\n- \"crypto/rand\"\n\"math\"\n\"sync\"\n\"sync/atomic\"\n\"time\"\n+ \"gvisor.googlesource.com/gvisor/pkg/rand\"\n\"gvisor.googlesource.com/gvisor/pkg/sleep\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Replace crypto/rand with internal rand package
PiperOrigin-RevId: 200784607
Change-Id: I39aa6ee632936dcbb00fc298adccffa606e9f4c0 |
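A standalone sketch of the wrapper pattern introduced here: an io.Reader backed by getrandom(2) plus a helper that always fills the buffer. It mirrors the shape of the new package but is not the gVisor code itself.

package main

import (
	"fmt"
	"io"
	"log"

	"golang.org/x/sys/unix"
)

// reader returns cryptographically secure bytes from the kernel's getrandom(2).
type reader struct{}

func (reader) Read(p []byte) (int, error) {
	return unix.Getrandom(p, 0)
}

func main() {
	var r io.Reader = reader{}
	key := make([]byte, 16)
	// io.ReadFull retries short reads so the whole buffer is filled.
	if _, err := io.ReadFull(r, key); err != nil {
		log.Fatalf("getrandom failed: %v", err)
	}
	fmt.Printf("%x\n", key)
}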
259,991 | 15.06.2018 16:08:20 | 25,200 | 0786707cd94b8feffaeb083077eccaf10873e682 | Added code for a pause command for a container process.
Like runc, the pause command will pause the processes of the given container.
It will set that container's status to "paused."
A resume command will be added to unpause and continue running the process. | [

{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -37,6 +37,9 @@ const (\n// container..\nContainerExecute = \"containerManager.Execute\"\n+ // ContainerPause pauses the container.\n+ ContainerPause = \"containerManager.Pause\"\n+\n// ContainerProcesses is the URPC endpoint for getting the list of\n// processes running in a container.\nContainerProcesses = \"containerManager.Processes\"\n@@ -153,6 +156,12 @@ func (cm *containerManager) Checkpoint(o *control.SaveOpts, _ *struct{}) error {\nreturn state.Save(o, nil)\n}\n+// Pause suspends the process in a container.\n+func (cm *containerManager) Pause(_, _ *struct{}) error {\n+ cm.k.Pause()\n+ return nil\n+}\n+\n// Wait waits for the init process in the given container.\nfunc (cm *containerManager) Wait(cid *string, waitStatus *uint32) error {\n// TODO: Use the cid and wait on the init process in that\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/BUILD",
"new_path": "runsc/cmd/BUILD",
"diff": "@@ -17,6 +17,7 @@ go_library(\n\"kill.go\",\n\"list.go\",\n\"path.go\",\n+ \"pause.go\",\n\"ps.go\",\n\"restore.go\",\n\"run.go\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/cmd/pause.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package cmd\n+\n+import (\n+ \"context\"\n+ \"flag\"\n+ \"github.com/google/subcommands\"\n+ \"gvisor.googlesource.com/gvisor/runsc/boot\"\n+ \"gvisor.googlesource.com/gvisor/runsc/container\"\n+)\n+\n+// Pause implements subcommands.Command for the \"pause\" command.\n+type Pause struct{}\n+\n+// Name implements subcommands.Command.Name.\n+func (*Pause) Name() string {\n+ return \"pause\"\n+}\n+\n+// Synopsis implements subcommands.Command.Synopsis.\n+func (*Pause) Synopsis() string {\n+ return \"pause suspends all processes in a container\"\n+}\n+\n+// Usage implements subcommands.Command.Usage.\n+func (*Pause) Usage() string {\n+ return `pause <container id> - pause process in instance of container.`\n+}\n+\n+// SetFlags implements subcommands.Command.SetFlags.\n+func (*Pause) SetFlags(f *flag.FlagSet) {\n+}\n+\n+// Execute implements subcommands.Command.Execute.\n+func (*Pause) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {\n+ if f.NArg() != 1 {\n+ f.Usage()\n+ return subcommands.ExitUsageError\n+ }\n+\n+ id := f.Arg(0)\n+ conf := args[0].(*boot.Config)\n+\n+ cont, err := container.Load(conf.RootDir, id)\n+ if err != nil {\n+ Fatalf(\"error loading container: %v\", err)\n+ }\n+\n+ if err := cont.Pause(); err != nil {\n+ Fatalf(\"pause failed: %v\", err)\n+ }\n+\n+ return subcommands.ExitSuccess\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -316,7 +316,7 @@ func (c *Container) Event() (*boot.Event, error) {\n// Pid returns the Pid of the sandbox the container is running in, or -1 if the\n// container is not running.\nfunc (c *Container) Pid() int {\n- if c.Status != Running && c.Status != Created {\n+ if c.Status != Running && c.Status != Created && c.Status != Paused {\nreturn -1\n}\nreturn c.Sandbox.Pid\n@@ -349,6 +349,23 @@ func (c *Container) Checkpoint(f *os.File) error {\nreturn c.Sandbox.Checkpoint(c.ID, f)\n}\n+// Pause suspends the container and its kernel.\n+// The call only succeeds if the container's status is created or running.\n+func (c *Container) Pause() error {\n+ log.Debugf(\"Pausing container %q\", c.ID)\n+ switch c.Status {\n+ case Created, Running:\n+ if err := c.Sandbox.Pause(c.ID); err != nil {\n+ return fmt.Errorf(\"error pausing container: %v\", err)\n+ }\n+ c.Status = Paused\n+ return c.save()\n+ default:\n+ log.Warningf(\"container %q not created or running, not pausing\", c.ID)\n+ return nil\n+ }\n+}\n+\n// State returns the metadata of the container.\nfunc (c *Container) State() specs.State {\nreturn specs.State{\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -459,6 +459,40 @@ func TestCheckpoint(t *testing.T) {\n}\n}\n+// TestPause tests that calling pause successfully pauses the container.\n+// It checks that no errors are returned and that the state of the container\n+// is in fact 'Paused.'\n+func TestPause(t *testing.T) {\n+ spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n+\n+ rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ defer os.RemoveAll(bundleDir)\n+\n+ // Create and start the container.\n+ cont, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer cont.Destroy()\n+ if err := cont.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+\n+ // Pause the running container.\n+ if err := cont.Pause(); err != nil {\n+ t.Errorf(\"error pausing container: %v\", err)\n+ }\n+\n+ // Confirm the status of the container is paused.\n+ if got, want := cont.Status, container.Paused; got != want {\n+ t.Errorf(\"container status got %v, want %v\", got, want)\n+ }\n+}\n+\n// TestCapabilities verifies that:\n// - Running exec as non-root UID and GID will result in an error (because the\n// executable file can't be read).\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/status.go",
"new_path": "runsc/container/status.go",
"diff": "@@ -19,13 +19,17 @@ package container\ntype Status int\nconst (\n- // Creating indicates \"the container is being created\".\n- Creating Status = iota\n-\n// Created indicates \"the runtime has finished the create operation and\n// the container process has neither exited nor executed the\n// user-specified program\".\n- Created\n+ Created Status = iota\n+\n+ // Creating indicates \"the container is being created\".\n+ Creating\n+\n+ // Paused indicates that the process within the container has been\n+ // suspended.\n+ Paused\n// Running indicates \"the container process has executed the\n// user-specified program but has not exited\".\n@@ -39,10 +43,12 @@ const (\n// CLI spec and should not be changed.\nfunc (s Status) String() string {\nswitch s {\n- case Creating:\n- return \"creating\"\ncase Created:\nreturn \"created\"\n+ case Creating:\n+ return \"creating\"\n+ case Paused:\n+ return \"paused\"\ncase Running:\nreturn \"running\"\ncase Stopped:\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -464,6 +464,21 @@ func (s *Sandbox) Checkpoint(cid string, f *os.File) error {\nreturn nil\n}\n+// Pause sends the pause call for a container in the sandbox.\n+func (s *Sandbox) Pause(cid string) error {\n+ log.Debugf(\"Pause sandbox %q\", s.ID)\n+ conn, err := s.connect()\n+ if err != nil {\n+ return err\n+ }\n+ defer conn.Close()\n+\n+ if err := conn.Call(boot.ContainerPause, nil, nil); err != nil {\n+ return fmt.Errorf(\"err pausing container %q: %v\", cid, err)\n+ }\n+ return nil\n+}\n+\n// IsRunning returns true if the sandbox or gofer process is running.\nfunc (s *Sandbox) IsRunning() bool {\nif s.Pid != 0 {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Added code for a pause command for a container process.
Like runc, the pause command will pause the processes of the given container.
It will set that container's status to "paused."
A resume command will be added to unpause and continue running the process.
PiperOrigin-RevId: 200789624
Change-Id: I72a5d7813d90ecfc4d01cc252d6018855016b1ea |
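The heart of the change is a status guard around the pause transition. A toy sketch of that state machine follows; the type and status names are illustrative rather than the actual runsc container types.

package main

import (
	"errors"
	"fmt"
)

type Status int

const (
	Created Status = iota
	Running
	Paused
	Stopped
)

type Container struct {
	ID     string
	Status Status
}

// Pause only succeeds from the Created or Running states.
func (c *Container) Pause() error {
	switch c.Status {
	case Created, Running:
		// The real command would also signal the sandbox before saving state.
		c.Status = Paused
		return nil
	default:
		return errors.New("container not created or running, not pausing")
	}
}

func main() {
	c := &Container{ID: "demo", Status: Running}
	fmt.Println(c.Pause(), c.Status == Paused) // <nil> true
	fmt.Println(c.Pause())                     // error: already paused
}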
260,013 | 17.06.2018 17:05:36 | 25,200 | 563a71ef243360bc20db0e481b3adbfb07cd8702 | Add rpcinet support for control messages.
Add support for control messages, but at this time the only
control message that the sentry will support here is SO_TIMESTAMP. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/socket.go",
"new_path": "pkg/sentry/socket/rpcinet/socket.go",
"diff": "@@ -477,6 +477,37 @@ func rpcRecvMsg(t *kernel.Task, req *pb.SyscallRequest_Recvmsg) (*pb.RecvmsgResp\nreturn res.(*pb.RecvmsgResponse_Payload).Payload, nil\n}\n+// Because we only support SO_TIMESTAMP we will search control messages for\n+// that value and set it if so, all other control messages will be ignored.\n+func (s *socketOperations) extractControlMessages(payload *pb.RecvmsgResponse_ResultPayload) socket.ControlMessages {\n+ c := socket.ControlMessages{}\n+ if len(payload.GetCmsgData()) > 0 {\n+ // Parse the control messages looking for SO_TIMESTAMP.\n+ msgs, e := syscall.ParseSocketControlMessage(payload.GetCmsgData())\n+ if e != nil {\n+ return socket.ControlMessages{}\n+ }\n+ for _, m := range msgs {\n+ if m.Header.Level != linux.SOL_SOCKET || m.Header.Type != linux.SO_TIMESTAMP {\n+ continue\n+ }\n+\n+ // Let's parse the time stamp and set it.\n+ if len(m.Data) < linux.SizeOfTimeval {\n+ // Give up on locating the SO_TIMESTAMP option.\n+ return socket.ControlMessages{}\n+ }\n+\n+ var v linux.Timeval\n+ binary.Unmarshal(m.Data[:linux.SizeOfTimeval], usermem.ByteOrder, &v)\n+ c.IP.HasTimestamp = true\n+ c.IP.Timestamp = v.ToNsecCapped()\n+ break\n+ }\n+ }\n+ return c\n+}\n+\n// RecvMsg implements socket.Socket.RecvMsg.\nfunc (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (int, interface{}, uint32, socket.ControlMessages, *syserr.Error) {\nreq := &pb.SyscallRequest_Recvmsg{&pb.RecvmsgRequest{\n@@ -497,7 +528,8 @@ func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\npanic(\"CopyOut failed to copy full buffer\")\n}\n}\n- return int(res.Length), res.Address.GetAddress(), res.Address.GetLength(), socket.ControlMessages{}, syserr.FromError(e)\n+ c := s.extractControlMessages(res)\n+ return int(res.Length), res.Address.GetAddress(), res.Address.GetLength(), c, syserr.FromError(e)\n}\nif err != syserr.ErrWouldBlock && err != syserr.ErrTryAgain || flags&linux.MSG_DONTWAIT != 0 {\nreturn 0, nil, 0, socket.ControlMessages{}, err\n@@ -520,7 +552,8 @@ func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\npanic(\"CopyOut failed to copy full buffer\")\n}\n}\n- return int(res.Length), res.Address.GetAddress(), res.Address.GetLength(), socket.ControlMessages{}, syserr.FromError(e)\n+ c := s.extractControlMessages(res)\n+ return int(res.Length), res.Address.GetAddress(), res.Address.GetLength(), c, syserr.FromError(e)\n}\nif err != syserr.ErrWouldBlock && err != syserr.ErrTryAgain {\nreturn 0, nil, 0, socket.ControlMessages{}, err\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/syscall_rpc.proto",
"new_path": "pkg/sentry/socket/rpcinet/syscall_rpc.proto",
"diff": "@@ -40,6 +40,7 @@ message RecvmsgRequest {\nbool sender = 3;\nbool peek = 4;\nbool trunc = 5;\n+ uint32 cmsg_length = 6;\n}\nmessage OpenRequest {\n@@ -110,6 +111,7 @@ message RecvmsgResponse {\nbytes data = 1 [ctype = CORD];\nAddressResponse address = 2;\nuint32 length = 3;\n+ bytes cmsg_data = 4;\n}\noneof result {\nuint32 error_number = 1;\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add rpcinet support for control messages.
Add support for control messages, but at this time the only
control message that the sentry will support here is SO_TIMESTAMP.
PiperOrigin-RevId: 200922230
Change-Id: I63a852d9305255625d9df1d989bd46a66e93c446 |
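A hedged sketch of the control-message scan described above: parse the raw cmsg bytes, look for SOL_SOCKET/SO_TIMESTAMP, and decode the timeval payload. It assumes a little-endian Linux host and is not the rpcinet implementation.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"syscall"
	"time"
)

// extractTimestamp scans raw control-message data for SO_TIMESTAMP and
// returns the decoded time if it is present.
func extractTimestamp(cmsgData []byte) (time.Time, bool) {
	msgs, err := syscall.ParseSocketControlMessage(cmsgData)
	if err != nil {
		return time.Time{}, false
	}
	for _, m := range msgs {
		if m.Header.Level != syscall.SOL_SOCKET || m.Header.Type != syscall.SO_TIMESTAMP {
			continue
		}
		var tv syscall.Timeval
		// Assumes a little-endian host (e.g. linux/amd64).
		if err := binary.Read(bytes.NewReader(m.Data), binary.LittleEndian, &tv); err != nil {
			return time.Time{}, false
		}
		return time.Unix(int64(tv.Sec), int64(tv.Usec)*1000), true
	}
	return time.Time{}, false
}

func main() {
	// With no SO_TIMESTAMP control message present, nothing is found.
	ts, ok := extractTimestamp(nil)
	fmt.Println(ts, ok)
}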
259,992 | 18.06.2018 15:17:49 | 25,200 | 7eaca1074bf1a6c0e260727b2d9cbd3c878d2b77 | Add pointer to dated builds in README | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -168,17 +168,19 @@ Note that gVisor can only run on x86\\_64 Linux 3.17+. In addition, gVisor only\nsupports x86\\_64 binaries inside the sandbox (i.e., it cannot run 32-bit\nbinaries).\n-### Download a Nightly Build\n+### Download a Build\nThe easiest way to get `runsc` is from the\n[latest nightly build][runsc-nightly]. After you download the binary, check it\n-against the SHA512 [checksum file][runsc-nightly-sha].\n+against the SHA512 [checksum file][runsc-nightly-sha]. Older builds can be\n+found here:\n+`https://storage.googleapis.com/gvisor/releases/nightly/${yyyy-mm-dd}/runsc` and\n+`https://storage.googleapis.com/gvisor/releases/nightly/${yyyy-mm-dd}/runsc.sha512`\n**It is important to copy this binary to some place that is accessible to all\nusers**, since `runsc` executes itself as user `nobody` to avoid unnecessary\nprivileges. The `/usr/local/bin` directory is a good choice.\n-\n```\nwget https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc\nwget https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc.sha512\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add pointer to dated builds in README
PiperOrigin-RevId: 201068427
Change-Id: If03c8c22907e6ef623f39c8ae5316fdd76cf80cb |
259,991 | 18.06.2018 15:19:36 | 25,200 | 873ec0c414973e829c1570f21d0d2e2a0df681f4 | Modified boot.go to allow for restores.
A file descriptor was added as a flag to boot so a state file can restore a
container that was checkpointed. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/BUILD",
"new_path": "runsc/boot/BUILD",
"diff": "@@ -51,6 +51,7 @@ go_library(\n\"//pkg/sentry/socket/netlink\",\n\"//pkg/sentry/socket/netlink/route\",\n\"//pkg/sentry/socket/unix\",\n+ \"//pkg/sentry/state\",\n\"//pkg/sentry/strace\",\n\"//pkg/sentry/syscalls/linux\",\n\"//pkg/sentry/time\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -18,6 +18,7 @@ package boot\nimport (\n\"fmt\"\n\"math/rand\"\n+ \"os\"\n\"runtime\"\n\"sync/atomic\"\n\"syscall\"\n@@ -35,6 +36,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform/kvm\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform/ptrace\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/sighandling\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/state\"\nslinux \"gvisor.googlesource.com/gvisor/pkg/sentry/syscalls/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/time\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/watchdog\"\n@@ -90,7 +92,7 @@ func init() {\n}\n// New initializes a new kernel loader configured by spec.\n-func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console bool) (*Loader, error) {\n+func New(spec *specs.Spec, conf *Config, controllerFD, restoreFD int, ioFDs []int, console bool) (*Loader, error) {\n// Create kernel and platform.\np, err := createPlatform(conf)\nif err != nil {\n@@ -165,6 +167,19 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\n// Run().\nnetworkStack := newEmptyNetworkStack(conf, k)\n+ // Check if we need to restore the kernel\n+ if restoreFD != -1 {\n+ restoreFile := os.NewFile(uintptr(restoreFD), \"restore_file\")\n+ defer restoreFile.Close()\n+\n+ // Load the state.\n+ loadOpts := state.LoadOpts{\n+ Source: restoreFile,\n+ }\n+ if err := loadOpts.Load(k, p, networkStack); err != nil {\n+ return nil, err\n+ }\n+ } else {\n// Initiate the Kernel object, which is required by the Context passed\n// to createVFS in order to mount (among other things) procfs.\nif err = k.Init(kernel.InitKernelArgs{\n@@ -180,6 +195,7 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\n}); err != nil {\nreturn nil, fmt.Errorf(\"error initializing kernel: %v\", err)\n}\n+ }\n// Turn on packet logging if enabled.\nif conf.LogPackets {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader_test.go",
"new_path": "runsc/boot/loader_test.go",
"diff": "@@ -59,7 +59,7 @@ func createLoader() (*Loader, error) {\nFileAccess: FileAccessDirect,\nDisableSeccomp: true,\n}\n- return New(testSpec(), conf, fd, nil, false)\n+ return New(testSpec(), conf, fd, -1, nil, false)\n}\n// TestRun runs a simple application in a sandbox and checks that it succeeds.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/boot.go",
"new_path": "runsc/cmd/boot.go",
"diff": "@@ -48,6 +48,9 @@ type Boot struct {\n// applyCaps determines if capabilities defined in the spec should be applied\n// to the process.\napplyCaps bool\n+\n+ // restoreFD is the file descriptor to the state file to be restored.\n+ restoreFD int\n}\n// Name implements subcommands.Command.Name.\n@@ -72,6 +75,7 @@ func (b *Boot) SetFlags(f *flag.FlagSet) {\nf.Var(&b.ioFDs, \"io-fds\", \"list of FDs to connect 9P clients. They must follow this order: root first, then mounts as defined in the spec\")\nf.BoolVar(&b.console, \"console\", false, \"set to true if the sandbox should allow terminal ioctl(2) syscalls\")\nf.BoolVar(&b.applyCaps, \"apply-caps\", false, \"if true, apply capabilities defined in the spec to the process\")\n+ f.IntVar(&b.restoreFD, \"restore-fd\", -1, \"FD of the state file to be restored\")\n}\n// Execute implements subcommands.Command.Execute. It starts a sandbox in a\n@@ -127,7 +131,7 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n}\n// Create the loader.\n- l, err := boot.New(spec, conf, b.controllerFD, b.ioFDs.GetArray(), b.console)\n+ l, err := boot.New(spec, conf, b.controllerFD, b.restoreFD, b.ioFDs.GetArray(), b.console)\nif err != nil {\nFatalf(\"error creating loader: %v\", err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Modified boot.go to allow for restores.
A file descriptor was added as a flag to boot so a state file can restore a
container that was checkpointed.
PiperOrigin-RevId: 201068699
Change-Id: I18e96069488ffa3add468861397f3877725544aa |
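The new restore-fd flag leans on a standard Go pattern: the parent passes an already-open file through cmd.ExtraFiles and the child rebuilds an *os.File from the inherited descriptor number. A generic sketch of both sides (the file contents and FD numbers are illustrative):

package main

import (
	"fmt"
	"io"
	"os"
	"os/exec"
)

func main() {
	if os.Getenv("CHILD") != "" {
		// Child side: ExtraFiles[0] always arrives as descriptor 3.
		f := os.NewFile(uintptr(3), "state_file")
		defer f.Close()
		data, _ := io.ReadAll(f)
		fmt.Printf("child read %d bytes of state\n", len(data))
		return
	}

	// Parent side: open the state file and hand it to the child process.
	stateFile, err := os.CreateTemp("", "state")
	if err != nil {
		panic(err)
	}
	defer os.Remove(stateFile.Name())
	stateFile.WriteString("checkpoint image bytes")
	stateFile.Seek(0, io.SeekStart)

	cmd := exec.Command(os.Args[0]) // re-exec ourselves as the child
	cmd.Env = append(os.Environ(), "CHILD=1")
	cmd.Stdout = os.Stdout
	cmd.ExtraFiles = []*os.File{stateFile} // becomes FD 3 in the child
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}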
259,948 | 19.06.2018 11:04:05 | 25,200 | 5581256f879f4249de5ebffddaf0626fcb39eebd | state: include I/O and protobuf time in kernel S/R timing stats. | [
{
"change_type": "MODIFY",
"old_path": "pkg/state/decode.go",
"new_path": "pkg/state/decode.go",
"diff": "@@ -78,12 +78,11 @@ func (os *objectState) checkComplete(stats *Stats) {\nif os.blockedBy > 0 {\nreturn\n}\n+ stats.Start(os.obj)\n// Fire all callbacks.\nfor _, fn := range os.callbacks {\n- stats.Start(os.obj)\nfn()\n- stats.Done()\n}\nos.callbacks = nil\n@@ -93,6 +92,7 @@ func (os *objectState) checkComplete(stats *Stats) {\nother.checkComplete(stats)\n}\nos.blocking = nil\n+ stats.Done()\n}\n// waitFor queues a dependency on the given object.\n@@ -329,6 +329,7 @@ func (ds *decodeState) decodeInterface(os *objectState, obj reflect.Value, i *pb\n// decodeObject decodes a object value.\nfunc (ds *decodeState) decodeObject(os *objectState, obj reflect.Value, object *pb.Object, format string, param interface{}) {\nds.push(false, format, param)\n+ ds.stats.Add(obj)\nds.stats.Start(obj)\nswitch x := object.GetValue().(type) {\n@@ -466,12 +467,14 @@ func (ds *decodeState) Deserialize(obj reflect.Value) {\n// See above, we never process objects while we have no outstanding\n// interests (other than the very first object).\nfor id := uint64(1); ds.outstanding > 0; id++ {\n+ os := ds.lookup(id)\n+ ds.stats.Start(os.obj)\n+\no, err := ds.readObject()\nif err != nil {\npanic(err)\n}\n- os := ds.lookup(id)\nif os != nil {\n// Decode the object.\nds.from = &os.path\n@@ -483,6 +486,8 @@ func (ds *decodeState) Deserialize(obj reflect.Value) {\n// registered.\nds.deferred[id] = o\n}\n+\n+ ds.stats.Done()\n}\n// Check the zero-length header at the end.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/state/encode.go",
"new_path": "pkg/state/encode.go",
"diff": "@@ -241,6 +241,7 @@ func (es *encodeState) encodeInterface(obj reflect.Value) *pb.Interface {\n// If mapAsValue is true, then a map will be encoded directly.\nfunc (es *encodeState) encodeObject(obj reflect.Value, mapAsValue bool, format string, param interface{}) (object *pb.Object) {\nes.push(false, format, param)\n+ es.stats.Add(obj)\nes.stats.Start(obj)\nswitch obj.Kind() {\n@@ -354,10 +355,13 @@ func (es *encodeState) Serialize(obj reflect.Value) {\n// Pop off the list until we're done.\nfor es.pending.Len() > 0 {\ne := es.pending.Front()\n- es.pending.Remove(e)\n// Extract the queued object.\nqo := e.Value.(queuedObject)\n+ es.stats.Start(qo.obj)\n+\n+ es.pending.Remove(e)\n+\nes.from = &qo.path\no := es.encodeObject(qo.obj, true, \"\", nil)\n@@ -368,6 +372,7 @@ func (es *encodeState) Serialize(obj reflect.Value) {\n// Mark as done.\nes.done.PushBack(e)\n+ es.stats.Done()\n}\n// Write a zero-length terminal at the end; this is a sanity check\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/state/stats.go",
"new_path": "pkg/state/stats.go",
"diff": "@@ -44,20 +44,28 @@ type Stats struct {\nlast time.Time\n}\n-// sample adds the given number of samples to the given object.\n-func (s *Stats) sample(typ reflect.Type, count uint) {\n+// sample adds the samples to the given object.\n+func (s *Stats) sample(typ reflect.Type) {\n+ now := time.Now()\n+ s.byType[typ].total += now.Sub(s.last)\n+ s.last = now\n+}\n+\n+// Add adds a sample count.\n+func (s *Stats) Add(obj reflect.Value) {\n+ if s == nil {\n+ return\n+ }\nif s.byType == nil {\ns.byType = make(map[reflect.Type]*statEntry)\n}\n+ typ := obj.Type()\nentry, ok := s.byType[typ]\nif !ok {\nentry = new(statEntry)\ns.byType[typ] = entry\n}\n- now := time.Now()\n- entry.count += count\n- entry.total += now.Sub(s.last)\n- s.last = now\n+ entry.count++\n}\n// Start starts a sample.\n@@ -67,7 +75,7 @@ func (s *Stats) Start(obj reflect.Value) {\n}\nif len(s.stack) > 0 {\nlast := s.stack[len(s.stack)-1]\n- s.sample(last, 0)\n+ s.sample(last)\n} else {\n// First time sample.\ns.last = time.Now()\n@@ -81,7 +89,7 @@ func (s *Stats) Done() {\nreturn\n}\nlast := s.stack[len(s.stack)-1]\n- s.sample(last, 1)\n+ s.sample(last)\ns.stack = s.stack[:len(s.stack)-1]\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | state: include I/O and protobuf time in kernel S/R timing stats.
PiperOrigin-RevId: 201205733
Change-Id: I300307b0668989ba7776ab9e3faee71efdd33f46 |
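The commit works by moving the Start/Done sample points, and the underlying accounting is simple: time between samples is charged to whatever is on top of a stack, so anything done between Start and Done (including I/O and protobuf work) lands in that entry's total. A rough, purely illustrative sketch:

package main

import (
	"fmt"
	"time"
)

type stats struct {
	totals map[string]time.Duration
	stack  []string
	last   time.Time
}

// charge attributes time since the previous sample to the top of the stack.
func (s *stats) charge() {
	now := time.Now()
	if len(s.stack) > 0 {
		s.totals[s.stack[len(s.stack)-1]] += now.Sub(s.last)
	}
	s.last = now
}

func (s *stats) Start(name string) {
	s.charge()
	s.stack = append(s.stack, name)
}

func (s *stats) Done() {
	s.charge()
	s.stack = s.stack[:len(s.stack)-1]
}

func main() {
	s := &stats{totals: map[string]time.Duration{}}
	s.Start("encode")
	time.Sleep(10 * time.Millisecond)
	s.Start("io") // nested work is charged to "io", not "encode"
	time.Sleep(5 * time.Millisecond)
	s.Done()
	s.Done()
	fmt.Println(s.totals["encode"] > 0, s.totals["io"] > 0) // true true
}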
260,013 | 19.06.2018 14:11:58 | 25,200 | bda2a1ed3503699b8cb814bb3cc7ad0b9694155b | Rpcinet is racy around shutdown flags.
Correct a data race in rpcinet where a shutdown and recvmsg can
race around shutdown flags. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/socket.go",
"new_path": "pkg/sentry/socket/rpcinet/socket.go",
"diff": "package rpcinet\nimport (\n+ \"sync/atomic\"\n\"syscall\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n@@ -57,7 +58,7 @@ type socketOperations struct {\n// shState is the state of the connection with respect to shutdown. Because\n// we're mixing non-blocking semantics on the other side we have to adapt for\n// some strange differences between blocking and non-blocking sockets.\n- shState tcpip.ShutdownFlags\n+ shState int32\n}\n// Verify that we actually implement socket.Socket.\n@@ -105,26 +106,35 @@ func translateIOSyscallError(err error) error {\n// setShutdownFlags will set the shutdown flag so we can handle blocking reads\n// after a read shutdown.\nfunc (s *socketOperations) setShutdownFlags(how int) {\n+ var f tcpip.ShutdownFlags\nswitch how {\ncase linux.SHUT_RD:\n- s.shState |= tcpip.ShutdownRead\n+ f = tcpip.ShutdownRead\ncase linux.SHUT_WR:\n- s.shState |= tcpip.ShutdownWrite\n+ f = tcpip.ShutdownWrite\ncase linux.SHUT_RDWR:\n- s.shState |= tcpip.ShutdownWrite | tcpip.ShutdownRead\n+ f = tcpip.ShutdownWrite | tcpip.ShutdownRead\n+ }\n+\n+ // Atomically update the flags.\n+ for {\n+ old := atomic.LoadInt32(&s.shState)\n+ if atomic.CompareAndSwapInt32(&s.shState, old, old|int32(f)) {\n+ break\n+ }\n}\n}\nfunc (s *socketOperations) resetShutdownFlags() {\n- s.shState = 0\n+ atomic.StoreInt32(&s.shState, 0)\n}\nfunc (s *socketOperations) isShutRdSet() bool {\n- return s.shState&tcpip.ShutdownRead != 0\n+ return atomic.LoadInt32(&s.shState)&int32(tcpip.ShutdownRead) != 0\n}\nfunc (s *socketOperations) isShutWrSet() bool {\n- return s.shState&tcpip.ShutdownWrite != 0\n+ return atomic.LoadInt32(&s.shState)&int32(tcpip.ShutdownWrite) != 0\n}\n// Release implements fs.FileOperations.Release.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Rpcinet is racy around shutdown flags.
Correct a data race in rpcinet where a shutdown and recvmsg can
race around shutdown flags.
PiperOrigin-RevId: 201238366
Change-Id: I5eb06df4a2b4eba331eeb5de19076213081d581f |
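The race fix reduces to OR-ing bits into a shared int32 atomically. A minimal standalone sketch of that compare-and-swap loop and the matching atomic reads (the constant and type names are made up):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const (
	shutRead  int32 = 1 << 0
	shutWrite int32 = 1 << 1
)

type sock struct {
	shState int32 // accessed only via sync/atomic
}

// setFlags ORs f into shState without losing concurrent updates.
func (s *sock) setFlags(f int32) {
	for {
		old := atomic.LoadInt32(&s.shState)
		if atomic.CompareAndSwapInt32(&s.shState, old, old|f) {
			return
		}
	}
}

func (s *sock) isShutRead() bool {
	return atomic.LoadInt32(&s.shState)&shutRead != 0
}

func main() {
	var s sock
	var wg sync.WaitGroup
	for _, f := range []int32{shutRead, shutWrite} {
		wg.Add(1)
		go func(f int32) { defer wg.Done(); s.setFlags(f) }(f)
	}
	wg.Wait()
	fmt.Println(s.isShutRead(), atomic.LoadInt32(&s.shState) == shutRead|shutWrite) // true true
}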
259,991 | 19.06.2018 15:22:23 | 25,200 | a6dbef045ff684e92f472280eb6f7f688b9bc87a | Added a resume command to unpause a paused container.
Resume checks the status of the container and unpauses the kernel
if its status is paused. Otherwise nothing happens.
Tests were added to ensure that the process is in the correct state
after various commands. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/control/proc.go",
"new_path": "pkg/sentry/control/proc.go",
"diff": "@@ -18,6 +18,7 @@ import (\n\"bytes\"\n\"encoding/json\"\n\"fmt\"\n+ \"sort\"\n\"syscall\"\n\"text/tabwriter\"\n\"time\"\n@@ -245,6 +246,7 @@ func Processes(k *kernel.Kernel, out *[]*Process) error {\nCmd: tg.Leader().Name(),\n})\n}\n+ sort.Slice(*out, func(i, j int) bool { return (*out)[i].PID < (*out)[j].PID })\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -44,6 +44,9 @@ const (\n// processes running in a container.\nContainerProcesses = \"containerManager.Processes\"\n+ // ContainerResume unpauses the paused container.\n+ ContainerResume = \"containerManager.Resume\"\n+\n// ContainerSignal is used to send a signal to a container.\nContainerSignal = \"containerManager.Signal\"\n@@ -156,12 +159,18 @@ func (cm *containerManager) Checkpoint(o *control.SaveOpts, _ *struct{}) error {\nreturn state.Save(o, nil)\n}\n-// Pause suspends the process in a container.\n+// Pause suspends a container.\nfunc (cm *containerManager) Pause(_, _ *struct{}) error {\ncm.k.Pause()\nreturn nil\n}\n+// Resume unpauses a container.\n+func (cm *containerManager) Resume(_, _ *struct{}) error {\n+ cm.k.Unpause()\n+ return nil\n+}\n+\n// Wait waits for the init process in the given container.\nfunc (cm *containerManager) Wait(cid *string, waitStatus *uint32) error {\n// TODO: Use the cid and wait on the init process in that\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/BUILD",
"new_path": "runsc/cmd/BUILD",
"diff": "@@ -20,6 +20,7 @@ go_library(\n\"pause.go\",\n\"ps.go\",\n\"restore.go\",\n+ \"resume.go\",\n\"run.go\",\n\"start.go\",\n\"state.go\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/cmd/resume.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package cmd\n+\n+import (\n+ \"context\"\n+ \"flag\"\n+ \"github.com/google/subcommands\"\n+ \"gvisor.googlesource.com/gvisor/runsc/boot\"\n+ \"gvisor.googlesource.com/gvisor/runsc/container\"\n+)\n+\n+// Resume implements subcommands.Command for the \"resume\" command.\n+type Resume struct{}\n+\n+// Name implements subcommands.Command.Name.\n+func (*Resume) Name() string {\n+ return \"resume\"\n+}\n+\n+// Synopsis implements subcommands.Command.Synopsis.\n+func (*Resume) Synopsis() string {\n+ return \"Resume unpauses a paused container\"\n+}\n+\n+// Usage implements subcommands.Command.Usage.\n+func (*Resume) Usage() string {\n+ return `resume <container id> - resume a paused container.\n+`\n+}\n+\n+// SetFlags implements subcommands.Command.SetFlags.\n+func (r *Resume) SetFlags(f *flag.FlagSet) {\n+}\n+\n+// Execute implements subcommands.Command.Execute.\n+func (r *Resume) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {\n+ if f.NArg() != 1 {\n+ f.Usage()\n+ return subcommands.ExitUsageError\n+ }\n+\n+ id := f.Arg(0)\n+ conf := args[0].(*boot.Config)\n+\n+ cont, err := container.Load(conf.RootDir, id)\n+ if err != nil {\n+ Fatalf(\"error loading container: %v\", err)\n+ }\n+\n+ if err := cont.Resume(); err != nil {\n+ Fatalf(\"resume failed: %v\", err)\n+ }\n+\n+ return subcommands.ExitSuccess\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -361,8 +361,23 @@ func (c *Container) Pause() error {\nc.Status = Paused\nreturn c.save()\ndefault:\n- log.Warningf(\"container %q not created or running, not pausing\", c.ID)\n- return nil\n+ return fmt.Errorf(\"container %q not created or running, not pausing\", c.ID)\n+ }\n+}\n+\n+// Resume unpauses the container and its kernel.\n+// The call only succeeds if the container's status is paused.\n+func (c *Container) Resume() error {\n+ log.Debugf(\"Resuming container %q\", c.ID)\n+ switch c.Status {\n+ case Paused:\n+ if err := c.Sandbox.Resume(c.ID); err != nil {\n+ return fmt.Errorf(\"error resuming container: %v\", err)\n+ }\n+ c.Status = Running\n+ return c.save()\n+ default:\n+ return fmt.Errorf(\"container %q not paused, not resuming\", c.ID)\n}\n}\n@@ -380,7 +395,7 @@ func (c *Container) State() specs.State {\n// Processes retrieves the list of processes and associated metadata inside a\n// container.\nfunc (c *Container) Processes() ([]*control.Process, error) {\n- if c.Status != Running {\n+ if c.Status != Running && c.Status != Paused {\nreturn nil, fmt.Errorf(\"cannot get processes of container %q because it isn't running. It is in state %v\", c.ID, c.Status)\n}\nreturn c.Sandbox.Processes(c.ID)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -84,6 +84,19 @@ func procListsEqual(got, want []*control.Process) bool {\nreturn true\n}\n+// getAndCheckProcLists is similar to waitForProcessList, but does not wait and retry the\n+// test for equality. This is because we already confirmed that exec occurred.\n+func getAndCheckProcLists(cont *container.Container, want []*control.Process) error {\n+ got, err := cont.Processes()\n+ if err != nil {\n+ return fmt.Errorf(\"error getting process data from container: %v\", err)\n+ }\n+ if procListsEqual(got, want) {\n+ return nil\n+ }\n+ return fmt.Errorf(\"container got process list: %s, want: %s\", procListToString(got), procListToString(want))\n+}\n+\nfunc procListToString(pl []*control.Process) string {\nstrs := make([]string, 0, len(pl))\nfor _, p := range pl {\n@@ -459,11 +472,14 @@ func TestCheckpoint(t *testing.T) {\n}\n}\n-// TestPause tests that calling pause successfully pauses the container.\n-// It checks that no errors are returned and that the state of the container\n-// is in fact 'Paused.'\n-func TestPause(t *testing.T) {\n- spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n+// TestPauseResume tests that we can successfully pause and resume a container.\n+// It checks starts running sleep and executes another sleep. It pauses and checks\n+// that both processes are still running: sleep will be paused and still exist.\n+// It will then unpause and confirm that both processes are running. Then it will\n+// wait until one sleep completes and check to make sure the other is running.\n+func TestPauseResume(t *testing.T) {\n+ const uid = 343\n+ spec := testutil.NewSpecWithArgs(\"sleep\", \"20\")\nrootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\nif err != nil {\n@@ -482,15 +498,139 @@ func TestPause(t *testing.T) {\nt.Fatalf(\"error starting container: %v\", err)\n}\n+ // expectedPL lists the expected process state of the container.\n+ expectedPL := []*control.Process{\n+ {\n+ UID: 0,\n+ PID: 1,\n+ PPID: 0,\n+ C: 0,\n+ Cmd: \"sleep\",\n+ },\n+ {\n+ UID: uid,\n+ PID: 2,\n+ PPID: 0,\n+ C: 0,\n+ Cmd: \"sleep\",\n+ },\n+ }\n+\n+ execArgs := control.ExecArgs{\n+ Filename: \"/bin/sleep\",\n+ Argv: []string{\"sleep\", \"5\"},\n+ Envv: []string{\"PATH=\" + os.Getenv(\"PATH\")},\n+ WorkingDirectory: \"/\",\n+ KUID: uid,\n+ }\n+\n+ // First, start running exec (whick blocks).\n+ go cont.Execute(&execArgs)\n+\n+ // Verify that \"sleep 5\" is running.\n+ if err := waitForProcessList(cont, expectedPL); err != nil {\n+ t.Fatal(err)\n+ }\n+\n// Pause the running container.\nif err := cont.Pause(); err != nil {\nt.Errorf(\"error pausing container: %v\", err)\n}\n+ if got, want := cont.Status, container.Paused; got != want {\n+ t.Errorf(\"container status got %v, want %v\", got, want)\n+ }\n+\n+ time.Sleep(10 * time.Second)\n+\n+ // Verify that the two processes still exist. 
Sleep 5 is paused so\n+ // it should still be in the process list after 10 seconds.\n+ if err := getAndCheckProcLists(cont, expectedPL); err != nil {\n+ t.Fatal(err)\n+ }\n+\n+ // Resume the running container.\n+ if err := cont.Resume(); err != nil {\n+ t.Errorf(\"error pausing container: %v\", err)\n+ }\n+ if got, want := cont.Status, container.Running; got != want {\n+ t.Errorf(\"container status got %v, want %v\", got, want)\n+ }\n- // Confirm the status of the container is paused.\n+ if err := getAndCheckProcLists(cont, expectedPL); err != nil {\n+ t.Fatal(err)\n+ }\n+\n+ expectedPL2 := []*control.Process{\n+ {\n+ UID: 0,\n+ PID: 1,\n+ PPID: 0,\n+ C: 0,\n+ Cmd: \"sleep\",\n+ },\n+ }\n+\n+ // Verify there is only one process left since we waited 10 at most seconds for\n+ // sleep 5 to end.\n+ if err := waitForProcessList(cont, expectedPL2); err != nil {\n+ t.Fatal(err)\n+ }\n+}\n+\n+// TestPauseResumeStatus makes sure that the statuses are set correctly\n+// with calls to pause and resume and that pausing and resuming only\n+// occurs given the correct state.\n+func TestPauseResumeStatus(t *testing.T) {\n+ spec := testutil.NewSpecWithArgs(\"sleep\", \"20\")\n+\n+ rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ defer os.RemoveAll(bundleDir)\n+\n+ // Create and start the container.\n+ cont, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer cont.Destroy()\n+ if err := cont.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+\n+ // Pause the running container.\n+ if err := cont.Pause(); err != nil {\n+ t.Errorf(\"error pausing container: %v\", err)\n+ }\nif got, want := cont.Status, container.Paused; got != want {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n+\n+ // Try to Pause again. Should cause error.\n+ if err := cont.Pause(); err == nil {\n+ t.Errorf(\"error pausing container that was already paused: %v\", err)\n+ }\n+ if got, want := cont.Status, container.Paused; got != want {\n+ t.Errorf(\"container status got %v, want %v\", got, want)\n+ }\n+\n+ // Resume the running container.\n+ if err := cont.Resume(); err != nil {\n+ t.Errorf(\"error resuming container: %v\", err)\n+ }\n+ if got, want := cont.Status, container.Running; got != want {\n+ t.Errorf(\"container status got %v, want %v\", got, want)\n+ }\n+\n+ // Try to resume again. Should cause error.\n+ if err := cont.Resume(); err == nil {\n+ t.Errorf(\"error resuming container already running: %v\", err)\n+ }\n+ if got, want := cont.Status, container.Running; got != want {\n+ t.Errorf(\"container status got %v, want %v\", got, want)\n+ }\n}\n// TestCapabilities verifies that:\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/main.go",
"new_path": "runsc/main.go",
"diff": "@@ -76,7 +76,9 @@ func main() {\nsubcommands.Register(new(cmd.Gofer), \"\")\nsubcommands.Register(new(cmd.Kill), \"\")\nsubcommands.Register(new(cmd.List), \"\")\n+ subcommands.Register(new(cmd.Pause), \"\")\nsubcommands.Register(new(cmd.PS), \"\")\n+ subcommands.Register(new(cmd.Resume), \"\")\nsubcommands.Register(new(cmd.Run), \"\")\nsubcommands.Register(new(cmd.Start), \"\")\nsubcommands.Register(new(cmd.State), \"\")\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -477,6 +477,21 @@ func (s *Sandbox) Pause(cid string) error {\nreturn nil\n}\n+// Resume sends the resume call for a container in the sandbox.\n+func (s *Sandbox) Resume(cid string) error {\n+ log.Debugf(\"Resume sandbox %q\", s.ID)\n+ conn, err := s.connect()\n+ if err != nil {\n+ return err\n+ }\n+ defer conn.Close()\n+\n+ if err := conn.Call(boot.ContainerResume, nil, nil); err != nil {\n+ return fmt.Errorf(\"err resuming container %q: %v\", cid, err)\n+ }\n+ return nil\n+}\n+\n// IsRunning returns true if the sandbox or gofer process is running.\nfunc (s *Sandbox) IsRunning() bool {\nif s.Pid != 0 {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Added a resume command to unpause a paused container.
Resume checks the status of the container and unpauses the kernel
if its status is paused. Otherwise nothing happens.
Tests were added to ensure that the process is in the correct state
after various commands.
PiperOrigin-RevId: 201251234
Change-Id: Ifd11b336c33b654fea6238738f864fcf2bf81e19 |
259,948 | 19.06.2018 16:07:08 | 25,200 | aa14a2c1be7f705927e9558f0e46ceca159e23e6 | sentry: futex S/R optimization.
No need to save thousands of zerovalue buckets. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/futex/futex.go",
"new_path": "pkg/sentry/kernel/futex/futex.go",
"diff": "@@ -197,7 +197,7 @@ func bucketIndexForAddr(addr uintptr) uintptr {\n// Manager holds futex state for a single virtual address space.\ntype Manager struct {\n- buckets [bucketCount]bucket\n+ buckets [bucketCount]bucket `state:\"zerovalue\"`\n}\n// NewManager returns an initialized futex manager.\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: futex S/R optimization.
No need to save thousands of zerovalue buckets.
PiperOrigin-RevId: 201258598
Change-Id: I5d3ea7b6a5345117ab4f610332d5288ca550be33 |
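The single-line change works because the state machinery reads struct tags via reflection and can skip fields tagged as zero-value when saving. A generic sketch of tag-driven field selection; the tag name mimics the idea, and the serializer here is hypothetical.

package main

import (
	"fmt"
	"reflect"
)

type manager struct {
	buckets [4]int `state:"zerovalue"` // large, but always zero on save
	count   int
}

// fieldsToSave lists the fields a hypothetical serializer would encode,
// skipping anything tagged state:"zerovalue".
func fieldsToSave(v interface{}) []string {
	t := reflect.TypeOf(v)
	var out []string
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if f.Tag.Get("state") == "zerovalue" {
			continue
		}
		out = append(out, f.Name)
	}
	return out
}

func main() {
	fmt.Println(fieldsToSave(manager{})) // [count]
}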
259,891 | 19.06.2018 17:03:55 | 25,200 | 33f29c730f46aacb56cb7710c31d19dbe0d5ff3f | runsc: Fix flakey container_test.
Verified that this is no longer flakey over 10K repetitions. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -164,6 +164,7 @@ func TestLifecycle(t *testing.T) {\nif _, err := container.Create(id, spec, conf, bundleDir, \"\", \"\"); err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n+\n// Load the container from disk and check the status.\ns, err := container.Load(rootDir, id)\nif err != nil {\n@@ -206,14 +207,17 @@ func TestLifecycle(t *testing.T) {\ngo func() {\nws, err := s.Wait()\nif err != nil {\n- t.Errorf(\"error waiting on container: %v\", err)\n+ t.Fatalf(\"error waiting on container: %v\", err)\n}\nif got, want := ws.Signal(), syscall.SIGTERM; got != want {\n- t.Errorf(\"got signal %v, want %v\", got, want)\n+ t.Fatalf(\"got signal %v, want %v\", got, want)\n}\nwg.Done()\n}()\n+ // Wait a bit to ensure that we've started waiting on the container\n+ // before we signal.\n+ time.Sleep(5 * time.Second)\n// Send the container a SIGTERM which will cause it to stop.\nif err := s.Signal(syscall.SIGTERM); err != nil {\nt.Fatalf(\"error sending signal %v to container: %v\", syscall.SIGTERM, err)\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: Fix flakey container_test.
Verified that this is no longer flakey over 10K repetitions.
PiperOrigin-RevId: 201267499
Change-Id: I793c916fe725412aec25953f764cb4f52c9fbed3 |
259,948 | 19.06.2018 17:12:48 | 25,200 | 18d899245329daf472c322c81af356958b3e2613 | state: pretty-print primitive type arrays. | [
{
"change_type": "MODIFY",
"old_path": "pkg/state/printer.go",
"new_path": "pkg/state/printer.go",
"diff": "@@ -18,13 +18,15 @@ import (\n\"fmt\"\n\"io\"\n\"io/ioutil\"\n+ \"reflect\"\n\"strings\"\n\"github.com/golang/protobuf/proto\"\npb \"gvisor.googlesource.com/gvisor/pkg/state/object_go_proto\"\n)\n-// format formats a single object, for pretty-printing.\n+// format formats a single object, for pretty-printing. It also returns whether\n+// the value is a non-zero value.\nfunc format(graph uint64, depth int, object *pb.Object, html bool) (string, bool) {\nswitch x := object.GetValue().(type) {\ncase *pb.Object_BoolValue:\n@@ -76,7 +78,7 @@ func format(graph uint64, depth int, object *pb.Object, html bool) (string, bool\n}\n}\nif len(zeros) > 0 {\n- items = append(items, fmt.Sprintf(\"\\t... (%d zero),\", len(zeros)))\n+ items = append(items, fmt.Sprintf(\"\\t... (%d zeros),\", len(zeros)))\n}\nitems = append(items, \"]\")\nreturn strings.Join(items, tabs), len(zeros) < len(x.ArrayValue.Contents)\n@@ -115,6 +117,30 @@ func format(graph uint64, depth int, object *pb.Object, html bool) (string, bool\n}\nelement, _ := format(graph, depth+1, x.InterfaceValue.Value, html)\nreturn fmt.Sprintf(\"interface(\\\"%s\\\"){%s}\", x.InterfaceValue.Type, element), true\n+ case *pb.Object_ByteArrayValue:\n+ return printArray(reflect.ValueOf(x.ByteArrayValue))\n+ case *pb.Object_Uint16ArrayValue:\n+ return printArray(reflect.ValueOf(x.Uint16ArrayValue.Values))\n+ case *pb.Object_Uint32ArrayValue:\n+ return printArray(reflect.ValueOf(x.Uint32ArrayValue.Values))\n+ case *pb.Object_Uint64ArrayValue:\n+ return printArray(reflect.ValueOf(x.Uint64ArrayValue.Values))\n+ case *pb.Object_UintptrArrayValue:\n+ return printArray(castSlice(reflect.ValueOf(x.UintptrArrayValue.Values), reflect.TypeOf(uintptr(0))))\n+ case *pb.Object_Int8ArrayValue:\n+ return printArray(castSlice(reflect.ValueOf(x.Int8ArrayValue.Values), reflect.TypeOf(int8(0))))\n+ case *pb.Object_Int16ArrayValue:\n+ return printArray(reflect.ValueOf(x.Int16ArrayValue.Values))\n+ case *pb.Object_Int32ArrayValue:\n+ return printArray(reflect.ValueOf(x.Int32ArrayValue.Values))\n+ case *pb.Object_Int64ArrayValue:\n+ return printArray(reflect.ValueOf(x.Int64ArrayValue.Values))\n+ case *pb.Object_BoolArrayValue:\n+ return printArray(reflect.ValueOf(x.BoolArrayValue.Values))\n+ case *pb.Object_Float64ArrayValue:\n+ return printArray(reflect.ValueOf(x.Float64ArrayValue.Values))\n+ case *pb.Object_Float32ArrayValue:\n+ return printArray(reflect.ValueOf(x.Float32ArrayValue.Values))\n}\n// Should not happen, but tolerate.\n@@ -186,3 +212,40 @@ func PrettyPrint(w io.Writer, r io.Reader, html bool) error {\nreturn nil\n}\n+\n+func printArray(s reflect.Value) (string, bool) {\n+ zero := reflect.Zero(s.Type().Elem()).Interface()\n+ z := \"0\"\n+ switch s.Type().Elem().Kind() {\n+ case reflect.Bool:\n+ z = \"false\"\n+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n+ case reflect.Float32, reflect.Float64:\n+ default:\n+ return fmt.Sprintf(\"unexpected non-primitive type array: %#v\", s.Interface()), true\n+ }\n+\n+ zeros := 0\n+ items := make([]string, 0, s.Len())\n+ for i := 0; i <= s.Len(); i++ {\n+ if i < s.Len() && reflect.DeepEqual(s.Index(i).Interface(), zero) {\n+ zeros++\n+ continue\n+ }\n+ if zeros > 0 {\n+ if zeros <= 4 {\n+ for ; zeros > 0; zeros-- {\n+ items = append(items, z)\n+ }\n+ } else {\n+ items = append(items, fmt.Sprintf(\"(%d %ss)\", zeros, z))\n+ zeros = 0\n+ }\n+ }\n+ if i < s.Len() {\n+ items = append(items, 
fmt.Sprintf(\"%v\", s.Index(i).Interface()))\n+ }\n+ }\n+ return \"[\" + strings.Join(items, \",\") + \"]\", zeros < s.Len()\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | state: pretty-print primitive type arrays.
PiperOrigin-RevId: 201269072
Change-Id: Ia542c5a42b5b5d21c1104a003ddff5279644d309 |
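A self-contained sketch of the zero-run compression idea in printArray: runs of more than a few zero elements collapse into a single "(N zeros)" marker. This version handles only int slices, to keep it short.

package main

import (
	"fmt"
	"strings"
)

// compactInts renders s, collapsing runs of more than four zeros.
func compactInts(s []int) string {
	var items []string
	zeros := 0
	flush := func() {
		if zeros == 0 {
			return
		}
		if zeros <= 4 {
			for ; zeros > 0; zeros-- {
				items = append(items, "0")
			}
		} else {
			items = append(items, fmt.Sprintf("(%d zeros)", zeros))
			zeros = 0
		}
	}
	for _, v := range s {
		if v == 0 {
			zeros++
			continue
		}
		flush()
		items = append(items, fmt.Sprintf("%d", v))
	}
	flush()
	return "[" + strings.Join(items, ",") + "]"
}

func main() {
	fmt.Println(compactInts([]int{1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 3}))
	// [1,0,0,2,(6 zeros),3]
}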
259,891 | 19.06.2018 17:16:39 | 25,200 | 3ebd0e35f43d9ca282886aabce52fbb7fc7e1fc5 | runsc: Whitelist lstat, as it is now used in specutils.
When running multi-container, child containers are added after the filters have
been installed. Thus, lstat must be in the set of allowed syscalls. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/config.go",
"new_path": "runsc/boot/filter/config.go",
"diff": "@@ -53,6 +53,9 @@ var allowedSyscalls = seccomp.SyscallRules{\nsyscall.SYS_GETTIMEOFDAY: {},\nsyscall.SYS_LISTEN: {},\nsyscall.SYS_LSEEK: {},\n+ // TODO: Remove SYS_LSTAT when executable lookup moves\n+ // into the gofer.\n+ syscall.SYS_LSTAT: {},\nsyscall.SYS_MADVISE: {},\nsyscall.SYS_MINCORE: {},\nsyscall.SYS_MMAP: {},\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: Whitelist lstat, as it is now used in specutils.
When running multi-container, child containers are added after the filters have
been installed. Thus, lstat must be in the set of allowed syscalls.
PiperOrigin-RevId: 201269550
Change-Id: I03f2e6675a53d462ed12a0f651c10049b76d4c52 |
260,013 | 19.06.2018 17:28:19 | 25,200 | db66e383c33228c43efbe16ad3b14ae9833879dc | Epsocket has incorrect recv(2) behavior after SHUT_RD.
After shutdown(SHUT_RD), calls to recv with MSG_DONTWAIT or with
O_NONBLOCK should result in EAGAIN and not 0. Blocking sockets
should return 0 as they would have otherwise blocked indefinitely. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/epsocket/epsocket.go",
"new_path": "pkg/sentry/socket/epsocket/epsocket.go",
"diff": "@@ -952,6 +952,12 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nsenderRequested = false\n}\nn, senderAddr, senderAddrLen, controlMessages, err = s.nonBlockingRead(t, dst, peek, trunc, senderRequested)\n+\n+ if err == syserr.ErrClosedForReceive && flags&linux.MSG_DONTWAIT != 0 {\n+ // In this situation we should return EAGAIN.\n+ return 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n+ }\n+\nif err != syserr.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 {\nreturn\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Epsocket has incorrect recv(2) behavior after SHUT_RD.
After shutdown(SHUT_RD), calls to recv with MSG_DONTWAIT or with
O_NONBLOCK should result in EAGAIN and not 0. Blocking sockets
should return 0 as they would have otherwise blocked indefinitely.
PiperOrigin-RevId: 201271123
Change-Id: If589b69c17fa5b9ff05bcf9e44024da9588c8876 |
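A tiny sketch of the error-translation rule this commit adds: a closed-for-receive condition maps to EAGAIN only when the caller asked for a non-blocking read, while blocking callers keep the usual 0-byte end-of-stream result. The sentinel error and function names are made up.

package main

import (
	"errors"
	"fmt"
	"syscall"
)

var errClosedForReceive = errors.New("endpoint closed for receive")

// translateRecvErr mimics the fix: MSG_DONTWAIT (or O_NONBLOCK) turns a
// closed-for-receive condition into EAGAIN instead of a 0-byte read.
func translateRecvErr(err error, dontWait bool) (int, error) {
	if errors.Is(err, errClosedForReceive) {
		if dontWait {
			return 0, syscall.EAGAIN
		}
		return 0, nil // blocking read: report end of stream
	}
	return 0, err
}

func main() {
	fmt.Println(translateRecvErr(errClosedForReceive, true))  // 0 resource temporarily unavailable
	fmt.Println(translateRecvErr(errClosedForReceive, false)) // 0 <nil>
}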
259,948 | 20.06.2018 11:01:32 | 25,200 | 4e9f0e91d724b547e1ecaeeb210017f4c0b3fd0d | sentry: pending signals S/R optimization.
Almost all of the hundreds of pending signal queues are empty upon save. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/BUILD",
"new_path": "pkg/sentry/kernel/BUILD",
"diff": "@@ -13,7 +13,7 @@ go_stateify(\n\"ipc_namespace.go\",\n\"kernel.go\",\n\"pending_signals.go\",\n- \"pending_signals_list.go\",\n+ \"pending_signals_state.go\",\n\"process_group_list.go\",\n\"ptrace.go\",\n\"rseq.go\",\n@@ -46,7 +46,10 @@ go_stateify(\n\"version.go\",\n],\nout = \"kernel_state.go\",\n- imports = [\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs\"],\n+ imports = [\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/arch\",\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs\",\n+ ],\npackage = \"kernel\",\n)\n@@ -117,6 +120,7 @@ go_library(\n\"kernel_state.go\",\n\"pending_signals.go\",\n\"pending_signals_list.go\",\n+ \"pending_signals_state.go\",\n\"process_group_list.go\",\n\"ptrace.go\",\n\"rseq.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pending_signals.go",
"new_path": "pkg/sentry/kernel/pending_signals.go",
"diff": "@@ -44,11 +44,11 @@ type pendingSignals struct {\n// Note that signals is zero-indexed, but signal 1 is the first valid\n// signal, so signals[0] contains signals with signo 1 etc. This offset is\n// usually handled by using Signal.index().\n- signals [linux.SignalMaximum]pendingSignalQueue\n+ signals [linux.SignalMaximum]pendingSignalQueue `state:\".([]*arch.SignalInfo)\"`\n// Bit i of pendingSet is set iff there is at least one signal with signo\n// i+1 pending.\n- pendingSet linux.SignalSet\n+ pendingSet linux.SignalSet `state:\"manual\"`\n}\n// pendingSignalQueue holds a pendingSignalList for a single signal number.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/kernel/pending_signals_state.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package kernel\n+\n+import (\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n+)\n+\n+// saveSignals is invoked by stateify.\n+func (p *pendingSignals) saveSignals() []*arch.SignalInfo {\n+ var pending []*arch.SignalInfo\n+ for _, q := range p.signals {\n+ for ps := q.pendingSignalList.Front(); ps != nil; ps = ps.Next() {\n+ pending = append(pending, ps.SignalInfo)\n+ }\n+ }\n+ return pending\n+}\n+\n+// loadSignals is invoked by stateify.\n+func (p *pendingSignals) loadSignals(pending []*arch.SignalInfo) {\n+ for _, si := range pending {\n+ p.enqueue(si)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: pending signals S/R optimization.
Almost all of the hundreds of pending signal queues are empty upon save.
PiperOrigin-RevId: 201380318
Change-Id: I40747072435299de681d646e0862efac0637e172 |
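
The new pending_signals_state.go hooks flatten every queued SignalInfo into a single slice on save and simply re-enqueue on load, so the many empty per-signal queues serialize to nothing. The sketch below shows the same save/load shape with stand-in types; `queues`, `item`, `savePending` and `loadPending` are invented names and the code does not use stateify.

```go
package main

import "fmt"

// item stands in for a queued element such as a pending signal.
type item struct {
	signo int
}

// queues stands in for the fixed array of per-signal queues; most of them
// are empty when a checkpoint is taken.
type queues struct {
	q [64][]item
}

// savePending flattens every non-empty queue into one slice, so empty
// queues contribute nothing to the serialized state.
func (s *queues) savePending() []item {
	var pending []item
	for _, q := range s.q {
		pending = append(pending, q...)
	}
	return pending
}

// loadPending rebuilds the per-signal queues by re-enqueuing each saved
// element, mirroring the loadSignals hook in the change above.
func (s *queues) loadPending(pending []item) {
	for _, it := range pending {
		s.q[it.signo] = append(s.q[it.signo], it)
	}
}

func main() {
	var a queues
	a.q[9] = []item{{signo: 9}}
	saved := a.savePending()

	var b queues
	b.loadPending(saved)
	fmt.Println(len(saved), len(b.q[9]))
}
```
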
259,992 | 20.06.2018 13:00:21 | 25,200 | af6f9f56f80027a89ee517b79502ca6183094a39 | Add tool to configure runtime settings in docker
This will be used with the upcoming e2e image tests. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/tools/dockercfg/BUILD",
"diff": "+package(licenses = [\"notice\"]) # Apache 2.0\n+\n+load(\"@io_bazel_rules_go//go:def.bzl\", \"go_binary\")\n+\n+go_binary(\n+ name = \"dockercfg\",\n+ srcs = [\"dockercfg.go\"],\n+ visibility = [\n+ \"//runsc/test:__subpackages__\",\n+ ],\n+ deps = [\"@com_github_google_subcommands//:go_default_library\"],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/tools/dockercfg/dockercfg.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Helper tool to configure Docker daemon.\n+package main\n+\n+import (\n+ \"encoding/json\"\n+ \"fmt\"\n+ \"io/ioutil\"\n+ \"log\"\n+ \"os\"\n+\n+ \"context\"\n+ \"flag\"\n+ \"github.com/google/subcommands\"\n+)\n+\n+var (\n+ configFile = flag.String(\"config_file\", \"/etc/docker/daemon.json\", \"path to Docker daemon config file\")\n+)\n+\n+func main() {\n+ subcommands.Register(subcommands.HelpCommand(), \"\")\n+ subcommands.Register(subcommands.FlagsCommand(), \"\")\n+ subcommands.Register(&runtimeAdd{}, \"\")\n+ subcommands.Register(&runtimeRemove{}, \"\")\n+\n+ // All subcommands must be registered before flag parsing.\n+ flag.Parse()\n+\n+ exitCode := subcommands.Execute(context.Background())\n+ os.Exit(int(exitCode))\n+}\n+\n+type runtime struct {\n+ Path string `json:\"path,omitempty\"`\n+ RuntimeArgs []string `json:\"runtimeArgs,omitempty\"`\n+}\n+\n+// runtimeAdd implements subcommands.Command.\n+type runtimeAdd struct {\n+}\n+\n+// Name implements subcommands.Command.Name.\n+func (*runtimeAdd) Name() string {\n+ return \"runtime-add\"\n+}\n+\n+// Synopsis implements subcommands.Command.Synopsis.\n+func (*runtimeAdd) Synopsis() string {\n+ return \"adds a runtime to docker daemon configuration\"\n+}\n+\n+// Usage implements subcommands.Command.Usage.\n+func (*runtimeAdd) Usage() string {\n+ return `runtime-add [flags] <name> <path> [args...] 
-- if provided, args are passed as arguments to the runtime\n+`\n+}\n+\n+// SetFlags implements subcommands.Command.SetFlags.\n+func (*runtimeAdd) SetFlags(*flag.FlagSet) {\n+}\n+\n+// Execute implements subcommands.Command.Execute.\n+func (r *runtimeAdd) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {\n+ if f.NArg() < 2 {\n+ f.Usage()\n+ return subcommands.ExitUsageError\n+ }\n+ name := f.Arg(0)\n+ path := f.Arg(1)\n+ runtimeArgs := f.Args()[2:]\n+\n+ fmt.Printf(\"Adding runtime %q to file %q\\n\", name, *configFile)\n+ c, err := readConfig(*configFile)\n+ if err != nil {\n+ log.Fatalf(\"Error reading config file %q: %v\", *configFile, err)\n+ }\n+\n+ var rts map[string]interface{}\n+ if i, ok := c[\"runtimes\"]; ok {\n+ rts = i.(map[string]interface{})\n+ } else {\n+ rts = make(map[string]interface{})\n+ c[\"runtimes\"] = rts\n+ }\n+ rts[name] = runtime{Path: path, RuntimeArgs: runtimeArgs}\n+\n+ if err := writeConfig(c, *configFile); err != nil {\n+ log.Fatalf(\"Error writing config file %q: %v\", *configFile, err)\n+ }\n+ return subcommands.ExitSuccess\n+}\n+\n+// runtimeRemove implements subcommands.Command.\n+type runtimeRemove struct {\n+}\n+\n+// Name implements subcommands.Command.Name.\n+func (*runtimeRemove) Name() string {\n+ return \"runtime-rm\"\n+}\n+\n+// Synopsis implements subcommands.Command.Synopsis.\n+func (*runtimeRemove) Synopsis() string {\n+ return \"removes a runtime from docker daemon configuration\"\n+}\n+\n+// Usage implements subcommands.Command.Usage.\n+func (*runtimeRemove) Usage() string {\n+ return `runtime-rm [flags] <name>\n+`\n+}\n+\n+// SetFlags implements subcommands.Command.SetFlags.\n+func (*runtimeRemove) SetFlags(*flag.FlagSet) {\n+}\n+\n+// Execute implements subcommands.Command.Execute.\n+func (r *runtimeRemove) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {\n+ if f.NArg() != 1 {\n+ f.Usage()\n+ return subcommands.ExitUsageError\n+ }\n+ name := f.Arg(0)\n+\n+ fmt.Printf(\"Removing runtime %q from file %q\\n\", name, *configFile)\n+ c, err := readConfig(*configFile)\n+ if err != nil {\n+ log.Fatalf(\"Error reading config file %q: %v\", *configFile, err)\n+ }\n+\n+ var rts map[string]interface{}\n+ if i, ok := c[\"runtimes\"]; ok {\n+ rts = i.(map[string]interface{})\n+ } else {\n+ log.Fatalf(\"runtime %q not found\", name)\n+ }\n+ if _, ok := rts[name]; !ok {\n+ log.Fatalf(\"runtime %q not found\", name)\n+ }\n+ delete(rts, name)\n+\n+ if err := writeConfig(c, *configFile); err != nil {\n+ log.Fatalf(\"Error writing config file %q: %v\", *configFile, err)\n+ }\n+ return subcommands.ExitSuccess\n+}\n+\n+func readConfig(path string) (map[string]interface{}, error) {\n+ configBytes, err := ioutil.ReadFile(path)\n+ if err != nil && !os.IsNotExist(err) {\n+ return nil, err\n+ }\n+ c := make(map[string]interface{})\n+ if len(configBytes) > 0 {\n+ if err := json.Unmarshal(configBytes, &c); err != nil {\n+ return nil, err\n+ }\n+ }\n+ return c, nil\n+}\n+\n+func writeConfig(c map[string]interface{}, path string) error {\n+ b, err := json.MarshalIndent(c, \"\", \" \")\n+ if err != nil {\n+ return err\n+ }\n+\n+ if err := os.Rename(path, path+\"~\"); err != nil && !os.IsNotExist(err) {\n+ return fmt.Errorf(\"error renaming config file %q: %v\", path, err)\n+ }\n+ if err := ioutil.WriteFile(path, b, 0644); err != nil {\n+ return fmt.Errorf(\"error writing config file %q: %v\", path, err)\n+ }\n+ return nil\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add tool to configure runtime settings in docker
This will be used with the upcoming e2e image tests.
PiperOrigin-RevId: 201400832
Change-Id: I49509314e16ea54655ea8060dbf511a04a7a8f79 |
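
dockercfg edits the daemon configuration by decoding it into a generic map, inserting a "runtimes" entry, and writing it back after backing up the old file. Assuming an invocation along the lines of `dockercfg runtime-add runsc-test /tmp/runsc-test/runsc --debug` (the runtime name, path and flag here are example values, not taken from the change), the resulting daemon.json fragment looks roughly like the output of this snippet. Docker only picks the entry up after the daemon is restarted.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// runtime mirrors the JSON shape Docker expects for a custom runtime entry.
type runtime struct {
	Path        string   `json:"path,omitempty"`
	RuntimeArgs []string `json:"runtimeArgs,omitempty"`
}

func main() {
	// Start from an (empty) daemon configuration and add one runtime,
	// the same way the runtime-add subcommand does.
	cfg := map[string]interface{}{
		"runtimes": map[string]runtime{
			"runsc-test": {
				Path:        "/tmp/runsc-test/runsc",
				RuntimeArgs: []string{"--debug"},
			},
		},
	}
	b, err := json.MarshalIndent(cfg, "", "    ")
	if err != nil {
		panic(err)
	}
	// Prints a daemon.json fragment along the lines of:
	// {
	//     "runtimes": {
	//         "runsc-test": {
	//             "path": "/tmp/runsc-test/runsc",
	//             "runtimeArgs": ["--debug"]
	//         }
	//     }
	// }
	fmt.Println(string(b))
}
```
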
259,992 | 20.06.2018 13:30:39 | 25,200 | 4ad7315b6759afa81f492ec119080deb9a224101 | Add 'runsc debug' command
It prints sandbox stacks to the log to help debug stuckness. I expect
that many more options will be added in the future. | [
{
"change_type": "MODIFY",
"old_path": "pkg/log/log.go",
"new_path": "pkg/log/log.go",
"diff": "@@ -251,8 +251,8 @@ const defaultStackSize = 1 << 16 // 64KB\n// maxStackSize is the maximum buffer size to allocate for stack traces.\nconst maxStackSize = 1 << 26 // 64MB\n-// stacks returns goroutine stacks, like panic.\n-func stacks(all bool) []byte {\n+// Stacks returns goroutine stacks, like panic.\n+func Stacks(all bool) []byte {\nvar trace []byte\nfor s := defaultStackSize; s <= maxStackSize; s *= 4 {\ntrace = make([]byte, s)\n@@ -271,7 +271,7 @@ func stacks(all bool) []byte {\n//\n// This will be print a traceback, tb, as Warningf(format+\":\\n%s\", v..., tb).\nfunc Traceback(format string, v ...interface{}) {\n- v = append(v, stacks(false))\n+ v = append(v, Stacks(false))\nWarningf(format+\":\\n%s\", v...)\n}\n@@ -279,7 +279,7 @@ func Traceback(format string, v ...interface{}) {\n//\n// This will be print a traceback, tb, as Warningf(format+\":\\n%s\", v..., tb).\nfunc TracebackAll(format string, v ...interface{}) {\n- v = append(v, stacks(true))\n+ v = append(v, Stacks(true))\nWarningf(format+\":\\n%s\", v...)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/BUILD",
"new_path": "runsc/boot/BUILD",
"diff": "@@ -7,6 +7,7 @@ go_library(\nsrcs = [\n\"config.go\",\n\"controller.go\",\n+ \"debug.go\",\n\"events.go\",\n\"fds.go\",\n\"fs.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -68,6 +68,9 @@ const (\n// RootContainerStart is the URPC endpoint for starting a new sandbox\n// with root container.\nRootContainerStart = \"containerManager.StartRoot\"\n+\n+ // SandboxStacks collects sandbox stacks for debugging.\n+ SandboxStacks = \"debug.Stacks\"\n)\n// ControlSocketAddr generates an abstract unix socket name for the given id.\n@@ -107,6 +110,8 @@ func newController(fd int, k *kernel.Kernel, w *watchdog.Watchdog) (*controller,\nsrv.Register(net)\n}\n+ srv.Register(&debug{})\n+\nif err := srv.StartServing(); err != nil {\nreturn nil, err\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/debug.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package boot\n+\n+import (\n+ \"gvisor.googlesource.com/gvisor/pkg/log\"\n+)\n+\n+type debug struct {\n+}\n+\n+// Stacks collects all sandbox stacks and copies them to 'stacks'.\n+func (*debug) Stacks(_ *struct{}, stacks *string) error {\n+ buf := log.Stacks(true)\n+ *stacks = string(buf)\n+ return nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/BUILD",
"new_path": "runsc/cmd/BUILD",
"diff": "@@ -10,6 +10,7 @@ go_library(\n\"checkpoint.go\",\n\"cmd.go\",\n\"create.go\",\n+ \"debug.go\",\n\"delete.go\",\n\"events.go\",\n\"exec.go\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/cmd/debug.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package cmd\n+\n+import (\n+ \"context\"\n+ \"flag\"\n+ \"github.com/google/subcommands\"\n+ \"gvisor.googlesource.com/gvisor/pkg/log\"\n+ \"gvisor.googlesource.com/gvisor/runsc/boot\"\n+ \"gvisor.googlesource.com/gvisor/runsc/container\"\n+)\n+\n+// Debug implements subcommands.Command for the \"debug\" command.\n+type Debug struct {\n+ pid int\n+ stacks bool\n+}\n+\n+// Name implements subcommands.Command.\n+func (*Debug) Name() string {\n+ return \"debug\"\n+}\n+\n+// Synopsis implements subcommands.Command.\n+func (*Debug) Synopsis() string {\n+ return \"shows a variety of debug information\"\n+}\n+\n+// Usage implements subcommands.Command.\n+func (*Debug) Usage() string {\n+ return `debug [flags] <container id>`\n+}\n+\n+// SetFlags implements subcommands.Command.\n+func (d *Debug) SetFlags(f *flag.FlagSet) {\n+ f.IntVar(&d.pid, \"pid\", 0, \"sandbox process ID. Container ID is not necessary if this is set\")\n+ f.BoolVar(&d.stacks, \"stacks\", false, \"if true, dumps all sandbox stacks to the log\")\n+}\n+\n+// Execute implements subcommands.Command.Execute.\n+func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {\n+ var c *container.Container\n+ conf := args[0].(*boot.Config)\n+\n+ if d.pid == 0 {\n+ // No pid, container ID must have been provided.\n+ if f.NArg() != 1 {\n+ f.Usage()\n+ return subcommands.ExitUsageError\n+ }\n+ var err error\n+ c, err = container.Load(conf.RootDir, f.Arg(0))\n+ if err != nil {\n+ Fatalf(\"error loading container %q: %v\", f.Arg(0), err)\n+ }\n+ } else {\n+ if f.NArg() != 0 {\n+ f.Usage()\n+ return subcommands.ExitUsageError\n+ }\n+ // Go over all sandboxes and find the one that matches PID.\n+ ids, err := container.List(conf.RootDir)\n+ if err != nil {\n+ Fatalf(\"error listing containers: %v\", err)\n+ }\n+ for _, id := range ids {\n+ candidate, err := container.Load(conf.RootDir, id)\n+ if err != nil {\n+ Fatalf(\"error loading container %q: %v\", id, err)\n+ }\n+ if candidate.Pid() == d.pid {\n+ c = candidate\n+ break\n+ }\n+ }\n+ if c == nil {\n+ Fatalf(\"container with PID %d not found\", d.pid)\n+ }\n+ }\n+\n+ log.Infof(\"Found sandbox %q, PID: %d\", c.Sandbox.ID, c.Sandbox.Pid)\n+ if !c.Sandbox.IsRunning() {\n+ Fatalf(\"sandbox %q is not running\", c.Sandbox.ID)\n+ }\n+\n+ if d.stacks {\n+ log.Infof(\"Retrieving sandbox stacks\")\n+ stacks, err := c.Sandbox.Stacks()\n+ if err != nil {\n+ Fatalf(\"error retrieving stacks: %v\", err)\n+ }\n+ log.Infof(\" *** Stack dump ***\\n%s\", stacks)\n+ }\n+ return subcommands.ExitSuccess\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/main.go",
"new_path": "runsc/main.go",
"diff": "@@ -88,6 +88,7 @@ func main() {\n// The string below will be printed above the commands.\nconst internalGroup = \"internal use only\"\nsubcommands.Register(new(cmd.Boot), internalGroup)\n+ subcommands.Register(new(cmd.Debug), internalGroup)\nsubcommands.Register(new(cmd.Gofer), internalGroup)\n// All subcommands must be registered before flag parsing.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -537,6 +537,22 @@ func (s *Sandbox) IsRunning() bool {\nreturn false\n}\n+// Stacks collects and returns all stacks for the sandbox.\n+func (s *Sandbox) Stacks() (string, error) {\n+ log.Debugf(\"Stacks sandbox %q\", s.ID)\n+ conn, err := s.connect()\n+ if err != nil {\n+ return \"\", err\n+ }\n+ defer conn.Close()\n+\n+ var stacks string\n+ if err := conn.Call(boot.SandboxStacks, nil, &stacks); err != nil {\n+ return \"\", fmt.Errorf(\"err getting sandbox %q stacks: %v\", s.ID, err)\n+ }\n+ return stacks, nil\n+}\n+\n// killProcess sends a signal to the host process (i.e. a sandbox or gofer\n// process). Sandbox.Signal should be used to send a signal to a process\n// running inside the sandbox.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add 'runsc debug' command
It prints sandbox stacks to the log to help debug stuckness. I expect
that many more options will be added in the future.
PiperOrigin-RevId: 201405931
Change-Id: I87e560800cd5a5a7b210dc25a5661363c8c3a16e |
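
The debug command rides on the newly exported log.Stacks, which retries runtime.Stack with a progressively larger buffer until the whole goroutine dump fits, and the boot controller exposes that dump over URPC as debug.Stacks. A self-contained version of the retry loop is below; the start size, cap and growth factor mirror the constants visible in the pkg/log diff, but the helper name is invented. On the command line this surfaces as something like `runsc debug --stacks <container id>`.

```go
package main

import (
	"fmt"
	"runtime"
)

// allStacks grows the buffer until runtime.Stack can write the full dump.
// runtime.Stack returns the number of bytes written; a result equal to the
// buffer size means the trace was probably truncated, so try again bigger.
func allStacks() []byte {
	buf := make([]byte, 1<<16) // start at 64KB
	for {
		n := runtime.Stack(buf, true /* all goroutines */)
		if n < len(buf) {
			return buf[:n]
		}
		if len(buf) >= 1<<26 { // give up at 64MB
			return buf
		}
		buf = make([]byte, len(buf)*4)
	}
}

func main() {
	fmt.Printf("%s\n", allStacks())
}
```
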
259,992 | 20.06.2018 14:37:56 | 25,200 | 2b5bdb525e99fc1ef099b2ef083a09772241ea58 | Add end-to-end image tests | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/image/BUILD",
"diff": "+package(licenses = [\"notice\"]) # Apache 2.0\n+\n+load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\", \"go_test\")\n+\n+go_test(\n+ name = \"image_test\",\n+ size = \"small\",\n+ srcs = [\"image_test.go\"],\n+ data = [\"latin10k.txt\"],\n+ embed = [\":image\"],\n+ tags = [\n+ # Requires docker and runsc to be configured before the test runs.\n+ \"manual\",\n+ \"local\",\n+ ],\n+ deps = [\"//runsc/test/testutil\"],\n+)\n+\n+go_library(\n+ name = \"image\",\n+ srcs = [\"image.go\"],\n+ importpath = \"gvisor.googlesource.com/gvisor/runsc/test/image\",\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/image/image.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package image is empty. See image_test.go for description.\n+package image\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/image/image_test.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package image provides end-to-end image tests for runsc. These tests require\n+// docker and runsc to be installed on the machine.\n+//\n+// The tests expect the runtime name to be provided in the RUNSC_RUNTIME\n+// environment variable (default: runsc-test).\n+//\n+// Each test calls docker commands to start up a container, and tests that it is\n+// behaving properly, like connecting to a port or looking at the output. The\n+// container is killed and deleted at the end.\n+package image\n+\n+import (\n+ \"fmt\"\n+ \"io/ioutil\"\n+ \"log\"\n+ \"math/rand\"\n+ \"net/http\"\n+ \"os\"\n+ \"os/exec\"\n+ \"path\"\n+ \"regexp\"\n+ \"strconv\"\n+ \"strings\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n+)\n+\n+func init() {\n+ rand.Seed(time.Now().UnixNano())\n+}\n+\n+func runtime() string {\n+ r := os.Getenv(\"RUNSC_RUNTIME\")\n+ if r == \"\" {\n+ return \"runsc-test\"\n+ }\n+ return r\n+}\n+\n+func mountArg(source, target string) string {\n+ return fmt.Sprintf(\"%s:%s\", source, target)\n+}\n+\n+func getLocalPath(file string) string {\n+ return path.Join(\".\", file)\n+}\n+\n+type docker struct {\n+ runtime string\n+ name string\n+}\n+\n+func makeDocker(namePrefix string) docker {\n+ suffix := fmt.Sprintf(\"-%06d\", rand.Int())[:7]\n+ return docker{name: namePrefix + suffix, runtime: runtime()}\n+}\n+\n+// do executes docker command.\n+func (d *docker) do(args ...string) (string, error) {\n+ cmd := exec.Command(\"docker\", args...)\n+ out, err := cmd.CombinedOutput()\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"error executing docker %s: %v\", args, err)\n+ }\n+ return string(out), nil\n+}\n+\n+// run calls 'docker run' with the arguments provided.\n+func (d *docker) run(args ...string) (string, error) {\n+ a := []string{\"run\", \"--runtime\", d.runtime, \"--name\", d.name, \"-d\"}\n+ a = append(a, args...)\n+ return d.do(a...)\n+}\n+\n+// cleanUp kills and deletes the container.\n+func (d *docker) cleanUp() error {\n+ if _, err := d.do(\"kill\", d.name); err != nil {\n+ return fmt.Errorf(\"error killing container %q: %v\", d.name, err)\n+ }\n+ if _, err := d.do(\"rm\", d.name); err != nil {\n+ return fmt.Errorf(\"error deleting container %q: %v\", d.name, err)\n+ }\n+ return nil\n+}\n+\n+// findPort returns the host port that is mapped to 'sandboxPort'. 
This calls\n+// docker to allocate a free port in the host and prevent conflicts.\n+func (d *docker) findPort(sandboxPort int) (int, error) {\n+ format := fmt.Sprintf(`{{ (index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort }}`, sandboxPort)\n+ out, err := d.do(\"inspect\", \"-f\", format, d.name)\n+ if err != nil {\n+ return -1, fmt.Errorf(\"error retrieving port: %v\", err)\n+ }\n+ port, err := strconv.Atoi(strings.TrimSuffix(string(out), \"\\n\"))\n+ if err != nil {\n+ return -1, fmt.Errorf(\"error parsing port %q: %v\", out, err)\n+ }\n+ return port, nil\n+}\n+\n+// waitForOutput calls 'docker logs' to retrieve containers output and searches\n+// for the given pattern.\n+func (d *docker) waitForOutput(pattern string, timeout time.Duration) error {\n+ re := regexp.MustCompile(pattern)\n+ for exp := time.Now().Add(timeout); time.Now().Before(exp); {\n+ out, err := d.do(\"logs\", d.name)\n+ if err != nil {\n+ return err\n+ }\n+ if re.MatchString(out) {\n+ return nil\n+ }\n+ time.Sleep(10 * time.Millisecond)\n+ }\n+ return fmt.Errorf(\"timeout waiting for output %q\", re.String())\n+}\n+\n+func TestHelloWorld(t *testing.T) {\n+ d := makeDocker(\"hello-test\")\n+ if out, err := d.run(\"hello-world\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n+ }\n+ defer d.cleanUp()\n+\n+ if err := d.waitForOutput(\".*Hello from Docker!.*\", 5*time.Second); err != nil {\n+ t.Fatalf(\"docker didn't say hello: %v\", err)\n+ }\n+}\n+\n+func TestHttpd(t *testing.T) {\n+ d := makeDocker(\"http-test\")\n+\n+ // Create temp directory to copy htdocs files. The sandbox doesn't have access\n+ // to files in the test dir.\n+ dir, err := ioutil.TempDir(\"\", \"httpd\")\n+ if err != nil {\n+ t.Fatalf(\"ioutil.TempDir failed: %v\", err)\n+ }\n+ if err := os.Chmod(dir, 0777); err != nil {\n+ t.Fatalf(\"os.Chmod(%q, 0777) failed: %v\", dir, err)\n+ }\n+ src := getLocalPath(\"latin10k.txt\")\n+ dst := path.Join(dir, \"latin10k.txt\")\n+ if err := testutil.Copy(src, dst); err != nil {\n+ t.Fatalf(\"testutil.Copy(%q, %q) failed: %v\", src, dst, err)\n+ }\n+\n+ // Start the container.\n+ if out, err := d.run(\"-p\", \"80\", \"-v\", mountArg(dir, \"/usr/local/apache2/htdocs\"), \"httpd\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n+ }\n+ defer d.cleanUp()\n+\n+ // Find where port 80 is mapped to.\n+ port, err := d.findPort(80)\n+ if err != nil {\n+ t.Fatalf(\"docker.findPort(80) failed: %v\", err)\n+ }\n+\n+ // Wait until it's up and running.\n+ if err := d.waitForOutput(\".*'httpd -D FOREGROUND'.*\", 5*time.Second); err != nil {\n+ t.Fatalf(\"docker.WaitForOutput() timeout: %v\", err)\n+ }\n+\n+ url := fmt.Sprintf(\"http://localhost:%d/not-found\", port)\n+ resp, err := http.Get(url)\n+ if err != nil {\n+ t.Fatalf(\"error reaching http server: %v\", err)\n+ }\n+ if want := http.StatusNotFound; resp.StatusCode != want {\n+ t.Errorf(\"Wrong response code, got: %d, want: %d\", resp.StatusCode, want)\n+ }\n+\n+ url = fmt.Sprintf(\"http://localhost:%d/latin10k.txt\", port)\n+ resp, err = http.Get(url)\n+ if err != nil {\n+ t.Fatalf(\"Error reaching http server: %v\", err)\n+ }\n+ if want := http.StatusOK; resp.StatusCode != want {\n+ t.Errorf(\"Wrong response code, got: %d, want: %d\", resp.StatusCode, want)\n+ }\n+\n+ body, err := ioutil.ReadAll(resp.Body)\n+ if err != nil {\n+ t.Fatalf(\"Error reading http response: %v\", err)\n+ }\n+ defer resp.Body.Close()\n+\n+ // READALL is the last word in the file. 
Ensures everything was read.\n+ if want := \"READALL\"; strings.HasSuffix(string(body), want) {\n+ t.Errorf(\"response doesn't contain %q, resp: %q\", want, body)\n+ }\n+}\n+\n+func MainTest(m *testing.M) {\n+ // Check correct docker is installed.\n+ cmd := exec.Command(\"docker\", \"version\")\n+ out, err := cmd.CombinedOutput()\n+ if err != nil {\n+ log.Fatalf(\"Error running %q: %v\", \"docker version\", err)\n+ }\n+ re := regexp.MustCompile(`Version:\\s+(\\d+)\\.(\\d+)\\.\\d.*`)\n+ matches := re.FindStringSubmatch(string(out))\n+ if len(matches) != 3 {\n+ log.Fatalf(\"Invalid docker output: %s\", out)\n+ }\n+ major, _ := strconv.Atoi(matches[1])\n+ minor, _ := strconv.Atoi(matches[2])\n+ if major < 17 || (major == 17 && minor < 9) {\n+ log.Fatalf(\"Docker version 17.09.0 or greater is required, found: %02d.%02d\", major, minor)\n+ }\n+\n+ os.Exit(m.Run())\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/image/latin10k.txt",
"diff": "+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras ut placerat felis. Maecenas urna est, auctor a efficitur sit amet, egestas et augue. Curabitur dignissim scelerisque nunc vel cursus. Ut vehicula est pretium, consectetur nunc non, pharetra ligula. Curabitur ut ultricies metus. Suspendisse pulvinar, orci sed fermentum vestibulum, eros turpis molestie lectus, nec elementum risus dolor mattis felis. Donec ultrices ipsum sem, at pretium lacus convallis at. Mauris nulla enim, tincidunt non bibendum at, vehicula pulvinar mauris.\n+\n+Duis in dapibus turpis. Pellentesque maximus magna odio, ac congue libero laoreet quis. Maecenas euismod risus in justo aliquam accumsan. Nunc quis ornare arcu, sit amet sodales elit. Phasellus nec scelerisque nisl, a tincidunt arcu. Proin ornare est nunc, sed suscipit orci interdum et. Suspendisse condimentum venenatis diam in tempor. Aliquam egestas lectus in rutrum tempus. Donec id egestas eros. Donec molestie consequat purus, sed posuere odio venenatis vitae. Nunc placerat augue id vehicula varius. In hac habitasse platea dictumst. Proin at est accumsan, venenatis quam a, fermentum risus. Phasellus posuere pellentesque enim, id suscipit magna consequat ut. Quisque ut tortor ante.\n+\n+Cras ut vulputate metus, a laoreet lectus. Vivamus ultrices molestie odio in tristique. Morbi faucibus mi eget sollicitudin fringilla. Fusce vitae lacinia ligula. Sed egestas sed diam eu posuere. Maecenas justo nisl, venenatis vel nibh vel, cursus aliquam velit. Praesent lacinia dui id erat venenatis rhoncus. Morbi gravida felis ante, sit amet vehicula orci rhoncus vitae.\n+\n+Sed finibus sagittis dictum. Proin auctor suscipit sem et mattis. Phasellus libero ligula, pellentesque ut felis porttitor, fermentum sollicitudin orci. Nulla eu nulla nibh. Fusce a eros risus. Proin vel magna risus. Donec nec elit eleifend, scelerisque sapien vitae, pharetra quam. Donec porttitor mauris scelerisque, tempus orci hendrerit, dapibus felis. Nullam libero elit, sollicitudin a aliquam at, ultrices in erat. Mauris eget ligula sodales, porta turpis et, scelerisque odio. Mauris mollis leo vitae purus gravida, in tempor nunc efficitur. Nulla facilisis posuere augue, nec pellentesque lectus eleifend ac. Vestibulum convallis est a feugiat tincidunt. Donec vitae enim volutpat, tincidunt eros eu, malesuada nibh.\n+\n+Quisque molestie, magna ornare elementum convallis, erat enim sagittis ipsum, eget porttitor sapien arcu id purus. Donec ut cursus diam. Nulla rutrum nulla et mi fermentum, vel tempus tellus posuere. Proin vitae pharetra nulla, nec ornare ex. Nulla consequat, augue a accumsan euismod, turpis leo ornare ligula, a pulvinar enim dolor ut augue. Quisque volutpat, lectus a varius mollis, nisl eros feugiat sem, at egestas lacus justo eu elit. Vestibulum scelerisque mauris est, sagittis interdum nunc accumsan sit amet. Maecenas aliquet ex ut lacus ornare, eu sagittis nibh imperdiet. Duis ultrices nisi velit, sed sodales risus sollicitudin et. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Etiam a accumsan augue, vitae pulvinar nulla. Pellentesque euismod sodales magna, nec luctus eros mattis eget. Sed lacinia suscipit lectus, eget consectetur dui pellentesque sed. Nullam nec mattis tellus.\n+\n+Aliquam erat volutpat. Praesent lobortis massa porttitor eros tincidunt, nec consequat diam pharetra. Duis efficitur non lorem sed mattis. Suspendisse justo nunc, pulvinar eu porttitor at, facilisis id eros. Suspendisse potenti. 
Cras molestie aliquet orci ut fermentum. In tempus aliquet eros nec suscipit. Suspendisse in mauris ut lectus ultrices blandit sit amet vitae est. Nam magna massa, porttitor ut semper id, feugiat vel quam. Suspendisse dignissim posuere scelerisque. Donec scelerisque lorem efficitur suscipit suscipit. Nunc luctus ligula et scelerisque lacinia.\n+\n+Suspendisse potenti. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Sed ultrices, sem in venenatis scelerisque, tellus ipsum porttitor urna, et iaculis lectus odio ac nisi. Integer luctus dui urna, at sollicitudin elit dapibus eu. Praesent nibh ante, porttitor a ante in, ullamcorper pretium felis. Aliquam vel tortor imperdiet, imperdiet lorem et, cursus mi. Proin tempus velit est, ut hendrerit metus gravida sed. Sed nibh sapien, faucibus quis ipsum in, scelerisque lacinia elit. In nec magna eu magna laoreet rhoncus. Donec vitae rutrum mauris. Integer urna felis, consequat at rhoncus vitae, auctor quis elit. Duis a pulvinar sem, nec gravida nisl. Nam non dapibus purus. Praesent vestibulum turpis nec erat porttitor, a scelerisque purus tincidunt.\n+\n+Nam fringilla leo nisi, nec placerat nisl luctus eget. Aenean malesuada nunc porta sapien sodales convallis. Suspendisse ut massa tempor, ullamcorper mi ut, faucibus turpis. Vivamus at sagittis metus. Donec varius ac mi eget sodales. Nulla feugiat, nulla eu fringilla fringilla, nunc lorem sollicitudin quam, vitae lacinia velit lorem eu orci. Mauris leo urna, pellentesque ac posuere non, pellentesque sit amet quam.\n+\n+Vestibulum porta diam urna, a aliquet nibh vestibulum et. Proin interdum bibendum nisl sed rhoncus. Sed vel diam hendrerit, faucibus ante et, hendrerit diam. Nunc dolor augue, mattis non dolor vel, luctus sodales neque. Cras malesuada fermentum dolor eu lobortis. Integer dapibus volutpat consequat. Maecenas posuere feugiat nunc. Donec vel mollis elit, volutpat consequat enim. Nulla id nisi finibus orci imperdiet elementum. Phasellus ultrices, elit vitae consequat rutrum, nisl est congue massa, quis condimentum justo nisi vitae turpis. Maecenas aliquet risus sit amet accumsan elementum. Proin non finibus elit, sit amet lobortis augue.\n+\n+Morbi pretium pulvinar sem vel sollicitudin. Proin imperdiet fringilla leo, non pellentesque lacus gravida nec. Vivamus ullamcorper consectetur ligula eu consectetur. Curabitur sit amet tempus purus. Curabitur quam quam, tincidunt eu tempus vel, volutpat at ipsum. Maecenas lobortis elit ac justo interdum, sit amet mattis ligula mollis. Sed posuere ligula et felis convallis tempor. Aliquam nec mollis velit. Donec varius sit amet erat at imperdiet. Nulla ipsum justo, tempor non sollicitudin gravida, dignissim vel orci. In hac habitasse platea dictumst. Cras cursus tellus id arcu aliquet accumsan. Phasellus ac erat dui.\n+\n+Duis mollis metus at mi luctus aliquam. Duis varius eget erat ac porttitor. Phasellus lobortis sagittis lacinia. Etiam sagittis eget erat in pulvinar. Phasellus sodales risus nec vulputate accumsan. Cras sit amet pellentesque dui. Praesent consequat felis mi, at vulputate diam convallis a. Donec hendrerit nibh vel justo consequat dictum. In euismod, dui sit amet malesuada suscipit, mauris ex rhoncus eros, sed ornare arcu nunc eu urna. Pellentesque eget erat augue. Integer rutrum mauris sem, nec sodales nulla cursus vel. Vivamus porta, urna vel varius vulputate, nulla arcu malesuada dui, a ultrices magna ante sed nibh.\n+\n+Morbi ultricies aliquam lorem id bibendum. 
Donec sit amet nunc vitae massa gravida eleifend hendrerit vel libero. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nulla vestibulum tempus condimentum. Aliquam dolor ipsum, condimentum in sapien et, tempor iaculis nulla. Aenean non pharetra augue. Maecenas mattis dignissim maximus. Fusce elementum tincidunt massa sit amet lobortis. Phasellus nec pharetra dui, et malesuada ante. Nullam commodo pretium tellus. Praesent sollicitudin, enim eget imperdiet scelerisque, odio felis vulputate dolor, eget auctor neque tellus ac lorem.\n+\n+In consectetur augue et sapien feugiat varius. Nam tortor mi, consectetur ac felis non, elementum venenatis augue. Suspendisse ut tellus in est sagittis cursus. Quisque faucibus, neque sit amet semper congue, nibh augue finibus odio, vitae interdum dolor arcu eget arcu. Curabitur dictum risus massa, non tincidunt urna molestie non. Maecenas eu quam purus. Donec vulputate, dui eu accumsan blandit, mauris tortor tristique mi, sed blandit leo quam id quam. Ut venenatis sagittis malesuada. Integer non auctor orci. Duis consectetur massa felis. Fusce euismod est sit amet bibendum finibus. Vestibulum dolor ex, tempor at elit in, iaculis cursus dui. Nunc sed neque ac risus rutrum tempus sit amet at ante. In hac habitasse platea dictumst.\n+\n+Donec rutrum, velit nec viverra tincidunt, est velit viverra neque, quis auctor leo ex at lectus. Morbi eget purus nisi. Aliquam lacus dui, interdum vitae elit at, venenatis dignissim est. Duis ac mollis lorem. Vivamus a vestibulum quam. Maecenas non metus dolor. Praesent tortor nunc, tristique at nisl molestie, vulputate eleifend diam. Integer ultrices lacus odio, vel imperdiet enim accumsan id. Sed ligula tortor, interdum eu velit eget, pharetra pulvinar magna. Sed non lacus in eros tincidunt sagittis ac vel justo. Donec vitae leo sagittis, accumsan ante sit amet, accumsan odio. Ut volutpat ultricies tortor. Vestibulum tempus purus et est tristique sagittis quis vitae turpis.\n+\n+Nam iaculis neque lacus, eget euismod turpis blandit eget. In hac habitasse platea dictumst. Phasellus justo neque, scelerisque sit amet risus ut, pretium commodo nisl. Phasellus auctor sapien sed ex bibendum fermentum. Proin maximus odio a ante ornare, a feugiat lorem egestas. Etiam efficitur tortor a ante tincidunt interdum. Nullam non est ac massa congue efficitur sit amet nec eros. Nullam at ipsum vel mauris tincidunt efficitur. Duis pulvinar nisl elit, id auctor risus laoreet ac. Sed nunc mauris, tristique id leo ut, condimentum congue nunc. Sed ultricies, mauris et convallis faucibus, justo ex faucibus est, at lobortis purus justo non arcu. Integer vel facilisis elit, dapibus imperdiet mauris.\n+\n+Pellentesque non mattis turpis, eget bibendum velit. Fusce sollicitudin ante ac tincidunt rhoncus. Praesent porta scelerisque consequat. Donec eleifend faucibus sollicitudin. Quisque vitae purus eget tortor tempor ultrices. Maecenas mauris diam, semper vitae est non, imperdiet tempor magna. Duis elit lacus, auctor vestibulum enim eget, rhoncus porttitor tortor.\n+\n+Donec non rhoncus nibh. Cras dapibus justo vitae nunc accumsan, id congue erat egestas. Aenean at ante ante. Duis eleifend imperdiet dREADALL\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/testutil.go",
"new_path": "runsc/test/testutil/testutil.go",
"diff": "@@ -18,6 +18,7 @@ package testutil\nimport (\n\"encoding/json\"\n\"fmt\"\n+ \"io\"\n\"io/ioutil\"\n\"os\"\n\"path/filepath\"\n@@ -146,3 +147,21 @@ func writeSpec(dir string, spec *specs.Spec) error {\nfunc UniqueContainerID() string {\nreturn fmt.Sprintf(\"test-container-%d\", time.Now().UnixNano())\n}\n+\n+// Copy copies file from src to dst.\n+func Copy(src, dst string) error {\n+ in, err := os.Open(src)\n+ if err != nil {\n+ return err\n+ }\n+ defer in.Close()\n+\n+ out, err := os.Create(dst)\n+ if err != nil {\n+ return err\n+ }\n+ defer out.Close()\n+\n+ _, err = io.Copy(out, in)\n+ return err\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add end-to-end image tests
PiperOrigin-RevId: 201418619
Change-Id: I7961b027394d98422642f829bc54745838c138bd |
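
waitForOutput above avoids fixed sleeps by polling `docker logs` for a regexp match until a deadline, and findPort leans on `docker inspect -f` so the host chooses the published port. The generic polling loop below captures the shape of waitForOutput; `waitFor` and the condition closure are invented names, and only the 10ms interval is taken from the test helper.

```go
package main

import (
	"fmt"
	"time"
)

// waitFor polls cond until it reports success or the deadline passes.
// waitForOutput in the image tests is this loop with cond being a regexp
// match against the container's docker logs.
func waitFor(timeout time.Duration, cond func() (bool, error)) error {
	for deadline := time.Now().Add(timeout); time.Now().Before(deadline); {
		ok, err := cond()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		time.Sleep(10 * time.Millisecond)
	}
	return fmt.Errorf("timeout after %v", timeout)
}

func main() {
	start := time.Now()
	err := waitFor(time.Second, func() (bool, error) {
		return time.Since(start) > 100*time.Millisecond, nil
	})
	fmt.Println(err)
}
```
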
259,992 | 20.06.2018 15:27:06 | 25,200 | 2f59ba0e2d2169cf429b73a39a920f8d615f8eca | Include image test as part of kokoro tests | [
{
"change_type": "MODIFY",
"old_path": "kokoro/gcp_ubuntu/run_tests.sh",
"new_path": "kokoro/gcp_ubuntu/run_tests.sh",
"diff": "@@ -31,6 +31,10 @@ cd git/repo\n# Build everything.\nbazel build //...\n+# Test use this variable to determine what runtime to use.\n+runtime=runsc_test_$((RANDOM))\n+sudo -n ./runsc/test/image/install.sh --runtime ${runtime}\n+\n# Run the tests and upload results.\n#\n# We turn off \"-e\" flag because we must move the log files even if the test\n@@ -38,6 +42,15 @@ bazel build //...\nset +e\nbazel test --test_output=errors //...\nexit_code=${?}\n+\n+if [[ ${exit_code} -eq 0 ]]; then\n+ # image_test is tagged manual\n+ bazel test --test_output=errors --test_env=RUNSC_RUNTIME=${runtime} //runsc/test/image:image_test\n+ exit_code=${?}\n+fi\n+\n+# Best effort to uninstall\n+sudo -n ./runsc/test/image/install.sh -u --runtime ${runtime}\nset -e\n# Find and rename all test xml and log files so that Sponge can pick them up.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/image_test.go",
"new_path": "runsc/test/image/image_test.go",
"diff": "// limitations under the License.\n// Package image provides end-to-end image tests for runsc. These tests require\n-// docker and runsc to be installed on the machine.\n+// docker and runsc to be installed on the machine. To set it up, run:\n+//\n+// ./runsc/test/image/install.sh [--runtime <name>]\n//\n// The tests expect the runtime name to be provided in the RUNSC_RUNTIME\n// environment variable (default: runsc-test).\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/image/install.sh",
"diff": "+#!/bin/bash\n+\n+# Copyright 2018 Google Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Fail on any error\n+set -e\n+\n+# Defaults\n+declare runtime=runsc-test\n+declare uninstall=0\n+\n+function findExe() {\n+ local exe=${1}\n+\n+ local path=$(find bazel-bin/runsc -type f -executable -name \"${exe}\" | head -n1)\n+ if [[ \"${path}\" == \"\" ]]; then\n+ echo \"Location of ${exe} not found in bazel-bin\" >&2\n+ exit 1\n+ fi\n+ echo \"${path}\"\n+}\n+\n+while [[ $# -gt 0 ]]; do\n+ case \"$1\" in\n+ --runtime)\n+ shift\n+ [ \"$#\" -le 0 ] && echo \"No runtime provided\" && exit 1\n+ runtime=$1\n+ ;;\n+ -u)\n+ uninstall=1\n+ ;;\n+ *)\n+ echo \"Unknown option: ${1}\"\n+ echo \"\"\n+ echo \"Usage: ${0} [--runtime <name>] [-u]\"\n+ echo \" --runtime sets the runtime name, default: runsc-test\"\n+ echo \" -u uninstall the runtime\"\n+ exit 1\n+ esac\n+ shift\n+done\n+\n+# Find location of executables.\n+declare -r dockercfg=$(findExe dockercfg)\n+[[ \"${dockercfg}\" == \"\" ]] && exit 1\n+\n+declare runsc=$(findExe runsc)\n+[[ \"${runsc}\" == \"\" ]] && exit 1\n+\n+if [[ ${uninstall} == 0 ]]; then\n+ rm -rf /tmp/${runtime}\n+ mkdir -p /tmp/${runtime}\n+ cp \"${runsc}\" /tmp/${runtime}/runsc\n+ runsc=/tmp/${runtime}/runsc\n+\n+ # Make tmp dir and runsc binary readable and executable to all users, since it\n+ # will run in an empty user namespace.\n+ chmod a+rx \"${runsc}\" $(dirname \"${runsc}\")\n+\n+ # Make log dir executable and writable to all users for the same reason.\n+ declare logdir=/tmp/\"${runtime?}/logs\"\n+ mkdir -p \"${logdir}\"\n+ sudo -n chmod a+wx \"${logdir}\"\n+\n+ sudo -n \"${dockercfg}\" runtime-add \"${runtime}\" \"${runsc}\" --debug-log-dir \"${logdir}\" --debug --strace --log-packets\n+\n+else\n+ sudo -n \"${dockercfg}\" runtime-rm \"${runtime}\"\n+fi\n+\n+echo \"Restarting docker service...\"\n+sudo -n /etc/init.d/docker restart\n"
}
] | Go | Apache License 2.0 | google/gvisor | Include image test as part of kokoro tests
PiperOrigin-RevId: 201427731
Change-Id: I5cbee383ec51c02b7892ec7812cbbdc426be8991 |
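
install.sh locates the freshly built runsc and dockercfg binaries with `find bazel-bin/... -type f -executable -name <exe>` before copying runsc somewhere world-readable and registering it through dockercfg. A rough Go rendering of that findExe search is below, kept to the standard library; the bazel-bin/runsc root matches the script's intent, but the function itself is purely illustrative.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// findExe walks root and returns the first regular, executable file whose
// base name matches name - the same search the shell one-liner performs.
func findExe(root, name string) (string, error) {
	var found string
	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if found == "" && info.Mode().IsRegular() && info.Mode()&0111 != 0 && info.Name() == name {
			found = path
		}
		return nil
	})
	if err != nil {
		return "", err
	}
	if found == "" {
		return "", fmt.Errorf("%q not found under %q", name, root)
	}
	return found, nil
}

func main() {
	path, err := findExe("bazel-bin/runsc", "runsc")
	fmt.Println(path, err)
}
```
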
259,992 | 20.06.2018 15:31:12 | 25,200 | 95cb01e0a9517f7119e3d848728500692a4f5cba | Reduce test sleep time | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -205,7 +205,9 @@ func TestLifecycle(t *testing.T) {\n// Wait on the container.\nvar wg sync.WaitGroup\nwg.Add(1)\n+ ch := make(chan struct{})\ngo func() {\n+ ch <- struct{}{}\nws, err := s.Wait()\nif err != nil {\nt.Fatalf(\"error waiting on container: %v\", err)\n@@ -218,7 +220,8 @@ func TestLifecycle(t *testing.T) {\n// Wait a bit to ensure that we've started waiting on the container\n// before we signal.\n- time.Sleep(5 * time.Second)\n+ <-ch\n+ time.Sleep(100 * time.Millisecond)\n// Send the container a SIGTERM which will cause it to stop.\nif err := s.Signal(syscall.SIGTERM); err != nil {\nt.Fatalf(\"error sending signal %v to container: %v\", syscall.SIGTERM, err)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Reduce test sleep time
PiperOrigin-RevId: 201428433
Change-Id: I72de1e46788ec84f61513416bb690956e515907e |
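
The test change swaps a blind 5-second sleep for a handshake: the waiter goroutine signals on a channel just before it blocks in Wait, so the test only needs a short grace period before sending SIGTERM. The condensed sketch below shows the same pattern outside the container test; `waitForThing` and the channel names are invented stand-ins for s.Wait and the test plumbing.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// waitForThing stands in for the blocking call under test (s.Wait in the
// container test above).
func waitForThing(done <-chan struct{}) {
	<-done
}

func main() {
	done := make(chan struct{})
	started := make(chan struct{})

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Tell the main goroutine we are about to block, so it no longer
		// needs a long sleep to be reasonably sure the wait has begun.
		started <- struct{}{}
		waitForThing(done)
	}()

	<-started
	time.Sleep(100 * time.Millisecond) // small grace period, as in the test
	close(done)                        // stands in for sending SIGTERM
	wg.Wait()
	fmt.Println("waiter observed the event")
}
```
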
259,854 | 21.06.2018 08:33:46 | 25,200 | ef4f239c793a1a202d3249c6a8139e0602d94d94 | Fix typo in runsc gofer flag description | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/gofer.go",
"new_path": "runsc/cmd/gofer.go",
"diff": "@@ -44,7 +44,7 @@ func (*Gofer) Name() string {\n// Synopsis implements subcommands.Command.\nfunc (*Gofer) Synopsis() string {\n- return \"launch a gofer process that server files over 9P protocol (internal use only)\"\n+ return \"launch a gofer process that serves files over 9P protocol (internal use only)\"\n}\n// Usage implements subcommands.Command.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix typo in runsc gofer flag description
PiperOrigin-RevId: 201529295
Change-Id: I55eb516ec6d14fbcd48593a3d61f724adc253a23 |
259,854 | 21.06.2018 10:52:33 | 25,200 | d571a4359cebbcf8a9b201bb125f1cdc9fb126e4 | Implement ioctl(FIOASYNC)
FIOASYNC and friends are used to send signals when a file is ready for IO.
This may or may not be needed by Nginx. While Nginx does use it, it is unclear
if the code that uses it has any effect. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/BUILD",
"new_path": "pkg/abi/linux/BUILD",
"diff": "@@ -31,6 +31,7 @@ go_library(\n\"elf.go\",\n\"errors.go\",\n\"exec.go\",\n+ \"fcntl.go\",\n\"file.go\",\n\"fs.go\",\n\"futex.go\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/abi/linux/fcntl.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package linux\n+\n+// Comands from linux/fcntl.h.\n+const (\n+ F_DUPFD = 0\n+ F_DUPFD_CLOEXEC = 1030\n+ F_GETFD = 1\n+ F_GETFL = 3\n+ F_GETOWN = 9\n+ F_SETFD = 2\n+ F_SETFL = 4\n+ F_SETLK = 6\n+ F_SETLKW = 7\n+ F_SETOWN = 8\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/ioctl.go",
"new_path": "pkg/abi/linux/ioctl.go",
"diff": "@@ -29,6 +29,11 @@ const (\nTIOCSPTLCK = 0x40045431\nFIONCLEX = 0x00005450\nFIOCLEX = 0x00005451\n+ FIOASYNC = 0x00005452\n+ FIOSETOWN = 0x00008901\n+ SIOCSPGRP = 0x00008902\n+ FIOGETOWN = 0x00008903\n+ SIOCGPGRP = 0x00008904\n)\n// ioctl(2) requests provided by uapi/linux/android/binder.h\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/signal.go",
"new_path": "pkg/abi/linux/signal.go",
"diff": "@@ -175,3 +175,37 @@ const (\nSA_NOMASK = SA_NODEFER\nSA_ONESHOT = SA_RESTARTHAND\n)\n+\n+// Signal info types.\n+const (\n+ SI_MASK = 0xffff0000\n+ SI_KILL = 0 << 16\n+ SI_TIMER = 1 << 16\n+ SI_POLL = 2 << 16\n+ SI_FAULT = 3 << 16\n+ SI_CHLD = 4 << 16\n+ SI_RT = 5 << 16\n+ SI_MESGQ = 6 << 16\n+ SI_SYS = 7 << 16\n+)\n+\n+// SIGPOLL si_codes.\n+const (\n+ // POLL_IN indicates that data input available.\n+ POLL_IN = SI_POLL | 1\n+\n+ // POLL_OUT indicates that output buffers available.\n+ POLL_OUT = SI_POLL | 2\n+\n+ // POLL_MSG indicates that an input message available.\n+ POLL_MSG = SI_POLL | 3\n+\n+ // POLL_ERR indicates that there was an i/o error.\n+ POLL_ERR = SI_POLL | 4\n+\n+ // POLL_PRI indicates that a high priority input available.\n+ POLL_PRI = SI_POLL | 5\n+\n+ // POLL_HUP indicates that a device disconnected.\n+ POLL_HUP = SI_POLL | 6\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/file.go",
"new_path": "pkg/sentry/fs/file.go",
"diff": "@@ -16,6 +16,7 @@ package fs\nimport (\n\"math\"\n+ \"sync\"\n\"sync/atomic\"\n\"gvisor.googlesource.com/gvisor/pkg/amutex\"\n@@ -72,9 +73,15 @@ type File struct {\n// other files via the Dirent cache.\nDirent *Dirent\n+ // flagsMu protects flags and async below.\n+ flagsMu sync.Mutex `state:\"nosave\"`\n+\n// flags are the File's flags. Setting or getting flags is fully atomic\n// and is not protected by mu (below).\n- flags atomic.Value `state:\".(FileFlags)\"`\n+ flags FileFlags\n+\n+ // async handles O_ASYNC notifications.\n+ async FileAsync\n// mu is dual-purpose: first, to make read(2) and write(2) thread-safe\n// in conformity with POSIX, and second, to cancel operations before they\n@@ -99,8 +106,8 @@ func NewFile(ctx context.Context, dirent *Dirent, flags FileFlags, fops FileOper\nUniqueID: uniqueid.GlobalFromContext(ctx),\nDirent: dirent,\nFileOperations: fops,\n+ flags: flags,\n}\n- f.flags.Store(flags)\nf.mu.Init()\nreturn f\n}\n@@ -117,22 +124,40 @@ func (f *File) DecRef() {\n// Release a reference on the Dirent.\nf.Dirent.DecRef()\n+\n+ f.flagsMu.Lock()\n+ if f.flags.Async && f.async != nil {\n+ f.async.Unregister(f)\n+ }\n+ f.flagsMu.Unlock()\n})\n}\n// Flags atomically loads the File's flags.\nfunc (f *File) Flags() FileFlags {\n- return f.flags.Load().(FileFlags)\n+ f.flagsMu.Lock()\n+ flags := f.flags\n+ f.flagsMu.Unlock()\n+ return flags\n}\n// SetFlags atomically changes the File's flags to the values contained\n// in newFlags. See SettableFileFlags for values that can be set.\nfunc (f *File) SetFlags(newFlags SettableFileFlags) {\n- flags := f.flags.Load().(FileFlags)\n- flags.Direct = newFlags.Direct\n- flags.NonBlocking = newFlags.NonBlocking\n- flags.Append = newFlags.Append\n- f.flags.Store(flags)\n+ f.flagsMu.Lock()\n+ f.flags.Direct = newFlags.Direct\n+ f.flags.NonBlocking = newFlags.NonBlocking\n+ f.flags.Append = newFlags.Append\n+ if f.async != nil {\n+ if newFlags.Async && !f.flags.Async {\n+ f.async.Register(f)\n+ }\n+ if !newFlags.Async && f.flags.Async {\n+ f.async.Unregister(f)\n+ }\n+ }\n+ f.flags.Async = newFlags.Async\n+ f.flagsMu.Unlock()\n}\n// Offset atomically loads the File's offset.\n@@ -361,6 +386,27 @@ func (f *File) Msync(ctx context.Context, mr memmap.MappableRange) error {\nreturn f.Fsync(ctx, int64(mr.Start), int64(mr.End-1), SyncData)\n}\n+// A FileAsync sends signals to its owner when w is ready for IO.\n+type FileAsync interface {\n+ Register(w waiter.Waitable)\n+ Unregister(w waiter.Waitable)\n+}\n+\n+// Async gets the stored FileAsync or creates a new one with the supplied\n+// function. If the supplied function is nil, no FileAsync is created and the\n+// current value is returned.\n+func (f *File) Async(newAsync func() FileAsync) FileAsync {\n+ f.flagsMu.Lock()\n+ defer f.flagsMu.Unlock()\n+ if f.async == nil && newAsync != nil {\n+ f.async = newAsync()\n+ if f.flags.Async {\n+ f.async.Register(f)\n+ }\n+ }\n+ return f.async\n+}\n+\n// FileReader implements io.Reader and io.ReaderAt.\ntype FileReader struct {\n// Ctx is the context for the file reader.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/file_state.go",
"new_path": "pkg/sentry/fs/file_state.go",
"diff": "@@ -18,13 +18,3 @@ package fs\nfunc (f *File) afterLoad() {\nf.mu.Init()\n}\n-\n-// saveFlags is invoked by stateify.\n-func (f *File) saveFlags() FileFlags {\n- return f.flags.Load().(FileFlags)\n-}\n-\n-// loadFlags is invoked by stateify.\n-func (f *File) loadFlags(flags FileFlags) {\n- f.flags.Store(flags)\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/flags.go",
"new_path": "pkg/sentry/fs/flags.go",
"diff": "@@ -42,6 +42,9 @@ type FileFlags struct {\n// Directory indicates that this file must be a directory.\nDirectory bool\n+\n+ // Async indicates that this file sends signals on IO events.\n+ Async bool\n}\n// SettableFileFlags is a subset of FileFlags above that can be changed\n@@ -55,6 +58,9 @@ type SettableFileFlags struct {\n// Append indicates this file is append only.\nAppend bool\n+\n+ // Async indicates that this file sends signals on IO events.\n+ Async bool\n}\n// Settable returns the subset of f that are settable.\n@@ -63,5 +69,6 @@ func (f FileFlags) Settable() SettableFileFlags {\nDirect: f.Direct,\nNonBlocking: f.NonBlocking,\nAppend: f.Append,\n+ Async: f.Async,\n}\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/kernel/fasync/BUILD",
"diff": "+package(licenses = [\"notice\"]) # Apache 2.0\n+\n+load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\")\n+\n+go_library(\n+ name = \"fasync\",\n+ srcs = [\"fasync.go\"],\n+ importpath = \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/fasync\",\n+ visibility = [\"//:sandbox\"],\n+ deps = [\n+ \"//pkg/abi/linux\",\n+ \"//pkg/sentry/arch\",\n+ \"//pkg/sentry/fs\",\n+ \"//pkg/sentry/kernel\",\n+ \"//pkg/sentry/kernel/auth\",\n+ \"//pkg/waiter\",\n+ ],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/kernel/fasync/fasync.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package fasync provides FIOASYNC related functionality.\n+package fasync\n+\n+import (\n+ \"sync\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\n+ \"gvisor.googlesource.com/gvisor/pkg/waiter\"\n+)\n+\n+// New creates a new FileAsync.\n+func New() fs.FileAsync {\n+ return &FileAsync{}\n+}\n+\n+// FileAsync sends signals when the registered file is ready for IO.\n+type FileAsync struct {\n+ mu sync.Mutex\n+ e waiter.Entry\n+ requester auth.Credentials\n+\n+ // Only one of the following is allowed to be non-nil.\n+ recipientPG *kernel.ProcessGroup\n+ recipientTG *kernel.ThreadGroup\n+ recipientT *kernel.Task\n+}\n+\n+// Callback sends a signal.\n+func (a *FileAsync) Callback(e *waiter.Entry) {\n+ a.mu.Lock()\n+ if a.e.Callback == nil {\n+ return\n+ }\n+ t := a.recipientT\n+ tg := a.recipientTG\n+ if a.recipientPG != nil {\n+ tg = a.recipientPG.Originator()\n+ }\n+ if tg != nil {\n+ t = tg.Leader()\n+ }\n+ c := t.Credentials()\n+ // Logic from sigio_perm in fs/fcntl.c.\n+ if a.requester.EffectiveKUID == 0 ||\n+ a.requester.EffectiveKUID == c.SavedKUID ||\n+ a.requester.EffectiveKUID == c.RealKUID ||\n+ a.requester.RealKUID == c.SavedKUID ||\n+ a.requester.RealKUID == c.RealKUID {\n+ t.SendSignal(&arch.SignalInfo{\n+ Signo: int32(linux.SIGIO),\n+ // SEND_SIG_PRIV\n+ Code: arch.SignalInfoKernel,\n+ })\n+ }\n+ a.mu.Unlock()\n+}\n+\n+// Register sets the file which will be monitored for IO events.\n+//\n+// The file must not be currently registered.\n+func (a *FileAsync) Register(w waiter.Waitable) {\n+ a.mu.Lock()\n+ defer a.mu.Unlock()\n+\n+ if a.e.Callback != nil {\n+ panic(\"registering already registered file\")\n+ }\n+\n+ a.e.Callback = a\n+ w.EventRegister(&a.e, waiter.EventIn|waiter.EventOut|waiter.EventErr|waiter.EventHUp)\n+}\n+\n+// Unregister stops monitoring a file.\n+//\n+// The file must be currently registered.\n+func (a *FileAsync) Unregister(w waiter.Waitable) {\n+ a.mu.Lock()\n+ defer a.mu.Unlock()\n+\n+ if a.e.Callback == nil {\n+ panic(\"unregistering unregistered file\")\n+ }\n+\n+ w.EventUnregister(&a.e)\n+ a.e.Callback = nil\n+}\n+\n+// Owner returns who is currently getting signals. 
All return values will be\n+// nil if no one is set to receive signals.\n+func (a *FileAsync) Owner() (*kernel.Task, *kernel.ThreadGroup, *kernel.ProcessGroup) {\n+ a.mu.Lock()\n+ defer a.mu.Unlock()\n+ return a.recipientT, a.recipientTG, a.recipientPG\n+}\n+\n+// SetOwnerTask sets the owner (who will receive signals) to a specified task.\n+// Only this owner will receive signals.\n+func (a *FileAsync) SetOwnerTask(requester *kernel.Task, recipient *kernel.Task) {\n+ a.mu.Lock()\n+ defer a.mu.Unlock()\n+ a.requester = requester.Credentials()\n+ a.recipientT = recipient\n+ a.recipientTG = nil\n+ a.recipientPG = nil\n+}\n+\n+// SetOwnerThreadGroup sets the owner (who will receive signals) to a specified\n+// thread group. Only this owner will receive signals.\n+func (a *FileAsync) SetOwnerThreadGroup(requester *kernel.Task, recipient *kernel.ThreadGroup) {\n+ a.mu.Lock()\n+ defer a.mu.Unlock()\n+ a.requester = requester.Credentials()\n+ a.recipientT = nil\n+ a.recipientTG = recipient\n+ a.recipientPG = nil\n+}\n+\n+// SetOwnerProcessGroup sets the owner (who will receive signals) to a\n+// specified process group. Only this owner will receive signals.\n+func (a *FileAsync) SetOwnerProcessGroup(requester *kernel.Task, recipient *kernel.ProcessGroup) {\n+ a.mu.Lock()\n+ defer a.mu.Unlock()\n+ a.requester = requester.Credentials()\n+ a.recipientT = nil\n+ a.recipientTG = nil\n+ a.recipientPG = recipient\n+}\n"
},
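
The fasync package above is the sentry-side half of the O_ASYNC/F_SETOWN/SIGIO contract: it registers as a waiter callback and signals the owning task, thread group or process group when the file becomes ready. For context, the sketch below is the ordinary userspace half of that contract written against Go's syscall package - mark a pipe's read end O_ASYNC, claim ownership with F_SETOWN, and wait for SIGIO when data arrives. It is plain Linux usage for illustration, not code from this change, and the small fcntl wrapper is a local helper rather than a library API.

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

// fcntl is a thin wrapper over the raw fcntl(2) syscall.
func fcntl(fd, cmd, arg int) (int, error) {
	r, _, errno := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
	if errno != 0 {
		return 0, errno
	}
	return int(r), nil
}

func main() {
	sigio := make(chan os.Signal, 1)
	signal.Notify(sigio, syscall.SIGIO)

	var p [2]int
	if err := syscall.Pipe(p[:]); err != nil {
		panic(err)
	}
	r, w := p[0], p[1]

	// Direct SIGIO for the read end to this process (F_SETOWN), then turn
	// on O_ASYNC - the same per-file state the FIOASYNC ioctl toggles.
	if _, err := fcntl(r, syscall.F_SETOWN, syscall.Getpid()); err != nil {
		panic(err)
	}
	flags, err := fcntl(r, syscall.F_GETFL, 0)
	if err != nil {
		panic(err)
	}
	if _, err := fcntl(r, syscall.F_SETFL, flags|syscall.O_ASYNC); err != nil {
		panic(err)
	}

	go func() {
		time.Sleep(100 * time.Millisecond)
		syscall.Write(w, []byte("x")) // makes the read end readable
	}()

	select {
	case s := <-sigio:
		fmt.Println("got", s) // expected: SIGIO once data arrives
	case <-time.After(time.Second):
		fmt.Println("no SIGIO within a second")
	}
}
```
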
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/sessions.go",
"new_path": "pkg/sentry/kernel/sessions.go",
"diff": "@@ -110,6 +110,11 @@ type ProcessGroup struct {\nprocessGroupEntry\n}\n+// Originator retuns the originator of the process group.\n+func (pg *ProcessGroup) Originator() *ThreadGroup {\n+ return pg.originator\n+}\n+\n// incRefWithParent grabs a reference.\n//\n// This function is called when this ProcessGroup is being associated with some\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/BUILD",
"new_path": "pkg/sentry/syscalls/linux/BUILD",
"diff": "@@ -82,6 +82,7 @@ go_library(\n\"//pkg/sentry/kernel/auth\",\n\"//pkg/sentry/kernel/epoll\",\n\"//pkg/sentry/kernel/eventfd\",\n+ \"//pkg/sentry/kernel/fasync\",\n\"//pkg/sentry/kernel/kdefs\",\n\"//pkg/sentry/kernel/pipe\",\n\"//pkg/sentry/kernel/sched\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/flags.go",
"new_path": "pkg/sentry/syscalls/linux/flags.go",
"diff": "@@ -61,6 +61,9 @@ func flagsToLinux(flags fs.FileFlags) (mask uint) {\nif flags.Directory {\nmask |= syscall.O_DIRECTORY\n}\n+ if flags.Async {\n+ mask |= syscall.O_ASYNC\n+ }\nswitch {\ncase flags.Read && flags.Write:\nmask |= syscall.O_RDWR\n@@ -82,6 +85,7 @@ func linuxToFlags(mask uint) (flags fs.FileFlags) {\nWrite: (mask & syscall.O_ACCMODE) != syscall.O_RDONLY,\nAppend: mask&syscall.O_APPEND != 0,\nDirectory: mask&syscall.O_DIRECTORY != 0,\n+ Async: mask&syscall.O_ASYNC != 0,\n}\n}\n@@ -91,5 +95,6 @@ func linuxToSettableFlags(mask uint) fs.SettableFileFlags {\nDirect: mask&syscall.O_DIRECT != 0,\nNonBlocking: mask&syscall.O_NONBLOCK != 0,\nAppend: mask&syscall.O_APPEND != 0,\n+ Async: mask&syscall.O_ASYNC != 0,\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_file.go",
"new_path": "pkg/sentry/syscalls/linux/sys_file.go",
"diff": "@@ -25,6 +25,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/lock\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/fasync\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs\"\nktime \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/limits\"\n@@ -528,6 +529,33 @@ func Ioctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nfile.SetFlags(flags.Settable())\nreturn 0, nil, nil\n+ case linux.FIOASYNC:\n+ var set int32\n+ if _, err := t.CopyIn(args[2].Pointer(), &set); err != nil {\n+ return 0, nil, err\n+ }\n+ flags := file.Flags()\n+ if set != 0 {\n+ flags.Async = true\n+ } else {\n+ flags.Async = false\n+ }\n+ file.SetFlags(flags.Settable())\n+ return 0, nil, nil\n+\n+ case linux.FIOSETOWN, linux.SIOCSPGRP:\n+ var set int32\n+ if _, err := t.CopyIn(args[2].Pointer(), &set); err != nil {\n+ return 0, nil, err\n+ }\n+ fSetOwn(t, file, set)\n+ return 0, nil, nil\n+\n+ case linux.FIOGETOWN, linux.SIOCGPGRP:\n+ who := fGetOwn(t, file)\n+ _, err := t.CopyOut(args[2].Pointer(), &who)\n+ return 0, nil, err\n+\ndefault:\nret, err := file.FileOperations.Ioctl(t, t.MemoryManager(), args)\nif err != nil {\n@@ -725,6 +753,39 @@ func Dup3(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC\nreturn uintptr(newfd), nil, nil\n}\n+func fGetOwn(t *kernel.Task, file *fs.File) int32 {\n+ ma := file.Async(nil)\n+ if ma == nil {\n+ return 0\n+ }\n+ a := ma.(*fasync.FileAsync)\n+ ot, otg, opg := a.Owner()\n+ switch {\n+ case ot != nil:\n+ return int32(t.PIDNamespace().IDOfTask(ot))\n+ case otg != nil:\n+ return int32(t.PIDNamespace().IDOfThreadGroup(otg))\n+ case opg != nil:\n+ return int32(-t.PIDNamespace().IDOfProcessGroup(opg))\n+ default:\n+ return 0\n+ }\n+}\n+\n+// fSetOwn sets the file's owner with the semantics of F_SETOWN in Linux.\n+//\n+// If who is positive, it represents a PID. 
If negative, it represents a PGID.\n+// If the PID or PGID is invalid, the owner is silently unset.\n+func fSetOwn(t *kernel.Task, file *fs.File, who int32) {\n+ a := file.Async(fasync.New).(*fasync.FileAsync)\n+ if who < 0 {\n+ pg := t.PIDNamespace().ProcessGroupWithID(kernel.ProcessGroupID(-who))\n+ a.SetOwnerProcessGroup(t, pg)\n+ }\n+ tg := t.PIDNamespace().ThreadGroupWithID(kernel.ThreadID(who))\n+ a.SetOwnerThreadGroup(t, tg)\n+}\n+\n// Fcntl implements linux syscall fcntl(2).\nfunc Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\nfd := kdefs.FD(args[0].Int())\n@@ -737,7 +798,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\ndefer file.DecRef()\nswitch cmd {\n- case syscall.F_DUPFD, syscall.F_DUPFD_CLOEXEC:\n+ case linux.F_DUPFD, linux.F_DUPFD_CLOEXEC:\nfrom := kdefs.FD(args[2].Int())\nfdFlags := kernel.FDFlags{CloseOnExec: cmd == syscall.F_DUPFD_CLOEXEC}\nfd, err := t.FDMap().NewFDFrom(from, file, fdFlags, t.ThreadGroup().Limits())\n@@ -745,19 +806,19 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nreturn 0, nil, err\n}\nreturn uintptr(fd), nil, nil\n- case syscall.F_GETFD:\n+ case linux.F_GETFD:\nreturn uintptr(fdFlagsToLinux(flags)), nil, nil\n- case syscall.F_SETFD:\n+ case linux.F_SETFD:\nflags := args[2].Uint()\nt.FDMap().SetFlags(fd, kernel.FDFlags{\nCloseOnExec: flags&syscall.FD_CLOEXEC != 0,\n})\n- case syscall.F_GETFL:\n+ case linux.F_GETFL:\nreturn uintptr(flagsToLinux(file.Flags())), nil, nil\n- case syscall.F_SETFL:\n+ case linux.F_SETFL:\nflags := uint(args[2].Uint())\nfile.SetFlags(linuxToSettableFlags(flags))\n- case syscall.F_SETLK, syscall.F_SETLKW:\n+ case linux.F_SETLK, linux.F_SETLKW:\n// In Linux the file system can choose to provide lock operations for an inode.\n// Normally pipe and socket types lack lock operations. We diverge and use a heavy\n// hammer by only allowing locks on files and directories.\n@@ -854,6 +915,11 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\ndefault:\nreturn 0, nil, syserror.EINVAL\n}\n+ case linux.F_GETOWN:\n+ return uintptr(fGetOwn(t, file)), nil, nil\n+ case linux.F_SETOWN:\n+ fSetOwn(t, file, args[2].Int())\n+ return 0, nil, nil\ndefault:\n// Everything else is not yet supported.\nreturn 0, nil, syserror.EINVAL\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement ioctl(FIOASYNC)
FIOASYNC and friends are used to send signals when a file is ready for IO.
This may or may not be needed by Nginx. While Nginx does use it, it is unclear
if the code that uses it has any effect.
PiperOrigin-RevId: 201550828
Change-Id: I7ba05a7db4eb2dfffde11e9bd9a35b65b98d7f50 |
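For context on what the FIOASYNC change above enables, a guest program typically requests SIGIO delivery along the lines of the following minimal Go sketch (illustrative only, not gVisor code; uses golang.org/x/sys/unix, Linux-specific, error handling omitted):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	r, w, _ := os.Pipe()

	// Direct SIGIO for the read end at this process (F_SETOWN), then turn on
	// asynchronous notification (O_ASYNC), which is what FIOASYNC toggles.
	unix.FcntlInt(r.Fd(), unix.F_SETOWN, unix.Getpid())
	flags, _ := unix.FcntlInt(r.Fd(), unix.F_GETFL, 0)
	unix.FcntlInt(r.Fd(), unix.F_SETFL, flags|unix.O_ASYNC)

	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGIO)

	// Making the read end readable should now raise SIGIO.
	w.Write([]byte("x"))
	fmt.Println("received:", <-sig)
}
```

The F_SETOWN and O_ASYNC round-trips in this sketch correspond to the fSetOwn/fGetOwn helpers and the Async flag plumbing added in the diff above.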
259,992 | 21.06.2018 13:21:25 | 25,200 | f6be5fe6193163ad46722bc36209572da4a15ad0 | Forward SIGUSR2 to the sandbox too
SIGUSR2 was being masked out to be used as a way to dump sentry
stacks. This could cause compatibility problems in case anyone
uses SIGUSR2 to communicate with the container init process. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/sighandling/sighandling.go",
"new_path": "pkg/sentry/sighandling/sighandling.go",
"diff": "@@ -95,7 +95,7 @@ func forwardSignals(k *kernel.Kernel, sigchans []chan os.Signal, start, stop cha\n// PrepareForwarding ensures that synchronous signals are forwarded to k and\n// returns a callback that starts signal delivery, which itself returns a\n// callback that stops signal forwarding.\n-func PrepareForwarding(k *kernel.Kernel) func() func() {\n+func PrepareForwarding(k *kernel.Kernel, enablePanicSignal bool) func() func() {\nstart := make(chan struct{})\nstop := make(chan struct{})\n@@ -112,7 +112,7 @@ func PrepareForwarding(k *kernel.Kernel) func() func() {\nsigchans = append(sigchans, sigchan)\n// SignalPanic is handled by Run.\n- if linux.Signal(sig) == kernel.SignalPanic {\n+ if enablePanicSignal && linux.Signal(sig) == kernel.SignalPanic {\ncontinue\n}\n@@ -128,9 +128,3 @@ func PrepareForwarding(k *kernel.Kernel) func() func() {\n}\n}\n}\n-\n-// StartForwarding ensures that synchronous signals are forwarded to k and\n-// returns a callback that stops signal forwarding.\n-func StartForwarding(k *kernel.Kernel) func() {\n- return PrepareForwarding(k)()\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -215,9 +215,8 @@ func New(spec *specs.Spec, conf *Config, controllerFD, restoreFD int, ioFDs []in\nif err := sighandling.IgnoreChildStop(); err != nil {\nreturn nil, fmt.Errorf(\"failed to ignore child stop signals: %v\", err)\n}\n- // Ensure that most signals received in sentry context are forwarded to\n- // the emulated kernel.\n- stopSignalForwarding := sighandling.StartForwarding(k)\n+ // Ensure that signals received are forwarded to the emulated kernel.\n+ stopSignalForwarding := sighandling.PrepareForwarding(k, false)()\nprocArgs, err := newProcess(spec, conf, ioFDs, console, creds, utsns, ipcns, k)\nif err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Forward SIGUSR2 to the sandbox too
SIGUSR2 was being masked out to be used as a way to dump sentry
stacks. This could cause compatibility problems in case anyone
uses SIGUSR2 to communicate with the container init process.
PiperOrigin-RevId: 201575374
Change-Id: I312246e828f38ad059139bb45b8addc2ed055d74 |
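The compatibility concern is easiest to see from the container side. A hypothetical init process (application code, not part of runsc) that relies on SIGUSR2 as a control signal might look like the sketch below; before the change above, the signal was not forwarded into the sandbox, so this loop never woke up:

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGUSR2)

	// An operator runs e.g. `kill -USR2 <init pid>` to request a log
	// rotation or a state dump.
	for range ch {
		log.Print("got SIGUSR2, rotating logs")
	}
}
```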
259,881 | 21.06.2018 14:53:05 | 25,200 | 2dedbc7211fb6b7f8b86148e6627054e781eaa87 | Drop return from SendExternalSignal
SendExternalSignal is no longer called before CreateProcess, so it can
enforce this simplified precondition.
StartForwarding, and after Kernel.Start. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -760,12 +760,11 @@ func (k *Kernel) Unpause() {\n//\n// context is used only for debugging to describe how the signal was received.\n//\n-// Returns false if signal could not be sent because the Kernel is not fully\n-// initialized yet.\n-func (k *Kernel) SendExternalSignal(info *arch.SignalInfo, context string) bool {\n+// Preconditions: Kernel must have an init process.\n+func (k *Kernel) SendExternalSignal(info *arch.SignalInfo, context string) {\nk.extMu.Lock()\ndefer k.extMu.Unlock()\n- return k.sendExternalSignal(info, context)\n+ k.sendExternalSignal(info, context)\n}\n// FeatureSet returns the FeatureSet.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/signal.go",
"new_path": "pkg/sentry/kernel/signal.go",
"diff": "package kernel\nimport (\n+ \"fmt\"\n+\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n@@ -33,13 +35,11 @@ const SignalPanic = linux.SIGUSR2\n//\n// context is used only for debugging to differentiate these cases.\n//\n-// Returns false if signal could not be sent because the Kernel is not fully\n-// initialized yet.\n-func (k *Kernel) sendExternalSignal(info *arch.SignalInfo, context string) bool {\n+// Preconditions: Kernel must have an init process.\n+func (k *Kernel) sendExternalSignal(info *arch.SignalInfo, context string) {\nswitch linux.Signal(info.Signo) {\ncase platform.SignalInterrupt:\n// Assume that a call to platform.Context.Interrupt() misfired.\n- return true\ncase SignalPanic:\n// SignalPanic is also specially handled in sentry setup to ensure that\n@@ -50,13 +50,10 @@ func (k *Kernel) sendExternalSignal(info *arch.SignalInfo, context string) bool\ndefault:\nlog.Infof(\"Received external signal %d in %s context\", info.Signo, context)\nif k.globalInit == nil {\n- log.Warningf(\"Received external signal %d before init created\", info.Signo)\n- return false\n+ panic(fmt.Sprintf(\"Received external signal %d before init created\", info.Signo))\n}\nk.globalInit.SendSignal(info)\n}\n-\n- return true\n}\n// sigPriv returns a SignalInfo representing a signal sent by the sentry. (The\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/sighandling/sighandling.go",
"new_path": "pkg/sentry/sighandling/sighandling.go",
"diff": "package sighandling\nimport (\n+ \"fmt\"\n\"os\"\n\"os/signal\"\n\"reflect\"\n@@ -65,7 +66,9 @@ func forwardSignals(k *kernel.Kernel, sigchans []chan os.Signal, start, stop cha\n// Otherwise, it was a signal on channel N. Index 0 represents the stop\n// channel, so index N represents the channel for signal N.\n- if !started || !k.SendExternalSignal(&arch.SignalInfo{Signo: int32(index)}, \"sentry\") {\n+ signal := linux.Signal(index)\n+\n+ if !started {\n// Kernel is not ready to receive signals.\n//\n// Kill ourselves if this signal would have killed the\n@@ -78,11 +81,16 @@ func forwardSignals(k *kernel.Kernel, sigchans []chan os.Signal, start, stop cha\n// TODO: Convert Go's runtime.raise from\n// tkill to tgkill so PrepareForwarding doesn't need to\n// be called until after filter installation.\n- switch linux.Signal(index) {\n+ switch signal {\ncase linux.SIGHUP, linux.SIGINT, linux.SIGTERM:\n- dieFromSignal(linux.Signal(index))\n+ dieFromSignal(signal)\n+ panic(fmt.Sprintf(\"Failed to die from signal %d\", signal))\n+ default:\n+ continue\n}\n}\n+\n+ k.SendExternalSignal(&arch.SignalInfo{Signo: int32(signal)}, \"sentry\")\n}\n// Close all individual channels.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Drop return from SendExternalSignal
SendExternalSignal is no longer called before CreateProcess, so it can
enforce this simplified precondition.
StartForwarding, and after Kernel.Start.
PiperOrigin-RevId: 201591170
Change-Id: Ib7022ef7895612d7d82a00942ab59fa433c4d6e9 |
259,948 | 21.06.2018 15:18:47 | 25,200 | 0e434b66a625b937d90e4ebe632de4546101be5a | netstack: tcp socket connected state S/R support. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/BUILD",
"new_path": "pkg/sentry/kernel/BUILD",
"diff": "@@ -12,6 +12,7 @@ go_stateify(\n\"fs_context.go\",\n\"ipc_namespace.go\",\n\"kernel.go\",\n+ \"kernel_state.go\",\n\"pending_signals.go\",\n\"pending_signals_state.go\",\n\"process_group_list.go\",\n@@ -45,10 +46,11 @@ go_stateify(\n\"vdso.go\",\n\"version.go\",\n],\n- out = \"kernel_state.go\",\n+ out = \"kernel_autogen_state.go\",\nimports = [\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\",\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs\",\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip\",\n],\npackage = \"kernel\",\n)\n@@ -117,6 +119,7 @@ go_library(\n\"fs_context.go\",\n\"ipc_namespace.go\",\n\"kernel.go\",\n+ \"kernel_autogen_state.go\",\n\"kernel_state.go\",\n\"pending_signals.go\",\n\"pending_signals_list.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -57,6 +57,7 @@ import (\nsentrytime \"gvisor.googlesource.com/gvisor/pkg/sentry/time\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/uniqueid\"\n\"gvisor.googlesource.com/gvisor/pkg/state\"\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n)\n// Kernel represents an emulated Linux kernel. It must be initialized by calling\n@@ -158,6 +159,9 @@ type Kernel struct {\n// exitErr is the error causing the sandbox to exit, if any. It is\n// protected by extMu.\nexitErr error\n+\n+ // danglingEndpoints is used to save / restore tcpip.DanglingEndpoints.\n+ danglingEndpoints struct{} `state:\".([]tcpip.Endpoint)\"`\n}\n// InitKernelArgs holds arguments to Init.\n@@ -422,6 +426,8 @@ func (k *Kernel) LoadFrom(r io.Reader, p platform.Platform, net inet.Stack) erro\nreturn err\n}\n+ tcpip.AsyncLoading.Wait()\n+\nlog.Infof(\"Overall load took [%s]\", time.Since(loadStart))\n// Applications may size per-cpu structures based on k.applicationCores, so\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/kernel/kernel_state.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package kernel\n+\n+import (\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n+)\n+\n+// saveDanglingEndpoints is invoked by stateify.\n+func (k *Kernel) saveDanglingEndpoints() []tcpip.Endpoint {\n+ return tcpip.GetDanglingEndpoints()\n+}\n+\n+// loadDanglingEndpoints is invoked by stateify.\n+func (k *Kernel) loadDanglingEndpoints(es []tcpip.Endpoint) {\n+ for _, e := range es {\n+ tcpip.AddDanglingEndpoint(e)\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack_global_state.go",
"new_path": "pkg/tcpip/stack/stack_global_state.go",
"diff": "package stack\n// StackFromEnv is the global stack created in restore run.\n-// FIXME: remove this variable once tcpip S/R is fully supported.\n+// FIXME\nvar StackFromEnv *Stack\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"fmt\"\n\"strconv\"\n\"strings\"\n+ \"sync\"\n\"time\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer\"\n@@ -552,3 +553,38 @@ type ProtocolAddress struct {\n// Address is a network address.\nAddress Address\n}\n+\n+// danglingEndpointsMu protects access to danglingEndpoints.\n+var danglingEndpointsMu sync.Mutex\n+\n+// danglingEndpoints tracks all dangling endpoints no longer owned by the app.\n+var danglingEndpoints = make(map[Endpoint]struct{})\n+\n+// GetDanglingEndpoints returns all dangling endpoints.\n+func GetDanglingEndpoints() []Endpoint {\n+ es := make([]Endpoint, 0, len(danglingEndpoints))\n+ danglingEndpointsMu.Lock()\n+ for e, _ := range danglingEndpoints {\n+ es = append(es, e)\n+ }\n+ danglingEndpointsMu.Unlock()\n+ return es\n+}\n+\n+// AddDanglingEndpoint adds a dangling endpoint.\n+func AddDanglingEndpoint(e Endpoint) {\n+ danglingEndpointsMu.Lock()\n+ danglingEndpoints[e] = struct{}{}\n+ danglingEndpointsMu.Unlock()\n+}\n+\n+// DeleteDanglingEndpoint removes a dangling endpoint.\n+func DeleteDanglingEndpoint(e Endpoint) {\n+ danglingEndpointsMu.Lock()\n+ delete(danglingEndpoints, e)\n+ danglingEndpointsMu.Unlock()\n+}\n+\n+// AsyncLoading is the global barrier for asynchronous endpoint loading\n+// activities.\n+var AsyncLoading sync.WaitGroup\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/BUILD",
"new_path": "pkg/tcpip/transport/tcp/BUILD",
"diff": "@@ -10,11 +10,16 @@ go_stateify(\n\"endpoint.go\",\n\"endpoint_state.go\",\n\"rcv.go\",\n+ \"segment.go\",\n\"segment_heap.go\",\n+ \"segment_queue.go\",\n+ \"segment_state.go\",\n\"snd.go\",\n+ \"snd_state.go\",\n\"tcp_segment_list.go\",\n],\nout = \"tcp_state.go\",\n+ imports = [\"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer\"],\npackage = \"tcp\",\n)\n@@ -43,7 +48,9 @@ go_library(\n\"segment.go\",\n\"segment_heap.go\",\n\"segment_queue.go\",\n+ \"segment_state.go\",\n\"snd.go\",\n+ \"snd_state.go\",\n\"tcp_segment_list.go\",\n\"tcp_state.go\",\n\"timer.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/accept.go",
"new_path": "pkg/tcpip/transport/tcp/accept.go",
"diff": "@@ -69,6 +69,7 @@ func encodeMSS(mss uint16) uint32 {\nvar synRcvdCount struct {\nsync.Mutex\nvalue uint64\n+ pending sync.WaitGroup\n}\n// listenContext is used by a listening endpoint to store state used while\n@@ -102,6 +103,7 @@ func incSynRcvdCount() bool {\nreturn false\n}\n+ synRcvdCount.pending.Add(1)\nsynRcvdCount.value++\nreturn true\n@@ -115,6 +117,7 @@ func decSynRcvdCount() {\ndefer synRcvdCount.Unlock()\nsynRcvdCount.value--\n+ synRcvdCount.pending.Done()\n}\n// newListenContext creates a new listen context.\n@@ -292,7 +295,7 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) {\nopts := parseSynSegmentOptions(s)\nif incSynRcvdCount() {\ns.incRef()\n- go e.handleSynSegment(ctx, s, &opts) // S/R-FIXME\n+ go e.handleSynSegment(ctx, s, &opts) // S/R-SAFE: synRcvdCount is the barrier.\n} else {\ncookie := ctx.createCookie(s.id, s.sequenceNumber, encodeMSS(opts.MSS))\n// Send SYN with window scaling because we currently\n@@ -381,10 +384,12 @@ func (e *endpoint) protocolListenLoop(rcvWnd seqnum.Size) *tcpip.Error {\nreturn nil\n}\nif n¬ifyDrain != 0 {\n- for s := e.segmentQueue.dequeue(); s != nil; s = e.segmentQueue.dequeue() {\n+ for !e.segmentQueue.empty() {\n+ s := e.segmentQueue.dequeue()\ne.handleListenSegment(ctx, s)\ns.decRef()\n}\n+ synRcvdCount.pending.Wait()\nclose(e.drainDone)\n<-e.undrain\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/connect.go",
"new_path": "pkg/tcpip/transport/tcp/connect.go",
"diff": "@@ -443,7 +443,8 @@ func (h *handshake) execute() *tcpip.Error {\nreturn tcpip.ErrAborted\n}\nif n¬ifyDrain != 0 {\n- for s := h.ep.segmentQueue.dequeue(); s != nil; s = h.ep.segmentQueue.dequeue() {\n+ for !h.ep.segmentQueue.empty() {\n+ s := h.ep.segmentQueue.dequeue()\nerr := h.handleSegment(s)\ns.decRef()\nif err != nil {\n@@ -813,15 +814,13 @@ func (e *endpoint) handleSegments() *tcpip.Error {\n// protocolMainLoop is the main loop of the TCP protocol. It runs in its own\n// goroutine and is responsible for sending segments and handling received\n// segments.\n-func (e *endpoint) protocolMainLoop(passive bool) *tcpip.Error {\n+func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error {\nvar closeTimer *time.Timer\nvar closeWaker sleep.Waker\ndefer func() {\n// e.mu is expected to be hold upon entering this section.\n- e.completeWorkerLocked()\n-\nif e.snd != nil {\ne.snd.resendTimer.cleanup()\n}\n@@ -830,6 +829,8 @@ func (e *endpoint) protocolMainLoop(passive bool) *tcpip.Error {\ncloseTimer.Stop()\n}\n+ e.completeWorkerLocked()\n+\nif e.drainDone != nil {\nclose(e.drainDone)\n}\n@@ -840,7 +841,7 @@ func (e *endpoint) protocolMainLoop(passive bool) *tcpip.Error {\ne.waiterQueue.Notify(waiter.EventHUp | waiter.EventErr | waiter.EventIn | waiter.EventOut)\n}()\n- if !passive {\n+ if handshake {\n// This is an active connection, so we must initiate the 3-way\n// handshake, and then inform potential waiters about its\n// completion.\n@@ -945,6 +946,17 @@ func (e *endpoint) protocolMainLoop(passive bool) *tcpip.Error {\ncloseWaker.Assert()\n})\n}\n+\n+ if n¬ifyDrain != 0 {\n+ for !e.segmentQueue.empty() {\n+ if err := e.handleSegments(); err != nil {\n+ return err\n+ }\n+ }\n+ close(e.drainDone)\n+ <-e.undrain\n+ }\n+\nreturn nil\n},\n},\n@@ -956,6 +968,27 @@ func (e *endpoint) protocolMainLoop(passive bool) *tcpip.Error {\ns.AddWaker(funcs[i].w, i)\n}\n+ // The following assertions and notifications are needed for restored\n+ // endpoints. Fresh newly created endpoints have empty states and should\n+ // not invoke any.\n+ e.segmentQueue.mu.Lock()\n+ if !e.segmentQueue.list.Empty() {\n+ e.newSegmentWaker.Assert()\n+ }\n+ e.segmentQueue.mu.Unlock()\n+\n+ e.rcvListMu.Lock()\n+ if !e.rcvList.Empty() {\n+ e.waiterQueue.Notify(waiter.EventIn)\n+ }\n+ e.rcvListMu.Unlock()\n+\n+ e.mu.RLock()\n+ if e.workerCleanup {\n+ e.notifyProtocolGoroutine(notifyClose)\n+ }\n+ e.mu.RUnlock()\n+\n// Main loop. Handle segments until both send and receive ends of the\n// connection have completed.\nfor !e.rcv.closed || !e.snd.closed || e.snd.sndUna != e.snd.sndNxtList {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -69,7 +69,7 @@ type endpoint struct {\n// change throughout the lifetime of the endpoint.\nstack *stack.Stack `state:\"manual\"`\nnetProto tcpip.NetworkProtocolNumber\n- waiterQueue *waiter.Queue\n+ waiterQueue *waiter.Queue `state:\"wait\"`\n// lastError represents the last error that the endpoint reported;\n// access to it is protected by the following mutex.\n@@ -83,7 +83,7 @@ type endpoint struct {\n// Once the peer has closed its send side, rcvClosed is set to true\n// to indicate to users that no more data is coming.\nrcvListMu sync.Mutex `state:\"nosave\"`\n- rcvList segmentList\n+ rcvList segmentList `state:\"wait\"`\nrcvClosed bool\nrcvBufSize int\nrcvBufUsed int\n@@ -91,7 +91,7 @@ type endpoint struct {\n// The following fields are protected by the mutex.\nmu sync.RWMutex `state:\"nosave\"`\nid stack.TransportEndpointID\n- state endpointState\n+ state endpointState `state:\".(endpointState)\"`\nisPortReserved bool `state:\"manual\"`\nisRegistered bool\nboundNICID tcpip.NICID `state:\"manual\"`\n@@ -118,7 +118,7 @@ type endpoint struct {\n// workerCleanup specifies if the worker goroutine must perform cleanup\n// before exitting. This can only be set to true when workerRunning is\n// also true, and they're both protected by the mutex.\n- workerCleanup bool `state:\"zerovalue\"`\n+ workerCleanup bool\n// sendTSOk is used to indicate when the TS Option has been negotiated.\n// When sendTSOk is true every non-RST segment should carry a TS as per\n@@ -153,7 +153,7 @@ type endpoint struct {\n// segmentQueue is used to hand received segments to the protocol\n// goroutine. Segments are queued as long as the queue is not full,\n// and dropped when it is.\n- segmentQueue segmentQueue `state:\"zerovalue\"`\n+ segmentQueue segmentQueue `state:\"wait\"`\n// The following fields are used to manage the send buffer. 
When\n// segments are ready to be sent, they are added to sndQueue and the\n@@ -166,7 +166,7 @@ type endpoint struct {\nsndBufUsed int\nsndClosed bool\nsndBufInQueue seqnum.Size\n- sndQueue segmentList\n+ sndQueue segmentList `state:\"wait\"`\nsndWaker sleep.Waker `state:\"manual\"`\nsndCloseWaker sleep.Waker `state:\"manual\"`\n@@ -188,17 +188,21 @@ type endpoint struct {\n// notifyFlags is a bitmask of flags used to indicate to the protocol\n// goroutine what it was notified; this is only accessed atomically.\n- notifyFlags uint32 `state:\"zerovalue\"`\n+ notifyFlags uint32 `state:\"nosave\"`\n// acceptedChan is used by a listening endpoint protocol goroutine to\n// send newly accepted connections to the endpoint so that they can be\n// read by Accept() calls.\n- acceptedChan chan *endpoint `state:\".(endpointChan)\"`\n+ acceptedChan chan *endpoint `state:\"manual\"`\n+\n+ // acceptedEndpoints is only used to save / restore the channel buffer.\n+ // FIXME\n+ acceptedEndpoints []*endpoint\n// The following are only used from the protocol goroutine, and\n// therefore don't need locks to protect them.\n- rcv *receiver\n- snd *sender\n+ rcv *receiver `state:\"wait\"`\n+ snd *sender `state:\"wait\"`\n// The goroutine drain completion notification channel.\ndrainDone chan struct{} `state:\"nosave\"`\n@@ -211,6 +215,7 @@ type endpoint struct {\nprobe stack.TCPProbeFunc `state:\"nosave\"`\n// The following are only used to assist the restore run to re-connect.\n+ bindAddress tcpip.Address\nconnectingAddress tcpip.Address\n}\n@@ -344,6 +349,7 @@ func (e *endpoint) Close() {\n// Either perform the local cleanup or kick the worker to make sure it\n// knows it needs to cleanup.\n+ tcpip.AddDanglingEndpoint(e)\nif !e.workerRunning {\ne.cleanupLocked()\n} else {\n@@ -363,9 +369,12 @@ func (e *endpoint) cleanupLocked() {\nif e.acceptedChan != nil {\nclose(e.acceptedChan)\nfor n := range e.acceptedChan {\n+ n.mu.Lock()\nn.resetConnectionLocked(tcpip.ErrConnectionAborted)\n+ n.mu.Unlock()\nn.Close()\n}\n+ e.acceptedChan = nil\n}\ne.workerCleanup = false\n@@ -374,6 +383,7 @@ func (e *endpoint) cleanupLocked() {\n}\ne.route.Release()\n+ tcpip.DeleteDanglingEndpoint(e)\n}\n// Read reads data from the endpoint.\n@@ -786,6 +796,16 @@ func (e *endpoint) checkV4Mapped(addr *tcpip.FullAddress) (tcpip.NetworkProtocol\n// Connect connects the endpoint to its peer.\nfunc (e *endpoint) Connect(addr tcpip.FullAddress) *tcpip.Error {\n+ return e.connect(addr, true, true)\n+}\n+\n+// connect connects the endpoint to its peer. In the normal non-S/R case, the\n+// new connection is expected to run the main goroutine and perform handshake.\n+// In restore of previously connected endpoints, both ends will be passively\n+// created (so no new handshaking is done); for stack-accepted connections not\n+// yet accepted by the app, they are restored without running the main goroutine\n+// here.\n+func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) *tcpip.Error {\ne.mu.Lock()\ndefer e.mu.Unlock()\n@@ -897,9 +917,27 @@ func (e *endpoint) Connect(addr tcpip.FullAddress) *tcpip.Error {\ne.boundNICID = nicid\ne.effectiveNetProtos = netProtos\ne.connectingAddress = connectingAddr\n- e.workerRunning = true\n- go e.protocolMainLoop(false) // S/R-SAFE: will be drained before save.\n+ // Connect in the restore phase does not perform handshake. 
Restore its\n+ // connection setting here.\n+ if !handshake {\n+ e.segmentQueue.mu.Lock()\n+ for _, l := range []segmentList{e.segmentQueue.list, e.sndQueue, e.snd.writeList} {\n+ for s := l.Front(); s != nil; s = s.Next() {\n+ s.id = e.id\n+ s.route = r.Clone()\n+ e.sndWaker.Assert()\n+ }\n+ }\n+ e.segmentQueue.mu.Unlock()\n+ e.snd.updateMaxPayloadSize(int(e.route.MTU()), 0)\n+ e.state = stateConnected\n+ }\n+\n+ if run {\n+ e.workerRunning = true\n+ go e.protocolMainLoop(handshake) // S/R-SAFE: will be drained before save.\n+ }\nreturn tcpip.ErrConnectStarted\n}\n@@ -971,6 +1009,9 @@ func (e *endpoint) Listen(backlog int) *tcpip.Error {\nif len(e.acceptedChan) > backlog {\nreturn tcpip.ErrInvalidEndpointState\n}\n+ if cap(e.acceptedChan) == backlog {\n+ return nil\n+ }\norigChan := e.acceptedChan\ne.acceptedChan = make(chan *endpoint, backlog)\nclose(origChan)\n@@ -1008,7 +1049,7 @@ func (e *endpoint) Listen(backlog int) *tcpip.Error {\nfunc (e *endpoint) startAcceptedLoop(waiterQueue *waiter.Queue) {\ne.waiterQueue = waiterQueue\ne.workerRunning = true\n- go e.protocolMainLoop(true) // S/R-FIXME\n+ go e.protocolMainLoop(false) // S/R-SAFE: drained on save.\n}\n// Accept returns a new endpoint if a peer has established a connection\n@@ -1049,6 +1090,7 @@ func (e *endpoint) Bind(addr tcpip.FullAddress, commit func() *tcpip.Error) (ret\nreturn tcpip.ErrAlreadyBound\n}\n+ e.bindAddress = addr.Addr\nnetProto, err := e.checkV4Mapped(&addr)\nif err != nil {\nreturn err\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint_state.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint_state.go",
"diff": "@@ -9,6 +9,7 @@ import (\n\"sync\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip/header\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/stack\"\n)\n@@ -22,7 +23,7 @@ func (e *endpoint) drainSegmentLocked() {\ne.undrain = make(chan struct{})\ne.mu.Unlock()\n- e.notificationWaker.Assert()\n+ e.notifyProtocolGoroutine(notifyDrain)\n<-e.drainDone\ne.mu.Lock()\n@@ -38,37 +39,98 @@ func (e *endpoint) beforeSave() {\nswitch e.state {\ncase stateInitial, stateBound:\n- case stateListen:\n- if !e.segmentQueue.empty() {\n- e.drainSegmentLocked()\n+ case stateListen, stateConnecting, stateConnected:\n+ if e.state == stateConnected && !e.workerRunning {\n+ // The endpoint must be in acceptedChan.\n+ break\n}\n- case stateConnecting:\ne.drainSegmentLocked()\n- if e.state != stateConnected {\n+ if e.state != stateClosed && e.state != stateError {\n+ if !e.workerRunning {\n+ panic(\"endpoint has no worker running in listen, connecting, or connected state\")\n+ }\nbreak\n}\nfallthrough\n- case stateConnected:\n- // FIXME\n- panic(tcpip.ErrSaveRejection{fmt.Errorf(\"endpoint cannot be saved in connected state: local %v:%v, remote %v:%v\", e.id.LocalAddress, e.id.LocalPort, e.id.RemoteAddress, e.id.RemotePort)})\ncase stateClosed, stateError:\nif e.workerRunning {\n- panic(fmt.Sprintf(\"endpoint still has worker running in closed or error state\"))\n+ panic(\"endpoint still has worker running in closed or error state\")\n}\ndefault:\npanic(fmt.Sprintf(\"endpoint in unknown state %v\", e.state))\n}\n+\n+ if e.waiterQueue != nil && !e.waiterQueue.IsEmpty() {\n+ panic(\"endpoint still has waiters upon save\")\n+ }\n+\n+ if !((e.state == stateBound || e.state == stateListen) == e.isPortReserved) {\n+ panic(\"endpoint port must and must only be reserved in bound or listen state\")\n+ }\n+\n+ if e.acceptedChan != nil {\n+ close(e.acceptedChan)\n+ e.acceptedEndpoints = make([]*endpoint, len(e.acceptedChan), cap(e.acceptedChan))\n+ i := 0\n+ for ep := range e.acceptedChan {\n+ e.acceptedEndpoints[i] = ep\n+ i++\n+ }\n+ if i != len(e.acceptedEndpoints) {\n+ panic(\"endpoint acceptedChan buffer got consumed by background context\")\n+ }\n+ }\n+}\n+\n+// saveState is invoked by stateify.\n+func (e *endpoint) saveState() endpointState {\n+ return e.state\n+}\n+\n+// Endpoint loading must be done in the following ordering by their state, to\n+// avoid dangling connecting w/o listening peer, and to avoid conflicts in port\n+// reservation.\n+var connectedLoading sync.WaitGroup\n+var listenLoading sync.WaitGroup\n+var connectingLoading sync.WaitGroup\n+\n+// Bound endpoint loading happens last.\n+\n+// loadState is invoked by stateify.\n+func (e *endpoint) loadState(state endpointState) {\n+ // This is to ensure that the loading wait groups include all applicable\n+ // endpoints before any asynchronous calls to the Wait() methods.\n+ switch state {\n+ case stateConnected:\n+ connectedLoading.Add(1)\n+ case stateListen:\n+ listenLoading.Add(1)\n+ case stateConnecting:\n+ connectingLoading.Add(1)\n+ }\n+ e.state = state\n}\n// afterLoad is invoked by stateify.\nfunc (e *endpoint) afterLoad() {\n+ // We load acceptedChan buffer indirectly here. 
Note that closed\n+ // endpoints might not need to allocate the channel.\n+ // FIXME\n+ if cap(e.acceptedEndpoints) > 0 {\n+ e.acceptedChan = make(chan *endpoint, cap(e.acceptedEndpoints))\n+ for _, ep := range e.acceptedEndpoints {\n+ e.acceptedChan <- ep\n+ }\n+ e.acceptedEndpoints = nil\n+ }\n+\ne.stack = stack.StackFromEnv\ne.segmentQueue.setLimit(2 * e.rcvBufSize)\ne.workMu.Init()\nstate := e.state\nswitch state {\n- case stateInitial, stateBound, stateListen, stateConnecting:\n+ case stateInitial, stateBound, stateListen, stateConnecting, stateConnected:\nvar ss SendBufferSizeOption\nif err := e.stack.TransportProtocolOption(ProtocolNumber, &ss); err == nil {\nif e.sndBufSize < ss.Min || e.sndBufSize > ss.Max {\n@@ -80,65 +142,72 @@ func (e *endpoint) afterLoad() {\n}\n}\n- switch state {\n- case stateBound, stateListen, stateConnecting:\n+ bind := func() {\ne.state = stateInitial\n- if err := e.Bind(tcpip.FullAddress{Addr: e.id.LocalAddress, Port: e.id.LocalPort}, nil); err != nil {\n+ if len(e.bindAddress) == 0 {\n+ e.bindAddress = e.id.LocalAddress\n+ }\n+ if err := e.Bind(tcpip.FullAddress{Addr: e.bindAddress, Port: e.id.LocalPort}, nil); err != nil {\npanic(\"endpoint binding failed: \" + err.String())\n}\n}\nswitch state {\n+ case stateConnected:\n+ bind()\n+ if len(e.connectingAddress) == 0 {\n+ // This endpoint is accepted by netstack but not yet by\n+ // the app. If the endpoint is IPv6 but the remote\n+ // address is IPv4, we need to connect as IPv6 so that\n+ // dual-stack mode can be properly activated.\n+ if e.netProto == header.IPv6ProtocolNumber && len(e.id.RemoteAddress) != header.IPv6AddressSize {\n+ e.connectingAddress = \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xff\" + e.id.RemoteAddress\n+ } else {\n+ e.connectingAddress = e.id.RemoteAddress\n+ }\n+ }\n+ if err := e.connect(tcpip.FullAddress{NIC: e.boundNICID, Addr: e.connectingAddress, Port: e.id.RemotePort}, false, e.workerRunning); err != tcpip.ErrConnectStarted {\n+ panic(\"endpoint connecting failed: \" + err.String())\n+ }\n+ connectedLoading.Done()\ncase stateListen:\n+ tcpip.AsyncLoading.Add(1)\n+ go func() {\n+ connectedLoading.Wait()\n+ bind()\nbacklog := cap(e.acceptedChan)\n- e.acceptedChan = nil\nif err := e.Listen(backlog); err != nil {\npanic(\"endpoint listening failed: \" + err.String())\n}\n- }\n-\n- switch state {\n+ listenLoading.Done()\n+ tcpip.AsyncLoading.Done()\n+ }()\ncase stateConnecting:\n+ tcpip.AsyncLoading.Add(1)\n+ go func() {\n+ connectedLoading.Wait()\n+ listenLoading.Wait()\n+ bind()\nif err := e.Connect(tcpip.FullAddress{NIC: e.boundNICID, Addr: e.connectingAddress, Port: e.id.RemotePort}); err != tcpip.ErrConnectStarted {\npanic(\"endpoint connecting failed: \" + err.String())\n}\n+ connectingLoading.Done()\n+ tcpip.AsyncLoading.Done()\n+ }()\n+ case stateBound:\n+ tcpip.AsyncLoading.Add(1)\n+ go func() {\n+ connectedLoading.Wait()\n+ listenLoading.Wait()\n+ connectingLoading.Wait()\n+ bind()\n+ tcpip.AsyncLoading.Done()\n+ }()\n+ case stateClosed, stateError:\n+ tcpip.DeleteDanglingEndpoint(e)\n}\n}\n-// saveAcceptedChan is invoked by stateify.\n-func (e *endpoint) saveAcceptedChan() endpointChan {\n- if e.acceptedChan == nil {\n- return endpointChan{}\n- }\n- close(e.acceptedChan)\n- buffer := make([]*endpoint, 0, len(e.acceptedChan))\n- for ep := range e.acceptedChan {\n- buffer = append(buffer, ep)\n- }\n- if len(buffer) != cap(buffer) {\n- panic(\"endpoint.acceptedChan buffer got consumed by background context\")\n- }\n- c := cap(e.acceptedChan)\n- 
e.acceptedChan = nil\n- return endpointChan{buffer: buffer, cap: c}\n-}\n-\n-// loadAcceptedChan is invoked by stateify.\n-func (e *endpoint) loadAcceptedChan(c endpointChan) {\n- if c.cap == 0 {\n- return\n- }\n- e.acceptedChan = make(chan *endpoint, c.cap)\n- for _, ep := range c.buffer {\n- e.acceptedChan <- ep\n- }\n-}\n-\n-type endpointChan struct {\n- buffer []*endpoint\n- cap int\n-}\n-\n// saveLastError is invoked by stateify.\nfunc (e *endpoint) saveLastError() string {\nif e.lastError == nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/segment.go",
"new_path": "pkg/tcpip/transport/tcp/segment.go",
"diff": "@@ -29,9 +29,9 @@ const (\ntype segment struct {\nsegmentEntry\nrefCnt int32\n- id stack.TransportEndpointID\n+ id stack.TransportEndpointID `state:\"manual\"`\nroute stack.Route `state:\"manual\"`\n- data buffer.VectorisedView\n+ data buffer.VectorisedView `state:\".(buffer.VectorisedView)\"`\n// views is used as buffer for data when its length is large\n// enough to store a VectorisedView.\nviews [8]buffer.View\n@@ -45,7 +45,7 @@ type segment struct {\n// parsedOptions stores the parsed values from the options in the segment.\nparsedOptions header.TCPOptions\n- options []byte\n+ options []byte `state:\".([]byte)\"`\n}\nfunc newSegment(r *stack.Route, id stack.TransportEndpointID, vv *buffer.VectorisedView) *segment {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/segment_queue.go",
"new_path": "pkg/tcpip/transport/tcp/segment_queue.go",
"diff": "@@ -12,8 +12,8 @@ import (\n// segmentQueue is a bounded, thread-safe queue of TCP segments.\ntype segmentQueue struct {\n- mu sync.Mutex\n- list segmentList\n+ mu sync.Mutex `state:\"nosave\"`\n+ list segmentList `state:\"wait\"`\nlimit int\nused int\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/transport/tcp/segment_state.go",
"diff": "+// Copyright 2018 The Netstack Authors. All rights reserved.\n+// Use of this source code is governed by a BSD-style\n+// license that can be found in the LICENSE file.\n+\n+package tcp\n+\n+import (\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer\"\n+)\n+\n+// saveData is invoked by stateify.\n+func (s *segment) saveData() buffer.VectorisedView {\n+ // We cannot save s.data directly as s.data.views may alias to s.views,\n+ // which is not allowed by state framework (in-struct pointer).\n+ return s.data.Clone(nil)\n+}\n+\n+// loadData is invoked by stateify.\n+func (s *segment) loadData(data buffer.VectorisedView) {\n+ // NOTE: We cannot do the s.data = data.Clone(s.views[:]) optimization\n+ // here because data.views is not guaranteed to be loaded by now. Plus,\n+ // data.views will be allocated anyway so there really is little point\n+ // of utilizing s.views for data.views.\n+ s.data = data\n+}\n+\n+// saveOptions is invoked by stateify.\n+func (s *segment) saveOptions() []byte {\n+ // We cannot save s.options directly as it may point to s.data's trimmed\n+ // tail, which is not allowed by state framework (in-struct pointer).\n+ b := make([]byte, 0, cap(s.options))\n+ return append(b, s.options...)\n+}\n+\n+// loadOptions is invoked by stateify.\n+func (s *segment) loadOptions(options []byte) {\n+ // NOTE: We cannot point s.options back into s.data's trimmed tail. But\n+ // it is OK as they do not need to aliased. Plus, options is already\n+ // allocated so there is no cost here.\n+ s.options = options\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/snd.go",
"new_path": "pkg/tcpip/transport/tcp/snd.go",
"diff": "@@ -28,7 +28,7 @@ type sender struct {\nep *endpoint\n// lastSendTime is the timestamp when the last packet was sent.\n- lastSendTime time.Time\n+ lastSendTime time.Time `state:\".(unixTime)\"`\n// dupAckCount is the number of duplicated acks received. It is used for\n// fast retransmit.\n@@ -71,7 +71,7 @@ type sender struct {\nrttMeasureSeqNum seqnum.Value\n// rttMeasureTime is the time when the rttMeasureSeqNum was sent.\n- rttMeasureTime time.Time\n+ rttMeasureTime time.Time `state:\".(unixTime)\"`\nclosed bool\nwriteNext *segment\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/transport/tcp/snd_state.go",
"diff": "+// Copyright 2018 The Netstack Authors. All rights reserved.\n+// Use of this source code is governed by a BSD-style\n+// license that can be found in the LICENSE file.\n+\n+package tcp\n+\n+import (\n+ \"time\"\n+)\n+\n+type unixTime struct {\n+ second int64\n+ nano int64\n+}\n+\n+// saveLastSendTime is invoked by stateify.\n+func (s *sender) saveLastSendTime() unixTime {\n+ return unixTime{s.lastSendTime.Unix(), s.lastSendTime.UnixNano()}\n+}\n+\n+// loadLastSendTime is invoked by stateify.\n+func (s *sender) loadLastSendTime(unix unixTime) {\n+ s.lastSendTime = time.Unix(unix.second, unix.nano)\n+}\n+\n+// saveRttMeasureTime is invoked by stateify.\n+func (s *sender) saveRttMeasureTime() unixTime {\n+ return unixTime{s.rttMeasureTime.Unix(), s.rttMeasureTime.UnixNano()}\n+}\n+\n+// loadRttMeasureTime is invoked by stateify.\n+func (s *sender) loadRttMeasureTime(unix unixTime) {\n+ s.rttMeasureTime = time.Unix(unix.second, unix.nano)\n+}\n+\n+// afterLoad is invoked by stateify.\n+func (s *sender) afterLoad() {\n+ s.resendTimer.init(&s.resendWaker)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | netstack: tcp socket connected state S/R support.
PiperOrigin-RevId: 201596247
Change-Id: Id22f47b2cdcbe14aa0d930f7807ba75f91a56724 |
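The new snd_state.go above follows the stateify convention for fields that cannot be serialized as-is: tag the field with `state:".(T)"` and supply save&lt;Field&gt;/load&lt;Field&gt; hooks. A stripped-down sketch of the same idiom on a hypothetical type (names invented for illustration, not gVisor code):

```go
package example

import "time"

// conn has a time.Time field that the state framework cannot save directly,
// so it is converted to nanoseconds on save and rebuilt on load.
type conn struct {
	deadline time.Time `state:".(int64)"`
}

// saveDeadline is invoked by stateify.
func (c *conn) saveDeadline() int64 {
	return c.deadline.UnixNano()
}

// loadDeadline is invoked by stateify.
func (c *conn) loadDeadline(nanos int64) {
	c.deadline = time.Unix(0, nanos)
}
```

The same mechanism is what segment_state.go above uses to avoid saving in-struct pointers such as s.options.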
260,013 | 22.06.2018 10:18:19 | 25,200 | 5d45f88f2c2840123e2f5ec2e45ac6d5b5a5729f | Netstack should return EOF on closed read.
The shutdown behavior where we return EAGAIN for sockets
which are non-blocking is only correct for packet based sockets.
SOCK_STREAM sockets should return EOF. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/epsocket/epsocket.go",
"new_path": "pkg/sentry/socket/epsocket/epsocket.go",
"diff": "@@ -945,7 +945,6 @@ func (s *SocketOperations) nonBlockingRead(ctx context.Context, dst usermem.IOSe\n// tcpip.Endpoint.\nfunc (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, senderAddr interface{}, senderAddrLen uint32, controlMessages socket.ControlMessages, err *syserr.Error) {\ntrunc := flags&linux.MSG_TRUNC != 0\n-\npeek := flags&linux.MSG_PEEK != 0\nif senderRequested && !s.isPacketBased() {\n// Stream sockets ignore the sender address.\n@@ -953,7 +952,7 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\n}\nn, senderAddr, senderAddrLen, controlMessages, err = s.nonBlockingRead(t, dst, peek, trunc, senderRequested)\n- if err == syserr.ErrClosedForReceive && flags&linux.MSG_DONTWAIT != 0 {\n+ if s.isPacketBased() && err == syserr.ErrClosedForReceive && flags&linux.MSG_DONTWAIT != 0 {\n// In this situation we should return EAGAIN.\nreturn 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Netstack should return EOF on closed read.
The shutdown behavior where we return EAGAIN for sockets
which are non-blocking is only correct for packet based sockets.
SOCK_STREAM sockets should return EOF.
PiperOrigin-RevId: 201703055
Change-Id: I20b25ceca7286c37766936475855959706fc5397 |
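The distinction is visible to a guest application: with MSG_DONTWAIT, a stream socket whose peer has shut down reports end-of-file as a zero-byte read, while an empty datagram socket reports EAGAIN. A minimal Go sketch (illustrative, uses golang.org/x/sys/unix; socket setup and the peer's close are assumed to have already happened):

```go
package example

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// readOnce assumes tcpFD's peer has already closed the connection and that
// udpFD has no queued datagrams.
func readOnce(tcpFD, udpFD int) {
	buf := make([]byte, 64)

	// Stream socket, peer closed: EOF shows up as a zero-byte read.
	n, _, err := unix.Recvfrom(tcpFD, buf, unix.MSG_DONTWAIT)
	fmt.Println(n, err) // expect: 0 <nil>

	// Datagram socket, nothing queued: EAGAIN is the correct result.
	_, _, err = unix.Recvfrom(udpFD, buf, unix.MSG_DONTWAIT)
	fmt.Println(err == unix.EAGAIN) // expect: true
}
```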
259,885 | 22.06.2018 13:07:21 | 25,200 | fe3fc44da3ca47fa27d55294e6c31d51b6b5dc14 | Handle mremap(old_size=0). | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/memmap/memmap.go",
"new_path": "pkg/sentry/memmap/memmap.go",
"diff": "@@ -49,13 +49,14 @@ type Mappable interface {\n// CopyMapping notifies the Mappable of an attempt to copy a mapping in ms\n// from srcAR to dstAR. For most Mappables, this is equivalent to\n- // AddMapping.\n+ // AddMapping. Note that it is possible that srcAR.Length() != dstAR.Length(),\n+ // and also that srcAR.Length() == 0.\n//\n// CopyMapping is only called when a mapping is copied within a given\n// MappingSpace; it is analogous to Linux's vm_operations_struct::mremap.\n//\n- // Preconditions: offset+dstAR.Length() does not overflow. The mapping at\n- // srcAR must exist.\n+ // Preconditions: offset+srcAR.Length() and offset+dstAR.Length() do not\n+ // overflow. The mapping at srcAR must exist.\nCopyMapping(ctx context.Context, ms MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error\n// Translate returns the Mappable's current mappings for at least the range\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/syscalls.go",
"new_path": "pkg/sentry/mm/syscalls.go",
"diff": "@@ -320,8 +320,21 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi\nreturn 0, syserror.EFAULT\n}\n+ // Behavior matrix:\n+ //\n+ // Move | oldSize = 0 | oldSize < newSize | oldSize = newSize | oldSize > newSize\n+ // ---------+-------------+-------------------+-------------------+------------------\n+ // NoMove | ENOMEM [1] | Grow in-place | No-op | Shrink in-place\n+ // MayMove | Copy [1] | Grow in-place or | No-op | Shrink in-place\n+ // | | move | |\n+ // MustMove | Copy | Move and grow | Move | Shrink and move\n+ //\n+ // [1] In-place growth is impossible because the vma at oldAddr already\n+ // occupies at least part of the destination. Thus the NoMove case always\n+ // fails and the MayMove case always falls back to copying.\n+\nif opts.Move != MRemapMustMove {\n- // Handle noops and in-place shrinking. These cases don't care if\n+ // Handle no-ops and in-place shrinking. These cases don't care if\n// [oldAddr, oldEnd) maps to a single vma, or is even mapped at all\n// (aside from oldAddr).\nif newSize <= oldSize {\n@@ -363,15 +376,13 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi\nreturn oldAddr, nil\n}\n// In-place growth failed. In the MRemapMayMove case, fall through to\n- // moving below.\n+ // copying/moving below.\nif opts.Move == MRemapNoMove {\nreturn 0, err\n}\n}\n- // Handle moving, which is the only remaining case.\n-\n- // Find a destination for the move.\n+ // Find a location for the new mapping.\nvar newAR usermem.AddrRange\nswitch opts.Move {\ncase MRemapMayMove:\n@@ -399,7 +410,9 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi\nmm.unmapLocked(ctx, newAR)\n// If the sizes specify shrinking, unmap everything between the new and\n- // old sizes at the source.\n+ // old sizes at the source. Unmapping before the following checks is\n+ // correct: compare Linux's mm/mremap.c:mremap_to() => do_munmap(),\n+ // vma_to_resize().\nif newSize < oldSize {\noldNewEnd := oldAddr + usermem.Addr(newSize)\nmm.unmapLocked(ctx, usermem.AddrRange{oldNewEnd, oldEnd})\n@@ -412,9 +425,6 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi\noldAR := usermem.AddrRange{oldAddr, oldEnd}\n- // In the MRemapMustMove case, these checks happen after unmapping:\n- // mm/mremap.c:mremap_to() => do_munmap(), vma_to_resize().\n-\n// Check that oldEnd maps to the same vma as oldAddr.\nif vseg.End() < oldEnd {\nreturn 0, syserror.EFAULT\n@@ -431,12 +441,32 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi\nif vma.off+uint64(newAR.Length()) < vma.off {\nreturn 0, syserror.EINVAL\n}\n- // Inform the Mappable, if any, of the copied mapping.\n+ // Inform the Mappable, if any, of the new mapping.\nif err := vma.mappable.CopyMapping(ctx, mm, oldAR, newAR, vseg.mappableOffsetAt(oldAR.Start)); err != nil {\nreturn 0, err\n}\n}\n+ if oldSize == 0 {\n+ // Handle copying.\n+ //\n+ // We can't use createVMALocked because it calls Mappable.AddMapping,\n+ // whereas we've already called Mappable.CopyMapping (which is\n+ // consistent with Linux). 
Call vseg.Value() (rather than\n+ // vseg.ValuePtr()) to make a copy of the vma.\n+ vma := vseg.Value()\n+ if vma.mappable != nil {\n+ vma.off = vseg.mappableOffsetAt(oldAR.Start)\n+ }\n+ if vma.id != nil {\n+ vma.id.IncRef()\n+ }\n+ mm.vmas.Add(newAR, vma)\n+ return newAR.Start, nil\n+ }\n+\n+ // Handle moving.\n+ //\n// Remove the existing vma before inserting the new one to minimize\n// iterator invalidation. We do this directly (instead of calling\n// removeVMAsLocked) because:\n"
}
] | Go | Apache License 2.0 | google/gvisor | Handle mremap(old_size=0).
PiperOrigin-RevId: 201729703
Change-Id: I486900b0c6ec59533b88da225a5829c474e35a70 |
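The oldSize = 0 rows in the behavior matrix above correspond to mremap's documented ability to duplicate a MAP_SHARED mapping rather than move it. A minimal Go sketch of that call (raw syscall on linux/amd64 via golang.org/x/sys/unix; error handling mostly omitted):

```go
package main

import (
	"fmt"
	"os"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	page := os.Getpagesize()
	mem, err := unix.Mmap(-1, 0, page,
		unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED|unix.MAP_ANONYMOUS)
	if err != nil {
		panic(err)
	}

	// mremap(old_addr, old_size=0, new_size=page, MREMAP_MAYMOVE) creates a
	// second mapping of the same shared pages instead of moving the first.
	newAddr, _, errno := unix.Syscall6(unix.SYS_MREMAP,
		uintptr(unsafe.Pointer(&mem[0])), 0, uintptr(page),
		unix.MREMAP_MAYMOVE, 0, 0)
	if errno != 0 {
		panic(errno)
	}

	mem[0] = 42
	fmt.Println(*(*byte)(unsafe.Pointer(newAddr))) // 42: both mappings alias the same pages
}
```

In the sentry this is the `oldSize == 0` branch added above, which calls CopyMapping and then inserts a copied vma instead of moving the old one.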
259,891 | 22.06.2018 14:30:33 | 25,200 | 04bdcc7b65ac03eeca9b14608a12067e1205081b | runsc: Enable waiting on individual containers within a sandbox. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -145,10 +145,11 @@ type containerManager struct {\n}\n// StartRoot will start the root container process.\n-func (cm *containerManager) StartRoot(_, _ *struct{}) error {\n+func (cm *containerManager) StartRoot(cid *string, _ *struct{}) error {\nlog.Debugf(\"containerManager.StartRoot\")\n// Tell the root container to start and wait for the result.\ncm.startChan <- struct{}{}\n+ cm.l.setRootContainerID(*cid)\nreturn <-cm.startResultChan\n}\n@@ -166,6 +167,9 @@ type StartArgs struct {\n// TODO: Separate sandbox and container configs.\n// Config is the runsc-specific configuration for the sandbox.\nConf *Config\n+\n+ // CID is the ID of the container to start.\n+ CID string\n}\n// Start runs a created container within a sandbox.\n@@ -182,8 +186,16 @@ func (cm *containerManager) Start(args *StartArgs, _ *struct{}) error {\nif args.Conf == nil {\nreturn errors.New(\"start arguments missing config\")\n}\n+ if args.CID == \"\" {\n+ return errors.New(\"start argument missing container ID\")\n+ }\n+\n+ tgid, err := cm.l.startContainer(args, cm.k)\n+ if err != nil {\n+ return err\n+ }\n+ log.Debugf(\"Container %q started with root PID of %d\", args.CID, tgid)\n- cm.l.startContainer(args, cm.k)\nreturn nil\n}\n@@ -222,15 +234,7 @@ func (cm *containerManager) Resume(_, _ *struct{}) error {\n// Wait waits for the init process in the given container.\nfunc (cm *containerManager) Wait(cid *string, waitStatus *uint32) error {\nlog.Debugf(\"containerManager.Wait\")\n- // TODO: Use the cid and wait on the init process in that\n- // container. Currently we just wait on PID 1 in the sandbox.\n- tg := cm.k.TaskSet().Root.ThreadGroupWithID(1)\n- if tg == nil {\n- return fmt.Errorf(\"cannot wait: no thread group with id 1\")\n- }\n- tg.WaitExited()\n- *waitStatus = tg.ExitStatus().Status()\n- return nil\n+ return cm.l.wait(cid, waitStatus)\n}\n// SignalArgs are arguments to the Signal method.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "package boot\nimport (\n+ \"errors\"\n\"fmt\"\n\"math/rand\"\n\"os\"\n\"runtime\"\n+ \"sync\"\n\"sync/atomic\"\n\"syscall\"\ngtime \"time\"\n@@ -81,6 +83,16 @@ type Loader struct {\n// rootProcArgs refers to the root sandbox init task.\nrootProcArgs kernel.CreateProcessArgs\n+\n+ // mu guards containerRootTGIDs.\n+ mu sync.Mutex\n+\n+ // containerRootTGIDs maps container IDs to their root processes. It\n+ // can be used to determine which process to manipulate when clients\n+ // call methods on particular containers.\n+ //\n+ // containerRootTGIDs is guarded by mu.\n+ containerRootTGIDs map[string]kernel.ThreadID\n}\nfunc init() {\n@@ -377,12 +389,14 @@ func (l *Loader) run() error {\nreturn l.k.Start()\n}\n-func (l *Loader) startContainer(args *StartArgs, k *kernel.Kernel) error {\n+// startContainer starts a child container. It returns the thread group ID of\n+// the newly created process.\n+func (l *Loader) startContainer(args *StartArgs, k *kernel.Kernel) (kernel.ThreadID, error) {\nspec := args.Spec\n// Create capabilities.\ncaps, err := specutils.Capabilities(spec.Process.Capabilities)\nif err != nil {\n- return fmt.Errorf(\"error creating capabilities: %v\", err)\n+ return 0, fmt.Errorf(\"error creating capabilities: %v\", err)\n}\n// Convert the spec's additional GIDs to KGIDs.\n@@ -416,19 +430,62 @@ func (l *Loader) startContainer(args *StartArgs, k *kernel.Kernel) error {\nk.RootIPCNamespace(),\nk)\nif err != nil {\n- return fmt.Errorf(\"failed to create new process: %v\", err)\n+ return 0, fmt.Errorf(\"failed to create new process: %v\", err)\n+ }\n+\n+ tg, err := l.k.CreateProcess(procArgs)\n+ if err != nil {\n+ return 0, fmt.Errorf(\"failed to create process in sentry: %v\", err)\n}\n- if _, err := l.k.CreateProcess(procArgs); err != nil {\n- return fmt.Errorf(\"failed to create process in sentry: %v\", err)\n+ ts := k.TaskSet()\n+ tgid := ts.Root.IDOfThreadGroup(tg)\n+ if tgid == 0 {\n+ return 0, errors.New(\"failed to get thread group ID of new process\")\n}\n// CreateProcess takes a reference on FDMap if successful.\nprocArgs.FDMap.DecRef()\n+ l.mu.Lock()\n+ defer l.mu.Unlock()\n+ l.containerRootTGIDs[args.CID] = tgid\n+\n+ return tgid, nil\n+}\n+\n+// wait waits for the init process in the given container.\n+func (l *Loader) wait(cid *string, waitStatus *uint32) error {\n+ l.mu.Lock()\n+ defer l.mu.Unlock()\n+ tgid, ok := l.containerRootTGIDs[*cid]\n+ if !ok {\n+ return fmt.Errorf(\"can't find process for container %q in %v\", *cid, l.containerRootTGIDs)\n+ }\n+\n+ // TODO: Containers don't map 1:1 with their root\n+ // processes. Container exits should be managed explicitly\n+ // rather than via PID.\n+ // If the thread either has already exited or exits during waiting,\n+ // consider the container exited.\n+ defer delete(l.containerRootTGIDs, *cid)\n+\n+ tg := l.k.TaskSet().Root.ThreadGroupWithID(tgid)\n+ if tg == nil {\n+ return fmt.Errorf(\"no thread group with ID %d\", tgid)\n+ }\n+ tg.WaitExited()\n+ *waitStatus = tg.ExitStatus().Status()\nreturn nil\n}\n+func (l *Loader) setRootContainerID(cid string) {\n+ l.mu.Lock()\n+ defer l.mu.Unlock()\n+ // The root container has PID 1.\n+ l.containerRootTGIDs = map[string]kernel.ThreadID{cid: 1}\n+}\n+\n// WaitForStartSignal waits for a start signal from the control server.\nfunc (l *Loader) WaitForStartSignal() {\n<-l.ctrl.manager.startChan\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader_test.go",
"new_path": "runsc/boot/loader_test.go",
"diff": "@@ -133,7 +133,8 @@ func TestStartSignal(t *testing.T) {\n}\n// Trigger the control server StartRoot method.\n- if err := s.ctrl.manager.StartRoot(nil, nil); err != nil {\n+ cid := \"foo\"\n+ if err := s.ctrl.manager.StartRoot(&cid, nil); err != nil {\nt.Errorf(\"error calling StartRoot: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -293,7 +293,7 @@ func (c *Container) Start(conf *boot.Config) error {\nreturn err\n}\n} else {\n- if err := c.Sandbox.Start(c.Spec, conf); err != nil {\n+ if err := c.Sandbox.Start(c.Spec, conf, c.ID); err != nil {\nc.Destroy()\nreturn err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -1020,3 +1020,92 @@ func TestMultiContainerSanity(t *testing.T) {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n}\n}\n+\n+func TestMultiContainerWait(t *testing.T) {\n+ containerIDs := []string{\n+ testutil.UniqueContainerID(),\n+ testutil.UniqueContainerID(),\n+ }\n+ containerAnnotations := []map[string]string{\n+ // The first container creates a sandbox.\n+ map[string]string{\n+ specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,\n+ },\n+ // The second container creates a container within the first\n+ // container's sandbox.\n+ map[string]string{\n+ specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,\n+ specutils.ContainerdSandboxIDAnnotation: containerIDs[0],\n+ },\n+ }\n+ args := [][]string{\n+ // The first container should run the entire duration of the\n+ // test.\n+ {\"sleep\", \"100\"},\n+ // We'll wait on the second container, which is much shorter\n+ // lived.\n+ {\"sleep\", \"1\"},\n+ }\n+\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ // Setup the containers.\n+ containers := make([]*container.Container, 0, len(containerIDs))\n+ for i, annotations := range containerAnnotations {\n+ spec := testutil.NewSpecWithArgs(args[i][0], args[i][1])\n+ spec.Annotations = annotations\n+ bundleDir, conf, err := testutil.SetupContainerInRoot(rootDir, spec)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(bundleDir)\n+ cont, err := container.Create(containerIDs[i], spec, conf, bundleDir, \"\", \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer cont.Destroy()\n+ if err := cont.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+ containers = append(containers, cont)\n+ }\n+\n+ expectedPL := []*control.Process{\n+ {\n+ UID: 0,\n+ PID: 1,\n+ PPID: 0,\n+ C: 0,\n+ Cmd: \"sleep\",\n+ },\n+ {\n+ UID: 0,\n+ PID: 2,\n+ PPID: 0,\n+ C: 0,\n+ Cmd: \"sleep\",\n+ },\n+ }\n+\n+ // Check via ps that multiple processes are running.\n+ if err := waitForProcessList(containers[0], expectedPL); err != nil {\n+ t.Errorf(\"failed to wait for sleep to start: %v\", err)\n+ }\n+\n+ // Wait on the short lived container.\n+ if ws, err := containers[1].Wait(); err != nil {\n+ t.Fatalf(\"failed to wait for process %q: %v\", strings.Join(containers[1].Spec.Process.Args, \" \"), err)\n+ } else if es := ws.ExitStatus(); es != 0 {\n+ t.Fatalf(\"process %q exited with non-zero status %d\", strings.Join(containers[1].Spec.Process.Args, \" \"), es)\n+ }\n+\n+ // After Wait returns, ensure that the root container is running and\n+ // the child has finished.\n+ if err := waitForProcessList(containers[0], expectedPL[:1]); err != nil {\n+ t.Errorf(\"failed to wait for %q to start: %v\", strings.Join(containers[0].Spec.Process.Args, \" \"), err)\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -99,7 +99,7 @@ func (s *Sandbox) StartRoot(spec *specs.Spec, conf *boot.Config) error {\n// Send a message to the sandbox control server to start the root\n// container.\n- if err := conn.Call(boot.RootContainerStart, nil, nil); err != nil {\n+ if err := conn.Call(boot.RootContainerStart, &s.ID, nil); err != nil {\nreturn fmt.Errorf(\"error starting root container %v: %v\", spec.Process.Args, err)\n}\n@@ -107,7 +107,7 @@ func (s *Sandbox) StartRoot(spec *specs.Spec, conf *boot.Config) error {\n}\n// Start starts running a non-root container inside the sandbox.\n-func (s *Sandbox) Start(spec *specs.Spec, conf *boot.Config) error {\n+func (s *Sandbox) Start(spec *specs.Spec, conf *boot.Config, cid string) error {\nlog.Debugf(\"Start non-root container sandbox %q, pid: %d\", s.ID, s.Pid)\nconn, err := s.connect()\nif err != nil {\n@@ -118,6 +118,7 @@ func (s *Sandbox) Start(spec *specs.Spec, conf *boot.Config) error {\nargs := boot.StartArgs{\nSpec: spec,\nConf: conf,\n+ CID: cid,\n}\nif err := conn.Call(boot.ContainerStart, args, nil); err != nil {\nreturn fmt.Errorf(\"error starting non-root container %v: %v\", spec.Process.Args, err)\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: Enable waiting on individual containers within a sandbox.
PiperOrigin-RevId: 201742160
Change-Id: Ia9fa1442287c5f9e1196fb117c41536a80f6bb31 |
260,013 | 22.06.2018 14:47:15 | 25,200 | 7c645ac27355a9d7016e0d5c74ce70eed2add600 | Add rpcinet support for SIOCGIFCONF.
The interfaces and their addresses are already available via
the stack Intefaces and InterfaceAddrs.
Also add some tests as we had no tests around SIOCGIFCONF. I also added the socket_netgofer lifecycle for IOCTL tests. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/rpcinet/socket.go",
"new_path": "pkg/sentry/socket/rpcinet/socket.go",
"diff": "@@ -64,10 +64,6 @@ type socketOperations struct {\n// Verify that we actually implement socket.Socket.\nvar _ = socket.Socket(&socketOperations{})\n-const (\n- sizeOfIfReq = 40\n-)\n-\n// New creates a new RPC socket.\nfunc newSocketFile(ctx context.Context, stack *Stack, family int, skType int, protocol int) (*fs.File, *syserr.Error) {\nid, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_Socket{&pb.SocketRequest{Family: int64(family), Type: int64(skType | syscall.SOCK_NONBLOCK), Protocol: int64(protocol)}}}, false /* ignoreResult */)\n@@ -465,6 +461,55 @@ func rpcIoctl(t *kernel.Task, fd, cmd uint32, arg []byte) ([]byte, error) {\nreturn res.(*pb.IOCtlResponse_Value).Value, nil\n}\n+// ifconfIoctlFromStack populates a struct ifconf for the SIOCGIFCONF ioctl.\n+func ifconfIoctlFromStack(ctx context.Context, io usermem.IO, ifc *linux.IFConf) error {\n+ // If Ptr is NULL, return the necessary buffer size via Len.\n+ // Otherwise, write up to Len bytes starting at Ptr containing ifreq\n+ // structs.\n+ t := ctx.(*kernel.Task)\n+ s := t.NetworkContext().(*Stack)\n+ if s == nil {\n+ return syserr.ErrNoDevice.ToError()\n+ }\n+\n+ if ifc.Ptr == 0 {\n+ ifc.Len = int32(len(s.Interfaces())) * int32(linux.SizeOfIFReq)\n+ return nil\n+ }\n+\n+ max := ifc.Len\n+ ifc.Len = 0\n+ for key, ifaceAddrs := range s.InterfaceAddrs() {\n+ iface := s.Interfaces()[key]\n+ for _, ifaceAddr := range ifaceAddrs {\n+ // Don't write past the end of the buffer.\n+ if ifc.Len+int32(linux.SizeOfIFReq) > max {\n+ break\n+ }\n+ if ifaceAddr.Family != linux.AF_INET {\n+ continue\n+ }\n+\n+ // Populate ifr.ifr_addr.\n+ ifr := linux.IFReq{}\n+ ifr.SetName(iface.Name)\n+ usermem.ByteOrder.PutUint16(ifr.Data[0:2], uint16(ifaceAddr.Family))\n+ usermem.ByteOrder.PutUint16(ifr.Data[2:4], 0)\n+ copy(ifr.Data[4:8], ifaceAddr.Addr[:4])\n+\n+ // Copy the ifr to userspace.\n+ dst := uintptr(ifc.Ptr) + uintptr(ifc.Len)\n+ ifc.Len += int32(linux.SizeOfIFReq)\n+ if _, err := usermem.CopyObjectOut(ctx, io, usermem.Addr(dst), ifr, usermem.IOOpts{\n+ AddressSpaceActive: true,\n+ }); err != nil {\n+ return err\n+ }\n+ }\n+ }\n+ return nil\n+}\n+\n// Ioctl implements fs.FileOperations.Ioctl.\nfunc (s *socketOperations) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {\nt := ctx.(*kernel.Task)\n@@ -491,7 +536,25 @@ func (s *socketOperations) Ioctl(ctx context.Context, io usermem.IO, args arch.S\nsyscall.SIOCGIFNAME,\nsyscall.SIOCGIFNETMASK,\nsyscall.SIOCGIFTXQLEN:\n- buf = make([]byte, sizeOfIfReq)\n+ buf = make([]byte, linux.SizeOfIFReq)\n+ case syscall.SIOCGIFCONF:\n+ // SIOCGIFCONF has slightly different behavior than the others, in that it\n+ // will need to populate the array of ifreqs.\n+ var ifc linux.IFConf\n+ if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &ifc, usermem.IOOpts{\n+ AddressSpaceActive: true,\n+ }); err != nil {\n+ return 0, err\n+ }\n+\n+ if err := ifconfIoctlFromStack(ctx, io, &ifc); err != nil {\n+ return 0, err\n+ }\n+ _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), ifc, usermem.IOOpts{\n+ AddressSpaceActive: true,\n+ })\n+\n+ return 0, err\ndefault:\nreturn 0, syserror.ENOTTY\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add rpcinet support for SIOCGIFCONF.
The interfaces and their addresses are already available via
the stack Intefaces and InterfaceAddrs.
Also add some tests as we had no tests around SIOCGIFCONF. I also added the socket_netgofer lifecycle for IOCTL tests.
PiperOrigin-RevId: 201744863
Change-Id: Ie0a285a2a2f859fa0cafada13201d5941b95499a |
259,881 | 22.06.2018 16:36:36 | 25,200 | 9c0c4fd8d05cca4905a0f8e4f391045566e4d401 | Remove nginx failure note now that it works
Updates | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -373,6 +373,7 @@ The following applications/images have been tested:\n* memcached\n* mongo\n* mysql\n+* nginx\n* node\n* php\n* postgres\n@@ -389,8 +390,6 @@ The following applications have been tested and may not yet work:\n* elasticsearch: Requires unimplemented socket ioctls. See [bug\n#2](https://github.com/google/gvisor/issues/2).\n-* nginx: Requires `ioctl(FIOASYNC)`, but see workaround in [bug\n- #1](https://github.com/google/gvisor/issues/1).\n### Will my container work with gVisor?\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove nginx failure note now that it works
Updates #1
PiperOrigin-RevId: 201760129
Change-Id: Ifd8ce9e0f93c6771083dc9bf8d35a2800c13481a |
259,881 | 25.06.2018 15:22:04 | 25,200 | 478f0ac0038afda267814fa154bcd32feb07c3b3 | Don't read FSContext.root without holding FSContext.mu
IsChrooted still has the opportunity to race with another thread
entering the FSContext into a chroot, but that is unchanged (and
fine, AFAIK). | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_resources.go",
"new_path": "pkg/sentry/kernel/task_resources.go",
"diff": "@@ -122,5 +122,7 @@ func (t *Task) AbstractSockets() *AbstractSocketNamespace {\nfunc (t *Task) IsChrooted() bool {\nrealRoot := t.k.mounts.Root()\ndefer realRoot.DecRef()\n- return t.tr.FSContext.root != realRoot\n+ root := t.tr.FSContext.RootDirectory()\n+ defer root.DecRef()\n+ return root != realRoot\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't read FSContext.root without holding FSContext.mu
IsChrooted still has the opportunity to race with another thread
entering the FSContext into a chroot, but that is unchanged (and
fine, AFAIK).
PiperOrigin-RevId: 202029117
Change-Id: I38bce763b3a7715fa6ae98aa200a19d51a0235f1 |
259,881 | 25.06.2018 16:45:31 | 25,200 | 4ac79312b093f2831079d0d71846747a4996d9ad | Don't read cwd or root without holding mu | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/fs_context.go",
"new_path": "pkg/sentry/kernel/fs_context.go",
"diff": "@@ -114,11 +114,14 @@ func (f *FSContext) SetWorkingDirectory(d *fs.Dirent) {\nif d == nil {\npanic(\"FSContext.SetWorkingDirectory called with nil dirent\")\n}\n+\n+ f.mu.Lock()\n+ defer f.mu.Unlock()\n+\nif f.cwd == nil {\npanic(fmt.Sprintf(\"FSContext.SetWorkingDirectory(%v)) called after destroy\", d))\n}\n- f.mu.Lock()\n- defer f.mu.Unlock()\n+\nold := f.cwd\nf.cwd = d\nd.IncRef()\n@@ -144,11 +147,14 @@ func (f *FSContext) SetRootDirectory(d *fs.Dirent) {\nif d == nil {\npanic(\"FSContext.SetRootDirectory called with nil dirent\")\n}\n+\n+ f.mu.Lock()\n+ defer f.mu.Unlock()\n+\nif f.root == nil {\npanic(fmt.Sprintf(\"FSContext.SetRootDirectory(%v)) called after destroy\", d))\n}\n- f.mu.Lock()\n- defer f.mu.Unlock()\n+\nold := f.root\nf.root = d\nd.IncRef()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't read cwd or root without holding mu
PiperOrigin-RevId: 202043090
Change-Id: I3c47fb3413ca8615d50d8a0503d72fcce9b09421 |
259,885 | 25.06.2018 16:49:47 | 25,200 | 16882484f96f9d75348904bd5a4e2a53acb67378 | Check for empty applicationAddrRange in MM.DecUsers. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/lifecycle.go",
"new_path": "pkg/sentry/mm/lifecycle.go",
"diff": "@@ -214,5 +214,9 @@ func (mm *MemoryManager) DecUsers(ctx context.Context) {\nmm.mappingMu.Lock()\ndefer mm.mappingMu.Unlock()\n- mm.unmapLocked(ctx, mm.applicationAddrRange())\n+ // If mm is being dropped before mm.SetMmapLayout was called,\n+ // mm.applicationAddrRange() will be empty.\n+ if ar := mm.applicationAddrRange(); ar.Length() != 0 {\n+ mm.unmapLocked(ctx, ar)\n+ }\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Check for empty applicationAddrRange in MM.DecUsers.
PiperOrigin-RevId: 202043776
Change-Id: I4373abbcf735dc1cf4bebbbbb0c7124df36e9e78 |
259,881 | 25.06.2018 18:16:20 | 25,200 | db94befb634b05aab0255214cd8c5eab0f5daaf2 | Fix panic message
The arguments are backwards from the message. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dirent.go",
"new_path": "pkg/sentry/fs/dirent.go",
"diff": "@@ -964,7 +964,7 @@ func direntReaddir(ctx context.Context, d *Dirent, it DirIterator, root *Dirent,\noffset -= 2\nnewOffset, err := it.IterateDir(ctx, dirCtx, int(offset))\nif int64(newOffset) < offset {\n- panic(fmt.Sprintf(\"node.Readdir returned offset %v less that input offset %v\", offset, newOffset))\n+ panic(fmt.Sprintf(\"node.Readdir returned offset %v less than input offset %v\", newOffset, offset))\n}\n// Add the initial nodes back to the offset count.\nnewOffset += 2\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix panic message
The arguments are backwards from the message.
PiperOrigin-RevId: 202054887
Change-Id: Id5750a84ca091f8b8fbe15be8c648d4fa3e31eb2 |
259,885 | 26.06.2018 11:34:16 | 25,200 | 33041b36cb7e8e9795545837355e4576ff2be4da | Add Context to seqfile.SeqSource.ReadSeqFileData. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/filesystems.go",
"new_path": "pkg/sentry/fs/proc/filesystems.go",
"diff": "@@ -18,6 +18,7 @@ import (\n\"bytes\"\n\"fmt\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile\"\n)\n@@ -33,7 +34,7 @@ func (*filesystemsData) NeedsUpdate(generation int64) bool {\n// ReadSeqFileData returns data for the SeqFile reader.\n// SeqData, the current generation and where in the file the handle corresponds to.\n-func (*filesystemsData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (*filesystemsData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n// We don't ever expect to see a non-nil SeqHandle.\nif h != nil {\nreturn nil, 0\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/loadavg.go",
"new_path": "pkg/sentry/fs/proc/loadavg.go",
"diff": "@@ -18,6 +18,7 @@ import (\n\"bytes\"\n\"fmt\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile\"\n)\n@@ -29,7 +30,8 @@ func (*loadavgData) NeedsUpdate(generation int64) bool {\nreturn true\n}\n-func (d *loadavgData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.\n+func (d *loadavgData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif h != nil {\nreturn nil, 0\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/meminfo.go",
"new_path": "pkg/sentry/fs/proc/meminfo.go",
"diff": "@@ -18,6 +18,7 @@ import (\n\"bytes\"\n\"fmt\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usage\"\n@@ -36,7 +37,7 @@ func (*meminfoData) NeedsUpdate(generation int64) bool {\n}\n// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.\n-func (d *meminfoData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (d *meminfoData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif h != nil {\nreturn nil, 0\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/mounts.go",
"new_path": "pkg/sentry/fs/proc/mounts.go",
"diff": "@@ -19,6 +19,7 @@ import (\n\"fmt\"\n\"sort\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n@@ -67,7 +68,7 @@ func (mif *mountInfoFile) NeedsUpdate(_ int64) bool {\n}\n// ReadSeqFileData implements SeqSource.ReadSeqFileData.\n-func (mif *mountInfoFile) ReadSeqFileData(handle seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (mif *mountInfoFile) ReadSeqFileData(ctx context.Context, handle seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif handle != nil {\nreturn nil, 0\n}\n@@ -148,7 +149,7 @@ func (mf *mountsFile) NeedsUpdate(_ int64) bool {\n}\n// ReadSeqFileData implements SeqSource.ReadSeqFileData.\n-func (mf *mountsFile) ReadSeqFileData(handle seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (mf *mountsFile) ReadSeqFileData(ctx context.Context, handle seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif handle != nil {\nreturn nil, 0\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/net.go",
"new_path": "pkg/sentry/fs/proc/net.go",
"diff": "@@ -94,7 +94,7 @@ func (*ifinet6) NeedsUpdate(generation int64) bool {\n}\n// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.\n-func (n *ifinet6) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (n *ifinet6) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif h != nil {\nreturn nil, 0\n}\n@@ -119,7 +119,7 @@ func (n *netDev) NeedsUpdate(generation int64) bool {\n// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. See Linux's\n// net/core/net-procfs.c:dev_seq_show.\n-func (n *netDev) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (n *netDev) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif h != nil {\nreturn nil, 0\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/seqfile/seqfile.go",
"new_path": "pkg/sentry/fs/proc/seqfile/seqfile.go",
"diff": "@@ -49,7 +49,7 @@ type SeqSource interface {\n// generation. The first entry in the slice is greater than the handle.\n// If handle is nil then all known records are returned. Generation\n// must always be greater than 0.\n- ReadSeqFileData(handle SeqHandle) ([]SeqData, int64)\n+ ReadSeqFileData(ctx context.Context, handle SeqHandle) ([]SeqData, int64)\n}\n// SeqGenerationCounter is a counter to keep track if the SeqSource should be\n@@ -155,7 +155,7 @@ func (s *SeqFile) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence,\nreturn 0, io.EOF\n}\noldLen := len(s.source)\n- s.updateSourceLocked(len(s.source))\n+ s.updateSourceLocked(ctx, len(s.source))\nupdated = true\n// We know that we had consumed everything up until this point\n// so we search in the new slice instead of starting over.\n@@ -187,7 +187,7 @@ func (s *SeqFile) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence,\n// check to see if we've seeked backwards and if so always update our\n// data source.\nif !updated && (s.SeqSource.NeedsUpdate(s.generation) || s.lastRead > offset) {\n- s.updateSourceLocked(i)\n+ s.updateSourceLocked(ctx, i)\n// recordOffset is 0 here and we won't update records behind the\n// current one so recordOffset is still 0 even though source\n// just got updated. Just read the next record.\n@@ -212,7 +212,7 @@ func (s *SeqFile) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence,\n}\n// updateSourceLocked requires that s.mu is held.\n-func (s *SeqFile) updateSourceLocked(record int) {\n+func (s *SeqFile) updateSourceLocked(ctx context.Context, record int) {\nvar h SeqHandle\nif record == 0 {\nh = nil\n@@ -222,7 +222,7 @@ func (s *SeqFile) updateSourceLocked(record int) {\n// Save what we have previously read.\ns.source = s.source[:record]\nvar newSource []SeqData\n- newSource, s.generation = s.SeqSource.ReadSeqFileData(h)\n+ newSource, s.generation = s.SeqSource.ReadSeqFileData(ctx, h)\ns.source = append(s.source, newSource...)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/seqfile/seqfile_test.go",
"new_path": "pkg/sentry/fs/proc/seqfile/seqfile_test.go",
"diff": "@@ -55,7 +55,7 @@ func (s *seqTest) NeedsUpdate(int64) bool {\n// ReadSeqFiledata returns a slice of SeqData which contains elements\n// greater than the handle.\n-func (s *seqTest) ReadSeqFileData(handle SeqHandle) ([]SeqData, int64) {\n+func (s *seqTest) ReadSeqFileData(ctx context.Context, handle SeqHandle) ([]SeqData, int64) {\nif handle == nil {\nreturn s.actual, 0\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/stat.go",
"new_path": "pkg/sentry/fs/proc/stat.go",
"diff": "@@ -19,6 +19,7 @@ import (\n\"fmt\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n)\n@@ -73,7 +74,7 @@ func (c cpuStats) String() string {\n}\n// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.\n-func (s *statData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (s *statData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif h != nil {\nreturn nil, 0\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/sys.go",
"new_path": "pkg/sentry/fs/proc/sys.go",
"diff": "@@ -62,7 +62,7 @@ func (*mmapMinAddrData) NeedsUpdate(generation int64) bool {\n}\n// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.\n-func (d *mmapMinAddrData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (d *mmapMinAddrData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif h != nil {\nreturn nil, 0\n}\n@@ -81,7 +81,7 @@ func (*overcommitMemory) NeedsUpdate(generation int64) bool {\n}\n// ReadSeqFileData implements seqfile.SeqSource.\n-func (*overcommitMemory) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (*overcommitMemory) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif h != nil {\nreturn nil, 0\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/task.go",
"new_path": "pkg/sentry/fs/proc/task.go",
"diff": "@@ -304,7 +304,7 @@ func (md *mapsData) NeedsUpdate(generation int64) bool {\n}\n// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.\n-func (md *mapsData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (md *mapsData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif mm := md.mm(); mm != nil {\nreturn mm.ReadSeqFileData(md.t.AsyncContext(), h)\n}\n@@ -334,7 +334,7 @@ func (s *taskStatData) NeedsUpdate(generation int64) bool {\n// ReadSeqFileData returns data for the SeqFile reader.\n// SeqData, the current generation and where in the file the handle corresponds to.\n-func (s *taskStatData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (s *taskStatData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif h != nil {\nreturn nil, 0\n}\n@@ -405,7 +405,7 @@ func (s *statmData) NeedsUpdate(generation int64) bool {\n}\n// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.\n-func (s *statmData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (s *statmData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif h != nil {\nreturn nil, 0\n}\n@@ -440,7 +440,7 @@ func (s *statusData) NeedsUpdate(generation int64) bool {\n}\n// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.\n-func (s *statusData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (s *statusData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif h != nil {\nreturn nil, 0\n}\n@@ -505,7 +505,7 @@ func (i *ioData) NeedsUpdate(generation int64) bool {\n// ReadSeqFileData returns data for the SeqFile reader.\n// SeqData, the current generation and where in the file the handle corresponds to.\n-func (i *ioData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (i *ioData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif h != nil {\nreturn nil, 0\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/uid_gid_map.go",
"new_path": "pkg/sentry/fs/proc/uid_gid_map.go",
"diff": "@@ -40,7 +40,7 @@ func (imss *idMapSeqSource) NeedsUpdate(generation int64) bool {\n}\n// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.\n-func (imss *idMapSeqSource) ReadSeqFileData(handle seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (imss *idMapSeqSource) ReadSeqFileData(ctx context.Context, handle seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nvar start int\nif handle != nil {\nstart = handle.(*idMapSeqHandle).value\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/version.go",
"new_path": "pkg/sentry/fs/proc/version.go",
"diff": "@@ -17,6 +17,7 @@ package proc\nimport (\n\"fmt\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n)\n@@ -33,7 +34,7 @@ func (*versionData) NeedsUpdate(generation int64) bool {\n}\n// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.\n-func (v *versionData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+func (v *versionData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif h != nil {\nreturn nil, 0\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add Context to seqfile.SeqSource.ReadSeqFileData.
PiperOrigin-RevId: 202163895
Change-Id: Ib9942fcff80c0834216f4f10780662bef5b52270 |
259,854 | 26.06.2018 12:40:23 | 25,200 | 5f7f78c1d7ee19b6a193d17c48f78edb220412aa | Fix data races in Unix sockets | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/queue/queue.go",
"new_path": "pkg/tcpip/transport/queue/queue.go",
"diff": "@@ -157,6 +157,8 @@ func (q *Queue) Peek() (Entry, *tcpip.Error) {\n// QueuedSize returns the number of bytes currently in the queue, that is, the\n// number of readable bytes.\nfunc (q *Queue) QueuedSize() int64 {\n+ q.mu.Lock()\n+ defer q.mu.Unlock()\nreturn q.used\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/unix/unix.go",
"new_path": "pkg/tcpip/transport/unix/unix.go",
"diff": "@@ -384,14 +384,22 @@ func vecCopy(data [][]byte, buf []byte) (uintptr, [][]byte, []byte) {\n// Readable implements Receiver.Readable.\nfunc (q *streamQueueReceiver) Readable() bool {\n+ q.mu.Lock()\n+ bl := len(q.buffer)\n+ r := q.readQueue.IsReadable()\n+ q.mu.Unlock()\n// We're readable if we have data in our buffer or if the queue receiver is\n// readable.\n- return len(q.buffer) > 0 || q.readQueue.IsReadable()\n+ return bl > 0 || r\n}\n// RecvQueuedSize implements Receiver.RecvQueuedSize.\nfunc (q *streamQueueReceiver) RecvQueuedSize() int64 {\n- return int64(len(q.buffer)) + q.readQueue.QueuedSize()\n+ q.mu.Lock()\n+ bl := len(q.buffer)\n+ qs := q.readQueue.QueuedSize()\n+ q.mu.Unlock()\n+ return int64(bl) + qs\n}\n// RecvMaxQueueSize implements Receiver.RecvMaxQueueSize.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix data races in Unix sockets
PiperOrigin-RevId: 202175558
Change-Id: I0113cb9a90d7a0cd7964bf43eef67f70c92d9589 |
259,885 | 26.06.2018 13:09:02 | 25,200 | ea10949a0036cdef95a1397ccad8fcc138ce3c0d | Use the correct Context for /proc/[pid]/maps. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/task.go",
"new_path": "pkg/sentry/fs/proc/task.go",
"diff": "@@ -306,7 +306,7 @@ func (md *mapsData) NeedsUpdate(generation int64) bool {\n// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.\nfunc (md *mapsData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\nif mm := md.mm(); mm != nil {\n- return mm.ReadSeqFileData(md.t.AsyncContext(), h)\n+ return mm.ReadSeqFileData(ctx, h)\n}\nreturn []seqfile.SeqData{}, 0\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use the correct Context for /proc/[pid]/maps.
PiperOrigin-RevId: 202180487
Change-Id: I95cce41a4842ab731a4821b387b32008bfbdcb08 |
259,992 | 26.06.2018 19:04:51 | 25,200 | c186e408cc61cbefd6d72c2ff3e9d629572570db | Add KVM, overlay and host network to image tests | [
{
"change_type": "MODIFY",
"old_path": "kokoro/gcp_ubuntu/run_tests.sh",
"new_path": "kokoro/gcp_ubuntu/run_tests.sh",
"diff": "@@ -46,6 +46,9 @@ exit_code=${?}\nif [[ ${exit_code} -eq 0 ]]; then\n# image_test is tagged manual\nbazel test --test_output=errors --test_env=RUNSC_RUNTIME=${runtime} //runsc/test/image:image_test\n+ bazel test --test_output=errors --test_env=RUNSC_RUNTIME=${runtime}-kvm //runsc/test/image:image_test\n+ bazel test --test_output=errors --test_env=RUNSC_RUNTIME=${runtime}-nethost //runsc/test/image:image_test\n+ bazel test --test_output=errors --test_env=RUNSC_RUNTIME=${runtime}-overlay //runsc/test/image:image_test\nexit_code=${?}\nfi\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/overlay.go",
"new_path": "pkg/sentry/fs/overlay.go",
"diff": "@@ -113,7 +113,7 @@ func NewOverlayRoot(ctx context.Context, upper *Inode, lower *Inode, flags Mount\n// - lower must not require that file objects be revalidated.\n// - lower must not have dynamic file/directory content.\nfunc NewOverlayRootFile(ctx context.Context, upperMS *MountSource, lower *Inode, flags MountSourceFlags) (*Inode, error) {\n- if IsRegular(lower.StableAttr) {\n+ if !IsRegular(lower.StableAttr) {\nreturn nil, fmt.Errorf(\"lower Inode is not a regular file\")\n}\nmsrc := newOverlayMountSource(upperMS, lower.MountSource, flags)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/install.sh",
"new_path": "runsc/test/image/install.sh",
"diff": "@@ -75,10 +75,17 @@ if [[ ${uninstall} == 0 ]]; then\nmkdir -p \"${logdir}\"\nsudo -n chmod a+wx \"${logdir}\"\n- sudo -n \"${dockercfg}\" runtime-add \"${runtime}\" \"${runsc}\" --debug-log-dir \"${logdir}\" --debug --strace --log-packets\n+ declare -r args=\"--debug-log-dir \"${logdir}\" --debug --strace --log-packets\"\n+ sudo -n \"${dockercfg}\" runtime-add \"${runtime}\" \"${runsc}\" ${args}\n+ sudo -n \"${dockercfg}\" runtime-add \"${runtime}\"-kvm \"${runsc}\" --platform=kvm ${args}\n+ sudo -n \"${dockercfg}\" runtime-add \"${runtime}\"-hostnet \"${runsc}\" --network=host ${args}\n+ sudo -n \"${dockercfg}\" runtime-add \"${runtime}\"-overlay \"${runsc}\" --overlay ${args}\nelse\nsudo -n \"${dockercfg}\" runtime-rm \"${runtime}\"\n+ sudo -n \"${dockercfg}\" runtime-rm \"${runtime}\"-kvm\n+ sudo -n \"${dockercfg}\" runtime-rm \"${runtime}\"-hostnet\n+ sudo -n \"${dockercfg}\" runtime-rm \"${runtime}\"-overlay\nfi\necho \"Restarting docker service...\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add KVM, overlay and host network to image tests
PiperOrigin-RevId: 202236006
Change-Id: I4ea964a70fc49e8b51c9da27d77301c4eadaae71 |
259,885 | 27.06.2018 13:41:50 | 25,200 | 4215e059e24c5ed6298060769444b0eeaa03da8a | Ignore MADV_DONTDUMP and MADV_DODUMP. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_mmap.go",
"new_path": "pkg/sentry/syscalls/linux/sys_mmap.go",
"diff": "@@ -181,6 +181,10 @@ func Madvise(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca\nfallthrough\ncase linux.MADV_MERGEABLE, linux.MADV_UNMERGEABLE:\nfallthrough\n+ case linux.MADV_DONTDUMP, linux.MADV_DODUMP:\n+ // TODO: Core dumping isn't implemented, so these are\n+ // no-ops.\n+ fallthrough\ncase linux.MADV_NORMAL, linux.MADV_RANDOM, linux.MADV_SEQUENTIAL, linux.MADV_WILLNEED:\n// Do nothing, we totally ignore the suggestions above.\nreturn 0, nil, nil\n"
}
] | Go | Apache License 2.0 | google/gvisor | Ignore MADV_DONTDUMP and MADV_DODUMP.
PiperOrigin-RevId: 202361912
Change-Id: I1d0ee529073954d467b870872f494cebbf8ea61a |
259,992 | 27.06.2018 14:40:37 | 25,200 | 6b6852bceb12900f27a541682ddfe47893911c6e | Fix semaphore data races | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/semaphore/semaphore.go",
"new_path": "pkg/sentry/kernel/semaphore/semaphore.go",
"diff": "@@ -118,6 +118,9 @@ func (r *Registry) FindOrCreate(ctx context.Context, key, nsems int32, mode linu\nif !private {\n// Look up an existing semaphore.\nif set := r.findByKey(key); set != nil {\n+ set.mu.Lock()\n+ defer set.mu.Unlock()\n+\n// Check that caller can access semaphore set.\ncreds := auth.CredentialsFromContext(ctx)\nif !set.checkPerms(creds, fs.PermsFromMode(mode)) {\n@@ -170,6 +173,9 @@ func (r *Registry) RemoveID(id int32, creds *auth.Credentials) error {\nreturn syserror.EINVAL\n}\n+ set.mu.Lock()\n+ defer set.mu.Unlock()\n+\n// \"The effective user ID of the calling process must match the creator or\n// owner of the semaphore set, or the caller must be privileged.\"\nif !set.checkCredentials(creds) && !set.checkCapability(creds) {\n@@ -444,11 +450,9 @@ func (s *Set) checkPerms(creds *auth.Credentials, reqPerms fs.PermMask) bool {\nreturn s.checkCapability(creds)\n}\n+// destroy destroys the set. Caller must hold 's.mu'.\nfunc (s *Set) destroy() {\n- s.mu.Lock()\n- defer s.mu.Unlock()\n-\n- // Notify all waiters. Tney will fail on the next attempt to execute\n+ // Notify all waiters. They will fail on the next attempt to execute\n// operations and return error.\ns.dead = true\nfor _, s := range s.sems {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix semaphore data races
PiperOrigin-RevId: 202371908
Change-Id: I72603b1d321878cae6404987c49e64732b676331 |
259,992 | 28.06.2018 09:45:52 | 25,200 | 1f207de315430fb178b7025a5afd419afdc31449 | Add option to configure watchdog action | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/config.go",
"new_path": "runsc/boot/config.go",
"diff": "@@ -18,6 +18,8 @@ import (\n\"fmt\"\n\"strconv\"\n\"strings\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/watchdog\"\n)\n// PlatformType tells which platform to use.\n@@ -130,6 +132,18 @@ func (n NetworkType) String() string {\n}\n}\n+// MakeWatchdogAction converts type from string.\n+func MakeWatchdogAction(s string) (watchdog.Action, error) {\n+ switch strings.ToLower(s) {\n+ case \"log\", \"logwarning\":\n+ return watchdog.LogWarning, nil\n+ case \"panic\":\n+ return watchdog.Panic, nil\n+ default:\n+ return 0, fmt.Errorf(\"invalid watchdog action %q\", s)\n+ }\n+}\n+\n// Config holds configuration that is not part of the runtime spec.\ntype Config struct {\n// RootDir is the runtime root directory.\n@@ -180,6 +194,8 @@ type Config struct {\n// MultiContainer enables multiple containers support inside one sandbox.\n// TODO: Remove this when multiple container is fully supported.\nMultiContainer bool\n+\n+ WatchdogAction watchdog.Action\n}\n// ToFlags returns a slice of flags that correspond to the given Config.\n@@ -199,5 +215,6 @@ func (c *Config) ToFlags() []string {\n\"--strace=\" + strconv.FormatBool(c.Strace),\n\"--strace-syscalls=\" + strings.Join(c.StraceSyscalls, \",\"),\n\"--strace-log-size=\" + strconv.Itoa(int(c.StraceLogSize)),\n+ \"--watchdog-action=\" + c.WatchdogAction.String(),\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -205,7 +205,7 @@ func New(spec *specs.Spec, conf *Config, controllerFD, restoreFD int, ioFDs []in\n}\n// Create a watchdog.\n- watchdog := watchdog.New(k, watchdog.DefaultTimeout, watchdog.LogWarning)\n+ watchdog := watchdog.New(k, watchdog.DefaultTimeout, conf.WatchdogAction)\n// Create the control server using the provided FD.\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/main.go",
"new_path": "runsc/main.go",
"diff": "@@ -60,6 +60,7 @@ var (\nfileAccess = flag.String(\"file-access\", \"proxy\", \"specifies which filesystem to use: proxy (default), direct. Using a proxy is more secure because it disallows the sandbox from opennig files directly in the host.\")\noverlay = flag.Bool(\"overlay\", false, \"wrap filesystem mounts with writable overlay. All modifications are stored in memory inside the sandbox.\")\nmultiContainer = flag.Bool(\"multi-container\", false, \"enable *experimental* multi-container support.\")\n+ watchdogAction = flag.String(\"watchdog-action\", \"log\", \"sets what action the watchdog takes when triggered: log (default), panic.\")\n)\nvar gitRevision = \"\"\n@@ -110,6 +111,11 @@ func main() {\ncmd.Fatalf(\"%v\", err)\n}\n+ wa, err := boot.MakeWatchdogAction(*watchdogAction)\n+ if err != nil {\n+ cmd.Fatalf(\"%v\", err)\n+ }\n+\n// Create a new Config from the flags.\nconf := &boot.Config{\nRootDir: *rootDir,\n@@ -125,6 +131,7 @@ func main() {\nStrace: *strace,\nStraceLogSize: *straceLogSize,\nMultiContainer: *multiContainer,\n+ WatchdogAction: wa,\n}\nif len(*straceSyscalls) != 0 {\nconf.StraceSyscalls = strings.Split(*straceSyscalls, \",\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add option to configure watchdog action
PiperOrigin-RevId: 202494747
Change-Id: I4d4a18e71468690b785060e580a5f83c616bd90f |
259,992 | 28.06.2018 09:56:23 | 25,200 | 8459390cdd81ef1c8180948566e893b06233923c | Error out if spec is invalid
Closes | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -267,7 +267,7 @@ func newProcess(spec *specs.Spec, conf *Config, ioFDs []int, console bool, creds\nFilename: exec,\nArgv: spec.Process.Args,\nEnvv: spec.Process.Env,\n- WorkingDirectory: spec.Process.Cwd,\n+ WorkingDirectory: spec.Process.Cwd, // Defaults to '/' if empty.\nCredentials: creds,\nUmask: 0022,\nLimits: ls,\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/boot.go",
"new_path": "runsc/cmd/boot.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"context\"\n\"flag\"\n\"github.com/google/subcommands\"\n+ specs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/runsc/boot\"\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n@@ -116,6 +117,9 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nif b.applyCaps {\ncaps := spec.Process.Capabilities\n+ if caps == nil {\n+ caps = &specs.LinuxCapabilities{}\n+ }\nif conf.Platform == boot.PlatformPtrace {\n// Ptrace platform requires extra capabilities.\nconst c = \"CAP_SYS_PTRACE\"\n@@ -131,7 +135,7 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nargs = append(args, arg)\n}\n}\n- if err := setCapsAndCallSelf(spec, args, caps); err != nil {\n+ if err := setCapsAndCallSelf(args, caps); err != nil {\nFatalf(\"%v\", err)\n}\npanic(\"setCapsAndCallSelf must never return success\")\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/cmd.go",
"new_path": "runsc/cmd/cmd.go",
"diff": "@@ -72,7 +72,7 @@ func (i *intFlags) Set(s string) error {\n// setCapsAndCallSelf sets capabilities to the current thread and then execve's\n// itself again with the arguments specified in 'args' to restart the process\n// with the desired capabilities.\n-func setCapsAndCallSelf(spec *specs.Spec, args []string, caps *specs.LinuxCapabilities) error {\n+func setCapsAndCallSelf(args []string, caps *specs.LinuxCapabilities) error {\n// Keep thread locked while capabilities are changed.\nruntime.LockOSThread()\ndefer runtime.UnlockOSThread()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/gofer.go",
"new_path": "runsc/cmd/gofer.go",
"diff": "@@ -95,7 +95,7 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n// Note: minimal argument handling for the default case to keep it simple.\nargs := os.Args\nargs = append(args, \"--apply-caps=false\")\n- if err := setCapsAndCallSelf(spec, args, lc); err != nil {\n+ if err := setCapsAndCallSelf(args, lc); err != nil {\nFatalf(\"Unable to apply caps: %v\", err)\n}\npanic(\"unreachable\")\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -193,9 +193,6 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo\nif err := validateID(id); err != nil {\nreturn nil, err\n}\n- if err := specutils.ValidateSpec(spec); err != nil {\n- return nil, err\n- }\ncontainerRoot := filepath.Join(conf.RootDir, id)\nif _, err := os.Stat(containerRoot); err == nil {\n@@ -434,9 +431,11 @@ func (c *Container) Destroy() error {\nlog.Debugf(\"Destroy container %q\", c.ID)\n// First stop the container.\n+ if c.Sandbox != nil {\nif err := c.Sandbox.Stop(c.ID); err != nil {\nreturn err\n}\n+ }\n// \"If any poststop hook fails, the runtime MUST log a warning, but the\n// remaining hooks and lifecycle continue as if the hook had succeeded\" -OCI spec.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -812,28 +812,6 @@ func TestConsoleSocket(t *testing.T) {\n}\n}\n-func TestSpecUnsupported(t *testing.T) {\n- spec := testutil.NewSpecWithArgs(\"/bin/true\")\n- spec.Process.SelinuxLabel = \"somelabel\"\n-\n- // These are normally set by docker and will just cause warnings to be logged.\n- spec.Process.ApparmorProfile = \"someprofile\"\n- spec.Linux = &specs.Linux{Seccomp: &specs.LinuxSeccomp{}}\n-\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n- if err != nil {\n- t.Fatalf(\"error setting up container: %v\", err)\n- }\n- defer os.RemoveAll(rootDir)\n- defer os.RemoveAll(bundleDir)\n-\n- id := testutil.UniqueContainerID()\n- _, err = container.Create(id, spec, conf, bundleDir, \"\", \"\", \"\")\n- if err == nil || !strings.Contains(err.Error(), \"is not supported\") {\n- t.Errorf(\"container.Create() wrong error, got: %v, want: *is not supported, spec.Process: %+v\", err, spec.Process)\n- }\n-}\n-\n// TestRunNonRoot checks that sandbox can be configured when running as\n// non-privileged user.\nfunc TestRunNonRoot(t *testing.T) {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/specutils/BUILD",
"new_path": "runsc/specutils/BUILD",
"diff": "@@ -22,4 +22,5 @@ go_test(\nsize = \"small\",\nsrcs = [\"specutils_test.go\"],\nembed = [\":specutils\"],\n+ deps = [\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\"],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/specutils/specutils.go",
"new_path": "runsc/specutils/specutils.go",
"diff": "@@ -47,10 +47,28 @@ func LogSpec(spec *specs.Spec) {\n// ValidateSpec validates that the spec is compatible with runsc.\nfunc ValidateSpec(spec *specs.Spec) error {\n+ // Mandatory fields.\nif spec.Process == nil {\n- return fmt.Errorf(\"Process must be defined\")\n+ return fmt.Errorf(\"Spec.Process must be defined: %+v\", spec)\n}\n- if spec.Process.SelinuxLabel != \"\" {\n+ if len(spec.Process.Args) == 0 {\n+ return fmt.Errorf(\"Spec.Process.Arg must be defined: %+v\", spec.Process)\n+ }\n+ if spec.Root == nil {\n+ return fmt.Errorf(\"Spec.Root must be defined: %+v\", spec)\n+ }\n+ if len(spec.Root.Path) == 0 {\n+ return fmt.Errorf(\"Spec.Root.Path must be defined: %+v\", spec.Root)\n+ }\n+\n+ // Unsupported fields.\n+ if spec.Solaris != nil {\n+ return fmt.Errorf(\"Spec.Solaris is not supported: %+v\", spec)\n+ }\n+ if spec.Windows != nil {\n+ return fmt.Errorf(\"Spec.Windows is not supported: %+v\", spec)\n+ }\n+ if len(spec.Process.SelinuxLabel) != 0 {\nreturn fmt.Errorf(\"SELinux is not supported: %s\", spec.Process.SelinuxLabel)\n}\n@@ -64,7 +82,7 @@ func ValidateSpec(spec *specs.Spec) error {\nlog.Warningf(\"Seccomp spec is being ignored\")\n}\n- // 2 annotations are use by containerd to support multi-container pods.\n+ // Two annotations are use by containerd to support multi-container pods.\n// \"io.kubernetes.cri.container-type\"\n// \"io.kubernetes.cri.sandbox-id\"\ncontainerType, hasContainerType := spec.Annotations[ContainerdContainerTypeAnnotation]\n@@ -98,6 +116,9 @@ func ReadSpec(bundleDir string) (*specs.Spec, error) {\nif err := json.Unmarshal(specBytes, &spec); err != nil {\nreturn nil, fmt.Errorf(\"error unmarshaling spec from file %q: %v\\n %s\", specFile, err, string(specBytes))\n}\n+ if err := ValidateSpec(&spec); err != nil {\n+ return nil, err\n+ }\nreturn &spec, nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/specutils/specutils_test.go",
"new_path": "runsc/specutils/specutils_test.go",
"diff": "@@ -20,6 +20,8 @@ import (\n\"strings\"\n\"testing\"\n\"time\"\n+\n+ specs \"github.com/opencontainers/runtime-spec/specs-go\"\n)\nfunc TestWaitForReadyHappy(t *testing.T) {\n@@ -94,3 +96,114 @@ func TestWaitForReadyTimeout(t *testing.T) {\n}\ncmd.Process.Kill()\n}\n+\n+func TestSpecInvalid(t *testing.T) {\n+ for _, test := range []struct {\n+ name string\n+ spec specs.Spec\n+ error string\n+ }{\n+ {\n+ name: \"valid\",\n+ spec: specs.Spec{\n+ Root: &specs.Root{Path: \"/\"},\n+ Process: &specs.Process{\n+ Args: []string{\"/bin/true\"},\n+ },\n+ },\n+ error: \"\",\n+ },\n+ {\n+ name: \"valid+warning\",\n+ spec: specs.Spec{\n+ Root: &specs.Root{Path: \"/\"},\n+ Process: &specs.Process{\n+ Args: []string{\"/bin/true\"},\n+ // This is normally set by docker and will just cause warnings to be logged.\n+ ApparmorProfile: \"someprofile\",\n+ },\n+ // This is normally set by docker and will just cause warnings to be logged.\n+ Linux: &specs.Linux{Seccomp: &specs.LinuxSeccomp{}},\n+ },\n+ error: \"\",\n+ },\n+ {\n+ name: \"no root\",\n+ spec: specs.Spec{\n+ Process: &specs.Process{\n+ Args: []string{\"/bin/true\"},\n+ },\n+ },\n+ error: \"must be defined\",\n+ },\n+ {\n+ name: \"empty root\",\n+ spec: specs.Spec{\n+ Root: &specs.Root{},\n+ Process: &specs.Process{\n+ Args: []string{\"/bin/true\"},\n+ },\n+ },\n+ error: \"must be defined\",\n+ },\n+ {\n+ name: \"no process\",\n+ spec: specs.Spec{\n+ Root: &specs.Root{Path: \"/\"},\n+ },\n+ error: \"must be defined\",\n+ },\n+ {\n+ name: \"empty args\",\n+ spec: specs.Spec{\n+ Root: &specs.Root{Path: \"/\"},\n+ Process: &specs.Process{},\n+ },\n+ error: \"must be defined\",\n+ },\n+ {\n+ name: \"selinux\",\n+ spec: specs.Spec{\n+ Root: &specs.Root{Path: \"/\"},\n+ Process: &specs.Process{\n+ Args: []string{\"/bin/true\"},\n+ SelinuxLabel: \"somelabel\",\n+ },\n+ },\n+ error: \"is not supported\",\n+ },\n+ {\n+ name: \"solaris\",\n+ spec: specs.Spec{\n+ Root: &specs.Root{Path: \"/\"},\n+ Process: &specs.Process{\n+ Args: []string{\"/bin/true\"},\n+ },\n+ Solaris: &specs.Solaris{},\n+ },\n+ error: \"is not supported\",\n+ },\n+ {\n+ name: \"windows\",\n+ spec: specs.Spec{\n+ Root: &specs.Root{Path: \"/\"},\n+ Process: &specs.Process{\n+ Args: []string{\"/bin/true\"},\n+ },\n+ Windows: &specs.Windows{},\n+ },\n+ error: \"is not supported\",\n+ },\n+ } {\n+ err := ValidateSpec(&test.spec)\n+ if len(test.error) == 0 {\n+ if err != nil {\n+ t.Errorf(\"ValidateSpec(%q) failed, err: %v\", test.name, err)\n+ }\n+ } else {\n+ if err == nil || !strings.Contains(err.Error(), test.error) {\n+ t.Errorf(\"ValidateSpec(%q) wrong error, got: %v, want: .*%s.*\", test.name, err, test.error)\n+ }\n+ }\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Error out if spec is invalid
Closes #66
PiperOrigin-RevId: 202496258
Change-Id: Ib9287c5bf1279ffba1db21ebd9e6b59305cddf34 |
259,992 | 28.06.2018 13:22:12 | 25,200 | bb31a119035dd5266737b41456d967789693cf20 | Wait for sandbox process when waiting for root container
Closes | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -449,7 +449,7 @@ func (c *Container) Destroy() error {\n// If we are the first container in the sandbox, take the sandbox down\n// as well.\n- if c.Sandbox != nil && c.Sandbox.ID == c.ID {\n+ if c.Sandbox != nil && c.Sandbox.IsRootContainer(c.ID) {\nif err := c.Sandbox.Destroy(); err != nil {\nlog.Warningf(\"Failed to destroy sandbox %q: %v\", c.Sandbox.ID, err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -434,9 +434,29 @@ func (s *Sandbox) Wait(cid string) (syscall.WaitStatus, error) {\nif err := conn.Call(boot.ContainerWait, &cid, &ws); err != nil {\nreturn ws, fmt.Errorf(\"err waiting on container %q: %v\", cid, err)\n}\n+\n+ if s.IsRootContainer(cid) {\n+ // If waiting for the root, give some time for the sandbox process to exit\n+ // to prevent races with resources that might still be in use.\n+ timeout := time.Now().Add(time.Second)\n+ log.Debugf(\"Waiting for the sandbox process to exit\")\n+ for s.IsRunning() {\n+ if time.Now().After(timeout) {\n+ log.Debugf(\"Timeout waiting for sandbox process to exit\")\n+ break\n+ }\n+ time.Sleep(100 * time.Millisecond)\n+ }\n+ }\nreturn ws, nil\n}\n+// IsRootContainer returns true if the specified container ID belongs to the\n+// root container.\n+func (s *Sandbox) IsRootContainer(cid string) bool {\n+ return s.ID == cid\n+}\n+\n// Stop stops the container in the sandbox.\nfunc (s *Sandbox) Stop(cid string) error {\n// TODO: This should stop the container with the given ID\n"
}
] | Go | Apache License 2.0 | google/gvisor | Wait for sandbox process when waiting for root container
Closes #71
PiperOrigin-RevId: 202532762
Change-Id: I80a446ff638672ff08e6fd853cd77e28dd05d540 |
260,013 | 29.06.2018 12:39:22 | 25,200 | 23f49097c77213175e9b11755c28c3ff5ccc1118 | Panic in netstack during cleanup where a FIN becomes a RST.
There is a subtle bug where during cleanup with unread data a FIN can
be converted to a RST, at that point the entire connection should be
aborted as we're not expecting any ACKs to the RST. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/connect.go",
"new_path": "pkg/tcpip/transport/tcp/connect.go",
"diff": "@@ -938,6 +938,10 @@ func (e *endpoint) protocolMainLoop(passive bool) *tcpip.Error {\ne.snd.updateMaxPayloadSize(mtu, count)\n}\n+ if n¬ifyReset != 0 {\n+ e.resetConnectionLocked(tcpip.ErrConnectionAborted)\n+ }\n+\nif n¬ifyClose != 0 && closeTimer == nil {\n// Reset the connection 3 seconds after the\n// endpoint has been closed.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -40,6 +40,7 @@ const (\nnotifyClose\nnotifyMTUChanged\nnotifyDrain\n+ notifyReset\n)\n// SACKInfo holds TCP SACK related information for a given endpoint.\n@@ -919,7 +920,20 @@ func (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) *tcpip.Error {\nswitch e.state {\ncase stateConnected:\n// Close for write.\n- if (flags & tcpip.ShutdownWrite) != 0 {\n+ if (e.shutdownFlags & tcpip.ShutdownWrite) != 0 {\n+ if (e.shutdownFlags & tcpip.ShutdownRead) != 0 {\n+ // We're fully closed, if we have unread data we need to abort\n+ // the connection with a RST.\n+ e.rcvListMu.Lock()\n+ rcvBufUsed := e.rcvBufUsed\n+ e.rcvListMu.Unlock()\n+\n+ if rcvBufUsed > 0 {\n+ e.notifyProtocolGoroutine(notifyReset)\n+ return nil\n+ }\n+ }\n+\ne.sndBufMu.Lock()\nif e.sndClosed {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/snd.go",
"new_path": "pkg/tcpip/transport/tcp/snd.go",
"diff": "@@ -333,28 +333,17 @@ func (s *sender) sendData() {\nvar segEnd seqnum.Value\nif seg.data.Size() == 0 {\n- seg.flags = flagAck\n-\n- s.ep.rcvListMu.Lock()\n- rcvBufUsed := s.ep.rcvBufUsed\n- s.ep.rcvListMu.Unlock()\n-\n- s.ep.mu.Lock()\n- // We're sending a FIN by default\n- fl := flagFin\n- segEnd = seg.sequenceNumber\n- if (s.ep.shutdownFlags&tcpip.ShutdownRead) != 0 && rcvBufUsed > 0 {\n- // If there is unread data we must send a RST.\n- // For more information see RFC 2525 section 2.17.\n- fl = flagRst\n- } else {\n- segEnd = seg.sequenceNumber.Add(1)\n+ if s.writeList.Back() != seg {\n+ panic(\"FIN segments must be the final segment in the write list.\")\n}\n-\n- s.ep.mu.Unlock()\n- seg.flags |= uint8(fl)\n+ seg.flags = flagAck | flagFin\n+ segEnd = seg.sequenceNumber.Add(1)\n} else {\n// We're sending a non-FIN segment.\n+ if seg.flags&flagFin != 0 {\n+ panic(\"Netstack queues FIN segments without data.\")\n+ }\n+\nif !seg.sequenceNumber.LessThan(end) {\nbreak\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"new_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"diff": "@@ -416,6 +416,83 @@ func TestRstOnCloseWithUnreadData(t *testing.T) {\n})\n}\n+func TestRstOnCloseWithUnreadDataFinConvertRst(t *testing.T) {\n+ c := context.New(t, defaultMTU)\n+ defer c.Cleanup()\n+\n+ c.CreateConnected(789, 30000, nil)\n+\n+ we, ch := waiter.NewChannelEntry(nil)\n+ c.WQ.EventRegister(&we, waiter.EventIn)\n+ defer c.WQ.EventUnregister(&we)\n+\n+ if _, _, err := c.EP.Read(nil); err != tcpip.ErrWouldBlock {\n+ t.Fatalf(\"Unexpected error from Read: %v\", err)\n+ }\n+\n+ data := []byte{1, 2, 3}\n+ c.SendPacket(data, &context.Headers{\n+ SrcPort: context.TestPort,\n+ DstPort: c.Port,\n+ Flags: header.TCPFlagAck,\n+ SeqNum: 790,\n+ AckNum: c.IRS.Add(1),\n+ RcvWnd: 30000,\n+ })\n+\n+ // Wait for receive to be notified.\n+ select {\n+ case <-ch:\n+ case <-time.After(3 * time.Second):\n+ t.Fatalf(\"Timed out waiting for data to arrive\")\n+ }\n+\n+ // Check that ACK is received, this happens regardless of the read.\n+ checker.IPv4(t, c.GetPacket(),\n+ checker.TCP(\n+ checker.DstPort(context.TestPort),\n+ checker.SeqNum(uint32(c.IRS)+1),\n+ checker.AckNum(uint32(790+len(data))),\n+ checker.TCPFlags(header.TCPFlagAck),\n+ ),\n+ )\n+\n+ // Cause a FIN to be generated.\n+ c.EP.Shutdown(tcpip.ShutdownWrite)\n+\n+ // Make sure we get the FIN but DON't ACK IT.\n+ checker.IPv4(t, c.GetPacket(),\n+ checker.TCP(\n+ checker.DstPort(context.TestPort),\n+ checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin),\n+ checker.SeqNum(uint32(c.IRS)+1),\n+ ))\n+\n+ // Cause a RST to be generated by closing the read end now since we have\n+ // unread data.\n+ c.EP.Shutdown(tcpip.ShutdownRead)\n+\n+ // Make sure we get the RST\n+ checker.IPv4(t, c.GetPacket(),\n+ checker.TCP(\n+ checker.DstPort(context.TestPort),\n+ checker.TCPFlags(header.TCPFlagAck|header.TCPFlagRst),\n+ // We shouldn't consume a sequence number on RST.\n+ checker.SeqNum(uint32(c.IRS)+1),\n+ ))\n+\n+ // The ACK to the FIN should now be rejected since the connection has been\n+ // closed by a RST.\n+ c.SendPacket(nil, &context.Headers{\n+ SrcPort: context.TestPort,\n+ DstPort: c.Port,\n+ Flags: header.TCPFlagAck,\n+ SeqNum: seqnum.Value(790 + len(data)),\n+ AckNum: c.IRS.Add(seqnum.Size(2)),\n+ RcvWnd: 30000,\n+ })\n+}\n+\nfunc TestFullWindowReceive(t *testing.T) {\nc := context.New(t, defaultMTU)\ndefer c.Cleanup()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Panic in netstack during cleanup where a FIN becomes a RST.
There is a subtle bug where during cleanup with unread data a FIN can
be converted to a RST, at that point the entire connection should be
aborted as we're not expecting any ACKs to the RST.
PiperOrigin-RevId: 202691271
Change-Id: Idae70800208ca26e07a379bc6b2b8090805d0a22 |