Dataset columns (name, type, observed value/length range):

author             int64    658 to 755k
date               string   length 19
timezone           int64    -46,800 to 43.2k
hash               string   length 40
message            string   length 5 to 490
mods               list
language           string   20 classes
license            string   3 classes
repo               string   length 5 to 68
original_message   string   length 12 to 491
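Each row below lists these columns in order; the mods field is a JSON list of per-file changes. As a rough sketch of how one mods payload could be decoded in Go (the struct and field names are inferred from the JSON shown in this dump, not taken from any official loader):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// FileMod mirrors one element of the "mods" list as it appears in the rows
// below: a change type, optional old/new paths, and a unified diff.
type FileMod struct {
	ChangeType string  `json:"change_type"`
	OldPath    *string `json:"old_path"` // null for ADD entries
	NewPath    *string `json:"new_path"` // null for DELETE entries
	Diff       string  `json:"diff"`
}

func main() {
	// A trimmed payload in the same shape as the mods column below.
	raw := `[{"change_type":"MODIFY","old_path":"Makefile","new_path":"Makefile","diff":"@@ -1 +1 @@"}]`
	var mods []FileMod
	if err := json.Unmarshal([]byte(raw), &mods); err != nil {
		panic(err)
	}
	fmt.Println(len(mods), mods[0].ChangeType, *mods[0].NewPath)
}
```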
author: 259,907
date: 17.12.2020 08:45:38
timezone: 28,800
hash: 74788b1b6194ef62f8355f7e4721c00f615d16ad
message: [netstack] Implement MSG_ERRQUEUE flag for recvmsg(2). Introduces the per-socket error queue and the necessary cmsg mechanisms.
mods:
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/BUILD", "new_path": "pkg/abi/linux/BUILD", "diff": "@@ -21,6 +21,7 @@ go_library(\n\"epoll_amd64.go\",\n\"epoll_arm64.go\",\n\"errors.go\",\n+ \"errqueue.go\",\n\"eventfd.go\",\n\"exec.go\",\n\"fadvise.go\",\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/abi/linux/errqueue.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package linux\n+\n+import (\n+ \"gvisor.dev/gvisor/pkg/marshal\"\n+)\n+\n+// Socket error origin codes as defined in include/uapi/linux/errqueue.h.\n+const (\n+ SO_EE_ORIGIN_NONE = 0\n+ SO_EE_ORIGIN_LOCAL = 1\n+ SO_EE_ORIGIN_ICMP = 2\n+ SO_EE_ORIGIN_ICMP6 = 3\n+)\n+\n+// SockExtendedErr represents struct sock_extended_err in Linux defined in\n+// include/uapi/linux/errqueue.h.\n+//\n+// +marshal\n+type SockExtendedErr struct {\n+ Errno uint32\n+ Origin uint8\n+ Type uint8\n+ Code uint8\n+ Pad uint8\n+ Info uint32\n+ Data uint32\n+}\n+\n+// SockErrCMsg represents the IP*_RECVERR control message.\n+type SockErrCMsg interface {\n+ marshal.Marshallable\n+\n+ CMsgLevel() uint32\n+ CMsgType() uint32\n+}\n+\n+// SockErrCMsgIPv4 is the IP_RECVERR control message used in\n+// recvmsg(MSG_ERRQUEUE) by ipv4 sockets. This is equilavent to `struct errhdr`\n+// defined in net/ipv4/ip_sockglue.c:ip_recv_error().\n+//\n+// +marshal\n+type SockErrCMsgIPv4 struct {\n+ SockExtendedErr\n+ Offender SockAddrInet\n+}\n+\n+var _ SockErrCMsg = (*SockErrCMsgIPv4)(nil)\n+\n+// CMsgLevel implements SockErrCMsg.CMsgLevel.\n+func (*SockErrCMsgIPv4) CMsgLevel() uint32 {\n+ return SOL_IP\n+}\n+\n+// CMsgType implements SockErrCMsg.CMsgType.\n+func (*SockErrCMsgIPv4) CMsgType() uint32 {\n+ return IP_RECVERR\n+}\n+\n+// SockErrCMsgIPv6 is the IPV6_RECVERR control message used in\n+// recvmsg(MSG_ERRQUEUE) by ipv6 sockets. 
This is equilavent to `struct errhdr`\n+// defined in net/ipv6/datagram.c:ipv6_recv_error().\n+//\n+// +marshal\n+type SockErrCMsgIPv6 struct {\n+ SockExtendedErr\n+ Offender SockAddrInet6\n+}\n+\n+var _ SockErrCMsg = (*SockErrCMsgIPv6)(nil)\n+\n+// CMsgLevel implements SockErrCMsg.CMsgLevel.\n+func (*SockErrCMsgIPv6) CMsgLevel() uint32 {\n+ return SOL_IPV6\n+}\n+\n+// CMsgType implements SockErrCMsg.CMsgType.\n+func (*SockErrCMsgIPv6) CMsgType() uint32 {\n+ return IPV6_RECVERR\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/control/control.go", "new_path": "pkg/sentry/socket/control/control.go", "diff": "@@ -371,6 +371,17 @@ func PackOriginalDstAddress(t *kernel.Task, originalDstAddress linux.SockAddr, b\nbuf, level, optType, t.Arch().Width(), originalDstAddress)\n}\n+// PackSockExtendedErr packs an IP*_RECVERR socket control message.\n+func PackSockExtendedErr(t *kernel.Task, sockErr linux.SockErrCMsg, buf []byte) []byte {\n+ return putCmsgStruct(\n+ buf,\n+ sockErr.CMsgLevel(),\n+ sockErr.CMsgType(),\n+ t.Arch().Width(),\n+ sockErr,\n+ )\n+}\n+\n// PackControlMessages packs control messages into the given buffer.\n//\n// We skip control messages specific to Unix domain sockets.\n@@ -403,6 +414,10 @@ func PackControlMessages(t *kernel.Task, cmsgs socket.ControlMessages, buf []byt\nbuf = PackOriginalDstAddress(t, cmsgs.IP.OriginalDstAddress, buf)\n}\n+ if cmsgs.IP.SockErr != nil {\n+ buf = PackSockExtendedErr(t, cmsgs.IP.SockErr, buf)\n+ }\n+\nreturn buf\n}\n@@ -440,6 +455,10 @@ func CmsgsSpace(t *kernel.Task, cmsgs socket.ControlMessages) int {\nspace += cmsgSpace(t, cmsgs.IP.OriginalDstAddress.SizeBytes())\n}\n+ if cmsgs.IP.SockErr != nil {\n+ space += cmsgSpace(t, cmsgs.IP.SockErr.SizeBytes())\n+ }\n+\nreturn space\n}\n@@ -546,6 +565,16 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte) (socket.Con\ncmsgs.IP.OriginalDstAddress = &addr\ni += binary.AlignUp(length, width)\n+ case linux.IP_RECVERR:\n+ var errCmsg linux.SockErrCMsgIPv4\n+ if length < errCmsg.SizeBytes() {\n+ return socket.ControlMessages{}, syserror.EINVAL\n+ }\n+\n+ errCmsg.UnmarshalBytes(buf[i : i+errCmsg.SizeBytes()])\n+ cmsgs.IP.SockErr = &errCmsg\n+ i += binary.AlignUp(length, width)\n+\ndefault:\nreturn socket.ControlMessages{}, syserror.EINVAL\n}\n@@ -568,6 +597,16 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte) (socket.Con\ncmsgs.IP.OriginalDstAddress = &addr\ni += binary.AlignUp(length, width)\n+ case linux.IPV6_RECVERR:\n+ var errCmsg linux.SockErrCMsgIPv6\n+ if length < errCmsg.SizeBytes() {\n+ return socket.ControlMessages{}, syserror.EINVAL\n+ }\n+\n+ errCmsg.UnmarshalBytes(buf[i : i+errCmsg.SizeBytes()])\n+ cmsgs.IP.SockErr = &errCmsg\n+ i += binary.AlignUp(length, width)\n+\ndefault:\nreturn socket.ControlMessages{}, syserror.EINVAL\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/hostinet/socket.go", "new_path": "pkg/sentry/socket/hostinet/socket.go", "diff": "@@ -450,11 +450,7 @@ func (s *socketOpsCommon) recvMsgFromHost(iovs []syscall.Iovec, flags int, sende\n// RecvMsg implements socket.Socket.RecvMsg.\nfunc (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlLen uint64) (int, int, linux.SockAddr, uint32, socket.ControlMessages, *syserr.Error) {\n// Only allow known and safe flags.\n- //\n- // FIXME(jamieliu): We can't support MSG_ERRQUEUE because it uses ancillary\n- // messages that gvisor/pkg/tcpip/transport/unix doesn't 
understand. Kill the\n- // Socket interface's dependence on netstack.\n- if flags&^(syscall.MSG_DONTWAIT|syscall.MSG_PEEK|syscall.MSG_TRUNC) != 0 {\n+ if flags&^(syscall.MSG_DONTWAIT|syscall.MSG_PEEK|syscall.MSG_TRUNC|syscall.MSG_ERRQUEUE) != 0 {\nreturn 0, 0, nil, 0, socket.ControlMessages{}, syserr.ErrInvalidArgument\n}\n@@ -488,7 +484,8 @@ func (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nvar ch chan struct{}\nn, err := copyToDst()\n- if flags&syscall.MSG_DONTWAIT == 0 {\n+ // recv*(MSG_ERRQUEUE) never blocks, even without MSG_DONTWAIT.\n+ if flags&(syscall.MSG_DONTWAIT|syscall.MSG_ERRQUEUE) == 0 {\nfor err == syserror.ErrWouldBlock {\n// We only expect blocking to come from the actual syscall, in which\n// case it can't have returned any data.\n@@ -551,6 +548,11 @@ func parseUnixControlMessages(unixControlMessages []unix.SocketControlMessage) s\nvar addr linux.SockAddrInet\nbinary.Unmarshal(unixCmsg.Data[:addr.SizeBytes()], usermem.ByteOrder, &addr)\ncontrolMessages.IP.OriginalDstAddress = &addr\n+\n+ case syscall.IP_RECVERR:\n+ var errCmsg linux.SockErrCMsgIPv4\n+ errCmsg.UnmarshalBytes(unixCmsg.Data)\n+ controlMessages.IP.SockErr = &errCmsg\n}\ncase linux.SOL_IPV6:\n@@ -563,6 +565,11 @@ func parseUnixControlMessages(unixControlMessages []unix.SocketControlMessage) s\nvar addr linux.SockAddrInet6\nbinary.Unmarshal(unixCmsg.Data[:addr.SizeBytes()], usermem.ByteOrder, &addr)\ncontrolMessages.IP.OriginalDstAddress = &addr\n+\n+ case syscall.IPV6_RECVERR:\n+ var errCmsg linux.SockErrCMsgIPv6\n+ errCmsg.UnmarshalBytes(unixCmsg.Data)\n+ controlMessages.IP.SockErr = &errCmsg\n}\ncase linux.SOL_TCP:\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack.go", "new_path": "pkg/sentry/socket/netstack/netstack.go", "diff": "@@ -2772,6 +2772,8 @@ func (s *socketOpsCommon) controlMessages() socket.ControlMessages {\nIP: socket.IPControlMessages{\nHasTimestamp: s.readCM.HasTimestamp && s.sockOptTimestamp,\nTimestamp: s.readCM.Timestamp,\n+ HasInq: s.readCM.HasInq,\n+ Inq: s.readCM.Inq,\nHasTOS: s.readCM.HasTOS,\nTOS: s.readCM.TOS,\nHasTClass: s.readCM.HasTClass,\n@@ -2779,6 +2781,7 @@ func (s *socketOpsCommon) controlMessages() socket.ControlMessages {\nHasIPPacketInfo: s.readCM.HasIPPacketInfo,\nPacketInfo: s.readCM.PacketInfo,\nOriginalDstAddress: s.readCM.OriginalDstAddress,\n+ SockErr: s.readCM.SockErr,\n},\n}\n}\n@@ -2795,9 +2798,49 @@ func (s *socketOpsCommon) updateTimestamp() {\n}\n}\n+// addrFamilyFromNetProto returns the address family identifier for the given\n+// network protocol.\n+func addrFamilyFromNetProto(net tcpip.NetworkProtocolNumber) int {\n+ switch net {\n+ case header.IPv4ProtocolNumber:\n+ return linux.AF_INET\n+ case header.IPv6ProtocolNumber:\n+ return linux.AF_INET6\n+ default:\n+ panic(fmt.Sprintf(\"invalid net proto for addr family inference: %d\", net))\n+ }\n+}\n+\n+// recvErr handles MSG_ERRQUEUE for recvmsg(2).\n+// This is analogous to net/ipv4/ip_sockglue.c:ip_recv_error().\n+func (s *socketOpsCommon) recvErr(t *kernel.Task, dst usermem.IOSequence) (int, int, linux.SockAddr, uint32, socket.ControlMessages, *syserr.Error) {\n+ sockErr := s.Endpoint.SocketOptions().DequeueErr()\n+ if sockErr == nil {\n+ return 0, 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n+ }\n+\n+ // The payload of the original packet that caused the error is passed as\n+ // normal data via msg_iovec. 
-- recvmsg(2)\n+ msgFlags := linux.MSG_ERRQUEUE\n+ if int(dst.NumBytes()) < len(sockErr.Payload) {\n+ msgFlags |= linux.MSG_TRUNC\n+ }\n+ n, err := dst.CopyOut(t, sockErr.Payload)\n+\n+ // The original destination address of the datagram that caused the error is\n+ // supplied via msg_name. -- recvmsg(2)\n+ dstAddr, dstAddrLen := socket.ConvertAddress(addrFamilyFromNetProto(sockErr.NetProto), sockErr.Dst)\n+ cmgs := socket.ControlMessages{IP: socket.NewIPControlMessages(s.family, tcpip.ControlMessages{SockErr: sockErr})}\n+ return n, msgFlags, dstAddr, dstAddrLen, cmgs, syserr.FromError(err)\n+}\n+\n// RecvMsg implements the linux syscall recvmsg(2) for sockets backed by\n// tcpip.Endpoint.\nfunc (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, msgFlags int, senderAddr linux.SockAddr, senderAddrLen uint32, controlMessages socket.ControlMessages, err *syserr.Error) {\n+ if flags&linux.MSG_ERRQUEUE != 0 {\n+ return s.recvErr(t, dst)\n+ }\n+\ntrunc := flags&linux.MSG_TRUNC != 0\npeek := flags&linux.MSG_PEEK != 0\ndontWait := flags&linux.MSG_DONTWAIT != 0\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/socket.go", "new_path": "pkg/sentry/socket/socket.go", "diff": "@@ -56,6 +56,57 @@ func packetInfoToLinux(packetInfo tcpip.IPPacketInfo) linux.ControlMessageIPPack\nreturn p\n}\n+// errOriginToLinux maps tcpip socket origin to Linux socket origin constants.\n+func errOriginToLinux(origin tcpip.SockErrOrigin) uint8 {\n+ switch origin {\n+ case tcpip.SockExtErrorOriginNone:\n+ return linux.SO_EE_ORIGIN_NONE\n+ case tcpip.SockExtErrorOriginLocal:\n+ return linux.SO_EE_ORIGIN_LOCAL\n+ case tcpip.SockExtErrorOriginICMP:\n+ return linux.SO_EE_ORIGIN_ICMP\n+ case tcpip.SockExtErrorOriginICMP6:\n+ return linux.SO_EE_ORIGIN_ICMP6\n+ default:\n+ panic(fmt.Sprintf(\"unknown socket origin: %d\", origin))\n+ }\n+}\n+\n+// sockErrCmsgToLinux converts SockError control message from tcpip format to\n+// Linux format.\n+func sockErrCmsgToLinux(sockErr *tcpip.SockError) linux.SockErrCMsg {\n+ if sockErr == nil {\n+ return nil\n+ }\n+\n+ ee := linux.SockExtendedErr{\n+ Errno: uint32(syserr.TranslateNetstackError(sockErr.Err).ToLinux().Number()),\n+ Origin: errOriginToLinux(sockErr.ErrOrigin),\n+ Type: sockErr.ErrType,\n+ Code: sockErr.ErrCode,\n+ Info: sockErr.ErrInfo,\n+ }\n+\n+ switch sockErr.NetProto {\n+ case header.IPv4ProtocolNumber:\n+ errMsg := &linux.SockErrCMsgIPv4{SockExtendedErr: ee}\n+ if len(sockErr.Offender.Addr) > 0 {\n+ addr, _ := ConvertAddress(linux.AF_INET, sockErr.Offender)\n+ errMsg.Offender = *addr.(*linux.SockAddrInet)\n+ }\n+ return errMsg\n+ case header.IPv6ProtocolNumber:\n+ errMsg := &linux.SockErrCMsgIPv6{SockExtendedErr: ee}\n+ if len(sockErr.Offender.Addr) > 0 {\n+ addr, _ := ConvertAddress(linux.AF_INET6, sockErr.Offender)\n+ errMsg.Offender = *addr.(*linux.SockAddrInet6)\n+ }\n+ return errMsg\n+ default:\n+ panic(fmt.Sprintf(\"invalid net proto for creating SockErrCMsg: %d\", sockErr.NetProto))\n+ }\n+}\n+\n// NewIPControlMessages converts the tcpip ControlMessgaes (which does not\n// have Linux specific format) to Linux format.\nfunc NewIPControlMessages(family int, cmgs tcpip.ControlMessages) IPControlMessages {\n@@ -75,6 +126,7 @@ func NewIPControlMessages(family int, cmgs tcpip.ControlMessages) IPControlMessa\nHasIPPacketInfo: cmgs.HasIPPacketInfo,\nPacketInfo: packetInfoToLinux(cmgs.PacketInfo),\nOriginalDstAddress: orgDstAddr,\n+ SockErr: 
sockErrCmsgToLinux(cmgs.SockErr),\n}\n}\n@@ -117,6 +169,9 @@ type IPControlMessages struct {\n// OriginalDestinationAddress holds the original destination address\n// and port of the incoming packet.\nOriginalDstAddress linux.SockAddr\n+\n+ // SockErr is the dequeued socket error on recvmsg(MSG_ERRQUEUE).\n+ SockErr linux.SockErrCMsg\n}\n// Release releases Unix domain socket credentials and rights.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_socket.go", "new_path": "pkg/sentry/syscalls/linux/sys_socket.go", "diff": "@@ -749,11 +749,6 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i\nreturn 0, err\n}\n- // FIXME(b/63594852): Pretend we have an empty error queue.\n- if flags&linux.MSG_ERRQUEUE != 0 {\n- return 0, syserror.EAGAIN\n- }\n-\n// Fast path when no control message nor name buffers are provided.\nif msg.ControlLen == 0 && msg.NameLen == 0 {\nn, mflags, _, _, cms, err := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, false, 0)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/socket.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/socket.go", "diff": "@@ -752,11 +752,6 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, fla\nreturn 0, err\n}\n- // FIXME(b/63594852): Pretend we have an empty error queue.\n- if flags&linux.MSG_ERRQUEUE != 0 {\n- return 0, syserror.EAGAIN\n- }\n-\n// Fast path when no control message nor name buffers are provided.\nif msg.ControlLen == 0 && msg.NameLen == 0 {\nn, mflags, _, _, cms, err := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, false, 0)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/BUILD", "new_path": "pkg/tcpip/BUILD", "diff": "load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n+load(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\npackage(licenses = [\"notice\"])\n+go_template_instance(\n+ name = \"sock_err_list\",\n+ out = \"sock_err_list.go\",\n+ package = \"tcpip\",\n+ prefix = \"sockError\",\n+ template = \"//pkg/ilist:generic_list\",\n+ types = {\n+ \"Element\": \"*SockError\",\n+ \"Linker\": \"*SockError\",\n+ },\n+)\n+\ngo_library(\nname = \"tcpip\",\nsrcs = [\n+ \"sock_err_list.go\",\n\"socketops.go\",\n\"tcpip.go\",\n\"time_unsafe.go\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/socketops.go", "new_path": "pkg/tcpip/socketops.go", "diff": "@@ -104,7 +104,7 @@ type SocketOptions struct {\nkeepAliveEnabled uint32\n// multicastLoopEnabled determines whether multicast packets sent over a\n- // non-loopback interface will be looped back. Analogous to inet->mc_loop.\n+ // non-loopback interface will be looped back.\nmulticastLoopEnabled uint32\n// receiveTOSEnabled is used to specify if the TOS ancillary message is\n@@ -145,6 +145,10 @@ type SocketOptions struct {\n// the incoming packet should be returned as an ancillary message.\nreceiveOriginalDstAddress uint32\n+ // errQueue is the per-socket error queue. 
It is protected by errQueueMu.\n+ errQueueMu sync.Mutex `state:\"nosave\"`\n+ errQueue sockErrorList\n+\n// mu protects the access to the below fields.\nmu sync.Mutex `state:\"nosave\"`\n@@ -362,3 +366,60 @@ func (so *SocketOptions) SetLinger(linger LingerOption) {\nso.linger = linger\nso.mu.Unlock()\n}\n+\n+// SockErrOrigin represents the constants for error origin.\n+type SockErrOrigin uint8\n+\n+const (\n+ // SockExtErrorOriginNone represents an unknown error origin.\n+ SockExtErrorOriginNone SockErrOrigin = iota\n+\n+ // SockExtErrorOriginLocal indicates a local error.\n+ SockExtErrorOriginLocal\n+\n+ // SockExtErrorOriginICMP indicates an IPv4 ICMP error.\n+ SockExtErrorOriginICMP\n+\n+ // SockExtErrorOriginICMP6 indicates an IPv6 ICMP error.\n+ SockExtErrorOriginICMP6\n+)\n+\n+// SockError represents a queue entry in the per-socket error queue.\n+//\n+// +stateify savable\n+type SockError struct {\n+ sockErrorEntry\n+\n+ // Err is the error caused by the errant packet.\n+ Err *Error\n+ // ErrOrigin indicates the error origin.\n+ ErrOrigin SockErrOrigin\n+ // ErrType is the type in the ICMP header.\n+ ErrType uint8\n+ // ErrCode is the code in the ICMP header.\n+ ErrCode uint8\n+ // ErrInfo is additional info about the error.\n+ ErrInfo uint32\n+\n+ // Payload is the errant packet's payload.\n+ Payload []byte\n+ // Dst is the original destination address of the errant packet.\n+ Dst FullAddress\n+ // Offender is the original sender address of the errant packet.\n+ Offender FullAddress\n+ // NetProto is the network protocol being used to transmit the packet.\n+ NetProto NetworkProtocolNumber\n+}\n+\n+// DequeueErr dequeues a socket extended error from the error queue and returns\n+// it. Returns nil if queue is empty.\n+func (so *SocketOptions) DequeueErr() *SockError {\n+ so.errQueueMu.Lock()\n+ defer so.errQueueMu.Unlock()\n+\n+ err := so.errQueue.Front()\n+ if err != nil {\n+ so.errQueue.Remove(err)\n+ }\n+ return err\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tcpip.go", "new_path": "pkg/tcpip/tcpip.go", "diff": "@@ -500,6 +500,9 @@ type ControlMessages struct {\n// OriginalDestinationAddress holds the original destination address\n// and port of the incoming packet.\nOriginalDstAddress FullAddress\n+\n+ // SockErr is the dequeued socket error on recvmsg(MSG_ERRQUEUE).\n+ SockErr *SockError\n}\n// PacketOwner is used to get UID and GID of the packet.\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: [netstack] Implement MSG_ERRQUEUE flag for recvmsg(2). Introduces the per-socket error queue and the necessary cmsg mechanisms. PiperOrigin-RevId: 348028508
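The change above makes recvmsg(2) with MSG_ERRQUEUE drain the new per-socket error queue, returning EAGAIN when it is empty instead of blocking. A minimal user-space sketch of that semantics using golang.org/x/sys/unix (Linux-only, error handling trimmed):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// recvmsg(MSG_ERRQUEUE) never blocks; with nothing queued it fails with
	// EAGAIN, which is exactly the syserr.ErrTryAgain path added above.
	buf := make([]byte, 256)
	oob := make([]byte, 256)
	_, _, _, _, err = unix.Recvmsg(fd, buf, oob, unix.MSG_ERRQUEUE)
	fmt.Println("empty error queue:", err) // expect EAGAIN
}
```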
author: 259,992
date: 17.12.2020 10:43:55
timezone: 28,800
hash: e7493a9e23325c00ad9a0db341d5887afe3ae5eb
message: Set max memory not min Closes
mods:
[ { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -294,7 +294,7 @@ func New(args Args) (*Loader, error) {\nif args.TotalMem > 0 {\n// Adjust the total memory returned by the Sentry so that applications that\n// use /proc/meminfo can make allocations based on this limit.\n- usage.MinimumTotalMemoryBytes = args.TotalMem\n+ usage.MaximumTotalMemoryBytes = args.TotalMem\nlog.Infof(\"Setting total memory to %.2f GB\", float64(args.TotalMem)/(1<<30))\n}\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/integration_test.go", "new_path": "test/e2e/integration_test.go", "diff": "@@ -260,12 +260,10 @@ func TestMemLimit(t *testing.T) {\nd := dockerutil.MakeContainer(ctx, t)\ndefer d.CleanUp(ctx)\n- // N.B. Because the size of the memory file may grow in large chunks,\n- // there is a minimum threshold of 1GB for the MemTotal figure.\n- allocMemory := 1024 * 1024 // In kb.\n+ allocMemoryKb := 50 * 1024\nout, err := d.Run(ctx, dockerutil.RunOpts{\nImage: \"basic/alpine\",\n- Memory: allocMemory * 1024, // In bytes.\n+ Memory: allocMemoryKb * 1024, // In bytes.\n}, \"sh\", \"-c\", \"cat /proc/meminfo | grep MemTotal: | awk '{print $2}'\")\nif err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n@@ -285,7 +283,7 @@ func TestMemLimit(t *testing.T) {\nif err != nil {\nt.Fatalf(\"failed to parse %q: %v\", out, err)\n}\n- if want := uint64(allocMemory); got != want {\n+ if want := uint64(allocMemoryKb); got != want {\nt.Errorf(\"MemTotal got: %d, want: %d\", got, want)\n}\n}\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: Set max memory not min Closes #5048 PiperOrigin-RevId: 348050472
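The updated test asserts that MemTotal inside the container now tracks the configured 50 MiB limit rather than a 1 GB floor. A rough Go equivalent of that check outside the test harness (the only assumption is the usual /proc/meminfo line format):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	// MemTotal is reported in kB; inside a sandbox with a total-memory limit
	// set it should now reflect the configured maximum.
	f, err := os.Open("/proc/meminfo")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		if len(fields) >= 2 && fields[0] == "MemTotal:" {
			kb, err := strconv.ParseUint(fields[1], 10, 64)
			if err != nil {
				panic(err)
			}
			fmt.Printf("MemTotal: %d kB\n", kb)
			return
		}
	}
}
```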
author: 259,992
date: 17.12.2020 10:53:50
timezone: 28,800
hash: 30860902f6953348577e6a1d742521c6fbc4c75d
message: Set process group and session on host TTY Closes
mods:
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/host.go", "new_path": "pkg/sentry/fsimpl/host/host.go", "diff": "@@ -31,6 +31,7 @@ import (\nfslock \"gvisor.dev/gvisor/pkg/sentry/fs/lock\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs\"\n\"gvisor.dev/gvisor/pkg/sentry/hostfd\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/sentry/memmap\"\nunixsocket \"gvisor.dev/gvisor/pkg/sentry/socket/unix\"\n@@ -499,6 +500,10 @@ func (i *inode) open(ctx context.Context, d *kernfs.Dentry, mnt *vfs.Mount, flag\nfileDescription: fileDescription{inode: i},\ntermios: linux.DefaultReplicaTermios,\n}\n+ if task := kernel.TaskFromContext(ctx); task != nil {\n+ fd.fgProcessGroup = task.ThreadGroup().ProcessGroup()\n+ fd.session = fd.fgProcessGroup.Session()\n+ }\nfd.LockFD.Init(&i.locks)\nvfsfd := &fd.vfsfd\nif err := vfsfd.Init(fd, flags, mnt, d.VFSDentry(), &vfs.FileDescriptionOptions{}); err != nil {\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: Set process group and session on host TTY Closes #5128 PiperOrigin-RevId: 348052446
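With this change a host-backed TTY opened inside the sandbox starts out with the opener's process group as its foreground group, so the usual TIOCGPGRP query has something sensible to return. A small hedged sketch of that query (assumes /dev/tty is host-backed in the configuration being exercised):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/dev/tty", unix.O_RDWR, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// TIOCGPGRP returns the terminal's foreground process group, which the
	// commit above now initializes from the opening task's thread group.
	pgrp, err := unix.IoctlGetInt(fd, unix.TIOCGPGRP)
	if err != nil {
		panic(err)
	}
	fmt.Printf("foreground pgid=%d, our pgid=%d\n", pgrp, unix.Getpgrp())
}
```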
author: 259,907
date: 17.12.2020 11:07:56
timezone: 28,800
hash: 028271b5308708463d2aa593122840e70c93f02c
message: [netstack] Implement IP(V6)_RECVERR socket option.
mods:
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/hostinet/socket.go", "new_path": "pkg/sentry/socket/hostinet/socket.go", "diff": "@@ -331,12 +331,12 @@ func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, outPtr\nswitch level {\ncase linux.SOL_IP:\nswitch name {\n- case linux.IP_TOS, linux.IP_RECVTOS, linux.IP_PKTINFO, linux.IP_RECVORIGDSTADDR:\n+ case linux.IP_TOS, linux.IP_RECVTOS, linux.IP_PKTINFO, linux.IP_RECVORIGDSTADDR, linux.IP_RECVERR:\noptlen = sizeofInt32\n}\ncase linux.SOL_IPV6:\nswitch name {\n- case linux.IPV6_TCLASS, linux.IPV6_RECVTCLASS, linux.IPV6_V6ONLY, linux.IPV6_RECVORIGDSTADDR:\n+ case linux.IPV6_TCLASS, linux.IPV6_RECVTCLASS, linux.IPV6_RECVERR, linux.IPV6_V6ONLY, linux.IPV6_RECVORIGDSTADDR:\noptlen = sizeofInt32\n}\ncase linux.SOL_SOCKET:\n@@ -377,14 +377,14 @@ func (s *socketOpsCommon) SetSockOpt(t *kernel.Task, level int, name int, opt []\nswitch level {\ncase linux.SOL_IP:\nswitch name {\n- case linux.IP_TOS, linux.IP_RECVTOS, linux.IP_RECVORIGDSTADDR:\n+ case linux.IP_TOS, linux.IP_RECVTOS, linux.IP_RECVORIGDSTADDR, linux.IP_RECVERR:\noptlen = sizeofInt32\ncase linux.IP_PKTINFO:\noptlen = linux.SizeOfControlMessageIPPacketInfo\n}\ncase linux.SOL_IPV6:\nswitch name {\n- case linux.IPV6_TCLASS, linux.IPV6_RECVTCLASS, linux.IPV6_V6ONLY, linux.IPV6_RECVORIGDSTADDR:\n+ case linux.IPV6_TCLASS, linux.IPV6_RECVTCLASS, linux.IPV6_RECVERR, linux.IPV6_V6ONLY, linux.IPV6_RECVORIGDSTADDR:\noptlen = sizeofInt32\n}\ncase linux.SOL_SOCKET:\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack.go", "new_path": "pkg/sentry/socket/netstack/netstack.go", "diff": "@@ -1405,6 +1405,13 @@ func getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name\nv := primitive.Int32(boolToInt32(ep.SocketOptions().GetReceiveTClass()))\nreturn &v, nil\n+ case linux.IPV6_RECVERR:\n+ if outLen < sizeOfInt32 {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ v := primitive.Int32(boolToInt32(ep.SocketOptions().GetRecvError()))\n+ return &v, nil\ncase linux.IPV6_RECVORIGDSTADDR:\nif outLen < sizeOfInt32 {\n@@ -1579,6 +1586,14 @@ func getSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name in\nv := primitive.Int32(boolToInt32(ep.SocketOptions().GetReceiveTOS()))\nreturn &v, nil\n+ case linux.IP_RECVERR:\n+ if outLen < sizeOfInt32 {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ v := primitive.Int32(boolToInt32(ep.SocketOptions().GetRecvError()))\n+ return &v, nil\n+\ncase linux.IP_PKTINFO:\nif outLen < sizeOfInt32 {\nreturn nil, syserr.ErrInvalidArgument\n@@ -2129,6 +2144,16 @@ func setSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name\nep.SocketOptions().SetReceiveTClass(v != 0)\nreturn nil\n+ case linux.IPV6_RECVERR:\n+ if len(optVal) == 0 {\n+ return nil\n+ }\n+ v, err := parseIntOrChar(optVal)\n+ if err != nil {\n+ return err\n+ }\n+ ep.SocketOptions().SetRecvError(v != 0)\n+ return nil\ncase linux.IP6T_SO_SET_REPLACE:\nif len(optVal) < linux.SizeOfIP6TReplace {\n@@ -2317,6 +2342,17 @@ func setSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name in\nep.SocketOptions().SetReceiveTOS(v != 0)\nreturn nil\n+ case linux.IP_RECVERR:\n+ if len(optVal) == 0 {\n+ return nil\n+ }\n+ v, err := parseIntOrChar(optVal)\n+ if err != nil {\n+ return err\n+ }\n+ ep.SocketOptions().SetRecvError(v != 0)\n+ return nil\n+\ncase linux.IP_PKTINFO:\nif len(optVal) == 0 {\nreturn nil\n@@ -2386,7 +2422,6 @@ func setSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name 
in\nlinux.IP_NODEFRAG,\nlinux.IP_OPTIONS,\nlinux.IP_PASSSEC,\n- linux.IP_RECVERR,\nlinux.IP_RECVFRAGSIZE,\nlinux.IP_RECVOPTS,\nlinux.IP_RECVTTL,\n@@ -2462,7 +2497,6 @@ func emitUnimplementedEventIPv6(t *kernel.Task, name int) {\nlinux.IPV6_MULTICAST_IF,\nlinux.IPV6_MULTICAST_LOOP,\nlinux.IPV6_RECVDSTOPTS,\n- linux.IPV6_RECVERR,\nlinux.IPV6_RECVFRAGSIZE,\nlinux.IPV6_RECVHOPLIMIT,\nlinux.IPV6_RECVHOPOPTS,\n@@ -2496,7 +2530,6 @@ func emitUnimplementedEventIP(t *kernel.Task, name int) {\nlinux.IP_PKTINFO,\nlinux.IP_PKTOPTIONS,\nlinux.IP_MTU_DISCOVER,\n- linux.IP_RECVERR,\nlinux.IP_RECVTTL,\nlinux.IP_RECVTOS,\nlinux.IP_MTU,\n@@ -2798,6 +2831,23 @@ func (s *socketOpsCommon) updateTimestamp() {\n}\n}\n+// dequeueErr is analogous to net/core/skbuff.c:sock_dequeue_err_skb().\n+func (s *socketOpsCommon) dequeueErr() *tcpip.SockError {\n+ so := s.Endpoint.SocketOptions()\n+ err := so.DequeueErr()\n+ if err == nil {\n+ return nil\n+ }\n+\n+ // Update socket error to reflect ICMP errors in queue.\n+ if nextErr := so.PeekErr(); nextErr != nil && nextErr.ErrOrigin.IsICMPErr() {\n+ so.SetLastError(nextErr.Err)\n+ } else if err.ErrOrigin.IsICMPErr() {\n+ so.SetLastError(nil)\n+ }\n+ return err\n+}\n+\n// addrFamilyFromNetProto returns the address family identifier for the given\n// network protocol.\nfunc addrFamilyFromNetProto(net tcpip.NetworkProtocolNumber) int {\n@@ -2814,7 +2864,7 @@ func addrFamilyFromNetProto(net tcpip.NetworkProtocolNumber) int {\n// recvErr handles MSG_ERRQUEUE for recvmsg(2).\n// This is analogous to net/ipv4/ip_sockglue.c:ip_recv_error().\nfunc (s *socketOpsCommon) recvErr(t *kernel.Task, dst usermem.IOSequence) (int, int, linux.SockAddr, uint32, socket.ControlMessages, *syserr.Error) {\n- sockErr := s.Endpoint.SocketOptions().DequeueErr()\n+ sockErr := s.dequeueErr()\nif sockErr == nil {\nreturn 0, 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/icmpv4.go", "new_path": "pkg/tcpip/header/icmpv4.go", "diff": "@@ -16,6 +16,7 @@ package header\nimport (\n\"encoding/binary\"\n+ \"fmt\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n@@ -213,3 +214,16 @@ func ICMPv4Checksum(h ICMPv4, vv buffer.VectorisedView) uint16 {\nreturn xsum\n}\n+\n+// ICMPOriginFromNetProto returns the appropriate SockErrOrigin to use when\n+// a packet having a `net` header causing an ICMP error.\n+func ICMPOriginFromNetProto(net tcpip.NetworkProtocolNumber) tcpip.SockErrOrigin {\n+ switch net {\n+ case IPv4ProtocolNumber:\n+ return tcpip.SockExtErrorOriginICMP\n+ case IPv6ProtocolNumber:\n+ return tcpip.SockExtErrorOriginICMP6\n+ default:\n+ panic(fmt.Sprintf(\"unsupported net proto to extract ICMP error origin: %d\", net))\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/socketops.go", "new_path": "pkg/tcpip/socketops.go", "diff": "@@ -42,6 +42,9 @@ type SocketOptionsHandler interface {\n// LastError is invoked when SO_ERROR is read for an endpoint.\nLastError() *Error\n+\n+ // UpdateLastError updates the endpoint specific last error field.\n+ UpdateLastError(err *Error)\n}\n// DefaultSocketOptionsHandler is an embeddable type that implements no-op\n@@ -70,6 +73,9 @@ func (*DefaultSocketOptionsHandler) LastError() *Error {\nreturn nil\n}\n+// UpdateLastError implements SocketOptionsHandler.UpdateLastError.\n+func (*DefaultSocketOptionsHandler) UpdateLastError(*Error) {}\n+\n// SocketOptions contains all the variables which store values for SOL_SOCKET,\n// SOL_IP, SOL_IPV6 and SOL_TCP level 
options.\n//\n@@ -145,6 +151,10 @@ type SocketOptions struct {\n// the incoming packet should be returned as an ancillary message.\nreceiveOriginalDstAddress uint32\n+ // recvErrEnabled determines whether extended reliable error message passing\n+ // is enabled.\n+ recvErrEnabled uint32\n+\n// errQueue is the per-socket error queue. It is protected by errQueueMu.\nerrQueueMu sync.Mutex `state:\"nosave\"`\nerrQueue sockErrorList\n@@ -171,6 +181,11 @@ func storeAtomicBool(addr *uint32, v bool) {\natomic.StoreUint32(addr, val)\n}\n+// SetLastError sets the last error for a socket.\n+func (so *SocketOptions) SetLastError(err *Error) {\n+ so.handler.UpdateLastError(err)\n+}\n+\n// GetBroadcast gets value for SO_BROADCAST option.\nfunc (so *SocketOptions) GetBroadcast() bool {\nreturn atomic.LoadUint32(&so.broadcastEnabled) != 0\n@@ -338,6 +353,19 @@ func (so *SocketOptions) SetReceiveOriginalDstAddress(v bool) {\nstoreAtomicBool(&so.receiveOriginalDstAddress, v)\n}\n+// GetRecvError gets value for IP*_RECVERR option.\n+func (so *SocketOptions) GetRecvError() bool {\n+ return atomic.LoadUint32(&so.recvErrEnabled) != 0\n+}\n+\n+// SetRecvError sets value for IP*_RECVERR option.\n+func (so *SocketOptions) SetRecvError(v bool) {\n+ storeAtomicBool(&so.recvErrEnabled, v)\n+ if !v {\n+ so.pruneErrQueue()\n+ }\n+}\n+\n// GetLastError gets value for SO_ERROR option.\nfunc (so *SocketOptions) GetLastError() *Error {\nreturn so.handler.LastError()\n@@ -384,6 +412,11 @@ const (\nSockExtErrorOriginICMP6\n)\n+// IsICMPErr indicates if the error originated from an ICMP error.\n+func (origin SockErrOrigin) IsICMPErr() bool {\n+ return origin == SockExtErrorOriginICMP || origin == SockExtErrorOriginICMP6\n+}\n+\n// SockError represents a queue entry in the per-socket error queue.\n//\n// +stateify savable\n@@ -411,6 +444,13 @@ type SockError struct {\nNetProto NetworkProtocolNumber\n}\n+// pruneErrQueue resets the queue.\n+func (so *SocketOptions) pruneErrQueue() {\n+ so.errQueueMu.Lock()\n+ so.errQueue.Reset()\n+ so.errQueueMu.Unlock()\n+}\n+\n// DequeueErr dequeues a socket extended error from the error queue and returns\n// it. Returns nil if queue is empty.\nfunc (so *SocketOptions) DequeueErr() *SockError {\n@@ -423,3 +463,32 @@ func (so *SocketOptions) DequeueErr() *SockError {\n}\nreturn err\n}\n+\n+// PeekErr returns the error in the front of the error queue. 
Returns nil if\n+// the error queue is empty.\n+func (so *SocketOptions) PeekErr() *SockError {\n+ so.errQueueMu.Lock()\n+ defer so.errQueueMu.Unlock()\n+ return so.errQueue.Front()\n+}\n+\n+// QueueErr inserts the error at the back of the error queue.\n+//\n+// Preconditions: so.GetRecvError() == true.\n+func (so *SocketOptions) QueueErr(err *SockError) {\n+ so.errQueueMu.Lock()\n+ defer so.errQueueMu.Unlock()\n+ so.errQueue.PushBack(err)\n+}\n+\n+// QueueLocalErr queues a local error onto the local queue.\n+func (so *SocketOptions) QueueLocalErr(err *Error, net NetworkProtocolNumber, info uint32, dst FullAddress, payload []byte) {\n+ so.QueueErr(&SockError{\n+ Err: err,\n+ ErrOrigin: SockExtErrorOriginLocal,\n+ ErrInfo: info,\n+ Payload: payload,\n+ Dst: dst,\n+ NetProto: net,\n+ })\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/packet/endpoint.go", "new_path": "pkg/tcpip/transport/packet/endpoint.go", "diff": "@@ -366,6 +366,13 @@ func (ep *endpoint) LastError() *tcpip.Error {\nreturn err\n}\n+// UpdateLastError implements tcpip.SocketOptionsHandler.UpdateLastError.\n+func (ep *endpoint) UpdateLastError(err *tcpip.Error) {\n+ ep.lastErrorMu.Lock()\n+ ep.lastError = err\n+ ep.lastErrorMu.Unlock()\n+}\n+\n// GetSockOpt implements tcpip.Endpoint.GetSockOpt.\nfunc (ep *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) *tcpip.Error {\nreturn tcpip.ErrNotSupported\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -1303,6 +1303,15 @@ func (e *endpoint) LastError() *tcpip.Error {\nreturn e.lastErrorLocked()\n}\n+// UpdateLastError implements tcpip.SocketOptionsHandler.UpdateLastError.\n+func (e *endpoint) UpdateLastError(err *tcpip.Error) {\n+ e.LockUser()\n+ e.lastErrorMu.Lock()\n+ e.lastError = err\n+ e.lastErrorMu.Unlock()\n+ e.UnlockUser()\n+}\n+\n// Read reads data from the endpoint.\nfunc (e *endpoint) Read(*tcpip.FullAddress) (buffer.View, tcpip.ControlMessages, *tcpip.Error) {\ne.LockUser()\n@@ -2708,6 +2717,41 @@ func (e *endpoint) enqueueSegment(s *segment) bool {\nreturn true\n}\n+func (e *endpoint) onICMPError(err *tcpip.Error, id stack.TransportEndpointID, errType byte, errCode byte, extra uint32, pkt *stack.PacketBuffer) {\n+ // Update last error first.\n+ e.lastErrorMu.Lock()\n+ e.lastError = err\n+ e.lastErrorMu.Unlock()\n+\n+ // Update the error queue if IP_RECVERR is enabled.\n+ if e.SocketOptions().GetRecvError() {\n+ e.SocketOptions().QueueErr(&tcpip.SockError{\n+ Err: err,\n+ ErrOrigin: header.ICMPOriginFromNetProto(pkt.NetworkProtocolNumber),\n+ ErrType: errType,\n+ ErrCode: errCode,\n+ ErrInfo: extra,\n+ // Linux passes the payload with the TCP header. 
We don't know if the TCP\n+ // header even exists, it may not for fragmented packets.\n+ Payload: pkt.Data.ToView(),\n+ Dst: tcpip.FullAddress{\n+ NIC: pkt.NICID,\n+ Addr: id.RemoteAddress,\n+ Port: id.RemotePort,\n+ },\n+ Offender: tcpip.FullAddress{\n+ NIC: pkt.NICID,\n+ Addr: id.LocalAddress,\n+ Port: id.LocalPort,\n+ },\n+ NetProto: pkt.NetworkProtocolNumber,\n+ })\n+ }\n+\n+ // Notify of the error.\n+ e.notifyProtocolGoroutine(notifyError)\n+}\n+\n// HandleControlPacket implements stack.TransportEndpoint.HandleControlPacket.\nfunc (e *endpoint) HandleControlPacket(id stack.TransportEndpointID, typ stack.ControlType, extra uint32, pkt *stack.PacketBuffer) {\nswitch typ {\n@@ -2722,16 +2766,10 @@ func (e *endpoint) HandleControlPacket(id stack.TransportEndpointID, typ stack.C\ne.notifyProtocolGoroutine(notifyMTUChanged)\ncase stack.ControlNoRoute:\n- e.lastErrorMu.Lock()\n- e.lastError = tcpip.ErrNoRoute\n- e.lastErrorMu.Unlock()\n- e.notifyProtocolGoroutine(notifyError)\n+ e.onICMPError(tcpip.ErrNoRoute, id, byte(header.ICMPv4DstUnreachable), byte(header.ICMPv4HostUnreachable), extra, pkt)\ncase stack.ControlNetworkUnreachable:\n- e.lastErrorMu.Lock()\n- e.lastError = tcpip.ErrNetworkUnreachable\n- e.lastErrorMu.Unlock()\n- e.notifyProtocolGoroutine(notifyError)\n+ e.onICMPError(tcpip.ErrNetworkUnreachable, id, byte(header.ICMPv6DstUnreachable), byte(header.ICMPv6NetworkUnreachable), extra, pkt)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/endpoint.go", "new_path": "pkg/tcpip/transport/udp/endpoint.go", "diff": "@@ -226,6 +226,13 @@ func (e *endpoint) LastError() *tcpip.Error {\nreturn err\n}\n+// UpdateLastError implements tcpip.SocketOptionsHandler.UpdateLastError.\n+func (e *endpoint) UpdateLastError(err *tcpip.Error) {\n+ e.lastErrorMu.Lock()\n+ e.lastError = err\n+ e.lastErrorMu.Unlock()\n+}\n+\n// Abort implements stack.TransportEndpoint.Abort.\nfunc (e *endpoint) Abort() {\ne.Close()\n@@ -511,6 +518,20 @@ func (e *endpoint) write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, <-c\n}\nif len(v) > header.UDPMaximumPacketSize {\n// Payload can't possibly fit in a packet.\n+ so := e.SocketOptions()\n+ if so.GetRecvError() {\n+ so.QueueLocalErr(\n+ tcpip.ErrMessageTooLong,\n+ route.NetProto,\n+ header.UDPMaximumPacketSize,\n+ tcpip.FullAddress{\n+ NIC: route.NICID(),\n+ Addr: route.RemoteAddress,\n+ Port: dstPort,\n+ },\n+ v,\n+ )\n+ }\nreturn 0, nil, tcpip.ErrMessageTooLong\n}\n@@ -1338,15 +1359,63 @@ func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketB\n}\n}\n-// HandleControlPacket implements stack.TransportEndpoint.HandleControlPacket.\n-func (e *endpoint) HandleControlPacket(id stack.TransportEndpointID, typ stack.ControlType, extra uint32, pkt *stack.PacketBuffer) {\n- if typ == stack.ControlPortUnreachable {\n- if e.EndpointState() == StateConnected {\n+func (e *endpoint) onICMPError(err *tcpip.Error, id stack.TransportEndpointID, errType byte, errCode byte, extra uint32, pkt *stack.PacketBuffer) {\n+ // Update last error first.\ne.lastErrorMu.Lock()\n- e.lastError = tcpip.ErrConnectionRefused\n+ e.lastError = err\ne.lastErrorMu.Unlock()\n+ // Update the error queue if IP_RECVERR is enabled.\n+ if e.SocketOptions().GetRecvError() {\n+ // Linux passes the payload without the UDP header.\n+ var payload []byte\n+ udp := header.UDP(pkt.Data.ToView())\n+ if len(udp) >= header.UDPMinimumSize {\n+ payload = udp.Payload()\n+ }\n+\n+ e.SocketOptions().QueueErr(&tcpip.SockError{\n+ Err: err,\n+ ErrOrigin: 
header.ICMPOriginFromNetProto(pkt.NetworkProtocolNumber),\n+ ErrType: errType,\n+ ErrCode: errCode,\n+ ErrInfo: extra,\n+ Payload: payload,\n+ Dst: tcpip.FullAddress{\n+ NIC: pkt.NICID,\n+ Addr: id.RemoteAddress,\n+ Port: id.RemotePort,\n+ },\n+ Offender: tcpip.FullAddress{\n+ NIC: pkt.NICID,\n+ Addr: id.LocalAddress,\n+ Port: id.LocalPort,\n+ },\n+ NetProto: pkt.NetworkProtocolNumber,\n+ })\n+ }\n+\n+ // Notify of the error.\ne.waiterQueue.Notify(waiter.EventErr)\n+}\n+\n+// HandleControlPacket implements stack.TransportEndpoint.HandleControlPacket.\n+func (e *endpoint) HandleControlPacket(id stack.TransportEndpointID, typ stack.ControlType, extra uint32, pkt *stack.PacketBuffer) {\n+ if typ == stack.ControlPortUnreachable {\n+ if e.EndpointState() == StateConnected {\n+ var errType byte\n+ var errCode byte\n+ switch pkt.NetworkProtocolNumber {\n+ case header.IPv4ProtocolNumber:\n+ errType = byte(header.ICMPv4DstUnreachable)\n+ errCode = byte(header.ICMPv4PortUnreachable)\n+ case header.IPv6ProtocolNumber:\n+ errType = byte(header.ICMPv6DstUnreachable)\n+ errCode = byte(header.ICMPv6PortUnreachable)\n+ default:\n+ panic(fmt.Sprintf(\"unsupported net proto for infering ICMP type and code: %d\", pkt.NetworkProtocolNumber))\n+ }\n+ e.onICMPError(tcpip.ErrConnectionRefused, id, errType, errCode, extra, pkt)\nreturn\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/filter/config.go", "new_path": "runsc/boot/filter/config.go", "diff": "@@ -351,6 +351,11 @@ func hostInetFilters() seccomp.SyscallRules {\nseccomp.EqualTo(syscall.SOL_IP),\nseccomp.EqualTo(syscall.IP_RECVORIGDSTADDR),\n},\n+ {\n+ seccomp.MatchAny{},\n+ seccomp.EqualTo(syscall.SOL_IP),\n+ seccomp.EqualTo(syscall.IP_RECVERR),\n+ },\n{\nseccomp.MatchAny{},\nseccomp.EqualTo(syscall.SOL_IPV6),\n@@ -361,6 +366,11 @@ func hostInetFilters() seccomp.SyscallRules {\nseccomp.EqualTo(syscall.SOL_IPV6),\nseccomp.EqualTo(syscall.IPV6_RECVTCLASS),\n},\n+ {\n+ seccomp.MatchAny{},\n+ seccomp.EqualTo(syscall.SOL_IPV6),\n+ seccomp.EqualTo(syscall.IPV6_RECVERR),\n+ },\n{\nseccomp.MatchAny{},\nseccomp.EqualTo(syscall.SOL_IPV6),\n@@ -444,13 +454,6 @@ func hostInetFilters() seccomp.SyscallRules {\nsyscall.SYS_SENDMSG: {},\nsyscall.SYS_SENDTO: {},\nsyscall.SYS_SETSOCKOPT: []seccomp.Rule{\n- {\n- seccomp.MatchAny{},\n- seccomp.EqualTo(syscall.SOL_IPV6),\n- seccomp.EqualTo(syscall.IPV6_V6ONLY),\n- seccomp.MatchAny{},\n- seccomp.EqualTo(4),\n- },\n{\nseccomp.MatchAny{},\nseccomp.EqualTo(syscall.SOL_SOCKET),\n@@ -521,6 +524,13 @@ func hostInetFilters() seccomp.SyscallRules {\nseccomp.MatchAny{},\nseccomp.EqualTo(4),\n},\n+ {\n+ seccomp.MatchAny{},\n+ seccomp.EqualTo(syscall.SOL_IP),\n+ seccomp.EqualTo(syscall.IP_RECVERR),\n+ seccomp.MatchAny{},\n+ seccomp.EqualTo(4),\n+ },\n{\nseccomp.MatchAny{},\nseccomp.EqualTo(syscall.SOL_IPV6),\n@@ -542,6 +552,20 @@ func hostInetFilters() seccomp.SyscallRules {\nseccomp.MatchAny{},\nseccomp.EqualTo(4),\n},\n+ {\n+ seccomp.MatchAny{},\n+ seccomp.EqualTo(syscall.SOL_IPV6),\n+ seccomp.EqualTo(syscall.IPV6_RECVERR),\n+ seccomp.MatchAny{},\n+ seccomp.EqualTo(4),\n+ },\n+ {\n+ seccomp.MatchAny{},\n+ seccomp.EqualTo(syscall.SOL_IPV6),\n+ seccomp.EqualTo(syscall.IPV6_V6ONLY),\n+ seccomp.MatchAny{},\n+ seccomp.EqualTo(4),\n+ },\n},\nsyscall.SYS_SHUTDOWN: []seccomp.Rule{\n{\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/udp_socket.cc", "new_path": "test/syscalls/linux/udp_socket.cc", "diff": "#include <arpa/inet.h>\n#include <fcntl.h>\n+#include <netinet/icmp6.h>\n+#include 
<netinet/ip_icmp.h>\n#include <ctime>\n@@ -779,6 +781,94 @@ TEST_P(UdpSocketTest, ConnectAndSendNoReceiver) {\nSyscallFailsWithErrno(ECONNREFUSED));\n}\n+#ifdef __linux__\n+TEST_P(UdpSocketTest, RecvErrorConnRefused) {\n+ // We will simulate an ICMP error and verify that we do receive that error via\n+ // recvmsg(MSG_ERRQUEUE).\n+ ASSERT_NO_ERRNO(BindLoopback());\n+ // Close the socket to release the port so that we get an ICMP error.\n+ ASSERT_THAT(close(bind_.release()), SyscallSucceeds());\n+\n+ // Set IP_RECVERR socket option to enable error queueing.\n+ int v = kSockOptOn;\n+ socklen_t optlen = sizeof(v);\n+ int opt_level = SOL_IP;\n+ int opt_type = IP_RECVERR;\n+ if (GetParam() != AddressFamily::kIpv4) {\n+ opt_level = SOL_IPV6;\n+ opt_type = IPV6_RECVERR;\n+ }\n+ ASSERT_THAT(setsockopt(sock_.get(), opt_level, opt_type, &v, optlen),\n+ SyscallSucceeds());\n+\n+ // Connect to loopback:bind_addr_ which should *hopefully* not be bound by an\n+ // UDP socket. There is no easy way to ensure that the UDP port is not bound\n+ // by another conncurrently running test. *This is potentially flaky*.\n+ const int kBufLen = 300;\n+ ASSERT_THAT(connect(sock_.get(), bind_addr_, addrlen_), SyscallSucceeds());\n+ char buf[kBufLen];\n+ RandomizeBuffer(buf, sizeof(buf));\n+ // Send from sock_ to an unbound port. This should cause ECONNREFUSED.\n+ EXPECT_THAT(send(sock_.get(), buf, sizeof(buf), 0),\n+ SyscallSucceedsWithValue(sizeof(buf)));\n+\n+ // Dequeue error using recvmsg(MSG_ERRQUEUE).\n+ char got[kBufLen];\n+ struct iovec iov;\n+ iov.iov_base = reinterpret_cast<void*>(got);\n+ iov.iov_len = kBufLen;\n+\n+ size_t control_buf_len = CMSG_SPACE(sizeof(sock_extended_err) + addrlen_);\n+ char* control_buf = static_cast<char*>(calloc(1, control_buf_len));\n+ struct sockaddr_storage remote;\n+ memset(&remote, 0, sizeof(remote));\n+ struct msghdr msg = {};\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+ msg.msg_flags = 0;\n+ msg.msg_control = control_buf;\n+ msg.msg_controllen = control_buf_len;\n+ msg.msg_name = reinterpret_cast<void*>(&remote);\n+ msg.msg_namelen = addrlen_;\n+ ASSERT_THAT(recvmsg(sock_.get(), &msg, MSG_ERRQUEUE),\n+ SyscallSucceedsWithValue(kBufLen));\n+\n+ // Check the contents of msg.\n+ EXPECT_EQ(memcmp(got, buf, sizeof(buf)), 0); // iovec check\n+ EXPECT_NE(msg.msg_flags & MSG_ERRQUEUE, 0);\n+ EXPECT_EQ(memcmp(&remote, bind_addr_, addrlen_), 0);\n+\n+ // Check the contents of the control message.\n+ struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);\n+ ASSERT_NE(cmsg, nullptr);\n+ EXPECT_EQ(CMSG_NXTHDR(&msg, cmsg), nullptr);\n+ EXPECT_EQ(cmsg->cmsg_level, opt_level);\n+ EXPECT_EQ(cmsg->cmsg_type, opt_type);\n+\n+ // Check the contents of socket error.\n+ struct sock_extended_err* sock_err =\n+ (struct sock_extended_err*)CMSG_DATA(cmsg);\n+ EXPECT_EQ(sock_err->ee_errno, ECONNREFUSED);\n+ if (GetParam() == AddressFamily::kIpv4) {\n+ EXPECT_EQ(sock_err->ee_origin, SO_EE_ORIGIN_ICMP);\n+ EXPECT_EQ(sock_err->ee_type, ICMP_DEST_UNREACH);\n+ EXPECT_EQ(sock_err->ee_code, ICMP_PORT_UNREACH);\n+ } else {\n+ EXPECT_EQ(sock_err->ee_origin, SO_EE_ORIGIN_ICMP6);\n+ EXPECT_EQ(sock_err->ee_type, ICMP6_DST_UNREACH);\n+ EXPECT_EQ(sock_err->ee_code, ICMP6_DST_UNREACH_NOPORT);\n+ }\n+\n+ // Now verify that the socket error was cleared by recvmsg(MSG_ERRQUEUE).\n+ int err;\n+ optlen = sizeof(err);\n+ ASSERT_THAT(getsockopt(sock_.get(), SOL_SOCKET, SO_ERROR, &err, &optlen),\n+ SyscallSucceeds());\n+ ASSERT_EQ(err, 0);\n+ ASSERT_EQ(optlen, sizeof(err));\n+}\n+#endif // __linux__\n+\nTEST_P(UdpSocketTest, 
ZerolengthWriteAllowed) {\n// TODO(gvisor.dev/issue/1202): Hostinet does not support zero length writes.\nSKIP_IF(IsRunningWithHostinet());\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: [netstack] Implement IP(V6)_RECVERR socket option. PiperOrigin-RevId: 348055514
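Together with the previous MSG_ERRQUEUE commit, this lets a guest enable IP_RECVERR and read back the queued sock_extended_err, much like the new udp_socket.cc test does. A hedged Go sketch of the same flow (Linux-only; the manual struct decode assumes a little-endian host, and the destination port is an arbitrary choice that is hopefully unbound):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Enable extended error reporting; reading it back goes through the new
	// SocketOptions().GetRecvError() path.
	if err := unix.SetsockoptInt(fd, unix.SOL_IP, unix.IP_RECVERR, 1); err != nil {
		panic(err)
	}

	// Provoke an ICMP port-unreachable by sending to a loopback port, then
	// give the error a moment to land on the queue.
	dst := &unix.SockaddrInet4{Port: 44444, Addr: [4]byte{127, 0, 0, 1}}
	_ = unix.Sendto(fd, []byte("x"), 0, dst)
	time.Sleep(100 * time.Millisecond)

	buf := make([]byte, 256)
	oob := make([]byte, 512)
	_, oobn, _, _, err := unix.Recvmsg(fd, buf, oob, unix.MSG_ERRQUEUE)
	if err != nil {
		fmt.Println("no queued error:", err)
		return
	}

	cmsgs, err := unix.ParseSocketControlMessage(oob[:oobn])
	if err != nil {
		panic(err)
	}
	for _, m := range cmsgs {
		if m.Header.Level == unix.SOL_IP && m.Header.Type == unix.IP_RECVERR {
			// struct sock_extended_err: ee_errno (u32), then ee_origin,
			// ee_type, ee_code (one byte each).
			fmt.Printf("errno=%d origin=%d type=%d code=%d\n",
				binary.LittleEndian.Uint32(m.Data[0:4]), m.Data[4], m.Data[5], m.Data[6])
		}
	}
}
```

For a connected socket an ECONNREFUSED with origin SO_EE_ORIGIN_ICMP (or ICMP6) is the expected outcome here, matching the assertions in the test added by the diff.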
author: 259,992
date: 17.12.2020 12:40:26
timezone: 28,800
hash: eeee055d60bed55c864c3b87c23785b00f1609e8
message: Set --nocache_test_results to runtime tests If not set, the cached result is used even when runtime options are changed, because they are not visible to blaze/bazel.
mods:
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -130,7 +130,9 @@ configure = $(call configure_noreload,$(1),$(2)) && $(reload_docker)\n# Helpers for above. Requires $(RUNTIME_BIN) dependency.\ninstall_runtime = $(call configure,$(RUNTIME),$(1) --TESTONLY-test-name-env=RUNSC_TEST_NAME)\n-test_runtime = $(call test,--test_arg=--runtime=$(RUNTIME) $(PARTITIONS) $(1))\n+# Don't use cached results, otherwise multiple runs using different runtimes\n+# are skipped.\n+test_runtime = $(call test,--test_arg=--runtime=$(RUNTIME) --nocache_test_results $(PARTITIONS) $(1))\nrefresh: $(RUNTIME_BIN) ## Updates the runtime binary.\n.PHONY: refresh\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: Set --nocache_test_results to runtime tests If not set, the cached result is used even when runtime options are changed, because they are not visible to blaze/bazel. PiperOrigin-RevId: 348074339
author: 260,001
date: 17.12.2020 14:20:56
timezone: 28,800
hash: 433fd0e64650e31ab28e9d918d6dfcd9a67b4246
message: Set verityMu to be state nosave
mods:
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/verity.go", "new_path": "pkg/sentry/fsimpl/verity/verity.go", "diff": "@@ -180,7 +180,7 @@ type filesystem struct {\n// its children. So they shouldn't be enabled the same time. This lock\n// is for the whole file system to ensure that no more than one file is\n// enabled the same time.\n- verityMu sync.RWMutex\n+ verityMu sync.RWMutex `state:\"nosave\"`\n}\n// InternalFilesystemOptions may be passed as\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: Set verityMu to be state nosave PiperOrigin-RevId: 348092999
author: 259,891
date: 21.12.2020 14:42:32
timezone: 28,800
hash: 981faa2c122922e0cb9f2996c56b4b7c38e18bfb
message: RLock Endpoint in raw.Endpoint.HandlePacket
mods:
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/raw/endpoint.go", "new_path": "pkg/tcpip/transport/raw/endpoint.go", "diff": "@@ -620,6 +620,7 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, *tcpip.Error) {\n// HandlePacket implements stack.RawTransportEndpoint.HandlePacket.\nfunc (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\n+ e.mu.RLock()\ne.rcvMu.Lock()\n// Drop the packet if our buffer is currently full or if this is an unassociated\n@@ -632,6 +633,7 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\n// sockets.\nif e.rcvClosed || !e.associated {\ne.rcvMu.Unlock()\n+ e.mu.RUnlock()\ne.stack.Stats().DroppedPackets.Increment()\ne.stats.ReceiveErrors.ClosedReceiver.Increment()\nreturn\n@@ -639,6 +641,7 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\nif e.rcvBufSize >= e.rcvBufSizeMax {\ne.rcvMu.Unlock()\n+ e.mu.RUnlock()\ne.stack.Stats().DroppedPackets.Increment()\ne.stats.ReceiveErrors.ReceiveBufferOverflow.Increment()\nreturn\n@@ -650,11 +653,13 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\n// If bound to a NIC, only accept data for that NIC.\nif e.BindNICID != 0 && e.BindNICID != pkt.NICID {\ne.rcvMu.Unlock()\n+ e.mu.RUnlock()\nreturn\n}\n// If bound to an address, only accept data for that address.\nif e.BindAddr != \"\" && e.BindAddr != remoteAddr {\ne.rcvMu.Unlock()\n+ e.mu.RUnlock()\nreturn\n}\n}\n@@ -663,6 +668,7 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\n// connected to.\nif e.connected && e.route.RemoteAddress != remoteAddr {\ne.rcvMu.Unlock()\n+ e.mu.RUnlock()\nreturn\n}\n@@ -697,6 +703,7 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\ne.rcvList.PushBack(packet)\ne.rcvBufSize += packet.data.Size()\ne.rcvMu.Unlock()\n+ e.mu.RUnlock()\ne.stats.PacketsReceived.Increment()\n// Notify waiters that there's data to be read.\nif wasEmpty {\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: RLock Endpoint in raw.Endpoint.HandlePacket PiperOrigin-RevId: 348530530
author: 259,853
date: 21.12.2020 15:47:58
timezone: 28,800
hash: 946cb909e62e0aaca9e3bbb7cf059dd6b0eab2ce
message: Don't modify a packet header when it can be used by other endpoints Reported-by:
mods:
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/checksum_test.go", "new_path": "pkg/tcpip/header/checksum_test.go", "diff": "@@ -19,6 +19,7 @@ package header_test\nimport (\n\"fmt\"\n\"math/rand\"\n+ \"sync\"\n\"testing\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n@@ -169,3 +170,96 @@ func BenchmarkChecksum(b *testing.B) {\n}\n}\n}\n+\n+func testICMPChecksum(t *testing.T, headerChecksum func() uint16, icmpChecksum func() uint16, want uint16, pktStr string) {\n+ // icmpChecksum should not do any modifications of the header to\n+ // calculate its checksum. Let's call it from a few go-routines and the\n+ // race detector will trigger a warning if there are any concurrent\n+ // read/write accesses.\n+\n+ const concurrency = 5\n+ start := make(chan int)\n+ ready := make(chan bool, concurrency)\n+ var wg sync.WaitGroup\n+ wg.Add(concurrency)\n+ defer wg.Wait()\n+\n+ for i := 0; i < concurrency; i++ {\n+ go func() {\n+ defer wg.Done()\n+\n+ ready <- true\n+ <-start\n+\n+ if got := headerChecksum(); want != got {\n+ t.Errorf(\"new checksum for %s does not match old got: %x, want: %x\", pktStr, got, want)\n+ }\n+ if got := icmpChecksum(); want != got {\n+ t.Errorf(\"new checksum for %s does not match old got: %x, want: %x\", pktStr, got, want)\n+ }\n+ }()\n+ }\n+ for i := 0; i < concurrency; i++ {\n+ <-ready\n+ }\n+ close(start)\n+}\n+\n+func TestICMPv4Checksum(t *testing.T) {\n+ rnd := rand.New(rand.NewSource(42))\n+\n+ h := header.ICMPv4(make([]byte, header.ICMPv4MinimumSize))\n+ if _, err := rnd.Read(h); err != nil {\n+ t.Fatalf(\"rnd.Read failed: %v\", err)\n+ }\n+ h.SetChecksum(0)\n+\n+ buf := make([]byte, 13)\n+ if _, err := rnd.Read(buf); err != nil {\n+ t.Fatalf(\"rnd.Read failed: %v\", err)\n+ }\n+ vv := buffer.NewVectorisedView(len(buf), []buffer.View{\n+ buffer.NewViewFromBytes(buf[:5]),\n+ buffer.NewViewFromBytes(buf[5:]),\n+ })\n+\n+ want := header.Checksum(vv.ToView(), 0)\n+ want = ^header.Checksum(h, want)\n+ h.SetChecksum(want)\n+\n+ testICMPChecksum(t, h.Checksum, func() uint16 {\n+ return header.ICMPv4Checksum(h, vv)\n+ }, want, fmt.Sprintf(\"header: {% x} data {% x}\", h, vv.ToView()))\n+}\n+\n+func TestICMPv6Checksum(t *testing.T) {\n+ rnd := rand.New(rand.NewSource(42))\n+\n+ h := header.ICMPv6(make([]byte, header.ICMPv6MinimumSize))\n+ if _, err := rnd.Read(h); err != nil {\n+ t.Fatalf(\"rnd.Read failed: %v\", err)\n+ }\n+ h.SetChecksum(0)\n+\n+ buf := make([]byte, 13)\n+ if _, err := rnd.Read(buf); err != nil {\n+ t.Fatalf(\"rnd.Read failed: %v\", err)\n+ }\n+ vv := buffer.NewVectorisedView(len(buf), []buffer.View{\n+ buffer.NewViewFromBytes(buf[:7]),\n+ buffer.NewViewFromBytes(buf[7:10]),\n+ buffer.NewViewFromBytes(buf[10:]),\n+ })\n+\n+ dst := header.IPv6Loopback\n+ src := header.IPv6Loopback\n+\n+ want := header.PseudoHeaderChecksum(header.ICMPv6ProtocolNumber, src, dst, uint16(len(h)+vv.Size()))\n+ want = header.Checksum(vv.ToView(), want)\n+ want = ^header.Checksum(h, want)\n+ h.SetChecksum(want)\n+\n+ testICMPChecksum(t, h.Checksum, func() uint16 {\n+ return header.ICMPv6Checksum(h, src, dst, vv)\n+ }, want, fmt.Sprintf(\"header: {% x} data {% x}\", h, vv.ToView()))\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/icmpv4.go", "new_path": "pkg/tcpip/header/icmpv4.go", "diff": "@@ -200,19 +200,13 @@ func (b ICMPv4) SetSequence(sequence uint16) {\n// ICMPv4Checksum calculates the ICMP checksum over the provided ICMP header,\n// and payload.\nfunc ICMPv4Checksum(h ICMPv4, vv buffer.VectorisedView) uint16 {\n- // Calculate the IPv6 pseudo-header 
upper-layer checksum.\n- xsum := uint16(0)\n- for _, v := range vv.Views() {\n- xsum = Checksum(v, xsum)\n- }\n+ xsum := ChecksumVV(vv, 0)\n- // h[2:4] is the checksum itself, set it aside to avoid checksumming the checksum.\n- h2, h3 := h[2], h[3]\n- h[2], h[3] = 0, 0\n- xsum = ^Checksum(h, xsum)\n- h[2], h[3] = h2, h3\n+ // h[2:4] is the checksum itself, skip it to avoid checksumming the checksum.\n+ xsum = Checksum(h[:2], xsum)\n+ xsum = Checksum(h[4:], xsum)\n- return xsum\n+ return ^xsum\n}\n// ICMPOriginFromNetProto returns the appropriate SockErrOrigin to use when\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/icmpv6.go", "new_path": "pkg/tcpip/header/icmpv6.go", "diff": "@@ -265,22 +265,13 @@ func (b ICMPv6) Payload() []byte {\n// ICMPv6Checksum calculates the ICMP checksum over the provided ICMPv6 header,\n// IPv6 src/dst addresses and the payload.\nfunc ICMPv6Checksum(h ICMPv6, src, dst tcpip.Address, vv buffer.VectorisedView) uint16 {\n- // Calculate the IPv6 pseudo-header upper-layer checksum.\n- xsum := Checksum([]byte(src), 0)\n- xsum = Checksum([]byte(dst), xsum)\n- var upperLayerLength [4]byte\n- binary.BigEndian.PutUint32(upperLayerLength[:], uint32(len(h)+vv.Size()))\n- xsum = Checksum(upperLayerLength[:], xsum)\n- xsum = Checksum([]byte{0, 0, 0, uint8(ICMPv6ProtocolNumber)}, xsum)\n- for _, v := range vv.Views() {\n- xsum = Checksum(v, xsum)\n- }\n+ xsum := PseudoHeaderChecksum(ICMPv6ProtocolNumber, src, dst, uint16(len(h)+vv.Size()))\n+\n+ xsum = ChecksumVV(vv, xsum)\n- // h[2:4] is the checksum itself, set it aside to avoid checksumming the checksum.\n- h2, h3 := h[2], h[3]\n- h[2], h[3] = 0, 0\n- xsum = ^Checksum(h, xsum)\n- h[2], h[3] = h2, h3\n+ // h[2:4] is the checksum itself, skip it to avoid checksumming the checksum.\n+ xsum = Checksum(h[:2], xsum)\n+ xsum = Checksum(h[4:], xsum)\n- return xsum\n+ return ^xsum\n}\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: Don't modify a packet header when it can be used by other endpoints Reported-by: [email protected] PiperOrigin-RevId: 348540796
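The fix above computes the ICMP checksum by folding in the header while skipping the checksum field, instead of temporarily zeroing bytes 2 and 3 of a buffer that other endpoints may be reading concurrently. A standalone sketch of that technique (a plain RFC 1071 one's-complement sum, not gVisor's Checksum helper):

```go
package main

import "fmt"

// onesComplementSum folds b into the running 16-bit one's-complement sum.
func onesComplementSum(sum uint32, b []byte) uint32 {
	for i := 0; i+1 < len(b); i += 2 {
		sum += uint32(b[i])<<8 | uint32(b[i+1])
	}
	if len(b)%2 == 1 {
		sum += uint32(b[len(b)-1]) << 8 // pad the trailing byte with zero
	}
	for sum > 0xffff {
		sum = (sum >> 16) + (sum & 0xffff)
	}
	return sum
}

// icmpChecksum checksums an ICMPv4 header plus payload without writing to
// the header: bytes 2 and 3 (the checksum field itself) are simply skipped.
func icmpChecksum(hdr, payload []byte) uint16 {
	sum := onesComplementSum(0, payload)
	sum = onesComplementSum(sum, hdr[:2])
	sum = onesComplementSum(sum, hdr[4:])
	return ^uint16(sum)
}

func main() {
	// ICMP echo request header: type 8, code 0, checksum 0, id 0x1234, seq 1.
	hdr := []byte{8, 0, 0, 0, 0x12, 0x34, 0x00, 0x01}
	payload := []byte("hello")
	fmt.Printf("checksum=%#04x\n", icmpChecksum(hdr, payload))
}
```

Skipping the two checksum bytes is arithmetically the same as summing them as zero, so the result matches the old zero-and-restore version without ever writing to the shared buffer.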
author: 259,896
date: 22.12.2020 14:41:11
timezone: 28,800
hash: 7c8ba72b026db3b79f12e679ab69078a25c143e8
message: Move SO_BINDTODEVICE to socketops.
mods:
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack.go", "new_path": "pkg/sentry/socket/netstack/netstack.go", "diff": "@@ -1042,10 +1042,7 @@ func getSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, fam\nreturn &v, nil\ncase linux.SO_BINDTODEVICE:\n- var v tcpip.BindToDeviceOption\n- if err := ep.GetSockOpt(&v); err != nil {\n- return nil, syserr.TranslateNetstackError(err)\n- }\n+ v := ep.SocketOptions().GetBindToDevice()\nif v == 0 {\nvar b primitive.ByteSlice\nreturn &b, nil\n@@ -1804,8 +1801,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam\n}\nname := string(optVal[:n])\nif name == \"\" {\n- v := tcpip.BindToDeviceOption(0)\n- return syserr.TranslateNetstackError(ep.SetSockOpt(&v))\n+ return syserr.TranslateNetstackError(ep.SocketOptions().SetBindToDevice(0))\n}\ns := t.NetworkContext()\nif s == nil {\n@@ -1813,8 +1809,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam\n}\nfor nicID, nic := range s.Interfaces() {\nif nic.Name == name {\n- v := tcpip.BindToDeviceOption(nicID)\n- return syserr.TranslateNetstackError(ep.SetSockOpt(&v))\n+ return syserr.TranslateNetstackError(ep.SocketOptions().SetBindToDevice(nicID))\n}\n}\nreturn syserr.ErrUnknownDevice\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/socketops.go", "new_path": "pkg/tcpip/socketops.go", "diff": "@@ -45,6 +45,9 @@ type SocketOptionsHandler interface {\n// UpdateLastError updates the endpoint specific last error field.\nUpdateLastError(err *Error)\n+\n+ // HasNIC is invoked to check if the NIC is valid for SO_BINDTODEVICE.\n+ HasNIC(v int32) bool\n}\n// DefaultSocketOptionsHandler is an embeddable type that implements no-op\n@@ -76,6 +79,11 @@ func (*DefaultSocketOptionsHandler) LastError() *Error {\n// UpdateLastError implements SocketOptionsHandler.UpdateLastError.\nfunc (*DefaultSocketOptionsHandler) UpdateLastError(*Error) {}\n+// HasNIC implements SocketOptionsHandler.HasNIC.\n+func (*DefaultSocketOptionsHandler) HasNIC(int32) bool {\n+ return false\n+}\n+\n// SocketOptions contains all the variables which store values for SOL_SOCKET,\n// SOL_IP, SOL_IPV6 and SOL_TCP level options.\n//\n@@ -159,6 +167,9 @@ type SocketOptions struct {\nerrQueueMu sync.Mutex `state:\"nosave\"`\nerrQueue sockErrorList\n+ // bindToDevice determines the device to which the socket is bound.\n+ bindToDevice int32\n+\n// mu protects the access to the below fields.\nmu sync.Mutex `state:\"nosave\"`\n@@ -492,3 +503,18 @@ func (so *SocketOptions) QueueLocalErr(err *Error, net NetworkProtocolNumber, in\nNetProto: net,\n})\n}\n+\n+// GetBindToDevice gets value for SO_BINDTODEVICE option.\n+func (so *SocketOptions) GetBindToDevice() int32 {\n+ return atomic.LoadInt32(&so.bindToDevice)\n+}\n+\n+// SetBindToDevice sets value for SO_BINDTODEVICE option.\n+func (so *SocketOptions) SetBindToDevice(bindToDevice int32) *Error {\n+ if !so.handler.HasNIC(bindToDevice) {\n+ return ErrUnknownDevice\n+ }\n+\n+ atomic.StoreInt32(&so.bindToDevice, bindToDevice)\n+ return nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/transport_demuxer_test.go", "new_path": "pkg/tcpip/stack/transport_demuxer_test.go", "diff": "@@ -308,9 +308,8 @@ func TestBindToDeviceDistribution(t *testing.T) {\ndefer ep.Close()\nep.SocketOptions().SetReusePort(endpoint.reuse)\n- bindToDeviceOption := tcpip.BindToDeviceOption(endpoint.bindToDevice)\n- if err := ep.SetSockOpt(&bindToDeviceOption); err != nil {\n- t.Fatalf(\"SetSockOpt(&%T(%d)) on 
endpoint %d failed: %s\", bindToDeviceOption, bindToDeviceOption, i, err)\n+ if err := ep.SocketOptions().SetBindToDevice(int32(endpoint.bindToDevice)); err != nil {\n+ t.Fatalf(\"SetSockOpt(&%T(%d)) on endpoint %d failed: %s\", endpoint.bindToDevice, endpoint.bindToDevice, i, err)\n}\nvar dstAddr tcpip.Address\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tcpip.go", "new_path": "pkg/tcpip/tcpip.go", "diff": "@@ -955,14 +955,6 @@ type SettableSocketOption interface {\nisSettableSocketOption()\n}\n-// BindToDeviceOption is used by SetSockOpt/GetSockOpt to specify that sockets\n-// should bind only on a specific NIC.\n-type BindToDeviceOption NICID\n-\n-func (*BindToDeviceOption) isGettableSocketOption() {}\n-\n-func (*BindToDeviceOption) isSettableSocketOption() {}\n-\n// TCPInfoOption is used by GetSockOpt to expose TCP statistics.\n//\n// TODO(b/64800844): Add and populate stat fields.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -502,9 +502,6 @@ type endpoint struct {\n// sack holds TCP SACK related information for this endpoint.\nsack SACKInfo\n- // bindToDevice is set to the NIC on which to bind or disabled if 0.\n- bindToDevice tcpip.NICID\n-\n// delay enables Nagle's algorithm.\n//\n// delay is a boolean (0 is false) and must be accessed atomically.\n@@ -1821,18 +1818,13 @@ func (e *endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) *tcpip.Error {\nreturn nil\n}\n+func (e *endpoint) HasNIC(id int32) bool {\n+ return id == 0 || e.stack.HasNIC(tcpip.NICID(id))\n+}\n+\n// SetSockOpt sets a socket option.\nfunc (e *endpoint) SetSockOpt(opt tcpip.SettableSocketOption) *tcpip.Error {\nswitch v := opt.(type) {\n- case *tcpip.BindToDeviceOption:\n- id := tcpip.NICID(*v)\n- if id != 0 && !e.stack.HasNIC(id) {\n- return tcpip.ErrUnknownDevice\n- }\n- e.LockUser()\n- e.bindToDevice = id\n- e.UnlockUser()\n-\ncase *tcpip.KeepaliveIdleOption:\ne.keepalive.Lock()\ne.keepalive.idle = time.Duration(*v)\n@@ -2013,11 +2005,6 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, *tcpip.Error) {\n// GetSockOpt implements tcpip.Endpoint.GetSockOpt.\nfunc (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) *tcpip.Error {\nswitch o := opt.(type) {\n- case *tcpip.BindToDeviceOption:\n- e.LockUser()\n- *o = tcpip.BindToDeviceOption(e.bindToDevice)\n- e.UnlockUser()\n-\ncase *tcpip.TCPInfoOption:\n*o = tcpip.TCPInfoOption{}\ne.LockUser()\n@@ -2220,11 +2207,12 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) *tc\n}\n}\n+ bindToDevice := tcpip.NICID(e.ops.GetBindToDevice())\nif _, err := e.stack.PickEphemeralPortStable(portOffset, func(p uint16) (bool, *tcpip.Error) {\nif sameAddr && p == e.ID.RemotePort {\nreturn false, nil\n}\n- if _, err := e.stack.ReservePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, e.bindToDevice, addr, nil /* testPort */); err != nil {\n+ if _, err := e.stack.ReservePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, bindToDevice, addr, nil /* testPort */); err != nil {\nif err != tcpip.ErrPortInUse || !reuse {\nreturn false, nil\n}\n@@ -2262,15 +2250,15 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) *tc\ntcpEP.notifyProtocolGoroutine(notifyAbort)\ntcpEP.UnlockUser()\n// Now try and Reserve again if it fails then we skip.\n- if _, err := e.stack.ReservePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, e.bindToDevice, addr, nil /* testPort */); err 
!= nil {\n+ if _, err := e.stack.ReservePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, bindToDevice, addr, nil /* testPort */); err != nil {\nreturn false, nil\n}\n}\nid := e.ID\nid.LocalPort = p\n- if err := e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, id, e, e.portFlags, e.bindToDevice); err != nil {\n- e.stack.ReleasePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, e.bindToDevice, addr)\n+ if err := e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, id, e, e.portFlags, bindToDevice); err != nil {\n+ e.stack.ReleasePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, bindToDevice, addr)\nif err == tcpip.ErrPortInUse {\nreturn false, nil\n}\n@@ -2281,7 +2269,7 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) *tc\n// the selected port.\ne.ID = id\ne.isPortReserved = true\n- e.boundBindToDevice = e.bindToDevice\n+ e.boundBindToDevice = bindToDevice\ne.boundPortFlags = e.portFlags\ne.boundDest = addr\nreturn true, nil\n@@ -2634,7 +2622,8 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err *tcpip.Error) {\ne.ID.LocalAddress = addr.Addr\n}\n- port, err := e.stack.ReservePort(netProtos, ProtocolNumber, addr.Addr, addr.Port, e.portFlags, e.bindToDevice, tcpip.FullAddress{}, func(p uint16) bool {\n+ bindToDevice := tcpip.NICID(e.ops.GetBindToDevice())\n+ port, err := e.stack.ReservePort(netProtos, ProtocolNumber, addr.Addr, addr.Port, e.portFlags, bindToDevice, tcpip.FullAddress{}, func(p uint16) bool {\nid := e.ID\nid.LocalPort = p\n// CheckRegisterTransportEndpoint should only return an error if there is a\n@@ -2645,7 +2634,7 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err *tcpip.Error) {\n// demuxer. Further connected endpoints always have a remote\n// address/port. 
Hence this will only return an error if there is a matching\n// listening endpoint.\n- if err := e.stack.CheckRegisterTransportEndpoint(nic, netProtos, ProtocolNumber, id, e.portFlags, e.bindToDevice); err != nil {\n+ if err := e.stack.CheckRegisterTransportEndpoint(nic, netProtos, ProtocolNumber, id, e.portFlags, bindToDevice); err != nil {\nreturn false\n}\nreturn true\n@@ -2654,7 +2643,7 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err *tcpip.Error) {\nreturn err\n}\n- e.boundBindToDevice = e.bindToDevice\n+ e.boundBindToDevice = bindToDevice\ne.boundPortFlags = e.portFlags\n// TODO(gvisor.dev/issue/3691): Add test to verify boundNICID is correct.\ne.boundNICID = nic\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/tcp_test.go", "new_path": "pkg/tcpip/transport/tcp/tcp_test.go", "diff": "@@ -1380,9 +1380,8 @@ func TestConnectBindToDevice(t *testing.T) {\ndefer c.Cleanup()\nc.Create(-1)\n- bindToDevice := tcpip.BindToDeviceOption(test.device)\n- if err := c.EP.SetSockOpt(&bindToDevice); err != nil {\n- t.Fatalf(\"c.EP.SetSockOpt(&%T(%d)): %s\", bindToDevice, bindToDevice, err)\n+ if err := c.EP.SocketOptions().SetBindToDevice(int32(test.device)); err != nil {\n+ t.Fatalf(\"c.EP.SetSockOpt(&%T(%d)): %s\", test.device, test.device, err)\n}\n// Start connection attempt.\nwaitEntry, _ := waiter.NewChannelEntry(nil)\n@@ -4507,7 +4506,7 @@ func TestBindToDeviceOption(t *testing.T) {\nname string\nsetBindToDevice *tcpip.NICID\nsetBindToDeviceError *tcpip.Error\n- getBindToDevice tcpip.BindToDeviceOption\n+ getBindToDevice int32\n}{\n{\"GetDefaultValue\", nil, nil, 0},\n{\"BindToNonExistent\", nicIDPtr(999), tcpip.ErrUnknownDevice, 0},\n@@ -4517,15 +4516,13 @@ func TestBindToDeviceOption(t *testing.T) {\nfor _, testAction := range testActions {\nt.Run(testAction.name, func(t *testing.T) {\nif testAction.setBindToDevice != nil {\n- bindToDevice := tcpip.BindToDeviceOption(*testAction.setBindToDevice)\n- if gotErr, wantErr := ep.SetSockOpt(&bindToDevice), testAction.setBindToDeviceError; gotErr != wantErr {\n+ bindToDevice := int32(*testAction.setBindToDevice)\n+ if gotErr, wantErr := ep.SocketOptions().SetBindToDevice(bindToDevice), testAction.setBindToDeviceError; gotErr != wantErr {\nt.Errorf(\"got SetSockOpt(&%T(%d)) = %s, want = %s\", bindToDevice, bindToDevice, gotErr, wantErr)\n}\n}\n- bindToDevice := tcpip.BindToDeviceOption(88888)\n- if err := ep.GetSockOpt(&bindToDevice); err != nil {\n- t.Errorf(\"GetSockOpt(&%T): %s\", bindToDevice, err)\n- } else if bindToDevice != testAction.getBindToDevice {\n+ bindToDevice := ep.SocketOptions().GetBindToDevice()\n+ if bindToDevice != testAction.getBindToDevice {\nt.Errorf(\"got bindToDevice = %d, want %d\", bindToDevice, testAction.getBindToDevice)\n}\n})\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/endpoint.go", "new_path": "pkg/tcpip/transport/udp/endpoint.go", "diff": "@@ -109,7 +109,6 @@ type endpoint struct {\nmulticastAddr tcpip.Address\nmulticastNICID tcpip.NICID\nportFlags ports.Flags\n- bindToDevice tcpip.NICID\nlastErrorMu sync.Mutex `state:\"nosave\"`\nlastError *tcpip.Error `state:\".(string)\"`\n@@ -659,6 +658,10 @@ func (e *endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) *tcpip.Error {\nreturn nil\n}\n+func (e *endpoint) HasNIC(id int32) bool {\n+ return id == 0 || e.stack.HasNIC(tcpip.NICID(id))\n+}\n+\n// SetSockOpt implements tcpip.Endpoint.SetSockOpt.\nfunc (e *endpoint) SetSockOpt(opt tcpip.SettableSocketOption) *tcpip.Error {\nswitch v := opt.(type) {\n@@ -775,15 
+778,6 @@ func (e *endpoint) SetSockOpt(opt tcpip.SettableSocketOption) *tcpip.Error {\ndelete(e.multicastMemberships, memToRemove)\n- case *tcpip.BindToDeviceOption:\n- id := tcpip.NICID(*v)\n- if id != 0 && !e.stack.HasNIC(id) {\n- return tcpip.ErrUnknownDevice\n- }\n- e.mu.Lock()\n- e.bindToDevice = id\n- e.mu.Unlock()\n-\ncase *tcpip.SocketDetachFilterOption:\nreturn nil\n}\n@@ -859,11 +853,6 @@ func (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) *tcpip.Error {\n}\ne.mu.Unlock()\n- case *tcpip.BindToDeviceOption:\n- e.mu.RLock()\n- *o = tcpip.BindToDeviceOption(e.bindToDevice)\n- e.mu.RUnlock()\n-\ndefault:\nreturn tcpip.ErrUnknownProtocolOption\n}\n@@ -1113,21 +1102,22 @@ func (*endpoint) Accept(*tcpip.FullAddress) (tcpip.Endpoint, *waiter.Queue, *tcp\n}\nfunc (e *endpoint) registerWithStack(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, id stack.TransportEndpointID) (stack.TransportEndpointID, tcpip.NICID, *tcpip.Error) {\n+ bindToDevice := tcpip.NICID(e.ops.GetBindToDevice())\nif e.ID.LocalPort == 0 {\n- port, err := e.stack.ReservePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, e.portFlags, e.bindToDevice, tcpip.FullAddress{}, nil /* testPort */)\n+ port, err := e.stack.ReservePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, e.portFlags, bindToDevice, tcpip.FullAddress{}, nil /* testPort */)\nif err != nil {\n- return id, e.bindToDevice, err\n+ return id, bindToDevice, err\n}\nid.LocalPort = port\n}\ne.boundPortFlags = e.portFlags\n- err := e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, id, e, e.boundPortFlags, e.bindToDevice)\n+ err := e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, id, e, e.boundPortFlags, bindToDevice)\nif err != nil {\n- e.stack.ReleasePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, e.boundPortFlags, e.bindToDevice, tcpip.FullAddress{})\n+ e.stack.ReleasePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, e.boundPortFlags, bindToDevice, tcpip.FullAddress{})\ne.boundPortFlags = ports.Flags{}\n}\n- return id, e.bindToDevice, err\n+ return id, bindToDevice, err\n}\nfunc (e *endpoint) bindLocked(addr tcpip.FullAddress) *tcpip.Error {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/forwarder.go", "new_path": "pkg/tcpip/transport/udp/forwarder.go", "diff": "@@ -78,7 +78,7 @@ func (r *ForwarderRequest) CreateEndpoint(queue *waiter.Queue) (tcpip.Endpoint,\nroute.ResolveWith(r.pkt.SourceLinkAddress())\nep := newEndpoint(r.stack, r.pkt.NetworkProtocolNumber, queue)\n- if err := r.stack.RegisterTransportEndpoint(r.pkt.NICID, []tcpip.NetworkProtocolNumber{r.pkt.NetworkProtocolNumber}, ProtocolNumber, r.id, ep, ep.portFlags, ep.bindToDevice); err != nil {\n+ if err := r.stack.RegisterTransportEndpoint(r.pkt.NICID, []tcpip.NetworkProtocolNumber{r.pkt.NetworkProtocolNumber}, ProtocolNumber, r.id, ep, ep.portFlags, tcpip.NICID(ep.ops.GetBindToDevice())); err != nil {\nep.Close()\nroute.Release()\nreturn nil, err\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/udp_test.go", "new_path": "pkg/tcpip/transport/udp/udp_test.go", "diff": "@@ -554,7 +554,7 @@ func TestBindToDeviceOption(t *testing.T) {\nname string\nsetBindToDevice *tcpip.NICID\nsetBindToDeviceError *tcpip.Error\n- getBindToDevice tcpip.BindToDeviceOption\n+ getBindToDevice int32\n}{\n{\"GetDefaultValue\", nil, nil, 0},\n{\"BindToNonExistent\", nicIDPtr(999), tcpip.ErrUnknownDevice, 0},\n@@ -564,15 +564,13 @@ func TestBindToDeviceOption(t *testing.T) {\nfor 
_, testAction := range testActions {\nt.Run(testAction.name, func(t *testing.T) {\nif testAction.setBindToDevice != nil {\n- bindToDevice := tcpip.BindToDeviceOption(*testAction.setBindToDevice)\n- if gotErr, wantErr := ep.SetSockOpt(&bindToDevice), testAction.setBindToDeviceError; gotErr != wantErr {\n+ bindToDevice := int32(*testAction.setBindToDevice)\n+ if gotErr, wantErr := ep.SocketOptions().SetBindToDevice(bindToDevice), testAction.setBindToDeviceError; gotErr != wantErr {\nt.Errorf(\"got SetSockOpt(&%T(%d)) = %s, want = %s\", bindToDevice, bindToDevice, gotErr, wantErr)\n}\n}\n- bindToDevice := tcpip.BindToDeviceOption(88888)\n- if err := ep.GetSockOpt(&bindToDevice); err != nil {\n- t.Errorf(\"GetSockOpt(&%T): %s\", bindToDevice, err)\n- } else if bindToDevice != testAction.getBindToDevice {\n+ bindToDevice := ep.SocketOptions().GetBindToDevice()\n+ if bindToDevice != testAction.getBindToDevice {\nt.Errorf(\"got bindToDevice = %d, want = %d\", bindToDevice, testAction.getBindToDevice)\n}\n})\n" } ]
Go
Apache License 2.0
google/gvisor
Move SO_BINDTODEVICE to socketops. PiperOrigin-RevId: 348696094
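As context for the diff above: the commit replaces the per-endpoint tcpip.BindToDeviceOption with a value stored in the shared SocketOptions struct, and NIC validation is delegated back to the endpoint through a HasNIC handler. Below is a minimal, self-contained Go sketch of that pattern; the type and function names are illustrative stand-ins, not the actual gvisor APIs.

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// handler lets the owning endpoint validate a NIC ID before it is stored.
type handler interface {
	HasNIC(id int32) bool
}

// socketOptions is a stand-in for a shared socket-options struct.
type socketOptions struct {
	handler      handler
	bindToDevice int32 // NIC ID, accessed atomically; 0 means "not bound"
}

// SetBindToDevice validates the NIC through the handler, then stores it.
func (so *socketOptions) SetBindToDevice(nic int32) error {
	if !so.handler.HasNIC(nic) {
		return errors.New("unknown device")
	}
	atomic.StoreInt32(&so.bindToDevice, nic)
	return nil
}

// GetBindToDevice returns the currently bound NIC ID.
func (so *socketOptions) GetBindToDevice() int32 {
	return atomic.LoadInt32(&so.bindToDevice)
}

// fakeEndpoint stands in for a TCP/UDP endpoint that knows its stack's NICs.
type fakeEndpoint struct{ nics map[int32]bool }

// HasNIC treats 0 as "unbind", matching the convention in the diff above.
func (e *fakeEndpoint) HasNIC(id int32) bool { return id == 0 || e.nics[id] }

func main() {
	ep := &fakeEndpoint{nics: map[int32]bool{1: true}}
	so := &socketOptions{handler: ep}

	fmt.Println(so.SetBindToDevice(1), "->", so.GetBindToDevice()) // <nil> -> 1
	fmt.Println(so.SetBindToDevice(9))                             // unknown device
	fmt.Println(so.SetBindToDevice(0), "->", so.GetBindToDevice()) // <nil> -> 0 (unbound)
}
```

Keeping the value in one atomically accessed field avoids duplicating the option (and its locking) in every transport endpoint, which is the point of the refactor.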
259,853
23.12.2020 11:08:42
28,800
d07915987631f4c3c6345275019a5b5b0cf28dbb
vfs1: don't allow opening socket files. open() has to return ENXIO in this case. O_PATH isn't supported by vfs1.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/gofer/inode.go", "new_path": "pkg/sentry/fs/gofer/inode.go", "diff": "@@ -475,6 +475,9 @@ func (i *inodeOperations) Check(ctx context.Context, inode *fs.Inode, p fs.PermM\nfunc (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {\nswitch d.Inode.StableAttr.Type {\ncase fs.Socket:\n+ if i.session().overrides != nil {\n+ return nil, syserror.ENXIO\n+ }\nreturn i.getFileSocket(ctx, d, flags)\ncase fs.Pipe:\nreturn i.getFilePipe(ctx, d, flags)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/inode.go", "new_path": "pkg/sentry/fs/host/inode.go", "diff": "@@ -276,6 +276,10 @@ func (i *inodeOperations) BoundEndpoint(inode *fs.Inode, path string) transport.\n// GetFile implements fs.InodeOperations.GetFile.\nfunc (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {\n+ if fs.IsSocket(d.Inode.StableAttr) {\n+ return nil, syserror.ENXIO\n+ }\n+\nreturn newFile(ctx, d, flags, i), nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/ramfs/socket.go", "new_path": "pkg/sentry/fs/ramfs/socket.go", "diff": "@@ -20,6 +20,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/fs/fsutil\"\n\"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport\"\n+ \"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/waiter\"\n)\n@@ -63,7 +64,7 @@ func (s *Socket) BoundEndpoint(*fs.Inode, string) transport.BoundEndpoint {\n// GetFile implements fs.FileOperations.GetFile.\nfunc (s *Socket) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {\n- return fs.NewFile(ctx, dirent, flags, &socketFileOperations{}), nil\n+ return nil, syserror.ENXIO\n}\n// +stateify savable\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/tmpfs/inode_file.go", "new_path": "pkg/sentry/fs/tmpfs/inode_file.go", "diff": "@@ -148,6 +148,10 @@ func (*fileInodeOperations) Rename(ctx context.Context, inode *fs.Inode, oldPare\n// GetFile implements fs.InodeOperations.GetFile.\nfunc (f *fileInodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {\n+ if fs.IsSocket(d.Inode.StableAttr) {\n+ return nil, syserror.ENXIO\n+ }\n+\nif flags.Write {\nfsmetric.TmpfsOpensW.Increment()\n} else if flags.Read {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_file.go", "new_path": "pkg/sentry/syscalls/linux/sys_file.go", "diff": "@@ -175,6 +175,12 @@ func openAt(t *kernel.Task, dirFD int32, addr usermem.Addr, flags uint) (fd uint\n}\n}\n+ file, err := d.Inode.GetFile(t, d, fileFlags)\n+ if err != nil {\n+ return syserror.ConvertIntr(err, syserror.ERESTARTSYS)\n+ }\n+ defer file.DecRef(t)\n+\n// Truncate is called when O_TRUNC is specified for any kind of\n// existing Dirent. 
Behavior is delegated to the entry's Truncate\n// implementation.\n@@ -184,12 +190,6 @@ func openAt(t *kernel.Task, dirFD int32, addr usermem.Addr, flags uint) (fd uint\n}\n}\n- file, err := d.Inode.GetFile(t, d, fileFlags)\n- if err != nil {\n- return syserror.ConvertIntr(err, syserror.ERESTARTSYS)\n- }\n- defer file.DecRef(t)\n-\n// Success.\nnewFD, err := t.NewFDFrom(0, file, kernel.FDFlags{\nCloseOnExec: flags&linux.O_CLOEXEC != 0,\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -3324,6 +3324,7 @@ cc_binary(\n\":socket_test_util\",\n\":unix_domain_socket_test_util\",\ngtest,\n+ \"//test/util:file_descriptor\",\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n],\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_unix_unbound_filesystem.cc", "new_path": "test/syscalls/linux/socket_unix_unbound_filesystem.cc", "diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+#include <fcntl.h>\n#include <stdio.h>\n#include <sys/un.h>\n#include \"gtest/gtest.h\"\n#include \"test/syscalls/linux/socket_test_util.h\"\n#include \"test/syscalls/linux/unix_domain_socket_test_util.h\"\n+#include \"test/util/file_descriptor.h\"\n#include \"test/util/test_util.h\"\nnamespace gvisor {\n@@ -70,6 +72,20 @@ TEST_P(UnboundFilesystemUnixSocketPairTest, GetSockNameLength) {\nstrlen(want_addr.sun_path) + 1 + sizeof(want_addr.sun_family));\n}\n+TEST_P(UnboundFilesystemUnixSocketPairTest, OpenSocketWithTruncate) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),\n+ sockets->first_addr_size()),\n+ SyscallSucceeds());\n+\n+ const struct sockaddr_un *addr =\n+ reinterpret_cast<const struct sockaddr_un *>(sockets->first_addr());\n+ EXPECT_THAT(chmod(addr->sun_path, 0777), SyscallSucceeds());\n+ EXPECT_THAT(open(addr->sun_path, O_RDONLY | O_TRUNC),\n+ SyscallFailsWithErrno(ENXIO));\n+}\n+\nINSTANTIATE_TEST_SUITE_P(\nAllUnixDomainSockets, UnboundFilesystemUnixSocketPairTest,\n::testing::ValuesIn(ApplyVec<SocketPairKind>(\n" } ]
Go
Apache License 2.0
google/gvisor
vfs1: don't allow opening socket files. open() has to return ENXIO in this case. O_PATH isn't supported by vfs1. PiperOrigin-RevId: 348820478
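The behavior enforced above can be observed from ordinary user code: on Linux, open(2) on a UNIX-domain socket file fails with ENXIO, which is what this change makes gvisor's vfs1 return as well. A small Go demonstration follows; the temporary directory and socket name are made up for the example.

```go
package main

import (
	"fmt"
	"net"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "sock-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	path := filepath.Join(dir, "demo.sock") // hypothetical socket path

	// Listening on a unix socket creates a socket file at `path`.
	l, err := net.Listen("unix", path)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	// open(2) on a socket file is expected to fail with ENXIO, the same
	// error this commit makes vfs1 return.
	_, err = os.OpenFile(path, os.O_RDONLY, 0)
	fmt.Println(err) // e.g. "open .../demo.sock: no such device or address"
}
```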
259,891
23.12.2020 15:18:54
28,800
5259b90dac7201c2e2f80ff5e1c25050e11d4035
Resolve compiler warnings. The buildkite syscall tests start with a lot of C++ warnings. This cleans that up a little.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/pipe.cc", "new_path": "test/syscalls/linux/pipe.cc", "diff": "@@ -71,13 +71,13 @@ class PipeTest : public ::testing::TestWithParam<PipeCreator> {\n// Returns true iff the pipe represents a named pipe.\nbool IsNamedPipe() const { return named_pipe_; }\n- int Size() const {\n+ size_t Size() const {\nint s1 = fcntl(rfd_.get(), F_GETPIPE_SZ);\nint s2 = fcntl(wfd_.get(), F_GETPIPE_SZ);\nEXPECT_GT(s1, 0);\nEXPECT_GT(s2, 0);\nEXPECT_EQ(s1, s2);\n- return s1;\n+ return static_cast<size_t>(s1);\n}\nstatic void TearDownTestSuite() {\n@@ -568,7 +568,7 @@ TEST_P(PipeTest, Streaming) {\nDisableSave ds;\n// Size() requires 2 syscalls, call it once and remember the value.\n- const int pipe_size = Size();\n+ const size_t pipe_size = Size();\nconst size_t streamed_bytes = 4 * pipe_size;\nabsl::Notification notify;\n@@ -576,7 +576,7 @@ TEST_P(PipeTest, Streaming) {\nstd::vector<char> buf(1024);\n// Don't start until it's full.\nnotify.WaitForNotification();\n- ssize_t total = 0;\n+ size_t total = 0;\nwhile (total < streamed_bytes) {\nASSERT_THAT(read(rfd_.get(), buf.data(), buf.size()),\nSyscallSucceedsWithValue(buf.size()));\n@@ -593,7 +593,7 @@ TEST_P(PipeTest, Streaming) {\n// page) for the check for notify.Notify() below to be correct.\nstd::vector<char> buf(1024);\nRandomizeBuffer(buf.data(), buf.size());\n- ssize_t total = 0;\n+ size_t total = 0;\nwhile (total < streamed_bytes) {\nASSERT_THAT(write(wfd_.get(), buf.data(), buf.size()),\nSyscallSucceedsWithValue(buf.size()));\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc_net.cc", "new_path": "test/syscalls/linux/proc_net.cc", "diff": "@@ -420,14 +420,14 @@ TEST(ProcNetSnmp, CheckNetStat) {\nint name_count = 0;\nint value_count = 0;\nstd::vector<absl::string_view> lines = absl::StrSplit(contents, '\\n');\n- for (int i = 0; i + 1 < lines.size(); i += 2) {\n+ for (long unsigned int i = 0; i + 1 < lines.size(); i += 2) {\nstd::vector<absl::string_view> names =\nabsl::StrSplit(lines[i], absl::ByAnyChar(\"\\t \"));\nstd::vector<absl::string_view> values =\nabsl::StrSplit(lines[i + 1], absl::ByAnyChar(\"\\t \"));\nEXPECT_EQ(names.size(), values.size()) << \" mismatch in lines '\" << lines[i]\n<< \"' and '\" << lines[i + 1] << \"'\";\n- for (int j = 0; j < names.size() && j < values.size(); ++j) {\n+ for (long unsigned int j = 0; j < names.size() && j < values.size(); ++j) {\nif (names[j] == \"TCPOrigDataSent\" || names[j] == \"TCPSynRetrans\" ||\nnames[j] == \"TCPDSACKRecv\" || names[j] == \"TCPDSACKOfoRecv\") {\n++name_count;\n@@ -457,14 +457,14 @@ TEST(ProcNetSnmp, CheckSnmp) {\nint name_count = 0;\nint value_count = 0;\nstd::vector<absl::string_view> lines = absl::StrSplit(contents, '\\n');\n- for (int i = 0; i + 1 < lines.size(); i += 2) {\n+ for (long unsigned int i = 0; i + 1 < lines.size(); i += 2) {\nstd::vector<absl::string_view> names =\nabsl::StrSplit(lines[i], absl::ByAnyChar(\"\\t \"));\nstd::vector<absl::string_view> values =\nabsl::StrSplit(lines[i + 1], absl::ByAnyChar(\"\\t \"));\nEXPECT_EQ(names.size(), values.size()) << \" mismatch in lines '\" << lines[i]\n<< \"' and '\" << lines[i + 1] << \"'\";\n- for (int j = 0; j < names.size() && j < values.size(); ++j) {\n+ for (long unsigned int j = 0; j < names.size() && j < values.size(); ++j) {\nif (names[j] == \"RetransSegs\") {\n++name_count;\nint64_t val;\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc_net_unix.cc", "new_path": "test/syscalls/linux/proc_net_unix.cc", "diff": "@@ -181,7 
+181,7 @@ PosixErrorOr<std::vector<UnixEntry>> ProcNetUnixEntries() {\n// Returns true on match, and sets 'match' to point to the matching entry.\nbool FindBy(std::vector<UnixEntry> entries, UnixEntry* match,\nstd::function<bool(const UnixEntry&)> predicate) {\n- for (int i = 0; i < entries.size(); ++i) {\n+ for (long unsigned int i = 0; i < entries.size(); ++i) {\nif (predicate(entries[i])) {\n*match = entries[i];\nreturn true;\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc_pid_uid_gid_map.cc", "new_path": "test/syscalls/linux/proc_pid_uid_gid_map.cc", "diff": "@@ -203,7 +203,7 @@ TEST_P(ProcSelfUidGidMapTest, IdentityMapOwnID) {\nEXPECT_THAT(\nInNewUserNamespaceWithMapFD([&](int fd) {\nDenySelfSetgroups();\n- TEST_PCHECK(write(fd, line.c_str(), line.size()) == line.size());\n+ TEST_PCHECK(static_cast<long unsigned int>(write(fd, line.c_str(), line.size())) == line.size());\n}),\nIsPosixErrorOkAndHolds(0));\n}\n@@ -220,7 +220,7 @@ TEST_P(ProcSelfUidGidMapTest, TrailingNewlineAndNULIgnored) {\nDenySelfSetgroups();\n// The write should return the full size of the write, even though\n// characters after the NUL were ignored.\n- TEST_PCHECK(write(fd, line.c_str(), line.size()) == line.size());\n+ TEST_PCHECK(static_cast<long unsigned int>(write(fd, line.c_str(), line.size())) == line.size());\n}),\nIsPosixErrorOkAndHolds(0));\n}\n@@ -233,7 +233,7 @@ TEST_P(ProcSelfUidGidMapTest, NonIdentityMapOwnID) {\nEXPECT_THAT(\nInNewUserNamespaceWithMapFD([&](int fd) {\nDenySelfSetgroups();\n- TEST_PCHECK(write(fd, line.c_str(), line.size()) == line.size());\n+ TEST_PCHECK(static_cast<long unsigned int>(write(fd, line.c_str(), line.size())) == line.size());\n}),\nIsPosixErrorOkAndHolds(0));\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/semaphore.cc", "new_path": "test/syscalls/linux/semaphore.cc", "diff": "@@ -600,7 +600,7 @@ TEST(SemaphoreTest, SemopGetzcnt) {\nbuf.sem_num = 0;\nbuf.sem_op = 0;\nconstexpr size_t kLoops = 10;\n- for (auto i = 0; i < kLoops; i++) {\n+ for (size_t i = 0; i < kLoops; i++) {\nauto child_pid = fork();\nif (child_pid == 0) {\nTEST_PCHECK(RetryEINTR(semop)(sem.get(), &buf, 1) == 0);\n@@ -707,7 +707,7 @@ TEST(SemaphoreTest, SemopGetncnt) {\nbuf.sem_num = 0;\nbuf.sem_op = -1;\nconstexpr size_t kLoops = 10;\n- for (auto i = 0; i < kLoops; i++) {\n+ for (size_t i = 0; i < kLoops; i++) {\nauto child_pid = fork();\nif (child_pid == 0) {\nTEST_PCHECK(RetryEINTR(semop)(sem.get(), &buf, 1) == 0);\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket.cc", "new_path": "test/syscalls/linux/socket.cc", "diff": "@@ -46,7 +46,7 @@ TEST(SocketTest, ProtocolUnix) {\n{AF_UNIX, SOCK_SEQPACKET, PF_UNIX},\n{AF_UNIX, SOCK_DGRAM, PF_UNIX},\n};\n- for (int i = 0; i < ABSL_ARRAYSIZE(tests); i++) {\n+ for (long unsigned int i = 0; i < ABSL_ARRAYSIZE(tests); i++) {\nASSERT_NO_ERRNO_AND_VALUE(\nSocket(tests[i].domain, tests[i].type, tests[i].protocol));\n}\n@@ -59,7 +59,7 @@ TEST(SocketTest, ProtocolInet) {\n{AF_INET, SOCK_DGRAM, IPPROTO_UDP},\n{AF_INET, SOCK_STREAM, IPPROTO_TCP},\n};\n- for (int i = 0; i < ABSL_ARRAYSIZE(tests); i++) {\n+ for (long unsigned int i = 0; i < ABSL_ARRAYSIZE(tests); i++) {\nASSERT_NO_ERRNO_AND_VALUE(\nSocket(tests[i].domain, tests[i].type, tests[i].protocol));\n}\n@@ -87,7 +87,7 @@ TEST(SocketTest, UnixSocketStat) {\nASSERT_THAT(stat(addr.sun_path, &statbuf), SyscallSucceeds());\n// Mode should be S_IFSOCK.\n- EXPECT_EQ(statbuf.st_mode, S_IFSOCK | sock_perm & ~mask);\n+ EXPECT_EQ(statbuf.st_mode, S_IFSOCK | 
(sock_perm & ~mask));\n// Timestamps should be equal and non-zero.\n// TODO(b/158882152): Sockets currently don't implement timestamps.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_bind_to_device_distribution.cc", "new_path": "test/syscalls/linux/socket_bind_to_device_distribution.cc", "diff": "@@ -168,7 +168,7 @@ TEST_P(BindToDeviceDistributionTest, Tcp) {\nstd::vector<std::unique_ptr<ScopedThread>> listen_threads(\nlistener_fds.size());\n- for (int i = 0; i < listener_fds.size(); i++) {\n+ for (long unsigned int i = 0; i < listener_fds.size(); i++) {\nlisten_threads[i] = absl::make_unique<ScopedThread>(\n[&listener_fds, &accept_counts, &connects_received, i,\nkConnectAttempts]() {\n@@ -235,7 +235,7 @@ TEST_P(BindToDeviceDistributionTest, Tcp) {\nlisten_thread->Join();\n}\n// Check that connections are distributed correctly among listening sockets.\n- for (int i = 0; i < accept_counts.size(); i++) {\n+ for (long unsigned int i = 0; i < accept_counts.size(); i++) {\nEXPECT_THAT(\naccept_counts[i],\nEquivalentWithin(static_cast<int>(kConnectAttempts *\n@@ -308,7 +308,7 @@ TEST_P(BindToDeviceDistributionTest, Udp) {\nstd::vector<std::unique_ptr<ScopedThread>> receiver_threads(\nlistener_fds.size());\n- for (int i = 0; i < listener_fds.size(); i++) {\n+ for (long unsigned int i = 0; i < listener_fds.size(); i++) {\nreceiver_threads[i] = absl::make_unique<ScopedThread>(\n[&listener_fds, &packets_per_socket, &packets_received, i]() {\ndo {\n@@ -366,7 +366,7 @@ TEST_P(BindToDeviceDistributionTest, Udp) {\nreceiver_thread->Join();\n}\n// Check that packets are distributed correctly among listening sockets.\n- for (int i = 0; i < packets_per_socket.size(); i++) {\n+ for (long unsigned int i = 0; i < packets_per_socket.size(); i++) {\nEXPECT_THAT(\npackets_per_socket[i],\nEquivalentWithin(static_cast<int>(kConnectAttempts *\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ip_udp_generic.cc", "new_path": "test/syscalls/linux/socket_ip_udp_generic.cc", "diff": "@@ -493,7 +493,7 @@ TEST_P(UDPSocketPairTest, TClassRecvMismatch) {\n// This should only test AF_INET6 sockets for the mismatch behavior.\nSKIP_IF(GetParam().domain != AF_INET6);\n// IPV6_RECVTCLASS is only valid for SOCK_DGRAM and SOCK_RAW.\n- SKIP_IF(GetParam().type != SOCK_DGRAM | GetParam().type != SOCK_RAW);\n+ SKIP_IF((GetParam().type != SOCK_DGRAM) | (GetParam().type != SOCK_RAW));\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ipv4_udp_unbound_netlink.cc", "new_path": "test/syscalls/linux/socket_ipv4_udp_unbound_netlink.cc", "diff": "@@ -177,7 +177,7 @@ TEST_P(IPv4UDPUnboundSocketNetlinkTest, ReuseAddrSubnetDirectedBroadcast) {\n// Broadcasts from each socket should be received by every socket (including\n// the sending socket).\n- for (int w = 0; w < socks.size(); w++) {\n+ for (long unsigned int w = 0; w < socks.size(); w++) {\nauto& w_sock = socks[w];\nASSERT_THAT(\nRetryEINTR(sendto)(w_sock->get(), send_buf, kSendBufSize, 0,\n@@ -187,7 +187,7 @@ TEST_P(IPv4UDPUnboundSocketNetlinkTest, ReuseAddrSubnetDirectedBroadcast) {\n<< \"write socks[\" << w << \"]\";\n// Check that we received the packet on all sockets.\n- for (int r = 0; r < socks.size(); r++) {\n+ for (long unsigned int r = 0; r < socks.size(); r++) {\nauto& r_sock = socks[r];\nstruct pollfd poll_fd = {r_sock->get(), POLLIN, 0};\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/tuntap.cc", "new_path": 
"test/syscalls/linux/tuntap.cc", "diff": "@@ -324,8 +324,9 @@ TEST_F(TuntapTest, PingKernel) {\n};\nwhile (1) {\ninpkt r = {};\n- int n = read(fd.get(), &r, sizeof(r));\n- EXPECT_THAT(n, SyscallSucceeds());\n+ int nread = read(fd.get(), &r, sizeof(r));\n+ EXPECT_THAT(nread, SyscallSucceeds());\n+ long unsigned int n = static_cast<long unsigned int>(nread);\nif (n < sizeof(pihdr)) {\nstd::cerr << \"Ignored packet, protocol: \" << r.pi.pi_protocol\n@@ -383,8 +384,9 @@ TEST_F(TuntapTest, SendUdpTriggersArpResolution) {\n};\nwhile (1) {\ninpkt r = {};\n- int n = read(fd.get(), &r, sizeof(r));\n- EXPECT_THAT(n, SyscallSucceeds());\n+ int nread = read(fd.get(), &r, sizeof(r));\n+ EXPECT_THAT(nread, SyscallSucceeds());\n+ long unsigned int n = static_cast<long unsigned int>(nread);\nif (n < sizeof(pihdr)) {\nstd::cerr << \"Ignored packet, protocol: \" << r.pi.pi_protocol\n" } ]
Go
Apache License 2.0
google/gvisor
Resolve compiler warnings. The buildkite syscall tests start with a lot of C++ warnings. This cleans that up a little.
259,857
26.12.2020 11:20:38
-28,800
d56ea8dfe40f02600de233797ba0ba0c4c340cbb
typo: change whicy to which
[ { "change_type": "MODIFY", "old_path": "tools/checkescape/checkescape.go", "new_path": "tools/checkescape/checkescape.go", "diff": "// heap: A direct allocation is made on the heap (hard).\n// builtin: A call is made to a built-in allocation function (hard).\n// stack: A stack split as part of a function preamble (soft).\n-// interface: A call is made via an interface whicy *may* escape (soft).\n+// interface: A call is made via an interface which *may* escape (soft).\n// dynamic: A dynamic function is dispatched which *may* escape (soft).\n//\n// To the use the package, annotate a function-level comment with either the\n" } ]
Go
Apache License 2.0
google/gvisor
typo: change whicy to which Signed-off-by: Lai Jiangshan <[email protected]>
260,019
29.12.2020 17:40:50
-28,800
7e91b3cdec3d744d86f808662fcfb1510470d1c0
arm64 kvm: revert some KPTI-related code and configure the upper page table as global. To improve performance, some KPTI-related code (TCR.A1) has been reverted, and the kernel page table is set as global.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine_arm64.go", "new_path": "pkg/sentry/platform/kvm/machine_arm64.go", "diff": "@@ -54,7 +54,7 @@ func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {\npageTable.Map(\nusermem.Addr(ring0.KernelStartAddress|pr.virtual),\npr.length,\n- pagetables.MapOpts{AccessType: usermem.AnyAccess},\n+ pagetables.MapOpts{AccessType: usermem.AnyAccess, Global: true},\npr.physical)\nreturn true // Keep iterating.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine_arm64_unsafe.go", "new_path": "pkg/sentry/platform/kvm/machine_arm64_unsafe.go", "diff": "@@ -79,7 +79,7 @@ func (c *vCPU) initArchState() error {\n}\n// tcr_el1\n- data = _TCR_TXSZ_VA48 | _TCR_CACHE_FLAGS | _TCR_SHARED | _TCR_TG_FLAGS | _TCR_ASID16 | _TCR_IPS_40BITS | _TCR_A1\n+ data = _TCR_TXSZ_VA48 | _TCR_CACHE_FLAGS | _TCR_SHARED | _TCR_TG_FLAGS | _TCR_ASID16 | _TCR_IPS_40BITS\nreg.id = _KVM_ARM64_REGS_TCR_EL1\nif err := c.setOneRegister(&reg); err != nil {\nreturn err\n@@ -103,7 +103,7 @@ func (c *vCPU) initArchState() error {\nc.SetTtbr0Kvm(uintptr(data))\n// ttbr1_el1\n- data = c.machine.kernel.PageTables.TTBR1_EL1(false, 1)\n+ data = c.machine.kernel.PageTables.TTBR1_EL1(false, 0)\nreg.id = _KVM_ARM64_REGS_TTBR1_EL1\nif err := c.setOneRegister(&reg); err != nil {\n" } ]
Go
Apache License 2.0
google/gvisor
arm64 kvm: revert some KPTI-related code and configure the upper page table as global. To improve performance, some KPTI-related code (TCR.A1) has been reverted, and the kernel page table is set as global. Signed-off-by: Robin Luk <[email protected]>
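For readers unfamiliar with the "global" attribute referenced above: an ARM64 stage-1 page/block descriptor carries an nG ("not global") bit, and clearing it makes the TLB entry match regardless of the current ASID, which is what marking kernel (upper-half) mappings as global achieves. The toy Go snippet below only illustrates that bit; it is not the gvisor pagetables code, and the descriptor value is made up.

```go
package main

import "fmt"

// nG ("not global") is bit 11 in the lower attributes of an ARM64 stage-1
// page/block descriptor. When the bit is clear, the TLB entry matches
// regardless of the current ASID, i.e. the mapping is global.
const nG = uint64(1) << 11

// makeGlobal clears the nG bit of a descriptor value.
func makeGlobal(desc uint64) uint64 { return desc &^ nG }

func main() {
	// A made-up descriptor value for illustration only.
	desc := uint64(0x0000000040001000) | nG
	fmt.Printf("before: nG=%d (per-ASID mapping)\n", (desc>>11)&1)

	desc = makeGlobal(desc)
	fmt.Printf("after:  nG=%d (global kernel mapping)\n", (desc>>11)&1)
}
```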
259,858
29.12.2020 16:21:27
28,800
85c1c3ed4b8d32c499c53917765acef20cb16248
Make profiling commands synchronous. This allows for a model of profiling where you can start collection, and it will terminate when the sandbox terminates. Without this synchronous call, it is effectively impossible to collect lengthy blocking and mutex profiles.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/control/pprof.go", "new_path": "pkg/sentry/control/pprof.go", "diff": "package control\nimport (\n- \"errors\"\n\"runtime\"\n\"runtime/pprof\"\n\"runtime/trace\"\n+ \"time\"\n\"gvisor.dev/gvisor/pkg/fd\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n@@ -26,184 +26,253 @@ import (\n\"gvisor.dev/gvisor/pkg/urpc\"\n)\n-var errNoOutput = errors.New(\"no output writer provided\")\n+// Profile includes profile-related RPC stubs. It provides a way to\n+// control the built-in runtime profiling facilities.\n+//\n+// The profile object must be instantied via NewProfile.\n+type Profile struct {\n+ // kernel is the kernel under profile. It's immutable.\n+ kernel *kernel.Kernel\n-// ProfileOpts contains options for the StartCPUProfile/Goroutine RPC call.\n-type ProfileOpts struct {\n- // File is the filesystem path for the profile.\n- File string `json:\"path\"`\n+ // cpuMu protects CPU profiling.\n+ cpuMu sync.Mutex\n- // FilePayload is the destination for the profiling output.\n- urpc.FilePayload\n+ // blockMu protects block profiling.\n+ blockMu sync.Mutex\n+\n+ // mutexMu protects mutex profiling.\n+ mutexMu sync.Mutex\n+\n+ // traceMu protects trace profiling.\n+ traceMu sync.Mutex\n+\n+ // done is closed when profiling is done.\n+ done chan struct{}\n}\n-// Profile includes profile-related RPC stubs. It provides a way to\n-// control the built-in pprof facility in sentry via sentryctl.\n-//\n-// The following options to sentryctl are added:\n+// NewProfile returns a new Profile object, and a stop callback.\n//\n-// - collect CPU profile on-demand.\n-// sentryctl -pid <pid> pprof-cpu-start\n-// sentryctl -pid <pid> pprof-cpu-stop\n-//\n-// - dump out the stack trace of current go routines.\n-// sentryctl -pid <pid> pprof-goroutine\n-type Profile struct {\n- // Kernel is the kernel under profile. 
It's immutable.\n- Kernel *kernel.Kernel\n+// The stop callback should be used at most once.\n+func NewProfile(k *kernel.Kernel) (*Profile, func()) {\n+ p := &Profile{\n+ kernel: k,\n+ done: make(chan struct{}),\n+ }\n+ return p, func() {\n+ close(p.done)\n+ }\n+}\n- // mu protects the fields below.\n- mu sync.Mutex\n+// CPUProfileOpts contains options specifically for CPU profiles.\n+type CPUProfileOpts struct {\n+ // FilePayload is the destination for the profiling output.\n+ urpc.FilePayload\n- // cpuFile is the current CPU profile output file.\n- cpuFile *fd.FD\n+ // Duration is the duration of the profile.\n+ Duration time.Duration `json:\"duration\"`\n- // traceFile is the current execution trace output file.\n- traceFile *fd.FD\n+ // Hz is the rate, which may be zero.\n+ Hz int `json:\"hz\"`\n}\n-// StartCPUProfile is an RPC stub which starts recording the CPU profile in a\n-// file.\n-func (p *Profile) StartCPUProfile(o *ProfileOpts, _ *struct{}) error {\n+// CPU is an RPC stub which collects a CPU profile.\n+func (p *Profile) CPU(o *CPUProfileOpts, _ *struct{}) error {\nif len(o.FilePayload.Files) < 1 {\n- return errNoOutput\n+ return nil // Allowed.\n}\noutput, err := fd.NewFromFile(o.FilePayload.Files[0])\nif err != nil {\nreturn err\n}\n+ defer output.Close()\n- p.mu.Lock()\n- defer p.mu.Unlock()\n+ p.cpuMu.Lock()\n+ defer p.cpuMu.Unlock()\n// Returns an error if profiling is already started.\n+ if o.Hz != 0 {\n+ runtime.SetCPUProfileRate(o.Hz)\n+ }\nif err := pprof.StartCPUProfile(output); err != nil {\n- output.Close()\nreturn err\n}\n+ defer pprof.StopCPUProfile()\n- p.cpuFile = output\n- return nil\n+ // Collect the profile.\n+ select {\n+ case <-time.After(o.Duration):\n+ case <-p.done:\n}\n-// StopCPUProfile is an RPC stub which stops the CPU profiling and flush out the\n-// profile data. 
It takes no argument.\n-func (p *Profile) StopCPUProfile(_, _ *struct{}) error {\n- p.mu.Lock()\n- defer p.mu.Unlock()\n-\n- if p.cpuFile == nil {\n- return errors.New(\"CPU profiling not started\")\n+ return nil\n}\n- pprof.StopCPUProfile()\n- p.cpuFile.Close()\n- p.cpuFile = nil\n- return nil\n+// HeapProfileOpts contains options specifically for heap profiles.\n+type HeapProfileOpts struct {\n+ // FilePayload is the destination for the profiling output.\n+ urpc.FilePayload\n}\n-// HeapProfile generates a heap profile for the sentry.\n-func (p *Profile) HeapProfile(o *ProfileOpts, _ *struct{}) error {\n+// Heap generates a heap profile.\n+func (p *Profile) Heap(o *HeapProfileOpts, _ *struct{}) error {\nif len(o.FilePayload.Files) < 1 {\n- return errNoOutput\n+ return nil // Allowed.\n}\n+\noutput := o.FilePayload.Files[0]\ndefer output.Close()\n+\nruntime.GC() // Get up-to-date statistics.\n- if err := pprof.WriteHeapProfile(output); err != nil {\n- return err\n+ return pprof.WriteHeapProfile(output)\n}\n- return nil\n+\n+// GoroutineProfileOpts contains options specifically for goroutine profiles.\n+type GoroutineProfileOpts struct {\n+ // FilePayload is the destination for the profiling output.\n+ urpc.FilePayload\n}\n-// GoroutineProfile is an RPC stub which dumps out the stack trace for all\n-// running goroutines.\n-func (p *Profile) GoroutineProfile(o *ProfileOpts, _ *struct{}) error {\n+// Goroutine dumps out the stack trace for all running goroutines.\n+func (p *Profile) Goroutine(o *GoroutineProfileOpts, _ *struct{}) error {\nif len(o.FilePayload.Files) < 1 {\n- return errNoOutput\n+ return nil // Allowed.\n}\n+\noutput := o.FilePayload.Files[0]\ndefer output.Close()\n- if err := pprof.Lookup(\"goroutine\").WriteTo(output, 2); err != nil {\n- return err\n+\n+ return pprof.Lookup(\"goroutine\").WriteTo(output, 2)\n}\n- return nil\n+\n+// BlockProfileOpts contains options specifically for block profiles.\n+type BlockProfileOpts struct {\n+ // FilePayload is the destination for the profiling output.\n+ urpc.FilePayload\n+\n+ // Duration is the duration of the profile.\n+ Duration time.Duration `json:\"duration\"`\n+\n+ // Rate is the block profile rate.\n+ Rate int `json:\"rate\"`\n}\n-// BlockProfile is an RPC stub which dumps out the stack trace that led to\n-// blocking on synchronization primitives.\n-func (p *Profile) BlockProfile(o *ProfileOpts, _ *struct{}) error {\n+// Block dumps a blocking profile.\n+func (p *Profile) Block(o *BlockProfileOpts, _ *struct{}) error {\nif len(o.FilePayload.Files) < 1 {\n- return errNoOutput\n+ return nil // Allowed.\n}\n+\noutput := o.FilePayload.Files[0]\ndefer output.Close()\n- if err := pprof.Lookup(\"block\").WriteTo(output, 0); err != nil {\n- return err\n+\n+ p.blockMu.Lock()\n+ defer p.blockMu.Unlock()\n+\n+ // Always set the rate. 
We then wait to collect a profile at this rate,\n+ // and disable when we're done.\n+ rate := 1\n+ if o.Rate != 0 {\n+ rate = o.Rate\n}\n- return nil\n+ runtime.SetBlockProfileRate(rate)\n+ defer runtime.SetBlockProfileRate(0)\n+\n+ // Collect the profile.\n+ select {\n+ case <-time.After(o.Duration):\n+ case <-p.done:\n+ }\n+\n+ return pprof.Lookup(\"block\").WriteTo(output, 0)\n+}\n+\n+// MutexProfileOpts contains options specifically for mutex profiles.\n+type MutexProfileOpts struct {\n+ // FilePayload is the destination for the profiling output.\n+ urpc.FilePayload\n+\n+ // Duration is the duration of the profile.\n+ Duration time.Duration `json:\"duration\"`\n+\n+ // Fraction is the mutex profile fraction.\n+ Fraction int `json:\"fraction\"`\n}\n-// MutexProfile is an RPC stub which dumps out the stack trace of holders of\n-// contended mutexes.\n-func (p *Profile) MutexProfile(o *ProfileOpts, _ *struct{}) error {\n+// Mutex dumps a mutex profile.\n+func (p *Profile) Mutex(o *MutexProfileOpts, _ *struct{}) error {\nif len(o.FilePayload.Files) < 1 {\n- return errNoOutput\n+ return nil // Allowed.\n}\n+\noutput := o.FilePayload.Files[0]\ndefer output.Close()\n- if err := pprof.Lookup(\"mutex\").WriteTo(output, 0); err != nil {\n- return err\n+\n+ p.mutexMu.Lock()\n+ defer p.mutexMu.Unlock()\n+\n+ // Always set the fraction.\n+ fraction := 1\n+ if o.Fraction != 0 {\n+ fraction = o.Fraction\n}\n- return nil\n+ runtime.SetMutexProfileFraction(fraction)\n+ defer runtime.SetMutexProfileFraction(0)\n+\n+ // Collect the profile.\n+ select {\n+ case <-time.After(o.Duration):\n+ case <-p.done:\n+ }\n+\n+ return pprof.Lookup(\"mutex\").WriteTo(output, 0)\n+}\n+\n+// TraceProfileOpts contains options specifically for traces.\n+type TraceProfileOpts struct {\n+ // FilePayload is the destination for the profiling output.\n+ urpc.FilePayload\n+\n+ // Duration is the duration of the profile.\n+ Duration time.Duration `json:\"duration\"`\n}\n-// StartTrace is an RPC stub which starts collection of an execution trace.\n-func (p *Profile) StartTrace(o *ProfileOpts, _ *struct{}) error {\n+// Trace is an RPC stub which starts collection of an execution trace.\n+func (p *Profile) Trace(o *TraceProfileOpts, _ *struct{}) error {\nif len(o.FilePayload.Files) < 1 {\n- return errNoOutput\n+ return nil // Allowed.\n}\noutput, err := fd.NewFromFile(o.FilePayload.Files[0])\nif err != nil {\nreturn err\n}\n+ defer output.Close()\n- p.mu.Lock()\n- defer p.mu.Unlock()\n+ p.traceMu.Lock()\n+ defer p.traceMu.Unlock()\n// Returns an error if profiling is already started.\nif err := trace.Start(output); err != nil {\noutput.Close()\nreturn err\n}\n+ defer trace.Stop()\n// Ensure all trace contexts are registered.\n- p.Kernel.RebuildTraceContexts()\n-\n- p.traceFile = output\n- return nil\n-}\n-\n-// StopTrace is an RPC stub which stops collection of an ongoing execution\n-// trace and flushes the trace data. It takes no argument.\n-func (p *Profile) StopTrace(_, _ *struct{}) error {\n- p.mu.Lock()\n- defer p.mu.Unlock()\n+ p.kernel.RebuildTraceContexts()\n- if p.traceFile == nil {\n- return errors.New(\"execution tracing not started\")\n+ // Wait for the trace.\n+ select {\n+ case <-time.After(o.Duration):\n+ case <-p.done:\n}\n// Similarly to the case above, if tasks have not ended traces, we will\n// lose information. Thus we need to rebuild the tasks in order to have\n// complete information. 
This will not lose information if multiple\n// traces are overlapping.\n- p.Kernel.RebuildTraceContexts()\n+ p.kernel.RebuildTraceContexts()\n- trace.Stop()\n- p.traceFile.Close()\n- p.traceFile = nil\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/urpc/urpc.go", "new_path": "pkg/urpc/urpc.go", "diff": "@@ -283,12 +283,10 @@ func (s *Server) handleOne(client *unet.Socket) error {\n// Client is dead.\nreturn err\n}\n-\n- defer func() {\nif s.afterRPCCallback != nil {\n- s.afterRPCCallback()\n+ defer s.afterRPCCallback()\n}\n- }()\n+\n// Explicitly close all these files after the call.\n//\n// This is also explicitly a reference to the files after the call,\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/controller.go", "new_path": "runsc/boot/controller.go", "diff": "@@ -104,13 +104,11 @@ const (\n// Profiling related commands (see pprof.go for more details).\nconst (\n- StartCPUProfile = \"Profile.StartCPUProfile\"\n- StopCPUProfile = \"Profile.StopCPUProfile\"\n- HeapProfile = \"Profile.HeapProfile\"\n- BlockProfile = \"Profile.BlockProfile\"\n- MutexProfile = \"Profile.MutexProfile\"\n- StartTrace = \"Profile.StartTrace\"\n- StopTrace = \"Profile.StopTrace\"\n+ CPUProfile = \"Profile.CPU\"\n+ HeapProfile = \"Profile.Heap\"\n+ BlockProfile = \"Profile.Block\"\n+ MutexProfile = \"Profile.Mutex\"\n+ Trace = \"Profile.Trace\"\n)\n// Logging related commands (see logging.go for more details).\n@@ -132,8 +130,13 @@ type controller struct {\n// manager holds the containerManager methods.\nmanager *containerManager\n- // pprop holds the profile instance if enabled. It may be nil.\n+ // pprof holds the profile instance if enabled. It may be nil.\npprof *control.Profile\n+\n+ // stopProfiling has the callback to stop profiling calls. As\n+ // this may be executed only once at most, it will be set to nil\n+ // after it is executed for the first time.\n+ stopProfiling func()\n}\n// newController creates a new controller. 
The caller must call\n@@ -164,7 +167,7 @@ func newController(fd int, l *Loader) (*controller, error) {\nctrl.srv.Register(&control.Logging{})\nif l.root.conf.ProfileEnable {\n- ctrl.pprof = &control.Profile{Kernel: l.k}\n+ ctrl.pprof, ctrl.stopProfiling = control.NewProfile(l.k)\nctrl.srv.Register(ctrl.pprof)\n}\n@@ -172,10 +175,9 @@ func newController(fd int, l *Loader) (*controller, error) {\n}\nfunc (c *controller) stop() {\n- if c.pprof != nil {\n- // These are noop if there is nothing being profiled.\n- _ = c.pprof.StopCPUProfile(nil, nil)\n- _ = c.pprof.StopTrace(nil, nil)\n+ if c.stopProfiling != nil {\n+ c.stopProfiling()\n+ c.stopProfiling = nil\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -598,7 +598,6 @@ func (l *Loader) run() error {\nif err != nil {\nreturn err\n}\n-\n}\nep.tg = l.k.GlobalInit()\n@@ -1045,9 +1044,10 @@ func (l *Loader) WaitExit() kernel.ExitStatus {\n// Wait for container.\nl.k.WaitExited()\n- // Cleanup\n+ // Stop the control server.\nl.ctrl.stop()\n+ // Check all references.\nrefs.OnExit()\nreturn l.k.GlobalInit().ExitStatus()\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/debug.go", "new_path": "runsc/cmd/debug.go", "diff": "@@ -19,6 +19,7 @@ import (\n\"os\"\n\"strconv\"\n\"strings\"\n+ \"sync\"\n\"syscall\"\n\"time\"\n@@ -70,10 +71,10 @@ func (d *Debug) SetFlags(f *flag.FlagSet) {\nf.StringVar(&d.profileCPU, \"profile-cpu\", \"\", \"writes CPU profile to the given file.\")\nf.StringVar(&d.profileBlock, \"profile-block\", \"\", \"writes block profile to the given file.\")\nf.StringVar(&d.profileMutex, \"profile-mutex\", \"\", \"writes mutex profile to the given file.\")\n- f.DurationVar(&d.duration, \"duration\", time.Second, \"amount of time to wait for CPU and trace profiles\")\n+ f.DurationVar(&d.duration, \"duration\", time.Second, \"amount of time to wait for CPU and trace profiles.\")\nf.StringVar(&d.trace, \"trace\", \"\", \"writes an execution trace to the given file.\")\nf.IntVar(&d.signal, \"signal\", -1, \"sends signal to the sandbox\")\n- f.StringVar(&d.strace, \"strace\", \"\", `A comma separated list of syscalls to trace. \"all\" enables all traces, \"off\" disables all`)\n+ f.StringVar(&d.strace, \"strace\", \"\", `A comma separated list of syscalls to trace. 
\"all\" enables all traces, \"off\" disables all.`)\nf.StringVar(&d.logLevel, \"log-level\", \"\", \"The log level to set: warning (0), info (1), or debug (2).\")\nf.StringVar(&d.logPackets, \"log-packets\", \"\", \"A boolean value to enable or disable packet logging: true or false.\")\nf.BoolVar(&d.ps, \"ps\", false, \"lists processes\")\n@@ -128,6 +129,7 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n}\nlog.Infof(\"Found sandbox %q, PID: %d\", c.Sandbox.ID, c.Sandbox.Pid)\n+ // Perform synchronous actions.\nif d.signal > 0 {\nlog.Infof(\"Sending signal %d to process: %d\", d.signal, c.Sandbox.Pid)\nif err := syscall.Kill(c.Sandbox.Pid, syscall.Signal(d.signal)); err != nil {\n@@ -143,80 +145,15 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nlog.Infof(\" *** Stack dump ***\\n%s\", stacks)\n}\nif d.profileHeap != \"\" {\n- f, err := os.Create(d.profileHeap)\n+ f, err := os.OpenFile(d.profileHeap, os.O_CREATE|os.O_TRUNC, 0644)\nif err != nil {\n- return Errorf(err.Error())\n+ return Errorf(\"error opening heap profile output: %v\", err)\n}\ndefer f.Close()\n-\nif err := c.Sandbox.HeapProfile(f); err != nil {\n- return Errorf(err.Error())\n- }\n- log.Infof(\"Heap profile written to %q\", d.profileHeap)\n- }\n- if d.profileBlock != \"\" {\n- f, err := os.Create(d.profileBlock)\n- if err != nil {\n- return Errorf(err.Error())\n- }\n- defer f.Close()\n-\n- if err := c.Sandbox.BlockProfile(f); err != nil {\n- return Errorf(err.Error())\n- }\n- log.Infof(\"Block profile written to %q\", d.profileBlock)\n- }\n- if d.profileMutex != \"\" {\n- f, err := os.Create(d.profileMutex)\n- if err != nil {\n- return Errorf(err.Error())\n- }\n- defer f.Close()\n-\n- if err := c.Sandbox.MutexProfile(f); err != nil {\n- return Errorf(err.Error())\n- }\n- log.Infof(\"Mutex profile written to %q\", d.profileMutex)\n- }\n-\n- delay := false\n- if d.profileCPU != \"\" {\n- delay = true\n- f, err := os.Create(d.profileCPU)\n- if err != nil {\n- return Errorf(err.Error())\n- }\n- defer func() {\n- f.Close()\n- if err := c.Sandbox.StopCPUProfile(); err != nil {\n- Fatalf(err.Error())\n- }\n- log.Infof(\"CPU profile written to %q\", d.profileCPU)\n- }()\n- if err := c.Sandbox.StartCPUProfile(f); err != nil {\n- return Errorf(err.Error())\n- }\n- log.Infof(\"CPU profile started for %v, writing to %q\", d.duration, d.profileCPU)\n- }\n- if d.trace != \"\" {\n- delay = true\n- f, err := os.Create(d.trace)\n- if err != nil {\n- return Errorf(err.Error())\n- }\n- defer func() {\n- f.Close()\n- if err := c.Sandbox.StopTrace(); err != nil {\n- Fatalf(err.Error())\n- }\n- log.Infof(\"Trace written to %q\", d.trace)\n- }()\n- if err := c.Sandbox.StartTrace(f); err != nil {\n- return Errorf(err.Error())\n+ return Errorf(\"error collecting heap profile: %v\", err)\n}\n- log.Infof(\"Tracing started for %v, writing to %q\", d.duration, d.trace)\n}\n-\nif d.strace != \"\" || len(d.logLevel) != 0 || len(d.logPackets) != 0 {\nargs := control.LoggingArgs{}\nswitch strings.ToLower(d.strace) {\n@@ -285,8 +222,98 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nlog.Infof(o)\n}\n- if delay {\n- time.Sleep(d.duration)\n+ // Open profiling files.\n+ var (\n+ cpuFile *os.File\n+ traceFile *os.File\n+ blockFile *os.File\n+ mutexFile *os.File\n+ )\n+ if d.profileCPU != \"\" {\n+ f, err := os.OpenFile(d.profileCPU, os.O_CREATE|os.O_TRUNC, 0644)\n+ if err != nil {\n+ return Errorf(\"error opening cpu profile output: %v\", err)\n+ }\n+ 
defer f.Close()\n+ cpuFile = f\n+ }\n+ if d.trace != \"\" {\n+ f, err := os.OpenFile(d.trace, os.O_CREATE|os.O_TRUNC, 0644)\n+ if err != nil {\n+ return Errorf(\"error opening trace profile output: %v\", err)\n+ }\n+ traceFile = f\n+ }\n+ if d.profileBlock != \"\" {\n+ f, err := os.OpenFile(d.profileBlock, os.O_CREATE|os.O_TRUNC, 0644)\n+ if err != nil {\n+ return Errorf(\"error opening blocking profile output: %v\", err)\n+ }\n+ defer f.Close()\n+ blockFile = f\n+ }\n+ if d.profileMutex != \"\" {\n+ f, err := os.OpenFile(d.profileMutex, os.O_CREATE|os.O_TRUNC, 0644)\n+ if err != nil {\n+ return Errorf(\"error opening mutex profile output: %v\", err)\n+ }\n+ defer f.Close()\n+ mutexFile = f\n+ }\n+\n+ // Collect profiles.\n+ var (\n+ wg sync.WaitGroup\n+ cpuErr error\n+ traceErr error\n+ blockErr error\n+ mutexErr error\n+ )\n+ if cpuFile != nil {\n+ wg.Add(1)\n+ go func() {\n+ defer wg.Done()\n+ cpuErr = c.Sandbox.CPUProfile(cpuFile, d.duration)\n+ }()\n+ }\n+ if traceFile != nil {\n+ wg.Add(1)\n+ go func() {\n+ defer wg.Done()\n+ traceErr = c.Sandbox.Trace(traceFile, d.duration)\n+ }()\n+ }\n+ if blockFile != nil {\n+ wg.Add(1)\n+ go func() {\n+ defer wg.Done()\n+ blockErr = c.Sandbox.BlockProfile(blockFile, d.duration)\n+ }()\n+ }\n+ if mutexFile != nil {\n+ wg.Add(1)\n+ go func() {\n+ defer wg.Done()\n+ mutexErr = c.Sandbox.MutexProfile(mutexFile, d.duration)\n+ }()\n+ }\n+\n+ wg.Wait()\n+ errorCount := 0\n+ if cpuErr != nil {\n+ log.Infof(\"error collecting cpu profile: %v\", cpuErr)\n+ }\n+ if traceErr != nil {\n+ log.Infof(\"error collecting trace profile: %v\", traceErr)\n+ }\n+ if blockErr != nil {\n+ log.Infof(\"error collecting block profile: %v\", blockErr)\n+ }\n+ if mutexErr != nil {\n+ log.Infof(\"error collecting mutex profile: %v\", mutexErr)\n+ }\n+ if errorCount > 0 {\n+ return subcommands.ExitFailure\n}\nreturn subcommands.ExitSuccess\n" }, { "change_type": "MODIFY", "old_path": "runsc/sandbox/sandbox.go", "new_path": "runsc/sandbox/sandbox.go", "diff": "@@ -999,54 +999,30 @@ func (s *Sandbox) HeapProfile(f *os.File) error {\n}\ndefer conn.Close()\n- opts := control.ProfileOpts{\n- FilePayload: urpc.FilePayload{\n- Files: []*os.File{f},\n- },\n+ opts := control.HeapProfileOpts{\n+ FilePayload: urpc.FilePayload{Files: []*os.File{f}},\n}\n- if err := conn.Call(boot.HeapProfile, &opts, nil); err != nil {\n- return fmt.Errorf(\"getting sandbox %q heap profile: %v\", s.ID, err)\n- }\n- return nil\n+ return conn.Call(boot.HeapProfile, &opts, nil)\n}\n-// StartCPUProfile start CPU profile writing to the given file.\n-func (s *Sandbox) StartCPUProfile(f *os.File) error {\n- log.Debugf(\"CPU profile start %q\", s.ID)\n+// CPUProfile collects a CPU profile.\n+func (s *Sandbox) CPUProfile(f *os.File, duration time.Duration) error {\n+ log.Debugf(\"CPU profile %q\", s.ID)\nconn, err := s.sandboxConnect()\nif err != nil {\nreturn err\n}\ndefer conn.Close()\n- opts := control.ProfileOpts{\n- FilePayload: urpc.FilePayload{\n- Files: []*os.File{f},\n- },\n- }\n- if err := conn.Call(boot.StartCPUProfile, &opts, nil); err != nil {\n- return fmt.Errorf(\"starting sandbox %q CPU profile: %v\", s.ID, err)\n+ opts := control.CPUProfileOpts{\n+ FilePayload: urpc.FilePayload{Files: []*os.File{f}},\n+ Duration: duration,\n}\n- return nil\n-}\n-\n-// StopCPUProfile stops a previously started CPU profile.\n-func (s *Sandbox) StopCPUProfile() error {\n- log.Debugf(\"CPU profile stop %q\", s.ID)\n- conn, err := s.sandboxConnect()\n- if err != nil {\n- return err\n- }\n- defer conn.Close()\n-\n- if 
err := conn.Call(boot.StopCPUProfile, nil, nil); err != nil {\n- return fmt.Errorf(\"stopping sandbox %q CPU profile: %v\", s.ID, err)\n- }\n- return nil\n+ return conn.Call(boot.CPUProfile, &opts, nil)\n}\n// BlockProfile writes a block profile to the given file.\n-func (s *Sandbox) BlockProfile(f *os.File) error {\n+func (s *Sandbox) BlockProfile(f *os.File, duration time.Duration) error {\nlog.Debugf(\"Block profile %q\", s.ID)\nconn, err := s.sandboxConnect()\nif err != nil {\n@@ -1054,19 +1030,15 @@ func (s *Sandbox) BlockProfile(f *os.File) error {\n}\ndefer conn.Close()\n- opts := control.ProfileOpts{\n- FilePayload: urpc.FilePayload{\n- Files: []*os.File{f},\n- },\n+ opts := control.BlockProfileOpts{\n+ FilePayload: urpc.FilePayload{Files: []*os.File{f}},\n+ Duration: duration,\n}\n- if err := conn.Call(boot.BlockProfile, &opts, nil); err != nil {\n- return fmt.Errorf(\"getting sandbox %q block profile: %v\", s.ID, err)\n- }\n- return nil\n+ return conn.Call(boot.BlockProfile, &opts, nil)\n}\n// MutexProfile writes a mutex profile to the given file.\n-func (s *Sandbox) MutexProfile(f *os.File) error {\n+func (s *Sandbox) MutexProfile(f *os.File, duration time.Duration) error {\nlog.Debugf(\"Mutex profile %q\", s.ID)\nconn, err := s.sandboxConnect()\nif err != nil {\n@@ -1074,50 +1046,27 @@ func (s *Sandbox) MutexProfile(f *os.File) error {\n}\ndefer conn.Close()\n- opts := control.ProfileOpts{\n- FilePayload: urpc.FilePayload{\n- Files: []*os.File{f},\n- },\n- }\n- if err := conn.Call(boot.MutexProfile, &opts, nil); err != nil {\n- return fmt.Errorf(\"getting sandbox %q mutex profile: %v\", s.ID, err)\n- }\n- return nil\n-}\n-\n-// StartTrace start trace writing to the given file.\n-func (s *Sandbox) StartTrace(f *os.File) error {\n- log.Debugf(\"Trace start %q\", s.ID)\n- conn, err := s.sandboxConnect()\n- if err != nil {\n- return err\n- }\n- defer conn.Close()\n-\n- opts := control.ProfileOpts{\n- FilePayload: urpc.FilePayload{\n- Files: []*os.File{f},\n- },\n+ opts := control.MutexProfileOpts{\n+ FilePayload: urpc.FilePayload{Files: []*os.File{f}},\n+ Duration: duration,\n}\n- if err := conn.Call(boot.StartTrace, &opts, nil); err != nil {\n- return fmt.Errorf(\"starting sandbox %q trace: %v\", s.ID, err)\n- }\n- return nil\n+ return conn.Call(boot.MutexProfile, &opts, nil)\n}\n-// StopTrace stops a previously started trace.\n-func (s *Sandbox) StopTrace() error {\n- log.Debugf(\"Trace stop %q\", s.ID)\n+// Trace collects an execution trace.\n+func (s *Sandbox) Trace(f *os.File, duration time.Duration) error {\n+ log.Debugf(\"Trace %q\", s.ID)\nconn, err := s.sandboxConnect()\nif err != nil {\nreturn err\n}\ndefer conn.Close()\n- if err := conn.Call(boot.StopTrace, nil, nil); err != nil {\n- return fmt.Errorf(\"stopping sandbox %q trace: %v\", s.ID, err)\n+ opts := control.TraceProfileOpts{\n+ FilePayload: urpc.FilePayload{Files: []*os.File{f}},\n+ Duration: duration,\n}\n- return nil\n+ return conn.Call(boot.Trace, &opts, nil)\n}\n// ChangeLogging changes logging options.\n" } ]
Go
Apache License 2.0
google/gvisor
Make profiling commands synchronous. This allows for a model of profiling where you can start collection, and it will terminate when the sandbox terminates. Without this synchronous call, it is effectively impossible to collect lengthy blocking and mutex profiles. PiperOrigin-RevId: 349483418
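The synchronous, duration-bounded collection model described in this record can be sketched with the standard runtime/pprof package. The sketch below is illustrative only: the real change drives runsc's Sandbox control RPCs (CPUProfile, BlockProfile, and so on), and the output file name and 2-second duration here are arbitrary assumptions.

package main

import (
	"log"
	"os"
	"runtime/pprof"
	"sync"
	"time"
)

// collectCPU writes a CPU profile covering the given duration and returns
// only once collection has finished, mirroring the synchronous model above.
func collectCPU(f *os.File, d time.Duration) error {
	if err := pprof.StartCPUProfile(f); err != nil {
		return err
	}
	time.Sleep(d)
	pprof.StopCPUProfile()
	return nil
}

func main() {
	f, err := os.Create("cpu.pprof") // output path is an arbitrary example
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Fan the collection out and block until it completes, the way the
	// debug command does for CPU, trace, block, and mutex profiles.
	var (
		wg     sync.WaitGroup
		cpuErr error
	)
	wg.Add(1)
	go func() {
		defer wg.Done()
		cpuErr = collectCPU(f, 2*time.Second)
	}()
	wg.Wait()
	if cpuErr != nil {
		log.Printf("error collecting cpu profile: %v", cpuErr)
	}
}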
259,875
29.12.2020 17:36:45
28,800
d302c05700ab3474facded345d3d59d7cbc38184
Deflake semaphore_test_native
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/semaphore.cc", "new_path": "test/syscalls/linux/semaphore.cc", "diff": "#include \"test/util/test_util.h\"\n#include \"test/util/thread_util.h\"\n-using ::testing::Contains;\n-\nnamespace gvisor {\nnamespace testing {\nnamespace {\n@@ -793,7 +791,6 @@ TEST(SemaphoreTest, IpcInfo) {\nstruct seminfo info;\n// Drop CAP_IPC_OWNER which allows us to bypass semaphore permissions.\nASSERT_NO_ERRNO(SetCapability(CAP_IPC_OWNER, false));\n- ASSERT_THAT(semctl(0, 0, IPC_INFO, &info), SyscallSucceedsWithValue(0));\nfor (int i = 0; i < kLoops; i++) {\nAutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));\nASSERT_THAT(sem.get(), SyscallSucceeds());\n@@ -805,13 +802,12 @@ TEST(SemaphoreTest, IpcInfo) {\nEXPECT_THAT(max_used_index = semctl(0, 0, IPC_INFO, &info),\nSyscallSucceeds());\n- int index_count = 0;\n+ std::set<int> sem_ids_before_max_index;\nfor (int i = 0; i <= max_used_index; i++) {\nstruct semid_ds ds = {};\nint sem_id = semctl(i, 0, SEM_STAT, &ds);\n// Only if index i is used within the registry.\n- if (sem_id != -1) {\n- ASSERT_THAT(sem_ids, Contains(sem_id));\n+ if (sem_ids.find(sem_id) != sem_ids.end()) {\nstruct semid_ds ipc_stat_ds;\nASSERT_THAT(semctl(sem_id, 0, IPC_STAT, &ipc_stat_ds), SyscallSucceeds());\nEXPECT_EQ(ds.sem_perm.__key, ipc_stat_ds.sem_perm.__key);\n@@ -833,17 +829,15 @@ TEST(SemaphoreTest, IpcInfo) {\nASSERT_THAT(semctl(sem_id, 0, IPC_SET, &ipc_set_ds), SyscallSucceeds());\nASSERT_THAT(semctl(i, 0, SEM_STAT, &ds), SyscallFailsWithErrno(EACCES));\n- index_count += 1;\n+ sem_ids_before_max_index.insert(sem_id);\n}\n}\n- EXPECT_EQ(index_count, kLoops);\n- ASSERT_THAT(semctl(0, 0, IPC_INFO, &info),\n- SyscallSucceedsWithValue(max_used_index));\n+ EXPECT_EQ(sem_ids_before_max_index.size(), kLoops);\nfor (const int sem_id : sem_ids) {\nASSERT_THAT(semctl(sem_id, 0, IPC_RMID), SyscallSucceeds());\n}\n- ASSERT_THAT(semctl(0, 0, IPC_INFO, &info), SyscallSucceedsWithValue(0));\n+ ASSERT_THAT(semctl(0, 0, IPC_INFO, &info), SyscallSucceeds());\nEXPECT_EQ(info.semmap, kSemMap);\nEXPECT_EQ(info.semmni, kSemMni);\nEXPECT_EQ(info.semmns, kSemMns);\n@@ -863,7 +857,6 @@ TEST(SemaphoreTest, SemInfo) {\nstruct seminfo info;\n// Drop CAP_IPC_OWNER which allows us to bypass semaphore permissions.\nASSERT_NO_ERRNO(SetCapability(CAP_IPC_OWNER, false));\n- ASSERT_THAT(semctl(0, 0, IPC_INFO, &info), SyscallSucceedsWithValue(0));\nfor (int i = 0; i < kLoops; i++) {\nAutoSem sem(semget(IPC_PRIVATE, kSemSetSize, 0600 | IPC_CREAT));\nASSERT_THAT(sem.get(), SyscallSucceeds());\n@@ -880,17 +873,19 @@ TEST(SemaphoreTest, SemInfo) {\nEXPECT_EQ(info.semmsl, kSemMsl);\nEXPECT_EQ(info.semopm, kSemOpm);\nEXPECT_EQ(info.semume, kSemUme);\n- EXPECT_EQ(info.semusz, sem_ids.size());\n+ // There could be semaphores existing in the system during the test, which\n+ // prevents the test from getting a exact number, but the test could expect at\n+ // least the number of sempahroes it creates in the begining of the test.\n+ EXPECT_GE(info.semusz, sem_ids.size());\nEXPECT_EQ(info.semvmx, kSemVmx);\n- EXPECT_EQ(info.semaem, sem_ids.size() * kSemSetSize);\n+ EXPECT_GE(info.semaem, sem_ids.size() * kSemSetSize);\n- int index_count = 0;\n+ std::set<int> sem_ids_before_max_index;\nfor (int i = 0; i <= max_used_index; i++) {\nstruct semid_ds ds = {};\nint sem_id = semctl(i, 0, SEM_STAT, &ds);\n// Only if index i is used within the registry.\n- if (sem_id != -1) {\n- ASSERT_THAT(sem_ids, Contains(sem_id));\n+ if (sem_ids.find(sem_id) != sem_ids.end()) {\nstruct 
semid_ds ipc_stat_ds;\nASSERT_THAT(semctl(sem_id, 0, IPC_STAT, &ipc_stat_ds), SyscallSucceeds());\nEXPECT_EQ(ds.sem_perm.__key, ipc_stat_ds.sem_perm.__key);\n@@ -912,17 +907,15 @@ TEST(SemaphoreTest, SemInfo) {\nASSERT_THAT(semctl(sem_id, 0, IPC_SET, &ipc_set_ds), SyscallSucceeds());\nASSERT_THAT(semctl(i, 0, SEM_STAT, &ds), SyscallFailsWithErrno(EACCES));\n- index_count += 1;\n+ sem_ids_before_max_index.insert(sem_id);\n}\n}\n- EXPECT_EQ(index_count, kLoops);\n- ASSERT_THAT(semctl(0, 0, SEM_INFO, &info),\n- SyscallSucceedsWithValue(max_used_index));\n+ EXPECT_EQ(sem_ids_before_max_index.size(), kLoops);\nfor (const int sem_id : sem_ids) {\nASSERT_THAT(semctl(sem_id, 0, IPC_RMID), SyscallSucceeds());\n}\n- ASSERT_THAT(semctl(0, 0, SEM_INFO, &info), SyscallSucceedsWithValue(0));\n+ ASSERT_THAT(semctl(0, 0, SEM_INFO, &info), SyscallSucceeds());\nEXPECT_EQ(info.semmap, kSemMap);\nEXPECT_EQ(info.semmni, kSemMni);\nEXPECT_EQ(info.semmns, kSemMns);\n@@ -930,9 +923,11 @@ TEST(SemaphoreTest, SemInfo) {\nEXPECT_EQ(info.semmsl, kSemMsl);\nEXPECT_EQ(info.semopm, kSemOpm);\nEXPECT_EQ(info.semume, kSemUme);\n- EXPECT_EQ(info.semusz, 0);\n+ // Apart from semapahores that are not created by the test, we can't determine\n+ // the exact number of semaphore sets and semaphores, as a result, semusz and\n+ // semaem range from 0 to a random number. Since the numbers are always\n+ // non-negative, the test will not check the reslts of semusz and semaem.\nEXPECT_EQ(info.semvmx, kSemVmx);\n- EXPECT_EQ(info.semaem, 0);\n}\n} // namespace\n" } ]
Go
Apache License 2.0
google/gvisor
Deflake semaphore_test_native PiperOrigin-RevId: 349490873
259,858
29.12.2020 17:44:25
28,800
3c58405a544dcd599bd84406b5d52848941675f7
Exclude changing generated file from go_branch.
[ { "change_type": "MODIFY", "old_path": "tools/go_branch.sh", "new_path": "tools/go_branch.sh", "diff": "@@ -89,8 +89,14 @@ git merge --no-commit --strategy ours \"${head}\" || \\\nfind . -type f -exec chmod 0644 {} \\;\nfind . -type d -exec chmod 0755 {} \\;\n-# Sync the entire gopath_dir.\n-rsync --recursive --delete --exclude .git -L \"${gopath_dir}/\" .\n+# Sync the entire gopath_dir. Note that we exclude auto-generated source\n+# files that will change here. Otherwise, it adds a tremendous amount of noise\n+# to commits. If this file disappears in the future, then presumably we will\n+# still delete the underlying directory.\n+rsync --recursive --delete \\\n+ --exclude .git \\\n+ --exclude webhook/pkg/injector/certs.go \\\n+ -L \"${gopath_dir}/\" .\n# Add additional files.\nfor file in \"${othersrc[@]}\"; do\n" } ]
Go
Apache License 2.0
google/gvisor
Exclude changing generated file from go_branch. PiperOrigin-RevId: 349491589
259,858
29.12.2020 23:26:29
28,800
ed5850e8eb98c33d90f7233c0fb196d74e347439
Add continuous VFS1 tests.
[ { "change_type": "MODIFY", "old_path": ".buildkite/pipeline.yaml", "new_path": ".buildkite/pipeline.yaml", "diff": "@@ -105,3 +105,30 @@ steps:\nlabel: \":python: Python runtime tests\"\ncommand: make python3.7.3-runtime-tests_vfs2\nparallelism: 10\n+\n+ # Runtime tests (VFS1).\n+ - <<: *common\n+ label: \":php: PHP runtime tests (VFS1)\"\n+ command: make php7.3.6-runtime-tests\n+ parallelism: 10\n+ if: build.message =~ /VFS1/ || build.branch == \"master\"\n+ - <<: *common\n+ label: \":java: Java runtime tests (VFS1)\"\n+ command: make java11-runtime-tests\n+ parallelism: 40\n+ if: build.message =~ /VFS1/ || build.branch == \"master\"\n+ - <<: *common\n+ label: \":golang: Go runtime tests (VFS1)\"\n+ command: make go1.12-runtime-tests\n+ parallelism: 10\n+ if: build.message =~ /VFS1/ || build.branch == \"master\"\n+ - <<: *common\n+ label: \":node: NodeJS runtime tests (VFS1)\"\n+ command: make nodejs12.4.0-runtime-tests\n+ parallelism: 10\n+ if: build.message =~ /VFS1/ || build.branch == \"master\"\n+ - <<: *common\n+ label: \":python: Python runtime tests (VFS1)\"\n+ command: make python3.7.3-runtime-tests\n+ parallelism: 10\n+ if: build.message =~ /VFS1/ || build.branch == \"master\"\n" } ]
Go
Apache License 2.0
google/gvisor
Add continuous VFS1 tests. PiperOrigin-RevId: 349517093
259,992
30.12.2020 11:16:12
28,800
1b66bad7c47e914994e19f39119d91ab6805002a
Fix condition checking in `runsc debug` Closes
[ { "change_type": "MODIFY", "old_path": "runsc/cmd/debug.go", "new_path": "runsc/cmd/debug.go", "diff": "@@ -124,7 +124,7 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n}\n}\n- if c.IsSandboxRunning() {\n+ if !c.IsSandboxRunning() {\nreturn Errorf(\"container sandbox is not running\")\n}\nlog.Infof(\"Found sandbox %q, PID: %d\", c.Sandbox.ID, c.Sandbox.Pid)\n" } ]
Go
Apache License 2.0
google/gvisor
Fix condition checking in `runsc debug` Closes #5052 PiperOrigin-RevId: 349579814
259,858
30.12.2020 14:51:23
28,800
0fb5de1154411bd207dadae31c37054db9941061
Use a stable ordering for generated types. Otherwise this pollutes the 'go' branch and doesn't conform to standards for generated bazel files.
[ { "change_type": "MODIFY", "old_path": "tools/go_marshal/gomarshal/generator.go", "new_path": "tools/go_marshal/gomarshal/generator.go", "diff": "@@ -447,7 +447,15 @@ func (g *Generator) Run() error {\nfor i, a := range asts {\n// Collect type declarations marked for code generation and generate\n// Marshallable interfaces.\n+ var sortedTypes []*marshallableType\nfor _, t := range g.collectMarshallableTypes(a, fsets[i]) {\n+ sortedTypes = append(sortedTypes, t)\n+ }\n+ sort.Slice(sortedTypes, func(x, y int) bool {\n+ // Sort by type name, which should be unique within a package.\n+ return sortedTypes[x].spec.Name.String() < sortedTypes[y].spec.Name.String()\n+ })\n+ for _, t := range sortedTypes {\nimpl := g.generateOne(t, fsets[i])\n// Collect Marshallable types referenced by the generated code.\nfor ref := range impl.ms {\n" } ]
Go
Apache License 2.0
google/gvisor
Use a stable ordering for generated types. Otherwise this pollutes the 'go' branch and doesn't conform to standards for generated bazel files. PiperOrigin-RevId: 349605037
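The determinism fix in this record comes down to sorting map-derived items by a stable key before emitting code. A minimal, self-contained Go sketch of that idea follows; the decl type and names are invented for illustration (gvisor's generator sorts marshallable types by their spec name).

package main

import (
	"fmt"
	"sort"
)

type decl struct{ name, body string }

// emit renders declarations in a deterministic order so that map iteration
// order never leaks into the generated output.
func emit(decls map[string]decl) []string {
	sorted := make([]decl, 0, len(decls))
	for _, d := range decls {
		sorted = append(sorted, d)
	}
	// Sort by name, which is unique within a package.
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].name < sorted[j].name })

	out := make([]string, 0, len(sorted))
	for _, d := range sorted {
		out = append(out, fmt.Sprintf("// %s\n%s", d.name, d.body))
	}
	return out
}

func main() {
	decls := map[string]decl{
		"B": {name: "B", body: "type B struct{}"},
		"A": {name: "A", body: "type A struct{}"},
	}
	for _, s := range emit(decls) {
		fmt.Println(s)
	}
}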
259,860
30.12.2020 15:20:34
28,800
4691a8125370d61ccb9732574e97ccc26362af42
Add test for open(2) with O_WRONLY|O_RDWR.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/open.cc", "new_path": "test/syscalls/linux/open.cc", "diff": "@@ -505,6 +505,18 @@ TEST_F(OpenTest, OpenNonDirectoryWithTrailingSlash) {\nEXPECT_THAT(open(bad_path.c_str(), O_RDONLY), SyscallFailsWithErrno(ENOTDIR));\n}\n+TEST_F(OpenTest, OpenWithStrangeFlags) {\n+ // VFS1 incorrectly allows read/write operations on such file descriptors.\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY | O_RDWR));\n+ EXPECT_THAT(write(fd.get(), \"x\", 1), SyscallFailsWithErrno(EBADF));\n+ char c;\n+ EXPECT_THAT(read(fd.get(), &c, 1), SyscallFailsWithErrno(EBADF));\n+}\n+\n} // namespace\n} // namespace testing\n" } ]
Go
Apache License 2.0
google/gvisor
Add test for open(2) with O_WRONLY|O_RDWR. PiperOrigin-RevId: 349607959
259,858
30.12.2020 16:52:06
28,800
3b1d37f6ab5ca547020fdd573d3bf6a621313132
Remove remote execution support.
[ { "change_type": "MODIFY", "old_path": ".bazelrc", "new_path": ".bazelrc", "diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n-# RBE requires a strong hash function, such as SHA256.\n+# Ensure a strong hash function.\nstartup --host_jvm_args=-Dbazel.DigestFunction=SHA256\n# Build with C++17.\n@@ -20,27 +20,3 @@ build --cxxopt=-std=c++17\n# Display the current git revision in the info block.\nbuild --stamp --workspace_status_command tools/workspace_status.sh\n-\n-# Enable remote execution so actions are performed on the remote systems.\n-build:remote --remote_executor=grpcs://remotebuildexecution.googleapis.com\n-build:remote --bes_backend=buildeventservice.googleapis.com\n-build:remote --bes_results_url=\"https://source.cloud.google.com/results/invocations\"\n-build:remote --bes_timeout=600s\n-build:remote --project_id=gvisor-rbe\n-build:remote --remote_instance_name=projects/gvisor-rbe/instances/default_instance\n-\n-# Enable authentication. This will pick up application default credentials by\n-# default. You can use --google_credentials=some_file.json to use a service\n-# account credential instead.\n-build:remote --google_default_credentials=true\n-build:remote --auth_scope=\"https://www.googleapis.com/auth/cloud-source-tools\"\n-\n-# Add a custom platform and toolchain that builds in a privileged docker\n-# container, which is required by our syscall tests.\n-build:remote --host_platform=//tools/bazeldefs:rbe_ubuntu1604\n-build:remote --extra_toolchains=//tools/bazeldefs:cc-toolchain-clang-x86_64-default\n-build:remote --extra_execution_platforms=//tools/bazeldefs:rbe_ubuntu1604\n-build:remote --platforms=//tools/bazeldefs:rbe_ubuntu1604\n-build:remote --crosstool_top=@rbe_default//cc:toolchain\n-build:remote --jobs=100\n-build:remote --remote_timeout=3600\n" }, { "change_type": "MODIFY", "old_path": "tools/bazel.mk", "new_path": "tools/bazel.mk", "diff": "@@ -59,15 +59,10 @@ DOCKER_CONFIG := /etc/docker\n## to control which flags are passed:\n##\n## STARTUP_OPTIONS - Startup options passed to Bazel.\n-## BAZEL_CONFIG - A bazel config file.\n##\nSTARTUP_OPTIONS :=\n-BAZEL_CONFIG :=\nBAZEL := bazel $(STARTUP_OPTIONS)\nBASE_OPTIONS := --color=no --curses=no\n-ifneq (,$(BAZEL_CONFIG))\n-BASE_OPTIONS += --config=$(BAZEL_CONFIG)\n-endif\nTEST_OPTIONS := $(BASE_OPTIONS) \\\n--test_output=errors \\\n--keep_going \\\n" }, { "change_type": "MODIFY", "old_path": "tools/bazeldefs/BUILD", "new_path": "tools/bazeldefs/BUILD", "diff": "-load(\"//tools:defs.bzl\", \"bzl_library\", \"rbe_platform\", \"rbe_toolchain\")\n+load(\"//tools:defs.bzl\", \"bzl_library\")\npackage(licenses = [\"notice\"])\n-# We need to define a bazel platform and toolchain to specify dockerPrivileged\n-# and dockerRunAsRoot options, they are required to run tests on the RBE\n-# cluster in Kokoro.\n-rbe_platform(\n- name = \"rbe_ubuntu1604\",\n- constraint_values = [\n- \"@bazel_tools//platforms:x86_64\",\n- \"@bazel_tools//platforms:linux\",\n- \"@bazel_tools//tools/cpp:clang\",\n- \"@bazel_toolchains//constraints:xenial\",\n- \"@bazel_toolchains//constraints/sanitizers:support_msan\",\n- ],\n- remote_execution_properties = \"\"\"\n- properties: {\n- name: \"container-image\"\n- value:\"docker://gcr.io/cloud-marketplace/google/rbe-ubuntu16-04@sha256:b516a2d69537cb40a7c6a7d92d0008abb29fba8725243772bdaf2c83f1be2272\"\n- }\n- properties: {\n- name: \"dockerAddCapabilities\"\n- value: \"SYS_ADMIN\"\n- }\n- properties: {\n- name: \"dockerPrivileged\"\n- value: 
\"true\"\n- }\n- \"\"\",\n-)\n-\n-rbe_toolchain(\n- name = \"cc-toolchain-clang-x86_64-default\",\n- exec_compatible_with = [],\n- tags = [\n- \"manual\",\n- ],\n- target_compatible_with = [],\n- toolchain = \"@bazel_toolchains//configs/ubuntu16_04_clang/11.0.0/bazel_3.1.0/cc:cc-compiler-k8\",\n- toolchain_type = \"@bazel_tools//tools/cpp:toolchain_type\",\n-)\n-\nbzl_library(\nname = \"platforms_bzl\",\nsrcs = [\"platforms.bzl\"],\n" }, { "change_type": "MODIFY", "old_path": "tools/bazeldefs/defs.bzl", "new_path": "tools/bazeldefs/defs.bzl", "diff": "@@ -5,8 +5,6 @@ load(\"@bazel_skylib//:bzl_library.bzl\", _bzl_library = \"bzl_library\")\nbuild_test = _build_test\nbzl_library = _bzl_library\n-rbe_platform = native.platform\n-rbe_toolchain = native.toolchain\nmore_shards = 4\nmost_shards = 8\n" }, { "change_type": "MODIFY", "old_path": "tools/defs.bzl", "new_path": "tools/defs.bzl", "diff": "@@ -8,7 +8,7 @@ change for Google-internal and bazel-compatible rules.\nload(\"//tools/go_stateify:defs.bzl\", \"go_stateify\")\nload(\"//tools/go_marshal:defs.bzl\", \"go_marshal\", \"marshal_deps\", \"marshal_test_deps\")\nload(\"//tools/nogo:defs.bzl\", \"nogo_test\")\n-load(\"//tools/bazeldefs:defs.bzl\", _arch_genrule = \"arch_genrule\", _build_test = \"build_test\", _bzl_library = \"bzl_library\", _coreutil = \"coreutil\", _default_installer = \"default_installer\", _default_net_util = \"default_net_util\", _more_shards = \"more_shards\", _most_shards = \"most_shards\", _proto_library = \"proto_library\", _rbe_platform = \"rbe_platform\", _rbe_toolchain = \"rbe_toolchain\", _select_arch = \"select_arch\", _select_system = \"select_system\", _short_path = \"short_path\")\n+load(\"//tools/bazeldefs:defs.bzl\", _arch_genrule = \"arch_genrule\", _build_test = \"build_test\", _bzl_library = \"bzl_library\", _coreutil = \"coreutil\", _default_installer = \"default_installer\", _default_net_util = \"default_net_util\", _more_shards = \"more_shards\", _most_shards = \"most_shards\", _proto_library = \"proto_library\", _select_arch = \"select_arch\", _select_system = \"select_system\", _short_path = \"short_path\")\nload(\"//tools/bazeldefs:cc.bzl\", _cc_binary = \"cc_binary\", _cc_flags_supplier = \"cc_flags_supplier\", _cc_grpc_library = \"cc_grpc_library\", _cc_library = \"cc_library\", _cc_proto_library = \"cc_proto_library\", _cc_test = \"cc_test\", _cc_toolchain = \"cc_toolchain\", _gbenchmark = \"gbenchmark\", _grpcpp = \"grpcpp\", _gtest = \"gtest\", _vdso_linker_option = \"vdso_linker_option\")\nload(\"//tools/bazeldefs:go.bzl\", _gazelle = \"gazelle\", _go_binary = \"go_binary\", _go_embed_data = \"go_embed_data\", _go_grpc_and_proto_libraries = \"go_grpc_and_proto_libraries\", _go_library = \"go_library\", _go_path = \"go_path\", _go_proto_library = \"go_proto_library\", _go_test = \"go_test\", _select_goarch = \"select_goarch\", _select_goos = \"select_goos\")\nload(\"//tools/bazeldefs:pkg.bzl\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\")\n@@ -24,8 +24,6 @@ default_net_util = _default_net_util\nselect_arch = _select_arch\nselect_system = _select_system\nshort_path = _short_path\n-rbe_platform = _rbe_platform\n-rbe_toolchain = _rbe_toolchain\ncoreutil = _coreutil\nmore_shards = _more_shards\nmost_shards = _most_shards\n" } ]
Go
Apache License 2.0
google/gvisor
Remove remote execution support. PiperOrigin-RevId: 349616845
259,853
05.01.2021 09:45:27
28,800
2a200811d4c95e1c84d2bdd56068f02d46ebc524
fs/fuse: check that a task has a specified file descriptor Reported-by:
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/fuse/fusefs.go", "new_path": "pkg/sentry/fsimpl/fuse/fusefs.go", "diff": "@@ -129,6 +129,9 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nreturn nil, nil, syserror.EINVAL\n}\nfuseFDGeneric := kernelTask.GetFileVFS2(int32(deviceDescriptor))\n+ if fuseFDGeneric == nil {\n+ return nil, nil, syserror.EINVAL\n+ }\ndefer fuseFDGeneric.DecRef(ctx)\nfuseFD, ok := fuseFDGeneric.Impl().(*DeviceFD)\nif !ok {\n" }, { "change_type": "MODIFY", "old_path": "test/fuse/linux/BUILD", "new_path": "test/fuse/linux/BUILD", "diff": "@@ -235,6 +235,7 @@ cc_binary(\nsrcs = [\"mount_test.cc\"],\ndeps = [\ngtest,\n+ \"//test/util:mount_util\",\n\"//test/util:temp_path\",\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n" }, { "change_type": "MODIFY", "old_path": "test/fuse/linux/mount_test.cc", "new_path": "test/fuse/linux/mount_test.cc", "diff": "#include <sys/mount.h>\n#include \"gtest/gtest.h\"\n+#include \"test/util/mount_util.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n@@ -25,6 +26,17 @@ namespace testing {\nnamespace {\n+TEST(FuseMount, Success) {\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(\"/dev/fuse\", O_WRONLY));\n+ std::string mopts = absl::StrCat(\"fd=\", std::to_string(fd.get()));\n+\n+ const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+\n+ const auto mount =\n+ ASSERT_NO_ERRNO_AND_VALUE(Mount(\"\", dir.path(), \"fuse\", 0, mopts, 0));\n+}\n+\nTEST(FuseMount, FDNotParsable) {\nint devfd;\nEXPECT_THAT(devfd = open(\"/dev/fuse\", O_RDWR), SyscallSucceeds());\n@@ -35,6 +47,36 @@ TEST(FuseMount, FDNotParsable) {\nSyscallFailsWithErrno(EINVAL));\n}\n+TEST(FuseMount, NoDevice) {\n+ const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+\n+ EXPECT_THAT(mount(\"\", dir.path().c_str(), \"fuse\", 0, \"\"),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\n+TEST(FuseMount, ClosedFD) {\n+ FileDescriptor f = ASSERT_NO_ERRNO_AND_VALUE(Open(\"/dev/fuse\", O_WRONLY));\n+ int fd = f.release();\n+ close(fd);\n+ std::string mopts = absl::StrCat(\"fd=\", std::to_string(fd));\n+\n+ const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+\n+ EXPECT_THAT(mount(\"\", dir.path().c_str(), \"fuse\", 0, mopts.c_str()),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\n+TEST(FuseMount, BadFD) {\n+ const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));\n+ std::string mopts = absl::StrCat(\"fd=\", std::to_string(fd.get()));\n+\n+ EXPECT_THAT(mount(\"\", dir.path().c_str(), \"fuse\", 0, mopts.c_str()),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\n} // namespace\n} // namespace testing\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/mount.cc", "new_path": "test/syscalls/linux/mount.cc", "diff": "@@ -345,42 +345,6 @@ TEST(MountTest, RenameRemoveMountPoint) {\nASSERT_THAT(rmdir(dir.path().c_str()), SyscallFailsWithErrno(EBUSY));\n}\n-TEST(MountTest, MountFuseFilesystemNoDevice) {\n- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n- SKIP_IF(IsRunningOnGvisor() && !IsFUSEEnabled());\n-\n- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n-\n- // Before kernel version 4.16-rc6, FUSE mount is protected by\n- // capable(CAP_SYS_ADMIN). After this version, it uses\n- // ns_capable(CAP_SYS_ADMIN) to protect. 
Before the 4.16 kernel, it was not\n- // allowed to mount fuse file systems without the global CAP_SYS_ADMIN.\n- int res = mount(\"\", dir.path().c_str(), \"fuse\", 0, \"\");\n- SKIP_IF(!IsRunningOnGvisor() && res == -1 && errno == EPERM);\n-\n- EXPECT_THAT(mount(\"\", dir.path().c_str(), \"fuse\", 0, \"\"),\n- SyscallFailsWithErrno(EINVAL));\n-}\n-\n-TEST(MountTest, MountFuseFilesystem) {\n- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n- SKIP_IF(IsRunningOnGvisor() && !IsFUSEEnabled());\n-\n- const FileDescriptor fd =\n- ASSERT_NO_ERRNO_AND_VALUE(Open(\"/dev/fuse\", O_WRONLY));\n- std::string mopts = \"fd=\" + std::to_string(fd.get());\n-\n- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n-\n- // See comments in MountFuseFilesystemNoDevice for the reason why we skip\n- // EPERM when running on Linux.\n- int res = mount(\"\", dir.path().c_str(), \"fuse\", 0, \"\");\n- SKIP_IF(!IsRunningOnGvisor() && res == -1 && errno == EPERM);\n-\n- auto const mount =\n- ASSERT_NO_ERRNO_AND_VALUE(Mount(\"\", dir.path(), \"fuse\", 0, mopts, 0));\n-}\n-\n} // namespace\n} // namespace testing\n" } ]
Go
Apache License 2.0
google/gvisor
fs/fuse: check that a task has a specified file descriptor Reported-by: [email protected] PiperOrigin-RevId: 350159452
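The fix in this record rejects a FUSE mount whose fd= option does not name an open descriptor in the mounting task. As a rough standalone illustration for an ordinary process: gvisor instead looks the value up in the task's descriptor table, so the FcntlInt probe below is only an assumed stand-in for that check.

package main

import (
	"fmt"
	"strconv"
	"strings"

	"golang.org/x/sys/unix"
)

// parseFuseFD extracts and validates the fd= option from mount(2) data.
func parseFuseFD(data string) (int, error) {
	for _, opt := range strings.Split(data, ",") {
		if !strings.HasPrefix(opt, "fd=") {
			continue
		}
		fd, err := strconv.Atoi(strings.TrimPrefix(opt, "fd="))
		if err != nil {
			return -1, fmt.Errorf("fd not parsable: %v", err)
		}
		// F_GETFD fails with EBADF if the descriptor is not open.
		if _, err := unix.FcntlInt(uintptr(fd), unix.F_GETFD, 0); err != nil {
			return -1, fmt.Errorf("fd %d is not an open descriptor: %v", fd, err)
		}
		return fd, nil
	}
	return -1, fmt.Errorf("no fd= option in %q", data)
}

func main() {
	// fd 0 (stdin) is normally open, so this parses and validates.
	fd, err := parseFuseFD("rootmode=40000,fd=0")
	fmt.Println(fd, err)
}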
259,858
05.01.2021 12:38:12
28,800
33d59811fc4882a1fffeb7452be7a370fb769ddf
Change the build badge to point to BuildKite.
[ { "change_type": "MODIFY", "old_path": ".github/workflows/build.yml", "new_path": ".github/workflows/build.yml", "diff": "# posts them to GitHub, if applicable. This leverages the fact that the\n# workflow token has appropriate permissions to do so, and attempts to\n# leverage the GitHub workflow caches.\n-#\n-# This workflow also generates the build badge that is referred to by\n-# the main README.\nname: \"Build\"\n\"on\":\npush:\n" }, { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "![gVisor](g3doc/logo.png)\n-![](https://github.com/google/gvisor/workflows/Build/badge.svg)\n+[![Build status](https://badge.buildkite.com/3b159f20b9830461a71112566c4171c0bdfd2f980a8e4c0ae6.svg?branch=master)](https://buildkite.com/gvisor/pipeline)\n[![gVisor chat](https://badges.gitter.im/gvisor/community.png)](https://gitter.im/gvisor/community)\n[![code search](https://img.shields.io/badge/code-search-blue)](https://cs.opensource.google/gvisor/gvisor)\n" } ]
Go
Apache License 2.0
google/gvisor
Change the build badge to point to BuildKite. PiperOrigin-RevId: 350197814
259,891
05.01.2021 14:43:35
28,800
ce7a4440cae8ee4b2a41808f967c9847cafd2937
Fix panic when parsing SO_TIMESTAMP cmsg
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/control/BUILD", "new_path": "pkg/sentry/socket/control/BUILD", "diff": "-load(\"//tools:defs.bzl\", \"go_library\")\n+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\npackage(licenses = [\"notice\"])\n@@ -26,3 +26,17 @@ go_library(\n\"//pkg/usermem\",\n],\n)\n+\n+go_test(\n+ name = \"control_test\",\n+ size = \"small\",\n+ srcs = [\"control_test.go\"],\n+ library = \":control\",\n+ deps = [\n+ \"//pkg/abi/linux\",\n+ \"//pkg/binary\",\n+ \"//pkg/sentry/socket\",\n+ \"//pkg/usermem\",\n+ \"@com_github_google_go_cmp//cmp:go_default_library\",\n+ ],\n+)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/control/control.go", "new_path": "pkg/sentry/socket/control/control.go", "diff": "@@ -463,7 +463,7 @@ func CmsgsSpace(t *kernel.Task, cmsgs socket.ControlMessages) int {\n}\n// Parse parses a raw socket control message into portable objects.\n-func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte) (socket.ControlMessages, error) {\n+func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) (socket.ControlMessages, error) {\nvar (\ncmsgs socket.ControlMessages\nfds linux.ControlMessageRights\n@@ -487,10 +487,6 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte) (socket.Con\ni += linux.SizeOfControlMessageHeader\nlength := int(h.Length) - linux.SizeOfControlMessageHeader\n- // The use of t.Arch().Width() is analogous to Linux's use of\n- // sizeof(long) in CMSG_ALIGN.\n- width := t.Arch().Width()\n-\nswitch h.Level {\ncase linux.SOL_SOCKET:\nswitch h.Type {\n@@ -526,8 +522,10 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte) (socket.Con\nif length < linux.SizeOfTimeval {\nreturn socket.ControlMessages{}, syserror.EINVAL\n}\n+ var ts linux.Timeval\n+ binary.Unmarshal(buf[i:i+linux.SizeOfTimeval], usermem.ByteOrder, &ts)\n+ cmsgs.IP.Timestamp = ts.ToNsecCapped()\ncmsgs.IP.HasTimestamp = true\n- binary.Unmarshal(buf[i:i+linux.SizeOfTimeval], usermem.ByteOrder, &cmsgs.IP.Timestamp)\ni += binary.AlignUp(length, width)\ndefault:\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/sentry/socket/control/control_test.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package control provides internal representations of socket control\n+// messages.\n+package control\n+\n+import (\n+ \"testing\"\n+\n+ \"github.com/google/go-cmp/cmp\"\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/binary\"\n+ \"gvisor.dev/gvisor/pkg/sentry/socket\"\n+ \"gvisor.dev/gvisor/pkg/usermem\"\n+)\n+\n+func TestParse(t *testing.T) {\n+ // Craft the control message to parse.\n+ length := linux.SizeOfControlMessageHeader + linux.SizeOfTimeval\n+ hdr := linux.ControlMessageHeader{\n+ Length: uint64(length),\n+ Level: linux.SOL_SOCKET,\n+ Type: linux.SO_TIMESTAMP,\n+ }\n+ buf := make([]byte, 0, length)\n+ buf = binary.Marshal(buf, usermem.ByteOrder, &hdr)\n+ ts := 
linux.Timeval{\n+ Sec: 2401,\n+ Usec: 343,\n+ }\n+ buf = binary.Marshal(buf, usermem.ByteOrder, &ts)\n+\n+ cmsg, err := Parse(nil, nil, buf, 8 /* width */)\n+ if err != nil {\n+ t.Fatalf(\"Parse(_, _, %+v, _): %v\", cmsg, err)\n+ }\n+\n+ want := socket.ControlMessages{\n+ IP: socket.IPControlMessages{\n+ HasTimestamp: true,\n+ Timestamp: ts.ToNsecCapped(),\n+ },\n+ }\n+ if diff := cmp.Diff(want, cmsg); diff != \"\" {\n+ t.Errorf(\"unexpected message parsed, (-want, +got):\\n%s\", diff)\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_socket.go", "new_path": "pkg/sentry/syscalls/linux/sys_socket.go", "diff": "@@ -1030,7 +1030,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme\nreturn 0, err\n}\n- controlMessages, err := control.Parse(t, s, controlData)\n+ controlMessages, err := control.Parse(t, s, controlData, t.Arch().Width())\nif err != nil {\nreturn 0, err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/socket.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/socket.go", "diff": "@@ -1033,7 +1033,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio\nreturn 0, err\n}\n- controlMessages, err := control.Parse(t, s, controlData)\n+ controlMessages, err := control.Parse(t, s, controlData, t.Arch().Width())\nif err != nil {\nreturn 0, err\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix panic when parsing SO_TIMESTAMP cmsg PiperOrigin-RevId: 350223482
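The panic fix in this record switches to decoding the wire-format struct timeval first and converting it afterwards, instead of unmarshaling the 16-byte payload straight into an int64 field. Below is a standard-library sketch of that decode-then-convert pattern; the 64-bit field sizes and little-endian byte order are assumptions of the example, and gvisor's real parser uses its own binary and linux packages.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"log"
)

// timeval mirrors the 64-bit struct timeval layout carried in the cmsg payload.
type timeval struct {
	Sec  int64
	Usec int64
}

func main() {
	// Wire bytes as the kernel would emit them for SO_TIMESTAMP.
	var wire bytes.Buffer
	if err := binary.Write(&wire, binary.LittleEndian, timeval{Sec: 2401, Usec: 343}); err != nil {
		log.Fatal(err)
	}

	// Decode into the struct that matches the wire layout first...
	var tv timeval
	if err := binary.Read(&wire, binary.LittleEndian, &tv); err != nil {
		log.Fatal(err)
	}
	// ...then convert to nanoseconds for internal use.
	nanos := tv.Sec*1_000_000_000 + tv.Usec*1_000
	fmt.Println(nanos) // 2401000343000
}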
259,853
05.01.2021 16:46:00
28,800
b9b99d3d26b0d4907e8d24b4a842b31a91151aab
Don't check that msg_flags contains MSG_ERRQUEUE on gvisor platforms.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/udp_socket.cc", "new_path": "test/syscalls/linux/udp_socket.cc", "diff": "@@ -835,6 +835,11 @@ TEST_P(UdpSocketTest, RecvErrorConnRefused) {\n// Check the contents of msg.\nEXPECT_EQ(memcmp(got, buf, sizeof(buf)), 0); // iovec check\n+ // TODO(b/176251997): The next check fails on the gvisor platform due to the\n+ // kernel bug.\n+ if (!IsRunningWithHostinet() || GvisorPlatform() == Platform::kPtrace ||\n+ GvisorPlatform() == Platform::kKVM ||\n+ GvisorPlatform() == Platform::kNative)\nEXPECT_NE(msg.msg_flags & MSG_ERRQUEUE, 0);\nEXPECT_EQ(memcmp(&remote, bind_addr_, addrlen_), 0);\n" } ]
Go
Apache License 2.0
google/gvisor
Don't check that msg_flags contains MSG_ERRQUEUE on gvisor platforms. PiperOrigin-RevId: 350246333
259,858
05.01.2021 18:13:18
28,800
ab32fa2481d84e3f390f2cd81e25c537756b7aa1
Make type sanity checking happen only in race builds. This adds significant costs to startup, since it is done for every type in the system. Since the state package already saves sanity checks for race builds, use this for type registration.
[ { "change_type": "MODIFY", "old_path": "pkg/state/tests/register_test.go", "new_path": "pkg/state/tests/register_test.go", "diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+// +build race\n+\npackage tests\nimport (\n@@ -165,3 +167,12 @@ func TestRegisterBad(t *testing.T) {\n}\n}\n+\n+func TestRegisterTypeOnlyStruct(t *testing.T) {\n+ defer func() {\n+ if r := recover(); r == nil {\n+ t.Errorf(\"Register did not panic\")\n+ }\n+ }()\n+ state.Register((*typeOnlyEmptyStruct)(nil))\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/state/tests/struct_test.go", "new_path": "pkg/state/tests/struct_test.go", "diff": "@@ -17,8 +17,6 @@ package tests\nimport (\n\"math/rand\"\n\"testing\"\n-\n- \"gvisor.dev/gvisor/pkg/state\"\n)\nfunc TestEmptyStruct(t *testing.T) {\n@@ -58,15 +56,6 @@ func TestEmptyStruct(t *testing.T) {\n})\n}\n-func TestRegisterTypeOnlyStruct(t *testing.T) {\n- defer func() {\n- if r := recover(); r == nil {\n- t.Errorf(\"Register did not panic\")\n- }\n- }()\n- state.Register((*typeOnlyEmptyStruct)(nil))\n-}\n-\nfunc TestEmbeddedPointers(t *testing.T) {\n// Give each int64 a random value to prevent Go from using\n// runtime.staticuint64s, which confounds tests for struct duplication.\n" }, { "change_type": "MODIFY", "old_path": "pkg/state/types.go", "new_path": "pkg/state/types.go", "diff": "@@ -329,14 +329,16 @@ var reverseTypeDatabase = map[reflect.Type]string{}\n// This must be called on init and only done once.\nfunc Register(t Type) {\nname := t.StateTypeName()\n- fields := t.StateFields()\n- assertValidType(name, fields)\n- // Register must always be called on pointers.\ntyp := reflect.TypeOf(t)\n+ if raceEnabled {\n+ assertValidType(name, t.StateFields())\n+ // Register must always be called on pointers.\nif typ.Kind() != reflect.Ptr {\nFailf(\"Register must be called on pointers\")\n}\n+ }\ntyp = typ.Elem()\n+ if raceEnabled {\nif typ.Kind() == reflect.Struct {\n// All registered structs must implement SaverLoader. We allow\n// the registration is non-struct types with just the Type\n@@ -350,7 +352,7 @@ func Register(t Type) {\n// calling StateSave/StateLoad methods on any non-struct types.\n// If custom behavior is required, these types should be\n// wrapped in a structure of some kind.\n- if len(fields) != 0 {\n+ if fields := t.StateFields(); len(fields) != 0 {\nFailf(\"non-struct %T has non-zero fields %v\", t, fields)\n}\n// We don't allow non-structs to implement StateSave/StateLoad\n@@ -368,8 +370,7 @@ func Register(t Type) {\nif name == interfaceType {\nFailf(\"conflicting name for %T: matches interfaceType\", t)\n}\n- globalTypeDatabase[name] = typ\n- if raceEnabled {\nreverseTypeDatabase[typ] = name\n}\n+ globalTypeDatabase[name] = typ\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Make type sanity checking happen only in race builds. This adds significant costs to startup, since it is done for every type in the system. Since the state package already saves sanity checks for race builds, use this for type registration. PiperOrigin-RevId: 350259336
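The raceEnabled flag used in this record's diff is conventionally supplied by a pair of files selected with the race build tag, so the checks compile away entirely in normal builds. A sketch of that pattern follows; the file names, package name, and the single pointer check are illustrative, not gvisor's actual layout.

// race_enabled.go
// +build race

package registry

const raceEnabled = true

// race_disabled.go
// +build !race

package registry

const raceEnabled = false

// register.go
package registry

import "reflect"

// Register records t, performing the costly sanity checks only when the
// binary was built with -race.
func Register(t interface{}) {
	if raceEnabled {
		if reflect.TypeOf(t).Kind() != reflect.Ptr {
			panic("Register must be called on pointers")
		}
	}
	// ... normal registration work ...
}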
259,858
05.01.2021 18:52:55
28,800
a1e3845b65c266fe083e67811657bb7b764c4413
Make build command more robust. This returns all targets, and handles no targets.
[ { "change_type": "MODIFY", "old_path": "tools/bazel.mk", "new_path": "tools/bazel.mk", "diff": "@@ -189,13 +189,13 @@ build_paths = \\\n(set -euo pipefail; \\\n$(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(1)) 2>&1 \\\n| tee /proc/self/fd/2 \\\n- | grep -A1 -E '^Target' \\\n- | grep -E '^ ($(subst $(SPACE),|,$(BUILD_ROOTS)))' \\\n- | sed \"s/ /\\n/g\" \\\n- | strings -n 10 \\\n+ | sed -n -e '/^Target/,$$p' \\\n+ | sed -n -e '/^ \\($(subst /,\\/,$(subst $(SPACE),\\|,$(BUILD_ROOTS)))\\)/p' \\\n+ | sed -e 's/ /\\n/g' \\\n| awk '{$$1=$$1};1' \\\n- | xargs -n 1 -I {} readlink -f \"{}\" \\\n- | xargs -n 1 -I {} bash -c 'set -xeuo pipefail; $(2)')\n+ | strings \\\n+ | xargs -r -n 1 -I {} readlink -f \"{}\" \\\n+ | xargs -r -n 1 -I {} bash -c 'set -xeuo pipefail; $(2)')\nclean = $(call header,CLEAN) && $(call wrapper,$(BAZEL) clean)\nbuild = $(call header,BUILD $(1)) && $(call build_paths,$(1),echo {})\n" }, { "change_type": "MODIFY", "old_path": "tools/workspace_status.sh", "new_path": "tools/workspace_status.sh", "diff": "# limitations under the License.\n# The STABLE_ prefix will trigger a re-link if it changes.\n-echo STABLE_VERSION $(git describe --always --tags --abbrev=12 --dirty || echo 0.0.0)\n+echo STABLE_VERSION \"$(git describe --always --tags --abbrev=12 --dirty 2>/dev/null || echo 0.0.0)\"\n" } ]
Go
Apache License 2.0
google/gvisor
Make build command more robust. This returns all targets, and handles no targets. PiperOrigin-RevId: 350263578
259,881
06.01.2021 08:15:48
28,800
23f94cee675744dd4e5d6cbcca5a3492d08e28fb
Include objdump failures in test output. We log a warning if objdump fails, but this appears in the build log, not test log, which can make it hard to notice. Include it with the actual escape output as context on "(possible)" to make it more clear when something is wrong.
[ { "change_type": "MODIFY", "old_path": "tools/checkescape/checkescape.go", "new_path": "tools/checkescape/checkescape.go", "diff": "@@ -618,12 +618,12 @@ func findReasons(pass *analysis.Pass, fdecl *ast.FuncDecl) ([]EscapeReason, bool\n// run performs the analysis.\nfunc run(pass *analysis.Pass, localEscapes bool) (interface{}, error) {\n- calls, err := loadObjdump()\n- if err != nil {\n+ calls, callsErr := loadObjdump()\n+ if callsErr != nil {\n// Note that if this analysis fails, then we don't actually\n// fail the analyzer itself. We simply report every possible\n// escape. In most cases this will work just fine.\n- log.Printf(\"WARNING: unable to load objdump: %v\", err)\n+ log.Printf(\"WARNING: unable to load objdump: %v\", callsErr)\n}\nallEscapes := make(map[string][]Escapes)\nmergedEscapes := make(map[string]Escapes)\n@@ -645,10 +645,10 @@ func run(pass *analysis.Pass, localEscapes bool) (interface{}, error) {\n}\nhasCall := func(inst poser) (string, bool) {\np := linePosition(inst, nil)\n- if calls == nil {\n+ if callsErr != nil {\n// See above: we don't have access to the binary\n// itself, so need to include every possible call.\n- return \"(possible)\", true\n+ return fmt.Sprintf(\"(possible, unable to load objdump: %v)\", callsErr), true\n}\ns, ok := calls[p.Simplified()]\nif !ok {\n" } ]
Go
Apache License 2.0
google/gvisor
Include objdump failures in test output. We log a warning if objdump fails, but this appears in the build log, not test log, which can make it hard to notice. Include it with the actual escape output as context on "(possible)" to make it more clear when something is wrong. PiperOrigin-RevId: 350355759
260,004
06.01.2021 14:01:05
28,800
7817e3b5e4202eb2001806b5043052a2f5b591a4
Do not filter frames in ethernet link endpoint Ethernet frames are usually filtered at the hardware-level so there is no need to filter the frames in software. For test purposes, a new link endpoint was introduced to filter frames based on their destination.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/ethernet/BUILD", "new_path": "pkg/tcpip/link/ethernet/BUILD", "diff": "-load(\"//tools:defs.bzl\", \"go_library\")\n+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\npackage(licenses = [\"notice\"])\n@@ -13,3 +13,17 @@ go_library(\n\"//pkg/tcpip/stack\",\n],\n)\n+\n+go_test(\n+ name = \"ethernet_test\",\n+ size = \"small\",\n+ srcs = [\"ethernet_test.go\"],\n+ deps = [\n+ \":ethernet\",\n+ \"//pkg/tcpip\",\n+ \"//pkg/tcpip/buffer\",\n+ \"//pkg/tcpip/header\",\n+ \"//pkg/tcpip/link/channel\",\n+ \"//pkg/tcpip/stack\",\n+ ],\n+)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/ethernet/ethernet.go", "new_path": "pkg/tcpip/link/ethernet/ethernet.go", "diff": "@@ -49,10 +49,10 @@ func (e *Endpoint) DeliverNetworkPacket(_, _ tcpip.LinkAddress, _ tcpip.NetworkP\nreturn\n}\n+ // Note, there is no need to check the destination link address here since\n+ // the ethernet hardware filters frames based on their destination addresses.\neth := header.Ethernet(hdr)\n- if dst := eth.DestinationAddress(); dst == e.Endpoint.LinkAddress() || dst == header.EthernetBroadcastAddress || header.IsMulticastEthernetAddress(dst) {\n- e.Endpoint.DeliverNetworkPacket(eth.SourceAddress() /* remote */, dst /* local */, eth.Type() /* protocol */, pkt)\n- }\n+ e.Endpoint.DeliverNetworkPacket(eth.SourceAddress() /* remote */, eth.DestinationAddress() /* local */, eth.Type() /* protocol */, pkt)\n}\n// Capabilities implements stack.LinkEndpoint.\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/tcpip/link/ethernet/ethernet_test.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package ethernet_test\n+\n+import (\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/pkg/tcpip\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/link/channel\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/link/ethernet\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/stack\"\n+)\n+\n+var _ stack.NetworkDispatcher = (*testNetworkDispatcher)(nil)\n+\n+type testNetworkDispatcher struct {\n+ networkPackets int\n+}\n+\n+func (t *testNetworkDispatcher) DeliverNetworkPacket(_, _ tcpip.LinkAddress, _ tcpip.NetworkProtocolNumber, _ *stack.PacketBuffer) {\n+ t.networkPackets++\n+}\n+\n+func (*testNetworkDispatcher) DeliverOutboundPacket(_, _ tcpip.LinkAddress, _ tcpip.NetworkProtocolNumber, _ *stack.PacketBuffer) {\n+}\n+\n+func TestDeliverNetworkPacket(t *testing.T) {\n+ const (\n+ linkAddr = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x06\")\n+ otherLinkAddr1 = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x07\")\n+ otherLinkAddr2 = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x08\")\n+ )\n+\n+ e := ethernet.New(channel.New(0, 0, linkAddr))\n+ var networkDispatcher testNetworkDispatcher\n+ e.Attach(&networkDispatcher)\n+\n+ if networkDispatcher.networkPackets != 0 {\n+ t.Fatalf(\"got networkDispatcher.networkPackets = %d, want = 0\", 
networkDispatcher.networkPackets)\n+ }\n+\n+ // An ethernet frame with a destination link address that is not assigned to\n+ // our ethernet link endpoint should still be delivered to the network\n+ // dispatcher since the ethernet endpoint is not expected to filter frames.\n+ eth := buffer.NewView(header.EthernetMinimumSize)\n+ header.Ethernet(eth).Encode(&header.EthernetFields{\n+ SrcAddr: otherLinkAddr1,\n+ DstAddr: otherLinkAddr2,\n+ Type: header.IPv4ProtocolNumber,\n+ })\n+ e.DeliverNetworkPacket(\"\", \"\", 0, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: eth.ToVectorisedView(),\n+ }))\n+ if networkDispatcher.networkPackets != 1 {\n+ t.Fatalf(\"got networkDispatcher.networkPackets = %d, want = 1\", networkDispatcher.networkPackets)\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/integration/BUILD", "new_path": "pkg/tcpip/tests/integration/BUILD", "diff": "@@ -19,6 +19,7 @@ go_test(\n\"//pkg/tcpip/link/channel\",\n\"//pkg/tcpip/link/ethernet\",\n\"//pkg/tcpip/link/loopback\",\n+ \"//pkg/tcpip/link/nested\",\n\"//pkg/tcpip/link/pipe\",\n\"//pkg/tcpip/network/arp\",\n\"//pkg/tcpip/network/ipv4\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/integration/forward_test.go", "new_path": "pkg/tcpip/tests/integration/forward_test.go", "diff": "@@ -21,7 +21,9 @@ import (\n\"github.com/google/go-cmp/cmp\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/ethernet\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/link/nested\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/pipe\"\n\"gvisor.dev/gvisor/pkg/tcpip/network/arp\"\n\"gvisor.dev/gvisor/pkg/tcpip/network/ipv4\"\n@@ -31,6 +33,33 @@ import (\n\"gvisor.dev/gvisor/pkg/waiter\"\n)\n+var _ stack.NetworkDispatcher = (*endpointWithDestinationCheck)(nil)\n+var _ stack.LinkEndpoint = (*endpointWithDestinationCheck)(nil)\n+\n+// newEthernetEndpoint returns an ethernet link endpoint that wraps an inner\n+// link endpoint and checks the destination link address before delivering\n+// network packets to the network dispatcher.\n+//\n+// See ethernet.Endpoint for more details.\n+func newEthernetEndpoint(ep stack.LinkEndpoint) *endpointWithDestinationCheck {\n+ var e endpointWithDestinationCheck\n+ e.Endpoint.Init(ethernet.New(ep), &e)\n+ return &e\n+}\n+\n+// endpointWithDestinationCheck is a link endpoint that checks the destination\n+// link address before delivering network packets to the network dispatcher.\n+type endpointWithDestinationCheck struct {\n+ nested.Endpoint\n+}\n+\n+// DeliverNetworkPacket implements stack.NetworkDispatcher.\n+func (e *endpointWithDestinationCheck) DeliverNetworkPacket(src, dst tcpip.LinkAddress, proto tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {\n+ if dst == e.Endpoint.LinkAddress() || dst == header.EthernetBroadcastAddress || header.IsMulticastEthernetAddress(dst) {\n+ e.Endpoint.DeliverNetworkPacket(src, dst, proto, pkt)\n+ }\n+}\n+\nfunc TestForwarding(t *testing.T) {\nconst (\nhost1NICID = 1\n@@ -209,16 +238,16 @@ func TestForwarding(t *testing.T) {\nhost1NIC, routerNIC1 := pipe.New(linkAddr1, linkAddr2)\nrouterNIC2, host2NIC := pipe.New(linkAddr3, linkAddr4)\n- if err := host1Stack.CreateNIC(host1NICID, ethernet.New(host1NIC)); err != nil {\n+ if err := host1Stack.CreateNIC(host1NICID, newEthernetEndpoint(host1NIC)); err != nil {\nt.Fatalf(\"host1Stack.CreateNIC(%d, _): %s\", host1NICID, err)\n}\n- if err := routerStack.CreateNIC(routerNICID1, ethernet.New(routerNIC1)); err != 
nil {\n+ if err := routerStack.CreateNIC(routerNICID1, newEthernetEndpoint(routerNIC1)); err != nil {\nt.Fatalf(\"routerStack.CreateNIC(%d, _): %s\", routerNICID1, err)\n}\n- if err := routerStack.CreateNIC(routerNICID2, ethernet.New(routerNIC2)); err != nil {\n+ if err := routerStack.CreateNIC(routerNICID2, newEthernetEndpoint(routerNIC2)); err != nil {\nt.Fatalf(\"routerStack.CreateNIC(%d, _): %s\", routerNICID2, err)\n}\n- if err := host2Stack.CreateNIC(host2NICID, ethernet.New(host2NIC)); err != nil {\n+ if err := host2Stack.CreateNIC(host2NICID, newEthernetEndpoint(host2NIC)); err != nil {\nt.Fatalf(\"host2Stack.CreateNIC(%d, _): %s\", host2NICID, err)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/integration/link_resolution_test.go", "new_path": "pkg/tcpip/tests/integration/link_resolution_test.go", "diff": "@@ -22,7 +22,6 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n- \"gvisor.dev/gvisor/pkg/tcpip/link/ethernet\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/pipe\"\n\"gvisor.dev/gvisor/pkg/tcpip/network/arp\"\n\"gvisor.dev/gvisor/pkg/tcpip/network/ipv4\"\n@@ -133,10 +132,10 @@ func TestPing(t *testing.T) {\nhost1NIC, host2NIC := pipe.New(linkAddr1, linkAddr2)\n- if err := host1Stack.CreateNIC(host1NICID, ethernet.New(host1NIC)); err != nil {\n+ if err := host1Stack.CreateNIC(host1NICID, newEthernetEndpoint(host1NIC)); err != nil {\nt.Fatalf(\"host1Stack.CreateNIC(%d, _): %s\", host1NICID, err)\n}\n- if err := host2Stack.CreateNIC(host2NICID, ethernet.New(host2NIC)); err != nil {\n+ if err := host2Stack.CreateNIC(host2NICID, newEthernetEndpoint(host2NIC)); err != nil {\nt.Fatalf(\"host2Stack.CreateNIC(%d, _): %s\", host2NICID, err)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Do not filter frames in ethernet link endpoint Ethernet frames are usually filtered at the hardware-level so there is no need to filter the frames in software. For test purposes, a new link endpoint was introduced to filter frames based on their destination. PiperOrigin-RevId: 350422941
259,891
06.01.2021 15:23:47
28,800
084ad582105901e8f655539ace21339db8bf8be4
Run external networking test
[ { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -639,6 +639,10 @@ syscall_test(\ntest = \"//test/syscalls/linux:socket_inet_loopback_nogotsan_test\",\n)\n+syscall_test(\n+ test = \"//test/syscalls/linux:socket_ipv4_udp_unbound_external_networking_test\",\n+)\n+\nsyscall_test(\nsize = \"large\",\nshard_count = most_shards,\n" } ]
Go
Apache License 2.0
google/gvisor
Run external networking test PiperOrigin-RevId: 350438564
259,858
06.01.2021 15:36:02
28,800
f89af8b5045bfdcb24ce8cc74ed676d77c9e67a6
Don't canonicalize cache directory. ... Otherwise it will be mounted in via some other path, and will not be available inside the container at all.
[ { "change_type": "MODIFY", "old_path": "tools/bazel.mk", "new_path": "tools/bazel.mk", "diff": "@@ -47,8 +47,8 @@ HASH := $(shell readlink -m $(CURDIR) | md5sum | cut -c1-8)\nBUILDER_NAME := gvisor-builder-$(HASH)-$(ARCH)\nDOCKER_NAME := gvisor-bazel-$(HASH)-$(ARCH)\nDOCKER_PRIVILEGED := --privileged\n-BAZEL_CACHE := $(shell readlink -m ~/.cache/bazel/)\n-GCLOUD_CONFIG := $(shell readlink -m ~/.config/gcloud/)\n+BAZEL_CACHE := $(HOME)/.cache/bazel/\n+GCLOUD_CONFIG := $(HOME)/.config/gcloud/\nDOCKER_SOCKET := /var/run/docker.sock\nDOCKER_CONFIG := /etc/docker\n@@ -77,8 +77,8 @@ DOCKER_RUN_OPTIONS :=\nDOCKER_RUN_OPTIONS += --user $(UID):$(GID)\nDOCKER_RUN_OPTIONS += --entrypoint \"\"\nDOCKER_RUN_OPTIONS += --init\n-DOCKER_RUN_OPTIONS += -v \"$(BAZEL_CACHE):$(BAZEL_CACHE)\"\n-DOCKER_RUN_OPTIONS += -v \"$(GCLOUD_CONFIG):$(GCLOUD_CONFIG)\"\n+DOCKER_RUN_OPTIONS += -v \"$(shell readlink -m $(BAZEL_CACHE)):$(BAZEL_CACHE)\"\n+DOCKER_RUN_OPTIONS += -v \"$(shell readlink -m $(GCLOUD_CONFIG)):$(GCLOUD_CONFIG)\"\nDOCKER_RUN_OPTIONS += -v \"/tmp:/tmp\"\nDOCKER_EXEC_OPTIONS := --user $(UID):$(GID)\nDOCKER_EXEC_OPTIONS += --interactive\n" } ]
Go
Apache License 2.0
google/gvisor
Don't canonicalize cache directory. ... Otherwise it will be mounted in via some other path, and will not be available inside the container at all. PiperOrigin-RevId: 350440843
259,858
06.01.2021 15:36:23
28,800
fa8682da0fd43556ae0a405c02bac27e6d15a8e6
Export a pprof visualization endpoint. This allows us to link directly to profiling results from the build results. The code uses the standard pprof http server, exported from the Cloud Run instance.
[ { "change_type": "MODIFY", "old_path": ".buildkite/hooks/post-command", "new_path": ".buildkite/hooks/post-command", "diff": "@@ -27,10 +27,14 @@ make -s testlogs 2>/dev/null | grep // | sort | uniq | (\n# Upload all profiles, and include in an annotation.\ndeclare profile_output=$(mktemp --tmpdir)\nfor file in $(find /tmp/profile -name \\*.pprof -print 2>/dev/null | sort); do\n- # Generate a link to the profile file at the top.\n+ # Generate a link to the profile parsing function in gvisor.dev, which\n+ # implicitly uses a prefix of https://storage.googleapis.com. Note that\n+ # this relies on the specific BuildKite bucket location, and will break if\n+ # this changes (although the artifacts will still exist and be just fine).\nprofile_name=\"${file#/tmp/profile/}\"\n+ profile_url=\"https://gvisor.dev/profile/gvisor-buildkite/${BUILDKITE_BUILD_ID}/${BUILDKITE_JOB_ID}/${file#/}/\"\nbuildkite-agent artifact upload \"${file}\"\n- echo \"<li><a href='artifact://${file#/}'>${profile_name}</a></li>\" >> \"${profile_output}\"\n+ echo \"<li><a href='${profile_url}'>${profile_name}</a></li>\" >> \"${profile_output}\"\ndone\n# Upload if we had outputs.\n" }, { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -389,7 +389,7 @@ website-push: website-build ## Push a new image and update the service.\n.PHONY: website-push\nwebsite-deploy: website-push ## Deploy a new version of the website.\n- @gcloud run deploy $(WEBSITE_SERVICE) --platform=managed --region=$(WEBSITE_REGION) --project=$(WEBSITE_PROJECT) --image=$(WEBSITE_IMAGE)\n+ @gcloud run deploy $(WEBSITE_SERVICE) --platform=managed --region=$(WEBSITE_REGION) --project=$(WEBSITE_PROJECT) --image=$(WEBSITE_IMAGE) --memory 1Gi\n.PHONY: website-deploy\n##\n" }, { "change_type": "MODIFY", "old_path": "WORKSPACE", "new_path": "WORKSPACE", "diff": "@@ -866,8 +866,8 @@ go_repository(\ngo_repository(\nname = \"com_github_google_pprof\",\nimportpath = \"github.com/google/pprof\",\n- sum = \"h1:DLpL8pWq0v4JYoRpEhDfsJhhJyGKCcQM2WPW2TJs31c=\",\n- version = \"v0.0.0-20191218002539-d4f498aebedc\",\n+ sum = \"h1:LR89qFljJ48s990kEKGsk213yIJDPI4205OKOzbURK8=\",\n+ version = \"v0.0.0-20201218002935-b9804c9f04c2\",\n)\ngo_repository(\n" }, { "change_type": "MODIFY", "old_path": "website/cmd/server/BUILD", "new_path": "website/cmd/server/BUILD", "diff": "@@ -7,4 +7,7 @@ go_binary(\nsrcs = [\"main.go\"],\npure = True,\nvisibility = [\"//website:__pkg__\"],\n+ deps = [\n+ \"@com_github_google_pprof//driver:go_default_library\",\n+ ],\n)\n" }, { "change_type": "MODIFY", "old_path": "website/cmd/server/main.go", "new_path": "website/cmd/server/main.go", "diff": "@@ -21,8 +21,11 @@ import (\n\"log\"\n\"net/http\"\n\"os\"\n+ \"path\"\n\"regexp\"\n\"strings\"\n+\n+ \"github.com/google/pprof/driver\"\n)\nvar redirects = map[string]string{\n@@ -170,28 +173,155 @@ func redirectHandler(target string) http.Handler {\n// redirectRedirects registers redirect http handlers.\nfunc registerRedirects(mux *http.ServeMux) {\n- if mux == nil {\n- mux = http.DefaultServeMux\n- }\n-\nfor prefix, baseURL := range prefixHelpers {\np := \"/\" + prefix + \"/\"\nmux.Handle(p, hostRedirectHandler(wrappedHandler(prefixRedirectHandler(p, baseURL))))\n}\n-\nfor path, redirect := range redirects {\nmux.Handle(path, hostRedirectHandler(wrappedHandler(redirectHandler(redirect))))\n}\n}\n-// registerStatic registers static file handlers\n+// registerStatic registers static file handlers.\nfunc registerStatic(mux *http.ServeMux, staticDir string) {\n- if mux == nil 
{\n- mux = http.DefaultServeMux\n- }\nmux.Handle(\"/\", hostRedirectHandler(wrappedHandler(http.FileServer(http.Dir(staticDir)))))\n}\n+// profileMeta implements synthetic flags for pprof.\n+type profileMeta struct {\n+ // Mux is the mux to register on.\n+ Mux *http.ServeMux\n+\n+ // SourceURL is the source of the profile.\n+ SourceURL string\n+}\n+\n+func (*profileMeta) ExtraUsage() string { return \"\" }\n+func (*profileMeta) AddExtraUsage(string) {}\n+func (*profileMeta) Bool(_ string, def bool, _ string) *bool { return &def }\n+func (*profileMeta) Int(_ string, def int, _ string) *int { return &def }\n+func (*profileMeta) Float64(_ string, def float64, _ string) *float64 { return &def }\n+func (*profileMeta) StringList(_ string, def string, _ string) *[]*string { return new([]*string) }\n+func (*profileMeta) String(option string, def string, _ string) *string {\n+ switch option {\n+ case \"http\":\n+ // Only http is specified. Other options may be accessible via\n+ // the web interface, so we just need to spoof a valid option\n+ // here. The server is actually bound by HTTPServer, below.\n+ value := \"localhost:80\"\n+ return &value\n+ case \"symbolize\":\n+ // Don't attempt symbolization. Most profiles should come with\n+ // mappings built-in to the profile itself.\n+ value := \"none\"\n+ return &value\n+ default:\n+ return &def // Default.\n+ }\n+}\n+\n+// Parse implements plugin.FlagSet.Parse.\n+func (p *profileMeta) Parse(usage func()) []string {\n+ // Just return the SourceURL. This is interpreted as the profile to\n+ // download. We validate that the URL corresponds to a Google Cloud\n+ // Storage URL below.\n+ return []string{p.SourceURL}\n+}\n+\n+// pprofFixedPrefix is used to limit the exposure to SSRF.\n+//\n+// See registerProfile below.\n+const pprofFixedPrefix = \"https://storage.googleapis.com/\"\n+\n+// Target returns the URL target.\n+func (p *profileMeta) Target() string {\n+ return fmt.Sprintf(\"/profile/%s/\", p.SourceURL[len(pprofFixedPrefix):])\n+}\n+\n+// HTTPServer is a function passed to driver.PProf.\n+func (p *profileMeta) HTTPServer(args *driver.HTTPServerArgs) error {\n+ target := p.Target()\n+ for subpath, handler := range args.Handlers {\n+ handlerPath := path.Join(target, subpath)\n+ if len(handlerPath) < len(target) {\n+ // Don't clean the target, match only as the literal\n+ // directory path in order to keep relative links\n+ // working in the profile. E.g. /profile/foo/ is the\n+ // base URL for the profile at https://.../foo.\n+ handlerPath = target\n+ }\n+ p.Mux.Handle(handlerPath, handler)\n+ }\n+ return nil\n+}\n+\n+// registerProfile registers the profile handler.\n+//\n+// Note that this has a security surface worth considering.\n+//\n+// We are passed effectively a URL, which we fetch and parse,\n+// then display the profile output. We limit the possibility of\n+// SSRF by interpreting the URL strictly as a part to an object\n+// in Google Cloud Storage, but we allow the user to specify any\n+// bucket (since this may change with the CI system).\n+//\n+// We additionally must consider the possibility that users may\n+// craft malicious profile objects (somehow) and pass those URLs\n+// here as well. It seems feasible that we could parse a profile\n+// that causes a crash (DOS), but this would be automatically\n+// handled without a blip. It seems unlikely that we could parse a\n+// profile that gives full code execution, but even so there is\n+// nothing in this image except this code and CA certs. 
At worst,\n+// code execution would enable someone to serve up content under the\n+// web domain. This would be ephemeral with the specific instance,\n+// and persisting such an attack would require constantly crashing\n+// instances in whatever way gives remote code execution. Even if\n+// this were possible, it's unlikely that exploiting such a crash\n+// could be done so constantly and consistently.\n+//\n+// The user can also fill the \"disk\" of this container instance,\n+// causing an OOM and a crash. This has similar semantics to the\n+// DOS scenario above, and would just be handled by Cloud Run.\n+//\n+// Finally, a malicious user could cause us to repeatedly fetch\n+// extremely large objects. However, since we fetch objects via\n+// the unauthenticated URL, such accesses would always be charged\n+// to the object owner. Downloading large objects can lead to the\n+// filling of the \"disk\" scenario above, but this is similarly a\n+// minor issue and immediately mitigated.\n+func registerProfile(mux *http.ServeMux) {\n+ const urlPrefix = \"/profile/\"\n+ mux.Handle(urlPrefix, hostRedirectHandler(wrappedHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n+ // Extract the URL; this is everything except the final /.\n+ parts := strings.Split(r.URL.Path[len(urlPrefix):], \"/\")\n+ url := pprofFixedPrefix + strings.Join(parts[:len(parts)-1], \"/\")\n+ if url == pprofFixedPrefix {\n+ http.Error(w, \"Invalid URL: no path provided.\", http.StatusNotFound)\n+ return\n+ }\n+\n+ // Set up the meta handler. This will modify the original mux\n+ // accordingly, and we ultimately return a redirect that\n+ // includes all the original arguments. This means that if we\n+ // ever hit a server that does not have this profile loaded, it\n+ // will load and redirect again.\n+ meta := &profileMeta{\n+ Mux: mux,\n+ SourceURL: url,\n+ }\n+ if err := driver.PProf(&driver.Options{\n+ Flagset: meta,\n+ HTTPServer: meta.HTTPServer,\n+ }); err != nil {\n+ http.Error(w, fmt.Sprintf(\"Invalid profile: %v\", err), http.StatusNotImplemented)\n+ return\n+ }\n+\n+ // Serve the path directly.\n+ mux.ServeHTTP(w, r)\n+ }))))\n+}\n+\nfunc envFlagString(name, def string) string {\nif val := os.Getenv(name); val != \"\" {\nreturn val\n@@ -211,8 +341,9 @@ var (\nfunc main() {\nflag.Parse()\n- registerRedirects(nil)\n- registerStatic(nil, *staticDir)\n+ registerRedirects(http.DefaultServeMux)\n+ registerStatic(http.DefaultServeMux, *staticDir)\n+ registerProfile(http.DefaultServeMux)\nlog.Printf(\"Listening on %s...\", *addr)\nlog.Fatal(http.ListenAndServe(*addr, nil))\n" } ]
Go
Apache License 2.0
google/gvisor
Export a pprof visualization endpoint. This allows us to link directly to profiling results from the build results. The code uses the standard pprof http server, exported from the Cloud Run instance. PiperOrigin-RevId: 350440910
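The commit above maps requests under /profile/ onto Google Cloud Storage object URLs before handing them to the pprof driver. Below is a minimal, hypothetical sketch of just that path-to-URL mapping; the fixed GCS prefix and the drop-the-trailing-element rule mirror the diff, while the standalone server, port, and echo response are assumptions for illustration and stand in for the real driver.PProf call.

```go
// Minimal sketch of the /profile/ path-to-object-URL mapping described above.
// The fixed GCS prefix mirrors the diff; the standalone server is hypothetical.
package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"
)

const pprofFixedPrefix = "https://storage.googleapis.com/"

// profileSourceURL turns "/profile/<bucket>/<object>/" into the GCS object URL.
func profileSourceURL(requestPath string) (string, error) {
	const urlPrefix = "/profile/"
	if !strings.HasPrefix(requestPath, urlPrefix) {
		return "", fmt.Errorf("unexpected path %q", requestPath)
	}
	// Everything except the trailing element names the GCS object.
	parts := strings.Split(requestPath[len(urlPrefix):], "/")
	url := pprofFixedPrefix + strings.Join(parts[:len(parts)-1], "/")
	if url == pprofFixedPrefix {
		return "", fmt.Errorf("no object path provided")
	}
	return url, nil
}

func main() {
	http.HandleFunc("/profile/", func(w http.ResponseWriter, r *http.Request) {
		src, err := profileSourceURL(r.URL.Path)
		if err != nil {
			http.Error(w, err.Error(), http.StatusNotFound)
			return
		}
		// The real handler feeds src to the pprof driver; here we just echo it.
		fmt.Fprintf(w, "would fetch profile from %s\n", src)
	})
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
```

In the real handler the computed URL is passed to driver.PProf, which then registers per-profile handlers on the same mux so that later requests are served directly.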
259,858
07.01.2021 01:15:20
28,800
776016ac6412006671a5dbccb5e9af21bf9b01f7
Fix native benchmarks.
[ { "change_type": "MODIFY", "old_path": ".buildkite/pipeline.yaml", "new_path": ".buildkite/pipeline.yaml", "diff": "@@ -12,7 +12,7 @@ _templates:\nretry:\nautomatic: false\nsoft_fail: true\n- if: build.message =~ /benchmarks/ || build.branch == \"master\"\n+ if: build.branch == \"master\"\nenv:\n# BENCHMARKS_OFFICIAL is set from hooks/pre-command, based\n# on whether this is executing on the master branch.\n@@ -152,7 +152,7 @@ steps:\nlabel: \":fire: Benchmarks smoke test\"\ncommand: make benchmark-platforms\n# Use the opposite of the benchmarks filter.\n- if: build.message !~ /benchmarks/ && build.branch != \"master\"\n+ if: build.branch != \"master\"\n# Run all benchmarks.\n- <<: *benchmarks\n" }, { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -353,7 +353,7 @@ benchmark-platforms: load-benchmarks $(RUNTIME_BIN) ## Runs benchmarks for runc\n@$(foreach PLATFORM,$(BENCHMARKS_PLATFORMS), \\\n$(call run_benchmark,$(PLATFORM),--platform=$(PLATFORM) --vfs2) && \\\n) true\n- @$(call run-benchmark,runc)\n+ @$(call run_benchmark,runc)\n.PHONY: benchmark-platforms\nrun-benchmark: load-benchmarks $(RUNTIME_BIN) ## Runs single benchmark and optionally sends data to BigQuery.\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/harness/machine.go", "new_path": "test/benchmarks/harness/machine.go", "diff": "@@ -16,6 +16,7 @@ package harness\nimport (\n\"context\"\n+ \"errors\"\n\"net\"\n\"os/exec\"\n@@ -66,14 +67,19 @@ func (l *localMachine) RunCommand(cmd string, args ...string) (string, error) {\n// IPAddress implements Machine.IPAddress.\nfunc (l *localMachine) IPAddress() (net.IP, error) {\n- conn, err := net.Dial(\"udp\", \"8.8.8.8:80\")\n+ addrs, err := net.InterfaceAddrs()\nif err != nil {\n- return nil, err\n+ return net.IP{}, err\n}\n- defer conn.Close()\n-\n- addr := conn.LocalAddr().(*net.UDPAddr)\n- return addr.IP, nil\n+ for _, a := range addrs {\n+ if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {\n+ if ipnet.IP.To4() != nil {\n+ return ipnet.IP, nil\n+ }\n+ }\n+ }\n+ // Unable to locate non-loopback address.\n+ return nil, errors.New(\"no IPAddress available\")\n}\n// CleanUp implements Machine.CleanUp and does nothing for localMachine.\n" } ]
Go
Apache License 2.0
google/gvisor
Fix native benchmarks. PiperOrigin-RevId: 350509137
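The harness fix above replaces the old UDP-dial trick with a scan of the host's interface addresses for a non-loopback IPv4 address. The sketch below reproduces that lookup with only the standard library; the function name and the main wrapper are illustrative, not part of the harness.

```go
// Sketch of the non-loopback IPv4 lookup adopted by the benchmark harness above.
package main

import (
	"errors"
	"fmt"
	"net"
)

// localIPv4 returns the first non-loopback IPv4 address of this machine.
func localIPv4() (net.IP, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return nil, err
	}
	for _, a := range addrs {
		ipnet, ok := a.(*net.IPNet)
		if !ok || ipnet.IP.IsLoopback() {
			continue
		}
		if ip4 := ipnet.IP.To4(); ip4 != nil {
			return ip4, nil
		}
	}
	return nil, errors.New("no non-loopback IPv4 address available")
}

func main() {
	ip, err := localIPv4()
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("local address:", ip)
}
```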
259,881
07.01.2021 09:53:33
18,000
a0037b8976d1ea482e1545adfa65403ca43dbb1a
website: redirect module to pkg.go.dev Add redirects from any module package path to its corresponding pkg.go.dev documentation. e.g., gvisor.dev/gvisor/pkg/sentry/kernel -> This is a handy way to get to documentation, also used by other vanity domains, like golang.org/x/tools.
[ { "change_type": "MODIFY", "old_path": "website/cmd/server/main.go", "new_path": "website/cmd/server/main.go", "diff": "@@ -171,7 +171,7 @@ func redirectHandler(target string) http.Handler {\n})\n}\n-// redirectRedirects registers redirect http handlers.\n+// registerRedirects registers redirect http handlers.\nfunc registerRedirects(mux *http.ServeMux) {\nfor prefix, baseURL := range prefixHelpers {\np := \"/\" + prefix + \"/\"\n@@ -180,6 +180,17 @@ func registerRedirects(mux *http.ServeMux) {\nfor path, redirect := range redirects {\nmux.Handle(path, hostRedirectHandler(wrappedHandler(redirectHandler(redirect))))\n}\n+ registerModuleDocRedirects(http.DefaultServeMux)\n+}\n+\n+// registerModuleDocs registers redirect http handlers to redirect module paths\n+// directly to their docs on pkg.go.dev.\n+func registerModuleDocRedirects(mux *http.ServeMux) {\n+ const prefix = \"/gvisor/\"\n+ mux.Handle(prefix, hostRedirectHandler(wrappedHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n+ pkg := r.URL.Path[len(prefix):]\n+ redirectWithQuery(w, r, fmt.Sprintf(\"https://pkg.go.dev/gvisor.dev/gvisor/%s\", pkg))\n+ }))))\n}\n// registerStatic registers static file handlers.\n" } ]
Go
Apache License 2.0
google/gvisor
website: redirect module to pkg.go.dev Add redirects from any module package path to its corresponding pkg.go.dev documentation. e.g., gvisor.dev/gvisor/pkg/sentry/kernel -> https://pkg.go.dev/gvisor.dev/gvisor/pkg/sentry/kernel. This is a handy way to get to documentation, also used by other vanity domains, like golang.org/x/tools.
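The redirect added above forwards any path under /gvisor/ to the matching pkg.go.dev page. A simplified sketch of that handler follows; it omits the site's host-redirect and query-forwarding wrappers, and the standalone server address is an assumption for illustration.

```go
// Sketch of the module-path redirect registered above: requests under
// /gvisor/ are forwarded to the matching pkg.go.dev documentation page.
package main

import (
	"fmt"
	"log"
	"net/http"
)

func registerModuleDocRedirects(mux *http.ServeMux) {
	const prefix = "/gvisor/"
	mux.HandleFunc(prefix, func(w http.ResponseWriter, r *http.Request) {
		// Everything after the prefix is treated as a package path.
		pkg := r.URL.Path[len(prefix):]
		target := fmt.Sprintf("https://pkg.go.dev/gvisor.dev/gvisor/%s", pkg)
		http.Redirect(w, r, target, http.StatusFound)
	})
}

func main() {
	mux := http.NewServeMux()
	registerModuleDocRedirects(mux)
	log.Fatal(http.ListenAndServe("localhost:8080", mux))
}
```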
259,992
07.01.2021 09:29:48
28,800
4c5f36e7bd4781fc9984ef4fdf98009b5ec4cb4c
Update link to benchmarks Closes
[ { "change_type": "MODIFY", "old_path": "g3doc/architecture_guide/performance.md", "new_path": "g3doc/architecture_guide/performance.md", "diff": "@@ -269,7 +269,7 @@ operations are less of an issue. The above figure shows the total time required\nfor an `ffmpeg` container to start, load and transcode a 27MB input video.\n[ab]: https://en.wikipedia.org/wiki/ApacheBench\n-[benchmark-tools]: https://github.com/google/gvisor/tree/master/benchmarks\n+[benchmark-tools]: https://github.com/google/gvisor/tree/master/test/benchmarks\n[gce]: https://cloud.google.com/compute/\n[cnn]: https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/convolutional_network.py\n[docker]: https://docker.io\n" } ]
Go
Apache License 2.0
google/gvisor
Update link to benchmarks Closes #5192 PiperOrigin-RevId: 350578130
259,975
07.01.2021 12:47:12
28,800
f4b4ed666d13eef6aebe23189b1431a933de0d8e
Add runsc build job to BuildKite.
[ { "change_type": "MODIFY", "old_path": ".buildkite/pipeline.yaml", "new_path": ".buildkite/pipeline.yaml", "diff": "@@ -158,6 +158,9 @@ steps:\n- <<: *benchmarks\nlabel: \":bazel: ABSL build benchmarks\"\ncommand: make benchmark-platforms BENCHMARKS_FILTER=\"ABSL/page_cache.clean\" BENCHMARKS_SUITE=absl BENCHMARKS_TARGETS=test/benchmarks/fs:bazel_test\n+ - <<: *benchmarks\n+ label: \":go: runsc build benchmarks\"\n+ command: make benchmark-platforms BENCHMARKS_FILTER=\"Runsc/page_cache.clean/filesystem.bind\" BENCHMARKS_SUITE=runsc BENCHMARKS_TARGETS=test/benchmarks/fs:bazel_test\n- <<: *benchmarks\nlabel: \":metal: FFMPEG benchmarks\"\ncommand: make benchmark-platforms BENCHMARKS_SUITE=ffmpeg BENCHMARKS_TARGETS=test/benchmarks/media:ffmpeg_test\n" } ]
Go
Apache License 2.0
google/gvisor
Add runsc build job to BuildKite. PiperOrigin-RevId: 350619346
260,023
07.01.2021 15:06:18
28,800
04b37c822022c27cb144e4af5ef21043a74127f3
Fix tuntap_test to cleanup after CreateInterfaceNoCap
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/tuntap.cc", "new_path": "test/syscalls/linux/tuntap.cc", "diff": "@@ -162,12 +162,19 @@ TEST(TuntapStaticTest, NetTunExists) {\nclass TuntapTest : public ::testing::Test {\nprotected:\n+ void SetUp() override {\n+ have_net_admin_cap_ =\n+ ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN));\n+ }\n+\nvoid TearDown() override {\n- if (ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN))) {\n+ if (have_net_admin_cap_) {\n// Bring back capability if we had dropped it in test case.\nASSERT_NO_ERRNO(SetCapability(CAP_NET_ADMIN, true));\n}\n}\n+\n+ bool have_net_admin_cap_;\n};\nTEST_F(TuntapTest, CreateInterfaceNoCap) {\n" } ]
Go
Apache License 2.0
google/gvisor
Fix tuntap_test to cleanup after CreateInterfaceNoCap PiperOrigin-RevId: 350646249
259,858
07.01.2021 15:28:41
28,800
77b340ce82230e4e0bded01f43232c708328cd7e
Require specific buckets for pprof handler. This further restricts the exposed surface to only artifacts generated by the continuous integration system. This change also installs appropriate root certificates, so that objects can be fetched from
[ { "change_type": "MODIFY", "old_path": "WORKSPACE", "new_path": "WORKSPACE", "diff": "load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\", \"http_file\")\nload(\"@bazel_tools//tools/build_defs/repo:git.bzl\", \"git_repository\")\n+# Root certificates.\n+#\n+# Note that the sha256 hash is ommitted here intentionally. This should not be\n+# used in any part of the build other than as certificates present in images.\n+http_file(\n+ name = \"google_root_pem\",\n+ urls = [\n+ \"https://pki.goog/roots.pem\"\n+ ],\n+)\n+\n# Bazel/starlark utilities.\nhttp_archive(\nname = \"bazel_skylib\",\n" }, { "change_type": "MODIFY", "old_path": "website/BUILD", "new_path": "website/BUILD", "diff": "@@ -38,6 +38,7 @@ genrule(\n\":syscallmd\",\n\"//website/blog:posts\",\n\"//website/cmd/server\",\n+ \"@google_root_pem//file\",\n],\nouts = [\"files.tgz\"],\ncmd = \"set -x; \" +\n@@ -61,6 +62,8 @@ genrule(\n\"ruby /checks.rb \" +\n\"/output && \" +\n\"cp $(location //website/cmd/server) $$T/output/server && \" +\n+ \"mkdir -p $$T/output/etc/ssl && \" +\n+ \"cp $(location @google_root_pem//file) $$T/output/etc/ssl/cert.pem && \" +\n\"tar -zcf $@ -C $$T/output . && \" +\n\"rm -rf $$T\",\ntags = [\n" }, { "change_type": "MODIFY", "old_path": "website/cmd/server/main.go", "new_path": "website/cmd/server/main.go", "diff": "@@ -244,6 +244,14 @@ func (p *profileMeta) Parse(usage func()) []string {\n// See registerProfile below.\nconst pprofFixedPrefix = \"https://storage.googleapis.com/\"\n+// allowedBuckets enforces constraints on the pprof target.\n+//\n+// If the continuous integration system is changed in the future to use\n+// additional buckets, they may be whitelisted here. See registerProfile.\n+var allowedBuckets = map[string]bool{\n+ \"gvisor-buildkite\": true,\n+}\n+\n// Target returns the URL target.\nfunc (p *profileMeta) Target() string {\nreturn fmt.Sprintf(\"/profile/%s/\", p.SourceURL[len(pprofFixedPrefix):])\n@@ -259,7 +267,14 @@ func (p *profileMeta) HTTPServer(args *driver.HTTPServerArgs) error {\n// directory path in order to keep relative links\n// working in the profile. E.g. /profile/foo/ is the\n// base URL for the profile at https://.../foo.\n+ //\n+ // The base target typically shows the dot-based graph,\n+ // which will not work in the image (due to the lack of\n+ // a dot binary to execute). Therefore, we redirect to\n+ // the flamegraph handler. Everything should otherwise\n+ // work the exact same way, except the \"Graph\" link.\nhandlerPath = target\n+ handler = redirectHandler(path.Join(handlerPath, \"flamegraph\"))\n}\np.Mux.Handle(handlerPath, handler)\n}\n@@ -273,10 +288,11 @@ func (p *profileMeta) HTTPServer(args *driver.HTTPServerArgs) error {\n// We are passed effectively a URL, which we fetch and parse,\n// then display the profile output. We limit the possibility of\n// SSRF by interpreting the URL strictly as a part to an object\n-// in Google Cloud Storage, but we allow the user to specify any\n-// bucket (since this may change with the CI system).\n+// in Google Cloud Storage, and further limit the buckets that\n+// may be used. This contains the vast majority of concerns,\n+// since objects must at least be uploaded by our CI system.\n//\n-// We additionally must consider the possibility that users may\n+// However, we additionally consider the possibility that users\n// craft malicious profile objects (somehow) and pass those URLs\n// here as well. 
It seems feasible that we could parse a profile\n// that causes a crash (DOS), but this would be automatically\n@@ -294,17 +310,22 @@ func (p *profileMeta) HTTPServer(args *driver.HTTPServerArgs) error {\n// causing an OOM and a crash. This has similar semantics to the\n// DOS scenario above, and would just be handled by Cloud Run.\n//\n-// Finally, a malicious user could cause us to repeatedly fetch\n-// extremely large objects. However, since we fetch objects via\n-// the unauthenticated URL, such accesses would always be charged\n-// to the object owner. Downloading large objects can lead to the\n-// filling of the \"disk\" scenario above, but this is similarly a\n-// minor issue and immediately mitigated.\n+// Note that all of the above scenarios would require uploading\n+// malicious profiles to controller buckets, and a clear audit\n+// trail would exist in those cases.\nfunc registerProfile(mux *http.ServeMux) {\nconst urlPrefix = \"/profile/\"\nmux.Handle(urlPrefix, hostRedirectHandler(wrappedHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n// Extract the URL; this is everything except the final /.\nparts := strings.Split(r.URL.Path[len(urlPrefix):], \"/\")\n+ if len(parts) == 0 {\n+ http.Error(w, \"Invalid URL: no bucket provided.\", http.StatusNotFound)\n+ return\n+ }\n+ if !allowedBuckets[parts[0]] {\n+ http.Error(w, fmt.Sprintf(\"Invalid URL: not an allowed bucket (%s).\", parts[0]), http.StatusNotFound)\n+ return\n+ }\nurl := pprofFixedPrefix + strings.Join(parts[:len(parts)-1], \"/\")\nif url == pprofFixedPrefix {\nhttp.Error(w, \"Invalid URL: no path provided.\", http.StatusNotFound)\n" } ]
Go
Apache License 2.0
google/gvisor
Require specific buckets for pprof handler. This further restricts the exposed surface to only artifacts generated by the continuous integration system. This change also installs appropriate root certificates, so that objects can be fetched from https://storage.googleapis.com. PiperOrigin-RevId: 350650197
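The hardened handler above now validates the first path element, the GCS bucket, against an allowlist before building the object URL. Here is a small sketch of just that check; the helper name and the sample inputs are hypothetical, and only the gvisor-buildkite bucket name is taken from the diff.

```go
// Sketch of the bucket allowlist check added above. Only the first path
// element (the GCS bucket) is validated; helper and inputs are illustrative.
package main

import (
	"fmt"
	"strings"
)

var allowedBuckets = map[string]bool{
	"gvisor-buildkite": true,
}

// checkProfilePath validates "<bucket>/<object...>" and returns the bucket.
func checkProfilePath(p string) (string, error) {
	parts := strings.Split(p, "/")
	if len(parts) == 0 || parts[0] == "" {
		return "", fmt.Errorf("no bucket provided")
	}
	if !allowedBuckets[parts[0]] {
		return "", fmt.Errorf("bucket %q is not allowed", parts[0])
	}
	return parts[0], nil
}

func main() {
	for _, p := range []string{"gvisor-buildkite/logs/profile.pb", "evil-bucket/x"} {
		if b, err := checkProfilePath(p); err != nil {
			fmt.Printf("%-40s rejected: %v\n", p, err)
		} else {
			fmt.Printf("%-40s accepted (bucket %s)\n", p, b)
		}
	}
}
```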
259,853
30.12.2020 01:28:26
28,800
8de562b79942f5383ffbe67873df07509ca7fcb0
Add ARM smoke test make BAZEL_CONFIG=aarch64 arm-qemu-smoke-test
[ { "change_type": "MODIFY", "old_path": ".bazelrc", "new_path": ".bazelrc", "diff": "@@ -20,3 +20,8 @@ build --cxxopt=-std=c++17\n# Display the current git revision in the info block.\nbuild --stamp --workspace_status_command tools/workspace_status.sh\n+\n+# Set flags for aarch64.\n+build:cross-aarch64 --crosstool_top=@crosstool//:toolchains --compiler=gcc\n+build:cross-aarch64 --cpu=aarch64\n+build:cross-aarch64 --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64\n" }, { "change_type": "MODIFY", "old_path": ".buildkite/pipeline.yaml", "new_path": ".buildkite/pipeline.yaml", "diff": "@@ -147,6 +147,11 @@ steps:\nparallelism: 10\nif: build.message =~ /VFS1/ || build.branch == \"master\"\n+ # ARM tests.\n+ - <<: *common\n+ label: \":mechanical_arm: ARM\"\n+ command: make arm-qemu-smoke-test\n+\n# Run basic benchmarks smoke tests (no upload).\n- <<: *common\nlabel: \":fire: Benchmarks smoke test\"\n" }, { "change_type": "DELETE", "old_path": ".travis.yml", "new_path": null, "diff": "-language: shell\n-dist: xenial\n-git:\n- clone: false # Clone manually in before_install\n-before_install:\n- - set -e -o pipefail\n- - |\n- if [ \"${TRAVIS_PULL_REQUEST}\" = false ]; then\n- # This is not a PR build, fetch and checkout the commit being tested\n- git clone -q --depth 1 \"https://github.com/${TRAVIS_REPO_SLUG}.git\" \"${TRAVIS_REPO_SLUG}\"\n- cd \"${TRAVIS_REPO_SLUG}\"\n- git fetch origin \"${TRAVIS_COMMIT}\" --depth 1\n- git checkout -qf \"${TRAVIS_COMMIT}\"\n- else\n- # This is a PR build, simulate +refs/pull/{num}/merge.\n- # We can do that by fetching +refs/pull/{num}/head and cherry picking it\n- # onto the target branch.\n- git clone -q --branch \"${TRAVIS_BRANCH}\" --depth 1 \"https://github.com/${TRAVIS_REPO_SLUG}.git\" \"${TRAVIS_REPO_SLUG}\"\n- cd \"${TRAVIS_REPO_SLUG}\"\n- git fetch origin \"+refs/pull/${TRAVIS_PULL_REQUEST}/head\" --depth 1\n- git config --global user.email \"$(git log -1 FETCH_HEAD --pretty=\"%cE\")\"\n- git config --global user.name \"$(git log -1 FETCH_HEAD --pretty=\"%aN\")\"\n- git cherry-pick --strategy=recursive -X theirs --keep-redundant-commits FETCH_HEAD\n- fi\n-cache:\n- directories:\n- - /home/travis/.cache/bazel/\n-os: linux\n-services:\n- - docker\n-jobs:\n- include:\n- # AMD64 builds are tested on kokoro, so don't run them in travis to save\n- # capacity for arm64 builds.\n- # - os: linux\n- # arch: amd64\n- - os: linux\n- arch: arm64\n-script:\n- # On arm64, we need to create our own pipes for stderr and stdout,\n- # otherwise we will not be able to open /dev/stderr. 
This is probably\n- # due to AppArmor rules.\n- - bash -xeo pipefail -c 'uname -a && make smoke-tests 2>&1 | cat'\n-branches:\n- except:\n- # Skip copybara branches.\n- - /^test\\/cl.*$/\n" }, { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -232,6 +232,14 @@ do-tests:\n@$(call sudo,//runsc,do true)\n.PHONY: do-tests\n+arm-qemu-smoke-test: BAZEL_OPTIONS=--config=cross-aarch64\n+arm-qemu-smoke-test: load-arm-qemu\n+ export T=$$(mktemp -d --tmpdir release.XXXXXX); \\\n+ mkdir -p $$T/bin/arm64/ && \\\n+ $(call copy,//runsc:runsc,$$T/bin/arm64) && \\\n+ docker run --rm -v $$T/bin/arm64/runsc:/workdir/initramfs/runsc gvisor.dev/images/arm-qemu\n+.PHONY: arm-qemu-smoke-test\n+\nsimple-tests: unit-tests # Compatibility target.\n.PHONY: simple-tests\n" }, { "change_type": "MODIFY", "old_path": "WORKSPACE", "new_path": "WORKSPACE", "diff": "@@ -83,6 +83,20 @@ http_archive(\n],\n)\n+# Load C++ cross-compilation toolchains.\n+http_archive(\n+ name = \"coral_crosstool\",\n+ sha256 = \"088ef98b19a45d7224be13636487e3af57b1564880b67df7be8b3b7eee4a1bfc\",\n+ strip_prefix = \"crosstool-142e930ac6bf1295ff3ba7ba2b5b6324dfb42839\",\n+ urls = [\n+ \"https://github.com/google-coral/crosstool/archive/142e930ac6bf1295ff3ba7ba2b5b6324dfb42839.tar.gz\",\n+ ],\n+)\n+\n+load(\"@coral_crosstool//:configure.bzl\", \"cc_crosstool\")\n+\n+cc_crosstool(name = \"crosstool\")\n+\n# Load protobuf dependencies.\nhttp_archive(\nname = \"rules_proto\",\n" }, { "change_type": "ADD", "old_path": null, "new_path": "images/arm-qemu/Dockerfile", "diff": "+FROM fedora:33\n+\n+RUN dnf install -y qemu-system-aarch64 gzip cpio wget\n+\n+WORKDIR /workdir\n+RUN wget -4 http://dl-cdn.alpinelinux.org/alpine/edge/releases/aarch64/netboot/vmlinuz-lts\n+RUN wget -4 http://dl-cdn.alpinelinux.org/alpine/edge/releases/aarch64/netboot/initramfs-lts\n+\n+COPY initramfs /workdir/initramfs\n+COPY test.sh /workdir/\n+\n+CMD ./test.sh\n" }, { "change_type": "ADD", "old_path": null, "new_path": "images/arm-qemu/initramfs/init", "diff": "+#!/bin/sh\n+\n+# Copyright 2020 The gVisor Authors.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# This script is started as the init process in a test virtual machine,\n+# it does all required initialization steps and run a test command inside a\n+# gVisor instance.\n+\n+set -x -e\n+\n+/bin/busybox mkdir -p /usr/bin /usr/sbin /proc /sys /dev /tmp\n+\n+/bin/busybox --install -s\n+export PATH=/usr/bin:/bin:/usr/sbin:/sbin\n+\n+mount -t proc -o noexec,nosuid,nodev proc /proc\n+mount -t sysfs -o noexec,nosuid,nodev sysfs /sys\n+mount -t devtmpfs -o exec,nosuid,mode=0755,size=2M devtmpfs /dev\n+\n+uname -a\n+/runsc --TESTONLY-unsafe-nonroot --rootless --network none --debug --alsologtostderr do uname -a\n+echo \"runsc exited with code $?\"\n+\n+# Shutdown the VM. 
poweroff and halt doesn't work for unknown reasons.\n+# qemu is started with the -no-reboot flag, so the VM will be terminated.\n+reboot -f\n+exit 1\n" }, { "change_type": "ADD", "old_path": null, "new_path": "images/arm-qemu/test.sh", "diff": "+#!/bin/bash\n+\n+# Copyright 2020 The gVisor Authors.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+set -xeuo pipefail -m\n+\n+cd initramfs\n+find . | cpio -v -o -c -R root:root | gzip -9 >> ../initramfs-lts\n+cd ..\n+\n+qemu-system-aarch64 -M virt -m 512M -cpu cortex-a57 \\\n+ -kernel vmlinuz-lts -initrd initramfs-lts \\\n+ -append \"console=ttyAMA0 panic=-1\" -nographic -no-reboot \\\n+ | tee /dev/stderr | grep \"runsc exited with code 0\"\n+\n+echo \"PASS\"\n" }, { "change_type": "MODIFY", "old_path": "images/default/Dockerfile", "new_path": "images/default/Dockerfile", "diff": "-FROM fedora:31\n+FROM ubuntu:focal\n-# Install bazel.\n-RUN dnf install -y dnf-plugins-core && dnf copr enable -y vbatts/bazel\n-RUN dnf install -y git gcc make golang gcc-c++ glibc-devel python3 which python3-pip python3-devel libffi-devel openssl-devel pkg-config glibc-static libstdc++-static patch diffutils\n-RUN pip install --no-cache-dir pycparser\n-RUN dnf install -y bazel3\n+ENV DEBIAN_FRONTEND=\"noninteractive\"\n+RUN apt-get update && apt-get install -y curl gnupg2 git \\\n+ python python3 python3-distutils python3-pip \\\n+ build-essential crossbuild-essential-arm64 qemu-user-static \\\n+ openjdk-11-jdk-headless zip unzip \\\n+ apt-transport-https ca-certificates gnupg-agent \\\n+ software-properties-common \\\n+ pkg-config libffi-dev patch diffutils libssl-dev\n-# Install gcloud. Note that while this is \"x86_64\", it doesn't actually matter.\n+# Install Docker client for the website build.\n+RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -\n+RUN add-apt-repository \\\n+ \"deb https://download.docker.com/linux/ubuntu \\\n+ $(lsb_release -cs) \\\n+ stable\"\n+RUN apt-get install docker-ce-cli\n+\n+# Install gcloud.\nRUN curl https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-289.0.0-linux-x86_64.tar.gz | \\\ntar zxf - google-cloud-sdk && \\\n- google-cloud-sdk/install.sh && \\\n+ google-cloud-sdk/install.sh --quiet && \\\nln -s /google-cloud-sdk/bin/gcloud /usr/bin/gcloud\n-# Install Docker client for the website build.\n-RUN dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo\n-RUN dnf install -y docker-ce-cli\n-\n+# Download the official bazel binary. 
The APT repository isn't used because there is not packages for arm64.\n+RUN sh -c 'curl -o /usr/local/bin/bazel https://releases.bazel.build/3.5.1/release/bazel-3.5.1-linux-$(uname -m | sed s/aarch64/arm64/) && chmod ugo+x /usr/local/bin/bazel'\nWORKDIR /workspace\n-ENTRYPOINT [\"/usr/bin/bazel\"]\n+ENTRYPOINT [\"/usr/local/bin/bazel\"]\n" }, { "change_type": "MODIFY", "old_path": "tools/bazel.mk", "new_path": "tools/bazel.mk", "diff": "@@ -61,6 +61,7 @@ DOCKER_CONFIG := /etc/docker\n## STARTUP_OPTIONS - Startup options passed to Bazel.\n##\nSTARTUP_OPTIONS :=\n+BAZEL_OPTIONS :=\nBAZEL := bazel $(STARTUP_OPTIONS)\nBASE_OPTIONS := --color=no --curses=no\nTEST_OPTIONS := $(BASE_OPTIONS) \\\n@@ -155,7 +156,7 @@ bazel-image: load-default ## Ensures that the local builder exists.\n@$(call header,DOCKER BUILD)\n@docker rm -f $(BUILDER_NAME) 2>/dev/null || true\n@docker run --user 0:0 --entrypoint \"\" --name $(BUILDER_NAME) gvisor.dev/images/default \\\n- sh -c \"$(GROUPADD_DOCKER) $(USERADD_DOCKER) if test -e /dev/kvm; then chmod a+rw /dev/kvm; fi\" >&2\n+ bash -c \"$(GROUPADD_DOCKER) $(USERADD_DOCKER) if test -e /dev/kvm; then chmod a+rw /dev/kvm; fi\" >&2\n@docker commit $(BUILDER_NAME) gvisor.dev/images/builder >&2\n.PHONY: bazel-image\n@@ -170,7 +171,7 @@ bazel-server: bazel-image ## Ensures that the server exists.\n--workdir \"$(CURDIR)\" \\\n$(DOCKER_RUN_OPTIONS) \\\ngvisor.dev/images/builder \\\n- sh -c \"set -x; tail -f --pid=\\$$($(BAZEL) info server_pid) /dev/null\" >&2\n+ bash -c \"set -x; tail -f --pid=\\$$($(BAZEL) info server_pid) /dev/null\" >&2\nelse\nbazel-server:\n@\n@@ -187,7 +188,7 @@ endif\n# The last line is used to prevent terminal shenanigans.\nbuild_paths = \\\n(set -euo pipefail; \\\n- $(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(1)) 2>&1 \\\n+ $(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1)) 2>&1 \\\n| tee /proc/self/fd/2 \\\n| sed -n -e '/^Target/,$$p' \\\n| sed -n -e '/^ \\($(subst /,\\/,$(subst $(SPACE),\\|,$(BUILD_ROOTS)))\\)/p' \\\n" }, { "change_type": "MODIFY", "old_path": "tools/bazeldefs/cc.bzl", "new_path": "tools/bazeldefs/cc.bzl", "diff": "\"\"\"C++ rules.\"\"\"\n-load(\"@bazel_tools//tools/cpp:cc_flags_supplier.bzl\", _cc_flags_supplier = \"cc_flags_supplier\")\nload(\"@rules_cc//cc:defs.bzl\", _cc_binary = \"cc_binary\", _cc_library = \"cc_library\", _cc_proto_library = \"cc_proto_library\", _cc_test = \"cc_test\")\nload(\"@com_github_grpc_grpc//bazel:cc_grpc_library.bzl\", _cc_grpc_library = \"cc_grpc_library\")\ncc_library = _cc_library\n-cc_flags_supplier = _cc_flags_supplier\ncc_proto_library = _cc_proto_library\ncc_test = _cc_test\ncc_toolchain = \"@bazel_tools//tools/cpp:current_cc_toolchain\"\n@@ -14,6 +12,16 @@ gbenchmark = \"@com_google_benchmark//:benchmark\"\ngrpcpp = \"@com_github_grpc_grpc//:grpc++\"\nvdso_linker_option = \"-fuse-ld=gold \"\n+def _cc_flags_supplier_impl(ctx):\n+ variables = platform_common.TemplateVariableInfo({\n+ \"CC_FLAGS\": \"\",\n+ })\n+ return [variables]\n+\n+cc_flags_supplier = rule(\n+ implementation = _cc_flags_supplier_impl,\n+)\n+\ndef cc_grpc_library(name, **kwargs):\n_cc_grpc_library(name = name, grpc_only = True, **kwargs)\n" } ]
Go
Apache License 2.0
google/gvisor
Add ARM smoke test make BAZEL_CONFIG=aarch64 arm-qemu-smoke-test Signed-off-by: Andrei Vagin <[email protected]>
259,858
08.01.2021 08:14:58
28,800
df1b23c8dedfa6b29f74cbd00d04182a9fa01925
Fix sha256 for github-workflow.json. This was not being tested as part of the unit test workflows, and thus was not being hit normally. These tests are also added to the unit tests target.
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -192,7 +192,7 @@ fuse-tests:\n.PHONY: fuse-tests\nunit-tests: ## Local package unit tests in pkg/..., runsc/, tools/.., etc.\n- @$(call test,pkg/... runsc/... tools/...)\n+ @$(call test,//:all pkg/... runsc/... tools/...)\n.PHONY: unit-tests\ntests: ## Runs all unit tests and syscall tests.\n" }, { "change_type": "MODIFY", "old_path": "WORKSPACE", "new_path": "WORKSPACE", "diff": "@@ -210,7 +210,7 @@ http_file(\nhttp_file(\nname = \"github_workflow_schema\",\n- sha256 = \"2c375bb43dbc8b32b1bed46c290d0b70a8fa2aca7a5484dfca1b6e9c38cf9e7a\",\n+ sha256 = \"60603d1095b11d136e04a8b95be83a23ad8044169e46f82f925c320c1cf47a49\",\nurls = [\"https://raw.githubusercontent.com/SchemaStore/schemastore/27612065234778feaac216ce14dd47846fe0a2dd/src/schemas/json/github-workflow.json\"],\n)\n" } ]
Go
Apache License 2.0
google/gvisor
Fix sha256 for github-workflow.json. This was not being tested as part of the unit test workflows, and thus was not being hit normally. These tests are also added to the unit tests target. PiperOrigin-RevId: 350766814
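The WORKSPACE fix above corrects the sha256 pin recorded for the downloaded github-workflow.json schema. As a hedged aside, the sketch below shows one way such a pin could be recomputed locally for any file; this tool is purely illustrative and is not part of the build.

```go
// Sketch: recompute the sha256 pin for a downloaded file, as used for
// http_file entries in WORKSPACE. Purely illustrative tooling.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
)

// fileSHA256 returns the hex-encoded SHA-256 digest of the named file.
func fileSHA256(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	if len(os.Args) != 2 {
		log.Fatalf("usage: %s <file>", os.Args[0])
	}
	sum, err := fileSHA256(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sum)
}
```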
259,858
08.01.2021 09:06:52
28,800
0538ffa8ba6105e83a392c135453eb432f0860ca
Add prefix helper to link to code search results. This is extremely convenient, similar to being able to link to Go package documentation via the canonical package names.
[ { "change_type": "MODIFY", "old_path": "website/cmd/server/main.go", "new_path": "website/cmd/server/main.go", "diff": "@@ -20,6 +20,7 @@ import (\n\"fmt\"\n\"log\"\n\"net/http\"\n+ \"net/url\"\n\"os\"\n\"path\"\n\"regexp\"\n@@ -61,19 +62,37 @@ var redirects = map[string]string{\n// Deprecated, but links continue to work.\n\"/cl\": \"https://gvisor-review.googlesource.com\",\n+\n+ // Access package documentation.\n+ \"/gvisor\": \"https://pkg.go.dev/gvisor.dev/gvisor\",\n+\n+ // Code search root.\n+ \"/cs\": \"https://cs.opensource.google/gvisor/gvisor\",\n}\n-var prefixHelpers = map[string]string{\n- \"change\": \"https://github.com/google/gvisor/commit/%s\",\n- \"issue\": \"https://github.com/google/gvisor/issues/%s\",\n- \"issues\": \"https://github.com/google/gvisor/issues/%s\",\n- \"pr\": \"https://github.com/google/gvisor/pull/%s\",\n+type prefixInfo struct {\n+ baseURL string\n+ checkValidID bool\n+ queryEscape bool\n+}\n+\n+var prefixHelpers = map[string]prefixInfo{\n+ \"change\": {baseURL: \"https://github.com/google/gvisor/commit/%s\", checkValidID: true},\n+ \"issue\": {baseURL: \"https://github.com/google/gvisor/issues/%s\", checkValidID: true},\n+ \"issues\": {baseURL: \"https://github.com/google/gvisor/issues/%s\", checkValidID: true},\n+ \"pr\": {baseURL: \"https://github.com/google/gvisor/pull/%s\", checkValidID: true},\n// Redirects to compatibility docs.\n- \"c/linux/amd64\": \"/docs/user_guide/compatibility/linux/amd64/#%s\",\n+ \"c/linux/amd64\": {baseURL: \"/docs/user_guide/compatibility/linux/amd64/#%s\", checkValidID: true},\n// Deprecated, but links continue to work.\n- \"cl\": \"https://gvisor-review.googlesource.com/c/gvisor/+/%s\",\n+ \"cl\": {baseURL: \"https://gvisor-review.googlesource.com/c/gvisor/+/%s\", checkValidID: true},\n+\n+ // Redirect to source documentation.\n+ \"gvisor\": {baseURL: \"https://pkg.go.dev/gvisor.dev/gvisor/%s\"},\n+\n+ // Redirect to code search, with the path as the query.\n+ \"cs\": {baseURL: \"https://cs.opensource.google/search?q=%s&ss=gvisor\", queryEscape: true},\n}\nvar (\n@@ -147,7 +166,7 @@ func hostRedirectHandler(h http.Handler) http.Handler {\n}\n// prefixRedirectHandler returns a handler that redirects to the given formated url.\n-func prefixRedirectHandler(prefix, baseURL string) http.Handler {\n+func prefixRedirectHandler(prefix string, info prefixInfo) http.Handler {\nreturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\nif p := r.URL.Path; p == prefix {\n// Redirect /prefix/ to /prefix.\n@@ -155,11 +174,14 @@ func prefixRedirectHandler(prefix, baseURL string) http.Handler {\nreturn\n}\nid := r.URL.Path[len(prefix):]\n- if !validID.MatchString(id) {\n+ if info.checkValidID && !validID.MatchString(id) {\nhttp.Error(w, \"Not found\", http.StatusNotFound)\nreturn\n}\n- target := fmt.Sprintf(baseURL, id)\n+ if info.queryEscape {\n+ id = url.QueryEscape(id)\n+ }\n+ target := fmt.Sprintf(info.baseURL, id)\nredirectWithQuery(w, r, target)\n})\n}\n@@ -173,24 +195,13 @@ func redirectHandler(target string) http.Handler {\n// registerRedirects registers redirect http handlers.\nfunc registerRedirects(mux *http.ServeMux) {\n- for prefix, baseURL := range prefixHelpers {\n+ for prefix, info := range prefixHelpers {\np := \"/\" + prefix + \"/\"\n- mux.Handle(p, hostRedirectHandler(wrappedHandler(prefixRedirectHandler(p, baseURL))))\n+ mux.Handle(p, hostRedirectHandler(wrappedHandler(prefixRedirectHandler(p, info))))\n}\nfor path, redirect := range redirects {\nmux.Handle(path, 
hostRedirectHandler(wrappedHandler(redirectHandler(redirect))))\n}\n- registerModuleDocRedirects(http.DefaultServeMux)\n-}\n-\n-// registerModuleDocs registers redirect http handlers to redirect module paths\n-// directly to their docs on pkg.go.dev.\n-func registerModuleDocRedirects(mux *http.ServeMux) {\n- const prefix = \"/gvisor/\"\n- mux.Handle(prefix, hostRedirectHandler(wrappedHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n- pkg := r.URL.Path[len(prefix):]\n- redirectWithQuery(w, r, fmt.Sprintf(\"https://pkg.go.dev/gvisor.dev/gvisor/%s\", pkg))\n- }))))\n}\n// registerStatic registers static file handlers.\n" } ]
Go
Apache License 2.0
google/gvisor
Add prefix helper to link to code search results. This is extremely convenient, similar to being able to link to Go package documentation via the canonical package names. PiperOrigin-RevId: 350774913
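The refactor above replaces the plain prefix-to-URL map with a prefixInfo struct so that some prefixes validate the trailing ID while others URL-escape it into a search query. The sketch below condenses that logic; the validID pattern is a simplified stand-in (the real expression is defined elsewhere in main.go), the prefix table is trimmed to two entries, and the standalone server is illustrative.

```go
// Sketch of the prefix redirect logic after the refactor above: some prefixes
// validate the trailing ID, others URL-escape it into a search query.
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
	"regexp"
)

type prefixInfo struct {
	baseURL      string
	checkValidID bool
	queryEscape  bool
}

var validID = regexp.MustCompile(`^[A-Za-z0-9./]*$`) // Simplified stand-in.

var prefixHelpers = map[string]prefixInfo{
	"issue": {baseURL: "https://github.com/google/gvisor/issues/%s", checkValidID: true},
	"cs":    {baseURL: "https://cs.opensource.google/search?q=%s&ss=gvisor", queryEscape: true},
}

// prefixRedirectHandler redirects /<prefix>/<id> to the formatted base URL.
func prefixRedirectHandler(prefix string, info prefixInfo) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		id := r.URL.Path[len(prefix):]
		if info.checkValidID && !validID.MatchString(id) {
			http.Error(w, "Not found", http.StatusNotFound)
			return
		}
		if info.queryEscape {
			id = url.QueryEscape(id)
		}
		http.Redirect(w, r, fmt.Sprintf(info.baseURL, id), http.StatusFound)
	})
}

func main() {
	mux := http.NewServeMux()
	for prefix, info := range prefixHelpers {
		p := "/" + prefix + "/"
		mux.Handle(p, prefixRedirectHandler(p, info))
	}
	log.Fatal(http.ListenAndServe("localhost:8080", mux))
}
```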
259,858
08.01.2021 18:01:05
28,800
0c99ab70905fa4eaf8bc7b0ca846e12d7bbc6e39
Support releasing aarch64 builds. This change works around an issue in rules_pkg, described here:
[ { "change_type": "MODIFY", "old_path": ".buildkite/pipeline.yaml", "new_path": ".buildkite/pipeline.yaml", "diff": "@@ -40,7 +40,10 @@ steps:\n# Release workflow.\n- <<: *common\nlabel: \":ship: Release tests\"\n- commands: make release\n+ commands:\n+ - make artifacts/x86_64\n+ - make BAZEL_OPTIONS=--config=cross-aarch64 artifacts/aarch64\n+ - make release\n# Basic unit tests.\n- <<: *common\n" }, { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -406,13 +406,15 @@ website-deploy: website-push ## Deploy a new version of the website.\n## This builds a local apt repository. The following variables may be set:\n## RELEASE_ROOT - The repository root (default: \"repo\" directory).\n## RELEASE_KEY - The repository GPG private key file (default: dummy key is created).\n+## RELEASE_ARTIFACTS - The release artifacts directory. May contain multiple.\n## RELEASE_NIGHTLY - Set to true if a nightly release (default: false).\n## RELEASE_COMMIT - The commit or Change-Id for the release (needed for tag).\n## RELEASE_NAME - The name of the release in the proper format (needed for tag).\n## RELEASE_NOTES - The file containing release notes (needed for tag).\n##\n-RELEASE_ROOT := $(CURDIR)/repo\n+RELEASE_ROOT := repo\nRELEASE_KEY := repo.key\n+RELEASE_ARTIFACTS := artifacts\nRELEASE_NIGHTLY := false\nRELEASE_COMMIT :=\nRELEASE_NAME :=\n@@ -433,15 +435,16 @@ $(RELEASE_KEY):\ngpg --batch $(GPG_TEST_OPTIONS) --export-secret-keys --no-default-keyring --secret-keyring $$T > $@; \\\nrc=$$?; rm -f $$T $$C; exit $$rc\n-release: $(RELEASE_KEY) ## Builds a release.\n- @mkdir -p $(RELEASE_ROOT)\n- @export T=$$(mktemp -d --tmpdir release.XXXXXX); \\\n- $(call copy,//runsc:runsc,$$T) && \\\n- $(call copy,//shim/v1:gvisor-containerd-shim,$$T) && \\\n- $(call copy,//shim/v2:containerd-shim-runsc-v1,$$T) && \\\n- $(call copy,//debian:debian,$$T) && \\\n- NIGHTLY=$(RELEASE_NIGHTLY) tools/make_release.sh $(RELEASE_KEY) $(RELEASE_ROOT) $$T/*; \\\n- rc=$$?; rm -rf $$T; exit $$rc\n+$(RELEASE_ARTIFACTS)/%:\n+ @mkdir -p $@\n+ @$(call copy,//runsc:runsc,$@)\n+ @$(call copy,//shim/v1:gvisor-containerd-shim,$@)\n+ @$(call copy,//shim/v2:containerd-shim-runsc-v1,$@)\n+ @$(call copy,//debian:debian,$@)\n+\n+release: $(RELEASE_KEY) $(RELEASE_ARTIFACTS)/$(ARCH)\n+ @rm -rf $(RELEASE_ROOT) && mkdir -p $(RELEASE_ROOT)\n+ @NIGHTLY=$(RELEASE_NIGHTLY) tools/make_release.sh $(RELEASE_KEY) $(RELEASE_ROOT) $$(find $(RELEASE_ARTIFACTS) -type f)\n.PHONY: release\ntag: ## Creates and pushes a release tag.\n" }, { "change_type": "MODIFY", "old_path": "WORKSPACE", "new_path": "WORKSPACE", "diff": "@@ -40,10 +40,10 @@ http_archive(\n# binaries of symbols, which we don't want.\n\"//tools:rules_go.patch\",\n],\n- sha256 = \"b725e6497741d7fc2d55fcc29a276627d10e43fa5d0bb692692890ae30d98d00\",\n+ sha256 = \"a515569b4903776eae90ac2696b34ee1dd45600cf9dfd7d16475e2df32867521\",\nurls = [\n- \"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.24.3/rules_go-v0.24.3.tar.gz\",\n- \"https://github.com/bazelbuild/rules_go/releases/download/v0.24.3/rules_go-v0.24.3.tar.gz\",\n+ \"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.24.10/rules_go-v0.24.10.tar.gz\",\n+ \"https://github.com/bazelbuild/rules_go/releases/download/v0.24.10/rules_go-v0.24.10.tar.gz\",\n],\n)\n@@ -144,8 +144,8 @@ rbe_autoconfig(name = \"rbe_default\")\nhttp_archive(\nname = \"rules_pkg\",\n- sha256 = \"5bdc04987af79bd27bc5b00fe30f59a858f77ffa0bd2d8143d5b31ad8b1bd71c\",\n- url = 
\"https://github.com/bazelbuild/rules_pkg/releases/download/0.2.0/rules_pkg-0.2.0.tar.gz\",\n+ sha256 = \"6b5969a7acd7b60c02f816773b06fcf32fbe8ba0c7919ccdc2df4f8fb923804a\",\n+ url = \"https://github.com/bazelbuild/rules_pkg/releases/download/0.3.0/rules_pkg-0.3.0.tar.gz\",\n)\nload(\"@rules_pkg//:deps.bzl\", \"rules_pkg_dependencies\")\n" }, { "change_type": "MODIFY", "old_path": "debian/BUILD", "new_path": "debian/BUILD", "diff": "-load(\"//tools:defs.bzl\", \"pkg_deb\", \"pkg_tar\")\n+load(\"//tools:defs.bzl\", \"pkg_deb\", \"pkg_tar\", \"select_arch\", \"version\")\npackage(licenses = [\"notice\"])\n@@ -22,27 +22,16 @@ pkg_tar(\n],\n)\n-genrule(\n- name = \"debian-version\",\n- # Note that runsc must appear in the srcs parameter and not the tools\n- # parameter, otherwise it will not be stamped. This is reasonable, as tools\n- # may be encoded differently in the build graph (cached more aggressively\n- # because they are assumes to be hermetic).\n- srcs = [\"//runsc\"],\n- outs = [\"version.txt\"],\n- # Note that the little dance here is necessary because files in the $(SRCS)\n- # attribute are not executable by default, and we can't touch in place.\n- cmd = \"cp $(location //runsc:runsc) $(@D)/runsc && \\\n- chmod a+x $(@D)/runsc && \\\n- $(@D)/runsc -version | grep version | sed 's/^[^0-9]*//' > $@ && \\\n- rm -f $(@D)/runsc\",\n- stamp = 1,\n-)\n-\npkg_deb(\nname = \"debian\",\n- architecture = \"amd64\",\n+ out = \"runsc-latest.deb\",\n+ architecture = select_arch(\n+ amd64 = \"amd64\",\n+ arm64 = \"arm64\",\n+ ),\n+ changes = \"runsc.changes\",\ndata = \":debian-data\",\n+ deb = \"runsc.deb\",\n# Note that the description_file will be flatten (all newlines removed),\n# and therefore it is kept to a simple one-line description. The expected\n# format for debian packages is \"short summary\\nLonger explanation of\n@@ -52,7 +41,7 @@ pkg_deb(\nmaintainer = \"The gVisor Authors <[email protected]>\",\npackage = \"runsc\",\npostinst = \"postinst.sh\",\n- version_file = \":version.txt\",\n+ version_file = version,\nvisibility = [\n\"//visibility:public\",\n],\n" }, { "change_type": "MODIFY", "old_path": "g3doc/user_guide/install.md", "new_path": "g3doc/user_guide/install.md", "diff": "@@ -12,7 +12,8 @@ To download and install the latest release manually follow these steps:\n```bash\n(\nset -e\n- URL=https://storage.googleapis.com/gvisor/releases/release/latest\n+ ARCH=$(uname -m)\n+ URL=https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}\nwget ${URL}/runsc ${URL}/runsc.sha512 \\\n${URL}/gvisor-containerd-shim ${URL}/gvisor-containerd-shim.sha512 \\\n${URL}/containerd-shim-runsc-v1 ${URL}/containerd-shim-runsc-v1.sha512\n@@ -29,7 +30,7 @@ To install gVisor as a Docker runtime, run the following commands:\n```bash\n/usr/local/bin/runsc install\n-sudo systemctl restart docker\n+sudo systemctl reload docker\ndocker run --rm --runtime=runsc hello-world\n```\n@@ -81,13 +82,15 @@ latest release is recommended.\nAfter selecting an appropriate release channel from the options below, proceed\nto the preferred installation mechanism: manual or from an `apt` repository.\n+> Note: Older releases are still available but may not have an `${ARCH}`\n+> component in the URL. 
These release were available for `x86_64` only.\n+\n### HEAD\nBinaries are available for every commit on the `master` branch, and are\navailable at the following URL:\n-`https://storage.googleapis.com/gvisor/releases/master/latest/runsc`\n-`https://storage.googleapis.com/gvisor/releases/master/latest/runsc.sha512`\n+`https://storage.googleapis.com/gvisor/releases/master/latest/${ARCH}`\nYou can use this link with the steps described in\n[Install latest release](#install-latest).\n@@ -103,15 +106,14 @@ sudo add-apt-repository \"deb https://storage.googleapis.com/gvisor/releases mast\nNightly releases are built most nights from the master branch, and are available\nat the following URL:\n-`https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc`\n-`https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc.sha512`\n+`https://storage.googleapis.com/gvisor/releases/nightly/latest/${ARCH}`\nYou can use this link with the steps described in\n[Install latest release](#install-latest).\nSpecific nightly releases can be found at:\n-`https://storage.googleapis.com/gvisor/releases/nightly/${yyyy-mm-dd}/runsc`\n+`https://storage.googleapis.com/gvisor/releases/nightly/${yyyy-mm-dd}/${ARCH}`\nNote that a release may not be available for every day.\n@@ -125,7 +127,7 @@ sudo add-apt-repository \"deb https://storage.googleapis.com/gvisor/releases nigh\nThe latest official release is available at the following URL:\n-`https://storage.googleapis.com/gvisor/releases/release/latest`\n+`https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}`\nYou can use this link with the steps described in\n[Install latest release](#install-latest).\n@@ -140,7 +142,7 @@ sudo add-apt-repository \"deb https://storage.googleapis.com/gvisor/releases rele\nA given release release is available at the following URL:\n-`https://storage.googleapis.com/gvisor/releases/release/${yyyymmdd}`\n+`https://storage.googleapis.com/gvisor/releases/release/${yyyymmdd}/${ARCH}`\nYou can use this link with the steps described in\n[Install latest release](#install-latest).\n@@ -161,7 +163,7 @@ sudo add-apt-repository \"deb https://storage.googleapis.com/gvisor/releases yyyy\nA given point release is available at the following URL:\n-`https://storage.googleapis.com/gvisor/releases/release/${yyyymmdd}.${rc}`\n+`https://storage.googleapis.com/gvisor/releases/release/${yyyymmdd}.${rc}/${ARCH}`\nYou can use this link with the steps described in\n[Install latest release](#install-latest).\n" }, { "change_type": "MODIFY", "old_path": "tools/bazel.mk", "new_path": "tools/bazel.mk", "diff": "@@ -160,6 +160,12 @@ bazel-image: load-default ## Ensures that the local builder exists.\n@docker commit $(BUILDER_NAME) gvisor.dev/images/builder >&2\n.PHONY: bazel-image\n+# Note: when starting the bazel server, we tie the life of the container to the\n+# bazel server's life, so that the container disappears naturally. We also call\n+# bazel shutdown prior to startup, to ensure that any existing bazel instance in\n+# the workspace (perhaps of a different architecture) stops. 
If the instance is\n+# compatible and the container is already running, then the wrapper if statement\n+# here will succeed, and we wouldn't have needed a new server at all.\nifneq (true,$(shell $(wrapper echo true)))\nbazel-server: bazel-image ## Ensures that the server exists.\n@$(call header,DOCKER RUN)\n@@ -171,7 +177,7 @@ bazel-server: bazel-image ## Ensures that the server exists.\n--workdir \"$(CURDIR)\" \\\n$(DOCKER_RUN_OPTIONS) \\\ngvisor.dev/images/builder \\\n- bash -c \"set -x; tail -f --pid=\\$$($(BAZEL) info server_pid) /dev/null\" >&2\n+ bash -c \"set -x; $(BAZEL) shutdown; tail -f --pid=\\$$($(BAZEL) info server_pid) /dev/null\"\nelse\nbazel-server:\n@\n" }, { "change_type": "MODIFY", "old_path": "tools/bazeldefs/BUILD", "new_path": "tools/bazeldefs/BUILD", "diff": "@@ -37,3 +37,11 @@ config_setting(\n},\nvisibility = [\"//visibility:private\"],\n)\n+\n+genrule(\n+ name = \"version\",\n+ outs = [\"version.txt\"],\n+ cmd = \"cat bazel-out/stable-status.txt | grep STABLE_VERSION | cut -d' ' -f2- >$@\",\n+ stamp = True,\n+ visibility = [\"//:sandbox\"],\n+)\n" }, { "change_type": "MODIFY", "old_path": "tools/bazeldefs/defs.bzl", "new_path": "tools/bazeldefs/defs.bzl", "diff": "@@ -7,6 +7,7 @@ build_test = _build_test\nbzl_library = _bzl_library\nmore_shards = 4\nmost_shards = 8\n+version = \"//tools/bazeldefs:version\"\ndef short_path(path):\nreturn path\n" }, { "change_type": "MODIFY", "old_path": "tools/bazeldefs/pkg.bzl", "new_path": "tools/bazeldefs/pkg.bzl", "diff": "\"\"\"Packaging rules.\"\"\"\n-load(\"@rules_pkg//:pkg.bzl\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\")\n+# N.B. We refer to pkg_deb_impl to avoid the macro, which cannot use select.\n+load(\"@rules_pkg//:pkg.bzl\", _pkg_deb = \"pkg_deb_impl\", _pkg_tar = \"pkg_tar\")\npkg_deb = _pkg_deb\npkg_tar = _pkg_tar\n" }, { "change_type": "MODIFY", "old_path": "tools/defs.bzl", "new_path": "tools/defs.bzl", "diff": "@@ -8,7 +8,7 @@ change for Google-internal and bazel-compatible rules.\nload(\"//tools/go_stateify:defs.bzl\", \"go_stateify\")\nload(\"//tools/go_marshal:defs.bzl\", \"go_marshal\", \"marshal_deps\", \"marshal_test_deps\")\nload(\"//tools/nogo:defs.bzl\", \"nogo_test\")\n-load(\"//tools/bazeldefs:defs.bzl\", _arch_genrule = \"arch_genrule\", _build_test = \"build_test\", _bzl_library = \"bzl_library\", _coreutil = \"coreutil\", _default_installer = \"default_installer\", _default_net_util = \"default_net_util\", _more_shards = \"more_shards\", _most_shards = \"most_shards\", _proto_library = \"proto_library\", _select_arch = \"select_arch\", _select_system = \"select_system\", _short_path = \"short_path\")\n+load(\"//tools/bazeldefs:defs.bzl\", _arch_genrule = \"arch_genrule\", _build_test = \"build_test\", _bzl_library = \"bzl_library\", _coreutil = \"coreutil\", _default_installer = \"default_installer\", _default_net_util = \"default_net_util\", _more_shards = \"more_shards\", _most_shards = \"most_shards\", _proto_library = \"proto_library\", _select_arch = \"select_arch\", _select_system = \"select_system\", _short_path = \"short_path\", _version = \"version\")\nload(\"//tools/bazeldefs:cc.bzl\", _cc_binary = \"cc_binary\", _cc_flags_supplier = \"cc_flags_supplier\", _cc_grpc_library = \"cc_grpc_library\", _cc_library = \"cc_library\", _cc_proto_library = \"cc_proto_library\", _cc_test = \"cc_test\", _cc_toolchain = \"cc_toolchain\", _gbenchmark = \"gbenchmark\", _grpcpp = \"grpcpp\", _gtest = \"gtest\", _vdso_linker_option = \"vdso_linker_option\")\nload(\"//tools/bazeldefs:go.bzl\", _gazelle 
= \"gazelle\", _go_binary = \"go_binary\", _go_embed_data = \"go_embed_data\", _go_grpc_and_proto_libraries = \"go_grpc_and_proto_libraries\", _go_library = \"go_library\", _go_path = \"go_path\", _go_proto_library = \"go_proto_library\", _go_test = \"go_test\", _select_goarch = \"select_goarch\", _select_goos = \"select_goos\")\nload(\"//tools/bazeldefs:pkg.bzl\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\")\n@@ -27,6 +27,7 @@ short_path = _short_path\ncoreutil = _coreutil\nmore_shards = _more_shards\nmost_shards = _most_shards\n+version = _version\n# C++ rules.\ncc_binary = _cc_binary\n" }, { "change_type": "MODIFY", "old_path": "tools/make_apt.sh", "new_path": "tools/make_apt.sh", "diff": "@@ -18,9 +18,16 @@ if [[ \"$#\" -le 3 ]]; then\necho \"usage: $0 <private-key> <suite> <root> <packages...>\"\nexit 1\nfi\n-declare -r private_key=$(readlink -e \"$1\"); shift\n-declare -r suite=\"$1\"; shift\n-declare -r root=\"$1\"; shift\n+declare private_key\n+declare suite\n+declare root\n+private_key=\"$(readlink -e \"$1\")\"\n+suite=\"$2\"\n+root=\"$(readlink -m \"$3\")\"\n+readonly private_key\n+readonly suite\n+readonly root\n+shift; shift; shift # For \"$@\" below.\n# Ensure that we have the correct packages installed.\nfunction apt_install() {\n@@ -56,9 +63,15 @@ mkdir -p \"${release}\"\n# Create a temporary keyring, and ensure it is cleaned up.\n# Using separate homedir allows us to install apt repositories multiple times\n# using the same key. This is a limitation in GnuPG pre-2.1.\n-declare -r keyring=$(mktemp /tmp/keyringXXXXXX.gpg)\n-declare -r homedir=$(mktemp -d /tmp/homedirXXXXXX)\n-declare -r gpg_opts=(\"--no-default-keyring\" \"--secret-keyring\" \"${keyring}\" \"--homedir\" \"${homedir}\")\n+declare keyring\n+declare homedir\n+declare gpg_opts\n+keyring=\"$(mktemp /tmp/keyringXXXXXX.gpg)\"\n+homedir=\"$(mktemp -d /tmp/homedirXXXXXX)\"\n+gpg_opts=(\"--no-default-keyring\" \"--secret-keyring\" \"${keyring}\" \"--homedir\" \"${homedir}\")\n+readonly keyring\n+readonly homedir\n+readonly gpg_opts\ncleanup() {\nrm -rf \"${keyring}\" \"${homedir}\"\n}\n@@ -73,40 +86,29 @@ gpg \"${gpg_opts[@]}\" --import \"${private_key}\" || \\\n# Copy the packages into the root.\nfor pkg in \"$@\"; do\n- ext=${pkg##*.}\n- name=$(basename \"${pkg}\" \".${ext}\")\n- arch=${name##*_}\n- if [[ \"${name}\" == \"${arch}\" ]]; then\n- continue # Not a regular package.\n+ if ! 
[[ -f \"${pkg}\" ]]; then\n+ continue\nfi\n- if [[ \"${pkg}\" =~ ^.*\\.deb$ ]]; then\n- # Extract from the debian file.\n- version=$(dpkg --info \"${pkg}\" | grep -E 'Version:' | cut -d':' -f2)\n- elif [[ \"${pkg}\" =~ ^.*\\.changes$ ]]; then\n- # Extract from the changes file.\n- version=$(grep -E 'Version:' \"${pkg}\" | cut -d':' -f2)\n- else\n- # Unsupported file type.\n- echo \"Unknown file type: ${pkg}\"\n- exit 1\n+ ext=${pkg##*.}\n+ if [[ \"${ext}\" != \"deb\" ]]; then\n+ continue\nfi\n- # The package may already exist, in which case we leave it alone.\n- version=${version// /} # Trim whitespace.\n+ # Extract package information.\n+ name=$(basename \"${pkg}\" \".${ext}\")\n+ arch=$(dpkg --info \"${pkg}\" | grep 'Architecture:' | cut -d':' -f2)\n+ version=$(dpkg --info \"${pkg}\" | grep 'Version:' | cut -d':' -f2)\n+ arch=${arch// /} # Trim whitespace.\n+ version=${version// /} # Ditto.\ndestdir=\"${root}/pool/${version}/binary-${arch}\"\n- target=\"${destdir}/${name}.${ext}\"\n- if [[ -f \"${target}\" ]]; then\n- continue\n- fi\n# Copy & sign the package.\nmkdir -p \"${destdir}\"\n- cp -a \"${pkg}\" \"${target}\"\n- chmod 0644 \"${target}\"\n- if [[ \"${ext}\" == \"deb\" ]]; then\n+ cp -a -L \"$(dirname \"${pkg}\")/${name}.deb\" \"${destdir}\"\n+ cp -a -L \"$(dirname \"${pkg}\")/${name}.changes\" \"${destdir}\"\n+ chmod 0644 \"${destdir}\"/\"${name}\".*\n# We use [*] here to expand the gpg_opts array into a single shell-word.\n- dpkg-sig -g \"${gpg_opts[*]}\" --sign builder \"${target}\"\n- fi\n+ dpkg-sig -g \"${gpg_opts[*]}\" --sign builder \"${destdir}/${name}.deb\"\ndone\n# Build the package list.\n" }, { "change_type": "MODIFY", "old_path": "tools/make_release.sh", "new_path": "tools/make_release.sh", "diff": "@@ -38,12 +38,13 @@ done\n# install_raw installs raw artifacts.\ninstall_raw() {\n- mkdir -p \"${root}/$1\"\nfor binary in \"${binaries[@]}\"; do\n- # Copy the raw file & generate a sha512sum.\n+ # Copy the raw file & generate a sha512sum, sorted by architecture.\n+ arch=$(file \"${binary}\" | cut -d',' -f2 | awk '{print $NF}' | tr '-' '_')\nname=$(basename \"${binary}\")\n- cp -f \"${binary}\" \"${root}/$1\"\n- (cd \"${root}/$1\" && sha512sum \"${name}\" > \"${name}.sha512\")\n+ mkdir -p \"${root}/$1/${arch}\"\n+ cp -f \"${binary}\" \"${root}/$1/${arch}\"\n+ (cd \"${root}/$1/${arch}\" && sha512sum \"${name}\" > \"${name}.sha512\")\ndone\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Support releasing aarch64 builds. This change works around an issue in rules_pkg, described here: https://github.com/bazelbuild/rules_pkg/pull/263 PiperOrigin-RevId: 350869030
259,858
11.01.2021 13:15:38
28,800
4c4de66443174f2ed7f4fa533a1d09c709be9427
Make ilist split safe. This allows for use in restricted contexts. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/ilist/list.go", "new_path": "pkg/ilist/list.go", "diff": "@@ -72,16 +72,22 @@ func (l *List) Reset() {\n}\n// Empty returns true iff the list is empty.\n+//\n+//go:nosplit\nfunc (l *List) Empty() bool {\nreturn l.head == nil\n}\n// Front returns the first element of list l or nil.\n+//\n+//go:nosplit\nfunc (l *List) Front() Element {\nreturn l.head\n}\n// Back returns the last element of list l or nil.\n+//\n+//go:nosplit\nfunc (l *List) Back() Element {\nreturn l.tail\n}\n@@ -89,6 +95,8 @@ func (l *List) Back() Element {\n// Len returns the number of elements in the list.\n//\n// NOTE: This is an O(n) operation.\n+//\n+//go:nosplit\nfunc (l *List) Len() (count int) {\nfor e := l.Front(); e != nil; e = (ElementMapper{}.linkerFor(e)).Next() {\ncount++\n@@ -97,6 +105,8 @@ func (l *List) Len() (count int) {\n}\n// PushFront inserts the element e at the front of list l.\n+//\n+//go:nosplit\nfunc (l *List) PushFront(e Element) {\nlinker := ElementMapper{}.linkerFor(e)\nlinker.SetNext(l.head)\n@@ -111,6 +121,8 @@ func (l *List) PushFront(e Element) {\n}\n// PushBack inserts the element e at the back of list l.\n+//\n+//go:nosplit\nfunc (l *List) PushBack(e Element) {\nlinker := ElementMapper{}.linkerFor(e)\nlinker.SetNext(nil)\n@@ -125,6 +137,8 @@ func (l *List) PushBack(e Element) {\n}\n// PushBackList inserts list m at the end of list l, emptying m.\n+//\n+//go:nosplit\nfunc (l *List) PushBackList(m *List) {\nif l.head == nil {\nl.head = m.head\n@@ -140,6 +154,8 @@ func (l *List) PushBackList(m *List) {\n}\n// InsertAfter inserts e after b.\n+//\n+//go:nosplit\nfunc (l *List) InsertAfter(b, e Element) {\nbLinker := ElementMapper{}.linkerFor(b)\neLinker := ElementMapper{}.linkerFor(e)\n@@ -158,6 +174,8 @@ func (l *List) InsertAfter(b, e Element) {\n}\n// InsertBefore inserts e before a.\n+//\n+//go:nosplit\nfunc (l *List) InsertBefore(a, e Element) {\naLinker := ElementMapper{}.linkerFor(a)\neLinker := ElementMapper{}.linkerFor(e)\n@@ -175,6 +193,8 @@ func (l *List) InsertBefore(a, e Element) {\n}\n// Remove removes e from l.\n+//\n+//go:nosplit\nfunc (l *List) Remove(e Element) {\nlinker := ElementMapper{}.linkerFor(e)\nprev := linker.Prev()\n@@ -207,21 +227,29 @@ type Entry struct {\n}\n// Next returns the entry that follows e in the list.\n+//\n+//go:nosplit\nfunc (e *Entry) Next() Element {\nreturn e.next\n}\n// Prev returns the entry that precedes e in the list.\n+//\n+//go:nosplit\nfunc (e *Entry) Prev() Element {\nreturn e.prev\n}\n// SetNext assigns 'entry' as the entry that follows e in the list.\n+//\n+//go:nosplit\nfunc (e *Entry) SetNext(elem Element) {\ne.next = elem\n}\n// SetPrev assigns 'entry' as the entry that precedes e in the list.\n+//\n+//go:nosplit\nfunc (e *Entry) SetPrev(elem Element) {\ne.prev = elem\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Make ilist split safe. This allows for use in restricted contexts. Updates #5039 PiperOrigin-RevId: 351220385
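The change above marks the intrusive-list methods with //go:nosplit, which suppresses the stack-growth prologue so they can be called from restricted contexts. The toy list below is a simplified stand-in rather than the generated ilist code; it only shows the directive in the same comment position used by the diff.

```go
// Toy illustration of the go:nosplit directive on intrusive-list style methods.
// Suppressing the stack-split prologue is what allows these methods to run in
// restricted (no-stack-growth) contexts.
package main

import "fmt"

type entry struct {
	next *entry
	prev *entry
	val  int
}

type list struct {
	head *entry
	tail *entry
}

// Empty reports whether the list has no elements.
//
//go:nosplit
func (l *list) Empty() bool {
	return l.head == nil
}

// PushBack appends e to the back of the list.
//
//go:nosplit
func (l *list) PushBack(e *entry) {
	e.prev = l.tail
	e.next = nil
	if l.tail != nil {
		l.tail.next = e
	} else {
		l.head = e
	}
	l.tail = e
}

func main() {
	var l list
	for i := 0; i < 3; i++ {
		l.PushBack(&entry{val: i})
	}
	for e := l.head; e != nil; e = e.next {
		fmt.Println(e.val)
	}
	fmt.Println("empty:", l.Empty())
}
```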
259,992
11.01.2021 16:23:44
28,800
7e462a1c7f56b9b8439ad1ac92906bd8dd376ab7
OCI spec may contain duplicate environment variables Closes
[ { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -440,6 +440,10 @@ func createProcessArgs(id string, spec *specs.Spec, creds *auth.Credentials, k *\nif err != nil {\nreturn kernel.CreateProcessArgs{}, fmt.Errorf(\"creating limits: %v\", err)\n}\n+ env, err := specutils.ResolveEnvs(spec.Process.Env)\n+ if err != nil {\n+ return kernel.CreateProcessArgs{}, fmt.Errorf(\"resolving env: %w\", err)\n+ }\nwd := spec.Process.Cwd\nif wd == \"\" {\n@@ -449,7 +453,7 @@ func createProcessArgs(id string, spec *specs.Spec, creds *auth.Credentials, k *\n// Create the process arguments.\nprocArgs := kernel.CreateProcessArgs{\nArgv: spec.Process.Args,\n- Envv: spec.Process.Env,\n+ Envv: env,\nWorkingDirectory: wd,\nCredentials: creds,\nUmask: 0022,\n@@ -933,6 +937,11 @@ func (l *Loader) executeAsync(args *control.ExecArgs) (kernel.ThreadID, error) {\n}\n}\n+ args.Envv, err = specutils.ResolveEnvs(args.Envv)\n+ if err != nil {\n+ return 0, fmt.Errorf(\"resolving env: %w\", err)\n+ }\n+\n// Add the HOME environment variable if it is not already set.\nif kernel.VFS2Enabled {\nroot := args.MountNamespaceVFS2.Root()\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/exec.go", "new_path": "runsc/cmd/exec.go", "diff": "@@ -118,14 +118,14 @@ func (ex *Exec) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n}\nlog.Debugf(\"Exec arguments: %+v\", e)\n- log.Debugf(\"Exec capablities: %+v\", e.Capabilities)\n+ log.Debugf(\"Exec capabilities: %+v\", e.Capabilities)\n// Replace empty settings with defaults from container.\nif e.WorkingDirectory == \"\" {\ne.WorkingDirectory = c.Spec.Process.Cwd\n}\nif e.Envv == nil {\n- e.Envv, err = resolveEnvs(c.Spec.Process.Env, ex.env)\n+ e.Envv, err = specutils.ResolveEnvs(c.Spec.Process.Env, ex.env)\nif err != nil {\nFatalf(\"getting environment variables: %v\", err)\n}\n@@ -382,31 +382,6 @@ func argsFromProcess(p *specs.Process, enableRaw bool) (*control.ExecArgs, error\n}, nil\n}\n-// resolveEnvs transforms lists of environment variables into a single list of\n-// environment variables. If a variable is defined multiple times, the last\n-// value is used.\n-func resolveEnvs(envs ...[]string) ([]string, error) {\n- // First create a map of variable names to values. 
This removes any\n- // duplicates.\n- envMap := make(map[string]string)\n- for _, env := range envs {\n- for _, str := range env {\n- parts := strings.SplitN(str, \"=\", 2)\n- if len(parts) != 2 {\n- return nil, fmt.Errorf(\"invalid variable: %s\", str)\n- }\n- envMap[parts[0]] = parts[1]\n- }\n- }\n- // Reassemble envMap into a list of environment variables of the form\n- // NAME=VALUE.\n- env := make([]string, 0, len(envMap))\n- for k, v := range envMap {\n- env = append(env, fmt.Sprintf(\"%s=%s\", k, v))\n- }\n- return env, nil\n-}\n-\n// capabilities takes a list of capabilities as strings and returns an\n// auth.TaskCapabilities struct with those capabilities in every capability set.\n// This mimics runc's behavior.\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/multi_container_test.go", "new_path": "runsc/container/multi_container_test.go", "diff": "@@ -1803,3 +1803,91 @@ func TestMultiContainerEvent(t *testing.T) {\n}\n}\n}\n+\n+// Tests that duplicate variables in the spec are merged into a single one.\n+func TestDuplicateEnvVariable(t *testing.T) {\n+ conf := testutil.TestConfig(t)\n+\n+ rootDir, cleanup, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer cleanup()\n+ conf.RootDir = rootDir\n+\n+ // Create files to dump `env` output.\n+ files := [3]*os.File{}\n+ for i := 0; i < len(files); i++ {\n+ var err error\n+ files[i], err = ioutil.TempFile(testutil.TmpDir(), \"env-var-test\")\n+ if err != nil {\n+ t.Fatalf(\"creating temp file: %v\", err)\n+ }\n+ defer files[i].Close()\n+ defer os.Remove(files[i].Name())\n+ }\n+\n+ // Setup the containers. Use root container to test exec too.\n+ cmd1 := fmt.Sprintf(\"env > %q; sleep 1000\", files[0].Name())\n+ cmd2 := fmt.Sprintf(\"env > %q\", files[1].Name())\n+ cmdExec := fmt.Sprintf(\"env > %q\", files[2].Name())\n+ testSpecs, ids := createSpecs([]string{\"/bin/bash\", \"-c\", cmd1}, []string{\"/bin/bash\", \"-c\", cmd2})\n+ testSpecs[0].Process.Env = append(testSpecs[0].Process.Env, \"VAR=foo\", \"VAR=bar\")\n+ testSpecs[1].Process.Env = append(testSpecs[1].Process.Env, \"VAR=foo\", \"VAR=bar\")\n+\n+ containers, cleanup, err := startContainers(conf, testSpecs, ids)\n+ if err != nil {\n+ t.Fatalf(\"error starting containers: %v\", err)\n+ }\n+ defer cleanup()\n+\n+ // Wait for the `env` from the root container to finish.\n+ expectedPL := []*control.Process{\n+ newProcessBuilder().Cmd(\"bash\").Process(),\n+ newProcessBuilder().Cmd(\"sleep\").Process(),\n+ }\n+ if err := waitForProcessList(containers[0], expectedPL); err != nil {\n+ t.Errorf(\"failed to wait for sleep to start: %v\", err)\n+ }\n+ if ws, err := containers[1].Wait(); err != nil {\n+ t.Errorf(\"failed to wait container 1: %v\", err)\n+ } else if es := ws.ExitStatus(); es != 0 {\n+ t.Errorf(\"container %s exited with non-zero status: %v\", containers[1].ID, es)\n+ }\n+\n+ execArgs := &control.ExecArgs{\n+ Filename: \"/bin/bash\",\n+ Argv: []string{\"/bin/bash\", \"-c\", cmdExec},\n+ Envv: []string{\"VAR=foo\", \"VAR=bar\"},\n+ }\n+ if ws, err := containers[0].executeSync(execArgs); err != nil || ws.ExitStatus() != 0 {\n+ t.Fatalf(\"exec failed, ws: %v, err: %v\", ws, err)\n+ }\n+\n+ // Now read and check that none of the env has repeated values.\n+ for _, file := range files {\n+ out, err := ioutil.ReadAll(file)\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+ t.Logf(\"Checking env %q:\\n%s\", file.Name(), out)\n+ envs := make(map[string]string)\n+ for _, line := range strings.Split(string(out), \"\\n\") 
{\n+ if len(line) == 0 {\n+ continue\n+ }\n+ envVar := strings.SplitN(line, \"=\", 2)\n+ if len(envVar) != 2 {\n+ t.Fatalf(\"invalid env variable: %s\", line)\n+ }\n+ key := envVar[0]\n+ if val, ok := envs[key]; ok {\n+ t.Errorf(\"env variable %q is duplicated: %q and %q\", key, val, envVar[1])\n+ }\n+ envs[key] = envVar[1]\n+ }\n+ if _, ok := envs[\"VAR\"]; !ok {\n+ t.Errorf(\"variable VAR missing: %v\", envs)\n+ }\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/specutils/specutils.go", "new_path": "runsc/specutils/specutils.go", "diff": "@@ -493,6 +493,31 @@ func EnvVar(env []string, name string) (string, bool) {\nreturn \"\", false\n}\n+// ResolveEnvs transforms lists of environment variables into a single list of\n+// environment variables. If a variable is defined multiple times, the last\n+// value is used.\n+func ResolveEnvs(envs ...[]string) ([]string, error) {\n+ // First create a map of variable names to values. This removes any\n+ // duplicates.\n+ envMap := make(map[string]string)\n+ for _, env := range envs {\n+ for _, str := range env {\n+ parts := strings.SplitN(str, \"=\", 2)\n+ if len(parts) != 2 {\n+ return nil, fmt.Errorf(\"invalid variable: %s\", str)\n+ }\n+ envMap[parts[0]] = parts[1]\n+ }\n+ }\n+ // Reassemble envMap into a list of environment variables of the form\n+ // NAME=VALUE.\n+ env := make([]string, 0, len(envMap))\n+ for k, v := range envMap {\n+ env = append(env, fmt.Sprintf(\"%s=%s\", k, v))\n+ }\n+ return env, nil\n+}\n+\n// FaqErrorMsg returns an error message pointing to the FAQ.\nfunc FaqErrorMsg(anchor, msg string) string {\nreturn fmt.Sprintf(\"%s; see https://gvisor.dev/faq#%s for more details\", msg, anchor)\n" } ]
Go
Apache License 2.0
google/gvisor
OCI spec may contain duplicate environment variables Closes #5226 PiperOrigin-RevId: 351259576
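The specutils.ResolveEnvs helper added in the record above merges several environment lists with last-value-wins semantics. A minimal standalone sketch of that merge behavior follows; the function below is written for this note and is not the gvisor code itself:

package main

import (
	"fmt"
	"strings"
)

// resolveEnvs merges NAME=VALUE lists; when a name appears more than once,
// the definition seen last wins, mirroring the behavior described above.
func resolveEnvs(envs ...[]string) ([]string, error) {
	merged := make(map[string]string)
	for _, env := range envs {
		for _, kv := range env {
			parts := strings.SplitN(kv, "=", 2)
			if len(parts) != 2 {
				return nil, fmt.Errorf("invalid variable: %s", kv)
			}
			merged[parts[0]] = parts[1]
		}
	}
	out := make([]string, 0, len(merged))
	for name, value := range merged {
		out = append(out, name+"="+value)
	}
	return out, nil
}

func main() {
	env, err := resolveEnvs([]string{"VAR=foo", "PATH=/bin"}, []string{"VAR=bar"})
	if err != nil {
		panic(err)
	}
	fmt.Println(env) // contains VAR=bar exactly once; map iteration order varies
}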
259,858
11.01.2021 16:44:35
28,800
aac477733f68eb32fc65383f2974752e11b617f0
Add additional required packages.
[ { "change_type": "MODIFY", "old_path": ".buildkite/hooks/pre-command", "new_path": ".buildkite/hooks/pre-command", "diff": "@@ -7,7 +7,8 @@ function install_pkgs() {\nfi\ndone\n}\n-install_pkgs graphviz jq curl binutils gnupg gnupg-agent linux-libc-dev \\\n+install_pkgs make \"linux-headers-$(uname -r)\" linux-libc-dev \\\n+ graphviz jq curl binutils gnupg gnupg-agent golang-go \\\napt-transport-https ca-certificates software-properties-common\n# Setup for parallelization with PARTITION and TOTAL_PARTITIONS.\n" } ]
Go
Apache License 2.0
google/gvisor
Add additional required packages. PiperOrigin-RevId: 351263241
259,858
11.01.2021 16:57:53
28,800
e06c2b1264f5800730b93eff5c9913fd870025b9
Make segment range type split safe. This allows for use in restricted contexts. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/segment/range.go", "new_path": "pkg/segment/range.go", "diff": "@@ -30,27 +30,37 @@ type Range struct {\n// WellFormed returns true if r.Start <= r.End. All other methods on a Range\n// require that the Range is well-formed.\n+//\n+//go:nosplit\nfunc (r Range) WellFormed() bool {\nreturn r.Start <= r.End\n}\n// Length returns the length of the range.\n+//\n+//go:nosplit\nfunc (r Range) Length() T {\nreturn r.End - r.Start\n}\n// Contains returns true if r contains x.\n+//\n+//go:nosplit\nfunc (r Range) Contains(x T) bool {\nreturn r.Start <= x && x < r.End\n}\n// Overlaps returns true if r and r2 overlap.\n+//\n+//go:nosplit\nfunc (r Range) Overlaps(r2 Range) bool {\nreturn r.Start < r2.End && r2.Start < r.End\n}\n// IsSupersetOf returns true if r is a superset of r2; that is, the range r2 is\n// contained within r.\n+//\n+//go:nosplit\nfunc (r Range) IsSupersetOf(r2 Range) bool {\nreturn r.Start <= r2.Start && r.End >= r2.End\n}\n@@ -58,6 +68,8 @@ func (r Range) IsSupersetOf(r2 Range) bool {\n// Intersect returns a range consisting of the intersection between r and r2.\n// If r and r2 do not overlap, Intersect returns a range with unspecified\n// bounds, but for which Length() == 0.\n+//\n+//go:nosplit\nfunc (r Range) Intersect(r2 Range) Range {\nif r.Start < r2.Start {\nr.Start = r2.Start\n@@ -74,6 +86,8 @@ func (r Range) Intersect(r2 Range) Range {\n// CanSplitAt returns true if it is legal to split a segment spanning the range\n// r at x; that is, splitting at x would produce two ranges, both of which have\n// non-zero length.\n+//\n+//go:nosplit\nfunc (r Range) CanSplitAt(x T) bool {\nreturn r.Contains(x) && r.Start < x\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Make segment range type split safe. This allows for use in restricted contexts. Updates #5039 PiperOrigin-RevId: 351265378
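The //go:nosplit directive added throughout the record above tells the compiler not to emit a stack-growth check for the annotated function, which is what makes it callable from restricted contexts. Below is a concrete illustration of the directive using fixed uint64 bounds rather than the package's template type T; this is a shape-level sketch, not the generated gvisor code:

package rangesketch

// Range is a half-open interval [Start, End), mirroring the shape of the
// template type in pkg/segment/range.go.
type Range struct {
	Start, End uint64
}

// Length returns the length of the range.
//
//go:nosplit
func (r Range) Length() uint64 {
	return r.End - r.Start
}

// Contains reports whether x lies within [Start, End).
//
//go:nosplit
func (r Range) Contains(x uint64) bool {
	return r.Start <= x && x < r.End
}

Note that the directive only takes effect when written with no space after the // marker; otherwise the compiler treats it as an ordinary comment.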
259,907
12.01.2021 15:57:48
28,800
ad0ac73626c3d65712791eba652c05869ed287f8
[rack] Set up TLP timer and configure timeout. This change implements TLP details enumerated in Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/connect.go", "new_path": "pkg/tcpip/transport/tcp/connect.go", "diff": "@@ -1357,6 +1357,7 @@ func (e *endpoint) protocolMainLoop(handshake bool, wakerInitDone chan<- struct{\n// e.mu is expected to be hold upon entering this section.\nif e.snd != nil {\ne.snd.resendTimer.cleanup()\n+ e.snd.rc.probeTimer.cleanup()\n}\nif closeTimer != nil {\n@@ -1436,6 +1437,10 @@ func (e *endpoint) protocolMainLoop(handshake bool, wakerInitDone chan<- struct{\nreturn nil\n},\n},\n+ {\n+ w: &e.snd.rc.probeWaker,\n+ f: e.snd.probeTimerExpired,\n+ },\n{\nw: &e.newSegmentWaker,\nf: func() *tcpip.Error {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -508,6 +508,9 @@ type endpoint struct {\n// shutdownFlags represent the current shutdown state of the endpoint.\nshutdownFlags tcpip.ShutdownFlags\n+ // tcpRecovery is the loss deteoction algorithm used by TCP.\n+ tcpRecovery tcpip.TCPRecovery\n+\n// sackPermitted is set to true if the peer sends the TCPSACKPermitted\n// option in the SYN/SYN-ACK.\nsackPermitted bool\n@@ -918,6 +921,8 @@ func newEndpoint(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, waiterQue\ne.maxSynRetries = uint8(synRetries)\n}\n+ s.TransportProtocolOption(ProtocolNumber, &e.tcpRecovery)\n+\nif p := s.GetTCPProbe(); p != nil {\ne.probe = p\n}\n@@ -3072,7 +3077,7 @@ func (e *endpoint) completeState() stack.TCPEndpointState {\n}\n}\n- rc := e.snd.rc\n+ rc := &e.snd.rc\ns.Sender.RACKState = stack.TCPRACKState{\nXmitTime: rc.xmitTime,\nEndSequence: rc.endSequence,\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/protocol.go", "new_path": "pkg/tcpip/transport/tcp/protocol.go", "diff": "@@ -405,7 +405,7 @@ func (p *protocol) Option(option tcpip.GettableTransportProtocolOption) *tcpip.E\ncase *tcpip.TCPRecovery:\np.mu.RLock()\n- *v = tcpip.TCPRecovery(p.recovery)\n+ *v = p.recovery\np.mu.RUnlock()\nreturn nil\n@@ -543,7 +543,8 @@ func NewProtocol(s *stack.Stack) stack.TransportProtocol {\nminRTO: MinRTO,\nmaxRTO: MaxRTO,\nmaxRetries: MaxRetries,\n- recovery: tcpip.TCPRACKLossDetection,\n+ // TODO(gvisor.dev/issue/5243): Set recovery to tcpip.TCPRACKLossDetection.\n+ recovery: 0,\n}\np.dispatcher.init(runtime.GOMAXPROCS(0))\nreturn &p\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/rack.go", "new_path": "pkg/tcpip/transport/tcp/rack.go", "diff": "@@ -17,9 +17,18 @@ package tcp\nimport (\n\"time\"\n+ \"gvisor.dev/gvisor/pkg/sleep\"\n+ \"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/seqnum\"\n)\n+// wcDelayedACKTimeout is the recommended maximum delayed ACK timer value as\n+// defined in https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.\n+// It stands for worst case delayed ACK timer (WCDelAckT). When FlightSize is\n+// 1, PTO is inflated by WCDelAckT time to compensate for a potential long\n+// delayed ACK timer at the receiver.\n+const wcDelayedACKTimeout = 200 * time.Millisecond\n+\n// RACK is a loss detection algorithm used in TCP to detect packet loss and\n// reordering using transmission timestamp of the packets instead of packet or\n// sequence counts. 
To use RACK, SACK should be enabled on the connection.\n@@ -54,6 +63,15 @@ type rackControl struct {\n// xmitTime is the latest transmission timestamp of rackControl.seg.\nxmitTime time.Time `state:\".(unixTime)\"`\n+\n+ // probeTimer and probeWaker are used to schedule PTO for RACK TLP algorithm.\n+ probeTimer timer `state:\"nosave\"`\n+ probeWaker sleep.Waker `state:\"nosave\"`\n+}\n+\n+// init initializes RACK specific fields.\n+func (rc *rackControl) init() {\n+ rc.probeTimer.init(&rc.probeWaker)\n}\n// update will update the RACK related fields when an ACK has been received.\n@@ -127,3 +145,61 @@ func (rc *rackControl) detectReorder(seg *segment) {\nfunc (rc *rackControl) setDSACKSeen() {\nrc.dsackSeen = true\n}\n+\n+// shouldSchedulePTO dictates whether we should schedule a PTO or not.\n+// See https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.1.\n+func (s *sender) shouldSchedulePTO() bool {\n+ // Schedule PTO only if RACK loss detection is enabled.\n+ return s.ep.tcpRecovery&tcpip.TCPRACKLossDetection != 0 &&\n+ // The connection supports SACK.\n+ s.ep.sackPermitted &&\n+ // The connection is not in loss recovery.\n+ (s.state != RTORecovery && s.state != SACKRecovery) &&\n+ // The connection has no SACKed sequences in the SACK scoreboard.\n+ s.ep.scoreboard.Sacked() == 0\n+}\n+\n+// schedulePTO schedules the probe timeout as defined in\n+// https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.1.\n+func (s *sender) schedulePTO() {\n+ pto := time.Second\n+ s.rtt.Lock()\n+ if s.rtt.srttInited && s.rtt.srtt > 0 {\n+ pto = s.rtt.srtt * 2\n+ if s.outstanding == 1 {\n+ pto += wcDelayedACKTimeout\n+ }\n+ }\n+ s.rtt.Unlock()\n+\n+ now := time.Now()\n+ if s.resendTimer.enabled() {\n+ if now.Add(pto).After(s.resendTimer.target) {\n+ pto = s.resendTimer.target.Sub(now)\n+ }\n+ s.resendTimer.disable()\n+ }\n+\n+ s.rc.probeTimer.enable(pto)\n+}\n+\n+// probeTimerExpired is the same as TLP_send_probe() as defined in\n+// https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.2.\n+func (s *sender) probeTimerExpired() *tcpip.Error {\n+ if !s.rc.probeTimer.checkExpiration() {\n+ return nil\n+ }\n+ // TODO(gvisor.dev/issue/5084): Implement this pseudo algorithm.\n+ // If an unsent segment exists AND\n+ // the receive window allows new data to be sent:\n+ // Transmit the lowest-sequence unsent segment of up to SMSS\n+ // Increment FlightSize by the size of the newly-sent segment\n+ // Else if TLPRxtOut is not set:\n+ // Retransmit the highest-sequence segment sent so far\n+ // TLPRxtOut = true\n+ // TLPHighRxt = SND.NXT\n+ // The cwnd remains unchanged\n+ // If FlightSize != 0:\n+ // Arm RTO timer only.\n+ return nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/rack_state.go", "new_path": "pkg/tcpip/transport/tcp/rack_state.go", "diff": "@@ -27,3 +27,8 @@ func (rc *rackControl) saveXmitTime() unixTime {\nfunc (rc *rackControl) loadXmitTime(unix unixTime) {\nrc.xmitTime = time.Unix(unix.second, unix.nano)\n}\n+\n+// afterLoad is invoked by stateify.\n+func (rc *rackControl) afterLoad() {\n+ rc.probeTimer.init(&rc.probeWaker)\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/snd.go", "new_path": "pkg/tcpip/transport/tcp/snd.go", "diff": "@@ -286,6 +286,8 @@ func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint\ngso: ep.gso != nil,\n}\n+ s.rc.init()\n+\nif s.gso {\ns.ep.gso.MSS = uint16(maxPayloadSize)\n}\n@@ -1455,6 +1457,7 @@ func (s *sender) handleRcvdSegment(rcvdSeg *segment) {\n// Reset 
firstRetransmittedSegXmitTime to the zero value.\ns.firstRetransmittedSegXmitTime = time.Time{}\ns.resendTimer.disable()\n+ s.rc.probeTimer.disable()\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc_net.cc", "new_path": "test/syscalls/linux/proc_net.cc", "diff": "@@ -499,7 +499,13 @@ TEST(ProcSysNetIpv4Recovery, CanReadAndWrite) {\n// Check initial value is set to 1.\nEXPECT_THAT(PreadFd(fd.get(), &buf, sizeof(buf), 0),\nSyscallSucceedsWithValue(sizeof(to_write) + 1));\n+ if (IsRunningOnGvisor()) {\n+ // TODO(gvisor.dev/issue/5243): TCPRACKLossDetection = 1 should be turned on\n+ // by default.\n+ EXPECT_EQ(strcmp(buf, \"0\\n\"), 0);\n+ } else {\nEXPECT_EQ(strcmp(buf, \"1\\n\"), 0);\n+ }\n// Set tcp_recovery to one of the allowed constants.\nEXPECT_THAT(PwriteFd(fd.get(), &to_write, sizeof(to_write), 0),\n" } ]
Go
Apache License 2.0
google/gvisor
[rack] Set up TLP timer and configure timeout. This change implements TLP details enumerated in https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.1. Fixes #5083 PiperOrigin-RevId: 351467357
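The probe timeout (PTO) scheduled by the change above is essentially twice the smoothed RTT, padded by a worst-case delayed-ACK allowance when exactly one segment is outstanding, and clamped so the probe never fires after the pending RTO. A self-contained sketch of just that arithmetic follows; the names are illustrative and not the endpoint's actual fields:

package tlpsketch

import "time"

// wcDelayedACKTimeout is the worst-case delayed ACK allowance applied when
// only one segment is in flight.
const wcDelayedACKTimeout = 200 * time.Millisecond

// probeTimeout computes a tail-loss-probe timeout from the smoothed RTT, the
// number of outstanding segments, and the time left until the RTO would fire
// (zero means no RTO timer is armed).
func probeTimeout(srtt time.Duration, outstanding int, untilRTO time.Duration) time.Duration {
	pto := time.Second // conservative default when no RTT sample exists yet
	if srtt > 0 {
		pto = 2 * srtt
		if outstanding == 1 {
			// Compensate for a potentially long delayed ACK at the receiver.
			pto += wcDelayedACKTimeout
		}
	}
	// Never schedule the probe beyond the pending retransmission timeout.
	if untilRTO > 0 && pto > untilRTO {
		pto = untilRTO
	}
	return pto
}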
259,858
12.01.2021 16:59:23
28,800
be2b9d75d75e0e7371cd868589d57f4ddee44781
Drop shutdown in docker run. This can race and cause issues. Instead, a manual shutdown can be done via 'bazel-shutdown' if required for specific cases. The ARM64 builds are now done using cross-compilation, so this hack is not necessary.
[ { "change_type": "MODIFY", "old_path": "tools/bazel.mk", "new_path": "tools/bazel.mk", "diff": "@@ -161,11 +161,7 @@ bazel-image: load-default ## Ensures that the local builder exists.\n.PHONY: bazel-image\n# Note: when starting the bazel server, we tie the life of the container to the\n-# bazel server's life, so that the container disappears naturally. We also call\n-# bazel shutdown prior to startup, to ensure that any existing bazel instance in\n-# the workspace (perhaps of a different architecture) stops. If the instance is\n-# compatible and the container is already running, then the wrapper if statement\n-# here will succeed, and we wouldn't have needed a new server at all.\n+# bazel server's life, so that the container disappears naturally.\nifneq (true,$(shell $(wrapper echo true)))\nbazel-server: bazel-image ## Ensures that the server exists.\n@$(call header,DOCKER RUN)\n@@ -177,7 +173,7 @@ bazel-server: bazel-image ## Ensures that the server exists.\n--workdir \"$(CURDIR)\" \\\n$(DOCKER_RUN_OPTIONS) \\\ngvisor.dev/images/builder \\\n- bash -c \"set -x; $(BAZEL) shutdown; tail -f --pid=\\$$($(BAZEL) info server_pid) /dev/null\"\n+ bash -c \"set -x; tail -f --pid=\\$$($(BAZEL) info server_pid) /dev/null\"\nelse\nbazel-server:\n@\n" } ]
Go
Apache License 2.0
google/gvisor
Drop shutdown in docker run. This can race and cause issues. Instead, a manual shutdown can be done via 'bazel-shutdown' if required for specific cases. The ARM64 builds are now done using cross-compilation, so this hack is not necessary. PiperOrigin-RevId: 351477908
260,004
12.01.2021 19:34:43
28,800
62b4c2f5173dfa75387c079bd3dd6d5e5c3abae9
Drop TransportEndpointID from HandleControlPacket When a control packet is delivered, it is delivered to a transport endpoint with a matching stack.TransportEndpointID so there is no need to pass the ID to the endpoint as it already knows its ID.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/registration.go", "new_path": "pkg/tcpip/stack/registration.go", "diff": "@@ -84,7 +84,7 @@ type TransportEndpoint interface {\n// HandleControlPacket is called by the stack when new control (e.g.\n// ICMP) packets arrive to this transport endpoint.\n// HandleControlPacket takes ownership of pkt.\n- HandleControlPacket(id TransportEndpointID, typ ControlType, extra uint32, pkt *PacketBuffer)\n+ HandleControlPacket(typ ControlType, extra uint32, pkt *PacketBuffer)\n// Abort initiates an expedited endpoint teardown. It puts the endpoint\n// in a closed state and frees all resources associated with it. This\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/transport_demuxer.go", "new_path": "pkg/tcpip/stack/transport_demuxer.go", "diff": "@@ -182,7 +182,8 @@ func (epsByNIC *endpointsByNIC) handlePacket(id TransportEndpointID, pkt *Packet\nepsByNIC.mu.RUnlock() // Don't use defer for performance reasons.\n}\n-// HandleControlPacket implements stack.TransportEndpoint.HandleControlPacket.\n+// handleControlPacket delivers a control packet to the transport endpoint\n+// identified by id.\nfunc (epsByNIC *endpointsByNIC) handleControlPacket(n *NIC, id TransportEndpointID, typ ControlType, extra uint32, pkt *PacketBuffer) {\nepsByNIC.mu.RLock()\ndefer epsByNIC.mu.RUnlock()\n@@ -199,7 +200,7 @@ func (epsByNIC *endpointsByNIC) handleControlPacket(n *NIC, id TransportEndpoint\n// broadcast like we are doing with handlePacket above?\n// multiPortEndpoints are guaranteed to have at least one element.\n- selectEndpoint(id, mpep, epsByNIC.seed).HandleControlPacket(id, typ, extra, pkt)\n+ selectEndpoint(id, mpep, epsByNIC.seed).HandleControlPacket(typ, extra, pkt)\n}\n// registerEndpoint returns true if it succeeds. It fails and returns\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/transport_test.go", "new_path": "pkg/tcpip/stack/transport_test.go", "diff": "@@ -237,7 +237,7 @@ func (f *fakeTransportEndpoint) HandlePacket(id stack.TransportEndpointID, pkt *\nf.acceptQueue = append(f.acceptQueue, ep)\n}\n-func (f *fakeTransportEndpoint) HandleControlPacket(stack.TransportEndpointID, stack.ControlType, uint32, *stack.PacketBuffer) {\n+func (f *fakeTransportEndpoint) HandleControlPacket(stack.ControlType, uint32, *stack.PacketBuffer) {\n// Increment the number of received control packets.\nf.proto.controlCount++\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/icmp/endpoint.go", "new_path": "pkg/tcpip/transport/icmp/endpoint.go", "diff": "@@ -789,7 +789,7 @@ func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketB\n}\n// HandleControlPacket implements stack.TransportEndpoint.HandleControlPacket.\n-func (e *endpoint) HandleControlPacket(id stack.TransportEndpointID, typ stack.ControlType, extra uint32, pkt *stack.PacketBuffer) {\n+func (e *endpoint) HandleControlPacket(typ stack.ControlType, extra uint32, pkt *stack.PacketBuffer) {\n}\n// State implements tcpip.Endpoint.State. 
The ICMP endpoint currently doesn't\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -2728,7 +2728,7 @@ func (e *endpoint) enqueueSegment(s *segment) bool {\nreturn true\n}\n-func (e *endpoint) onICMPError(err *tcpip.Error, id stack.TransportEndpointID, errType byte, errCode byte, extra uint32, pkt *stack.PacketBuffer) {\n+func (e *endpoint) onICMPError(err *tcpip.Error, errType byte, errCode byte, extra uint32, pkt *stack.PacketBuffer) {\n// Update last error first.\ne.lastErrorMu.Lock()\ne.lastError = err\n@@ -2747,13 +2747,13 @@ func (e *endpoint) onICMPError(err *tcpip.Error, id stack.TransportEndpointID, e\nPayload: pkt.Data.ToView(),\nDst: tcpip.FullAddress{\nNIC: pkt.NICID,\n- Addr: id.RemoteAddress,\n- Port: id.RemotePort,\n+ Addr: e.ID.RemoteAddress,\n+ Port: e.ID.RemotePort,\n},\nOffender: tcpip.FullAddress{\nNIC: pkt.NICID,\n- Addr: id.LocalAddress,\n- Port: id.LocalPort,\n+ Addr: e.ID.LocalAddress,\n+ Port: e.ID.LocalPort,\n},\nNetProto: pkt.NetworkProtocolNumber,\n})\n@@ -2764,7 +2764,7 @@ func (e *endpoint) onICMPError(err *tcpip.Error, id stack.TransportEndpointID, e\n}\n// HandleControlPacket implements stack.TransportEndpoint.HandleControlPacket.\n-func (e *endpoint) HandleControlPacket(id stack.TransportEndpointID, typ stack.ControlType, extra uint32, pkt *stack.PacketBuffer) {\n+func (e *endpoint) HandleControlPacket(typ stack.ControlType, extra uint32, pkt *stack.PacketBuffer) {\nswitch typ {\ncase stack.ControlPacketTooBig:\ne.sndBufMu.Lock()\n@@ -2777,10 +2777,10 @@ func (e *endpoint) HandleControlPacket(id stack.TransportEndpointID, typ stack.C\ne.notifyProtocolGoroutine(notifyMTUChanged)\ncase stack.ControlNoRoute:\n- e.onICMPError(tcpip.ErrNoRoute, id, byte(header.ICMPv4DstUnreachable), byte(header.ICMPv4HostUnreachable), extra, pkt)\n+ e.onICMPError(tcpip.ErrNoRoute, byte(header.ICMPv4DstUnreachable), byte(header.ICMPv4HostUnreachable), extra, pkt)\ncase stack.ControlNetworkUnreachable:\n- e.onICMPError(tcpip.ErrNetworkUnreachable, id, byte(header.ICMPv6DstUnreachable), byte(header.ICMPv6NetworkUnreachable), extra, pkt)\n+ e.onICMPError(tcpip.ErrNetworkUnreachable, byte(header.ICMPv6DstUnreachable), byte(header.ICMPv6NetworkUnreachable), extra, pkt)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/endpoint.go", "new_path": "pkg/tcpip/transport/udp/endpoint.go", "diff": "@@ -1352,7 +1352,7 @@ func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketB\n}\n}\n-func (e *endpoint) onICMPError(err *tcpip.Error, id stack.TransportEndpointID, errType byte, errCode byte, extra uint32, pkt *stack.PacketBuffer) {\n+func (e *endpoint) onICMPError(err *tcpip.Error, errType byte, errCode byte, extra uint32, pkt *stack.PacketBuffer) {\n// Update last error first.\ne.lastErrorMu.Lock()\ne.lastError = err\n@@ -1376,13 +1376,13 @@ func (e *endpoint) onICMPError(err *tcpip.Error, id stack.TransportEndpointID, e\nPayload: payload,\nDst: tcpip.FullAddress{\nNIC: pkt.NICID,\n- Addr: id.RemoteAddress,\n- Port: id.RemotePort,\n+ Addr: e.ID.RemoteAddress,\n+ Port: e.ID.RemotePort,\n},\nOffender: tcpip.FullAddress{\nNIC: pkt.NICID,\n- Addr: id.LocalAddress,\n- Port: id.LocalPort,\n+ Addr: e.ID.LocalAddress,\n+ Port: e.ID.LocalPort,\n},\nNetProto: pkt.NetworkProtocolNumber,\n})\n@@ -1393,7 +1393,7 @@ func (e *endpoint) onICMPError(err *tcpip.Error, id stack.TransportEndpointID, e\n}\n// HandleControlPacket implements 
stack.TransportEndpoint.HandleControlPacket.\n-func (e *endpoint) HandleControlPacket(id stack.TransportEndpointID, typ stack.ControlType, extra uint32, pkt *stack.PacketBuffer) {\n+func (e *endpoint) HandleControlPacket(typ stack.ControlType, extra uint32, pkt *stack.PacketBuffer) {\nif typ == stack.ControlPortUnreachable {\nif e.EndpointState() == StateConnected {\nvar errType byte\n@@ -1408,7 +1408,7 @@ func (e *endpoint) HandleControlPacket(id stack.TransportEndpointID, typ stack.C\ndefault:\npanic(fmt.Sprintf(\"unsupported net proto for infering ICMP type and code: %d\", pkt.NetworkProtocolNumber))\n}\n- e.onICMPError(tcpip.ErrConnectionRefused, id, errType, errCode, extra, pkt)\n+ e.onICMPError(tcpip.ErrConnectionRefused, errType, errCode, extra, pkt)\nreturn\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Drop TransportEndpointID from HandleControlPacket When a control packet is delivered, it is delivered to a transport endpoint with a matching stack.TransportEndpointID so there is no need to pass the ID to the endpoint as it already knows its ID. PiperOrigin-RevId: 351497588
259,858
12.01.2021 22:23:13
28,800
fb95e13df5749082e4be0a2b5c470dd09f1f8554
Don't remove release directory. If the release directory is a parent directory (for reasons), then this causes a few problems (to say the least).
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -433,7 +433,7 @@ $(RELEASE_ARTIFACTS)/%:\n@$(call copy,//debian:debian,$@)\nrelease: $(RELEASE_KEY) $(RELEASE_ARTIFACTS)/$(ARCH)\n- @rm -rf $(RELEASE_ROOT) && mkdir -p $(RELEASE_ROOT)\n+ @mkdir -p $(RELEASE_ROOT)\n@NIGHTLY=$(RELEASE_NIGHTLY) tools/make_release.sh $(RELEASE_KEY) $(RELEASE_ROOT) $$(find $(RELEASE_ARTIFACTS) -type f)\n.PHONY: release\n" } ]
Go
Apache License 2.0
google/gvisor
Don't remove release directory. If the release directory is a parent directory (for reasons), then this causes a few problems (to say the least). PiperOrigin-RevId: 351515882
259,907
13.01.2021 04:23:30
28,800
19ab0f15f3d2069611257d619acf551071a2aedc
[rack] TLP: Recovery detection. This change implements TLP details enumerated in Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/rack.go", "new_path": "pkg/tcpip/transport/tcp/rack.go", "diff": "@@ -67,6 +67,14 @@ type rackControl struct {\n// probeTimer and probeWaker are used to schedule PTO for RACK TLP algorithm.\nprobeTimer timer `state:\"nosave\"`\nprobeWaker sleep.Waker `state:\"nosave\"`\n+\n+ // tlpRxtOut indicates whether there is an unacknowledged\n+ // TLP retransmission.\n+ tlpRxtOut bool\n+\n+ // tlpHighRxt the value of sender.sndNxt at the time of sending\n+ // a TLP retransmission.\n+ tlpHighRxt seqnum.Value\n}\n// init initializes RACK specific fields.\n@@ -203,3 +211,40 @@ func (s *sender) probeTimerExpired() *tcpip.Error {\n// Arm RTO timer only.\nreturn nil\n}\n+\n+// detectTLPRecovery detects if recovery was accomplished by the loss probes\n+// and updates TLP state accordingly.\n+// See https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.6.3.\n+func (s *sender) detectTLPRecovery(ack seqnum.Value, rcvdSeg *segment) {\n+ if !(s.ep.sackPermitted && s.rc.tlpRxtOut) {\n+ return\n+ }\n+\n+ // Step 1.\n+ if s.isDupAck(rcvdSeg) && ack == s.rc.tlpHighRxt {\n+ var sbAboveTLPHighRxt bool\n+ for _, sb := range rcvdSeg.parsedOptions.SACKBlocks {\n+ if s.rc.tlpHighRxt.LessThan(sb.End) {\n+ sbAboveTLPHighRxt = true\n+ break\n+ }\n+ }\n+ if !sbAboveTLPHighRxt {\n+ // TLP episode is complete.\n+ s.rc.tlpRxtOut = false\n+ }\n+ }\n+\n+ if s.rc.tlpRxtOut && s.rc.tlpHighRxt.LessThanEq(ack) {\n+ // TLP episode is complete.\n+ s.rc.tlpRxtOut = false\n+ if !checkDSACK(rcvdSeg) {\n+ // Step 2. Either the original packet or the retransmission (in the\n+ // form of a probe) was lost. Invoke a congestion control response\n+ // equivalent to fast recovery.\n+ s.cc.HandleNDupAcks()\n+ s.enterRecovery()\n+ s.leaveRecovery()\n+ }\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/snd.go", "new_path": "pkg/tcpip/transport/tcp/snd.go", "diff": "@@ -533,6 +533,10 @@ func (s *sender) retransmitTimerExpired() bool {\ns.ep.stack.Stats().TCP.Timeouts.Increment()\ns.ep.stats.SendErrors.Timeouts.Increment()\n+ // Set TLPRxtOut to false according to\n+ // https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.6.1.\n+ s.rc.tlpRxtOut = false\n+\n// Give up if we've waited more than a minute since the last resend or\n// if a user time out is set and we have exceeded the user specified\n// timeout since the first retransmission.\n@@ -1060,6 +1064,9 @@ func (s *sender) enterRecovery() {\nif s.ep.sackPermitted {\ns.state = SACKRecovery\ns.ep.stack.Stats().TCP.SACKRecovery.Increment()\n+ // Set TLPRxtOut to false according to\n+ // https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.6.1.\n+ s.rc.tlpRxtOut = false\nreturn\n}\ns.state = FastRecovery\n@@ -1143,20 +1150,12 @@ func (s *sender) SetPipe() {\n// detected. It manages the state related to duplicate acks and determines if\n// a retransmit is needed according to the rules in RFC 6582 (NewReno).\nfunc (s *sender) detectLoss(seg *segment) (fastRetransmit bool) {\n- ack := seg.ackNumber\n-\n- // We're not in fast recovery yet. A segment is considered a duplicate\n- // only if it doesn't carry any data and doesn't update the send window,\n- // because if it does, it wasn't sent in response to an out-of-order\n- // segment. If SACK is enabled then we have an additional check to see\n- // if the segment carries new SACK information. 
If it does then it is\n- // considered a duplicate ACK as per RFC6675.\n- if ack != s.sndUna || seg.logicalLen() != 0 || s.sndWnd != seg.window || ack == s.sndNxt {\n- if !s.ep.sackPermitted || !seg.hasNewSACKInfo {\n+ // We're not in fast recovery yet.\n+\n+ if !s.isDupAck(seg) {\ns.dupAckCount = 0\nreturn false\n}\n- }\ns.dupAckCount++\n@@ -1186,6 +1185,31 @@ func (s *sender) detectLoss(seg *segment) (fastRetransmit bool) {\nreturn true\n}\n+// isDupAck determines if seg is a duplicate ack as defined in\n+// https://tools.ietf.org/html/rfc5681#section-2.\n+func (s *sender) isDupAck(seg *segment) bool {\n+ // A TCP that utilizes selective acknowledgments (SACKs) [RFC2018, RFC2883]\n+ // can leverage the SACK information to determine when an incoming ACK is a\n+ // \"duplicate\" (e.g., if the ACK contains previously unknown SACK\n+ // information).\n+ if s.ep.sackPermitted && !seg.hasNewSACKInfo {\n+ return false\n+ }\n+\n+ // (a) The receiver of the ACK has outstanding data.\n+ return s.sndUna != s.sndNxt &&\n+ // (b) The incoming acknowledgment carries no data.\n+ seg.logicalLen() == 0 &&\n+ // (c) The SYN and FIN bits are both off.\n+ !seg.flagIsSet(header.TCPFlagFin) && !seg.flagIsSet(header.TCPFlagSyn) &&\n+ // (d) the ACK number is equal to the greatest acknowledgment received on\n+ // the given connection (TCP.UNA from RFC793).\n+ seg.ackNumber == s.sndUna &&\n+ // (e) the advertised window in the incoming acknowledgment equals the\n+ // advertised window in the last incoming acknowledgment.\n+ s.sndWnd == seg.window\n+}\n+\n// Iterate the writeList and update RACK for each segment which is newly acked\n// either cumulatively or selectively. Loop through the segments which are\n// sacked, and update the RACK related variables and check for reordering.\n@@ -1196,7 +1220,7 @@ func (s *sender) walkSACK(rcvdSeg *segment) {\n// Look for DSACK block.\nidx := 0\nn := len(rcvdSeg.parsedOptions.SACKBlocks)\n- if s.checkDSACK(rcvdSeg) {\n+ if checkDSACK(rcvdSeg) {\ns.rc.setDSACKSeen()\nidx = 1\nn--\n@@ -1228,8 +1252,8 @@ func (s *sender) walkSACK(rcvdSeg *segment) {\n}\n}\n-// checkDSACK checks if a DSACK is reported and updates it in RACK.\n-func (s *sender) checkDSACK(rcvdSeg *segment) bool {\n+// checkDSACK checks if a DSACK is reported.\n+func checkDSACK(rcvdSeg *segment) bool {\nn := len(rcvdSeg.parsedOptions.SACKBlocks)\nif n == 0 {\nreturn false\n@@ -1338,6 +1362,9 @@ func (s *sender) handleRcvdSegment(rcvdSeg *segment) {\nfastRetransmit = s.detectLoss(rcvdSeg)\n}\n+ // See if TLP based recovery was successful.\n+ s.detectTLPRecovery(ack, rcvdSeg)\n+\n// Stash away the current window size.\ns.sndWnd = rcvdSeg.window\n" } ]
Go
Apache License 2.0
google/gvisor
[rack] TLP: Recovery detection. This change implements TLP details enumerated in https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.6 Fixes #5131 PiperOrigin-RevId: 351558449
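The isDupAck helper introduced in this record is a direct encoding of the RFC 5681 duplicate-ACK conditions plus the RFC 6675 SACK refinement. Restated as a standalone predicate, with illustrative field names rather than the endpoint's real segment type:

package dupacksketch

// ackSegment carries only the pieces of an incoming ACK that the
// duplicate-ACK test needs.
type ackSegment struct {
	ackNumber   uint32
	payloadLen  int
	hasSYNorFIN bool
	window      uint32
	hasNewSACK  bool
}

// isDupAck reports whether the ACK is a duplicate per RFC 5681 section 2.
// When SACK is in use, an ACK only counts as a duplicate if it also carries
// previously unknown SACK information (RFC 6675).
func isDupAck(seg ackSegment, sndUna, sndNxt, lastWnd uint32, sackPermitted bool) bool {
	if sackPermitted && !seg.hasNewSACK {
		return false
	}
	return sndUna != sndNxt && // (a) the sender has outstanding data
		seg.payloadLen == 0 && // (b) the ACK carries no data
		!seg.hasSYNorFIN && // (c) the SYN and FIN bits are both off
		seg.ackNumber == sndUna && // (d) it acknowledges SND.UNA again
		seg.window == lastWnd // (e) the advertised window is unchanged
}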
259,951
13.01.2021 10:19:35
28,800
0f25aa24f9f0502bfb1fbead37bbbc2a012dfd64
Clean up the dummy network interface used by UDP tests It is now composed by a NetworkInterface interface which lets us delete the methods we don't need.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/udp_test.go", "new_path": "pkg/tcpip/transport/udp/udp_test.go", "diff": "@@ -1560,33 +1560,17 @@ func TestNoChecksum(t *testing.T) {\nvar _ stack.NetworkInterface = (*testInterface)(nil)\ntype testInterface struct {\n- stack.NetworkLinkEndpoint\n+ stack.NetworkInterface\n}\nfunc (*testInterface) ID() tcpip.NICID {\nreturn 0\n}\n-func (*testInterface) IsLoopback() bool {\n- return false\n-}\n-\n-func (*testInterface) Name() string {\n- return \"\"\n-}\n-\nfunc (*testInterface) Enabled() bool {\nreturn true\n}\n-func (*testInterface) Promiscuous() bool {\n- return false\n-}\n-\n-func (*testInterface) WritePacketToRemote(tcpip.LinkAddress, *stack.GSO, tcpip.NetworkProtocolNumber, *stack.PacketBuffer) *tcpip.Error {\n- return tcpip.ErrNotSupported\n-}\n-\nfunc TestTTL(t *testing.T) {\nfor _, flow := range []testFlow{unicastV4, unicastV4in6, unicastV6, unicastV6Only, multicastV4, multicastV4in6, multicastV6, broadcast, broadcastIn6} {\nt.Run(fmt.Sprintf(\"flow:%s\", flow), func(t *testing.T) {\n" } ]
Go
Apache License 2.0
google/gvisor
Clean up the dummy network interface used by UDP tests It is now composed by a NetworkInterface interface which lets us delete the methods we don't need. PiperOrigin-RevId: 351613267
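The cleanup above leans on a common Go testing idiom: embed the interface itself in the fake so the struct satisfies the full interface, and override only the methods the test exercises; calling anything else dispatches to the nil embedded value and panics, which is usually the loud failure you want. A generic sketch of the pattern, independent of netstack types:

package fakesketch

import "fmt"

// Clock is a stand-in interface with more methods than a given test needs.
type Clock interface {
	Now() int64
	Sleep(d int64)
	After(d int64) <-chan struct{}
}

// fakeClock embeds Clock, so it satisfies the whole interface without
// spelling out every method; only Now is overridden here.
type fakeClock struct {
	Clock
	now int64
}

func (f *fakeClock) Now() int64 { return f.now }

func demo() {
	var c Clock = &fakeClock{now: 42}
	fmt.Println(c.Now()) // 42
	// c.Sleep(1) would panic: it dispatches to the nil embedded Clock.
}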
259,858
13.01.2021 11:46:25
28,800
a684bfb6c035ac6f283649b007176f8bac713acd
Split container tests from unit tests.
[ { "change_type": "MODIFY", "old_path": ".buildkite/pipeline.yaml", "new_path": ".buildkite/pipeline.yaml", "diff": "@@ -49,6 +49,9 @@ steps:\n- <<: *common\nlabel: \":test_tube: Unit tests\"\ncommand: make unit-tests\n+ - <<: *common\n+ label: \":test_tube: runsc tests\"\n+ command: make runsc-tests\n# All system call tests.\n- <<: *common\n" }, { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -186,12 +186,16 @@ fuse-tests:\n@$(call test,--test_tag_filters=fuse $(PARTITIONS) test/fuse/...)\n.PHONY: fuse-tests\n-unit-tests: ## Local package unit tests in pkg/..., runsc/, tools/.., etc.\n- @$(call test,//:all pkg/... runsc/... tools/...)\n+unit-tests: ## Local package unit tests in pkg/..., tools/.., etc.\n+ @$(call test,//:all pkg/... tools/...)\n.PHONY: unit-tests\n+runsc-tests: ## Run all tests in runsc/...\n+ @$(call test,runsc/...)\n+.PHONY: runsc-tests\n+\ntests: ## Runs all unit tests and syscall tests.\n-tests: unit-tests syscall-tests\n+tests: unit-tests runsc-tests syscall-tests\n.PHONY: tests\nintegration-tests: ## Run all standard integration tests.\n" } ]
Go
Apache License 2.0
google/gvisor
Split container tests from unit tests. PiperOrigin-RevId: 351632484
260,020
10.01.2021 22:52:15
28,800
37855aff121a7cf9deae0405910a62518e806a9e
Add support for pause/restore in containerd shim
[ { "change_type": "MODIFY", "old_path": "pkg/shim/runsc/runsc.go", "new_path": "pkg/shim/runsc/runsc.go", "diff": "@@ -167,6 +167,20 @@ func (r *Runsc) Create(context context.Context, id, bundle string, opts *CreateO\nreturn err\n}\n+func (r *Runsc) Pause(context context.Context, id string) error {\n+ if _, err := cmdOutput(r.command(context, \"pause\", id), true); err != nil {\n+ return fmt.Errorf(\"unable to pause: %s\", err)\n+ }\n+ return nil\n+}\n+\n+func (r *Runsc) Resume(context context.Context, id string) error {\n+ if _, err := cmdOutput(r.command(context, \"pause\", id), true); err != nil {\n+ return fmt.Errorf(\"unable to resume: %s\", err)\n+ }\n+ return nil\n+}\n+\n// Start will start an already created container.\nfunc (r *Runsc) Start(context context.Context, id string, cio runc.IO) error {\ncmd := r.command(context, \"start\", id)\n" }, { "change_type": "MODIFY", "old_path": "pkg/shim/service.go", "new_path": "pkg/shim/service.go", "diff": "@@ -612,13 +612,15 @@ func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (*taskAPI.\n// Pause the container.\nfunc (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (*types.Empty, error) {\nlog.L.Debugf(\"Pause, id: %s\", r.ID)\n- return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented)\n+ err := s.task.Runtime().Pause(ctx, r.ID)\n+ return empty, err\n}\n// Resume the container.\nfunc (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (*types.Empty, error) {\nlog.L.Debugf(\"Resume, id: %s\", r.ID)\n- return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented)\n+ err := s.task.Runtime().Resume(ctx, r.ID)\n+ return empty, err\n}\n// Kill a process with the provided signal.\n" } ]
Go
Apache License 2.0
google/gvisor
Add support for pause/restore in containerd shim
259,868
13.01.2021 15:07:24
28,800
f34aaf7ef17aa10c7ba1923d0694347e47634192
testutil: Create a `multiLogger` that logs to multiple `Loggers`. This is useful when using the shell library in order to log to both the test log and the standard logs.
[ { "change_type": "MODIFY", "old_path": "pkg/test/testutil/testutil.go", "new_path": "pkg/test/testutil/testutil.go", "diff": "@@ -111,6 +111,30 @@ func (d DefaultLogger) Logf(fmt string, args ...interface{}) {\nlog.Printf(fmt, args...)\n}\n+// multiLogger logs to multiple Loggers.\n+type multiLogger []Logger\n+\n+// Name implements Logger.Name.\n+func (m multiLogger) Name() string {\n+ names := make([]string, len(m))\n+ for i, l := range m {\n+ names[i] = l.Name()\n+ }\n+ return strings.Join(names, \"+\")\n+}\n+\n+// Logf implements Logger.Logf.\n+func (m multiLogger) Logf(fmt string, args ...interface{}) {\n+ for _, l := range m {\n+ l.Logf(fmt, args...)\n+ }\n+}\n+\n+// NewMultiLogger returns a new Logger that logs on multiple Loggers.\n+func NewMultiLogger(loggers ...Logger) Logger {\n+ return multiLogger(loggers)\n+}\n+\n// Cmd is a simple wrapper.\ntype Cmd struct {\nlogger Logger\n" } ]
Go
Apache License 2.0
google/gvisor
testutil: Create a `multiLogger` that logs to multiple `Loggers`. This is useful when using the shell library in order to log to both the test log and the standard logs. PiperOrigin-RevId: 351673465
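Because *testing.T already provides Name() string and Logf(format string, args ...interface{}), it satisfies the Logger interface used here, so a test can fan command output to the test log and another sink at once. A hedged usage sketch follows: stdLogger is a locally defined stand-in, and the import path assumes the usual gvisor.dev/gvisor module layout.

package example_test

import (
	"log"
	"testing"

	"gvisor.dev/gvisor/pkg/test/testutil"
)

// stdLogger is a minimal Logger that forwards to the standard log package.
type stdLogger string

func (s stdLogger) Name() string { return string(s) }

func (s stdLogger) Logf(format string, args ...interface{}) { log.Printf(format, args...) }

func TestMultiLogger(t *testing.T) {
	// Messages go to both the per-test log and the process-wide logger.
	logger := testutil.NewMultiLogger(t, stdLogger("runsc"))
	logger.Logf("starting container %s", "c1")
}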
259,860
13.01.2021 15:10:03
28,800
1efe0ebc5973ec8a06b881c087dae2183898504b
Switch uses of os.Getenv that check for empty string to os.LookupEnv. Whether the variable was found is already returned by syscall.Getenv. os.Getenv drops this value while os.Lookupenv passes it along.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/sharedmem/sharedmem_test.go", "new_path": "pkg/tcpip/link/sharedmem/sharedmem_test.go", "diff": "@@ -191,8 +191,8 @@ func shuffle(b []int) {\n}\nfunc createFile(t *testing.T, size int64, initQueue bool) int {\n- tmpDir := os.Getenv(\"TEST_TMPDIR\")\n- if tmpDir == \"\" {\n+ tmpDir, ok := os.LookupEnv(\"TEST_TMPDIR\")\n+ if !ok {\ntmpDir = os.Getenv(\"TMPDIR\")\n}\nf, err := ioutil.TempFile(tmpDir, \"sharedmem_test\")\n" }, { "change_type": "MODIFY", "old_path": "pkg/test/testutil/testutil.go", "new_path": "pkg/test/testutil/testutil.go", "diff": "@@ -83,12 +83,11 @@ func ConfigureExePath() error {\n// TmpDir returns the absolute path to a writable directory that can be used as\n// scratch by the test.\nfunc TmpDir() string {\n- dir := os.Getenv(\"TEST_TMPDIR\")\n- if dir == \"\" {\n- dir = \"/tmp\"\n- }\n+ if dir, ok := os.LookupEnv(\"TEST_TMPDIR\"); ok {\nreturn dir\n}\n+ return \"/tmp\"\n+}\n// Logger is a simple logging wrapper.\n//\n@@ -543,7 +542,7 @@ func IsStatic(filename string) (bool, error) {\n//\n// See https://docs.bazel.build/versions/master/test-encyclopedia.html#role-of-the-test-runner.\nfunc TouchShardStatusFile() error {\n- if statusFile := os.Getenv(\"TEST_SHARD_STATUS_FILE\"); statusFile != \"\" {\n+ if statusFile, ok := os.LookupEnv(\"TEST_SHARD_STATUS_FILE\"); ok {\ncmd := exec.Command(\"touch\", statusFile)\nif b, err := cmd.CombinedOutput(); err != nil {\nreturn fmt.Errorf(\"touch %q failed:\\n output: %s\\n error: %s\", statusFile, string(b), err.Error())\n@@ -565,8 +564,9 @@ func TestIndicesForShard(numTests int) ([]int, error) {\nshardTotal = 1\n)\n- indexStr, totalStr := os.Getenv(\"TEST_SHARD_INDEX\"), os.Getenv(\"TEST_TOTAL_SHARDS\")\n- if indexStr != \"\" && totalStr != \"\" {\n+ indexStr, indexOk := os.LookupEnv(\"TEST_SHARD_INDEX\")\n+ totalStr, totalOk := os.LookupEnv(\"TEST_TOTAL_SHARDS\")\n+ if indexOk && totalOk {\n// Parse index and total to ints.\nvar err error\nshardIndex, err = strconv.Atoi(indexStr)\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/gofer_test.go", "new_path": "runsc/cmd/gofer_test.go", "diff": "@@ -24,12 +24,11 @@ import (\n)\nfunc tmpDir() string {\n- dir := os.Getenv(\"TEST_TMPDIR\")\n- if dir == \"\" {\n- dir = \"/tmp\"\n- }\n+ if dir, ok := os.LookupEnv(\"TEST_TMPDIR\"); ok {\nreturn dir\n}\n+ return \"/tmp\"\n+}\ntype dir struct {\nrel string\n" }, { "change_type": "MODIFY", "old_path": "runsc/config/flags.go", "new_path": "runsc/config/flags.go", "diff": "@@ -114,7 +114,7 @@ func NewFromFlags() (*Config, error) {\nif len(conf.RootDir) == 0 {\n// If not set, set default root dir to something (hopefully) user-writeable.\nconf.RootDir = \"/var/run/runsc\"\n- if runtimeDir := os.Getenv(\"XDG_RUNTIME_DIR\"); runtimeDir != \"\" {\n+ if runtimeDir, ok := os.LookupEnv(\"XDG_RUNTIME_DIR\"); ok {\nconf.RootDir = filepath.Join(runtimeDir, \"runsc\")\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "test/root/crictl_test.go", "new_path": "test/root/crictl_test.go", "diff": "@@ -353,8 +353,8 @@ func setup(t *testing.T) (*criutil.Crictl, func(), error) {\n// because the shims will be installed there, and containerd may infer\n// the binary name and search the PATH.\nruntimeDir := path.Dir(runtime)\n- modifiedPath := os.Getenv(\"PATH\")\n- if modifiedPath != \"\" {\n+ modifiedPath, ok := os.LookupEnv(\"PATH\")\n+ if ok {\nmodifiedPath = \":\" + modifiedPath // We prepend below.\n}\nmodifiedPath = path.Dir(getContainerd()) + modifiedPath\n" }, { "change_type": "MODIFY", 
"old_path": "website/cmd/server/main.go", "new_path": "website/cmd/server/main.go", "diff": "@@ -366,7 +366,7 @@ func registerProfile(mux *http.ServeMux) {\n}\nfunc envFlagString(name, def string) string {\n- if val := os.Getenv(name); val != \"\" {\n+ if val, ok := os.LookupEnv(name); ok {\nreturn val\n}\nreturn def\n" } ]
Go
Apache License 2.0
google/gvisor
Switch uses of os.Getenv that check for empty string to os.LookupEnv. Whether the variable was found is already returned by syscall.Getenv. os.Getenv drops this value while os.Lookupenv passes it along. PiperOrigin-RevId: 351674032
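The distinction this change relies on, shown in isolation: os.Getenv collapses "unset" and "set to the empty string" into the same return value, while os.LookupEnv reports the two cases separately through its second result.

package main

import (
	"fmt"
	"os"
)

func main() {
	os.Setenv("EMPTY_VAR", "")

	// Getenv cannot tell an empty value from a missing variable.
	fmt.Println(os.Getenv("EMPTY_VAR") == os.Getenv("NO_SUCH_VAR")) // true

	// LookupEnv keeps the distinction in its boolean result.
	if v, ok := os.LookupEnv("EMPTY_VAR"); ok {
		fmt.Printf("EMPTY_VAR is set to %q\n", v)
	}
	if _, ok := os.LookupEnv("NO_SUCH_VAR"); !ok {
		fmt.Println("NO_SUCH_VAR is unset")
	}
}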
259,992
14.01.2021 13:41:25
28,800
dbe4176565b56d9e2f5395e410468a4c98aafd37
Check for existence before permissions Return EEXIST when overwritting a file as long as the caller has exec permission on the parent directory, even if the caller doesn't have write permission. Also reordered the mount write check, which happens before permission is checked. Closes
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "new_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "diff": "@@ -407,33 +407,44 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir\nif err != nil {\nreturn err\n}\n- if err := parent.checkPermissions(rp.Credentials(), vfs.MayWrite|vfs.MayExec); err != nil {\n+\n+ // Order of checks is important. First check if parent directory can be\n+ // executed, then check for existence, and lastly check if mount is writable.\n+ if err := parent.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {\nreturn err\n}\nname := rp.Component()\nif name == \".\" || name == \"..\" {\nreturn syserror.EEXIST\n}\n- if len(name) > maxFilenameLen {\n- return syserror.ENAMETOOLONG\n- }\nif parent.isDeleted() {\nreturn syserror.ENOENT\n}\n+\n+ parent.dirMu.Lock()\n+ defer parent.dirMu.Unlock()\n+\n+ child, err := fs.getChildLocked(ctx, rp.VirtualFilesystem(), parent, name, &ds)\n+ switch {\n+ case err != nil && err != syserror.ENOENT:\n+ return err\n+ case child != nil:\n+ return syserror.EEXIST\n+ }\n+\nmnt := rp.Mount()\nif err := mnt.CheckBeginWrite(); err != nil {\nreturn err\n}\ndefer mnt.EndWrite()\n- parent.dirMu.Lock()\n- defer parent.dirMu.Unlock()\n- if parent.isSynthetic() {\n- if child := parent.children[name]; child != nil {\n- return syserror.EEXIST\n+\n+ if err := parent.checkPermissions(rp.Credentials(), vfs.MayWrite); err != nil {\n+ return err\n}\nif !dir && rp.MustBeDir() {\nreturn syserror.ENOENT\n}\n+ if parent.isSynthetic() {\nif createInSyntheticDir == nil {\nreturn syserror.EPERM\n}\n@@ -449,47 +460,20 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir\nparent.watches.Notify(ctx, name, uint32(ev), 0, vfs.InodeEvent, false /* unlinked */)\nreturn nil\n}\n- if fs.opts.interop == InteropModeShared {\n- if child := parent.children[name]; child != nil && child.isSynthetic() {\n- return syserror.EEXIST\n- }\n- if !dir && rp.MustBeDir() {\n- return syserror.ENOENT\n- }\n- // The existence of a non-synthetic dentry at name would be inconclusive\n- // because the file it represents may have been deleted from the remote\n- // filesystem, so we would need to make an RPC to revalidate the dentry.\n- // Just attempt the file creation RPC instead. If a file does exist, the\n- // RPC will fail with EEXIST like we would have. If the RPC succeeds, and a\n- // stale dentry exists, the dentry will fail revalidation next time it's\n- // used.\n- if err := createInRemoteDir(parent, name, &ds); err != nil {\n- return err\n- }\n- ev := linux.IN_CREATE\n- if dir {\n- ev |= linux.IN_ISDIR\n- }\n- parent.watches.Notify(ctx, name, uint32(ev), 0, vfs.InodeEvent, false /* unlinked */)\n- return nil\n- }\n- if child := parent.children[name]; child != nil {\n- return syserror.EEXIST\n- }\n- if !dir && rp.MustBeDir() {\n- return syserror.ENOENT\n- }\n- // No cached dentry exists; however, there might still be an existing file\n- // at name. As above, we attempt the file creation RPC anyway.\n+ // No cached dentry exists; however, in InteropModeShared there might still be\n+ // an existing file at name. Just attempt the file creation RPC anyways. 
If a\n+ // file does exist, the RPC will fail with EEXIST like we would have.\nif err := createInRemoteDir(parent, name, &ds); err != nil {\nreturn err\n}\n+ if fs.opts.interop != InteropModeShared {\nif child, ok := parent.children[name]; ok && child == nil {\n// Delete the now-stale negative dentry.\ndelete(parent.children, name)\n}\nparent.touchCMtime()\nparent.dirents = nil\n+ }\nev := linux.IN_CREATE\nif dir {\nev |= linux.IN_ISDIR\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/kernfs/filesystem.go", "new_path": "pkg/sentry/fsimpl/kernfs/filesystem.go", "diff": "@@ -208,7 +208,9 @@ func (fs *Filesystem) walkParentDirLocked(ctx context.Context, rp *vfs.Resolving\n// * Filesystem.mu must be locked for at least reading.\n// * isDir(parentInode) == true.\nfunc checkCreateLocked(ctx context.Context, creds *auth.Credentials, name string, parent *Dentry) error {\n- if err := parent.inode.CheckPermissions(ctx, creds, vfs.MayWrite|vfs.MayExec); err != nil {\n+ // Order of checks is important. First check if parent directory can be\n+ // executed, then check for existence, and lastly check if mount is writable.\n+ if err := parent.inode.CheckPermissions(ctx, creds, vfs.MayExec); err != nil {\nreturn err\n}\nif name == \".\" || name == \"..\" {\n@@ -223,6 +225,9 @@ func checkCreateLocked(ctx context.Context, creds *auth.Credentials, name string\nif parent.VFSDentry().IsDead() {\nreturn syserror.ENOENT\n}\n+ if err := parent.inode.CheckPermissions(ctx, creds, vfs.MayWrite); err != nil {\n+ return err\n+ }\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/overlay/filesystem.go", "new_path": "pkg/sentry/fsimpl/overlay/filesystem.go", "diff": "@@ -480,9 +480,6 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir\nif err != nil {\nreturn err\n}\n- if err := parent.checkPermissions(rp.Credentials(), vfs.MayWrite|vfs.MayExec); err != nil {\n- return err\n- }\nname := rp.Component()\nif name == \".\" || name == \"..\" {\nreturn syserror.EEXIST\n@@ -490,11 +487,11 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir\nif parent.vfsd.IsDead() {\nreturn syserror.ENOENT\n}\n- mnt := rp.Mount()\n- if err := mnt.CheckBeginWrite(); err != nil {\n+\n+ if err := parent.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {\nreturn err\n}\n- defer mnt.EndWrite()\n+\nparent.dirMu.Lock()\ndefer parent.dirMu.Unlock()\n@@ -514,6 +511,14 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir\nreturn syserror.ENOENT\n}\n+ mnt := rp.Mount()\n+ if err := mnt.CheckBeginWrite(); err != nil {\n+ return err\n+ }\n+ defer mnt.EndWrite()\n+ if err := parent.checkPermissions(rp.Credentials(), vfs.MayWrite|vfs.MayExec); err != nil {\n+ return err\n+ }\n// Ensure that the parent directory is copied-up so that we can create the\n// new file in the upper layer.\nif err := parent.copyUpLocked(ctx); err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "diff": "@@ -153,7 +153,10 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir\nif err != nil {\nreturn err\n}\n- if err := parentDir.inode.checkPermissions(rp.Credentials(), vfs.MayWrite|vfs.MayExec); err != nil {\n+\n+ // Order of checks is important. 
First check if parent directory can be\n+ // executed, then check for existence, and lastly check if mount is writable.\n+ if err := parentDir.inode.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {\nreturn err\n}\nname := rp.Component()\n@@ -179,6 +182,10 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir\nreturn err\n}\ndefer mnt.EndWrite()\n+\n+ if err := parentDir.inode.checkPermissions(rp.Credentials(), vfs.MayWrite); err != nil {\n+ return err\n+ }\nif err := create(parentDir, name); err != nil {\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/mkdir.cc", "new_path": "test/syscalls/linux/mkdir.cc", "diff": "@@ -82,6 +82,39 @@ TEST_F(MkdirTest, FailsOnDirWithoutWritePerms) {\nSyscallFailsWithErrno(EACCES));\n}\n+TEST_F(MkdirTest, DirAlreadyExists) {\n+ // Drop capabilities that allow us to override file and directory permissions.\n+ ASSERT_NO_ERRNO(SetCapability(CAP_DAC_OVERRIDE, false));\n+ ASSERT_NO_ERRNO(SetCapability(CAP_DAC_READ_SEARCH, false));\n+\n+ ASSERT_THAT(mkdir(dirname_.c_str(), 0777), SyscallSucceeds());\n+ auto dir = JoinPath(dirname_.c_str(), \"foo\");\n+ EXPECT_THAT(mkdir(dir.c_str(), 0777), SyscallSucceeds());\n+\n+ struct {\n+ int mode;\n+ int err;\n+ } tests[] = {\n+ {.mode = 0000, .err = EACCES}, // No perm\n+ {.mode = 0100, .err = EEXIST}, // Exec only\n+ {.mode = 0200, .err = EACCES}, // Write only\n+ {.mode = 0300, .err = EEXIST}, // Write+exec\n+ {.mode = 0400, .err = EACCES}, // Read only\n+ {.mode = 0500, .err = EEXIST}, // Read+exec\n+ {.mode = 0600, .err = EACCES}, // Read+write\n+ {.mode = 0700, .err = EEXIST}, // All\n+ };\n+ for (const auto& t : tests) {\n+ printf(\"mode: 0%o\\n\", t.mode);\n+ EXPECT_THAT(chmod(dirname_.c_str(), t.mode), SyscallSucceeds());\n+ EXPECT_THAT(mkdir(dir.c_str(), 0777), SyscallFailsWithErrno(t.err));\n+ }\n+\n+ // Clean up.\n+ EXPECT_THAT(chmod(dirname_.c_str(), 0777), SyscallSucceeds());\n+ ASSERT_THAT(rmdir(dir.c_str()), SyscallSucceeds());\n+}\n+\nTEST_F(MkdirTest, MkdirAtEmptyPath) {\nASSERT_THAT(mkdir(dirname_.c_str(), 0777), SyscallSucceeds());\nauto fd =\n" } ]
Go
Apache License 2.0
google/gvisor
Check for existence before permissions Return EEXIST when overwritting a file as long as the caller has exec permission on the parent directory, even if the caller doesn't have write permission. Also reordered the mount write check, which happens before permission is checked. Closes #5164 PiperOrigin-RevId: 351868123
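The reordering above makes the observable errno depend on what the caller could legitimately learn: with execute permission on the parent, creating an existing name reports EEXIST even when the parent is not writable, and EACCES only appears when traversal itself is denied. A small sketch of the behavior the new mkdir test table checks; the path is illustrative, and it must run as a non-root user (no CAP_DAC_OVERRIDE) for the permission checks to bite:

package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

func main() {
	parent := "/tmp/mkdir-order-demo"
	child := parent + "/foo"
	must(os.MkdirAll(child, 0o777))

	// Parent is executable but not writable: the existing child is visible,
	// so the failure is EEXIST rather than EACCES.
	must(os.Chmod(parent, 0o500))
	fmt.Println(errors.Is(os.Mkdir(child, 0o777), syscall.EEXIST)) // expected: true

	// Parent cannot be traversed at all: the caller may not learn whether
	// the name exists, so the failure is EACCES.
	must(os.Chmod(parent, 0o000))
	fmt.Println(errors.Is(os.Mkdir(child, 0o777), syscall.EACCES)) // expected: true

	must(os.Chmod(parent, 0o700))
	must(os.RemoveAll(parent))
}

func must(err error) {
	if err != nil {
		panic(err)
	}
}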
259,951
14.01.2021 15:14:11
28,800
833516c139b5fde1b23abab1868798c8309eaa6b
Add stats for ARP Fixes Startblock: has LGTM from sbalana and then add reviewer ghanan
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack.go", "new_path": "pkg/sentry/socket/netstack/netstack.go", "diff": "@@ -186,6 +186,21 @@ var Metrics = tcpip.Stats{\nIPTablesInputDropped: mustCreateMetric(\"/netstack/ip/iptables/input_dropped\", \"Total number of IP packets dropped in the Input chain.\"),\nIPTablesOutputDropped: mustCreateMetric(\"/netstack/ip/iptables/output_dropped\", \"Total number of IP packets dropped in the Output chain.\"),\n},\n+ ARP: tcpip.ARPStats{\n+ PacketsReceived: mustCreateMetric(\"/netstack/arp/packets_received\", \"Number of ARP packets received from the link layer.\"),\n+ DisabledPacketsReceived: mustCreateMetric(\"/netstack/arp/disabled_packets_received\", \"Number of ARP packets received from the link layer when the ARP layer is disabled.\"),\n+ MalformedPacketsReceived: mustCreateMetric(\"/netstack/arp/malformed_packets_received\", \"Number of ARP packets which failed ARP header validation checks.\"),\n+ RequestsReceived: mustCreateMetric(\"/netstack/arp/requests_received\", \"Number of ARP requests received.\"),\n+ RequestsReceivedUnknownTargetAddress: mustCreateMetric(\"/netstack/arp/requests_received_unknown_addr\", \"Number of ARP requests received with an unknown target address.\"),\n+ OutgoingRequestInterfaceHasNoLocalAddressErrors: mustCreateMetric(\"/netstack/arp/outgoing_requests_iface_has_no_addr\", \"Number of failed attempts to send an ARP request with an interface that has no network address.\"),\n+ OutgoingRequestBadLocalAddressErrors: mustCreateMetric(\"/netstack/arp/outgoing_requests_invalid_local_addr\", \"Number of failed attempts to send an ARP request with a provided local address that is invalid.\"),\n+ OutgoingRequestNetworkUnreachableErrors: mustCreateMetric(\"/netstack/arp/outgoing_requests_network_unreachable\", \"Number of failed attempts to send an ARP request with a network unreachable error.\"),\n+ OutgoingRequestsDropped: mustCreateMetric(\"/netstack/arp/outgoing_requests_dropped\", \"Number of ARP requests which failed to write to a link-layer endpoint.\"),\n+ OutgoingRequestsSent: mustCreateMetric(\"/netstack/arp/outgoing_requests_sent\", \"Number of ARP requests sent.\"),\n+ RepliesReceived: mustCreateMetric(\"/netstack/arp/replies_received\", \"Number of ARP replies received.\"),\n+ OutgoingRepliesDropped: mustCreateMetric(\"/netstack/arp/outgoing_replies_dropped\", \"Number of ARP replies which failed to write to a link-layer endpoint.\"),\n+ OutgoingRepliesSent: mustCreateMetric(\"/netstack/arp/outgoing_replies_sent\", \"Number of ARP replies sent.\"),\n+ },\nTCP: tcpip.TCPStats{\nActiveConnectionOpenings: mustCreateMetric(\"/netstack/tcp/active_connection_openings\", \"Number of connections opened successfully via Connect.\"),\nPassiveConnectionOpenings: mustCreateMetric(\"/netstack/tcp/passive_connection_openings\", \"Number of connections opened successfully via Listen.\"),\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/arp/arp.go", "new_path": "pkg/tcpip/network/arp/arp.go", "diff": "@@ -119,21 +119,28 @@ func (*endpoint) WriteHeaderIncludedPacket(*stack.Route, *stack.PacketBuffer) *t\n}\nfunc (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\n+ stats := e.protocol.stack.Stats().ARP\n+ stats.PacketsReceived.Increment()\n+\nif !e.isEnabled() {\n+ stats.DisabledPacketsReceived.Increment()\nreturn\n}\nh := header.ARP(pkt.NetworkHeader().View())\nif !h.IsValid() {\n+ stats.MalformedPacketsReceived.Increment()\nreturn\n}\nswitch h.Op() {\ncase header.ARPRequest:\n+ 
stats.RequestsReceived.Increment()\nlocalAddr := tcpip.Address(h.ProtocolAddressTarget())\nif e.nud == nil {\nif e.linkAddrCache.CheckLocalAddress(e.nic.ID(), header.IPv4ProtocolNumber, localAddr) == 0 {\n+ stats.RequestsReceivedUnknownTargetAddress.Increment()\nreturn // we have no useful answer, ignore the request\n}\n@@ -142,6 +149,7 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\ne.linkAddrCache.AddLinkAddress(e.nic.ID(), addr, linkAddr)\n} else {\nif e.protocol.stack.CheckLocalAddress(e.nic.ID(), header.IPv4ProtocolNumber, localAddr) == 0 {\n+ stats.RequestsReceivedUnknownTargetAddress.Increment()\nreturn // we have no useful answer, ignore the request\n}\n@@ -177,9 +185,14 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\n//\n// Send the packet to the (new) target hardware address on the same\n// hardware on which the request was received.\n- _ = e.nic.WritePacketToRemote(tcpip.LinkAddress(origSender), nil /* gso */, ProtocolNumber, respPkt)\n+ if err := e.nic.WritePacketToRemote(tcpip.LinkAddress(origSender), nil /* gso */, ProtocolNumber, respPkt); err != nil {\n+ stats.OutgoingRepliesDropped.Increment()\n+ } else {\n+ stats.OutgoingRepliesSent.Increment()\n+ }\ncase header.ARPReply:\n+ stats.RepliesReceived.Increment()\naddr := tcpip.Address(h.ProtocolAddressSender())\nlinkAddr := tcpip.LinkAddress(h.HardwareAddressSender())\n@@ -233,6 +246,8 @@ func (*protocol) LinkAddressProtocol() tcpip.NetworkProtocolNumber {\n// LinkAddressRequest implements stack.LinkAddressResolver.LinkAddressRequest.\nfunc (p *protocol) LinkAddressRequest(targetAddr, localAddr tcpip.Address, remoteLinkAddr tcpip.LinkAddress, nic stack.NetworkInterface) *tcpip.Error {\n+ stats := p.stack.Stats().ARP\n+\nif len(remoteLinkAddr) == 0 {\nremoteLinkAddr = header.EthernetBroadcastAddress\n}\n@@ -241,15 +256,18 @@ func (p *protocol) LinkAddressRequest(targetAddr, localAddr tcpip.Address, remot\nif len(localAddr) == 0 {\naddr, err := p.stack.GetMainNICAddress(nicID, header.IPv4ProtocolNumber)\nif err != nil {\n+ stats.OutgoingRequestInterfaceHasNoLocalAddressErrors.Increment()\nreturn err\n}\nif len(addr.Address) == 0 {\n+ stats.OutgoingRequestNetworkUnreachableErrors.Increment()\nreturn tcpip.ErrNetworkUnreachable\n}\nlocalAddr = addr.Address\n} else if p.stack.CheckLocalAddress(nicID, header.IPv4ProtocolNumber, localAddr) == 0 {\n+ stats.OutgoingRequestBadLocalAddressErrors.Increment()\nreturn tcpip.ErrBadLocalAddress\n}\n@@ -269,7 +287,12 @@ func (p *protocol) LinkAddressRequest(targetAddr, localAddr tcpip.Address, remot\nif n := copy(h.ProtocolAddressTarget(), targetAddr); n != header.IPv4AddressSize {\npanic(fmt.Sprintf(\"copied %d bytes, expected %d bytes\", n, header.IPv4AddressSize))\n}\n- return nic.WritePacketToRemote(remoteLinkAddr, nil /* gso */, ProtocolNumber, pkt)\n+ if err := nic.WritePacketToRemote(remoteLinkAddr, nil /* gso */, ProtocolNumber, pkt); err != nil {\n+ stats.OutgoingRequestsDropped.Increment()\n+ return err\n+ }\n+ stats.OutgoingRequestsSent.Increment()\n+ return nil\n}\n// ResolveStaticAddress implements stack.LinkAddressResolver.ResolveStaticAddress.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/arp/arp_test.go", "new_path": "pkg/tcpip/network/arp/arp_test.go", "diff": "@@ -240,6 +240,10 @@ func TestDirectRequest(t *testing.T) {\nfor i, address := range []tcpip.Address{stackAddr, remoteAddr} {\nt.Run(strconv.Itoa(i), func(t *testing.T) {\n+ expectedPacketsReceived := c.s.Stats().ARP.PacketsReceived.Value() + 1\n+ expectedRequestsReceived 
:= c.s.Stats().ARP.RequestsReceived.Value() + 1\n+ expectedRepliesSent := c.s.Stats().ARP.OutgoingRepliesSent.Value() + 1\n+\ninject(address)\npi, _ := c.linkEP.ReadContext(context.Background())\nif pi.Proto != arp.ProtocolNumber {\n@@ -249,6 +253,9 @@ func TestDirectRequest(t *testing.T) {\nif !rep.IsValid() {\nt.Fatalf(\"invalid ARP response: len = %d; response = %x\", len(rep), rep)\n}\n+ if got := rep.Op(); got != header.ARPReply {\n+ t.Fatalf(\"got Op = %d, want = %d\", got, header.ARPReply)\n+ }\nif got, want := tcpip.LinkAddress(rep.HardwareAddressSender()), stackLinkAddr; got != want {\nt.Errorf(\"got HardwareAddressSender = %s, want = %s\", got, want)\n}\n@@ -261,6 +268,16 @@ func TestDirectRequest(t *testing.T) {\nif got, want := tcpip.Address(rep.ProtocolAddressTarget()), tcpip.Address(h.ProtocolAddressSender()); got != want {\nt.Errorf(\"got ProtocolAddressTarget = %s, want = %s\", got, want)\n}\n+\n+ if got := c.s.Stats().ARP.PacketsReceived.Value(); got != expectedPacketsReceived {\n+ t.Errorf(\"got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = %d\", got, expectedPacketsReceived)\n+ }\n+ if got := c.s.Stats().ARP.RequestsReceived.Value(); got != expectedRequestsReceived {\n+ t.Errorf(\"got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = %d\", got, expectedRequestsReceived)\n+ }\n+ if got := c.s.Stats().ARP.OutgoingRepliesSent.Value(); got != expectedRepliesSent {\n+ t.Errorf(\"got c.s.Stats().ARP.OutgoingRepliesSent.Value() = %d, want = %d\", got, expectedRepliesSent)\n+ }\n})\n}\n@@ -273,6 +290,84 @@ func TestDirectRequest(t *testing.T) {\nif pkt, ok := c.linkEP.ReadContext(ctx); ok {\nt.Errorf(\"stackAddrBad: unexpected packet sent, Proto=%v\", pkt.Proto)\n}\n+ if got := c.s.Stats().ARP.RequestsReceivedUnknownTargetAddress.Value(); got != 1 {\n+ t.Errorf(\"got c.s.Stats().ARP.RequestsReceivedUnKnownTargetAddress.Value() = %d, want = 1\", got)\n+ }\n+}\n+\n+func TestMalformedPacket(t *testing.T) {\n+ c := newTestContext(t, false)\n+ defer c.cleanup()\n+\n+ v := make(buffer.View, header.ARPSize)\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: v.ToVectorisedView(),\n+ })\n+\n+ c.linkEP.InjectInbound(arp.ProtocolNumber, pkt)\n+\n+ if got := c.s.Stats().ARP.PacketsReceived.Value(); got != 1 {\n+ t.Errorf(\"got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = 1\", got)\n+ }\n+ if got := c.s.Stats().ARP.MalformedPacketsReceived.Value(); got != 1 {\n+ t.Errorf(\"got c.s.Stats().ARP.MalformedPacketsReceived.Value() = %d, want = 1\", got)\n+ }\n+}\n+\n+func TestDisabledEndpoint(t *testing.T) {\n+ c := newTestContext(t, false)\n+ defer c.cleanup()\n+\n+ ep, err := c.s.GetNetworkEndpoint(nicID, header.ARPProtocolNumber)\n+ if err != nil {\n+ t.Fatalf(\"GetNetworkEndpoint(%d, header.ARPProtocolNumber) failed: %s\", nicID, err)\n+ }\n+ ep.Disable()\n+\n+ v := make(buffer.View, header.ARPSize)\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: v.ToVectorisedView(),\n+ })\n+\n+ c.linkEP.InjectInbound(arp.ProtocolNumber, pkt)\n+\n+ if got := c.s.Stats().ARP.PacketsReceived.Value(); got != 1 {\n+ t.Errorf(\"got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = 1\", got)\n+ }\n+ if got := c.s.Stats().ARP.DisabledPacketsReceived.Value(); got != 1 {\n+ t.Errorf(\"got c.s.Stats().ARP.DisabledPacketsReceived.Value() = %d, want = 1\", got)\n+ }\n+}\n+\n+func TestDirectReply(t *testing.T) {\n+ c := newTestContext(t, false)\n+ defer c.cleanup()\n+\n+ const senderMAC = \"\\x01\\x02\\x03\\x04\\x05\\x06\"\n+ const senderIPv4 = 
\"\\x0a\\x00\\x00\\x02\"\n+\n+ v := make(buffer.View, header.ARPSize)\n+ h := header.ARP(v)\n+ h.SetIPv4OverEthernet()\n+ h.SetOp(header.ARPReply)\n+\n+ copy(h.HardwareAddressSender(), senderMAC)\n+ copy(h.ProtocolAddressSender(), senderIPv4)\n+ copy(h.HardwareAddressTarget(), stackLinkAddr)\n+ copy(h.ProtocolAddressTarget(), stackAddr)\n+\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: v.ToVectorisedView(),\n+ })\n+\n+ c.linkEP.InjectInbound(arp.ProtocolNumber, pkt)\n+\n+ if got := c.s.Stats().ARP.PacketsReceived.Value(); got != 1 {\n+ t.Errorf(\"got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = 1\", got)\n+ }\n+ if got := c.s.Stats().ARP.RepliesReceived.Value(); got != 1 {\n+ t.Errorf(\"got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = 1\", got)\n+ }\n}\nfunc TestDirectRequestWithNeighborCache(t *testing.T) {\n@@ -311,6 +406,11 @@ func TestDirectRequestWithNeighborCache(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n+ packetsRecv := c.s.Stats().ARP.PacketsReceived.Value()\n+ requestsRecv := c.s.Stats().ARP.RequestsReceived.Value()\n+ requestsRecvUnknownAddr := c.s.Stats().ARP.RequestsReceivedUnknownTargetAddress.Value()\n+ outgoingReplies := c.s.Stats().ARP.OutgoingRepliesSent.Value()\n+\n// Inject an incoming ARP request.\nv := make(buffer.View, header.ARPSize)\nh := header.ARP(v)\n@@ -323,6 +423,13 @@ func TestDirectRequestWithNeighborCache(t *testing.T) {\nData: v.ToVectorisedView(),\n}))\n+ if got, want := c.s.Stats().ARP.PacketsReceived.Value(), packetsRecv+1; got != want {\n+ t.Errorf(\"got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = %d\", got, want)\n+ }\n+ if got, want := c.s.Stats().ARP.RequestsReceived.Value(), requestsRecv+1; got != want {\n+ t.Errorf(\"got c.s.Stats().ARP.PacketsReceived.Value() = %d, want = %d\", got, want)\n+ }\n+\nif !test.isValid {\n// No packets should be sent after receiving an invalid ARP request.\n// There is no need to perform a blocking read here, since packets are\n@@ -330,9 +437,20 @@ func TestDirectRequestWithNeighborCache(t *testing.T) {\nif pkt, ok := c.linkEP.Read(); ok {\nt.Errorf(\"unexpected packet sent with network protocol number %d\", pkt.Proto)\n}\n+ if got, want := c.s.Stats().ARP.RequestsReceivedUnknownTargetAddress.Value(), requestsRecvUnknownAddr+1; got != want {\n+ t.Errorf(\"got c.s.Stats().ARP.RequestsReceivedUnknownTargetAddress.Value() = %d, want = %d\", got, want)\n+ }\n+ if got, want := c.s.Stats().ARP.OutgoingRepliesSent.Value(), outgoingReplies; got != want {\n+ t.Errorf(\"got c.s.Stats().ARP.OutgoingRepliesSent.Value() = %d, want = %d\", got, want)\n+ }\n+\nreturn\n}\n+ if got, want := c.s.Stats().ARP.OutgoingRepliesSent.Value(), outgoingReplies+1; got != want {\n+ t.Errorf(\"got c.s.Stats().ARP.OutgoingRepliesSent.Value() = %d, want = %d\", got, want)\n+ }\n+\n// Verify an ARP response was sent.\npi, ok := c.linkEP.Read()\nif !ok {\n@@ -418,6 +536,8 @@ type testInterface struct {\nstack.LinkEndpoint\nnicID tcpip.NICID\n+\n+ writeErr *tcpip.Error\n}\nfunc (t *testInterface) ID() tcpip.NICID {\n@@ -441,6 +561,10 @@ func (*testInterface) Promiscuous() bool {\n}\nfunc (t *testInterface) WritePacketToRemote(remoteLinkAddr tcpip.LinkAddress, gso *stack.GSO, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) *tcpip.Error {\n+ if t.writeErr != nil {\n+ return t.writeErr\n+ }\n+\nvar r stack.Route\nr.NetProto = protocol\nr.ResolveWith(remoteLinkAddr)\n@@ -458,9 +582,14 @@ func TestLinkAddressRequest(t *testing.T) {\nlocalAddr 
tcpip.Address\nremoteLinkAddr tcpip.LinkAddress\n+ linkErr *tcpip.Error\nexpectedErr *tcpip.Error\nexpectedLocalAddr tcpip.Address\nexpectedRemoteLinkAddr tcpip.LinkAddress\n+ expectedRequestsSent uint64\n+ expectedRequestBadLocalAddressErrors uint64\n+ expectedRequestNetworkUnreachableErrors uint64\n+ expectedRequestDroppedErrors uint64\n}{\n{\nname: \"Unicast\",\n@@ -469,6 +598,9 @@ func TestLinkAddressRequest(t *testing.T) {\nremoteLinkAddr: remoteLinkAddr,\nexpectedLocalAddr: stackAddr,\nexpectedRemoteLinkAddr: remoteLinkAddr,\n+ expectedRequestsSent: 1,\n+ expectedRequestBadLocalAddressErrors: 0,\n+ expectedRequestNetworkUnreachableErrors: 0,\n},\n{\nname: \"Multicast\",\n@@ -477,6 +609,9 @@ func TestLinkAddressRequest(t *testing.T) {\nremoteLinkAddr: \"\",\nexpectedLocalAddr: stackAddr,\nexpectedRemoteLinkAddr: header.EthernetBroadcastAddress,\n+ expectedRequestsSent: 1,\n+ expectedRequestBadLocalAddressErrors: 0,\n+ expectedRequestNetworkUnreachableErrors: 0,\n},\n{\nname: \"Unicast with unspecified source\",\n@@ -484,6 +619,9 @@ func TestLinkAddressRequest(t *testing.T) {\nremoteLinkAddr: remoteLinkAddr,\nexpectedLocalAddr: stackAddr,\nexpectedRemoteLinkAddr: remoteLinkAddr,\n+ expectedRequestsSent: 1,\n+ expectedRequestBadLocalAddressErrors: 0,\n+ expectedRequestNetworkUnreachableErrors: 0,\n},\n{\nname: \"Multicast with unspecified source\",\n@@ -491,28 +629,52 @@ func TestLinkAddressRequest(t *testing.T) {\nremoteLinkAddr: \"\",\nexpectedLocalAddr: stackAddr,\nexpectedRemoteLinkAddr: header.EthernetBroadcastAddress,\n+ expectedRequestsSent: 1,\n+ expectedRequestBadLocalAddressErrors: 0,\n+ expectedRequestNetworkUnreachableErrors: 0,\n},\n{\nname: \"Unicast with unassigned address\",\nlocalAddr: testAddr,\nremoteLinkAddr: remoteLinkAddr,\nexpectedErr: tcpip.ErrBadLocalAddress,\n+ expectedRequestsSent: 0,\n+ expectedRequestBadLocalAddressErrors: 1,\n+ expectedRequestNetworkUnreachableErrors: 0,\n},\n{\nname: \"Multicast with unassigned address\",\nlocalAddr: testAddr,\nremoteLinkAddr: \"\",\nexpectedErr: tcpip.ErrBadLocalAddress,\n+ expectedRequestsSent: 0,\n+ expectedRequestBadLocalAddressErrors: 1,\n+ expectedRequestNetworkUnreachableErrors: 0,\n},\n{\nname: \"Unicast with no local address available\",\nremoteLinkAddr: remoteLinkAddr,\nexpectedErr: tcpip.ErrNetworkUnreachable,\n+ expectedRequestsSent: 0,\n+ expectedRequestBadLocalAddressErrors: 0,\n+ expectedRequestNetworkUnreachableErrors: 1,\n},\n{\nname: \"Multicast with no local address available\",\nremoteLinkAddr: \"\",\nexpectedErr: tcpip.ErrNetworkUnreachable,\n+ expectedRequestsSent: 0,\n+ expectedRequestBadLocalAddressErrors: 0,\n+ expectedRequestNetworkUnreachableErrors: 1,\n+ },\n+ {\n+ name: \"Link error\",\n+ nicAddr: stackAddr,\n+ localAddr: stackAddr,\n+ remoteLinkAddr: remoteLinkAddr,\n+ linkErr: tcpip.ErrInvalidEndpointState,\n+ expectedErr: tcpip.ErrInvalidEndpointState,\n+ expectedRequestDroppedErrors: 1,\n},\n}\n@@ -543,10 +705,24 @@ func TestLinkAddressRequest(t *testing.T) {\n// can mock a link address request and observe the packets sent to the\n// link endpoint even though the stack uses the real NIC to validate the\n// local address.\n- if err := linkRes.LinkAddressRequest(remoteAddr, test.localAddr, test.remoteLinkAddr, &testInterface{LinkEndpoint: linkEP, nicID: nicID}); err != test.expectedErr {\n+ iface := testInterface{LinkEndpoint: linkEP, nicID: nicID, writeErr: test.linkErr}\n+ if err := linkRes.LinkAddressRequest(remoteAddr, test.localAddr, test.remoteLinkAddr, &iface); err != test.expectedErr 
{\nt.Fatalf(\"got p.LinkAddressRequest(%s, %s, %s, _) = %s, want = %s\", remoteAddr, test.localAddr, test.remoteLinkAddr, err, test.expectedErr)\n}\n+ if got := s.Stats().ARP.OutgoingRequestsSent.Value(); got != test.expectedRequestsSent {\n+ t.Errorf(\"got s.Stats().ARP.OutgoingRequestsSent.Value() = %d, want = %d\", got, test.expectedRequestsSent)\n+ }\n+ if got := s.Stats().ARP.OutgoingRequestBadLocalAddressErrors.Value(); got != test.expectedRequestBadLocalAddressErrors {\n+ t.Errorf(\"got s.Stats().ARP.OutgoingRequestBadLocalAddressErrors.Value() = %d, want = %d\", got, test.expectedRequestBadLocalAddressErrors)\n+ }\n+ if got := s.Stats().ARP.OutgoingRequestNetworkUnreachableErrors.Value(); got != test.expectedRequestNetworkUnreachableErrors {\n+ t.Errorf(\"got s.Stats().ARP.OutgoingRequestNetworkUnreachableErrors.Value() = %d, want = %d\", got, test.expectedRequestNetworkUnreachableErrors)\n+ }\n+ if got := s.Stats().ARP.OutgoingRequestsDropped.Value(); got != test.expectedRequestDroppedErrors {\n+ t.Errorf(\"got s.Stats().ARP.OutgoingRequestsDropped.Value() = %d, want = %d\", got, test.expectedRequestDroppedErrors)\n+ }\n+\nif test.expectedErr != nil {\nreturn\n}\n@@ -561,6 +737,9 @@ func TestLinkAddressRequest(t *testing.T) {\n}\nrep := header.ARP(stack.PayloadSince(pkt.Pkt.NetworkHeader()))\n+ if got := rep.Op(); got != header.ARPRequest {\n+ t.Errorf(\"got Op = %d, want = %d\", got, header.ARPRequest)\n+ }\nif got := tcpip.LinkAddress(rep.HardwareAddressSender()); got != stackLinkAddr {\nt.Errorf(\"got HardwareAddressSender = %s, want = %s\", got, stackLinkAddr)\n}\n@@ -576,3 +755,22 @@ func TestLinkAddressRequest(t *testing.T) {\n})\n}\n}\n+\n+func TestLinkAddressRequestWithoutNIC(t *testing.T) {\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol},\n+ })\n+ p := s.NetworkProtocolInstance(arp.ProtocolNumber)\n+ linkRes, ok := p.(stack.LinkAddressResolver)\n+ if !ok {\n+ t.Fatal(\"expected ARP protocol to implement stack.LinkAddressResolver\")\n+ }\n+\n+ if err := linkRes.LinkAddressRequest(remoteAddr, \"\", remoteLinkAddr, &testInterface{nicID: nicID}); err != tcpip.ErrUnknownNICID {\n+ t.Fatalf(\"got p.LinkAddressRequest(%s, %s, %s, _) = %s, want = %s\", remoteAddr, \"\", remoteLinkAddr, err, tcpip.ErrUnknownNICID)\n+ }\n+\n+ if got := s.Stats().ARP.OutgoingRequestInterfaceHasNoLocalAddressErrors.Value(); got != 1 {\n+ t.Errorf(\"got s.Stats().ARP.OutgoingRequestInterfaceHasNoLocalAddressErrors.Value() = %d, want = 1\", got)\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tcpip.go", "new_path": "pkg/tcpip/tcpip.go", "diff": "@@ -1591,6 +1591,59 @@ type IPStats struct {\nOptionUnknownReceived *StatCounter\n}\n+// ARPStats collects ARP-specific stats.\n+type ARPStats struct {\n+ // PacketsReceived is the number of ARP packets received from the link layer.\n+ PacketsReceived *StatCounter\n+\n+ // DisabledPacketsReceived is the number of ARP packets received from the link\n+ // layer when the ARP layer is disabled.\n+ DisabledPacketsReceived *StatCounter\n+\n+ // MalformedPacketsReceived is the number of ARP packets that were dropped due\n+ // to being malformed.\n+ MalformedPacketsReceived *StatCounter\n+\n+ // RequestsReceived is the number of ARP requests received.\n+ RequestsReceived *StatCounter\n+\n+ // RequestsReceivedUnknownTargetAddress is the number of ARP requests that\n+ // were targeted to an interface different from the one it was received on.\n+ RequestsReceivedUnknownTargetAddress 
*StatCounter\n+\n+ // OutgoingRequestInterfaceHasNoLocalAddressErrors is the number of failures\n+ // to send an ARP request because the interface has no network address\n+ // assigned to it.\n+ OutgoingRequestInterfaceHasNoLocalAddressErrors *StatCounter\n+\n+ // OutgoingRequestBadLocalAddressErrors is the number of failures to send an\n+ // ARP request with a bad local address.\n+ OutgoingRequestBadLocalAddressErrors *StatCounter\n+\n+ // OutgoingRequestNetworkUnreachableErrors is the number of failures to send\n+ // an ARP request with a network unreachable error.\n+ OutgoingRequestNetworkUnreachableErrors *StatCounter\n+\n+ // OutgoingRequestsDropped is the number of ARP requests which failed to write\n+ // to a link-layer endpoint.\n+ OutgoingRequestsDropped *StatCounter\n+\n+ // OutgoingRequestSent is the number of ARP requests successfully written to a\n+ // link-layer endpoint.\n+ OutgoingRequestsSent *StatCounter\n+\n+ // RepliesReceived is the number of ARP replies received.\n+ RepliesReceived *StatCounter\n+\n+ // OutgoingRepliesDropped is the number of ARP replies which failed to write\n+ // to a link-layer endpoint.\n+ OutgoingRepliesDropped *StatCounter\n+\n+ // OutgoingRepliesSent is the number of ARP replies successfully written to a\n+ // link-layer endpoint.\n+ OutgoingRepliesSent *StatCounter\n+}\n+\n// TCPStats collects TCP-specific stats.\ntype TCPStats struct {\n// ActiveConnectionOpenings is the number of connections opened\n@@ -1743,6 +1796,9 @@ type Stats struct {\n// IP breaks out IP-specific stats (both v4 and v6).\nIP IPStats\n+ // ARP breaks out ARP-specific stats.\n+ ARP ARPStats\n+\n// TCP breaks out TCP-specific stats.\nTCP TCPStats\n" } ]
Go
Apache License 2.0
google/gvisor
Add stats for ARP Fixes #4963 Startblock: has LGTM from sbalana and then add reviewer ghanan PiperOrigin-RevId: 351886320
259,975
14.01.2021 17:02:01
28,800
95371cff350ef5c22c0e0b76ef9474c16e29a6f6
Don't run profiles on runc.
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -323,6 +323,7 @@ containerd-tests: containerd-test-1.4.3\n## BENCHMARKS_PLATFORMS - platforms to run benchmarks (e.g. ptrace kvm).\n## BENCHMARKS_FILTER - filter to be applied to the test suite.\n## BENCHMARKS_OPTIONS - options to be passed to the test.\n+## BENCHMARKS_PROFILE - profile options to be passed to the test.\n##\nBENCHMARKS_PROJECT ?= gvisor-benchmarks\nBENCHMARKS_DATASET ?= kokoro\n@@ -334,7 +335,8 @@ BENCHMARKS_PLATFORMS ?= ptrace\nBENCHMARKS_TARGETS := //test/benchmarks/media:ffmpeg_test\nBENCHMARKS_FILTER := .\nBENCHMARKS_OPTIONS := -test.benchtime=30s\n-BENCHMARKS_ARGS := -test.v -test.bench=$(BENCHMARKS_FILTER) -pprof-dir=/tmp/profile -pprof-cpu -pprof-heap -pprof-block -pprof-mutex $(BENCHMARKS_OPTIONS)\n+BENCHMARKS_ARGS := -test.v -test.bench=$(BENCHMARKS_FILTER) $(BENCHMARKS_OPTIONS)\n+BENCHMARKS_PROFILE := -pprof-dir=/tmp/profile -pprof-cpu -pprof-heap -pprof-block -pprof-mutex\ninit-benchmark-table: ## Initializes a BigQuery table with the benchmark schema.\n@$(call run,//tools/parsers:parser,init --project=$(BENCHMARKS_PROJECT) --dataset=$(BENCHMARKS_DATASET) --table=$(BENCHMARKS_TABLE))\n@@ -344,9 +346,10 @@ init-benchmark-table: ## Initializes a BigQuery table with the benchmark schema.\nrun_benchmark = \\\n($(call header,BENCHMARK $(1) $(2)); \\\nset -euo pipefail; \\\n- if test \"$(1)\" != \"runc\"; then $(call install_runtime,$(1),--profile $(2)); fi; \\\nexport T=$$(mktemp --tmpdir logs.$(1).XXXXXX); \\\n- $(call sudo,$(BENCHMARKS_TARGETS),-runtime=$(1) $(BENCHMARKS_ARGS)) | tee $$T; \\\n+ if test \"$(1)\" = \"runc\"; then $(call sudo,$(BENCHMARKS_TARGETS),-runtime=$(1) $(BENCHMARKS_ARGS)) | tee $$T; fi; \\\n+ if test \"$(1)\" != \"runc\"; then $(call install_runtime,$(1),--profile $(2)); \\\n+ $(call sudo,$(BENCHMARKS_TARGETS),-runtime=$(1) $(BENCHMARKS_ARGS) $(BENCHMARKS_PROFILE)) | tee $$T; fi; \\\nif test \"$(BENCHMARKS_UPLOAD)\" = \"true\"; then \\\n$(call run,tools/parsers:parser,parse --debug --file=$$T --runtime=$(1) --suite_name=$(BENCHMARKS_SUITE) --project=$(BENCHMARKS_PROJECT) --dataset=$(BENCHMARKS_DATASET) --table=$(BENCHMARKS_TABLE) --official=$(BENCHMARKS_OFFICIAL)); \\\nfi; \\\n" } ]
Go
Apache License 2.0
google/gvisor
Don't run profiles on runc. PiperOrigin-RevId: 351906812
260,023
15.01.2021 12:01:14
28,800
f7f66c8c6cb5284afe11f4571568866e3c605466
Add tests for cases of ARP failures on TCP connect Also fix test expectation for UDP sendto() case in tuntap syscall test. Fixes
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/tuntap.cc", "new_path": "test/syscalls/linux/tuntap.cc", "diff": "#include <linux/if_tun.h>\n#include <netinet/ip.h>\n#include <netinet/ip_icmp.h>\n+#include <poll.h>\n#include <sys/ioctl.h>\n#include <sys/socket.h>\n#include <sys/types.h>\n@@ -44,6 +45,9 @@ constexpr int kIPLen = 4;\nconstexpr const char kDevNetTun[] = \"/dev/net/tun\";\nconstexpr const char kTapName[] = \"tap0\";\n+#define kTapIPAddr htonl(0x0a000001) /* Inet 10.0.0.1 */\n+#define kTapPeerIPAddr htonl(0x0a000002) /* Inet 10.0.0.2 */\n+\nconstexpr const uint8_t kMacA[ETH_ALEN] = {0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA};\nconstexpr const uint8_t kMacB[ETH_ALEN] = {0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB};\n@@ -79,8 +83,9 @@ struct ping_pkt {\nchar payload[64];\n} __attribute__((packed));\n-ping_pkt CreatePingPacket(const uint8_t srcmac[ETH_ALEN], const char* srcip,\n- const uint8_t dstmac[ETH_ALEN], const char* dstip) {\n+ping_pkt CreatePingPacket(const uint8_t srcmac[ETH_ALEN], const in_addr_t srcip,\n+ const uint8_t dstmac[ETH_ALEN],\n+ const in_addr_t dstip) {\nping_pkt pkt = {};\npkt.pi.pi_protocol = htons(ETH_P_IP);\n@@ -98,8 +103,8 @@ ping_pkt CreatePingPacket(const uint8_t srcmac[ETH_ALEN], const char* srcip,\npkt.ip.frag_off = 1 << 6; // Do not fragment\npkt.ip.ttl = 64;\npkt.ip.protocol = IPPROTO_ICMP;\n- inet_pton(AF_INET, dstip, &pkt.ip.daddr);\n- inet_pton(AF_INET, srcip, &pkt.ip.saddr);\n+ pkt.ip.daddr = dstip;\n+ pkt.ip.saddr = srcip;\npkt.ip.check = IPChecksum(pkt.ip);\npkt.icmp.type = ICMP_ECHO;\n@@ -124,8 +129,10 @@ struct arp_pkt {\nuint8_t arp_tpa[kIPLen];\n} __attribute__((packed));\n-std::string CreateArpPacket(const uint8_t srcmac[ETH_ALEN], const char* srcip,\n- const uint8_t dstmac[ETH_ALEN], const char* dstip) {\n+std::string CreateArpPacket(const uint8_t srcmac[ETH_ALEN],\n+ const in_addr_t srcip,\n+ const uint8_t dstmac[ETH_ALEN],\n+ const in_addr_t dstip) {\nstd::string buffer;\nbuffer.resize(sizeof(arp_pkt));\n@@ -144,9 +151,9 @@ std::string CreateArpPacket(const uint8_t srcmac[ETH_ALEN], const char* srcip,\npkt->arp.ar_op = htons(ARPOP_REPLY);\nmemcpy(pkt->arp_sha, srcmac, sizeof(pkt->arp_sha));\n- inet_pton(AF_INET, srcip, pkt->arp_spa);\n+ memcpy(pkt->arp_spa, &srcip, sizeof(pkt->arp_spa));\nmemcpy(pkt->arp_tha, dstmac, sizeof(pkt->arp_tha));\n- inet_pton(AF_INET, dstip, pkt->arp_tpa);\n+ memcpy(pkt->arp_tpa, &dstip, sizeof(pkt->arp_tpa));\n}\nreturn buffer;\n}\n@@ -165,6 +172,16 @@ class TuntapTest : public ::testing::Test {\nvoid SetUp() override {\nhave_net_admin_cap_ =\nASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN));\n+\n+ if (have_net_admin_cap_ && !IsRunningOnGvisor()) {\n+ // gVisor always creates enabled/up'd interfaces, while Linux does not (as\n+ // observed in b/110961832). Some of the tests require the Linux stack to\n+ // notify the socket of any link-address-resolution failures. 
Those\n+ // notifications do not seem to show up when the loopback interface in the\n+ // namespace is down.\n+ auto link = ASSERT_NO_ERRNO_AND_VALUE(GetLinkByName(\"lo\"));\n+ ASSERT_NO_ERRNO(LinkChangeFlags(link.index, IFF_UP, IFF_UP));\n+ }\n}\nvoid TearDown() override {\n@@ -263,8 +280,8 @@ TEST_F(TuntapTest, WriteToDownDevice) {\nEXPECT_THAT(write(fd.get(), buf, sizeof(buf)), SyscallFailsWithErrno(EIO));\n}\n-PosixErrorOr<FileDescriptor> OpenAndAttachTap(\n- const std::string& dev_name, const std::string& dev_ipv4_addr) {\n+PosixErrorOr<FileDescriptor> OpenAndAttachTap(const std::string& dev_name,\n+ const in_addr_t dev_addr) {\n// Interface creation.\nASSIGN_OR_RETURN_ERRNO(FileDescriptor fd, Open(kDevNetTun, O_RDWR));\n@@ -277,11 +294,10 @@ PosixErrorOr<FileDescriptor> OpenAndAttachTap(\nASSIGN_OR_RETURN_ERRNO(auto link, GetLinkByName(dev_name));\n+ const struct in_addr dev_ipv4_addr = {.s_addr = dev_addr};\n// Interface setup.\n- struct in_addr addr;\n- inet_pton(AF_INET, dev_ipv4_addr.c_str(), &addr);\n- EXPECT_NO_ERRNO(LinkAddLocalAddr(link.index, AF_INET, /*prefixlen=*/24, &addr,\n- sizeof(addr)));\n+ EXPECT_NO_ERRNO(LinkAddLocalAddr(link.index, AF_INET, /*prefixlen=*/24,\n+ &dev_ipv4_addr, sizeof(dev_ipv4_addr)));\nif (!IsRunningOnGvisor()) {\n// FIXME(b/110961832): gVisor doesn't support setting MAC address on\n@@ -313,9 +329,11 @@ TEST_F(TuntapTest, PingKernel) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));\nFileDescriptor fd =\n- ASSERT_NO_ERRNO_AND_VALUE(OpenAndAttachTap(kTapName, \"10.0.0.1\"));\n- ping_pkt ping_req = CreatePingPacket(kMacB, \"10.0.0.2\", kMacA, \"10.0.0.1\");\n- std::string arp_rep = CreateArpPacket(kMacB, \"10.0.0.2\", kMacA, \"10.0.0.1\");\n+ ASSERT_NO_ERRNO_AND_VALUE(OpenAndAttachTap(kTapName, kTapIPAddr));\n+ ping_pkt ping_req =\n+ CreatePingPacket(kMacB, kTapPeerIPAddr, kMacA, kTapIPAddr);\n+ std::string arp_rep =\n+ CreateArpPacket(kMacB, kTapPeerIPAddr, kMacA, kTapIPAddr);\n// Send ping, this would trigger an ARP request on Linux.\nEXPECT_THAT(write(fd.get(), &ping_req, sizeof(ping_req)),\n@@ -368,20 +386,20 @@ TEST_F(TuntapTest, SendUdpTriggersArpResolution) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));\nFileDescriptor fd =\n- ASSERT_NO_ERRNO_AND_VALUE(OpenAndAttachTap(kTapName, \"10.0.0.1\"));\n+ ASSERT_NO_ERRNO_AND_VALUE(OpenAndAttachTap(kTapName, kTapIPAddr));\n// Send a UDP packet to remote.\nint sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);\nASSERT_THAT(sock, SyscallSucceeds());\n- struct sockaddr_in remote = {};\n- remote.sin_family = AF_INET;\n- remote.sin_port = htons(42);\n- inet_pton(AF_INET, \"10.0.0.2\", &remote.sin_addr);\n- int ret = sendto(sock, \"hello\", 5, 0, reinterpret_cast<sockaddr*>(&remote),\n- sizeof(remote));\n- ASSERT_THAT(ret, ::testing::AnyOf(SyscallSucceeds(),\n- SyscallFailsWithErrno(EHOSTDOWN)));\n+ struct sockaddr_in remote = {\n+ .sin_family = AF_INET,\n+ .sin_port = htons(42),\n+ .sin_addr = {.s_addr = kTapPeerIPAddr},\n+ };\n+ ASSERT_THAT(sendto(sock, \"hello\", 5, 0, reinterpret_cast<sockaddr*>(&remote),\n+ sizeof(remote)),\n+ SyscallSucceeds());\nstruct inpkt {\nunion {\n@@ -407,21 +425,78 @@ TEST_F(TuntapTest, SendUdpTriggersArpResolution) {\n}\n}\n+// TCPBlockingConnectFailsArpResolution tests for TCP connect to fail on link\n+// address resolution failure to a routable, but non existent peer.\n+TEST_F(TuntapTest, TCPBlockingConnectFailsArpResolution) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));\n+\n+ FileDescriptor sender =\n+ 
ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP));\n+\n+ FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(OpenAndAttachTap(kTapName, kTapIPAddr));\n+\n+ sockaddr_in connect_addr = {\n+ .sin_family = AF_INET,\n+ .sin_addr = {.s_addr = kTapPeerIPAddr},\n+ };\n+ ASSERT_THAT(connect(sender.get(),\n+ reinterpret_cast<const struct sockaddr*>(&connect_addr),\n+ sizeof(connect_addr)),\n+ SyscallFailsWithErrno(EHOSTUNREACH));\n+}\n+\n+// TCPNonBlockingConnectFailsArpResolution tests for TCP non-blocking connect to\n+// to trigger an error event to be notified to poll on link address resolution\n+// failure to a routable, but non existent peer.\n+TEST_F(TuntapTest, TCPNonBlockingConnectFailsArpResolution) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));\n+\n+ FileDescriptor sender = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));\n+\n+ FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(OpenAndAttachTap(kTapName, kTapIPAddr));\n+\n+ sockaddr_in connect_addr = {\n+ .sin_family = AF_INET,\n+ .sin_addr = {.s_addr = kTapPeerIPAddr},\n+ };\n+ ASSERT_THAT(connect(sender.get(),\n+ reinterpret_cast<const struct sockaddr*>(&connect_addr),\n+ sizeof(connect_addr)),\n+ SyscallFailsWithErrno(EINPROGRESS));\n+\n+ constexpr int kTimeout = 10000;\n+ struct pollfd pfd = {\n+ .fd = sender.get(),\n+ .events = POLLIN | POLLOUT,\n+ };\n+ ASSERT_THAT(poll(&pfd, 1, kTimeout), SyscallSucceedsWithValue(1));\n+ ASSERT_EQ(pfd.revents, POLLIN | POLLOUT | POLLHUP | POLLERR);\n+\n+ ASSERT_THAT(connect(sender.get(),\n+ reinterpret_cast<const struct sockaddr*>(&connect_addr),\n+ sizeof(connect_addr)),\n+ SyscallFailsWithErrno(EHOSTUNREACH));\n+}\n+\n// Write hang bug found by syskaller: b/155928773\n// https://syzkaller.appspot.com/bug?id=065b893bd8d1d04a4e0a1d53c578537cde1efe99\nTEST_F(TuntapTest, WriteHangBug155928773) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));\nFileDescriptor fd =\n- ASSERT_NO_ERRNO_AND_VALUE(OpenAndAttachTap(kTapName, \"10.0.0.1\"));\n+ ASSERT_NO_ERRNO_AND_VALUE(OpenAndAttachTap(kTapName, kTapIPAddr));\nint sock = socket(AF_INET, SOCK_DGRAM, 0);\nASSERT_THAT(sock, SyscallSucceeds());\n- struct sockaddr_in remote = {};\n- remote.sin_family = AF_INET;\n- remote.sin_port = htons(42);\n- inet_pton(AF_INET, \"10.0.0.1\", &remote.sin_addr);\n+ struct sockaddr_in remote = {\n+ .sin_family = AF_INET,\n+ .sin_port = htons(42),\n+ .sin_addr = {.s_addr = kTapIPAddr},\n+ };\n// Return values do not matter in this test.\nconnect(sock, reinterpret_cast<struct sockaddr*>(&remote), sizeof(remote));\nwrite(sock, \"hello\", 5);\n" } ]
Go
Apache License 2.0
google/gvisor
Add tests for cases of ARP failures on TCP connect Also fix test expectation for UDP sendto() case in tuntap syscall test. Fixes #5155 PiperOrigin-RevId: 352056612
260,003
15.01.2021 12:48:58
28,800
f1420cf48418c01694eaf3110ac411915b217d36
Add sanity check on return values from Write io.Writer.Write requires err to be non-nil if n < len(v). We could allow this but it will be irreversible if users depend on this behavior. Ported the test that discovered this.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/buffer/view.go", "new_path": "pkg/tcpip/buffer/view.go", "diff": "@@ -17,6 +17,7 @@ package buffer\nimport (\n\"bytes\"\n+ \"fmt\"\n\"io\"\n)\n@@ -167,6 +168,9 @@ func (vv *VectorisedView) ReadTo(dst io.Writer, count int, peek bool) (int, erro\nif err != nil {\nbreak\n}\n+ if n != len(v) {\n+ panic(fmt.Sprintf(\"io.Writer.Write succeeded with incomplete write: %d != %d\", n, len(v)))\n+ }\n}\nif !peek {\nvv.TrimFront(done)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -2304,9 +2304,11 @@ cc_binary(\ndeps = [\n\":ip_socket_test_util\",\n\":socket_test_util\",\n+ \"@com_google_absl//absl/strings\",\ngtest,\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n+ \"//test/util:thread_util\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_generic_stress.cc", "new_path": "test/syscalls/linux/socket_generic_stress.cc", "diff": "#include <sys/socket.h>\n#include <sys/un.h>\n+#include <array>\n+#include <string>\n+\n#include \"gtest/gtest.h\"\n+#include \"absl/strings/string_view.h\"\n#include \"test/syscalls/linux/ip_socket_test_util.h\"\n#include \"test/syscalls/linux/socket_test_util.h\"\n#include \"test/util/test_util.h\"\n+#include \"test/util/thread_util.h\"\nnamespace gvisor {\nnamespace testing {\n@@ -138,5 +143,71 @@ INSTANTIATE_TEST_SUITE_P(\nSetSockOpt(SOL_SOCKET, SO_REUSEADDR, &kSockOptOn)(\nDualStackTCPAcceptBindPersistentListenerSocketPair(0))));\n+using DataTransferStressTest = SocketPairTest;\n+\n+TEST_P(DataTransferStressTest, BigDataTransfer) {\n+ // TODO(b/165912341): These are too slow on KVM platform with nested virt.\n+ SKIP_IF(GvisorPlatform() == Platform::kKVM);\n+\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+ int client_fd = sockets->first_fd();\n+ int server_fd = sockets->second_fd();\n+\n+ ScopedThread echo([server_fd]() {\n+ std::array<uint8_t, 1024> buf;\n+ for (;;) {\n+ ssize_t r = read(server_fd, buf.data(), buf.size());\n+ ASSERT_THAT(r, SyscallSucceeds());\n+ if (r == 0) {\n+ break;\n+ }\n+ for (size_t i = 0; i < r;) {\n+ ssize_t w = write(server_fd, buf.data() + i, r - i);\n+ ASSERT_GE(w, 0);\n+ i += w;\n+ }\n+ }\n+ ASSERT_THAT(shutdown(server_fd, SHUT_WR), SyscallSucceeds());\n+ });\n+\n+ const std::string chunk = \"Though this upload be but little, it is fierce.\";\n+ std::string big_string;\n+ while (big_string.size() < 31 << 20) {\n+ big_string += chunk;\n+ }\n+ absl::string_view data = big_string;\n+\n+ ScopedThread writer([client_fd, data]() {\n+ absl::string_view view = data;\n+ while (!view.empty()) {\n+ ssize_t n = write(client_fd, view.data(), view.size());\n+ ASSERT_GE(n, 0);\n+ view = view.substr(n);\n+ }\n+ ASSERT_THAT(shutdown(client_fd, SHUT_WR), SyscallSucceeds());\n+ });\n+\n+ std::string buf;\n+ buf.resize(1 << 20);\n+ while (!data.empty()) {\n+ ssize_t n = read(client_fd, buf.data(), buf.size());\n+ ASSERT_GE(n, 0);\n+ for (size_t i = 0; i < n; i += chunk.size()) {\n+ size_t c = std::min(chunk.size(), n - i);\n+ ASSERT_EQ(buf.substr(i, c), data.substr(i, c)) << \"offset \" << i;\n+ }\n+ data = data.substr(n);\n+ }\n+ // Should read EOF now.\n+ ASSERT_THAT(read(client_fd, buf.data(), buf.size()),\n+ SyscallSucceedsWithValue(0));\n+}\n+\n+INSTANTIATE_TEST_SUITE_P(\n+ AllConnectedSockets, DataTransferStressTest,\n+ ::testing::Values(IPv6TCPAcceptBindPersistentListenerSocketPair(0),\n+ IPv4TCPAcceptBindPersistentListenerSocketPair(0),\n+ 
DualStackTCPAcceptBindPersistentListenerSocketPair(0)));\n+\n} // namespace testing\n} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Add sanity check on return values from Write io.Writer.Write requires err to be non-nil if n < len(v). We could allow this but it will be irreversible if users depend on this behavior. Ported the test that discovered this. PiperOrigin-RevId: 352065946
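The sanity check above leans on the io.Writer contract: Write must return a non-nil error whenever it writes fewer than len(p) bytes. A small standalone Go sketch of that check follows; shortWriter and writeAll are hypothetical names, not gVisor code, and the panic from the diff is replaced by a returned error.

package main

import (
	"fmt"
	"io"
)

// shortWriter is a hypothetical io.Writer that breaks the contract by
// reporting a short write together with a nil error.
type shortWriter struct{}

func (shortWriter) Write(p []byte) (int, error) {
	return len(p) / 2, nil // pretend only half the bytes were accepted
}

// writeAll applies the same sanity check as the commit: a short write with a
// nil error is treated as a bug in the writer rather than silently accepted.
func writeAll(w io.Writer, p []byte) error {
	n, err := w.Write(p)
	if err != nil {
		return err
	}
	if n != len(p) {
		return fmt.Errorf("io.Writer.Write succeeded with incomplete write: %d != %d", n, len(p))
	}
	return nil
}

func main() {
	fmt.Println(writeAll(shortWriter{}, []byte("hello"))) // reports the incomplete write
}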
259,992
15.01.2021 13:01:21
28,800
f03144d886791afcdd37962388e9a6294a08c49f
Support TEST_PREMATURE_EXIT_FILE in syscall tests
[ { "change_type": "MODIFY", "old_path": "test/runner/runner.go", "new_path": "test/runner/runner.go", "diff": "@@ -49,7 +49,6 @@ var (\noverlay = flag.Bool(\"overlay\", false, \"wrap filesystem mounts with writable tmpfs overlay\")\nvfs2 = flag.Bool(\"vfs2\", false, \"enable VFS2\")\nfuse = flag.Bool(\"fuse\", false, \"enable FUSE\")\n- parallel = flag.Bool(\"parallel\", false, \"run tests in parallel\")\nrunscPath = flag.String(\"runsc\", \"\", \"path to runsc binary\")\naddUDSTree = flag.Bool(\"add-uds-tree\", false, \"expose a tree of UDS utilities for use in tests\")\n@@ -83,13 +82,8 @@ func runTestCaseNative(testBin string, tc gtest.TestCase, t *testing.T) {\nif !found {\nenv = append(env, newEnvVar)\n}\n- // Remove env variables that cause the gunit binary to write output\n- // files, since they will stomp on eachother, and on the output files\n- // from this go test.\n- env = filterEnv(env, []string{\"GUNIT_OUTPUT\", \"TEST_PREMATURE_EXIT_FILE\", \"XML_OUTPUT_FILE\"})\n-\n// Remove shard env variables so that the gunit binary does not try to\n- // intepret them.\n+ // interpret them.\nenv = filterEnv(env, []string{\"TEST_SHARD_INDEX\", \"TEST_TOTAL_SHARDS\", \"GTEST_SHARD_INDEX\", \"GTEST_TOTAL_SHARDS\"})\nif *addUDSTree {\n@@ -390,13 +384,8 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {\nenv = append(env, vfsVar+\"=VFS1\")\n}\n- // Remove env variables that cause the gunit binary to write output\n- // files, since they will stomp on eachother, and on the output files\n- // from this go test.\n- env = filterEnv(env, []string{\"GUNIT_OUTPUT\", \"TEST_PREMATURE_EXIT_FILE\", \"XML_OUTPUT_FILE\"})\n-\n// Remove shard env variables so that the gunit binary does not try to\n- // intepret them.\n+ // interpret them.\nenv = filterEnv(env, []string{\"TEST_SHARD_INDEX\", \"TEST_TOTAL_SHARDS\", \"GTEST_SHARD_INDEX\", \"GTEST_TOTAL_SHARDS\"})\n// Set TEST_TMPDIR to /tmp, as some of the syscall tests require it to\n@@ -507,9 +496,6 @@ func main() {\ntests = append(tests, testing.InternalTest{\nName: fmt.Sprintf(\"%s_%s\", tc.Suite, tc.Name),\nF: func(t *testing.T) {\n- if *parallel {\n- t.Parallel()\n- }\nif *platform == \"native\" {\n// Run the test case on host.\nrunTestCaseNative(testBin, tc, t)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/fault.cc", "new_path": "test/syscalls/linux/fault.cc", "diff": "@@ -52,6 +52,13 @@ void sigact_handler(int sig, siginfo_t* siginfo, void* context) {\nuintptr_t fault_addr = reinterpret_cast<uintptr_t>(&Fault);\nEXPECT_GE(pc, fault_addr);\nEXPECT_LT(pc, fault_addr + 64);\n+\n+ // The following file is used to detect tests that exit prematurely. Since\n+ // we need to call exit() here, delete the file by hand.\n+ const char* exit_file = getenv(\"TEST_PREMATURE_EXIT_FILE\");\n+ if (exit_file != nullptr) {\n+ ASSERT_THAT(unlink(exit_file), SyscallSucceeds());\n+ }\nexit(0);\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Support TEST_PREMATURE_EXIT_FILE in syscall tests PiperOrigin-RevId: 352068182
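As the comment added in fault.cc explains, the premature-exit protocol keeps a file (named by TEST_PREMATURE_EXIT_FILE) alive until the test framework finishes normally, so a test that calls exit() on purpose must delete it by hand. A minimal Go sketch of that cleanup step, with a made-up helper name:

package main

import (
	"log"
	"os"
)

// removePrematureExitFile deletes the file named by TEST_PREMATURE_EXIT_FILE,
// if set, so an intentional exit is not reported as a premature one.
func removePrematureExitFile() {
	path := os.Getenv("TEST_PREMATURE_EXIT_FILE")
	if path == "" {
		return
	}
	if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
		log.Printf("removing %s: %v", path, err)
	}
}

func main() {
	removePrematureExitFile()
	os.Exit(0) // intentional early exit, now invisible to the harness check
}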
259,860
15.01.2021 14:38:44
28,800
9db126eb65ada4b2233ccb85a0e9c402b7efc5de
Combine reference count logs into one message. Otherwise, races can occur between concurrent logs.
[ { "change_type": "MODIFY", "old_path": "pkg/refsvfs2/refs_map.go", "new_path": "pkg/refsvfs2/refs_map.go", "diff": "@@ -109,8 +109,7 @@ func LogDecRef(obj CheckedObject, refs int64) {\n// obj.LogRefs() should be checked before calling logEvent, in order to avoid\n// calling any text processing needed to evaluate msg.\nfunc logEvent(obj CheckedObject, msg string) {\n- log.Infof(\"[%s %p] %s:\", obj.RefType(), obj, msg)\n- log.Infof(refs_vfs1.FormatStack(refs_vfs1.RecordStack()))\n+ log.Infof(\"[%s %p] %s:\\n%s\", obj.RefType(), obj, msg, refs_vfs1.FormatStack(refs_vfs1.RecordStack()))\n}\n// DoLeakCheck iterates through the live object map and logs a message for each\n@@ -122,10 +121,11 @@ func DoLeakCheck() {\ndefer liveObjectsMu.Unlock()\nleaked := len(liveObjects)\nif leaked > 0 {\n- log.Warningf(\"Leak checking detected %d leaked objects:\", leaked)\n+ msg := fmt.Sprintf(\"Leak checking detected %d leaked objects:\\n\", leaked)\nfor obj := range liveObjects {\n- log.Warningf(obj.LeakMessage())\n+ msg += obj.LeakMessage() + \"\\n\"\n}\n+ log.Warningf(msg)\n}\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Combine reference count logs into one message. Otherwise, races can occur between concurrent logs. PiperOrigin-RevId: 352086914
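The race described above comes from emitting one log call per leaked object, which lets concurrent reports interleave their lines. A simplified sketch of the build-then-log-once pattern, using plain strings in place of the CheckedObject type from the diff:

package main

import (
	"fmt"
	"log"
	"strings"
)

// reportLeaks assembles the whole report first and emits it with a single log
// call, so lines from concurrent reports cannot interleave.
func reportLeaks(leaked []string) {
	if len(leaked) == 0 {
		return
	}
	var b strings.Builder
	fmt.Fprintf(&b, "Leak checking detected %d leaked objects:\n", len(leaked))
	for _, msg := range leaked {
		b.WriteString(msg + "\n")
	}
	log.Print(b.String())
}

func main() {
	reportLeaks([]string{"[fd 0xc000010000] leaked", "[inode 0xc000010080] leaked"})
}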
259,992
15.01.2021 14:49:27
28,800
c7fc4a5d66dc5d6680219819f872c070a0590ca9
Add fsstress tests Updates
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -295,6 +295,11 @@ packetimpact-tests: load-packetimpact $(RUNTIME_BIN)\n@$(call test_runtime,$(RUNTIME),--jobs=HOST_CPUS*3 --local_test_jobs=HOST_CPUS*3 //test/packetimpact/tests:all_tests)\n.PHONY: packetimpact-tests\n+fsstress-test: load-basic $(RUNTIME_BIN)\n+ @$(call install_runtime,$(RUNTIME),--vfs2)\n+ @$(call test_runtime,$(RUNTIME),//test/fsstress:fsstress_test)\n+.PHONY: fsstress-test\n+\n# Specific containerd version tests.\ncontainerd-test-%: load-basic_alpine load-basic_python load-basic_busybox load-basic_resolv load-basic_httpd load-basic_ubuntu $(RUNTIME_BIN)\n@$(call install_runtime,$(RUNTIME),) # Clear flags.\n" }, { "change_type": "ADD", "old_path": null, "new_path": "images/basic/fsstress/Dockerfile", "diff": "+# Usage: docker run --rm fsstress -d /test -n 10000 -p 100 -X -v\n+FROM alpine\n+\n+RUN apk add git\n+RUN git clone https://github.com/linux-test-project/ltp.git --depth 1\n+\n+WORKDIR /ltp\n+RUN ./travis/alpine.sh\n+RUN make autotools && ./configure\n+RUN make -C testcases/kernel/fs/fsstress\n+RUN cp ./testcases/kernel/fs/fsstress/fsstress /usr/bin\n+RUN rm -rf /fsstress /tmp\n+\n+WORKDIR /\n+# This is required, otherwise running with -p > 1 prematurelly exits.\n+COPY run.sh .\n+ENTRYPOINT [\"/run.sh\"]\n" }, { "change_type": "ADD", "old_path": null, "new_path": "images/basic/fsstress/run.sh", "diff": "+#!/bin/sh\n+\n+# Copyright 2021 The gVisor Authors.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+/usr/bin/fsstress \"$@\"\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/fsstress/BUILD", "diff": "+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_test(\n+ name = \"fsstress_test\",\n+ size = \"large\",\n+ srcs = [\n+ \"fsstress_test.go\",\n+ ],\n+ library = \":fsstress\",\n+ tags = [\n+ # Requires docker and runsc to be configured before the test runs.\n+ \"manual\",\n+ \"local\",\n+ ],\n+ deps = [\n+ \"//pkg/test/dockerutil\",\n+ ],\n+)\n+\n+go_library(\n+ name = \"fsstress\",\n+ srcs = [\"fsstress.go\"],\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/fsstress/fsstress.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package fsstress is empty. 
See fsstress_test.go for description.\n+package fsstress\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/fsstress/fsstress_test.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package fsstress runs fsstress tool inside a docker container.\n+package fsstress\n+\n+import (\n+ \"context\"\n+ \"math/rand\"\n+ \"strconv\"\n+ \"strings\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"gvisor.dev/gvisor/pkg/test/dockerutil\"\n+)\n+\n+func init() {\n+ rand.Seed(int64(time.Now().Nanosecond()))\n+}\n+\n+func fsstress(t *testing.T, dir string) {\n+ ctx := context.Background()\n+ d := dockerutil.MakeContainer(ctx, t)\n+ defer d.CleanUp(ctx)\n+\n+ const (\n+ operations = \"10000\"\n+ processes = \"100\"\n+ image = \"basic/fsstress\"\n+ )\n+ seed := strconv.FormatUint(uint64(rand.Uint32()), 10)\n+ args := []string{\"-d\", dir, \"-n\", operations, \"-p\", processes, \"-seed\", seed, \"-X\"}\n+ t.Logf(\"Repro: docker run --rm --runtime=runsc %s %s\", image, strings.Join(args, \"\"))\n+ out, err := d.Run(ctx, dockerutil.RunOpts{Image: image}, args...)\n+ if err != nil {\n+ t.Fatalf(\"docker run failed: %v\\noutput: %s\", err, out)\n+ }\n+ lines := strings.SplitN(out, \"\\n\", 2)\n+ if len(lines) > 1 || !strings.HasPrefix(out, \"seed =\") {\n+ t.Fatalf(\"unexpected output: %s\", out)\n+ }\n+}\n+\n+func TestFsstressGofer(t *testing.T) {\n+ fsstress(t, \"/test\")\n+}\n+\n+func TestFsstressTmpfs(t *testing.T) {\n+ fsstress(t, \"/tmp\")\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Add fsstress tests Updates #5273 PiperOrigin-RevId: 352088736
260,004
15.01.2021 15:01:18
28,800
55c7fe48d223ee5678dff7f5bf9a9e5f0482ab37
Populate EgressRoute, GSO, Netproto for batch writes We loop over the list of packets anyways so setting these aren't expensive. Now that they are populated only by the link endpoint that uses them, TCP does not need to.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/packetsocket/endpoint.go", "new_path": "pkg/tcpip/link/packetsocket/endpoint.go", "diff": "@@ -43,7 +43,7 @@ func (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, protocol tcpip.Ne\n// WritePackets implements stack.LinkEndpoint.WritePackets.\nfunc (e *endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.PacketBufferList, proto tcpip.NetworkProtocolNumber) (int, *tcpip.Error) {\nfor pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {\n- e.Endpoint.DeliverOutboundPacket(pkt.EgressRoute.RemoteLinkAddress(), pkt.EgressRoute.LocalLinkAddress, pkt.NetworkProtocolNumber, pkt)\n+ e.Endpoint.DeliverOutboundPacket(r.RemoteLinkAddress(), r.LocalLinkAddress, pkt.NetworkProtocolNumber, pkt)\n}\nreturn e.Endpoint.WritePackets(r, gso, pkts, proto)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/qdisc/fifo/endpoint.go", "new_path": "pkg/tcpip/link/qdisc/fifo/endpoint.go", "diff": "@@ -166,15 +166,12 @@ func (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, protocol tcpip.Ne\n}\n// WritePackets implements stack.LinkEndpoint.WritePackets.\n-//\n-// Being a batch API, each packet in pkts should have the following fields\n-// populated:\n-// - pkt.EgressRoute\n-// - pkt.GSOOptions\n-// - pkt.NetworkProtocolNumber\n-func (e *endpoint) WritePackets(_ *stack.Route, _ *stack.GSO, pkts stack.PacketBufferList, _ tcpip.NetworkProtocolNumber) (int, *tcpip.Error) {\n+func (e *endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.PacketBufferList, protocol tcpip.NetworkProtocolNumber) (int, *tcpip.Error) {\nenqueued := 0\nfor pkt := pkts.Front(); pkt != nil; {\n+ pkt.EgressRoute = r\n+ pkt.GSOOptions = gso\n+ pkt.NetworkProtocolNumber = protocol\nd := e.dispatchers[int(pkt.Hash)%len(e.dispatchers)]\nnxt := pkt.Next()\nif !d.q.enqueue(pkt) {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/connect.go", "new_path": "pkg/tcpip/transport/tcp/connect.go", "diff": "@@ -784,9 +784,6 @@ func sendTCPBatch(r *stack.Route, tf tcpFields, data buffer.VectorisedView, gso\n})\npkt.Hash = tf.txHash\npkt.Owner = owner\n- pkt.EgressRoute = r\n- pkt.GSOOptions = gso\n- pkt.NetworkProtocolNumber = r.NetProto\ndata.ReadToVV(&pkt.Data, packetSize)\nbuildTCPHdr(r, tf, pkt, gso)\ntf.seq = tf.seq.Add(seqnum.Size(packetSize))\n" } ]
Go
Apache License 2.0
google/gvisor
Populate EgressRoute, GSO, Netproto for batch writes We loop over the list of packets anyways so setting these aren't expensive. Now that they are populated only by the link endpoint that uses them, TCP does not need to. PiperOrigin-RevId: 352090853
260,003
15.01.2021 15:03:30
28,800
ec9e263f213c59e93f9c8b8123012b3db2dddc9a
Correctly return EMSGSIZE when packet is too big in raw socket. IPv4 previously accepts the packet, while IPv6 panics. Neither is the behavior in Linux. splice() in Linux has different behavior than in gVisor. This change documents it in the SpliceTooLong test. Reported-by:
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/igmp.go", "new_path": "pkg/tcpip/network/ipv4/igmp.go", "diff": "@@ -262,13 +262,15 @@ func (igmp *igmpState) writePacket(destAddress tcpip.Address, groupAddress tcpip\nlocalAddr := addressEndpoint.AddressWithPrefix().Address\naddressEndpoint.DecRef()\naddressEndpoint = nil\n- igmp.ep.addIPHeader(localAddr, destAddress, pkt, stack.NetworkHeaderParams{\n+ if err := igmp.ep.addIPHeader(localAddr, destAddress, pkt, stack.NetworkHeaderParams{\nProtocol: header.IGMPProtocolNumber,\nTTL: header.IGMPTTL,\nTOS: stack.DefaultTOS,\n}, header.IPv4OptionsSerializer{\n&header.IPv4SerializableRouterAlertOption{},\n- })\n+ }); err != nil {\n+ panic(fmt.Sprintf(\"failed to add IP header: %s\", err))\n+ }\nsentStats := igmp.ep.protocol.stack.Stats().IGMP.PacketsSent\nif err := igmp.ep.nic.WritePacketToRemote(header.EthernetAddressFromMulticastIPv4Address(destAddress), nil /* gso */, ProtocolNumber, pkt); err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/ipv4.go", "new_path": "pkg/tcpip/network/ipv4/ipv4.go", "diff": "@@ -237,7 +237,7 @@ func (e *endpoint) NetworkProtocolNumber() tcpip.NetworkProtocolNumber {\nreturn e.protocol.Number()\n}\n-func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.PacketBuffer, params stack.NetworkHeaderParams, options header.IPv4OptionsSerializer) {\n+func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.PacketBuffer, params stack.NetworkHeaderParams, options header.IPv4OptionsSerializer) *tcpip.Error {\nhdrLen := header.IPv4MinimumSize\nvar optLen int\nif options != nil {\n@@ -245,19 +245,19 @@ func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.Packet\n}\nhdrLen += optLen\nif hdrLen > header.IPv4MaximumHeaderSize {\n- // Since we have no way to report an error we must either panic or create\n- // a packet which is different to what was requested. Choose panic as this\n- // would be a programming error that should be caught in testing.\n- panic(fmt.Sprintf(\"IPv4 Options %d bytes, Max %d\", optLen, header.IPv4MaximumOptionsSize))\n+ return tcpip.ErrMessageTooLong\n}\nip := header.IPv4(pkt.NetworkHeader().Push(hdrLen))\n- length := uint16(pkt.Size())\n+ length := pkt.Size()\n+ if length > math.MaxUint16 {\n+ return tcpip.ErrMessageTooLong\n+ }\n// RFC 6864 section 4.3 mandates uniqueness of ID values for non-atomic\n// datagrams. 
Since the DF bit is never being set here, all datagrams\n// are non-atomic and need an ID.\nid := atomic.AddUint32(&e.protocol.ids[hashRoute(srcAddr, dstAddr, params.Protocol, e.protocol.hashIV)%buckets], 1)\nip.Encode(&header.IPv4Fields{\n- TotalLength: length,\n+ TotalLength: uint16(length),\nID: uint16(id),\nTTL: params.TTL,\nTOS: params.TOS,\n@@ -268,6 +268,7 @@ func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.Packet\n})\nip.SetChecksum(^ip.CalculateChecksum())\npkt.NetworkProtocolNumber = ProtocolNumber\n+ return nil\n}\n// handleFragments fragments pkt and calls the handler function on each\n@@ -295,7 +296,9 @@ func (e *endpoint) handleFragments(r *stack.Route, gso *stack.GSO, networkMTU ui\n// WritePacket writes a packet to the given destination address and protocol.\nfunc (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, params stack.NetworkHeaderParams, pkt *stack.PacketBuffer) *tcpip.Error {\n- e.addIPHeader(r.LocalAddress, r.RemoteAddress, pkt, params, nil /* options */)\n+ if err := e.addIPHeader(r.LocalAddress, r.RemoteAddress, pkt, params, nil /* options */); err != nil {\n+ return err\n+ }\n// iptables filtering. All packets that reach here are locally\n// generated.\n@@ -383,7 +386,10 @@ func (e *endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.Packe\n}\nfor pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {\n- e.addIPHeader(r.LocalAddress, r.RemoteAddress, pkt, params, nil /* options */)\n+ if err := e.addIPHeader(r.LocalAddress, r.RemoteAddress, pkt, params, nil /* options */); err != nil {\n+ return 0, err\n+ }\n+\nnetworkMTU, err := calculateNetworkMTU(e.nic.MTU(), uint32(pkt.NetworkHeader().View().Size()))\nif err != nil {\nr.Stats().IP.OutgoingPacketErrors.IncrementBy(uint64(pkts.Len()))\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ipv6.go", "new_path": "pkg/tcpip/network/ipv6/ipv6.go", "diff": "@@ -553,11 +553,11 @@ func (e *endpoint) MaxHeaderLength() uint16 {\nreturn e.nic.MaxHeaderLength() + header.IPv6MinimumSize\n}\n-func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.PacketBuffer, params stack.NetworkHeaderParams, extensionHeaders header.IPv6ExtHdrSerializer) {\n+func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.PacketBuffer, params stack.NetworkHeaderParams, extensionHeaders header.IPv6ExtHdrSerializer) *tcpip.Error {\nextHdrsLen := extensionHeaders.Length()\nlength := pkt.Size() + extensionHeaders.Length()\nif length > math.MaxUint16 {\n- panic(fmt.Sprintf(\"IPv6 payload too large: %d, must be <= %d\", length, math.MaxUint16))\n+ return tcpip.ErrMessageTooLong\n}\nip := header.IPv6(pkt.NetworkHeader().Push(header.IPv6MinimumSize + extHdrsLen))\nip.Encode(&header.IPv6Fields{\n@@ -570,6 +570,7 @@ func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.Packet\nExtensionHeaders: extensionHeaders,\n})\npkt.NetworkProtocolNumber = ProtocolNumber\n+ return nil\n}\nfunc packetMustBeFragmented(pkt *stack.PacketBuffer, networkMTU uint32, gso *stack.GSO) bool {\n@@ -622,7 +623,9 @@ func (e *endpoint) handleFragments(r *stack.Route, gso *stack.GSO, networkMTU ui\n// WritePacket writes a packet to the given destination address and protocol.\nfunc (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, params stack.NetworkHeaderParams, pkt *stack.PacketBuffer) *tcpip.Error {\n- e.addIPHeader(r.LocalAddress, r.RemoteAddress, pkt, params, nil /* extensionHeaders */)\n+ if err := e.addIPHeader(r.LocalAddress, r.RemoteAddress, pkt, 
params, nil /* extensionHeaders */); err != nil {\n+ return err\n+ }\n// iptables filtering. All packets that reach here are locally\n// generated.\n@@ -711,7 +714,9 @@ func (e *endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.Packe\nlinkMTU := e.nic.MTU()\nfor pb := pkts.Front(); pb != nil; pb = pb.Next() {\n- e.addIPHeader(r.LocalAddress, r.RemoteAddress, pb, params, nil /* extensionHeaders */)\n+ if err := e.addIPHeader(r.LocalAddress, r.RemoteAddress, pb, params, nil /* extensionHeaders */); err != nil {\n+ return 0, err\n+ }\nnetworkMTU, err := calculateNetworkMTU(linkMTU, uint32(pb.NetworkHeader().View().Size()))\nif err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/mld.go", "new_path": "pkg/tcpip/network/ipv6/mld.go", "diff": "@@ -249,10 +249,12 @@ func (mld *mldState) writePacket(destAddress, groupAddress tcpip.Address, mldTyp\nData: buffer.View(icmp).ToVectorisedView(),\n})\n- mld.ep.addIPHeader(localAddress, destAddress, pkt, stack.NetworkHeaderParams{\n+ if err := mld.ep.addIPHeader(localAddress, destAddress, pkt, stack.NetworkHeaderParams{\nProtocol: header.ICMPv6ProtocolNumber,\nTTL: header.MLDHopLimit,\n- }, extensionHeaders)\n+ }, extensionHeaders); err != nil {\n+ panic(fmt.Sprintf(\"failed to add IP header: %s\", err))\n+ }\nif err := mld.ep.nic.WritePacketToRemote(header.EthernetAddressFromMulticastIPv6Address(destAddress), nil /* gso */, ProtocolNumber, pkt); err != nil {\nsentStats.Dropped.Increment()\nreturn false, err\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ndp.go", "new_path": "pkg/tcpip/network/ipv6/ndp.go", "diff": "@@ -732,10 +732,12 @@ func (ndp *ndpState) sendDADPacket(addr tcpip.Address, addressEndpoint stack.Add\n})\nsent := ndp.ep.protocol.stack.Stats().ICMP.V6.PacketsSent\n- ndp.ep.addIPHeader(header.IPv6Any, snmc, pkt, stack.NetworkHeaderParams{\n+ if err := ndp.ep.addIPHeader(header.IPv6Any, snmc, pkt, stack.NetworkHeaderParams{\nProtocol: header.ICMPv6ProtocolNumber,\nTTL: header.NDPHopLimit,\n- }, nil /* extensionHeaders */)\n+ }, nil /* extensionHeaders */); err != nil {\n+ panic(fmt.Sprintf(\"failed to add IP header: %s\", err))\n+ }\nif err := ndp.ep.nic.WritePacketToRemote(header.EthernetAddressFromMulticastIPv6Address(snmc), nil /* gso */, ProtocolNumber, pkt); err != nil {\nsent.Dropped.Increment()\n@@ -1854,11 +1856,12 @@ func (ndp *ndpState) startSolicitingRouters() {\n})\nsent := ndp.ep.protocol.stack.Stats().ICMP.V6.PacketsSent\n- ndp.ep.addIPHeader(localAddr, header.IPv6AllRoutersMulticastAddress, pkt, stack.NetworkHeaderParams{\n+ if err := ndp.ep.addIPHeader(localAddr, header.IPv6AllRoutersMulticastAddress, pkt, stack.NetworkHeaderParams{\nProtocol: header.ICMPv6ProtocolNumber,\nTTL: header.NDPHopLimit,\n- }, nil /* extensionHeaders */)\n-\n+ }, nil /* extensionHeaders */); err != nil {\n+ panic(fmt.Sprintf(\"failed to add IP header: %s\", err))\n+ }\nif err := ndp.ep.nic.WritePacketToRemote(header.EthernetAddressFromMulticastIPv6Address(header.IPv6AllRoutersMulticastAddress), nil /* gso */, ProtocolNumber, pkt); err != nil {\nsent.Dropped.Increment()\nlog.Printf(\"startSolicitingRouters: error writing NDP router solicit message on NIC(%d); err = %s\", ndp.ep.nic.ID(), err)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/packet_socket_raw.cc", "new_path": "test/syscalls/linux/packet_socket_raw.cc", "diff": "@@ -678,6 +678,58 @@ TEST_P(RawPacketTest, GetSocketAcceptConn) {\nINSTANTIATE_TEST_SUITE_P(AllInetTests, 
RawPacketTest,\n::testing::Values(ETH_P_IP, ETH_P_ALL));\n+class RawPacketMsgSizeTest : public ::testing::TestWithParam<TestAddress> {};\n+\n+TEST_P(RawPacketMsgSizeTest, SendTooLong) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ TestAddress addr = GetParam().WithPort(kPort);\n+\n+ FileDescriptor udp_sock =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(addr.family(), SOCK_RAW, IPPROTO_UDP));\n+\n+ ASSERT_THAT(\n+ connect(udp_sock.get(), reinterpret_cast<struct sockaddr*>(&addr.addr),\n+ addr.addr_len),\n+ SyscallSucceeds());\n+\n+ const char buf[65536] = {};\n+ ASSERT_THAT(send(udp_sock.get(), buf, sizeof(buf), 0),\n+ SyscallFailsWithErrno(EMSGSIZE));\n+}\n+\n+TEST_P(RawPacketMsgSizeTest, SpliceTooLong) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ const char buf[65536] = {};\n+ int fds[2];\n+ ASSERT_THAT(pipe(fds), SyscallSucceeds());\n+ ASSERT_THAT(write(fds[1], buf, sizeof(buf)),\n+ SyscallSucceedsWithValue(sizeof(buf)));\n+\n+ TestAddress addr = GetParam().WithPort(kPort);\n+\n+ FileDescriptor udp_sock =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(addr.family(), SOCK_RAW, IPPROTO_UDP));\n+\n+ ASSERT_THAT(\n+ connect(udp_sock.get(), reinterpret_cast<struct sockaddr*>(&addr.addr),\n+ addr.addr_len),\n+ SyscallSucceeds());\n+\n+ ssize_t n = splice(fds[0], nullptr, udp_sock.get(), nullptr, sizeof(buf), 0);\n+ if (IsRunningOnGvisor()) {\n+ EXPECT_THAT(n, SyscallFailsWithErrno(EMSGSIZE));\n+ } else {\n+ // TODO(gvisor.dev/issue/138): Linux sends out multiple UDP datagrams, each\n+ // of the size of a page.\n+ EXPECT_THAT(n, SyscallSucceedsWithValue(sizeof(buf)));\n+ }\n+}\n+\n+INSTANTIATE_TEST_SUITE_P(AllRawPacketMsgSizeTest, RawPacketMsgSizeTest,\n+ ::testing::Values(V4Loopback(), V6Loopback()));\n+\n} // namespace\n} // namespace testing\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_test_util.cc", "new_path": "test/syscalls/linux/socket_test_util.cc", "diff": "@@ -791,6 +791,19 @@ void RecvNoData(int sock) {\nSyscallFailsWithErrno(EAGAIN));\n}\n+TestAddress TestAddress::WithPort(uint16_t port) const {\n+ TestAddress addr = *this;\n+ switch (addr.family()) {\n+ case AF_INET:\n+ reinterpret_cast<sockaddr_in*>(&addr.addr)->sin_port = htons(port);\n+ break;\n+ case AF_INET6:\n+ reinterpret_cast<sockaddr_in6*>(&addr.addr)->sin6_port = htons(port);\n+ break;\n+ }\n+ return addr;\n+}\n+\nTestAddress V4Any() {\nTestAddress t(\"V4Any\");\nt.addr.ss_family = AF_INET;\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_test_util.h", "new_path": "test/syscalls/linux/socket_test_util.h", "diff": "@@ -486,9 +486,14 @@ struct TestAddress {\nsockaddr_storage addr;\nsocklen_t addr_len;\n- int family() const { return addr.ss_family; }\nexplicit TestAddress(std::string description = \"\")\n: description(std::move(description)), addr(), addr_len() {}\n+\n+ int family() const { return addr.ss_family; }\n+\n+ // Returns a new TestAddress with specified port. If port is not supported,\n+ // the same TestAddress is returned.\n+ TestAddress WithPort(uint16_t port) const;\n};\nconstexpr char kMulticastAddress[] = \"224.0.2.1\";\n" } ]
Go
Apache License 2.0
google/gvisor
Correctly return EMSGSIZE when packet is too big in raw socket. IPv4 previously accepted the packet, while IPv6 panicked. Neither matches the behavior in Linux. splice() in Linux behaves differently than in gVisor; this change documents that in the SpliceTooLong test. Reported-by: [email protected] PiperOrigin-RevId: 352091286
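For readers who want to reproduce the Linux-side expectation outside the gtest harness above, here is a minimal Go sketch of the same check. It is not part of the test suite: the loopback destination is an arbitrary placeholder, and running it requires CAP_NET_RAW.

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Raw IPv4 socket carrying UDP payloads; needs CAP_NET_RAW.
	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, syscall.IPPROTO_UDP)
	if err != nil {
		panic(err) // typically EPERM when the capability is missing
	}
	defer syscall.Close(fd)

	// Connect so that plain write(2) can be used to send datagrams.
	dst := &syscall.SockaddrInet4{Addr: [4]byte{127, 0, 0, 1}}
	if err := syscall.Connect(fd, dst); err != nil {
		panic(err)
	}

	// 64 KiB does not fit in a single IPv4 datagram, so the send must fail.
	buf := make([]byte, 65536)
	_, err = syscall.Write(fd, buf)
	fmt.Println("send of 64 KiB:", err, "(EMSGSIZE expected:", err == syscall.EMSGSIZE, ")")
}
```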
259,907
15.01.2021 15:17:23
28,800
f37ace6661dfed8acae7e22ed0eb9ad78bdeab34
[rack] Retransmit the probe segment after the probe timer expires. This change implements TLP details enumerated in Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/rack.go", "new_path": "pkg/tcpip/transport/tcp/rack.go", "diff": "@@ -197,18 +197,42 @@ func (s *sender) probeTimerExpired() *tcpip.Error {\nif !s.rc.probeTimer.checkExpiration() {\nreturn nil\n}\n- // TODO(gvisor.dev/issue/5084): Implement this pseudo algorithm.\n- // If an unsent segment exists AND\n- // the receive window allows new data to be sent:\n- // Transmit the lowest-sequence unsent segment of up to SMSS\n- // Increment FlightSize by the size of the newly-sent segment\n- // Else if TLPRxtOut is not set:\n- // Retransmit the highest-sequence segment sent so far\n- // TLPRxtOut = true\n- // TLPHighRxt = SND.NXT\n- // The cwnd remains unchanged\n- // If FlightSize != 0:\n- // Arm RTO timer only.\n+\n+ var dataSent bool\n+ if s.writeNext != nil && s.writeNext.xmitCount == 0 && s.outstanding < s.sndCwnd {\n+ dataSent = s.maybeSendSegment(s.writeNext, int(s.ep.scoreboard.SMSS()), s.sndUna.Add(s.sndWnd))\n+ if dataSent {\n+ s.outstanding += s.pCount(s.writeNext, s.maxPayloadSize)\n+ s.writeNext = s.writeNext.Next()\n+ }\n+ }\n+\n+ if !dataSent && !s.rc.tlpRxtOut {\n+ var highestSeqXmit *segment\n+ for highestSeqXmit = s.writeList.Front(); highestSeqXmit != nil; highestSeqXmit = highestSeqXmit.Next() {\n+ if highestSeqXmit.xmitCount == 0 {\n+ // Nothing in writeList is transmitted, no need to send a probe.\n+ highestSeqXmit = nil\n+ break\n+ }\n+ if highestSeqXmit.Next() == nil || highestSeqXmit.Next().xmitCount == 0 {\n+ // Either everything in writeList has been transmitted or the next\n+ // sequence has not been transmitted. Either way this is the highest\n+ // sequence segment that was transmitted.\n+ break\n+ }\n+ }\n+\n+ if highestSeqXmit != nil {\n+ dataSent = s.maybeSendSegment(highestSeqXmit, int(s.ep.scoreboard.SMSS()), s.sndUna.Add(s.sndWnd))\n+ if dataSent {\n+ s.rc.tlpRxtOut = true\n+ s.rc.tlpHighRxt = s.sndNxt\n+ }\n+ }\n+ }\n+\n+ s.postXmit(dataSent)\nreturn nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
[rack] Retransmit the probe segment after the probe timer expires. This change implements TLP details enumerated in https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.2. Fixes #5084 PiperOrigin-RevId: 352093473
260,004
15.01.2021 18:12:50
28,800
2814a032be7b34e4cc0c0607dba8030e74e11208
Support GetLinkAddress with neighborCache. Test: integration_test.TestGetLinkAddress
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/pipe/pipe.go", "new_path": "pkg/tcpip/link/pipe/pipe.go", "diff": "@@ -55,7 +55,22 @@ func (e *Endpoint) WritePacket(r stack.RouteInfo, _ *stack.GSO, proto tcpip.Netw\n// remote address from the perspective of the other end of the pipe\n// (e.linked). Similarly, the remote address from the perspective of this\n// endpoint is the local address on the other end.\n- e.linked.dispatcher.DeliverNetworkPacket(r.LocalLinkAddress /* remote */, r.RemoteLinkAddress /* local */, proto, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ //\n+ // Deliver the packet in a new goroutine to escape this goroutine's stack and\n+ // avoid a deadlock when a packet triggers a response which leads the stack to\n+ // try and take a lock it already holds.\n+ //\n+ // As of writing, a deadlock may occur when performing link resolution as the\n+ // neighbor table will send a solicitation while holding a lock and the\n+ // response advertisement will be sent in the same stack that sent the\n+ // solictation. When the response is received, the stack attempts to take the\n+ // same lock it already took before sending the solicitation, leading to a\n+ // deadlock. Basically, we attempt to lock the same lock twice in the same\n+ // call stack.\n+ //\n+ // TODO(gvisor.dev/issue/5289): don't use a new goroutine once we support send\n+ // and receive queues.\n+ go e.linked.dispatcher.DeliverNetworkPacket(r.LocalLinkAddress /* remote */, r.RemoteLinkAddress /* local */, proto, stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buffer.NewVectorisedView(pkt.Size(), pkt.Views()),\n}))\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/nic.go", "new_path": "pkg/tcpip/stack/nic.go", "diff": "@@ -547,6 +547,15 @@ func (n *NIC) removeAddress(addr tcpip.Address) *tcpip.Error {\nreturn tcpip.ErrBadLocalAddress\n}\n+func (n *NIC) getNeighborLinkAddress(addr, localAddr tcpip.Address, linkRes LinkAddressResolver, onResolve func(tcpip.LinkAddress, bool)) (tcpip.LinkAddress, <-chan struct{}, *tcpip.Error) {\n+ if n.neigh != nil {\n+ entry, ch, err := n.neigh.entry(addr, localAddr, linkRes, onResolve)\n+ return entry.LinkAddr, ch, err\n+ }\n+\n+ return n.stack.linkAddrCache.get(tcpip.FullAddress{NIC: n.ID(), Addr: addr}, linkRes, localAddr, n, onResolve)\n+}\n+\nfunc (n *NIC) neighbors() ([]NeighborEntry, *tcpip.Error) {\nif n.neigh == nil {\nreturn nil, tcpip.ErrNotSupported\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/registration.go", "new_path": "pkg/tcpip/stack/registration.go", "diff": "@@ -835,27 +835,6 @@ type LinkAddressCache interface {\n// AddLinkAddress adds a link address to the cache.\nAddLinkAddress(nicID tcpip.NICID, addr tcpip.Address, linkAddr tcpip.LinkAddress)\n-\n- // GetLinkAddress finds the link address corresponding to the remote address\n- // (e.g. IP -> MAC).\n- //\n- // Returns a link address for the remote address, if readily available.\n- //\n- // Returns ErrWouldBlock if the link address is not readily available, along\n- // with a notification channel for the caller to block on. Triggers address\n- // resolution asynchronously.\n- //\n- // If onResolve is provided, it will be called either immediately, if\n- // resolution is not required, or when address resolution is complete, with\n- // the resolved link address and whether resolution succeeded. 
After any\n- // callbacks have been called, the returned notification channel is closed.\n- //\n- // If specified, the local address must be an address local to the interface\n- // the neighbor cache belongs to. The local address is the source address of\n- // a packet prompting NUD/link address resolution.\n- //\n- // TODO(gvisor.dev/issue/5151): Don't return the link address.\n- GetLinkAddress(nicID tcpip.NICID, addr, localAddr tcpip.Address, protocol tcpip.NetworkProtocolNumber, onResolve func(tcpip.LinkAddress, bool)) (tcpip.LinkAddress, <-chan struct{}, *tcpip.Error)\n}\n// RawFactory produces endpoints for writing various types of raw packets.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/route.go", "new_path": "pkg/tcpip/stack/route.go", "diff": "@@ -51,10 +51,6 @@ type Route struct {\n// outgoingNIC is the interface this route uses to write packets.\noutgoingNIC *NIC\n- // linkCache is set if link address resolution is enabled for this protocol on\n- // the route's NIC.\n- linkCache LinkAddressCache\n-\n// linkRes is set if link address resolution is enabled for this protocol on\n// the route's NIC.\nlinkRes LinkAddressResolver\n@@ -191,7 +187,6 @@ func makeRouteInner(netProto tcpip.NetworkProtocolNumber, localAddr, remoteAddr\nif r.outgoingNIC.LinkEndpoint.Capabilities()&CapabilityResolutionRequired != 0 {\nif linkRes, ok := r.outgoingNIC.stack.linkAddrResolvers[r.NetProto]; ok {\nr.linkRes = linkRes\n- r.linkCache = r.outgoingNIC.stack\n}\n}\n@@ -338,20 +333,9 @@ func (r *Route) Resolve(afterResolve func()) (<-chan struct{}, *tcpip.Error) {\nr.Release()\n}\n- if neigh := r.outgoingNIC.neigh; neigh != nil {\n- _, ch, err := neigh.entry(nextAddr, linkAddressResolutionRequestLocalAddr, r.linkRes, finishResolution)\n- if err != nil {\n+ _, ch, err := r.outgoingNIC.getNeighborLinkAddress(nextAddr, linkAddressResolutionRequestLocalAddr, r.linkRes, finishResolution)\nreturn ch, err\n}\n- return nil, nil\n- }\n-\n- _, ch, err := r.linkCache.GetLinkAddress(r.outgoingNIC.ID(), nextAddr, linkAddressResolutionRequestLocalAddr, r.NetProto, finishResolution)\n- if err != nil {\n- return ch, err\n- }\n- return nil, nil\n-}\n// local returns true if the route is a local route.\nfunc (r *Route) local() bool {\n@@ -373,7 +357,7 @@ func (r *Route) isResolutionRequiredRLocked() bool {\nreturn false\n}\n- return (r.outgoingNIC.neigh != nil && r.linkRes != nil) || r.linkCache != nil\n+ return r.linkRes != nil\n}\nfunc (r *Route) isValidForOutgoing() bool {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack.go", "new_path": "pkg/tcpip/stack/stack.go", "diff": "@@ -1518,19 +1518,41 @@ func (s *Stack) AddLinkAddress(nicID tcpip.NICID, addr tcpip.Address, linkAddr t\n// that AddLinkAddress for a particular address has been called.\n}\n-// GetLinkAddress implements LinkAddressCache.GetLinkAddress.\n+// GetLinkAddress finds the link address corresponding to the remote address.\n+//\n+// Returns a link address for the remote address, if readily available.\n+//\n+// Returns ErrNotSupported if the stack is not configured with a link address\n+// resolver for the specified network protocol.\n+//\n+// Returns ErrWouldBlock if the link address is not readily available, along\n+// with a notification channel for the caller to block on. 
Triggers address\n+// resolution asynchronously.\n+//\n+// If onResolve is provided, it will be called either immediately, if\n+// resolution is not required, or when address resolution is complete, with\n+// the resolved link address and whether resolution succeeded. After any\n+// callbacks have been called, the returned notification channel is closed.\n+//\n+// If specified, the local address must be an address local to the interface\n+// the neighbor cache belongs to. The local address is the source address of\n+// a packet prompting NUD/link address resolution.\n+//\n+// TODO(gvisor.dev/issue/5151): Don't return the link address.\nfunc (s *Stack) GetLinkAddress(nicID tcpip.NICID, addr, localAddr tcpip.Address, protocol tcpip.NetworkProtocolNumber, onResolve func(tcpip.LinkAddress, bool)) (tcpip.LinkAddress, <-chan struct{}, *tcpip.Error) {\ns.mu.RLock()\n- nic := s.nics[nicID]\n- if nic == nil {\n+ nic, ok := s.nics[nicID]\ns.mu.RUnlock()\n+ if !ok {\nreturn \"\", nil, tcpip.ErrUnknownNICID\n}\n- s.mu.RUnlock()\n- fullAddr := tcpip.FullAddress{NIC: nicID, Addr: addr}\n- linkRes := s.linkAddrResolvers[protocol]\n- return s.linkAddrCache.get(fullAddr, linkRes, localAddr, nic, onResolve)\n+ linkRes, ok := s.linkAddrResolvers[protocol]\n+ if !ok {\n+ return \"\", nil, tcpip.ErrNotSupported\n+ }\n+\n+ return nic.getNeighborLinkAddress(addr, localAddr, linkRes, onResolve)\n}\n// Neighbors returns all IP to MAC address associations.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack_test.go", "new_path": "pkg/tcpip/stack/stack_test.go", "diff": "@@ -4357,3 +4357,24 @@ func TestClearNeighborCacheOnNICDisable(t *testing.T) {\nt.Fatalf(\"got len(neighbors) = %d, want = 0; neighbors = %#v\", len(neighbors), neighbors)\n}\n}\n+\n+func TestGetLinkAddressErrors(t *testing.T) {\n+ const (\n+ nicID = 1\n+ unknownNICID = nicID + 1\n+ )\n+\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},\n+ })\n+ if err := s.CreateNIC(nicID, channel.New(0, 0, \"\")); err != nil {\n+ t.Fatalf(\"CreateNIC(%d, _) = %s\", nicID, err)\n+ }\n+\n+ if addr, _, err := s.GetLinkAddress(unknownNICID, \"\", \"\", ipv4.ProtocolNumber, nil); err != tcpip.ErrUnknownNICID {\n+ t.Errorf(\"got s.GetLinkAddress(%d, '', '', %d, nil) = (%s, _, %s), want = (_, _, %s)\", unknownNICID, ipv4.ProtocolNumber, addr, err, tcpip.ErrUnknownNICID)\n+ }\n+ if addr, _, err := s.GetLinkAddress(nicID, \"\", \"\", ipv4.ProtocolNumber, nil); err != tcpip.ErrNotSupported {\n+ t.Errorf(\"got s.GetLinkAddress(%d, '', '', %d, nil) = (%s, _, %s), want = (_, _, %s)\", unknownNICID, ipv4.ProtocolNumber, addr, err, tcpip.ErrNotSupported)\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/integration/link_resolution_test.go", "new_path": "pkg/tcpip/tests/integration/link_resolution_test.go", "diff": "@@ -16,6 +16,7 @@ package integration_test\nimport (\n\"bytes\"\n+ \"fmt\"\n\"net\"\n\"testing\"\n@@ -395,3 +396,63 @@ func TestTCPLinkResolutionFailure(t *testing.T) {\n})\n}\n}\n+\n+func TestGetLinkAddress(t *testing.T) {\n+ const (\n+ host1NICID = 1\n+ host2NICID = 4\n+ )\n+\n+ tests := []struct {\n+ name string\n+ netProto tcpip.NetworkProtocolNumber\n+ remoteAddr tcpip.Address\n+ expectedLinkAddr bool\n+ }{\n+ {\n+ name: \"IPv4\",\n+ netProto: ipv4.ProtocolNumber,\n+ remoteAddr: ipv4Addr2.AddressWithPrefix.Address,\n+ },\n+ {\n+ name: \"IPv6\",\n+ netProto: ipv6.ProtocolNumber,\n+ remoteAddr: ipv6Addr2.AddressWithPrefix.Address,\n+ },\n+ }\n+\n+ for _, test := range tests 
{\n+ t.Run(test.name, func(t *testing.T) {\n+ for _, useNeighborCache := range []bool{true, false} {\n+ t.Run(fmt.Sprintf(\"UseNeighborCache=%t\", useNeighborCache), func(t *testing.T) {\n+ stackOpts := stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol, ipv6.NewProtocol},\n+ UseNeighborCache: useNeighborCache,\n+ }\n+\n+ host1Stack, _ := setupStack(t, stackOpts, host1NICID, host2NICID)\n+\n+ for i := 0; i < 2; i++ {\n+ addr, ch, err := host1Stack.GetLinkAddress(host1NICID, test.remoteAddr, \"\", test.netProto, func(tcpip.LinkAddress, bool) {})\n+ var want *tcpip.Error\n+ if i == 0 {\n+ want = tcpip.ErrWouldBlock\n+ }\n+ if err != want {\n+ t.Fatalf(\"got host1Stack.GetLinkAddress(%d, %s, '', %d, _) = (%s, _, %s), want = (_, _, %s)\", host1NICID, test.remoteAddr, test.netProto, addr, err, want)\n+ }\n+\n+ if i == 0 {\n+ <-ch\n+ continue\n+ }\n+\n+ if addr != linkAddr2 {\n+ t.Fatalf(\"got addr = %s, want = %s\", addr, linkAddr2)\n+ }\n+ }\n+ })\n+ }\n+ })\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Support GetLinkAddress with neighborCache. Test: integration_test.TestGetLinkAddress PiperOrigin-RevId: 352119404
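The contract described above (the first lookup returns ErrWouldBlock together with a notification channel, and a retry after the channel closes returns the cached link address) is exactly what the new integration test drives. The following is a simplified, self-contained sketch of that call pattern, not the gVisor API itself; the IP and MAC strings are made up.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errWouldBlock = errors.New("would block")

type linkCache struct {
	mu    sync.Mutex
	addrs map[string]string // IP address -> link (MAC) address
}

// get returns the link address if it is already cached. Otherwise it starts
// resolution asynchronously and returns errWouldBlock together with a channel
// that is closed once resolution completes.
func (c *linkCache) get(ip string, resolve func(string) string) (string, <-chan struct{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if mac, ok := c.addrs[ip]; ok {
		return mac, nil, nil
	}
	ch := make(chan struct{})
	go func() {
		mac := resolve(ip) // assume resolution always succeeds in this sketch
		c.mu.Lock()
		c.addrs[ip] = mac
		c.mu.Unlock()
		close(ch)
	}()
	return "", ch, errWouldBlock
}

func main() {
	c := &linkCache{addrs: make(map[string]string)}
	resolve := func(string) string { return "02:00:00:00:00:02" }

	for i := 0; i < 2; i++ {
		mac, ch, err := c.get("192.0.2.1", resolve)
		if errors.Is(err, errWouldBlock) {
			<-ch // first call: wait for resolution, then retry
			continue
		}
		fmt.Println("resolved to", mac)
	}
}
```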
260,004
19.01.2021 12:06:08
28,800
a2ec1932c9b76d80d3af93aa74acf945829cd582
Drop CheckLocalAddress from LinkAddressCache
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/arp/arp.go", "new_path": "pkg/tcpip/network/arp/arp.go", "diff": "@@ -138,16 +138,6 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\nstats.RequestsReceived.Increment()\nlocalAddr := tcpip.Address(h.ProtocolAddressTarget())\n- if e.nud == nil {\n- if e.linkAddrCache.CheckLocalAddress(e.nic.ID(), header.IPv4ProtocolNumber, localAddr) == 0 {\n- stats.RequestsReceivedUnknownTargetAddress.Increment()\n- return // we have no useful answer, ignore the request\n- }\n-\n- addr := tcpip.Address(h.ProtocolAddressSender())\n- linkAddr := tcpip.LinkAddress(h.HardwareAddressSender())\n- e.linkAddrCache.AddLinkAddress(e.nic.ID(), addr, linkAddr)\n- } else {\nif e.protocol.stack.CheckLocalAddress(e.nic.ID(), header.IPv4ProtocolNumber, localAddr) == 0 {\nstats.RequestsReceivedUnknownTargetAddress.Increment()\nreturn // we have no useful answer, ignore the request\n@@ -155,6 +145,10 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\nremoteAddr := tcpip.Address(h.ProtocolAddressSender())\nremoteLinkAddr := tcpip.LinkAddress(h.HardwareAddressSender())\n+\n+ if e.nud == nil {\n+ e.linkAddrCache.AddLinkAddress(e.nic.ID(), remoteAddr, remoteLinkAddr)\n+ } else {\ne.nud.HandleProbe(remoteAddr, ProtocolNumber, remoteLinkAddr, e.protocol)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/icmp_test.go", "new_path": "pkg/tcpip/network/ipv6/icmp_test.go", "diff": "@@ -91,16 +91,11 @@ func (*stubDispatcher) DeliverTransportPacket(tcpip.TransportProtocolNumber, *st\nreturn stack.TransportPacketHandled\n}\n-type stubLinkAddressCache struct {\n- stack.LinkAddressCache\n-}\n+var _ stack.LinkAddressCache = (*stubLinkAddressCache)(nil)\n-func (*stubLinkAddressCache) CheckLocalAddress(tcpip.NICID, tcpip.NetworkProtocolNumber, tcpip.Address) tcpip.NICID {\n- return 0\n-}\n+type stubLinkAddressCache struct{}\n-func (*stubLinkAddressCache) AddLinkAddress(tcpip.NICID, tcpip.Address, tcpip.LinkAddress) {\n-}\n+func (*stubLinkAddressCache) AddLinkAddress(tcpip.NICID, tcpip.Address, tcpip.LinkAddress) {}\ntype stubNUDHandler struct {\nprobeCount int\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/registration.go", "new_path": "pkg/tcpip/stack/registration.go", "diff": "@@ -829,10 +829,6 @@ type LinkAddressResolver interface {\n// A LinkAddressCache caches link addresses.\ntype LinkAddressCache interface {\n- // CheckLocalAddress determines if the given local address exists, and if it\n- // does not exist.\n- CheckLocalAddress(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) tcpip.NICID\n-\n// AddLinkAddress adds a link address to the cache.\nAddLinkAddress(nicID tcpip.NICID, addr tcpip.Address, linkAddr tcpip.LinkAddress)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Drop CheckLocalAddress from LinkAddressCache PiperOrigin-RevId: 352623277
259,891
19.01.2021 12:10:01
28,800
833ba3590b422d453012e5b2ec2e780211d9caf9
Ensure that IP{V6}_RECVORIGDSTADDR yields the post-NAT address and port.
[ { "change_type": "MODIFY", "old_path": "test/iptables/BUILD", "new_path": "test/iptables/BUILD", "diff": "@@ -15,7 +15,9 @@ go_library(\n],\nvisibility = [\"//test/iptables:__subpackages__\"],\ndeps = [\n+ \"//pkg/binary\",\n\"//pkg/test/testutil\",\n+ \"//pkg/usermem\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "test/iptables/iptables_test.go", "new_path": "test/iptables/iptables_test.go", "diff": "@@ -424,3 +424,11 @@ func TestNATPreOriginalDst(t *testing.T) {\nfunc TestNATOutOriginalDst(t *testing.T) {\nsingleTest(t, NATOutOriginalDst{})\n}\n+\n+func TestNATPreRECVORIGDSTADDR(t *testing.T) {\n+ singleTest(t, NATPreRECVORIGDSTADDR{})\n+}\n+\n+func TestNATOutRECVORIGDSTADDR(t *testing.T) {\n+ singleTest(t, NATOutRECVORIGDSTADDR{})\n+}\n" }, { "change_type": "MODIFY", "old_path": "test/iptables/nat.go", "new_path": "test/iptables/nat.go", "diff": "@@ -20,6 +20,9 @@ import (\n\"fmt\"\n\"net\"\n\"syscall\"\n+\n+ \"gvisor.dev/gvisor/pkg/binary\"\n+ \"gvisor.dev/gvisor/pkg/usermem\"\n)\nconst redirectPort = 42\n@@ -43,6 +46,8 @@ func init() {\nRegisterTestCase(NATLoopbackSkipsPrerouting{})\nRegisterTestCase(NATPreOriginalDst{})\nRegisterTestCase(NATOutOriginalDst{})\n+ RegisterTestCase(NATPreRECVORIGDSTADDR{})\n+ RegisterTestCase(NATOutRECVORIGDSTADDR{})\n}\n// NATPreRedirectUDPPort tests that packets are redirected to different port.\n@@ -538,9 +543,9 @@ func (NATOutOriginalDst) LocalAction(ctx context.Context, ip net.IP, ipv6 bool)\n}\nfunc listenForRedirectedConn(ctx context.Context, ipv6 bool, originalDsts []net.IP) error {\n- // The net package doesn't give guarantee access to the connection's\n+ // The net package doesn't give guaranteed access to the connection's\n// underlying FD, and thus we cannot call getsockopt. We have to use\n- // traditional syscalls for SO_ORIGINAL_DST.\n+ // traditional syscalls.\n// Create the listening socket, bind, listen, and accept.\nfamily := syscall.AF_INET\n@@ -609,36 +614,14 @@ func listenForRedirectedConn(ctx context.Context, ipv6 bool, originalDsts []net.\nif err != nil {\nreturn err\n}\n- // The original destination could be any of our IPs.\n- for _, dst := range originalDsts {\n- want := syscall.RawSockaddrInet6{\n- Family: syscall.AF_INET6,\n- Port: htons(dropPort),\n- }\n- copy(want.Addr[:], dst.To16())\n- if got == want {\n- return nil\n- }\n- }\n- return fmt.Errorf(\"SO_ORIGINAL_DST returned %+v, but wanted one of %+v (note: port numbers are in network byte order)\", got, originalDsts)\n+ return addrMatches6(got, originalDsts, dropPort)\n}\ngot, err := originalDestination4(connFD)\nif err != nil {\nreturn err\n}\n- // The original destination could be any of our IPs.\n- for _, dst := range originalDsts {\n- want := syscall.RawSockaddrInet4{\n- Family: syscall.AF_INET,\n- Port: htons(dropPort),\n- }\n- copy(want.Addr[:], dst.To4())\n- if got == want {\n- return nil\n- }\n- }\n- return fmt.Errorf(\"SO_ORIGINAL_DST returned %+v, but wanted one of %+v (note: port numbers are in network byte order)\", got, originalDsts)\n+ return addrMatches4(got, originalDsts, dropPort)\n}\n// loopbackTests runs an iptables rule and ensures that packets sent to\n@@ -662,3 +645,233 @@ func loopbackTest(ctx context.Context, ipv6 bool, dest net.IP, args ...string) e\nreturn err\n}\n}\n+\n+// NATPreRECVORIGDSTADDR tests that IP{V6}_RECVORIGDSTADDR gets the post-NAT\n+// address on the PREROUTING chain.\n+type NATPreRECVORIGDSTADDR struct{ containerCase }\n+\n+// Name implements TestCase.Name.\n+func (NATPreRECVORIGDSTADDR) Name() string {\n+ return 
\"NATPreRECVORIGDSTADDR\"\n+}\n+\n+// ContainerAction implements TestCase.ContainerAction.\n+func (NATPreRECVORIGDSTADDR) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {\n+ if err := natTable(ipv6, \"-A\", \"PREROUTING\", \"-p\", \"udp\", \"-j\", \"REDIRECT\", \"--to-ports\", fmt.Sprintf(\"%d\", redirectPort)); err != nil {\n+ return err\n+ }\n+\n+ if err := recvWithRECVORIGDSTADDR(ctx, ipv6, nil, redirectPort); err != nil {\n+ return err\n+ }\n+\n+ return nil\n+}\n+\n+// LocalAction implements TestCase.LocalAction.\n+func (NATPreRECVORIGDSTADDR) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {\n+ return sendUDPLoop(ctx, ip, acceptPort)\n+}\n+\n+// NATOutRECVORIGDSTADDR tests that IP{V6}_RECVORIGDSTADDR gets the post-NAT\n+// address on the OUTPUT chain.\n+type NATOutRECVORIGDSTADDR struct{ containerCase }\n+\n+// Name implements TestCase.Name.\n+func (NATOutRECVORIGDSTADDR) Name() string {\n+ return \"NATOutRECVORIGDSTADDR\"\n+}\n+\n+// ContainerAction implements TestCase.ContainerAction.\n+func (NATOutRECVORIGDSTADDR) ContainerAction(ctx context.Context, ip net.IP, ipv6 bool) error {\n+ if err := natTable(ipv6, \"-A\", \"OUTPUT\", \"-p\", \"udp\", \"-j\", \"REDIRECT\", \"--to-ports\", fmt.Sprintf(\"%d\", redirectPort)); err != nil {\n+ return err\n+ }\n+\n+ sendCh := make(chan error)\n+ go func() {\n+ // Packets will be sent to a non-container IP and redirected\n+ // back to the container.\n+ sendCh <- sendUDPLoop(ctx, ip, acceptPort)\n+ }()\n+\n+ expectedIP := &net.IP{127, 0, 0, 1}\n+ if ipv6 {\n+ expectedIP = &net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}\n+ }\n+ if err := recvWithRECVORIGDSTADDR(ctx, ipv6, expectedIP, redirectPort); err != nil {\n+ return err\n+ }\n+\n+ select {\n+ case err := <-sendCh:\n+ return err\n+ default:\n+ return nil\n+ }\n+}\n+\n+// LocalAction implements TestCase.LocalAction.\n+func (NATOutRECVORIGDSTADDR) LocalAction(ctx context.Context, ip net.IP, ipv6 bool) error {\n+ // No-op.\n+ return nil\n+}\n+\n+func recvWithRECVORIGDSTADDR(ctx context.Context, ipv6 bool, expectedDst *net.IP, port uint16) error {\n+ // The net package doesn't give guaranteed access to a connection's\n+ // underlying FD, and thus we cannot call getsockopt. 
We have to use\n+ // traditional syscalls for IP_RECVORIGDSTADDR.\n+\n+ // Create the listening socket.\n+ var (\n+ family = syscall.AF_INET\n+ level = syscall.SOL_IP\n+ option = syscall.IP_RECVORIGDSTADDR\n+ bindAddr syscall.Sockaddr = &syscall.SockaddrInet4{\n+ Port: int(port),\n+ Addr: [4]byte{0, 0, 0, 0}, // INADDR_ANY\n+ }\n+ )\n+ if ipv6 {\n+ family = syscall.AF_INET6\n+ level = syscall.SOL_IPV6\n+ option = 74 // IPV6_RECVORIGDSTADDR, which is missing from the syscall package.\n+ bindAddr = &syscall.SockaddrInet6{\n+ Port: int(port),\n+ Addr: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, // in6addr_any\n+ }\n+ }\n+ sockfd, err := syscall.Socket(family, syscall.SOCK_DGRAM, 0)\n+ if err != nil {\n+ return fmt.Errorf(\"failed Socket(%d, %d, 0): %w\", family, syscall.SOCK_DGRAM, err)\n+ }\n+ defer syscall.Close(sockfd)\n+\n+ if err := syscall.Bind(sockfd, bindAddr); err != nil {\n+ return fmt.Errorf(\"failed Bind(%d, %+v): %v\", sockfd, bindAddr, err)\n+ }\n+\n+ // Enable IP_RECVORIGDSTADDR.\n+ if err := syscall.SetsockoptInt(sockfd, level, option, 1); err != nil {\n+ return fmt.Errorf(\"failed SetsockoptByte(%d, %d, %d, 1): %v\", sockfd, level, option, err)\n+ }\n+\n+ addrCh := make(chan interface{})\n+ errCh := make(chan error)\n+ go func() {\n+ var addr interface{}\n+ var err error\n+ if ipv6 {\n+ addr, err = recvOrigDstAddr6(sockfd)\n+ } else {\n+ addr, err = recvOrigDstAddr4(sockfd)\n+ }\n+ if err != nil {\n+ errCh <- err\n+ } else {\n+ addrCh <- addr\n+ }\n+ }()\n+\n+ // Wait to receive a packet.\n+ var addr interface{}\n+ select {\n+ case <-ctx.Done():\n+ return ctx.Err()\n+ case err := <-errCh:\n+ return err\n+ case addr = <-addrCh:\n+ }\n+\n+ // Get a list of local IPs to verify that the packet now appears to have\n+ // been sent to us.\n+ var localAddrs []net.IP\n+ if expectedDst != nil {\n+ localAddrs = []net.IP{*expectedDst}\n+ } else {\n+ localAddrs, err = getInterfaceAddrs(ipv6)\n+ if err != nil {\n+ return fmt.Errorf(\"failed to get local interfaces: %w\", err)\n+ }\n+ }\n+\n+ // Verify that the address has the post-NAT port and address.\n+ if ipv6 {\n+ return addrMatches6(addr.(syscall.RawSockaddrInet6), localAddrs, redirectPort)\n+ }\n+ return addrMatches4(addr.(syscall.RawSockaddrInet4), localAddrs, redirectPort)\n+}\n+\n+func recvOrigDstAddr4(sockfd int) (syscall.RawSockaddrInet4, error) {\n+ buf, err := recvOrigDstAddr(sockfd, syscall.SOL_IP, syscall.SizeofSockaddrInet4)\n+ if err != nil {\n+ return syscall.RawSockaddrInet4{}, err\n+ }\n+ var addr syscall.RawSockaddrInet4\n+ binary.Unmarshal(buf, usermem.ByteOrder, &addr)\n+ return addr, nil\n+}\n+\n+func recvOrigDstAddr6(sockfd int) (syscall.RawSockaddrInet6, error) {\n+ buf, err := recvOrigDstAddr(sockfd, syscall.SOL_IP, syscall.SizeofSockaddrInet6)\n+ if err != nil {\n+ return syscall.RawSockaddrInet6{}, err\n+ }\n+ var addr syscall.RawSockaddrInet6\n+ binary.Unmarshal(buf, usermem.ByteOrder, &addr)\n+ return addr, nil\n+}\n+\n+func recvOrigDstAddr(sockfd int, level uintptr, addrSize int) ([]byte, error) {\n+ buf := make([]byte, 64)\n+ oob := make([]byte, syscall.CmsgSpace(addrSize))\n+ for {\n+ _, oobn, _, _, err := syscall.Recvmsg(\n+ sockfd,\n+ buf, // Message buffer.\n+ oob, // Out-of-band buffer.\n+ 0) // Flags.\n+ if errors.Is(err, syscall.EINTR) {\n+ continue\n+ }\n+ if err != nil {\n+ return nil, fmt.Errorf(\"failed when calling Recvmsg: %w\", err)\n+ }\n+ oob = oob[:oobn]\n+\n+ // Parse out the control message.\n+ msgs, err := syscall.ParseSocketControlMessage(oob)\n+ if err != nil {\n+ return 
nil, fmt.Errorf(\"failed to parse control message: %w\", err)\n+ }\n+ return msgs[0].Data, nil\n+ }\n+}\n+\n+func addrMatches4(got syscall.RawSockaddrInet4, wantAddrs []net.IP, port uint16) error {\n+ for _, wantAddr := range wantAddrs {\n+ want := syscall.RawSockaddrInet4{\n+ Family: syscall.AF_INET,\n+ Port: htons(port),\n+ }\n+ copy(want.Addr[:], wantAddr.To4())\n+ if got == want {\n+ return nil\n+ }\n+ }\n+ return fmt.Errorf(\"got %+v, but wanted one of %+v (note: port numbers are in network byte order)\", got, wantAddrs)\n+}\n+\n+func addrMatches6(got syscall.RawSockaddrInet6, wantAddrs []net.IP, port uint16) error {\n+ for _, wantAddr := range wantAddrs {\n+ want := syscall.RawSockaddrInet6{\n+ Family: syscall.AF_INET6,\n+ Port: htons(port),\n+ }\n+ copy(want.Addr[:], wantAddr.To16())\n+ if got == want {\n+ return nil\n+ }\n+ }\n+ return fmt.Errorf(\"got %+v, but wanted one of %+v (note: port numbers are in network byte order)\", got, wantAddrs)\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Ensure that IP{V6}_RECVORIGDSTADDR yields the post-NAT address and port. PiperOrigin-RevId: 352624174
260,004
19.01.2021 16:54:48
28,800
7ff5ceaeae66303ed6a2199963c00cb08b2fe7ca
Do not have a stack-wide linkAddressCache. Link addresses are cached on a per-NIC basis, so instead of having a single cache that includes the NIC ID for neighbor entry lookups, use a single cache per NIC.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/arp/arp.go", "new_path": "pkg/tcpip/network/arp/arp.go", "diff": "@@ -147,7 +147,7 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\nremoteLinkAddr := tcpip.LinkAddress(h.HardwareAddressSender())\nif e.nud == nil {\n- e.linkAddrCache.AddLinkAddress(e.nic.ID(), remoteAddr, remoteLinkAddr)\n+ e.linkAddrCache.AddLinkAddress(remoteAddr, remoteLinkAddr)\n} else {\ne.nud.HandleProbe(remoteAddr, ProtocolNumber, remoteLinkAddr, e.protocol)\n}\n@@ -191,7 +191,7 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\nlinkAddr := tcpip.LinkAddress(h.HardwareAddressSender())\nif e.nud == nil {\n- e.linkAddrCache.AddLinkAddress(e.nic.ID(), addr, linkAddr)\n+ e.linkAddrCache.AddLinkAddress(addr, linkAddr)\nreturn\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/icmp.go", "new_path": "pkg/tcpip/network/ipv6/icmp.go", "diff": "@@ -290,7 +290,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool) {\n} else if e.nud != nil {\ne.nud.HandleProbe(srcAddr, header.IPv6ProtocolNumber, sourceLinkAddr, e.protocol)\n} else {\n- e.linkAddrCache.AddLinkAddress(e.nic.ID(), srcAddr, sourceLinkAddr)\n+ e.linkAddrCache.AddLinkAddress(srcAddr, sourceLinkAddr)\n}\n// As per RFC 4861 section 7.1.1:\n@@ -445,7 +445,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool) {\n// address cache with the link address for the target of the message.\nif e.nud == nil {\nif len(targetLinkAddr) != 0 {\n- e.linkAddrCache.AddLinkAddress(e.nic.ID(), targetAddr, targetLinkAddr)\n+ e.linkAddrCache.AddLinkAddress(targetAddr, targetLinkAddr)\n}\nreturn\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/icmp_test.go", "new_path": "pkg/tcpip/network/ipv6/icmp_test.go", "diff": "@@ -95,7 +95,7 @@ var _ stack.LinkAddressCache = (*stubLinkAddressCache)(nil)\ntype stubLinkAddressCache struct{}\n-func (*stubLinkAddressCache) AddLinkAddress(tcpip.NICID, tcpip.Address, tcpip.LinkAddress) {}\n+func (*stubLinkAddressCache) AddLinkAddress(tcpip.Address, tcpip.LinkAddress) {}\ntype stubNUDHandler struct {\nprobeCount int\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/forwarding_test.go", "new_path": "pkg/tcpip/stack/forwarding_test.go", "diff": "@@ -368,10 +368,6 @@ func fwdTestNetFactory(t *testing.T, proto *fwdTestNetworkProtocol, useNeighborC\nUseNeighborCache: useNeighborCache,\n})\n- if !useNeighborCache {\n- proto.addrCache = s.linkAddrCache\n- }\n-\n// Enable forwarding.\ns.SetForwarding(proto.Number(), true)\n@@ -401,13 +397,15 @@ func fwdTestNetFactory(t *testing.T, proto *fwdTestNetworkProtocol, useNeighborC\nt.Fatal(\"AddAddress #2 failed:\", err)\n}\n- if useNeighborCache {\n- // Control the neighbor cache for NIC 2.\nnic, ok := s.nics[2]\nif !ok {\n- t.Fatal(\"failed to get the neighbor cache for NIC 2\")\n+ t.Fatal(\"NIC 2 does not exist\")\n}\n+ if useNeighborCache {\n+ // Control the neighbor cache for NIC 2.\nproto.neigh = nic.neigh\n+ } else {\n+ proto.addrCache = nic.linkAddrCache\n}\n// Route all packets to NIC 2.\n@@ -493,7 +491,7 @@ func TestForwardingWithFakeResolver(t *testing.T) {\naddrResolveDelay: 500 * time.Millisecond,\nonLinkAddressResolved: func(cache *linkAddrCache, neigh *neighborCache, addr tcpip.Address, _ tcpip.LinkAddress) {\n// Any address will be resolved to the link address \"c\".\n- cache.add(tcpip.FullAddress{NIC: 2, Addr: addr}, \"c\")\n+ cache.AddLinkAddress(addr, \"c\")\n},\n},\n},\n@@ -619,7 +617,7 @@ func 
TestForwardingWithFakeResolverPartialTimeout(t *testing.T) {\n// Only packets to address 3 will be resolved to the\n// link address \"c\".\nif addr == \"\\x03\" {\n- cache.add(tcpip.FullAddress{NIC: 2, Addr: addr}, \"c\")\n+ cache.AddLinkAddress(addr, \"c\")\n}\n},\n},\n@@ -704,7 +702,7 @@ func TestForwardingWithFakeResolverTwoPackets(t *testing.T) {\naddrResolveDelay: 500 * time.Millisecond,\nonLinkAddressResolved: func(cache *linkAddrCache, neigh *neighborCache, addr tcpip.Address, _ tcpip.LinkAddress) {\n// Any packets will be resolved to the link address \"c\".\n- cache.add(tcpip.FullAddress{NIC: 2, Addr: addr}, \"c\")\n+ cache.AddLinkAddress(addr, \"c\")\n},\n},\n},\n@@ -780,7 +778,7 @@ func TestForwardingWithFakeResolverManyPackets(t *testing.T) {\naddrResolveDelay: 500 * time.Millisecond,\nonLinkAddressResolved: func(cache *linkAddrCache, neigh *neighborCache, addr tcpip.Address, _ tcpip.LinkAddress) {\n// Any packets will be resolved to the link address \"c\".\n- cache.add(tcpip.FullAddress{NIC: 2, Addr: addr}, \"c\")\n+ cache.AddLinkAddress(addr, \"c\")\n},\n},\n},\n@@ -870,7 +868,7 @@ func TestForwardingWithFakeResolverManyResolutions(t *testing.T) {\naddrResolveDelay: 500 * time.Millisecond,\nonLinkAddressResolved: func(cache *linkAddrCache, neigh *neighborCache, addr tcpip.Address, _ tcpip.LinkAddress) {\n// Any packets will be resolved to the link address \"c\".\n- cache.add(tcpip.FullAddress{NIC: 2, Addr: addr}, \"c\")\n+ cache.AddLinkAddress(addr, \"c\")\n},\n},\n},\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/linkaddrcache.go", "new_path": "pkg/tcpip/stack/linkaddrcache.go", "diff": "@@ -24,6 +24,8 @@ import (\nconst linkAddrCacheSize = 512 // max cache entries\n+var _ LinkAddressCache = (*linkAddrCache)(nil)\n+\n// linkAddrCache is a fixed-sized cache mapping IP addresses to link addresses.\n//\n// The entries are stored in a ring buffer, oldest entry replaced first.\n@@ -43,7 +45,7 @@ type linkAddrCache struct {\ncache struct {\nsync.Mutex\n- table map[tcpip.FullAddress]*linkAddrEntry\n+ table map[tcpip.Address]*linkAddrEntry\nlru linkAddrEntryList\n}\n}\n@@ -81,7 +83,7 @@ type linkAddrEntry struct {\n// mu protects the fields below.\nmu sync.RWMutex\n- addr tcpip.FullAddress\n+ addr tcpip.Address\nlinkAddr tcpip.LinkAddress\nexpiration time.Time\ns entryState\n@@ -125,7 +127,7 @@ func (e *linkAddrEntry) changeStateLocked(ns entryState, expiration time.Time) {\n}\n// add adds a k -> v mapping to the cache.\n-func (c *linkAddrCache) add(k tcpip.FullAddress, v tcpip.LinkAddress) {\n+func (c *linkAddrCache) AddLinkAddress(k tcpip.Address, v tcpip.LinkAddress) {\n// Calculate expiration time before acquiring the lock, since expiration is\n// relative to the time when information was learned, rather than when it\n// happened to be inserted into the cache.\n@@ -150,7 +152,7 @@ func (c *linkAddrCache) add(k tcpip.FullAddress, v tcpip.LinkAddress) {\n// reset to state incomplete, and returned. 
If no matching entry exists and the\n// cache is not full, a new entry with state incomplete is allocated and\n// returned.\n-func (c *linkAddrCache) getOrCreateEntryLocked(k tcpip.FullAddress) *linkAddrEntry {\n+func (c *linkAddrCache) getOrCreateEntryLocked(k tcpip.Address) *linkAddrEntry {\nif entry, ok := c.cache.table[k]; ok {\nc.cache.lru.Remove(entry)\nc.cache.lru.PushFront(entry)\n@@ -181,7 +183,7 @@ func (c *linkAddrCache) getOrCreateEntryLocked(k tcpip.FullAddress) *linkAddrEnt\n}\n// get reports any known link address for k.\n-func (c *linkAddrCache) get(k tcpip.FullAddress, linkRes LinkAddressResolver, localAddr tcpip.Address, nic NetworkInterface, onResolve func(tcpip.LinkAddress, bool)) (tcpip.LinkAddress, <-chan struct{}, *tcpip.Error) {\n+func (c *linkAddrCache) get(k tcpip.Address, linkRes LinkAddressResolver, localAddr tcpip.Address, nic NetworkInterface, onResolve func(tcpip.LinkAddress, bool)) (tcpip.LinkAddress, <-chan struct{}, *tcpip.Error) {\nc.cache.Lock()\ndefer c.cache.Unlock()\nentry := c.getOrCreateEntryLocked(k)\n@@ -214,11 +216,11 @@ func (c *linkAddrCache) get(k tcpip.FullAddress, linkRes LinkAddressResolver, lo\n}\n}\n-func (c *linkAddrCache) startAddressResolution(k tcpip.FullAddress, linkRes LinkAddressResolver, localAddr tcpip.Address, nic NetworkInterface, done <-chan struct{}) {\n+func (c *linkAddrCache) startAddressResolution(k tcpip.Address, linkRes LinkAddressResolver, localAddr tcpip.Address, nic NetworkInterface, done <-chan struct{}) {\nfor i := 0; ; i++ {\n// Send link request, then wait for the timeout limit and check\n// whether the request succeeded.\n- linkRes.LinkAddressRequest(k.Addr, localAddr, \"\" /* linkAddr */, nic)\n+ linkRes.LinkAddressRequest(k, localAddr, \"\" /* linkAddr */, nic)\nselect {\ncase now := <-time.After(c.resolutionTimeout):\n@@ -234,7 +236,7 @@ func (c *linkAddrCache) startAddressResolution(k tcpip.FullAddress, linkRes Link\n// checkLinkRequest checks whether previous attempt to resolve address has\n// succeeded and mark the entry accordingly. 
Returns true if request can stop,\n// false if another request should be sent.\n-func (c *linkAddrCache) checkLinkRequest(now time.Time, k tcpip.FullAddress, attempt int) bool {\n+func (c *linkAddrCache) checkLinkRequest(now time.Time, k tcpip.Address, attempt int) bool {\nc.cache.Lock()\ndefer c.cache.Unlock()\nentry, ok := c.cache.table[k]\n@@ -268,6 +270,6 @@ func newLinkAddrCache(ageLimit, resolutionTimeout time.Duration, resolutionAttem\nresolutionTimeout: resolutionTimeout,\nresolutionAttempts: resolutionAttempts,\n}\n- c.cache.table = make(map[tcpip.FullAddress]*linkAddrEntry, linkAddrCacheSize)\n+ c.cache.table = make(map[tcpip.Address]*linkAddrEntry, linkAddrCacheSize)\nreturn c\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/linkaddrcache_test.go", "new_path": "pkg/tcpip/stack/linkaddrcache_test.go", "diff": "@@ -26,7 +26,7 @@ import (\n)\ntype testaddr struct {\n- addr tcpip.FullAddress\n+ addr tcpip.Address\nlinkAddr tcpip.LinkAddress\n}\n@@ -35,7 +35,7 @@ var testAddrs = func() []testaddr {\nfor i := 0; i < 4*linkAddrCacheSize; i++ {\naddr := fmt.Sprintf(\"Addr%06d\", i)\naddrs = append(addrs, testaddr{\n- addr: tcpip.FullAddress{NIC: 1, Addr: tcpip.Address(addr)},\n+ addr: tcpip.Address(addr),\nlinkAddr: tcpip.LinkAddress(\"Link\" + addr),\n})\n}\n@@ -59,8 +59,8 @@ func (r *testLinkAddressResolver) LinkAddressRequest(targetAddr, _ tcpip.Address\nfunc (r *testLinkAddressResolver) fakeRequest(addr tcpip.Address) {\nfor _, ta := range testAddrs {\n- if ta.addr.Addr == addr {\n- r.cache.add(ta.addr, ta.linkAddr)\n+ if ta.addr == addr {\n+ r.cache.AddLinkAddress(ta.addr, ta.linkAddr)\nbreak\n}\n}\n@@ -77,7 +77,7 @@ func (*testLinkAddressResolver) LinkAddressProtocol() tcpip.NetworkProtocolNumbe\nreturn 1\n}\n-func getBlocking(c *linkAddrCache, addr tcpip.FullAddress, linkRes LinkAddressResolver) (tcpip.LinkAddress, *tcpip.Error) {\n+func getBlocking(c *linkAddrCache, addr tcpip.Address, linkRes LinkAddressResolver) (tcpip.LinkAddress, *tcpip.Error) {\nvar attemptedResolution bool\nfor {\ngot, ch, err := c.get(addr, linkRes, \"\", nil, nil)\n@@ -97,13 +97,13 @@ func TestCacheOverflow(t *testing.T) {\nc := newLinkAddrCache(1<<63-1, 1*time.Second, 3)\nfor i := len(testAddrs) - 1; i >= 0; i-- {\ne := testAddrs[i]\n- c.add(e.addr, e.linkAddr)\n+ c.AddLinkAddress(e.addr, e.linkAddr)\ngot, _, err := c.get(e.addr, nil, \"\", nil, nil)\nif err != nil {\n- t.Errorf(\"insert %d, c.get(%q)=%q, got error: %v\", i, string(e.addr.Addr), got, err)\n+ t.Errorf(\"insert %d, c.get(%s, nil, '', nil, nil): %s\", i, e.addr, err)\n}\nif got != e.linkAddr {\n- t.Errorf(\"insert %d, c.get(%q)=%q, want %q\", i, string(e.addr.Addr), got, e.linkAddr)\n+ t.Errorf(\"insert %d, got c.get(%s, nil, '', nil, nil) = %s, want = %s\", i, e.addr, got, e.linkAddr)\n}\n}\n// Expect to find at least half of the most recent entries.\n@@ -111,10 +111,10 @@ func TestCacheOverflow(t *testing.T) {\ne := testAddrs[i]\ngot, _, err := c.get(e.addr, nil, \"\", nil, nil)\nif err != nil {\n- t.Errorf(\"check %d, c.get(%q)=%q, got error: %v\", i, string(e.addr.Addr), got, err)\n+ t.Errorf(\"check %d, c.get(%s, nil, '', nil, nil): %s\", i, e.addr, err)\n}\nif got != e.linkAddr {\n- t.Errorf(\"check %d, c.get(%q)=%q, want %q\", i, string(e.addr.Addr), got, e.linkAddr)\n+ t.Errorf(\"check %d, got c.get(%s, nil, '', nil, nil) = %s, want = %s\", i, e.addr, got, e.linkAddr)\n}\n}\n// The earliest entries should no longer be in the cache.\n@@ -123,7 +123,7 @@ func TestCacheOverflow(t *testing.T) {\nfor i := len(testAddrs) 
- 1; i >= len(testAddrs)-linkAddrCacheSize; i-- {\ne := testAddrs[i]\nif entry, ok := c.cache.table[e.addr]; ok {\n- t.Errorf(\"unexpected entry at c.cache.table[%q]: %#v\", string(e.addr.Addr), entry)\n+ t.Errorf(\"unexpected entry at c.cache.table[%s]: %#v\", e.addr, entry)\n}\n}\n}\n@@ -137,7 +137,7 @@ func TestCacheConcurrent(t *testing.T) {\nwg.Add(1)\ngo func() {\nfor _, e := range testAddrs {\n- c.add(e.addr, e.linkAddr)\n+ c.AddLinkAddress(e.addr, e.linkAddr)\n}\nwg.Done()\n}()\n@@ -150,17 +150,17 @@ func TestCacheConcurrent(t *testing.T) {\ne := testAddrs[len(testAddrs)-1]\ngot, _, err := c.get(e.addr, linkRes, \"\", nil, nil)\nif err != nil {\n- t.Errorf(\"c.get(%q)=%q, got error: %v\", string(e.addr.Addr), got, err)\n+ t.Errorf(\"c.get(%s, _, '', nil, nil): %s\", e.addr, err)\n}\nif got != e.linkAddr {\n- t.Errorf(\"c.get(%q)=%q, want %q\", string(e.addr.Addr), got, e.linkAddr)\n+ t.Errorf(\"got c.get(%s, _, '', nil, nil) = %s, want = %s\", e.addr, got, e.linkAddr)\n}\ne = testAddrs[0]\nc.cache.Lock()\ndefer c.cache.Unlock()\nif entry, ok := c.cache.table[e.addr]; ok {\n- t.Errorf(\"unexpected entry at c.cache.table[%q]: %#v\", string(e.addr.Addr), entry)\n+ t.Errorf(\"unexpected entry at c.cache.table[%s]: %#v\", e.addr, entry)\n}\n}\n@@ -169,10 +169,10 @@ func TestCacheAgeLimit(t *testing.T) {\nlinkRes := &testLinkAddressResolver{cache: c}\ne := testAddrs[0]\n- c.add(e.addr, e.linkAddr)\n+ c.AddLinkAddress(e.addr, e.linkAddr)\ntime.Sleep(50 * time.Millisecond)\nif _, _, err := c.get(e.addr, linkRes, \"\", nil, nil); err != tcpip.ErrWouldBlock {\n- t.Errorf(\"got c.get(%q) = %s, want = ErrWouldBlock\", string(e.addr.Addr), err)\n+ t.Errorf(\"got c.get(%s, _, '', nil, nil) = %s, want = ErrWouldBlock\", e.addr, err)\n}\n}\n@@ -180,22 +180,22 @@ func TestCacheReplace(t *testing.T) {\nc := newLinkAddrCache(1<<63-1, 1*time.Second, 3)\ne := testAddrs[0]\nl2 := e.linkAddr + \"2\"\n- c.add(e.addr, e.linkAddr)\n+ c.AddLinkAddress(e.addr, e.linkAddr)\ngot, _, err := c.get(e.addr, nil, \"\", nil, nil)\nif err != nil {\n- t.Errorf(\"c.get(%q)=%q, got error: %v\", string(e.addr.Addr), got, err)\n+ t.Errorf(\"c.get(%s, nil, '', nil, nil): %s\", e.addr, err)\n}\nif got != e.linkAddr {\n- t.Errorf(\"c.get(%q)=%q, want %q\", string(e.addr.Addr), got, e.linkAddr)\n+ t.Errorf(\"got c.get(%s, nil, '', nil, nil) = %s, want = %s\", e.addr, got, e.linkAddr)\n}\n- c.add(e.addr, l2)\n+ c.AddLinkAddress(e.addr, l2)\ngot, _, err = c.get(e.addr, nil, \"\", nil, nil)\nif err != nil {\n- t.Errorf(\"c.get(%q)=%q, got error: %v\", string(e.addr.Addr), got, err)\n+ t.Errorf(\"c.get(%s, nil, '', nil, nil): %s\", e.addr, err)\n}\nif got != l2 {\n- t.Errorf(\"c.get(%q)=%q, want %q\", string(e.addr.Addr), got, l2)\n+ t.Errorf(\"got c.get(%s, nil, '', nil, nil) = %s, want = %s\", e.addr, got, l2)\n}\n}\n@@ -211,10 +211,10 @@ func TestCacheResolution(t *testing.T) {\nfor i, ta := range testAddrs {\ngot, err := getBlocking(c, ta.addr, linkRes)\nif err != nil {\n- t.Errorf(\"check %d, c.get(%q)=%q, got error: %v\", i, string(ta.addr.Addr), got, err)\n+ t.Errorf(\"check %d, getBlocking(_, %s, _): %s\", i, ta.addr, err)\n}\nif got != ta.linkAddr {\n- t.Errorf(\"check %d, c.get(%q)=%q, want %q\", i, string(ta.addr.Addr), got, ta.linkAddr)\n+ t.Errorf(\"check %d, got getBlocking(_, %s, _) = %s, want = %s\", i, ta.addr, got, ta.linkAddr)\n}\n}\n@@ -223,10 +223,10 @@ func TestCacheResolution(t *testing.T) {\ne := testAddrs[len(testAddrs)-1]\ngot, _, err := c.get(e.addr, linkRes, \"\", nil, nil)\nif err != nil {\n- 
t.Errorf(\"c.get(%q)=%q, got error: %v\", string(e.addr.Addr), got, err)\n+ t.Errorf(\"c.get(%s, _, '', nil, nil): %s\", e.addr, err)\n}\nif got != e.linkAddr {\n- t.Errorf(\"c.get(%q)=%q, want %q\", string(e.addr.Addr), got, e.linkAddr)\n+ t.Errorf(\"got c.get(%s, _, '', nil, nil) = %s, want = %s\", e.addr, got, e.linkAddr)\n}\n}\n}\n@@ -244,17 +244,17 @@ func TestCacheResolutionFailed(t *testing.T) {\ne := testAddrs[0]\ngot, err := getBlocking(c, e.addr, linkRes)\nif err != nil {\n- t.Errorf(\"c.get(%q)=%q, got error: %v\", string(e.addr.Addr), got, err)\n+ t.Errorf(\"getBlocking(_, %s, _): %s\", e.addr, err)\n}\nif got != e.linkAddr {\n- t.Errorf(\"c.get(%q)=%q, want %q\", string(e.addr.Addr), got, e.linkAddr)\n+ t.Errorf(\"got getBlocking(_, %s, _) = %s, want = %s\", e.addr, got, e.linkAddr)\n}\nbefore := atomic.LoadUint32(&requestCount)\n- e.addr.Addr += \"2\"\n+ e.addr += \"2\"\nif a, err := getBlocking(c, e.addr, linkRes); err != tcpip.ErrTimeout {\n- t.Errorf(\"got getBlocking(_, %#v, _) = (%s, %s), want = (_, %s)\", e.addr, a, err, tcpip.ErrTimeout)\n+ t.Errorf(\"got getBlocking(_, %s, _) = (%s, %s), want = (_, %s)\", e.addr, a, err, tcpip.ErrTimeout)\n}\nif got, want := int(atomic.LoadUint32(&requestCount)-before), c.resolutionAttempts; got != want {\n@@ -270,6 +270,6 @@ func TestCacheResolutionTimeout(t *testing.T) {\ne := testAddrs[0]\nif a, err := getBlocking(c, e.addr, linkRes); err != tcpip.ErrTimeout {\n- t.Errorf(\"got getBlocking(_, %#v, _) = (%s, %s), want = (_, %s)\", e.addr, a, err, tcpip.ErrTimeout)\n+ t.Errorf(\"got getBlocking(_, %s, _) = (%s, %s), want = (_, %s)\", e.addr, a, err, tcpip.ErrTimeout)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/ndp_test.go", "new_path": "pkg/tcpip/stack/ndp_test.go", "diff": "@@ -2808,6 +2808,7 @@ func stackAndNdpDispatcherWithDefaultRoute(t *testing.T, nicID tcpip.NICID, useN\nautoGenAddrC: make(chan ndpAutoGenAddrEvent, 1),\n}\ne := channel.New(0, 1280, linkAddr1)\n+ e.LinkEPCapabilities |= stack.CapabilityResolutionRequired\ns := stack.New(stack.Options{\nNetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{\nNDPConfigs: ipv6.NDPConfigurations{\n@@ -2827,10 +2828,15 @@ func stackAndNdpDispatcherWithDefaultRoute(t *testing.T, nicID tcpip.NICID, useN\nGateway: llAddr3,\nNIC: nicID,\n}})\n+\nif useNeighborCache {\n- s.AddStaticNeighbor(nicID, llAddr3, linkAddr3)\n+ if err := s.AddStaticNeighbor(nicID, llAddr3, linkAddr3); err != nil {\n+ t.Fatalf(\"s.AddStaticNeighbor(%d, %s, %s): %s\", nicID, llAddr3, linkAddr3, err)\n+ }\n} else {\n- s.AddLinkAddress(nicID, llAddr3, linkAddr3)\n+ if err := s.AddLinkAddress(nicID, llAddr3, linkAddr3); err != nil {\n+ t.Fatalf(\"s.AddLinkAddress(%d, %s, %s): %s\", nicID, llAddr3, linkAddr3, err)\n+ }\n}\nreturn ndpDisp, e, s\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/nic.go", "new_path": "pkg/tcpip/stack/nic.go", "diff": "@@ -53,6 +53,8 @@ type NIC struct {\n// complete.\nlinkResQueue packetsPendingLinkResolution\n+ linkAddrCache *linkAddrCache\n+\nmu struct {\nsync.RWMutex\nspoofing bool\n@@ -137,6 +139,7 @@ func newNIC(stack *Stack, id tcpip.NICID, name string, ep LinkEndpoint, ctx NICC\ncontext: ctx,\nstats: makeNICStats(),\nnetworkEndpoints: make(map[tcpip.NetworkProtocolNumber]NetworkEndpoint),\n+ linkAddrCache: newLinkAddrCache(ageLimit, resolutionTimeout, resolutionAttempts),\n}\nnic.linkResQueue.init()\nnic.mu.packetEPs = make(map[tcpip.NetworkProtocolNumber]*packetEndpointList)\n@@ -167,7 +170,7 @@ func 
newNIC(stack *Stack, id tcpip.NICID, name string, ep LinkEndpoint, ctx NICC\nfor _, netProto := range stack.networkProtocols {\nnetNum := netProto.Number()\nnic.mu.packetEPs[netNum] = new(packetEndpointList)\n- nic.networkEndpoints[netNum] = netProto.NewEndpoint(nic, stack, nud, nic)\n+ nic.networkEndpoints[netNum] = netProto.NewEndpoint(nic, nic.linkAddrCache, nud, nic)\n}\nnic.LinkEndpoint.Attach(nic)\n@@ -558,7 +561,7 @@ func (n *NIC) getNeighborLinkAddress(addr, localAddr tcpip.Address, linkRes Link\nreturn entry.LinkAddr, ch, err\n}\n- return n.stack.linkAddrCache.get(tcpip.FullAddress{NIC: n.ID(), Addr: addr}, linkRes, localAddr, n, onResolve)\n+ return n.linkAddrCache.get(addr, linkRes, localAddr, n, onResolve)\n}\nfunc (n *NIC) neighbors() ([]NeighborEntry, *tcpip.Error) {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/registration.go", "new_path": "pkg/tcpip/stack/registration.go", "diff": "@@ -850,7 +850,7 @@ type LinkAddressResolver interface {\n// A LinkAddressCache caches link addresses.\ntype LinkAddressCache interface {\n// AddLinkAddress adds a link address to the cache.\n- AddLinkAddress(nicID tcpip.NICID, addr tcpip.Address, linkAddr tcpip.LinkAddress)\n+ AddLinkAddress(addr tcpip.Address, linkAddr tcpip.LinkAddress)\n}\n// RawFactory produces endpoints for writing various types of raw packets.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack.go", "new_path": "pkg/tcpip/stack/stack.go", "diff": "@@ -382,8 +382,6 @@ type Stack struct {\nstats tcpip.Stats\n- linkAddrCache *linkAddrCache\n-\nmu sync.RWMutex\nnics map[tcpip.NICID]*NIC\n@@ -636,7 +634,6 @@ func New(opts Options) *Stack {\nlinkAddrResolvers: make(map[tcpip.NetworkProtocolNumber]LinkAddressResolver),\nnics: make(map[tcpip.NICID]*NIC),\ncleanupEndpoints: make(map[TransportEndpoint]struct{}),\n- linkAddrCache: newLinkAddrCache(ageLimit, resolutionTimeout, resolutionAttempts),\nPortManager: ports.NewPortManager(),\nclock: clock,\nstats: opts.Stats.FillIn(),\n@@ -1516,12 +1513,18 @@ func (s *Stack) SetSpoofing(nicID tcpip.NICID, enable bool) *tcpip.Error {\nreturn nil\n}\n-// AddLinkAddress adds a link address to the stack link cache.\n-func (s *Stack) AddLinkAddress(nicID tcpip.NICID, addr tcpip.Address, linkAddr tcpip.LinkAddress) {\n- fullAddr := tcpip.FullAddress{NIC: nicID, Addr: addr}\n- s.linkAddrCache.add(fullAddr, linkAddr)\n- // TODO: provide a way for a transport endpoint to receive a signal\n- // that AddLinkAddress for a particular address has been called.\n+// AddLinkAddress adds a link address for the neighbor on the specified NIC.\n+func (s *Stack) AddLinkAddress(nicID tcpip.NICID, neighbor tcpip.Address, linkAddr tcpip.LinkAddress) *tcpip.Error {\n+ s.mu.RLock()\n+ defer s.mu.RUnlock()\n+\n+ nic, ok := s.nics[nicID]\n+ if !ok {\n+ return tcpip.ErrUnknownNICID\n+ }\n+\n+ nic.linkAddrCache.AddLinkAddress(neighbor, linkAddr)\n+ return nil\n}\n// GetLinkAddress finds the link address corresponding to a neighbor's address.\n" } ]
Go
Apache License 2.0
google/gvisor
Do not have a stack-wide linkAddressCache Link addresses are cached on a per NIC basis so instead of having a single cache that includes the NIC ID for neighbor entry lookups, use a single cache per NIC. PiperOrigin-RevId: 352684111
259,885
20.01.2021 15:54:51
28,800
9af6150b5e36bcc731db6f4693fe8e9c5848a5e3
Remove string allocation from strings.Repeat() in /proc/[pid]/maps.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/mm/procfs.go", "new_path": "pkg/sentry/mm/procfs.go", "diff": "@@ -17,7 +17,6 @@ package mm\nimport (\n\"bytes\"\n\"fmt\"\n- \"strings\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile\"\n@@ -165,12 +164,12 @@ func (mm *MemoryManager) appendVMAMapsEntryLocked(ctx context.Context, vseg vmaI\n}\nif s != \"\" {\n// Per linux, we pad until the 74th character.\n- if pad := 73 - lineLen; pad > 0 {\n- b.WriteString(strings.Repeat(\" \", pad))\n+ for pad := 73 - lineLen; pad > 0; pad-- {\n+ b.WriteByte(' ')\n}\nb.WriteString(s)\n}\n- b.WriteString(\"\\n\")\n+ b.WriteByte('\\n')\n}\n// ReadSmapsDataInto is called by fsimpl/proc.smapsData.Generate to\n" } ]
Go
Apache License 2.0
google/gvisor
Remove string allocation from strings.Repeat() in /proc/[pid]/maps. PiperOrigin-RevId: 352894106
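A standalone sketch of the pattern adopted above: padding a bytes.Buffer to a column by writing single spaces instead of allocating a temporary string with strings.Repeat. The sample maps entry and the column number 73 are only illustrative.

```go
package main

import (
	"bytes"
	"fmt"
)

// padTo writes spaces until a line that is currently lineLen bytes long
// reaches the requested column, without building an intermediate string.
func padTo(b *bytes.Buffer, lineLen, col int) {
	for pad := col - lineLen; pad > 0; pad-- {
		b.WriteByte(' ')
	}
}

func main() {
	var b bytes.Buffer
	entry := "7f0000000000-7f0000021000 rw-p 00000000 00:00 0"
	b.WriteString(entry)
	padTo(&b, len(entry), 73)
	b.WriteString("[stack]")
	b.WriteByte('\n')
	fmt.Print(b.String())
}
```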
259,885
20.01.2021 16:46:18
28,800
ca9a45f282a08a88df19c93d5968d720b5153c41
Fix refcount increments in gofer.filesystem.Sync. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "new_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "diff": "@@ -36,17 +36,27 @@ import (\n// Sync implements vfs.FilesystemImpl.Sync.\nfunc (fs *filesystem) Sync(ctx context.Context) error {\n// Snapshot current syncable dentries and special file FDs.\n+ fs.renameMu.RLock()\nfs.syncMu.Lock()\nds := make([]*dentry, 0, len(fs.syncableDentries))\nfor d := range fs.syncableDentries {\n+ // It's safe to use IncRef here even though fs.syncableDentries doesn't\n+ // hold references since we hold fs.renameMu. Note that we can't use\n+ // TryIncRef since cached dentries at zero references should still be\n+ // synced.\nd.IncRef()\nds = append(ds, d)\n}\n+ fs.renameMu.RUnlock()\nsffds := make([]*specialFileFD, 0, len(fs.specialFileFDs))\nfor sffd := range fs.specialFileFDs {\n- sffd.vfsfd.IncRef()\n+ // As above, fs.specialFileFDs doesn't hold references. However, unlike\n+ // dentries, an FD that has reached zero references can't be\n+ // resurrected, so we can use TryIncRef.\n+ if sffd.vfsfd.TryIncRef() {\nsffds = append(sffds, sffd)\n}\n+ }\nfs.syncMu.Unlock()\n// Return the first error we encounter, but sync everything we can\n" } ]
Go
Apache License 2.0
google/gvisor
Fix refcount increments in gofer.filesystem.Sync. Fixes #5263 PiperOrigin-RevId: 352903844
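The IncRef/TryIncRef distinction in the comments above follows the usual reference-counting convention: TryIncRef refuses to resurrect an object whose count has already reached zero, while IncRef assumes the caller guarantees liveness by other means (here, holding renameMu). Below is a minimal, generic sketch of such a counter; it is not the gVisor refs implementation.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type ref struct {
	count int64
}

// IncRef unconditionally takes a reference; the caller must guarantee the
// object is still live, for example by holding a lock that prevents it from
// being destroyed.
func (r *ref) IncRef() { atomic.AddInt64(&r.count, 1) }

// TryIncRef takes a reference only while the count is still positive, so it
// fails for an object that has already dropped to zero references.
func (r *ref) TryIncRef() bool {
	for {
		c := atomic.LoadInt64(&r.count)
		if c == 0 {
			return false
		}
		if atomic.CompareAndSwapInt64(&r.count, c, c+1) {
			return true
		}
	}
}

func main() {
	live := &ref{count: 1}
	dead := &ref{count: 0}
	fmt.Println(live.TryIncRef(), dead.TryIncRef()) // true false
}
```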
259,860
20.01.2021 16:56:58
28,800
a733a8142e42b5405cb9b0da41b051dfd2f03716
Fix typo boundry -> boundary.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_unix_cmsg.cc", "new_path": "test/syscalls/linux/socket_unix_cmsg.cc", "diff": "@@ -362,7 +362,7 @@ TEST_P(UnixSocketPairCmsgTest, BasicThreeFDPassTruncationMsgCtrunc) {\n// BasicFDPassUnalignedRecv starts off by sending a single FD just like\n// BasicFDPass. The difference is that when calling recvmsg, the length of the\n-// receive data is only aligned on a 4 byte boundry instead of the normal 8.\n+// receive data is only aligned on a 4 byte boundary instead of the normal 8.\nTEST_P(UnixSocketPairCmsgTest, BasicFDPassUnalignedRecv) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n" } ]
Go
Apache License 2.0
google/gvisor
Fix typo boundry -> boundary. PiperOrigin-RevId: 352905565
259,885
20.01.2021 17:14:04
28,800
a50bc8446c7f2167bf65588020924bd9c644d174
Don't use task goroutine context in fsimpl tests.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/proc/tasks_test.go", "new_path": "pkg/sentry/fsimpl/proc/tasks_test.go", "diff": "@@ -393,7 +393,7 @@ func TestProcSelf(t *testing.T) {\nt.Fatalf(\"CreateTask(): %v\", err)\n}\n- collector := s.WithTemporaryContext(task).ListDirents(&vfs.PathOperation{\n+ collector := s.WithTemporaryContext(task.AsyncContext()).ListDirents(&vfs.PathOperation{\nRoot: s.Root,\nStart: s.Root,\nPath: fspath.Parse(\"/proc/self/\"),\n@@ -491,11 +491,11 @@ func TestTree(t *testing.T) {\nt.Fatalf(\"CreateTask(): %v\", err)\n}\n// Add file to populate /proc/[pid]/fd and fdinfo directories.\n- task.FDTable().NewFDVFS2(task, 0, file, kernel.FDFlags{})\n+ task.FDTable().NewFDVFS2(task.AsyncContext(), 0, file, kernel.FDFlags{})\ntasks = append(tasks, task)\n}\n- ctx := tasks[0]\n+ ctx := tasks[0].AsyncContext()\nfd, err := s.VFS.OpenAt(\nctx,\nauth.CredentialsFromContext(s.Ctx),\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/verity_test.go", "new_path": "pkg/sentry/fsimpl/verity/verity_test.go", "diff": "@@ -66,7 +66,7 @@ func dentryFromFD(t *testing.T, fd *vfs.FileDescription) *dentry {\n// newVerityRoot creates a new verity mount, and returns the root. The\n// underlying file system is tmpfs. If the error is not nil, then cleanup\n// should be called when the root is no longer needed.\n-func newVerityRoot(t *testing.T, hashAlg HashAlgorithm) (*vfs.VirtualFilesystem, vfs.VirtualDentry, *kernel.Task, error) {\n+func newVerityRoot(t *testing.T, hashAlg HashAlgorithm) (*vfs.VirtualFilesystem, vfs.VirtualDentry, context.Context, error) {\nt.Helper()\nk, err := testutil.Boot()\nif err != nil {\n@@ -119,7 +119,7 @@ func newVerityRoot(t *testing.T, hashAlg HashAlgorithm) (*vfs.VirtualFilesystem,\nroot.DecRef(ctx)\nmntns.DecRef(ctx)\n})\n- return vfsObj, root, task, nil\n+ return vfsObj, root, task.AsyncContext(), nil\n}\n// openVerityAt opens a verity file.\n" } ]
Go
Apache License 2.0
google/gvisor
Don't use task goroutine context in fsimpl tests. PiperOrigin-RevId: 352908368
259,896
20.01.2021 18:14:40
28,800
b0136253e6de393b2892e0fee8d85d636e82616c
Remove unimplemented message for SO_LINGER. Removes the unimplemented message for SO_LINGER. Fix the length for IP_PKTINFO option
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/hostinet/socket.go", "new_path": "pkg/sentry/socket/hostinet/socket.go", "diff": "@@ -377,10 +377,8 @@ func (s *socketOpsCommon) SetSockOpt(t *kernel.Task, level int, name int, opt []\nswitch level {\ncase linux.SOL_IP:\nswitch name {\n- case linux.IP_TOS, linux.IP_RECVTOS, linux.IP_RECVORIGDSTADDR, linux.IP_RECVERR:\n+ case linux.IP_TOS, linux.IP_RECVTOS, linux.IP_PKTINFO, linux.IP_RECVORIGDSTADDR, linux.IP_RECVERR:\noptlen = sizeofInt32\n- case linux.IP_PKTINFO:\n- optlen = linux.SizeOfControlMessageIPPacketInfo\n}\ncase linux.SOL_IPV6:\nswitch name {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack.go", "new_path": "pkg/sentry/socket/netstack/netstack.go", "diff": "@@ -1814,10 +1814,6 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam\nvar v linux.Linger\nbinary.Unmarshal(optVal[:linux.SizeOfLinger], usermem.ByteOrder, &v)\n- if v != (linux.Linger{}) {\n- socket.SetSockOptEmitUnimplementedEvent(t, name)\n- }\n-\nep.SocketOptions().SetLinger(tcpip.LingerOption{\nEnabled: v.OnOff != 0,\nTimeout: time.Second * time.Duration(v.Linger),\n" } ]
Go
Apache License 2.0
google/gvisor
Remove unimplemented message for SO_LINGER - Removes the unimplemented message for SO_LINGER - Fix the length for IP_PKTINFO option PiperOrigin-RevId: 352917611
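For context: the IP_PKTINFO length fix reflects that setsockopt(IP_PKTINFO) takes an int-sized boolean toggle, not a struct in_pktinfo. SO_LINGER, by contrast, is set with a struct linger; a minimal sketch using golang.org/x/sys/unix (fd is assumed to be an already-open socket):

```go
package example

import "golang.org/x/sys/unix"

// setLinger enables SO_LINGER on a socket so that close() lingers for up to
// `seconds` while unsent data is flushed.
func setLinger(fd int, seconds int32) error {
	l := unix.Linger{Onoff: 1, Linger: seconds}
	return unix.SetsockoptLinger(fd, unix.SOL_SOCKET, unix.SO_LINGER, &l)
}
```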
259,884
20.01.2021 21:51:29
28,800
33b803e47f78206787bf0e53662534c5e428eb09
Add syscall docs back to menu. Syscall docs were excluded from the navigation menu. This change adds them back.
[ { "change_type": "MODIFY", "old_path": "website/cmd/syscalldocs/main.go", "new_path": "website/cmd/syscalldocs/main.go", "diff": "@@ -52,6 +52,7 @@ layout: docs\ncategory: Compatibility\nweight: 50\npermalink: /docs/user_guide/compatibility/{{.OS}}/{{.Arch}}/\n+include_in_menu: True\n---\nThis table is a reference of {{.OS}} syscalls for the {{.Arch}} architecture and\n" } ]
Go
Apache License 2.0
google/gvisor
Add syscall docs back to menu Syscall docs were excluded from the navigation menu. This change adds them back. PiperOrigin-RevId: 352942238
259,860
20.01.2021 23:37:41
28,800
2c58af226476a47fa129cb4717738e36d44a4491
Update splice syscall documentation.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/linux64.go", "new_path": "pkg/sentry/syscalls/linux/linux64.go", "diff": "@@ -504,7 +504,7 @@ var ARM64 = &kernel.SyscallTable{\n73: syscalls.Supported(\"ppoll\", Ppoll),\n74: syscalls.PartiallySupported(\"signalfd4\", Signalfd4, \"Semantics are slightly different.\", []string{\"gvisor.dev/issue/139\"}),\n75: syscalls.ErrorWithEvent(\"vmsplice\", syserror.ENOSYS, \"\", []string{\"gvisor.dev/issue/138\"}), // TODO(b/29354098)\n- 76: syscalls.PartiallySupported(\"splice\", Splice, \"Stub implementation.\", []string{\"gvisor.dev/issue/138\"}), // TODO(b/29354098)\n+ 76: syscalls.Supported(\"splice\", Splice),\n77: syscalls.Supported(\"tee\", Tee),\n78: syscalls.Supported(\"readlinkat\", Readlinkat),\n79: syscalls.Supported(\"fstatat\", Fstatat),\n" } ]
Go
Apache License 2.0
google/gvisor
Update splice syscall documentation. PiperOrigin-RevId: 352954044
259,898
21.01.2021 00:42:26
28,800
60b07541dbfa3db29e9e21f554762cc192e14ab3
Syscall test for connecting to unspecified address. For an active OPEN call with unspecified foreign socket, TCP MUST return error: foreign socket unspecified
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/tcp_socket.cc", "new_path": "test/syscalls/linux/tcp_socket.cc", "diff": "@@ -2008,6 +2008,29 @@ TEST_P(SimpleTcpSocketTest, GetSocketAcceptConnWithShutdown) {\nEXPECT_EQ(got, 0);\n}\n+// Tests that connecting to an unspecified address results in ECONNREFUSED.\n+TEST_P(SimpleTcpSocketTest, ConnectUnspecifiedAddress) {\n+ sockaddr_storage addr;\n+ socklen_t addrlen = sizeof(addr);\n+ memset(&addr, 0, addrlen);\n+ addr.ss_family = GetParam();\n+ auto do_connect = [&addr, addrlen]() {\n+ FileDescriptor s = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(addr.ss_family, SOCK_STREAM, IPPROTO_TCP));\n+ ASSERT_THAT(\n+ RetryEINTR(connect)(s.get(), reinterpret_cast<struct sockaddr*>(&addr),\n+ addrlen),\n+ SyscallFailsWithErrno(ECONNREFUSED));\n+ };\n+ do_connect();\n+ // Test the v4 mapped address as well.\n+ if (GetParam() == AF_INET6) {\n+ auto sin6 = reinterpret_cast<struct sockaddr_in6*>(&addr);\n+ sin6->sin6_addr.s6_addr[10] = sin6->sin6_addr.s6_addr[11] = 0xff;\n+ do_connect();\n+ }\n+}\n+\nINSTANTIATE_TEST_SUITE_P(AllInetTests, SimpleTcpSocketTest,\n::testing::Values(AF_INET, AF_INET6));\n" } ]
Go
Apache License 2.0
google/gvisor
Syscall test for connecting to unspecified address For an active OPEN call with unspecified foreign socket, TCP MUST return error: foreign socket unspecified PiperOrigin-RevId: 352961691
259,992
21.01.2021 13:19:02
28,800
1005a8849173c84f12f4a86b32fb170ab9149082
Fix fsstress argument name. Updates
[ { "change_type": "MODIFY", "old_path": "test/fsstress/fsstress_test.go", "new_path": "test/fsstress/fsstress_test.go", "diff": "@@ -41,7 +41,7 @@ func fsstress(t *testing.T, dir string) {\nimage = \"basic/fsstress\"\n)\nseed := strconv.FormatUint(uint64(rand.Uint32()), 10)\n- args := []string{\"-d\", dir, \"-n\", operations, \"-p\", processes, \"-seed\", seed, \"-X\"}\n+ args := []string{\"-d\", dir, \"-n\", operations, \"-p\", processes, \"-s\", seed, \"-X\"}\nt.Logf(\"Repro: docker run --rm --runtime=runsc %s %s\", image, strings.Join(args, \"\"))\nout, err := d.Run(ctx, dockerutil.RunOpts{Image: image}, args...)\nif err != nil {\n" } ]
Go
Apache License 2.0
google/gvisor
Fix fsstress argument name Updates #5273 PiperOrigin-RevId: 353087710
260,004
21.01.2021 14:08:20
28,800
0ca4cf769840afa7c44477d75d9f780540c01fcd
Populate EgressRoute, GSO, Netproto in NIC. fdbased and qdisc layers expect these fields to already be populated before being reached.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/qdisc/fifo/endpoint.go", "new_path": "pkg/tcpip/link/qdisc/fifo/endpoint.go", "diff": "@@ -165,12 +165,15 @@ func (e *endpoint) WritePacket(r stack.RouteInfo, gso *stack.GSO, protocol tcpip\n}\n// WritePackets implements stack.LinkEndpoint.WritePackets.\n+//\n+// Being a batch API, each packet in pkts should have the following\n+// fields populated:\n+// - pkt.EgressRoute\n+// - pkt.GSOOptions\n+// - pkt.NetworkProtocolNumber\nfunc (e *endpoint) WritePackets(r stack.RouteInfo, gso *stack.GSO, pkts stack.PacketBufferList, protocol tcpip.NetworkProtocolNumber) (int, *tcpip.Error) {\nenqueued := 0\nfor pkt := pkts.Front(); pkt != nil; {\n- pkt.EgressRoute = r\n- pkt.GSOOptions = gso\n- pkt.NetworkProtocolNumber = protocol\nd := e.dispatchers[int(pkt.Hash)%len(e.dispatchers)]\nnxt := pkt.Next()\nif !d.q.enqueue(pkt) {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/nic.go", "new_path": "pkg/tcpip/stack/nic.go", "diff": "@@ -344,6 +344,9 @@ func (n *NIC) writePacket(r RouteInfo, gso *GSO, protocol tcpip.NetworkProtocolN\n// WritePacket takes ownership of pkt, calculate numBytes first.\nnumBytes := pkt.Size()\n+ pkt.EgressRoute = r\n+ pkt.GSOOptions = gso\n+ pkt.NetworkProtocolNumber = protocol\nif err := n.LinkEndpoint.WritePacket(r, gso, protocol, pkt); err != nil {\nreturn err\n}\n@@ -357,7 +360,14 @@ func (n *NIC) writePacket(r RouteInfo, gso *GSO, protocol tcpip.NetworkProtocolN\nfunc (n *NIC) WritePackets(r *Route, gso *GSO, pkts PacketBufferList, protocol tcpip.NetworkProtocolNumber) (int, *tcpip.Error) {\n// TODO(gvisor.dev/issue/4458): Queue packets whie link address resolution\n// is being peformed like WritePacket.\n- writtenPackets, err := n.LinkEndpoint.WritePackets(r.Fields(), gso, pkts, protocol)\n+ routeInfo := r.Fields()\n+ for pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {\n+ pkt.EgressRoute = routeInfo\n+ pkt.GSOOptions = gso\n+ pkt.NetworkProtocolNumber = protocol\n+ }\n+\n+ writtenPackets, err := n.LinkEndpoint.WritePackets(routeInfo, gso, pkts, protocol)\nn.stats.Tx.Packets.IncrementBy(uint64(writtenPackets))\nwrittenBytes := 0\nfor i, pb := 0, pkts.Front(); i < writtenPackets && pb != nil; i, pb = i+1, pb.Next() {\n" } ]
Go
Apache License 2.0
google/gvisor
Populate EgressRoute, GSO, Netproto in NIC fdbased and qdisc layers expect these fields to already be populated before being reached. PiperOrigin-RevId: 353099492
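The NIC change above stamps per-packet egress metadata before handing the batch to the link layer, since fdbased and qdisc no longer do it themselves. A simplified Go sketch of that shape, with stand-in types rather than the netstack ones:

```go
package example

// pkt is a stand-in for a packet buffer whose egress metadata must be set
// before the batch write.
type pkt struct {
	route string // stand-in for EgressRoute
	proto uint32 // stand-in for NetworkProtocolNumber
	next  *pkt
}

// stampAndWrite fills in the per-packet fields the lower layers expect and
// then hands the whole list to the batch writer in a single call.
func stampAndWrite(head *pkt, route string, proto uint32, write func(*pkt) (int, error)) (int, error) {
	for p := head; p != nil; p = p.next {
		p.route = route
		p.proto = proto
	}
	return write(head)
}
```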
260,004
21.01.2021 14:51:24
28,800
89df5a681c004a3772facc08de73d930636227be
Queue packets in WritePackets when resolving link address. Test: integration_test.TestWritePacketsLinkResolution. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/pipe/pipe.go", "new_path": "pkg/tcpip/link/pipe/pipe.go", "diff": "@@ -45,12 +45,7 @@ type Endpoint struct {\nlinkAddr tcpip.LinkAddress\n}\n-// WritePacket implements stack.LinkEndpoint.\n-func (e *Endpoint) WritePacket(r stack.RouteInfo, _ *stack.GSO, proto tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) *tcpip.Error {\n- if !e.linked.IsAttached() {\n- return nil\n- }\n-\n+func (e *Endpoint) deliverPackets(r stack.RouteInfo, proto tcpip.NetworkProtocolNumber, pkts stack.PacketBufferList) {\n// Note that the local address from the perspective of this endpoint is the\n// remote address from the perspective of the other end of the pipe\n// (e.linked). Similarly, the remote address from the perspective of this\n@@ -70,16 +65,33 @@ func (e *Endpoint) WritePacket(r stack.RouteInfo, _ *stack.GSO, proto tcpip.Netw\n//\n// TODO(gvisor.dev/issue/5289): don't use a new goroutine once we support send\n// and receive queues.\n- go e.linked.dispatcher.DeliverNetworkPacket(r.LocalLinkAddress /* remote */, r.RemoteLinkAddress /* local */, proto, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ go func() {\n+ for pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {\n+ e.linked.dispatcher.DeliverNetworkPacket(r.LocalLinkAddress /* remote */, r.RemoteLinkAddress /* local */, proto, stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buffer.NewVectorisedView(pkt.Size(), pkt.Views()),\n}))\n+ }\n+ }()\n+}\n+\n+// WritePacket implements stack.LinkEndpoint.\n+func (e *Endpoint) WritePacket(r stack.RouteInfo, _ *stack.GSO, proto tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) *tcpip.Error {\n+ if e.linked.IsAttached() {\n+ var pkts stack.PacketBufferList\n+ pkts.PushBack(pkt)\n+ e.deliverPackets(r, proto, pkts)\n+ }\nreturn nil\n}\n// WritePackets implements stack.LinkEndpoint.\n-func (*Endpoint) WritePackets(stack.RouteInfo, *stack.GSO, stack.PacketBufferList, tcpip.NetworkProtocolNumber) (int, *tcpip.Error) {\n- panic(\"not implemented\")\n+func (e *Endpoint) WritePackets(r stack.RouteInfo, _ *stack.GSO, pkts stack.PacketBufferList, proto tcpip.NetworkProtocolNumber) (int, *tcpip.Error) {\n+ if e.linked.IsAttached() {\n+ e.deliverPackets(r, proto, pkts)\n+ }\n+\n+ return pkts.Len(), nil\n}\n// Attach implements stack.LinkEndpoint.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/nic.go", "new_path": "pkg/tcpip/stack/nic.go", "diff": "@@ -358,16 +358,43 @@ func (n *NIC) writePacket(r RouteInfo, gso *GSO, protocol tcpip.NetworkProtocolN\n// WritePackets implements NetworkLinkEndpoint.\nfunc (n *NIC) WritePackets(r *Route, gso *GSO, pkts PacketBufferList, protocol tcpip.NetworkProtocolNumber) (int, *tcpip.Error) {\n- // TODO(gvisor.dev/issue/4458): Queue packets whie link address resolution\n- // is being peformed like WritePacket.\n- routeInfo := r.Fields()\n+ // As per relevant RFCs, we should queue packets while we wait for link\n+ // resolution to complete.\n+ //\n+ // RFC 1122 section 2.3.2.2 (for IPv4):\n+ // The link layer SHOULD save (rather than discard) at least\n+ // one (the latest) packet of each set of packets destined to\n+ // the same unresolved IP address, and transmit the saved\n+ // packet when the address has been resolved.\n+ //\n+ // RFC 4861 section 7.2.2 (for IPv6):\n+ // While waiting for address resolution to complete, the sender MUST, for\n+ // each neighbor, retain a small queue of packets waiting for address\n+ // resolution to complete. 
The queue MUST hold at least one packet, and MAY\n+ // contain more. However, the number of queued packets per neighbor SHOULD\n+ // be limited to some small value. When a queue overflows, the new arrival\n+ // SHOULD replace the oldest entry. Once address resolution completes, the\n+ // node transmits any queued packets.\n+ if ch, err := r.Resolve(nil); err != nil {\n+ if err == tcpip.ErrWouldBlock {\n+ r.Acquire()\n+ n.linkResQueue.enqueue(ch, r, protocol, &pkts)\n+ return pkts.Len(), nil\n+ }\n+ return 0, err\n+ }\n+\n+ return n.writePackets(r.Fields(), gso, protocol, pkts)\n+}\n+\n+func (n *NIC) writePackets(r RouteInfo, gso *GSO, protocol tcpip.NetworkProtocolNumber, pkts PacketBufferList) (int, *tcpip.Error) {\nfor pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {\n- pkt.EgressRoute = routeInfo\n+ pkt.EgressRoute = r\npkt.GSOOptions = gso\npkt.NetworkProtocolNumber = protocol\n}\n- writtenPackets, err := n.LinkEndpoint.WritePackets(routeInfo, gso, pkts, protocol)\n+ writtenPackets, err := n.LinkEndpoint.WritePackets(r, gso, pkts, protocol)\nn.stats.Tx.Packets.IncrementBy(uint64(writtenPackets))\nwrittenBytes := 0\nfor i, pb := 0, pkts.Front(); i < writtenPackets && pb != nil; i, pb = i+1, pb.Next() {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/pending_packets.go", "new_path": "pkg/tcpip/stack/pending_packets.go", "diff": "@@ -28,10 +28,26 @@ const (\nmaxPendingPacketsPerResolution = 256\n)\n+// pendingPacketBuffer is a pending packet buffer.\n+//\n+// TODO(gvisor.dev/issue/5331): Drop this when we drop WritePacket and only use\n+// WritePackets so we can use a PacketBufferList everywhere.\n+type pendingPacketBuffer interface {\n+ len() int\n+}\n+\n+func (*PacketBuffer) len() int {\n+ return 1\n+}\n+\n+func (p *PacketBufferList) len() int {\n+ return p.Len()\n+}\n+\ntype pendingPacket struct {\nroute *Route\nproto tcpip.NetworkProtocolNumber\n- pkt *PacketBuffer\n+ pkt pendingPacketBuffer\n}\n// packetsPendingLinkResolution is a queue of packets pending link resolution.\n@@ -54,16 +70,17 @@ func (f *packetsPendingLinkResolution) init() {\nf.packets = make(map[<-chan struct{}][]pendingPacket)\n}\n-func incrementOutgoingPacketErrors(r *Route, proto tcpip.NetworkProtocolNumber) {\n- r.Stats().IP.OutgoingPacketErrors.Increment()\n+func incrementOutgoingPacketErrors(r *Route, proto tcpip.NetworkProtocolNumber, pkt pendingPacketBuffer) {\n+ n := uint64(pkt.len())\n+ r.Stats().IP.OutgoingPacketErrors.IncrementBy(n)\n// ok may be false if the endpoint's stats do not collect IP-related data.\nif ipEndpointStats, ok := r.outgoingNIC.getNetworkEndpoint(proto).Stats().(IPNetworkEndpointStats); ok {\n- ipEndpointStats.IPStats().OutgoingPacketErrors.Increment()\n+ ipEndpointStats.IPStats().OutgoingPacketErrors.IncrementBy(n)\n}\n}\n-func (f *packetsPendingLinkResolution) enqueue(ch <-chan struct{}, r *Route, proto tcpip.NetworkProtocolNumber, pkt *PacketBuffer) {\n+func (f *packetsPendingLinkResolution) enqueue(ch <-chan struct{}, r *Route, proto tcpip.NetworkProtocolNumber, pkt pendingPacketBuffer) {\nf.Lock()\ndefer f.Unlock()\n@@ -73,7 +90,7 @@ func (f *packetsPendingLinkResolution) enqueue(ch <-chan struct{}, r *Route, pro\npackets[0] = pendingPacket{}\npackets = packets[1:]\n- incrementOutgoingPacketErrors(r, proto)\n+ incrementOutgoingPacketErrors(r, proto, p.pkt)\np.route.Release()\n}\n@@ -113,13 +130,29 @@ func (f *packetsPendingLinkResolution) enqueue(ch <-chan struct{}, r *Route, pro\nfor _, p := range packets {\nif cancelled || p.route.IsResolutionRequired() {\n- 
incrementOutgoingPacketErrors(r, proto)\n+ incrementOutgoingPacketErrors(r, proto, p.pkt)\nif linkResolvableEP, ok := p.route.outgoingNIC.getNetworkEndpoint(p.route.NetProto).(LinkResolvableNetworkEndpoint); ok {\n+ switch pkt := p.pkt.(type) {\n+ case *PacketBuffer:\nlinkResolvableEP.HandleLinkResolutionFailure(pkt)\n+ case *PacketBufferList:\n+ for pb := pkt.Front(); pb != nil; pb = pb.Next() {\n+ linkResolvableEP.HandleLinkResolutionFailure(pb)\n+ }\n+ default:\n+ panic(fmt.Sprintf(\"unrecognized pending packet buffer type = %T\", p.pkt))\n+ }\n}\n} else {\n- p.route.outgoingNIC.writePacket(p.route.Fields(), nil /* gso */, p.proto, p.pkt)\n+ switch pkt := p.pkt.(type) {\n+ case *PacketBuffer:\n+ p.route.outgoingNIC.writePacket(p.route.Fields(), nil /* gso */, p.proto, pkt)\n+ case *PacketBufferList:\n+ p.route.outgoingNIC.writePackets(p.route.Fields(), nil /* gso */, p.proto, *pkt)\n+ default:\n+ panic(fmt.Sprintf(\"unrecognized pending packet buffer type = %T\", p.pkt))\n+ }\n}\np.route.Release()\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/integration/link_resolution_test.go", "new_path": "pkg/tcpip/tests/integration/link_resolution_test.go", "diff": "@@ -23,6 +23,7 @@ import (\n\"github.com/google/go-cmp/cmp\"\n\"github.com/google/go-cmp/cmp/cmpopts\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/checker\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/pipe\"\n@@ -32,6 +33,7 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n\"gvisor.dev/gvisor/pkg/tcpip/transport/icmp\"\n\"gvisor.dev/gvisor/pkg/tcpip/transport/tcp\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/transport/udp\"\n\"gvisor.dev/gvisor/pkg/waiter\"\n)\n@@ -456,3 +458,126 @@ func TestGetLinkAddress(t *testing.T) {\n})\n}\n}\n+\n+func TestWritePacketsLinkResolution(t *testing.T) {\n+ const (\n+ host1NICID = 1\n+ host2NICID = 4\n+ )\n+\n+ tests := []struct {\n+ name string\n+ netProto tcpip.NetworkProtocolNumber\n+ remoteAddr tcpip.Address\n+ expectedWriteErr *tcpip.Error\n+ }{\n+ {\n+ name: \"IPv4\",\n+ netProto: ipv4.ProtocolNumber,\n+ remoteAddr: ipv4Addr2.AddressWithPrefix.Address,\n+ expectedWriteErr: nil,\n+ },\n+ {\n+ name: \"IPv6\",\n+ netProto: ipv6.ProtocolNumber,\n+ remoteAddr: ipv6Addr2.AddressWithPrefix.Address,\n+ expectedWriteErr: nil,\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ stackOpts := stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol, ipv6.NewProtocol},\n+ TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},\n+ }\n+\n+ host1Stack, host2Stack := setupStack(t, stackOpts, host1NICID, host2NICID)\n+\n+ var serverWQ waiter.Queue\n+ serverWE, serverCH := waiter.NewChannelEntry(nil)\n+ serverWQ.EventRegister(&serverWE, waiter.EventIn)\n+ serverEP, err := host2Stack.NewEndpoint(udp.ProtocolNumber, test.netProto, &serverWQ)\n+ if err != nil {\n+ t.Fatalf(\"host2Stack.NewEndpoint(%d, %d, _): %s\", udp.ProtocolNumber, test.netProto, err)\n+ }\n+ defer serverEP.Close()\n+\n+ serverAddr := tcpip.FullAddress{Port: 1234}\n+ if err := serverEP.Bind(serverAddr); err != nil {\n+ t.Fatalf(\"serverEP.Bind(%#v): %s\", serverAddr, err)\n+ }\n+\n+ r, err := host1Stack.FindRoute(host1NICID, \"\", test.remoteAddr, test.netProto, false /* multicastLoop */)\n+ if err != nil {\n+ t.Fatalf(\"host1Stack.FindRoute(%d, '', %s, %d, false): %s\", host1NICID, test.remoteAddr, test.netProto, err)\n+ }\n+ defer r.Release()\n+\n+ 
data := []byte{1, 2}\n+ var pkts stack.PacketBufferList\n+ for _, d := range data {\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ ReserveHeaderBytes: header.UDPMinimumSize + int(r.MaxHeaderLength()),\n+ Data: buffer.View([]byte{d}).ToVectorisedView(),\n+ })\n+ pkt.TransportProtocolNumber = udp.ProtocolNumber\n+ length := uint16(pkt.Size())\n+ udpHdr := header.UDP(pkt.TransportHeader().Push(header.UDPMinimumSize))\n+ udpHdr.Encode(&header.UDPFields{\n+ SrcPort: 5555,\n+ DstPort: serverAddr.Port,\n+ Length: length,\n+ })\n+ xsum := r.PseudoHeaderChecksum(udp.ProtocolNumber, length)\n+ for _, v := range pkt.Data.Views() {\n+ xsum = header.Checksum(v, xsum)\n+ }\n+ udpHdr.SetChecksum(^udpHdr.CalculateChecksum(xsum))\n+\n+ pkts.PushBack(pkt)\n+ }\n+\n+ params := stack.NetworkHeaderParams{\n+ Protocol: udp.ProtocolNumber,\n+ TTL: 64,\n+ TOS: stack.DefaultTOS,\n+ }\n+\n+ if n, err := r.WritePackets(nil /* gso */, pkts, params); err != nil {\n+ t.Fatalf(\"r.WritePackets(nil, %#v, _): %s\", params, err)\n+ } else if want := pkts.Len(); want != n {\n+ t.Fatalf(\"got r.WritePackets(nil, %#v, _) = %d, want = %d\", n, params, want)\n+ }\n+\n+ var writer bytes.Buffer\n+ count := 0\n+ for {\n+ var rOpts tcpip.ReadOptions\n+ res, err := serverEP.Read(&writer, rOpts)\n+ if err != nil {\n+ if err == tcpip.ErrWouldBlock {\n+ // Should not have anymore bytes to read after we read the sent\n+ // number of bytes.\n+ if count == len(data) {\n+ break\n+ }\n+\n+ <-serverCH\n+ continue\n+ }\n+\n+ t.Fatalf(\"serverEP.Read(_, %#v): %s\", rOpts, err)\n+ }\n+ count += res.Count\n+ }\n+\n+ if got, want := host2Stack.Stats().UDP.PacketsReceived.Value(), uint64(len(data)); got != want {\n+ t.Errorf(\"got host2Stack.Stats().UDP.PacketsReceived.Value() = %d, want = %d\", got, want)\n+ }\n+ if diff := cmp.Diff(data, writer.Bytes()); diff != \"\" {\n+ t.Errorf(\"read bytes mismatch (-want +got):\\n%s\", diff)\n+ }\n+ })\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Queue packets in WritePackets when resolving link address Test: integration_test.TestWritePacketsLinkResolution Fixes #4458. PiperOrigin-RevId: 353108826
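The queueing behaviour described above (per RFC 1122 §2.3.2.2 and RFC 4861 §7.2.2: retain a small per-neighbor queue while link resolution is in flight, then transmit or drop) can be illustrated with a small generic sketch; the types and channel protocol here are assumptions for illustration, not the netstack code:

```go
package example

// pendingQueue holds packets (string stand-ins here) waiting on one
// link-resolution attempt.
type pendingQueue struct {
	packets []string
}

// flushWhenDone blocks until the resolution attempt completes, then sends the
// queued packets if resolution succeeded or drops them otherwise.
func (q *pendingQueue) flushWhenDone(done <-chan struct{}, succeeded func() bool, send func(string)) {
	<-done
	if succeeded() {
		for _, p := range q.packets {
			send(p)
		}
	}
	// On failure the packets are simply dropped; a real stack would also bump
	// outgoing-packet error counters here.
	q.packets = nil
}
```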
259,858
21.01.2021 15:39:24
28,800
48dfb8db9e784604e9c7ad8e1a36cc862dac1b4d
Add image presubmit tests and mark fsstress x86_64 only.
[ { "change_type": "MODIFY", "old_path": ".buildkite/pipeline.yaml", "new_path": ".buildkite/pipeline.yaml", "diff": "@@ -45,6 +45,14 @@ steps:\n- make BAZEL_OPTIONS=--config=cross-aarch64 artifacts/aarch64\n- make release\n+ # Images tests.\n+ - <<: *common\n+ label: \":docker: Images (x86_64)\"\n+ command: make ARCH=x86_64 load-all-images\n+ - <<: *common\n+ label: \":docker: Images (aarch64)\"\n+ command: make ARCH=aarch64 load-all-images\n+\n# Basic unit tests.\n- <<: *common\nlabel: \":test_tube: Unit tests\"\n" }, { "change_type": "RENAME", "old_path": "images/basic/fsstress/Dockerfile", "new_path": "images/basic/fsstress/Dockerfile.x86_64", "diff": "" } ]
Go
Apache License 2.0
google/gvisor
Add image presubmit tests and mark fsstress x86_64 only. PiperOrigin-RevId: 353118942
260,004
21.01.2021 19:53:31
28,800
9f46328e1174be6b8b5442467050ad0b2f0b260f
Only use callback for GetLinkAddress. GetLinkAddress's callback will be called immediately with a stack.LinkResolutionResult which will hold the link address, so there is no need to also return the link address from the function. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ndp_test.go", "new_path": "pkg/tcpip/network/ipv6/ndp_test.go", "diff": "@@ -162,6 +162,11 @@ func TestStackNDPEndpointInvalidateDefaultRouter(t *testing.T) {\n}\n}\n+type linkResolutionResult struct {\n+ linkAddr tcpip.LinkAddress\n+ ok bool\n+}\n+\n// TestNeighorSolicitationWithSourceLinkLayerOption tests that receiving a\n// valid NDP NS message with the Source Link Layer Address option results in a\n// new entry in the link address cache for the sender of the message.\n@@ -231,35 +236,28 @@ func TestNeighorSolicitationWithSourceLinkLayerOption(t *testing.T) {\nData: hdr.View().ToVectorisedView(),\n}))\n- linkAddr, c, err := s.GetLinkAddress(nicID, lladdr1, lladdr0, ProtocolNumber, nil)\n- if linkAddr != test.expectedLinkAddr {\n- t.Errorf(\"got link address = %s, want = %s\", linkAddr, test.expectedLinkAddr)\n- }\n+ ch := make(chan stack.LinkResolutionResult, 1)\n+ err := s.GetLinkAddress(nicID, lladdr1, lladdr0, ProtocolNumber, func(r stack.LinkResolutionResult) {\n+ ch <- r\n+ })\n- if test.expectedLinkAddr != \"\" {\n- if err != nil {\n- t.Errorf(\"s.GetLinkAddress(%d, %s, %s, %d, nil): %s\", nicID, lladdr1, lladdr0, ProtocolNumber, err)\n- }\n- if c != nil {\n- t.Errorf(\"got unexpected channel\")\n+ wantInvalid := uint64(0)\n+ wantErr := (*tcpip.Error)(nil)\n+ wantSucccess := true\n+ if len(test.expectedLinkAddr) == 0 {\n+ wantInvalid = 1\n+ wantErr = tcpip.ErrWouldBlock\n+ wantSucccess = false\n}\n- // Invalid count should not have increased.\n- if got := invalid.Value(); got != 0 {\n- t.Errorf(\"got invalid = %d, want = 0\", got)\n- }\n- } else {\n- if err != tcpip.ErrWouldBlock {\n- t.Errorf(\"got s.GetLinkAddress(%d, %s, %s, %d, nil) = (_, _, %v), want = (_, _, %s)\", nicID, lladdr1, lladdr0, ProtocolNumber, err, tcpip.ErrWouldBlock)\n- }\n- if c == nil {\n- t.Errorf(\"expected channel from call to s.GetLinkAddress(%d, %s, %s, %d, nil)\", nicID, lladdr1, lladdr0, ProtocolNumber)\n+ if err != wantErr {\n+ t.Errorf(\"got s.GetLinkAddress(%d, %s, %s, %d, _) = %s, want = %s\", nicID, lladdr1, lladdr0, ProtocolNumber, err, wantErr)\n}\n-\n- // Invalid count should have increased.\n- if got := invalid.Value(); got != 1 {\n- t.Errorf(\"got invalid = %d, want = 1\", got)\n+ if diff := cmp.Diff(stack.LinkResolutionResult{LinkAddress: test.expectedLinkAddr, Success: wantSucccess}, <-ch); diff != \"\" {\n+ t.Errorf(\"linkResolutionResult mismatch (-want +got):\\n%s\", diff)\n}\n+ if got := invalid.Value(); got != wantInvalid {\n+ t.Errorf(\"got invalid = %d, want = %d\", got, wantInvalid)\n}\n})\n}\n@@ -803,35 +801,28 @@ func TestNeighorAdvertisementWithTargetLinkLayerOption(t *testing.T) {\nData: hdr.View().ToVectorisedView(),\n}))\n- linkAddr, c, err := s.GetLinkAddress(nicID, lladdr1, lladdr0, ProtocolNumber, nil)\n- if linkAddr != test.expectedLinkAddr {\n- t.Errorf(\"got link address = %s, want = %s\", linkAddr, test.expectedLinkAddr)\n- }\n+ ch := make(chan stack.LinkResolutionResult, 1)\n+ err := s.GetLinkAddress(nicID, lladdr1, lladdr0, ProtocolNumber, func(r stack.LinkResolutionResult) {\n+ ch <- r\n+ })\n- if test.expectedLinkAddr != \"\" {\n- if err != nil {\n- t.Errorf(\"s.GetLinkAddress(%d, %s, %s, %d, nil): %s\", nicID, lladdr1, lladdr0, ProtocolNumber, err)\n- }\n- if c != nil {\n- t.Errorf(\"got unexpected channel\")\n+ wantInvalid := uint64(0)\n+ wantErr := (*tcpip.Error)(nil)\n+ wantSucccess := true\n+ if len(test.expectedLinkAddr) == 0 {\n+ wantInvalid = 1\n+ wantErr = tcpip.ErrWouldBlock\n+ wantSucccess 
= false\n}\n- // Invalid count should not have increased.\n- if got := invalid.Value(); got != 0 {\n- t.Errorf(\"got invalid = %d, want = 0\", got)\n+ if err != wantErr {\n+ t.Errorf(\"got s.GetLinkAddress(%d, %s, %s, %d, _) = %s, want = %s\", nicID, lladdr1, lladdr0, ProtocolNumber, err, wantErr)\n}\n- } else {\n- if err != tcpip.ErrWouldBlock {\n- t.Errorf(\"got s.GetLinkAddress(%d, %s, %s, %d, nil) = (_, _, %v), want = (_, _, %s)\", nicID, lladdr1, lladdr0, ProtocolNumber, err, tcpip.ErrWouldBlock)\n- }\n- if c == nil {\n- t.Errorf(\"expected channel from call to s.GetLinkAddress(%d, %s, %s, %d, nil)\", nicID, lladdr1, lladdr0, ProtocolNumber)\n- }\n-\n- // Invalid count should have increased.\n- if got := invalid.Value(); got != 1 {\n- t.Errorf(\"got invalid = %d, want = 1\", got)\n+ if diff := cmp.Diff(stack.LinkResolutionResult{LinkAddress: test.expectedLinkAddr, Success: wantSucccess}, <-ch); diff != \"\" {\n+ t.Errorf(\"linkResolutionResult mismatch (-want +got):\\n%s\", diff)\n}\n+ if got := invalid.Value(); got != wantInvalid {\n+ t.Errorf(\"got invalid = %d, want = %d\", got, wantInvalid)\n}\n})\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/linkaddrcache.go", "new_path": "pkg/tcpip/stack/linkaddrcache.go", "diff": "@@ -97,12 +97,13 @@ type linkAddrEntry struct {\ndone chan struct{}\n// onResolve is called with the result of address resolution.\n- onResolve []func(tcpip.LinkAddress, bool)\n+ onResolve []func(LinkResolutionResult)\n}\nfunc (e *linkAddrEntry) notifyCompletionLocked(linkAddr tcpip.LinkAddress) {\n+ res := LinkResolutionResult{LinkAddress: linkAddr, Success: len(linkAddr) != 0}\nfor _, callback := range e.onResolve {\n- callback(linkAddr, len(linkAddr) != 0)\n+ callback(res)\n}\ne.onResolve = nil\nif ch := e.done; ch != nil {\n@@ -196,7 +197,7 @@ func (c *linkAddrCache) getOrCreateEntryLocked(k tcpip.Address) *linkAddrEntry {\n}\n// get reports any known link address for k.\n-func (c *linkAddrCache) get(k tcpip.Address, linkRes LinkAddressResolver, localAddr tcpip.Address, nic NetworkInterface, onResolve func(tcpip.LinkAddress, bool)) (tcpip.LinkAddress, <-chan struct{}, *tcpip.Error) {\n+func (c *linkAddrCache) get(k tcpip.Address, linkRes LinkAddressResolver, localAddr tcpip.Address, nic NetworkInterface, onResolve func(LinkResolutionResult)) (tcpip.LinkAddress, <-chan struct{}, *tcpip.Error) {\nc.cache.Lock()\ndefer c.cache.Unlock()\nentry := c.getOrCreateEntryLocked(k)\n@@ -208,7 +209,7 @@ func (c *linkAddrCache) get(k tcpip.Address, linkRes LinkAddressResolver, localA\nif !time.Now().After(entry.expiration) {\n// Not expired.\nif onResolve != nil {\n- onResolve(entry.linkAddr, true)\n+ onResolve(LinkResolutionResult{LinkAddress: entry.linkAddr, Success: true})\n}\nreturn entry.linkAddr, nil, nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/neighbor_cache.go", "new_path": "pkg/tcpip/stack/neighbor_cache.go", "diff": "@@ -126,7 +126,7 @@ func (n *neighborCache) getOrCreateEntry(remoteAddr tcpip.Address, linkRes LinkA\n// packet prompting NUD/link address resolution.\n//\n// TODO(gvisor.dev/issue/5151): Don't return the neighbor entry.\n-func (n *neighborCache) entry(remoteAddr, localAddr tcpip.Address, linkRes LinkAddressResolver, onResolve func(tcpip.LinkAddress, bool)) (NeighborEntry, <-chan struct{}, *tcpip.Error) {\n+func (n *neighborCache) entry(remoteAddr, localAddr tcpip.Address, linkRes LinkAddressResolver, onResolve func(LinkResolutionResult)) (NeighborEntry, <-chan struct{}, *tcpip.Error) {\nentry := 
n.getOrCreateEntry(remoteAddr, linkRes)\nentry.mu.Lock()\ndefer entry.mu.Unlock()\n@@ -142,7 +142,7 @@ func (n *neighborCache) entry(remoteAddr, localAddr tcpip.Address, linkRes LinkA\n// a node continues sending packets to that neighbor using the cached\n// link-layer address.\"\nif onResolve != nil {\n- onResolve(entry.neigh.LinkAddr, true)\n+ onResolve(LinkResolutionResult{LinkAddress: entry.neigh.LinkAddr, Success: true})\n}\nreturn entry.neigh, nil, nil\ncase Unknown, Incomplete, Failed:\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/neighbor_cache_test.go", "new_path": "pkg/tcpip/stack/neighbor_cache_test.go", "diff": "@@ -1188,12 +1188,9 @@ func TestNeighborCacheKeepFrequentlyUsed(t *testing.T) {\nif !ok {\nt.Fatalf(\"store.entry(%d) not found\", i)\n}\n- _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(linkAddr tcpip.LinkAddress, ok bool) {\n- if !ok {\n- t.Fatal(\"expected successful address resolution\")\n- }\n- if linkAddr != entry.LinkAddr {\n- t.Fatalf(\"got linkAddr = %s, want = %s\", linkAddr, entry.LinkAddr)\n+ _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(r LinkResolutionResult) {\n+ if diff := cmp.Diff(LinkResolutionResult{LinkAddress: entry.LinkAddr, Success: true}, r); diff != \"\" {\n+ t.Fatalf(\"got link resolution result mismatch (-want +got):\\n%s\", diff)\n}\n})\nif err != tcpip.ErrWouldBlock {\n@@ -1247,12 +1244,9 @@ func TestNeighborCacheKeepFrequentlyUsed(t *testing.T) {\nt.Fatalf(\"store.entry(%d) not found\", i)\n}\n- _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(linkAddr tcpip.LinkAddress, ok bool) {\n- if !ok {\n- t.Fatal(\"expected successful address resolution\")\n- }\n- if linkAddr != entry.LinkAddr {\n- t.Fatalf(\"got linkAddr = %s, want = %s\", linkAddr, entry.LinkAddr)\n+ _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(r LinkResolutionResult) {\n+ if diff := cmp.Diff(LinkResolutionResult{LinkAddress: entry.LinkAddr, Success: true}, r); diff != \"\" {\n+ t.Fatalf(\"got link resolution result mismatch (-want +got):\\n%s\", diff)\n}\n})\nif err != tcpip.ErrWouldBlock {\n@@ -1423,12 +1417,9 @@ func TestNeighborCacheReplace(t *testing.T) {\nt.Fatal(\"store.entry(0) not found\")\n}\n- _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(linkAddr tcpip.LinkAddress, ok bool) {\n- if !ok {\n- t.Fatal(\"expected successful address resolution\")\n- }\n- if linkAddr != entry.LinkAddr {\n- t.Fatalf(\"got linkAddr = %s, want = %s\", linkAddr, entry.LinkAddr)\n+ _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(r LinkResolutionResult) {\n+ if diff := cmp.Diff(LinkResolutionResult{LinkAddress: entry.LinkAddr, Success: true}, r); diff != \"\" {\n+ t.Fatalf(\"got link resolution result mismatch (-want +got):\\n%s\", diff)\n}\n})\nif err != tcpip.ErrWouldBlock {\n@@ -1539,12 +1530,9 @@ func TestNeighborCacheResolutionFailed(t *testing.T) {\n// First, sanity check that resolution is working\n{\n- _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(linkAddr tcpip.LinkAddress, ok bool) {\n- if !ok {\n- t.Fatal(\"expected successful address resolution\")\n- }\n- if linkAddr != entry.LinkAddr {\n- t.Fatalf(\"got linkAddr = %s, want = %s\", linkAddr, entry.LinkAddr)\n+ _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(r LinkResolutionResult) {\n+ if diff := cmp.Diff(LinkResolutionResult{LinkAddress: entry.LinkAddr, Success: true}, r); diff != \"\" {\n+ t.Fatalf(\"got link resolution result mismatch (-want +got):\\n%s\", diff)\n}\n})\nif err != tcpip.ErrWouldBlock {\n@@ -1576,15 +1564,9 @@ func 
TestNeighborCacheResolutionFailed(t *testing.T) {\nentry.Addr += \"2\"\n{\n- _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(linkAddr tcpip.LinkAddress, ok bool) {\n- if ok {\n- t.Error(\"expected unsuccessful address resolution\")\n- }\n- if len(linkAddr) != 0 {\n- t.Fatalf(\"got linkAddr = %s, want = \\\"\\\"\", linkAddr)\n- }\n- if t.Failed() {\n- t.FailNow()\n+ _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(r LinkResolutionResult) {\n+ if diff := cmp.Diff(LinkResolutionResult{Success: false}, r); diff != \"\" {\n+ t.Fatalf(\"got link resolution result mismatch (-want +got):\\n%s\", diff)\n}\n})\nif err != tcpip.ErrWouldBlock {\n@@ -1627,15 +1609,9 @@ func TestNeighborCacheResolutionTimeout(t *testing.T) {\nt.Fatal(\"store.entry(0) not found\")\n}\n- _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(linkAddr tcpip.LinkAddress, ok bool) {\n- if ok {\n- t.Error(\"expected unsuccessful address resolution\")\n- }\n- if len(linkAddr) != 0 {\n- t.Fatalf(\"got linkAddr = %s, want = \\\"\\\"\", linkAddr)\n- }\n- if t.Failed() {\n- t.FailNow()\n+ _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(r LinkResolutionResult) {\n+ if diff := cmp.Diff(LinkResolutionResult{Success: false}, r); diff != \"\" {\n+ t.Fatalf(\"got link resolution result mismatch (-want +got):\\n%s\", diff)\n}\n})\nif err != tcpip.ErrWouldBlock {\n@@ -1674,15 +1650,9 @@ func TestNeighborCacheRetryResolution(t *testing.T) {\n// Perform address resolution with a faulty link, which will fail.\n{\n- _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(linkAddr tcpip.LinkAddress, ok bool) {\n- if ok {\n- t.Error(\"expected unsuccessful address resolution\")\n- }\n- if len(linkAddr) != 0 {\n- t.Fatalf(\"got linkAddr = %s, want = \\\"\\\"\", linkAddr)\n- }\n- if t.Failed() {\n- t.FailNow()\n+ _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(r LinkResolutionResult) {\n+ if diff := cmp.Diff(LinkResolutionResult{Success: false}, r); diff != \"\" {\n+ t.Fatalf(\"got link resolution result mismatch (-want +got):\\n%s\", diff)\n}\n})\nif err != tcpip.ErrWouldBlock {\n@@ -1713,9 +1683,9 @@ func TestNeighborCacheRetryResolution(t *testing.T) {\n// Retry address resolution with a working link.\nlinkRes.dropReplies = false\n{\n- incompleteEntry, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(linkAddr tcpip.LinkAddress, ok bool) {\n- if linkAddr != entry.LinkAddr {\n- t.Fatalf(\"got linkAddr = %s, want = %s\", linkAddr, entry.LinkAddr)\n+ incompleteEntry, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(r LinkResolutionResult) {\n+ if diff := cmp.Diff(LinkResolutionResult{LinkAddress: entry.LinkAddr, Success: true}, r); diff != \"\" {\n+ t.Fatalf(\"got link resolution result mismatch (-want +got):\\n%s\", diff)\n}\n})\nif err != tcpip.ErrWouldBlock {\n@@ -1772,12 +1742,9 @@ func BenchmarkCacheClear(b *testing.B) {\nb.Fatalf(\"store.entry(%d) not found\", i)\n}\n- _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(linkAddr tcpip.LinkAddress, ok bool) {\n- if !ok {\n- b.Fatal(\"expected successful address resolution\")\n- }\n- if linkAddr != entry.LinkAddr {\n- b.Fatalf(\"got linkAddr = %s, want = %s\", linkAddr, entry.LinkAddr)\n+ _, ch, err := neigh.entry(entry.Addr, \"\", linkRes, func(r LinkResolutionResult) {\n+ if diff := cmp.Diff(LinkResolutionResult{LinkAddress: entry.LinkAddr, Success: true}, r); diff != \"\" {\n+ b.Fatalf(\"got link resolution result mismatch (-want +got):\\n%s\", diff)\n}\n})\nif err != tcpip.ErrWouldBlock {\n" }, { "change_type": "MODIFY", 
"old_path": "pkg/tcpip/stack/neighbor_entry.go", "new_path": "pkg/tcpip/stack/neighbor_entry.go", "diff": "@@ -96,7 +96,7 @@ type neighborEntry struct {\ndone chan struct{}\n// onResolve is called with the result of address resolution.\n- onResolve []func(tcpip.LinkAddress, bool)\n+ onResolve []func(LinkResolutionResult)\nisRouter bool\njob *tcpip.Job\n@@ -143,8 +143,9 @@ func newStaticNeighborEntry(nic *NIC, addr tcpip.Address, linkAddr tcpip.LinkAdd\n//\n// Precondition: e.mu MUST be locked.\nfunc (e *neighborEntry) notifyCompletionLocked(succeeded bool) {\n+ res := LinkResolutionResult{LinkAddress: e.neigh.LinkAddr, Success: succeeded}\nfor _, callback := range e.onResolve {\n- callback(e.neigh.LinkAddr, succeeded)\n+ callback(res)\n}\ne.onResolve = nil\nif ch := e.done; ch != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/nic.go", "new_path": "pkg/tcpip/stack/nic.go", "diff": "@@ -561,7 +561,7 @@ func (n *NIC) removeAddress(addr tcpip.Address) *tcpip.Error {\nreturn tcpip.ErrBadLocalAddress\n}\n-func (n *NIC) getNeighborLinkAddress(addr, localAddr tcpip.Address, linkRes LinkAddressResolver, onResolve func(tcpip.LinkAddress, bool)) (tcpip.LinkAddress, <-chan struct{}, *tcpip.Error) {\n+func (n *NIC) getNeighborLinkAddress(addr, localAddr tcpip.Address, linkRes LinkAddressResolver, onResolve func(LinkResolutionResult)) (tcpip.LinkAddress, <-chan struct{}, *tcpip.Error) {\nif n.neigh != nil {\nentry, ch, err := n.neigh.entry(addr, localAddr, linkRes, onResolve)\nreturn entry.LinkAddr, ch, err\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/route.go", "new_path": "pkg/tcpip/stack/route.go", "diff": "@@ -347,7 +347,7 @@ func (r *Route) ResolvedFields(afterResolve func()) (RouteInfo, <-chan struct{},\nlinkAddressResolutionRequestLocalAddr = r.LocalAddress\n}\n- linkAddr, ch, err := r.outgoingNIC.getNeighborLinkAddress(nextAddr, linkAddressResolutionRequestLocalAddr, r.linkRes, func(tcpip.LinkAddress, bool) {\n+ linkAddr, ch, err := r.outgoingNIC.getNeighborLinkAddress(nextAddr, linkAddressResolutionRequestLocalAddr, r.linkRes, func(LinkResolutionResult) {\nif afterResolve != nil {\nafterResolve()\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack.go", "new_path": "pkg/tcpip/stack/stack.go", "diff": "@@ -1527,10 +1527,14 @@ func (s *Stack) AddLinkAddress(nicID tcpip.NICID, neighbor tcpip.Address, linkAd\nreturn nil\n}\n+// LinkResolutionResult is the result of a link address resolution attempt.\n+type LinkResolutionResult struct {\n+ LinkAddress tcpip.LinkAddress\n+ Success bool\n+}\n+\n// GetLinkAddress finds the link address corresponding to a neighbor's address.\n//\n-// Returns a link address for the remote address, if readily available.\n-//\n// Returns ErrNotSupported if the stack is not configured with a link address\n// resolver for the specified network protocol.\n//\n@@ -1538,30 +1542,28 @@ func (s *Stack) AddLinkAddress(nicID tcpip.NICID, neighbor tcpip.Address, linkAd\n// with a notification channel for the caller to block on. Triggers address\n// resolution asynchronously.\n//\n-// If onResolve is provided, it will be called either immediately, if\n-// resolution is not required, or when address resolution is complete, with\n-// the resolved link address and whether resolution succeeded. 
After any\n-// callbacks have been called, the returned notification channel is closed.\n+// onResolve will be called either immediately, if resolution is not required,\n+// or when address resolution is complete, with the resolved link address and\n+// whether resolution succeeded.\n//\n// If specified, the local address must be an address local to the interface\n// the neighbor cache belongs to. The local address is the source address of\n// a packet prompting NUD/link address resolution.\n-//\n-// TODO(gvisor.dev/issue/5151): Don't return the link address.\n-func (s *Stack) GetLinkAddress(nicID tcpip.NICID, addr, localAddr tcpip.Address, protocol tcpip.NetworkProtocolNumber, onResolve func(tcpip.LinkAddress, bool)) (tcpip.LinkAddress, <-chan struct{}, *tcpip.Error) {\n+func (s *Stack) GetLinkAddress(nicID tcpip.NICID, addr, localAddr tcpip.Address, protocol tcpip.NetworkProtocolNumber, onResolve func(LinkResolutionResult)) *tcpip.Error {\ns.mu.RLock()\nnic, ok := s.nics[nicID]\ns.mu.RUnlock()\nif !ok {\n- return \"\", nil, tcpip.ErrUnknownNICID\n+ return tcpip.ErrUnknownNICID\n}\nlinkRes, ok := s.linkAddrResolvers[protocol]\nif !ok {\n- return \"\", nil, tcpip.ErrNotSupported\n+ return tcpip.ErrNotSupported\n}\n- return nic.getNeighborLinkAddress(addr, localAddr, linkRes, onResolve)\n+ _, _, err := nic.getNeighborLinkAddress(addr, localAddr, linkRes, onResolve)\n+ return err\n}\n// Neighbors returns all IP to MAC address associations.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack_test.go", "new_path": "pkg/tcpip/stack/stack_test.go", "diff": "@@ -4384,10 +4384,10 @@ func TestGetLinkAddressErrors(t *testing.T) {\nt.Fatalf(\"CreateNIC(%d, _) = %s\", nicID, err)\n}\n- if addr, _, err := s.GetLinkAddress(unknownNICID, \"\", \"\", ipv4.ProtocolNumber, nil); err != tcpip.ErrUnknownNICID {\n- t.Errorf(\"got s.GetLinkAddress(%d, '', '', %d, nil) = (%s, _, %s), want = (_, _, %s)\", unknownNICID, ipv4.ProtocolNumber, addr, err, tcpip.ErrUnknownNICID)\n+ if err := s.GetLinkAddress(unknownNICID, \"\", \"\", ipv4.ProtocolNumber, nil); err != tcpip.ErrUnknownNICID {\n+ t.Errorf(\"got s.GetLinkAddress(%d, '', '', %d, nil) = %s, want = %s\", unknownNICID, ipv4.ProtocolNumber, err, tcpip.ErrUnknownNICID)\n}\n- if addr, _, err := s.GetLinkAddress(nicID, \"\", \"\", ipv4.ProtocolNumber, nil); err != tcpip.ErrNotSupported {\n- t.Errorf(\"got s.GetLinkAddress(%d, '', '', %d, nil) = (%s, _, %s), want = (_, _, %s)\", unknownNICID, ipv4.ProtocolNumber, addr, err, tcpip.ErrNotSupported)\n+ if err := s.GetLinkAddress(nicID, \"\", \"\", ipv4.ProtocolNumber, nil); err != tcpip.ErrNotSupported {\n+ t.Errorf(\"got s.GetLinkAddress(%d, '', '', %d, nil) = %s, want = %s\", unknownNICID, ipv4.ProtocolNumber, err, tcpip.ErrNotSupported)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/integration/link_resolution_test.go", "new_path": "pkg/tcpip/tests/integration/link_resolution_test.go", "diff": "@@ -409,17 +409,31 @@ func TestGetLinkAddress(t *testing.T) {\nname string\nnetProto tcpip.NetworkProtocolNumber\nremoteAddr tcpip.Address\n- expectedLinkAddr bool\n+ expectedOk bool\n}{\n{\n- name: \"IPv4\",\n+ name: \"IPv4 resolvable\",\nnetProto: ipv4.ProtocolNumber,\nremoteAddr: ipv4Addr2.AddressWithPrefix.Address,\n+ expectedOk: true,\n},\n{\n- name: \"IPv6\",\n+ name: \"IPv6 resolvable\",\nnetProto: ipv6.ProtocolNumber,\nremoteAddr: ipv6Addr2.AddressWithPrefix.Address,\n+ expectedOk: true,\n+ },\n+ {\n+ name: \"IPv4 not resolvable\",\n+ netProto: ipv4.ProtocolNumber,\n+ remoteAddr: 
ipv4Addr3.AddressWithPrefix.Address,\n+ expectedOk: false,\n+ },\n+ {\n+ name: \"IPv6 not resolvable\",\n+ netProto: ipv6.ProtocolNumber,\n+ remoteAddr: ipv6Addr3.AddressWithPrefix.Address,\n+ expectedOk: false,\n},\n}\n@@ -434,24 +448,18 @@ func TestGetLinkAddress(t *testing.T) {\nhost1Stack, _ := setupStack(t, stackOpts, host1NICID, host2NICID)\n- for i := 0; i < 2; i++ {\n- addr, ch, err := host1Stack.GetLinkAddress(host1NICID, test.remoteAddr, \"\", test.netProto, func(tcpip.LinkAddress, bool) {})\n- var want *tcpip.Error\n- if i == 0 {\n- want = tcpip.ErrWouldBlock\n- }\n- if err != want {\n- t.Fatalf(\"got host1Stack.GetLinkAddress(%d, %s, '', %d, _) = (%s, _, %s), want = (_, _, %s)\", host1NICID, test.remoteAddr, test.netProto, addr, err, want)\n- }\n-\n- if i == 0 {\n- <-ch\n- continue\n+ ch := make(chan stack.LinkResolutionResult, 1)\n+ if err := host1Stack.GetLinkAddress(host1NICID, test.remoteAddr, \"\", test.netProto, func(r stack.LinkResolutionResult) {\n+ ch <- r\n+ }); err != tcpip.ErrWouldBlock {\n+ t.Fatalf(\"got host1Stack.GetLinkAddress(%d, %s, '', %d, _) = %s, want = %s\", host1NICID, test.remoteAddr, test.netProto, err, tcpip.ErrWouldBlock)\n}\n-\n- if addr != linkAddr2 {\n- t.Fatalf(\"got addr = %s, want = %s\", addr, linkAddr2)\n+ wantRes := stack.LinkResolutionResult{Success: test.expectedOk}\n+ if test.expectedOk {\n+ wantRes.LinkAddress = linkAddr2\n}\n+ if diff := cmp.Diff(wantRes, <-ch); diff != \"\" {\n+ t.Fatalf(\"link resolution result mismatch (-want +got):\\n%s\", diff)\n}\n})\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Only use callback for GetLinkAddress GetLinkAddress's callback will be called immediately with a stack.LinkResolutionResult which will hold the link address so no need to also return the link address from the function. Fixes #5151. PiperOrigin-RevId: 353157857
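With this change the resolved address is delivered only through the callback; the integration test above bridges the callback to a channel. A condensed version of that calling pattern against the netstack API as it appears in this commit (it assumes s already has a NIC nicID configured and that remote is reachable on-link over IPv4):

```go
package example

import (
	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

// resolveLinkAddr bridges GetLinkAddress's callback to a channel and waits
// for the result, the same idiom used by TestGetLinkAddress above.
func resolveLinkAddr(s *stack.Stack, nicID tcpip.NICID, remote tcpip.Address) (tcpip.LinkAddress, bool) {
	ch := make(chan stack.LinkResolutionResult, 1)
	err := s.GetLinkAddress(nicID, remote, "" /* localAddr */, ipv4.ProtocolNumber, func(r stack.LinkResolutionResult) {
		ch <- r
	})
	if err != nil && err != tcpip.ErrWouldBlock {
		// e.g. unknown NIC or no resolver registered for the protocol.
		return "", false
	}
	r := <-ch
	return r.LinkAddress, r.Success
}
```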
259,884
21.01.2021 20:09:32
28,800
d02c03a268f98b471bcaf3afd99e61ddb161bfb4
Syscall docs update. Moves the id to the <tr> tag so that the page aligns properly when using an anchor. Makes the syscall number a link to the anchor. Fixes some broken links to syscalls without man pages.
[ { "change_type": "MODIFY", "old_path": "website/cmd/syscalldocs/main.go", "new_path": "website/cmd/syscalldocs/main.go", "diff": "@@ -76,9 +76,9 @@ syscalls. {{if .Undocumented}}{{.Undocumented}} syscalls are not yet documented.\n</thead>\n<tbody>\n{{range $i, $syscall := .Syscalls}}\n- <tr>\n- <td><a class=\"doc-table-anchor\" id=\"{{.Name}}\"></a>{{.Number}}</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/{{.Name}}.2.html\" target=\"_blank\" rel=\"noopener\">{{.Name}}</a></td>\n+ <tr id=\"{{.Name}}\">\n+ <td><a href=\"#{{.Name}}\">{{.Number}}</a></td>\n+ <td><a href=\"{{.DocURL}}\" target=\"_blank\" rel=\"noopener\">{{.Name}}</a></td>\n<td>{{.Support}}</td>\n<td>{{.Note}} {{range $i, $url := .URLs}}<br/>See: <a href=\"{{.}}\">{{.}}</a>{{end}}</td>\n</tr>\n@@ -93,6 +93,27 @@ func Fatalf(format string, a ...interface{}) {\nos.Exit(1)\n}\n+// syscallDocURL returns a doc url for a given syscall, doing its best to return a url that exists.\n+func syscallDocURL(name string) string {\n+ customDocs := map[string]string{\n+ \"io_pgetevents\": \"https://man7.org/linux/man-pages/man2/syscalls.2.html\",\n+ \"rseq\": \"https://man7.org/linux/man-pages/man2/syscalls.2.html\",\n+ \"io_uring_setup\": \"https://manpages.debian.org/buster-backports/liburing-dev/io_uring_setup.2.en.html\",\n+ \"io_uring_enter\": \"https://manpages.debian.org/buster-backports/liburing-dev/io_uring_enter.2.en.html\",\n+ \"io_uring_register\": \"https://manpages.debian.org/buster-backports/liburing-dev/io_uring_register.2.en.html\",\n+ \"open_tree\": \"https://man7.org/linux/man-pages/man2/syscalls.2.html\",\n+ \"move_mount\": \"https://man7.org/linux/man-pages/man2/syscalls.2.html\",\n+ \"fsopen\": \"https://man7.org/linux/man-pages/man2/syscalls.2.html\",\n+ \"fsconfig\": \"https://man7.org/linux/man-pages/man2/syscalls.2.html\",\n+ \"fsmount\": \"https://man7.org/linux/man-pages/man2/syscalls.2.html\",\n+ \"fspick\": \"https://man7.org/linux/man-pages/man2/syscalls.2.html\",\n+ }\n+ if url, ok := customDocs[name]; ok {\n+ return url\n+ }\n+ return fmt.Sprintf(\"http://man7.org/linux/man-pages/man2/%s.2.html\", name)\n+}\n+\nfunc main() {\ninputFlag := flag.String(\"in\", \"-\", \"File to input ('-' for stdin)\")\noutputDir := flag.String(\"out\", \".\", \"Directory to output files.\")\n@@ -146,6 +167,7 @@ func main() {\nSyscalls []struct {\nName string\nNumber uintptr\n+ DocURL string\nSupport string\nNote string\nURLs []string\n@@ -162,6 +184,7 @@ func main() {\nSyscalls: []struct {\nName string\nNumber uintptr\n+ DocURL string\nSupport string\nNote string\nURLs []string\n@@ -188,14 +211,16 @@ func main() {\ndata.Syscalls = append(data.Syscalls, struct {\nName string\nNumber uintptr\n+ DocURL string\nSupport string\nNote string\nURLs []string\n}{\nName: s.Name,\nNumber: num,\n+ DocURL: syscallDocURL(s.Name),\nSupport: s.Support,\n- Note: s.Note, // TODO urls\n+ Note: s.Note,\nURLs: s.URLs,\n})\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Syscall docs update - Moves the id to the <tr> tag so that the page aligns properly when using an anchor. - Makes the syscall number a link to the anchor. - Fixes some broken links to syscalls without man pages. PiperOrigin-RevId: 353159903
260,004
21.01.2021 23:19:38
28,800
e0f4e46e340f2f5e666332ac3ff14f113239400a
Resolve static link addresses in GetLinkAddress. If a network address has a static mapping to a link address, calculate it in GetLinkAddress. Test: stack_test.TestStaticGetLinkAddress
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack.go", "new_path": "pkg/tcpip/stack/stack.go", "diff": "@@ -1533,7 +1533,7 @@ type LinkResolutionResult struct {\nSuccess bool\n}\n-// GetLinkAddress finds the link address corresponding to a neighbor's address.\n+// GetLinkAddress finds the link address corresponding to a network address.\n//\n// Returns ErrNotSupported if the stack is not configured with a link address\n// resolver for the specified network protocol.\n@@ -1562,6 +1562,11 @@ func (s *Stack) GetLinkAddress(nicID tcpip.NICID, addr, localAddr tcpip.Address,\nreturn tcpip.ErrNotSupported\n}\n+ if linkAddr, ok := linkRes.ResolveStaticAddress(addr); ok {\n+ onResolve(LinkResolutionResult{LinkAddress: linkAddr, Success: true})\n+ return nil\n+ }\n+\n_, _, err := nic.getNeighborLinkAddress(addr, localAddr, linkRes, onResolve)\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack_test.go", "new_path": "pkg/tcpip/stack/stack_test.go", "diff": "@@ -4391,3 +4391,51 @@ func TestGetLinkAddressErrors(t *testing.T) {\nt.Errorf(\"got s.GetLinkAddress(%d, '', '', %d, nil) = %s, want = %s\", unknownNICID, ipv4.ProtocolNumber, err, tcpip.ErrNotSupported)\n}\n}\n+\n+func TestStaticGetLinkAddress(t *testing.T) {\n+ const (\n+ nicID = 1\n+ )\n+\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol, ipv6.NewProtocol},\n+ })\n+ if err := s.CreateNIC(nicID, channel.New(0, 0, \"\")); err != nil {\n+ t.Fatalf(\"CreateNIC(%d, _) = %s\", nicID, err)\n+ }\n+\n+ tests := []struct {\n+ name string\n+ proto tcpip.NetworkProtocolNumber\n+ addr tcpip.Address\n+ expectedLinkAddr tcpip.LinkAddress\n+ }{\n+ {\n+ name: \"IPv4\",\n+ proto: ipv4.ProtocolNumber,\n+ addr: header.IPv4Broadcast,\n+ expectedLinkAddr: header.EthernetBroadcastAddress,\n+ },\n+ {\n+ name: \"IPv6\",\n+ proto: ipv6.ProtocolNumber,\n+ addr: header.IPv6AllNodesMulticastAddress,\n+ expectedLinkAddr: header.EthernetAddressFromMulticastIPv6Address(header.IPv6AllNodesMulticastAddress),\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ ch := make(chan stack.LinkResolutionResult, 1)\n+ if err := s.GetLinkAddress(nicID, test.addr, \"\", test.proto, func(r stack.LinkResolutionResult) {\n+ ch <- r\n+ }); err != nil {\n+ t.Fatalf(\"s.GetLinkAddress(%d, %s, '', %d, _): %s\", nicID, test.addr, test.proto, err)\n+ }\n+\n+ if diff := cmp.Diff(stack.LinkResolutionResult{LinkAddress: test.expectedLinkAddr, Success: true}, <-ch); diff != \"\" {\n+ t.Fatalf(\"link resolution result mismatch (-want +got):\\n%s\", diff)\n+ }\n+ })\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Resolve static link addresses in GetLinkAddress If a network address has a static mapping to a link address, calculate it in GetLinkAddress. Test: stack_test.TestStaticGetLinkAddress PiperOrigin-RevId: 353179616
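The static mapping exercised by the IPv6 case above is deterministic: per RFC 2464, an IPv6 multicast address maps to the Ethernet multicast MAC 33:33 followed by the address's last four bytes (ff02::1 → 33:33:00:00:00:01). A standalone sketch of that computation using only the standard library, independent of the netstack header helpers:

```go
package example

import (
	"errors"
	"net"
)

// etherFromMulticastIPv6 returns the Ethernet multicast address for an IPv6
// multicast address: 0x33, 0x33, then the low 32 bits of the IPv6 address.
func etherFromMulticastIPv6(ip net.IP) (net.HardwareAddr, error) {
	ip = ip.To16()
	if ip == nil || ip.To4() != nil || !ip.IsMulticast() {
		return nil, errors.New("not an IPv6 multicast address")
	}
	return net.HardwareAddr{0x33, 0x33, ip[12], ip[13], ip[14], ip[15]}, nil
}
```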
259,907
22.01.2021 09:39:52
28,800
010cadd3b8b9bc524b1c61c4338e32262d49632b
[infra] Fix Build Github action failure. The GH Build action has been failing with the error message: ``` BUILD -c opt //runsc tee: /proc/self/fd/2: No such device or address ``` tee /dev/fd/2 seems to be the canonical way of copying stdin to stderr. So use that instead.
[ { "change_type": "MODIFY", "old_path": "tools/bazel.mk", "new_path": "tools/bazel.mk", "diff": "@@ -191,7 +191,7 @@ endif\nbuild_paths = \\\n(set -euo pipefail; \\\n$(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1)) 2>&1 \\\n- | tee /proc/self/fd/2 \\\n+ | tee /dev/fd/2 \\\n| sed -n -e '/^Target/,$$p' \\\n| sed -n -e '/^ \\($(subst /,\\/,$(subst $(SPACE),\\|,$(BUILD_ROOTS)))\\)/p' \\\n| sed -e 's/ /\\n/g' \\\n" } ]
Go
Apache License 2.0
google/gvisor
[infra] Fix Build Github action failure. The GH Build action has been failing with the error message: ``` --- BUILD -c opt //runsc tee: /proc/self/fd/2: No such device or address ``` tee /dev/fd/2 seems to be the canonical way of copying stdin to stderr. So use that instead. PiperOrigin-RevId: 353259087
259,992
22.01.2021 09:55:31
28,800
f14f3ba3eff3a445a043a7b2f877ebf4ff862e7d
Fix TestDuplicateEnvVariable flakiness. Updates
[ { "change_type": "MODIFY", "old_path": "runsc/container/container_test.go", "new_path": "runsc/container/container_test.go", "diff": "@@ -52,7 +52,7 @@ func waitForProcessList(cont *Container, want []*control.Process) error {\ncb := func() error {\ngot, err := cont.Processes()\nif err != nil {\n- err = fmt.Errorf(\"error getting process data from container: %v\", err)\n+ err = fmt.Errorf(\"error getting process data from container: %w\", err)\nreturn &backoff.PermanentError{Err: err}\n}\nif !procListsEqual(got, want) {\n@@ -64,11 +64,30 @@ func waitForProcessList(cont *Container, want []*control.Process) error {\nreturn testutil.Poll(cb, 30*time.Second)\n}\n+// waitForProcess waits for the given process to show up in the container.\n+func waitForProcess(cont *Container, want *control.Process) error {\n+ cb := func() error {\n+ gots, err := cont.Processes()\n+ if err != nil {\n+ err = fmt.Errorf(\"error getting process data from container: %w\", err)\n+ return &backoff.PermanentError{Err: err}\n+ }\n+ for _, got := range gots {\n+ if procEqual(got, want) {\n+ return nil\n+ }\n+ }\n+ return fmt.Errorf(\"container got process list: %s, want: %+v\", procListToString(gots), want)\n+ }\n+ // Gives plenty of time as tests can run slow under --race.\n+ return testutil.Poll(cb, 30*time.Second)\n+}\n+\nfunc waitForProcessCount(cont *Container, want int) error {\ncb := func() error {\npss, err := cont.Processes()\nif err != nil {\n- err = fmt.Errorf(\"error getting process data from container: %v\", err)\n+ err = fmt.Errorf(\"error getting process data from container: %w\", err)\nreturn &backoff.PermanentError{Err: err}\n}\nif got := len(pss); got != want {\n@@ -101,9 +120,14 @@ func procListsEqual(gots, wants []*control.Process) bool {\nreturn false\n}\nfor i := range gots {\n- got := gots[i]\n- want := wants[i]\n+ if !procEqual(gots[i], wants[i]) {\n+ return false\n+ }\n+ }\n+ return true\n+}\n+func procEqual(got, want *control.Process) bool {\nif want.UID != math.MaxUint32 && want.UID != got.UID {\nreturn false\n}\n@@ -119,7 +143,6 @@ func procListsEqual(gots, wants []*control.Process) bool {\nif len(want.Cmd) != 0 && want.Cmd != got.Cmd {\nreturn false\n}\n- }\nreturn true\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/multi_container_test.go", "new_path": "runsc/container/multi_container_test.go", "diff": "@@ -1708,12 +1708,9 @@ func TestMultiContainerHomeEnvDir(t *testing.T) {\nt.Errorf(\"wait on child container: %v\", err)\n}\n- // Wait for the root container to run.\n- expectedPL := []*control.Process{\n- newProcessBuilder().Cmd(\"sh\").Process(),\n- newProcessBuilder().Cmd(\"sleep\").Process(),\n- }\n- if err := waitForProcessList(containers[0], expectedPL); err != nil {\n+ // Wait until after `env` has executed.\n+ expectedProc := newProcessBuilder().Cmd(\"sleep\").Process()\n+ if err := waitForProcess(containers[0], expectedProc); err != nil {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n}\n@@ -1831,7 +1828,7 @@ func TestDuplicateEnvVariable(t *testing.T) {\ncmd1 := fmt.Sprintf(\"env > %q; sleep 1000\", files[0].Name())\ncmd2 := fmt.Sprintf(\"env > %q\", files[1].Name())\ncmdExec := fmt.Sprintf(\"env > %q\", files[2].Name())\n- testSpecs, ids := createSpecs([]string{\"/bin/bash\", \"-c\", cmd1}, []string{\"/bin/bash\", \"-c\", cmd2})\n+ testSpecs, ids := createSpecs([]string{\"/bin/sh\", \"-c\", cmd1}, []string{\"/bin/sh\", \"-c\", cmd2})\ntestSpecs[0].Process.Env = append(testSpecs[0].Process.Env, \"VAR=foo\", \"VAR=bar\")\ntestSpecs[1].Process.Env = 
append(testSpecs[1].Process.Env, \"VAR=foo\", \"VAR=bar\")\n@@ -1841,12 +1838,9 @@ func TestDuplicateEnvVariable(t *testing.T) {\n}\ndefer cleanup()\n- // Wait for the `env` from the root container to finish.\n- expectedPL := []*control.Process{\n- newProcessBuilder().Cmd(\"bash\").Process(),\n- newProcessBuilder().Cmd(\"sleep\").Process(),\n- }\n- if err := waitForProcessList(containers[0], expectedPL); err != nil {\n+ // Wait until after `env` has executed.\n+ expectedProc := newProcessBuilder().Cmd(\"sleep\").Process()\n+ if err := waitForProcess(containers[0], expectedProc); err != nil {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n}\nif ws, err := containers[1].Wait(); err != nil {\n@@ -1856,8 +1850,8 @@ func TestDuplicateEnvVariable(t *testing.T) {\n}\nexecArgs := &control.ExecArgs{\n- Filename: \"/bin/bash\",\n- Argv: []string{\"/bin/bash\", \"-c\", cmdExec},\n+ Filename: \"/bin/sh\",\n+ Argv: []string{\"/bin/sh\", \"-c\", cmdExec},\nEnvv: []string{\"VAR=foo\", \"VAR=bar\"},\n}\nif ws, err := containers[0].executeSync(execArgs); err != nil || ws.ExitStatus() != 0 {\n" } ]
Go
Apache License 2.0
google/gvisor
Fix TestDuplicateEnvVariable flakyness Updates #5226 PiperOrigin-RevId: 353262133
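The fix above waits for a single expected process instead of an exact process list; underneath both helpers is the same retry-until-deadline idiom, sketched generically here (this is an illustration, not the gvisor testutil/backoff implementation):

```go
package example

import (
	"fmt"
	"time"
)

// poll retries cb every interval until it succeeds or timeout elapses,
// wrapping the last error on timeout.
func poll(cb func() error, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		err := cb()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %v: %w", timeout, err)
		}
		time.Sleep(interval)
	}
}
```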
259,992
22.01.2021 10:43:19
28,800
9b4f4655ed5ddcbe34806a28b2bc5d8f61fbb215
Remove dependency to abi/linux abi package is to be used by the Sentry to implement the Linux ABI. Code dealing with the host should use x/sys/unix.
[ { "change_type": "MODIFY", "old_path": "runsc/fsgofer/BUILD", "new_path": "runsc/fsgofer/BUILD", "diff": "@@ -12,7 +12,6 @@ go_library(\n],\nvisibility = [\"//runsc:__subpackages__\"],\ndeps = [\n- \"//pkg/abi/linux\",\n\"//pkg/cleanup\",\n\"//pkg/fd\",\n\"//pkg/log\",\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer.go", "new_path": "runsc/fsgofer/fsgofer.go", "diff": "@@ -31,7 +31,6 @@ import (\n\"strconv\"\n\"golang.org/x/sys/unix\"\n- \"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/fd\"\n\"gvisor.dev/gvisor/pkg/log\"\n@@ -367,7 +366,7 @@ func fstat(fd int) (unix.Stat_t, error) {\n}\nfunc fchown(fd int, uid p9.UID, gid p9.GID) error {\n- return unix.Fchownat(fd, \"\", int(uid), int(gid), linux.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW)\n+ return unix.Fchownat(fd, \"\", int(uid), int(gid), unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW)\n}\nfunc setOwnerIfNeeded(fd int, uid p9.UID, gid p9.GID) (unix.Stat_t, error) {\n@@ -734,15 +733,15 @@ func (l *localFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error {\nif valid.ATime || valid.MTime {\nutimes := [2]unix.Timespec{\n- {Sec: 0, Nsec: linux.UTIME_OMIT},\n- {Sec: 0, Nsec: linux.UTIME_OMIT},\n+ {Sec: 0, Nsec: unix.UTIME_OMIT},\n+ {Sec: 0, Nsec: unix.UTIME_OMIT},\n}\nif valid.ATime {\nif valid.ATimeNotSystemTime {\nutimes[0].Sec = int64(attr.ATimeSeconds)\nutimes[0].Nsec = int64(attr.ATimeNanoSeconds)\n} else {\n- utimes[0].Nsec = linux.UTIME_NOW\n+ utimes[0].Nsec = unix.UTIME_NOW\n}\n}\nif valid.MTime {\n@@ -750,7 +749,7 @@ func (l *localFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error {\nutimes[1].Sec = int64(attr.MTimeSeconds)\nutimes[1].Nsec = int64(attr.MTimeNanoSeconds)\n} else {\n- utimes[1].Nsec = linux.UTIME_NOW\n+ utimes[1].Nsec = unix.UTIME_NOW\n}\n}\n@@ -764,7 +763,7 @@ func (l *localFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error {\n}\ndefer unix.Close(parent)\n- if tErr := utimensat(parent, path.Base(l.hostPath), utimes, linux.AT_SYMLINK_NOFOLLOW); tErr != nil {\n+ if tErr := utimensat(parent, path.Base(l.hostPath), utimes, unix.AT_SYMLINK_NOFOLLOW); tErr != nil {\nlog.Debugf(\"SetAttr utimens failed %q, err: %v\", l.hostPath, tErr)\nerr = extractErrno(tErr)\n}\n@@ -779,15 +778,15 @@ func (l *localFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error {\n}\nif valid.UID || valid.GID {\n- uid := -1\n+ uid := p9.NoUID\nif valid.UID {\n- uid = int(attr.UID)\n+ uid = attr.UID\n}\n- gid := -1\n+ gid := p9.NoGID\nif valid.GID {\n- gid = int(attr.GID)\n+ gid = attr.GID\n}\n- if oErr := unix.Fchownat(f.FD(), \"\", uid, gid, linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW); oErr != nil {\n+ if oErr := fchown(f.FD(), uid, gid); oErr != nil {\nlog.Debugf(\"SetAttr fchownat failed %q, err: %v\", l.hostPath, oErr)\nerr = extractErrno(oErr)\n}\n@@ -916,7 +915,7 @@ func (l *localFile) Link(target p9.File, newName string) error {\n}\ntargetFile := target.(*localFile)\n- if err := unix.Linkat(targetFile.file.FD(), \"\", l.file.FD(), newName, linux.AT_EMPTY_PATH); err != nil {\n+ if err := unix.Linkat(targetFile.file.FD(), \"\", l.file.FD(), newName, unix.AT_EMPTY_PATH); err != nil {\nreturn extractErrno(err)\n}\nreturn nil\n@@ -1103,7 +1102,8 @@ func (l *localFile) Connect(flags p9.ConnectFlags) (*fd.FD, error) {\n// mappings, the app path may have fit in the sockaddr, but we can't\n// fit f.path in our sockaddr. 
We'd need to redirect through a shorter\n// path in order to actually connect to this socket.\n- if len(l.hostPath) > linux.UnixPathMax {\n+ const UNIX_PATH_MAX = 108 // defined in afunix.h\n+ if len(l.hostPath) > UNIX_PATH_MAX {\nreturn nil, unix.ECONNREFUSED\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer_amd64_unsafe.go", "new_path": "runsc/fsgofer/fsgofer_amd64_unsafe.go", "diff": "@@ -20,7 +20,6 @@ import (\n\"unsafe\"\n\"golang.org/x/sys/unix\"\n- \"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/syserr\"\n)\n@@ -39,7 +38,7 @@ func statAt(dirFd int, name string) (unix.Stat_t, error) {\nuintptr(dirFd),\nuintptr(namePtr),\nuintptr(statPtr),\n- linux.AT_SYMLINK_NOFOLLOW,\n+ unix.AT_SYMLINK_NOFOLLOW,\n0,\n0); errno != 0 {\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer_arm64_unsafe.go", "new_path": "runsc/fsgofer/fsgofer_arm64_unsafe.go", "diff": "@@ -20,7 +20,6 @@ import (\n\"unsafe\"\n\"golang.org/x/sys/unix\"\n- \"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/syserr\"\n)\n@@ -39,7 +38,7 @@ func statAt(dirFd int, name string) (unix.Stat_t, error) {\nuintptr(dirFd),\nuintptr(namePtr),\nuintptr(statPtr),\n- linux.AT_SYMLINK_NOFOLLOW,\n+ unix.AT_SYMLINK_NOFOLLOW,\n0,\n0); errno != 0 {\n" } ]
Go
Apache License 2.0
google/gvisor
Remove dependency to abi/linux abi package is to be used by the Sentry to implement the Linux ABI. Code dealing with the host should use x/sys/unix. PiperOrigin-RevId: 353272679
259,975
22.01.2021 10:49:57
28,800
16b81308cfb7cf2265d69e7180cd024f7c5b89e4
Add initial mitigate code and cpu parsing.
[ { "change_type": "ADD", "old_path": null, "new_path": "runsc/mitigate/BUILD", "diff": "+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"mitigate\",\n+ srcs = [\n+ \"cpu.go\",\n+ \"mitigate.go\",\n+ ],\n+)\n+\n+go_test(\n+ name = \"mitigate_test\",\n+ size = \"small\",\n+ srcs = [\"cpu_test.go\"],\n+ library = \":mitigate\",\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "runsc/mitigate/cpu.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package mitigate\n+\n+import (\n+ \"fmt\"\n+ \"regexp\"\n+ \"strconv\"\n+ \"strings\"\n+)\n+\n+const (\n+ // constants of coomm\n+ meltdown = \"cpu_meltdown\"\n+ l1tf = \"l1tf\"\n+ mds = \"mds\"\n+ swapgs = \"swapgs\"\n+ taa = \"taa\"\n+)\n+\n+const (\n+ processorKey = \"processor\"\n+ vendorIDKey = \"vendor_id\"\n+ cpuFamilyKey = \"cpu family\"\n+ modelKey = \"model\"\n+ coreIDKey = \"core id\"\n+ bugsKey = \"bugs\"\n+)\n+\n+// getCPUSet returns cpu structs from reading /proc/cpuinfo.\n+func getCPUSet(data string) ([]*cpu, error) {\n+ // Each processor entry should start with the\n+ // processor key. Find the beginings of each.\n+ r := buildRegex(processorKey, `\\d+`)\n+ indices := r.FindAllStringIndex(data, -1)\n+ if len(indices) < 1 {\n+ return nil, fmt.Errorf(\"no cpus found for: %s\", data)\n+ }\n+\n+ // Add the ending index for last entry.\n+ indices = append(indices, []int{len(data), -1})\n+\n+ // Valid cpus are now defined by strings in between\n+ // indexes (e.g. data[index[i], index[i+1]]).\n+ // There should be len(indicies) - 1 CPUs\n+ // since the last index is the end of the string.\n+ var cpus = make([]*cpu, 0, len(indices)-1)\n+ // Find each string that represents a CPU. These begin \"processor\".\n+ for i := 1; i < len(indices); i++ {\n+ start := indices[i-1][0]\n+ end := indices[i][0]\n+ // Parse the CPU entry, which should be between start/end.\n+ c, err := getCPU(data[start:end])\n+ if err != nil {\n+ return nil, err\n+ }\n+ cpus = append(cpus, c)\n+ }\n+ return cpus, nil\n+}\n+\n+// type cpu represents pertinent info about a cpu.\n+type cpu struct {\n+ processorNumber int64 // the processor number of this CPU.\n+ vendorID string // the vendorID of CPU (e.g. AuthenticAMD).\n+ cpuFamily int64 // CPU family number (e.g. 6 for CascadeLake/Skylake).\n+ model int64 // CPU model number (e.g. 
85 for CascadeLake/Skylake).\n+ coreID int64 // This CPU's core id to match Hyperthread Pairs\n+ bugs map[string]struct{} // map of vulnerabilities parsed from the 'bugs' field.\n+}\n+\n+// getCPU parses a CPU from a single cpu entry from /proc/cpuinfo.\n+func getCPU(data string) (*cpu, error) {\n+ processor, err := parseProcessor(data)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ vendorID, err := parseVendorID(data)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ cpuFamily, err := parseCPUFamily(data)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ model, err := parseModel(data)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ coreID, err := parseCoreID(data)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ bugs, err := parseBugs(data)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ return &cpu{\n+ processorNumber: processor,\n+ vendorID: vendorID,\n+ cpuFamily: cpuFamily,\n+ model: model,\n+ coreID: coreID,\n+ bugs: bugs,\n+ }, nil\n+}\n+\n+// List of pertinent side channel vulnerablilites.\n+// For mds, see: https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html.\n+var vulnerabilities = []string{\n+ meltdown,\n+ l1tf,\n+ mds,\n+ swapgs,\n+ taa,\n+}\n+\n+// isVulnerable checks if a CPU is vulnerable to pertinent bugs.\n+func (c *cpu) isVulnerable() bool {\n+ for _, bug := range vulnerabilities {\n+ if _, ok := c.bugs[bug]; ok {\n+ return true\n+ }\n+ }\n+ return false\n+}\n+\n+// similarTo checks family/model/bugs fields for equality of two\n+// processors.\n+func (c *cpu) similarTo(other *cpu) bool {\n+ if c.vendorID != other.vendorID {\n+ return false\n+ }\n+\n+ if other.cpuFamily != c.cpuFamily {\n+ return false\n+ }\n+\n+ if other.model != c.model {\n+ return false\n+ }\n+\n+ if len(other.bugs) != len(c.bugs) {\n+ return false\n+ }\n+\n+ for bug := range c.bugs {\n+ if _, ok := other.bugs[bug]; !ok {\n+ return false\n+ }\n+ }\n+ return true\n+}\n+\n+// parseProcessor grabs the processor field from /proc/cpuinfo output.\n+func parseProcessor(data string) (int64, error) {\n+ return parseIntegerResult(data, processorKey)\n+}\n+\n+// parseVendorID grabs the vendor_id field from /proc/cpuinfo output.\n+func parseVendorID(data string) (string, error) {\n+ return parseRegex(data, vendorIDKey, `[\\w\\d]+`)\n+}\n+\n+// parseCPUFamily grabs the cpu family field from /proc/cpuinfo output.\n+func parseCPUFamily(data string) (int64, error) {\n+ return parseIntegerResult(data, cpuFamilyKey)\n+}\n+\n+// parseModel grabs the model field from /proc/cpuinfo output.\n+func parseModel(data string) (int64, error) {\n+ return parseIntegerResult(data, modelKey)\n+}\n+\n+// parseCoreID parses the core id field.\n+func parseCoreID(data string) (int64, error) {\n+ return parseIntegerResult(data, coreIDKey)\n+}\n+\n+// parseBugs grabs the bugs field from /proc/cpuinfo output.\n+func parseBugs(data string) (map[string]struct{}, error) {\n+ result, err := parseRegex(data, bugsKey, `[\\d\\w\\s]*`)\n+ if err != nil {\n+ return nil, err\n+ }\n+ bugs := strings.Split(result, \" \")\n+ ret := make(map[string]struct{}, len(bugs))\n+ for _, bug := range bugs {\n+ ret[bug] = struct{}{}\n+ }\n+ return ret, nil\n+}\n+\n+// parseIntegerResult parses fields expecting an integer.\n+func parseIntegerResult(data, key string) (int64, error) {\n+ result, err := parseRegex(data, key, `\\d+`)\n+ if err != nil {\n+ return 0, err\n+ }\n+ return strconv.ParseInt(result, 0, 64)\n+}\n+\n+// buildRegex builds a regex for parsing each CPU field.\n+func buildRegex(key, match string) *regexp.Regexp {\n+ reg := 
fmt.Sprintf(`(?m)^%s\\s*:\\s*(.*)$`, key)\n+ return regexp.MustCompile(reg)\n+}\n+\n+// parseRegex parses data with key inserted into a standard regex template.\n+func parseRegex(data, key, match string) (string, error) {\n+ r := buildRegex(key, match)\n+ matches := r.FindStringSubmatch(data)\n+ if len(matches) < 2 {\n+ return \"\", fmt.Errorf(\"failed to match key %s: %s\", key, data)\n+ }\n+ return matches[1], nil\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "runsc/mitigate/cpu_test.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package mitigate\n+\n+import (\n+ \"io/ioutil\"\n+ \"strings\"\n+ \"testing\"\n+)\n+\n+// CPU info for a Intel CascadeLake processor. Both Skylake and CascadeLake have\n+// the same family/model numbers, but with different bugs (e.g. skylake has\n+// cpu_meltdown).\n+var cascadeLake = &cpu{\n+ vendorID: \"GenuineIntel\",\n+ cpuFamily: 6,\n+ model: 85,\n+ bugs: map[string]struct{}{\n+ \"spectre_v1\": struct{}{},\n+ \"spectre_v2\": struct{}{},\n+ \"spec_store_bypass\": struct{}{},\n+ mds: struct{}{},\n+ swapgs: struct{}{},\n+ taa: struct{}{},\n+ },\n+}\n+\n+// TestGetCPU tests basic parsing of single CPU strings from reading\n+// /proc/cpuinfo.\n+func TestGetCPU(t *testing.T) {\n+ data := `processor : 0\n+vendor_id : GenuineIntel\n+cpu family : 6\n+model : 85\n+core id : 0\n+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit\n+`\n+ want := cpu{\n+ processorNumber: 0,\n+ vendorID: \"GenuineIntel\",\n+ cpuFamily: 6,\n+ model: 85,\n+ coreID: 0,\n+ bugs: map[string]struct{}{\n+ \"cpu_meltdown\": struct{}{},\n+ \"spectre_v1\": struct{}{},\n+ \"spectre_v2\": struct{}{},\n+ \"spec_store_bypass\": struct{}{},\n+ \"l1tf\": struct{}{},\n+ \"mds\": struct{}{},\n+ \"swapgs\": struct{}{},\n+ \"taa\": struct{}{},\n+ \"itlb_multihit\": struct{}{},\n+ },\n+ }\n+\n+ got, err := getCPU(data)\n+ if err != nil {\n+ t.Fatalf(\"getCpu failed with error: %v\", err)\n+ }\n+\n+ if !want.similarTo(got) {\n+ t.Fatalf(\"Failed cpus not similar: got: %+v, want: %+v\", got, want)\n+ }\n+\n+ if !got.isVulnerable() {\n+ t.Fatalf(\"Failed: cpu should be vulnerable.\")\n+ }\n+}\n+\n+func TestInvalid(t *testing.T) {\n+ result, err := getCPUSet(`something not a processor`)\n+ if err == nil {\n+ t.Fatalf(\"getCPU set didn't return an error: %+v\", result)\n+ }\n+\n+ if !strings.Contains(err.Error(), \"no cpus\") {\n+ t.Fatalf(\"Incorrect error returned: %v\", err)\n+ }\n+}\n+\n+// TestCPUSet tests getting the right number of CPUs from\n+// parsing full output of /proc/cpuinfo.\n+func TestCPUSet(t *testing.T) {\n+ data := `processor : 0\n+vendor_id : GenuineIntel\n+cpu family : 6\n+model : 63\n+model name : Intel(R) Xeon(R) CPU @ 2.30GHz\n+stepping : 0\n+microcode : 0x1\n+cpu MHz : 2299.998\n+cache size : 46080 KB\n+physical id : 0\n+siblings : 2\n+core id : 0\n+cpu cores : 1\n+apicid : 0\n+initial apicid : 0\n+fpu : yes\n+fpu_exception : yes\n+cpuid level : 
13\n+wp : yes\n+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear arch_capabilities\n+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs\n+bogomips : 4599.99\n+clflush size : 64\n+cache_alignment : 64\n+address sizes : 46 bits physical, 48 bits virtual\n+power management:\n+\n+processor : 1\n+vendor_id : GenuineIntel\n+cpu family : 6\n+model : 63\n+model name : Intel(R) Xeon(R) CPU @ 2.30GHz\n+stepping : 0\n+microcode : 0x1\n+cpu MHz : 2299.998\n+cache size : 46080 KB\n+physical id : 0\n+siblings : 2\n+core id : 0\n+cpu cores : 1\n+apicid : 1\n+initial apicid : 1\n+fpu : yes\n+fpu_exception : yes\n+cpuid level : 13\n+wp : yes\n+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear arch_capabilities\n+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs\n+bogomips : 4599.99\n+clflush size : 64\n+cache_alignment : 64\n+address sizes : 46 bits physical, 48 bits virtual\n+power management:\n+`\n+ cpuSet, err := getCPUSet(data)\n+ if err != nil {\n+ t.Fatalf(\"getCPUSet failed: %v\", err)\n+ }\n+\n+ wantCPULen := 2\n+ if len(cpuSet) != wantCPULen {\n+ t.Fatalf(\"Num CPU mismatch: want: %d, got: %d\", wantCPULen, len(cpuSet))\n+ }\n+\n+ wantCPU := cpu{\n+ vendorID: \"GenuineIntel\",\n+ cpuFamily: 6,\n+ model: 63,\n+ bugs: map[string]struct{}{\n+ \"cpu_meltdown\": struct{}{},\n+ \"spectre_v1\": struct{}{},\n+ \"spectre_v2\": struct{}{},\n+ \"spec_store_bypass\": struct{}{},\n+ \"l1tf\": struct{}{},\n+ \"mds\": struct{}{},\n+ \"swapgs\": struct{}{},\n+ },\n+ }\n+\n+ for _, c := range cpuSet {\n+ if !wantCPU.similarTo(c) {\n+ t.Fatalf(\"Failed cpus not equal: got: %+v, want: %+v\", c, wantCPU)\n+ }\n+ }\n+}\n+\n+// TestReadFile is a smoke test for parsing methods.\n+func TestReadFile(t *testing.T) {\n+ data, err := ioutil.ReadFile(\"/proc/cpuinfo\")\n+ if err != nil {\n+ t.Fatalf(\"Failed to read cpuinfo: %v\", err)\n+ }\n+\n+ set, err := getCPUSet(string(data))\n+ if err != nil {\n+ t.Fatalf(\"Failed to parse CPU data %v\\n%s\", err, data)\n+ }\n+\n+ if len(set) < 1 {\n+ t.Fatalf(\"Failed to parse any CPUs: %d\", len(set))\n+ }\n+\n+ for _, c := range set {\n+ t.Logf(\"CPU: %+v: %t\", c, c.isVulnerable())\n+ }\n+}\n+\n+// TestVulnerable tests if the isVulnerable method is correct\n+// among known CPUs in GCP.\n+func TestVulnerable(t *testing.T) {\n+ const haswell = `processor : 0\n+vendor_id : GenuineIntel\n+cpu family : 6\n+model : 63\n+model name : Intel(R) Xeon(R) CPU @ 2.30GHz\n+stepping : 0\n+microcode : 0x1\n+cpu MHz : 2299.998\n+cache size : 46080 KB\n+physical id : 0\n+siblings : 4\n+core id : 0\n+cpu cores : 2\n+apicid : 0\n+initial apicid : 0\n+fpu : yes\n+fpu_exception : yes\n+cpuid level : 13\n+wp : yes\n+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca 
cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear arch_capabilities\n+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs\n+bogomips : 4599.99\n+clflush size : 64\n+cache_alignment : 64\n+address sizes : 46 bits physical, 48 bits virtual\n+power management:`\n+\n+ const skylake = `processor : 0\n+vendor_id : GenuineIntel\n+cpu family : 6\n+model : 85\n+model name : Intel(R) Xeon(R) CPU @ 2.00GHz\n+stepping : 3\n+microcode : 0x1\n+cpu MHz : 2000.180\n+cache size : 39424 KB\n+physical id : 0\n+siblings : 2\n+core id : 0\n+cpu cores : 1\n+apicid : 0\n+initial apicid : 0\n+fpu : yes\n+fpu_exception : yes\n+cpuid level : 13\n+wp : yes\n+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat md_clear arch_capabilities\n+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa\n+bogomips : 4000.36\n+clflush size : 64\n+cache_alignment : 64\n+address sizes : 46 bits physical, 48 bits virtual\n+power management:`\n+\n+ const cascade = `processor : 0\n+vendor_id : GenuineIntel\n+cpu family : 6\n+model : 85\n+model name : Intel(R) Xeon(R) CPU\n+stepping : 7\n+microcode : 0x1\n+cpu MHz : 2800.198\n+cache size : 33792 KB\n+physical id : 0\n+siblings : 2\n+core id : 0\n+cpu cores : 1\n+apicid : 0\n+initial apicid : 0\n+fpu : yes\n+fpu_exception : yes\n+cpuid level : 13\n+wp : yes\n+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2\n+ ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmu\n+lqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowpr\n+efetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid r\n+tm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves a\n+rat avx512_vnni md_clear arch_capabilities\n+bugs : spectre_v1 spectre_v2 spec_store_bypass mds swapgs taa\n+bogomips : 5600.39\n+clflush size : 64\n+cache_alignment : 64\n+address sizes : 46 bits physical, 48 bits virtual\n+power management:`\n+\n+ const amd = `processor : 0\n+vendor_id : AuthenticAMD\n+cpu family : 23\n+model : 49\n+model name : AMD EPYC 7B12\n+stepping : 0\n+microcode : 0x1000065\n+cpu MHz : 2250.000\n+cache size : 512 KB\n+physical id : 0\n+siblings : 2\n+core id : 0\n+cpu cores : 1\n+apicid : 0\n+initial apicid : 0\n+fpu : yes\n+fpu_exception : yes\n+cpuid level : 13\n+wp : yes\n+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology 
nonstop_tsc cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip rdpid\n+bugs : sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass\n+bogomips : 4500.00\n+TLB size : 3072 4K pages\n+clflush size : 64\n+cache_alignment : 64\n+address sizes : 48 bits physical, 48 bits virtual\n+power management:`\n+\n+ for _, tc := range []struct {\n+ name string\n+ cpuString string\n+ vulnerable bool\n+ }{\n+ {\n+ name: \"haswell\",\n+ cpuString: haswell,\n+ vulnerable: true,\n+ }, {\n+ name: \"skylake\",\n+ cpuString: skylake,\n+ vulnerable: true,\n+ }, {\n+ name: \"cascadeLake\",\n+ cpuString: cascade,\n+ vulnerable: false,\n+ }, {\n+ name: \"amd\",\n+ cpuString: amd,\n+ vulnerable: false,\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ set, err := getCPUSet(tc.cpuString)\n+ if err != nil {\n+ t.Fatalf(\"Failed to getCPUSet:%v\\n %s\", err, tc.cpuString)\n+ }\n+\n+ if len(set) < 1 {\n+ t.Fatalf(\"Returned empty cpu set: %v\", set)\n+ }\n+\n+ for _, c := range set {\n+ got := func() bool {\n+ if cascadeLake.similarTo(c) {\n+ return false\n+ }\n+ return c.isVulnerable()\n+ }()\n+\n+ if got != tc.vulnerable {\n+ t.Fatalf(\"Mismatch vulnerable for cpu %+s: got %t want: %t\", tc.name, tc.vulnerable, got)\n+ }\n+ }\n+ })\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "runsc/mitigate/mitigate.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package mitigate provides libraries for the mitigate command. The\n+// mitigate command mitigates side channel attacks such as MDS. Mitigate\n+// shuts down CPUs via /sys/devices/system/cpu/cpu{N}/online. In addition,\n+// the mitigate also handles computing available CPU in kubernetes kube_config\n+// files.\n+package mitigate\n" } ]
Go
Apache License 2.0
google/gvisor
Add initial mitigate code and cpu parsing. PiperOrigin-RevId: 353274135
259,860
22.01.2021 11:25:13
28,800
65594d30ad1b1a2ca676c7ea78f4815f83dc4d06
Avoid atomic operations in kcov processing. Atomic operations here significantly slow down gVisor builds with kcov/coverage enabled. Also mark these functions go:norace to avoid complaints from the race detector.
[ { "change_type": "MODIFY", "old_path": "pkg/coverage/coverage.go", "new_path": "pkg/coverage/coverage.go", "diff": "@@ -26,7 +26,6 @@ import (\n\"fmt\"\n\"io\"\n\"sort\"\n- \"sync/atomic\"\n\"testing\"\n\"gvisor.dev/gvisor/pkg/sync\"\n@@ -69,12 +68,18 @@ var globalData struct {\n}\n// ClearCoverageData clears existing coverage data.\n+//\n+//go:norace\nfunc ClearCoverageData() {\ncoverageMu.Lock()\ndefer coverageMu.Unlock()\n+\n+ // We do not use atomic operations while reading/writing to the counters,\n+ // which would drastically degrade performance. Slight discrepancies due to\n+ // racing is okay for the purposes of kcov.\nfor _, counters := range coverdata.Cover.Counters {\nfor index := 0; index < len(counters); index++ {\n- atomic.StoreUint32(&counters[index], 0)\n+ counters[index] = 0\n}\n}\n}\n@@ -114,6 +119,8 @@ var coveragePool = sync.Pool{\n// ensure that each event is only reported once. Due to the limitations of Go\n// coverage tools, we reset the global coverage data every time this function is\n// run.\n+//\n+//go:norace\nfunc ConsumeCoverageData(w io.Writer) int {\nInitCoverageData()\n@@ -125,11 +132,14 @@ func ConsumeCoverageData(w io.Writer) int {\nfor fileNum, file := range globalData.files {\ncounters := coverdata.Cover.Counters[file]\nfor index := 0; index < len(counters); index++ {\n- if atomic.LoadUint32(&counters[index]) == 0 {\n+ // We do not use atomic operations while reading/writing to the counters,\n+ // which would drastically degrade performance. Slight discrepancies due to\n+ // racing is okay for the purposes of kcov.\n+ if counters[index] == 0 {\ncontinue\n}\n// Non-zero coverage data found; consume it and report as a PC.\n- atomic.StoreUint32(&counters[index], 0)\n+ counters[index] = 0\npc := globalData.syntheticPCs[fileNum][index]\nusermem.ByteOrder.PutUint64(pcBuffer[:], pc)\nn, err := w.Write(pcBuffer[:])\n" } ]
Go
Apache License 2.0
google/gvisor
Avoid atomic operations in kcov processing. Atomic operations here significantly slow down gVisor builds with kcov/coverage enabled. Also mark these functions go:norace to avoid complaints from the race detector. PiperOrigin-RevId: 353281865
259,896
22.01.2021 11:27:13
28,800
527ef5fc0307102fa7cc0b32bcc2eb8cca3e21a8
Add tests for RACK Added packetimpact tests for RACK.
[ { "change_type": "MODIFY", "old_path": "test/packetimpact/runner/defs.bzl", "new_path": "test/packetimpact/runner/defs.bzl", "diff": "@@ -277,6 +277,10 @@ ALL_TESTS = [\nPacketimpactTestInfo(\nname = \"tcp_rcv_buf_space\",\n),\n+ PacketimpactTestInfo(\n+ name = \"tcp_rack\",\n+ expect_netstack_failure = True,\n+ ),\n]\ndef validate_all_tests():\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/BUILD", "new_path": "test/packetimpact/tests/BUILD", "diff": "@@ -376,6 +376,20 @@ packetimpact_testbench(\n],\n)\n+packetimpact_testbench(\n+ name = \"tcp_rack\",\n+ srcs = [\"tcp_rack_test.go\"],\n+ deps = [\n+ \"//pkg/abi/linux\",\n+ \"//pkg/binary\",\n+ \"//pkg/tcpip/header\",\n+ \"//pkg/tcpip/seqnum\",\n+ \"//pkg/usermem\",\n+ \"//test/packetimpact/testbench\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n+ ],\n+)\n+\nvalidate_all_tests()\n[packetimpact_go_test(\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/packetimpact/tests/tcp_rack_test.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package tcp_rack_test\n+\n+import (\n+ \"flag\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/binary\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/seqnum\"\n+ \"gvisor.dev/gvisor/pkg/usermem\"\n+ \"gvisor.dev/gvisor/test/packetimpact/testbench\"\n+)\n+\n+func init() {\n+ testbench.Initialize(flag.CommandLine)\n+}\n+\n+const (\n+ // payloadSize is the size used to send packets.\n+ payloadSize = header.TCPDefaultMSS\n+\n+ // simulatedRTT is the time delay between packets sent and acked to\n+ // increase the RTT.\n+ simulatedRTT = 30 * time.Millisecond\n+\n+ // numPktsForRTT is the number of packets sent and acked to establish\n+ // RTT.\n+ numPktsForRTT = 10\n+)\n+\n+func createSACKConnection(t *testing.T) (testbench.DUT, testbench.TCPIPv4, int32, int32) {\n+ dut := testbench.NewDUT(t)\n+ listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)\n+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})\n+\n+ // Enable SACK.\n+ opts := make([]byte, 40)\n+ optsOff := 0\n+ optsOff += header.EncodeNOP(opts[optsOff:])\n+ optsOff += header.EncodeNOP(opts[optsOff:])\n+ optsOff += header.EncodeSACKPermittedOption(opts[optsOff:])\n+\n+ conn.ConnectWithOptions(t, opts[:optsOff])\n+ acceptFd, _ := dut.Accept(t, listenFd)\n+ return dut, conn, acceptFd, listenFd\n+}\n+\n+func closeSACKConnection(t *testing.T, dut testbench.DUT, conn testbench.TCPIPv4, acceptFd, listenFd int32) {\n+ dut.Close(t, acceptFd)\n+ dut.Close(t, listenFd)\n+ conn.Close(t)\n+}\n+\n+func getRTTAndRTO(t *testing.T, dut testbench.DUT, acceptFd int32) (rtt, rto time.Duration) {\n+ info := linux.TCPInfo{}\n+ ret := dut.GetSockOpt(t, acceptFd, unix.SOL_TCP, unix.TCP_INFO, int32(linux.SizeOfTCPInfo))\n+ binary.Unmarshal(ret, usermem.ByteOrder, 
&info)\n+ return time.Duration(info.RTT) * time.Microsecond, time.Duration(info.RTO) * time.Microsecond\n+}\n+\n+func sendAndReceive(t *testing.T, dut testbench.DUT, conn testbench.TCPIPv4, numPkts int, acceptFd int32, sendACK bool) time.Time {\n+ seqNum1 := *conn.RemoteSeqNum(t)\n+ payload := make([]byte, payloadSize)\n+ var lastSent time.Time\n+ for i, sn := 0, seqNum1; i < numPkts; i++ {\n+ lastSent = time.Now()\n+ dut.Send(t, acceptFd, payload, 0)\n+ gotOne, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(sn))}, time.Second)\n+ if err != nil {\n+ t.Fatalf(\"Expect #%d: %s\", i+1, err)\n+ continue\n+ }\n+ if gotOne == nil {\n+ t.Fatalf(\"#%d: expected a packet within a second but got none\", i+1)\n+ }\n+ sn.UpdateForward(seqnum.Size(payloadSize))\n+\n+ if sendACK {\n+ time.Sleep(simulatedRTT)\n+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(sn))})\n+ }\n+ }\n+ return lastSent\n+}\n+\n+// TestRACKTLPAllPacketsLost tests TLP when an entire flight of data is lost.\n+func TestRACKTLPAllPacketsLost(t *testing.T) {\n+ dut, conn, acceptFd, listenFd := createSACKConnection(t)\n+ seqNum1 := *conn.RemoteSeqNum(t)\n+\n+ // Send ACK for data packets to establish RTT.\n+ sendAndReceive(t, dut, conn, numPktsForRTT, acceptFd, true /* sendACK */)\n+ seqNum1.UpdateForward(seqnum.Size(numPktsForRTT * payloadSize))\n+\n+ // We are not sending ACK for these packets.\n+ const numPkts = 5\n+ lastSent := sendAndReceive(t, dut, conn, numPkts, acceptFd, false /* sendACK */)\n+\n+ // Probe Timeout (PTO) should be two times RTT. Check that the last\n+ // packet is retransmitted after probe timeout.\n+ rtt, _ := getRTTAndRTO(t, dut, acceptFd)\n+ pto := rtt * 2\n+ // We expect the 5th packet (the last unacknowledged packet) to be\n+ // retransmitted.\n+ tlpProbe := testbench.Uint32(uint32(seqNum1) + uint32((numPkts-1)*payloadSize))\n+ if _, err := conn.Expect(t, testbench.TCP{SeqNum: tlpProbe}, time.Second); err != nil {\n+ t.Fatalf(\"expected payload was not received: %s %v %v\", err, rtt, pto)\n+ }\n+ diff := time.Now().Sub(lastSent)\n+ if diff < pto {\n+ t.Fatalf(\"expected payload was received before the probe timeout, got: %v, want: %v\", diff, pto)\n+ }\n+ closeSACKConnection(t, dut, conn, acceptFd, listenFd)\n+}\n+\n+// TestRACKTLPLost tests TLP when there are tail losses.\n+// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.4\n+func TestRACKTLPLost(t *testing.T) {\n+ dut, conn, acceptFd, listenFd := createSACKConnection(t)\n+ seqNum1 := *conn.RemoteSeqNum(t)\n+\n+ // Send ACK for data packets to establish RTT.\n+ sendAndReceive(t, dut, conn, numPktsForRTT, acceptFd, true /* sendACK */)\n+ seqNum1.UpdateForward(seqnum.Size(numPktsForRTT * payloadSize))\n+\n+ // We are not sending ACK for these packets.\n+ const numPkts = 10\n+ lastSent := sendAndReceive(t, dut, conn, numPkts, acceptFd, false /* sendACK */)\n+\n+ // Cumulative ACK for #[1-5] packets.\n+ ackNum := seqNum1.Add(seqnum.Size(6 * payloadSize))\n+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(ackNum))})\n+\n+ // Probe Timeout (PTO) should be two times RTT. 
Check that the last\n+ // packet is retransmitted after probe timeout.\n+ rtt, _ := getRTTAndRTO(t, dut, acceptFd)\n+ pto := rtt * 2\n+ // We expect the 10th packet (the last unacknowledged packet) to be\n+ // retransmitted.\n+ tlpProbe := testbench.Uint32(uint32(seqNum1) + uint32((numPkts-1)*payloadSize))\n+ if _, err := conn.Expect(t, testbench.TCP{SeqNum: tlpProbe}, time.Second); err != nil {\n+ t.Fatalf(\"expected payload was not received: %s\", err)\n+ }\n+ diff := time.Now().Sub(lastSent)\n+ if diff < pto {\n+ t.Fatalf(\"expected payload was received before the probe timeout, got: %v, want: %v\", diff, pto)\n+ }\n+ closeSACKConnection(t, dut, conn, acceptFd, listenFd)\n+}\n+\n+// TestRACKTLPWithSACK tests TLP by acknowledging out of order packets.\n+// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-8.1\n+func TestRACKTLPWithSACK(t *testing.T) {\n+ dut, conn, acceptFd, listenFd := createSACKConnection(t)\n+ seqNum1 := *conn.RemoteSeqNum(t)\n+\n+ // Send ACK for data packets to establish RTT.\n+ sendAndReceive(t, dut, conn, numPktsForRTT, acceptFd, true /* sendACK */)\n+ seqNum1.UpdateForward(seqnum.Size(numPktsForRTT * payloadSize))\n+\n+ // We are not sending ACK for these packets.\n+ const numPkts = 3\n+ lastSent := sendAndReceive(t, dut, conn, numPkts, acceptFd, false /* sendACK */)\n+\n+ // SACK for #2 packet.\n+ sackBlock := make([]byte, 40)\n+ start := seqNum1.Add(seqnum.Size(payloadSize))\n+ end := start.Add(seqnum.Size(payloadSize))\n+ sbOff := 0\n+ sbOff += header.EncodeNOP(sackBlock[sbOff:])\n+ sbOff += header.EncodeNOP(sackBlock[sbOff:])\n+ sbOff += header.EncodeSACKBlocks([]header.SACKBlock{{\n+ start, end,\n+ }}, sackBlock[sbOff:])\n+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})\n+\n+ // RACK marks #1 packet as lost and retransmits it.\n+ if _, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(seqNum1))}, time.Second); err != nil {\n+ t.Fatalf(\"expected payload was not received: %s\", err)\n+ }\n+\n+ // ACK for #1 packet.\n+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(end))})\n+\n+ // Probe Timeout (PTO) should be two times RTT. TLP will trigger for #3\n+ // packet. RACK adds an additional timeout of 200ms if the number of\n+ // outstanding packets is equal to 1.\n+ rtt, rto := getRTTAndRTO(t, dut, acceptFd)\n+ pto := rtt*2 + (200 * time.Millisecond)\n+ if rto < pto {\n+ pto = rto\n+ }\n+ // We expect the 3rd packet (the last unacknowledged packet) to be\n+ // retransmitted.\n+ tlpProbe := testbench.Uint32(uint32(seqNum1) + uint32((numPkts-1)*payloadSize))\n+ if _, err := conn.Expect(t, testbench.TCP{SeqNum: tlpProbe}, time.Second); err != nil {\n+ t.Fatalf(\"expected payload was not received: %s\", err)\n+ }\n+ diff := time.Now().Sub(lastSent)\n+ if diff < pto {\n+ t.Fatalf(\"expected payload was received before the probe timeout, got: %v, want: %v\", diff, pto)\n+ }\n+ closeSACKConnection(t, dut, conn, acceptFd, listenFd)\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Add tests for RACK - Added packetimpact tests for RACK. PiperOrigin-RevId: 353282342
260,004
22.01.2021 14:23:48
28,800
f190e13a74c261176d8619a2fa03fd80a5c74f6d
Pass RouteInfo to the route resolve callback The route resolution callback will be called with a stack.ResolvedFieldsResult which will hold the route info so callers can avoid attempting resolution again to check if a previous resolution attempt succeeded or not. Test: integration_test.TestRouteResolvedFields
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/pending_packets.go", "new_path": "pkg/tcpip/stack/pending_packets.go", "diff": "@@ -145,7 +145,7 @@ func (f *packetsPendingLinkResolution) enqueue(r *Route, gso *GSO, proto tcpip.N\n//\n// To make sure B does not interleave with A and C, we make sure A and C are\n// done while holding the lock.\n- routeInfo, ch, err := r.ResolvedFields(nil)\n+ routeInfo, ch, err := r.resolvedFields(nil)\nswitch err {\ncase nil:\n// The route resolved immediately, so we don't need to wait for link\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/route.go", "new_path": "pkg/tcpip/stack/route.go", "diff": "@@ -315,23 +315,42 @@ func (r *Route) ResolveWith(addr tcpip.LinkAddress) {\nr.mu.remoteLinkAddress = addr\n}\n-// ResolvedFields is like Fields but also attempts to resolve the remote link\n-// address if it is not yet known.\n+// ResolvedFieldsResult is the result of a route resolution attempt.\n+type ResolvedFieldsResult struct {\n+ RouteInfo RouteInfo\n+ Success bool\n+}\n+\n+// ResolvedFields attempts to resolve the remote link address if it is not\n+// known.\n//\n-// If address resolution is required, returns tcpip.ErrWouldBlock and a\n-// notification channel for the caller to block on. The channel will be readable\n-// once address resolution is complete (successful or not). If a callback is\n-// provided, it will be called when address resolution is complete, regardless\n-// of success or failure before the notification channel is readable.\n+// If a callback is provided, it will be called before ResolvedFields returns\n+// when address resolution is not required. If address resolution is required,\n+// the callback will be called once address resolution is complete, regardless\n+// of success or failure.\n//\n// Note, the route will not cache the remote link address when address\n// resolution completes.\n-func (r *Route) ResolvedFields(afterResolve func()) (RouteInfo, <-chan struct{}, *tcpip.Error) {\n+func (r *Route) ResolvedFields(afterResolve func(ResolvedFieldsResult)) *tcpip.Error {\n+ _, _, err := r.resolvedFields(afterResolve)\n+ return err\n+}\n+\n+// resolvedFields is like ResolvedFields but also returns a notification channel\n+// when address resolution is required. 
This channel will become readable once\n+// address resolution is complete.\n+//\n+// The route's fields will also be returned, regardless of whether address\n+// resolution is required or not.\n+func (r *Route) resolvedFields(afterResolve func(ResolvedFieldsResult)) (RouteInfo, <-chan struct{}, *tcpip.Error) {\nr.mu.RLock()\nfields := r.fieldsLocked()\nresolutionRequired := r.isResolutionRequiredRLocked()\nr.mu.RUnlock()\nif !resolutionRequired {\n+ if afterResolve != nil {\n+ afterResolve(ResolvedFieldsResult{RouteInfo: fields, Success: true})\n+ }\nreturn fields, nil, nil\n}\n@@ -347,9 +366,14 @@ func (r *Route) ResolvedFields(afterResolve func()) (RouteInfo, <-chan struct{},\nlinkAddressResolutionRequestLocalAddr = r.LocalAddress\n}\n- linkAddr, ch, err := r.outgoingNIC.getNeighborLinkAddress(nextAddr, linkAddressResolutionRequestLocalAddr, r.linkRes, func(LinkResolutionResult) {\n+ afterResolveFields := fields\n+ linkAddr, ch, err := r.outgoingNIC.getNeighborLinkAddress(nextAddr, linkAddressResolutionRequestLocalAddr, r.linkRes, func(r LinkResolutionResult) {\nif afterResolve != nil {\n- afterResolve()\n+ if r.Success {\n+ afterResolveFields.RemoteLinkAddress = r.LinkAddress\n+ }\n+\n+ afterResolve(ResolvedFieldsResult{RouteInfo: afterResolveFields, Success: r.Success})\n}\n})\nif err == nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/integration/link_resolution_test.go", "new_path": "pkg/tcpip/tests/integration/link_resolution_test.go", "diff": "@@ -471,6 +471,141 @@ func TestGetLinkAddress(t *testing.T) {\n}\n}\n+func TestRouteResolvedFields(t *testing.T) {\n+ const (\n+ host1NICID = 1\n+ host2NICID = 4\n+ )\n+\n+ tests := []struct {\n+ name string\n+ netProto tcpip.NetworkProtocolNumber\n+ localAddr tcpip.Address\n+ remoteAddr tcpip.Address\n+ immediatelyResolvable bool\n+ expectedSuccess bool\n+ expectedLinkAddr tcpip.LinkAddress\n+ }{\n+ {\n+ name: \"IPv4 immediately resolvable\",\n+ netProto: ipv4.ProtocolNumber,\n+ localAddr: ipv4Addr1.AddressWithPrefix.Address,\n+ remoteAddr: header.IPv4AllSystems,\n+ immediatelyResolvable: true,\n+ expectedSuccess: true,\n+ expectedLinkAddr: header.EthernetAddressFromMulticastIPv4Address(header.IPv4AllSystems),\n+ },\n+ {\n+ name: \"IPv6 immediately resolvable\",\n+ netProto: ipv6.ProtocolNumber,\n+ localAddr: ipv6Addr1.AddressWithPrefix.Address,\n+ remoteAddr: header.IPv6AllNodesMulticastAddress,\n+ immediatelyResolvable: true,\n+ expectedSuccess: true,\n+ expectedLinkAddr: header.EthernetAddressFromMulticastIPv6Address(header.IPv6AllNodesMulticastAddress),\n+ },\n+ {\n+ name: \"IPv4 resolvable\",\n+ netProto: ipv4.ProtocolNumber,\n+ localAddr: ipv4Addr1.AddressWithPrefix.Address,\n+ remoteAddr: ipv4Addr2.AddressWithPrefix.Address,\n+ immediatelyResolvable: false,\n+ expectedSuccess: true,\n+ expectedLinkAddr: linkAddr2,\n+ },\n+ {\n+ name: \"IPv6 resolvable\",\n+ netProto: ipv6.ProtocolNumber,\n+ localAddr: ipv6Addr1.AddressWithPrefix.Address,\n+ remoteAddr: ipv6Addr2.AddressWithPrefix.Address,\n+ immediatelyResolvable: false,\n+ expectedSuccess: true,\n+ expectedLinkAddr: linkAddr2,\n+ },\n+ {\n+ name: \"IPv4 not resolvable\",\n+ netProto: ipv4.ProtocolNumber,\n+ localAddr: ipv4Addr1.AddressWithPrefix.Address,\n+ remoteAddr: ipv4Addr3.AddressWithPrefix.Address,\n+ immediatelyResolvable: false,\n+ expectedSuccess: false,\n+ },\n+ {\n+ name: \"IPv6 not resolvable\",\n+ netProto: ipv6.ProtocolNumber,\n+ localAddr: ipv6Addr1.AddressWithPrefix.Address,\n+ remoteAddr: ipv6Addr3.AddressWithPrefix.Address,\n+ 
immediatelyResolvable: false,\n+ expectedSuccess: false,\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ for _, useNeighborCache := range []bool{true, false} {\n+ t.Run(fmt.Sprintf(\"UseNeighborCache=%t\", useNeighborCache), func(t *testing.T) {\n+ stackOpts := stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{arp.NewProtocol, ipv4.NewProtocol, ipv6.NewProtocol},\n+ UseNeighborCache: useNeighborCache,\n+ }\n+\n+ host1Stack, _ := setupStack(t, stackOpts, host1NICID, host2NICID)\n+ r, err := host1Stack.FindRoute(host1NICID, \"\", test.remoteAddr, test.netProto, false /* multicastLoop */)\n+ if err != nil {\n+ t.Fatalf(\"host1Stack.FindRoute(%d, '', %s, %d, false): %s\", host1NICID, test.remoteAddr, test.netProto, err)\n+ }\n+ defer r.Release()\n+\n+ var wantRouteInfo stack.RouteInfo\n+ wantRouteInfo.LocalLinkAddress = linkAddr1\n+ wantRouteInfo.LocalAddress = test.localAddr\n+ wantRouteInfo.RemoteAddress = test.remoteAddr\n+ wantRouteInfo.NetProto = test.netProto\n+ wantRouteInfo.Loop = stack.PacketOut\n+ wantRouteInfo.RemoteLinkAddress = test.expectedLinkAddr\n+\n+ ch := make(chan stack.ResolvedFieldsResult, 1)\n+\n+ if !test.immediatelyResolvable {\n+ wantUnresolvedRouteInfo := wantRouteInfo\n+ wantUnresolvedRouteInfo.RemoteLinkAddress = \"\"\n+\n+ if err := r.ResolvedFields(func(r stack.ResolvedFieldsResult) {\n+ ch <- r\n+ }); err != tcpip.ErrWouldBlock {\n+ t.Errorf(\"got r.ResolvedFields(_) = %s, want = %s\", err, tcpip.ErrWouldBlock)\n+ }\n+ if diff := cmp.Diff(stack.ResolvedFieldsResult{RouteInfo: wantRouteInfo, Success: test.expectedSuccess}, <-ch, cmp.AllowUnexported(stack.RouteInfo{})); diff != \"\" {\n+ t.Errorf(\"route resolve result mismatch (-want +got):\\n%s\", diff)\n+ }\n+\n+ if !test.expectedSuccess {\n+ return\n+ }\n+\n+ // At this point the neighbor table should be populated so the route\n+ // should be immediately resolvable.\n+ }\n+\n+ if err := r.ResolvedFields(func(r stack.ResolvedFieldsResult) {\n+ ch <- r\n+ }); err != nil {\n+ t.Errorf(\"r.ResolvedFields(_): %s\", err)\n+ }\n+ select {\n+ case routeResolveRes := <-ch:\n+ if diff := cmp.Diff(stack.ResolvedFieldsResult{RouteInfo: wantRouteInfo, Success: true}, routeResolveRes, cmp.AllowUnexported(stack.RouteInfo{})); diff != \"\" {\n+ t.Errorf(\"route resolve result from resolved route mismatch (-want +got):\\n%s\", diff)\n+ }\n+ default:\n+ t.Fatal(\"expected route to be immediately resolvable\")\n+ }\n+ })\n+ }\n+ })\n+ }\n+}\n+\nfunc TestWritePacketsLinkResolution(t *testing.T) {\nconst (\nhost1NICID = 1\n" } ]
Go
Apache License 2.0
google/gvisor
Pass RouteInfo to the route resolve callback The route resolution callback will be called with a stack.ResolvedFieldsResult which will hold the route info so callers can avoid attempting resolution again to check if a previous resolution attempt succeeded or not. Test: integration_test.TestRouteResolvedFields PiperOrigin-RevId: 353319019
260,004
22.01.2021 16:03:27
28,800
76da673a0ddaf33e410d48501a9b089979411d4d
Do not modify IGMP packets when verifying checksum
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/igmp.go", "new_path": "pkg/tcpip/network/ipv4/igmp.go", "diff": "@@ -157,14 +157,13 @@ func (igmp *igmpState) handleIGMP(pkt *stack.PacketBuffer) {\n}\nh := header.IGMP(headerView)\n- // Temporarily reset the checksum field to 0 in order to calculate the proper\n- // checksum.\n- wantChecksum := h.Checksum()\n- h.SetChecksum(0)\n- gotChecksum := ^header.ChecksumVV(pkt.Data, 0 /* initial */)\n- h.SetChecksum(wantChecksum)\n-\n- if gotChecksum != wantChecksum {\n+ // As per RFC 1071 section 1.3,\n+ //\n+ // To check a checksum, the 1's complement sum is computed over the\n+ // same set of octets, including the checksum field. If the result\n+ // is all 1 bits (-0 in 1's complement arithmetic), the check\n+ // succeeds.\n+ if header.ChecksumVV(pkt.Data, 0 /* initial */) != 0xFFFF {\nreceived.checksumErrors.Increment()\nreturn\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Do not modify IGMP packets when verifying checksum PiperOrigin-RevId: 353336894
259,992
22.01.2021 16:24:17
28,800
99aa5eedcfa3f2e458171cbc6b20ee6f78af3229
Update containerd minimal version
[ { "change_type": "MODIFY", "old_path": "g3doc/user_guide/FAQ.md", "new_path": "g3doc/user_guide/FAQ.md", "diff": "@@ -137,9 +137,16 @@ sandbox isolation. There are a few different workarounds you can try:\n* Use IPs instead of container names.\n* Use [Kubernetes][k8s]. Container name lookup works fine in Kubernetes.\n+### I'm getting an error like `dial unix /run/containerd/s/09e4...8cff: connect: connection refused: unknown` {#shim-connect}\n+\n+This error may happen when using `gvisor-containerd-shim` with a `containerd`\n+that does not contain the fix for [CVE-2020-15257]. The resolve the issue,\n+update containerd to 1.3.9 or 1.4.3 (or newer versions respectively).\n+\n[security-model]: /docs/architecture_guide/security/\n[host-net]: /docs/user_guide/networking/#network-passthrough\n[debugging]: /docs/user_guide/debugging/\n[filesystem]: /docs/user_guide/filesystem/\n[docker]: /docs/user_guide/quick_start/docker/\n[k8s]: /docs/user_guide/quick_start/kubernetes/\n+[CVE-2020-15257]: https://github.com/containerd/containerd/security/advisories/GHSA-36xw-fx78-c5r4\n" }, { "change_type": "MODIFY", "old_path": "g3doc/user_guide/containerd/configuration.md", "new_path": "g3doc/user_guide/containerd/configuration.md", "diff": "# Containerd Advanced Configuration\nThis document describes how to configure runtime options for\n-`containerd-shim-runsc-v1`. This follows the\n-[Containerd Quick Start](./quick_start.md) and requires containerd 1.2 or later.\n+`containerd-shim-runsc-v1`. You can find the installation instructions and\n+minimal requirements in [Containerd Quick Start](./quick_start.md).\n## Shim Configuration\n@@ -47,27 +47,6 @@ When you are done, restart containerd to pick up the changes.\nsudo systemctl restart containerd\n```\n-### Containerd 1.2\n-\n-For containerd 1.2, the config file is not configurable. It should be named\n-`config.toml` and located in the runtime root. By default, this is\n-`/run/containerd/runsc`.\n-\n-### Example: Enable the KVM platform\n-\n-gVisor enables the use of a number of platforms. This example shows how to\n-configure `containerd-shim-runsc-v1` to use gvisor with the KVM platform.\n-\n-Find out more about platform in the\n-[Platforms Guide](../../architecture_guide/platforms.md).\n-\n-```shell\n-cat <<EOF | sudo tee /etc/containerd/runsc.toml\n-[runsc_config]\n- platform = \"kvm\"\n-EOF\n-```\n-\n## Debug\nWhen `shim_debug` is enabled in `/etc/containerd/config.toml`, containerd will\n" } ]
Go
Apache License 2.0
google/gvisor
Update containerd minimal version PiperOrigin-RevId: 353340554
259,992
22.01.2021 18:04:14
28,800
cac70c65e6b5b8a7a3eda55f83f9ceffdfdaba89
Detect failures in forked function EXPECT*/ASSERT* functions are not async-signal-safe and should not be called from the function passed to InForkedProcess. However, these happen accidentally sometimes but do no cause InForkedProcess to fail. Detect and notify in such cases.
[ { "change_type": "MODIFY", "old_path": "test/util/BUILD", "new_path": "test/util/BUILD", "diff": "@@ -172,6 +172,7 @@ cc_library(\n\":posix_error\",\n\":save_util\",\n\":test_util\",\n+ gtest,\n\"@com_google_absl//absl/strings\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "test/util/multiprocess_util.cc", "new_path": "test/util/multiprocess_util.cc", "diff": "@@ -154,6 +154,9 @@ PosixErrorOr<int> InForkedProcess(const std::function<void()>& fn) {\npid_t pid = fork();\nif (pid == 0) {\nfn();\n+ TEST_CHECK_MSG(!::testing::Test::HasFailure(),\n+ \"EXPECT*/ASSERT* failed. These are not async-signal-safe \"\n+ \"and must not be called from fn.\");\n_exit(0);\n}\nMaybeSave();\n" } ]
Go
Apache License 2.0
google/gvisor
Detect failures in forked function EXPECT*/ASSERT* functions are not async-signal-safe and should not be called from the function passed to InForkedProcess. However, these happen accidentally sometimes but do no cause InForkedProcess to fail. Detect and notify in such cases. PiperOrigin-RevId: 353354540
259,877
24.01.2021 10:14:25
-10,800
a7f11f8161af71b4951ca99cf410ce120cccff97
FAQ.md: fix kubeadm init instructions The current version of FAQ.md contains an incorrect example of how to instruct kubelet to prefer containerd over docker. More specifically, it refers to a non-existent `--cni-socket` flag whereas it should have been `--cri-socket`. The suggested PR fixes that.
[ { "change_type": "MODIFY", "old_path": "g3doc/user_guide/FAQ.md", "new_path": "g3doc/user_guide/FAQ.md", "diff": "@@ -107,11 +107,11 @@ kubeadm to create your cluster please check if Docker is also installed on that\nsystem. Kubeadm prefers using Docker if both Docker and containerd are\ninstalled.\n-Please recreate your cluster and set the `--cni-socket` option on kubeadm\n+Please recreate your cluster and set the `--cri-socket` option on kubeadm\ncommands. For example:\n```bash\n-kubeadm init --cni-socket=/var/run/containerd/containerd.sock ...\n+kubeadm init --cri-socket=/var/run/containerd/containerd.sock ...\n```\nTo fix an existing cluster edit the `/var/lib/kubelet/kubeadm-flags.env` file\n" } ]
Go
Apache License 2.0
google/gvisor
FAQ.md: fix kubeadm init instructions The current version of FAQ.md contains an incorrect example of how to instruct kubelet to prefer containerd over docker. More specifically, it refers to a non-existent `--cni-socket` flag whereas it should have been `--cri-socket`. The suggested PR fixes that.
259,992
25.01.2021 10:28:33
28,800
a358de583b54be3eab2f960bdaffe74c619e84d4
Fix Inotify.Exec test The test was execve itself into `/bin/true`, so the test was not actually executing.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -980,6 +980,7 @@ cc_binary(\n\"//test/util:epoll_util\",\n\"//test/util:file_descriptor\",\n\"//test/util:fs_util\",\n+ \"//test/util:multiprocess_util\",\n\"//test/util:posix_error\",\n\"//test/util:temp_path\",\n\"//test/util:test_main\",\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/inotify.cc", "new_path": "test/syscalls/linux/inotify.cc", "diff": "#include \"test/util/epoll_util.h\"\n#include \"test/util/file_descriptor.h\"\n#include \"test/util/fs_util.h\"\n+#include \"test/util/multiprocess_util.h\"\n#include \"test/util/posix_error.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n@@ -315,8 +316,7 @@ PosixErrorOr<std::vector<Event>> DrainEvents(int fd) {\n}\nPosixErrorOr<FileDescriptor> InotifyInit1(int flags) {\n- int fd;\n- EXPECT_THAT(fd = inotify_init1(flags), SyscallSucceeds());\n+ int fd = inotify_init1(flags);\nif (fd < 0) {\nreturn PosixError(errno, \"inotify_init1() failed\");\n}\n@@ -325,9 +325,7 @@ PosixErrorOr<FileDescriptor> InotifyInit1(int flags) {\nPosixErrorOr<int> InotifyAddWatch(int fd, const std::string& path,\nuint32_t mask) {\n- int wd;\n- EXPECT_THAT(wd = inotify_add_watch(fd, path.c_str(), mask),\n- SyscallSucceeds());\n+ int wd = inotify_add_watch(fd, path.c_str(), mask);\nif (wd < 0) {\nreturn PosixError(errno, \"inotify_add_watch() failed\");\n}\n@@ -1936,24 +1934,31 @@ TEST(Inotify, Xattr) {\n}\nTEST(Inotify, Exec) {\n- const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n- const TempPath bin = ASSERT_NO_ERRNO_AND_VALUE(\n- TempPath::CreateSymlinkTo(dir.path(), \"/bin/true\"));\n+ // TODO(gvisor.dev/issues/5348)\n+ SKIP_IF(IsRunningOnGvisor());\nconst FileDescriptor fd =\nASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\nconst int wd = ASSERT_NO_ERRNO_AND_VALUE(\n- InotifyAddWatch(fd.get(), bin.path(), IN_ALL_EVENTS));\n+ InotifyAddWatch(fd.get(), \"/bin/true\", IN_ALL_EVENTS));\n// Perform exec.\n- ScopedThread t([&bin]() {\n- ASSERT_THAT(execl(bin.path().c_str(), bin.path().c_str(), (char*)nullptr),\n- SyscallSucceeds());\n- });\n- t.Join();\n+ pid_t child = -1;\n+ int execve_errno = -1;\n+ auto kill = ASSERT_NO_ERRNO_AND_VALUE(\n+ ForkAndExec(\"/bin/true\", {}, {}, nullptr, &child, &execve_errno));\n+ ASSERT_EQ(0, execve_errno);\n+\n+ int status;\n+ ASSERT_THAT(RetryEINTR(waitpid)(child, &status, 0), SyscallSucceeds());\n+ EXPECT_EQ(0, status);\n+\n+ // Process cleanup no longer needed.\n+ kill.Release();\nstd::vector<Event> events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(fd.get()));\n- EXPECT_THAT(events, Are({Event(IN_OPEN, wd), Event(IN_ACCESS, wd)}));\n+ EXPECT_THAT(events, Are({Event(IN_OPEN, wd), Event(IN_ACCESS, wd),\n+ Event(IN_CLOSE_NOWRITE, wd)}));\n}\n// Watches without IN_EXCL_UNLINK, should continue to emit events for file\n" } ]
Go
Apache License 2.0
google/gvisor
Fix Inotify.Exec test The test was execve itself into `/bin/true`, so the test was not actually executing. PiperOrigin-RevId: 353676855
259,992
25.01.2021 11:00:26
28,800
0592dac851addb9b95c9c56b90587d809a894832
Remove side effect from shm tests Individual test cases must not rely on being executed in a clean environment.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/shm.cc", "new_path": "test/syscalls/linux/shm.cc", "diff": "@@ -256,32 +256,26 @@ TEST(ShmTest, IpcInfo) {\n}\nTEST(ShmTest, ShmInfo) {\n- struct shm_info info;\n-\n- // We generally can't know what other processes on a linux machine\n- // does with shared memory segments, so we can't test specific\n- // numbers on Linux. When running under gvisor, we're guaranteed to\n- // be the only ones using shm, so we can easily verify machine-wide\n- // numbers.\n- if (IsRunningOnGvisor()) {\n- ASSERT_NO_ERRNO(Shmctl(0, SHM_INFO, &info));\n- EXPECT_EQ(info.used_ids, 0);\n- EXPECT_EQ(info.shm_tot, 0);\n- EXPECT_EQ(info.shm_rss, 0);\n- EXPECT_EQ(info.shm_swp, 0);\n- }\n+ // Take a snapshot of the system before the test runs.\n+ struct shm_info snap;\n+ ASSERT_NO_ERRNO(Shmctl(0, SHM_INFO, &snap));\nconst ShmSegment shm = ASSERT_NO_ERRNO_AND_VALUE(\nShmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777));\nconst char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0));\n+ struct shm_info info;\nASSERT_NO_ERRNO(Shmctl(1, SHM_INFO, &info));\n+ // We generally can't know what other processes on a linux machine do with\n+ // shared memory segments, so we can't test specific numbers on Linux. When\n+ // running under gvisor, we're guaranteed to be the only ones using shm, so\n+ // we can easily verify machine-wide numbers.\nif (IsRunningOnGvisor()) {\nASSERT_NO_ERRNO(Shmctl(shm.id(), SHM_INFO, &info));\n- EXPECT_EQ(info.used_ids, 1);\n- EXPECT_EQ(info.shm_tot, kAllocSize / kPageSize);\n- EXPECT_EQ(info.shm_rss, kAllocSize / kPageSize);\n+ EXPECT_EQ(info.used_ids, snap.used_ids + 1);\n+ EXPECT_EQ(info.shm_tot, snap.shm_tot + (kAllocSize / kPageSize));\n+ EXPECT_EQ(info.shm_rss, snap.shm_rss + (kAllocSize / kPageSize));\nEXPECT_EQ(info.shm_swp, 0); // Gvisor currently never swaps.\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Remove side effect from shm tests Individual test cases must not rely on being executed in a clean environment. PiperOrigin-RevId: 353684155
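The underlying technique is a snapshot/delta pattern: when a test reads machine-wide state such as the SHM_INFO counters, it should record that state first and assert on the difference it caused rather than on absolute values. The sketch below illustrates only that pattern, under the assumption that nothing else on the machine creates or destroys System V shared memory segments while it runs; SHM_INFO and struct shm_info are Linux-specific and come from <sys/shm.h>, and the 4096-byte segment size is arbitrary.

#include <sys/ipc.h>
#include <sys/shm.h>
#include <cassert>

int main() {
  // Snapshot machine-wide shm accounting before touching anything.
  struct shm_info before = {};
  shmctl(0, SHM_INFO, reinterpret_cast<struct shmid_ds*>(&before));

  // The operation under test: create one private segment.
  int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
  assert(id >= 0);

  // Assert on the delta, not on absolute counters that other processes
  // may also be contributing to.
  struct shm_info after = {};
  shmctl(0, SHM_INFO, reinterpret_cast<struct shmid_ds*>(&after));
  assert(after.used_ids == before.used_ids + 1);

  shmctl(id, IPC_RMID, nullptr);  // Leave no residue behind for later tests.
  return 0;
}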
259,885
25.01.2021 11:56:16
28,800
032d14a06611304b18db659513f6db1edbc7cf75
Remove synchronous decommit for MADV_DONTNEED on private anonymous vmas.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/mm/syscalls.go", "new_path": "pkg/sentry/mm/syscalls.go", "diff": "@@ -1055,18 +1055,11 @@ func (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error {\nmm.activeMu.Lock()\ndefer mm.activeMu.Unlock()\n- // Linux's mm/madvise.c:madvise_dontneed() => mm/memory.c:zap_page_range()\n- // is analogous to our mm.invalidateLocked(ar, true, true). We inline this\n- // here, with the special case that we synchronously decommit\n- // uniquely-owned (non-copy-on-write) pages for private anonymous vma,\n- // which is the common case for MADV_DONTNEED. Invalidating these pmas, and\n- // allowing them to be reallocated when touched again, increases pma\n- // fragmentation, which may significantly reduce performance for\n- // non-vectored I/O implementations. Also, decommitting synchronously\n- // ensures that Decommit immediately reduces host memory usage.\n+ // This is invalidateLocked(invalidatePrivate=true, invalidateShared=true),\n+ // with the additional wrinkle that we must refuse to invalidate pmas under\n+ // mlocked vmas.\nvar didUnmapAS bool\npseg := mm.pmas.LowerBoundSegment(ar.Start)\n- mf := mm.mfp.MemoryFile()\nfor vseg := mm.vmas.LowerBoundSegment(ar.Start); vseg.Ok() && vseg.Start() < ar.End; vseg = vseg.NextSegment() {\nvma := vseg.ValuePtr()\nif vma.mlockMode != memmap.MLockNone {\n@@ -1081,20 +1074,8 @@ func (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error {\n}\n}\nfor pseg.Ok() && pseg.Start() < vsegAR.End {\n- pma := pseg.ValuePtr()\n- if pma.private && !mm.isPMACopyOnWriteLocked(vseg, pseg) {\n- psegAR := pseg.Range().Intersect(ar)\n- if vsegAR.IsSupersetOf(psegAR) && vma.mappable == nil {\n- if err := mf.Decommit(pseg.fileRangeOf(psegAR)); err == nil {\n- pseg = pseg.NextSegment()\n- continue\n- }\n- // If an error occurs, fall through to the general\n- // invalidation case below.\n- }\n- }\npseg = mm.pmas.Isolate(pseg, vsegAR)\n- pma = pseg.ValuePtr()\n+ pma := pseg.ValuePtr()\nif !didUnmapAS {\n// Unmap all of ar, not just pseg.Range(), to minimize host\n// syscalls. AddressSpace mappings must be removed before\n" } ]
Go
Apache License 2.0
google/gvisor
Remove synchronous decommit for MADV_DONTNEED on private anonymous vmas. PiperOrigin-RevId: 353697719
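For context, the userspace-visible contract that Decommit ultimately serves here is the one defined by madvise(MADV_DONTNEED): on a private anonymous mapping, the advised pages are discarded and subsequent reads see zero-filled memory, while how promptly the backing memory is released to the host is an implementation detail. The sketch below only demonstrates that portable Linux contract; it does not reproduce the sentry internals touched by this change, and the single-page length is arbitrary.

#include <sys/mman.h>
#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t len = 4096;
  void* mem = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  char* p = static_cast<char*>(mem);

  p[0] = 42;                       // Fault in and dirty a private anonymous page.
  madvise(p, len, MADV_DONTNEED);  // Advise the kernel to drop the page.

  // Reads after MADV_DONTNEED on a private anonymous mapping see zeroes;
  // the dirty data is gone regardless of when memory is actually freed.
  std::printf("p[0] after MADV_DONTNEED: %d\n", p[0]);  // Prints 0.

  munmap(mem, len);
  return 0;
}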