author | date | timezone | hash | message | mods | language | license | repo | original_message
---|---|---|---|---|---|---|---|---|---|
259,872 | 03.12.2021 14:42:02 | -28,800 | b993286092b9ad1cdc90a5788c665e25c8f31514 | blog: adjust blog content
Resize image 2021-12-02-flamegraph-figure2.png. Adjust
some paragraphs and the getpid performance tables. | [
{
"change_type": "MODIFY",
"old_path": "website/assets/images/2021-12-02-flamegraph-figure2.png",
"new_path": "website/assets/images/2021-12-02-flamegraph-figure2.png",
"diff": "Binary files a/website/assets/images/2021-12-02-flamegraph-figure2.png and b/website/assets/images/2021-12-02-flamegraph-figure2.png differ\n"
},
{
"change_type": "MODIFY",
"old_path": "website/blog/2021-12-02-running-gvisor-in-production-at-scale-in-ant.md",
"new_path": "website/blog/2021-12-02-running-gvisor-in-production-at-scale-in-ant.md",
"diff": "@@ -25,11 +25,13 @@ overhead; another 25% have <3% overhead. Some of our most valued application are\nthe focus of our optimization, and get even better performance compared with\nrunc.\n-The rest of this blog is organized as follows: - First, we analyze the cost of\n-different syscall paths in gVisor. - Then, a way to profile a whole picture of a\n-instance is proposed to find out if some slow syscall paths are encountered. -\n-Some invisible overhead in Go runtime is discussed. - At last, a short summary\n-on performance optimization with some other factors on production adoption.\n+The rest of this blog is organized as follows:\n+* First, we analyze the cost of different syscall paths in gVisor.\n+* Then, a way to profile a whole picture of a instance is proposed to find out\n+ if some slow syscall paths are encountered. Some invisible overhead in Go\n+ runtime is discussed.\n+* At last, a short summary on performance optimization with some other factors\n+ on production adoption.\nFor convenience of discussion, we are targeting KVM-based, or hypervisor-based\nplatforms, unless explicitly stated.\n@@ -114,14 +116,14 @@ on a Intel(R) Xeon(R) CPU E5-2650 v2 platform, using\nAs we can see, for KVM platform, the syscall interception costs more than 10x\nthan a native Linux syscall.\n- | getpid benchmark (ns)\n+getpid | benchmark (ns)\n------------ | ---------------------\nNative | 62\nNative-KPTI | 236\nrunsc-KVM | 830\nrunsc-ptrace | 6249\n-* \"Native\" stands for using vanilla linux kernel.\n+\\* \"Native\" stands for using vanilla linux kernel.\nTo understand the structural cost of syscall interception, we did a\n[quantitative analysis](https://github.com/google/gvisor/issues/2354) on kvm\n"
}
] | Go | Apache License 2.0 | google/gvisor | blog: adjust blog content
Resize image 2021-12-02-flamegraph-figure2.png. Adjust
some paragraphs and the getpid performance tables.
Signed-off-by: Yong He <[email protected]>
Signed-off-by: Jianfeng Tan <[email protected]> |
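Annotation: the getpid table inside this diff (Native 62 ns, Native-KPTI 236 ns, runsc-KVM 830 ns, runsc-ptrace 6249 ns) compares raw syscall-interception latency. The numbers above come from the blog's own harness; the sketch below is only a back-of-the-envelope way to observe the same effect on a Linux host, not the benchmark used for the table.

```go
package main

import (
	"fmt"
	"syscall"
	"time"
)

func main() {
	const iters = 1_000_000
	start := time.Now()
	for i := 0; i < iters; i++ {
		// On Linux this issues a real getpid(2) each iteration; under
		// runsc the syscall is intercepted by the Sentry instead, which
		// is exactly the overhead the table quantifies.
		syscall.Getpid()
	}
	fmt.Printf("getpid: %d ns/op\n", time.Since(start).Nanoseconds()/iters)
}
```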
260,004 | 09.12.2021 15:14:03 | 28,800 | 7f4b03068f03fa20e1f152b3ba4dc66f16a7d7ce | Support NATing ICMPv6 Echo packets
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/icmpv6.go",
"new_path": "pkg/tcpip/header/icmpv6.go",
"diff": "@@ -240,6 +240,13 @@ func (b ICMPv6) SetIdent(ident uint16) {\nbinary.BigEndian.PutUint16(b[icmpv6IdentOffset:], ident)\n}\n+// SetIdentWithChecksumUpdate sets the Ident field and updates the checksum.\n+func (b ICMPv6) SetIdentWithChecksumUpdate(new uint16) {\n+ old := b.Ident()\n+ b.SetIdent(new)\n+ b.SetChecksum(^checksumUpdate2ByteAlignedUint16(^b.Checksum(), old, new))\n+}\n+\n// Sequence retrieves the Sequence field from an ICMPv6 message.\nfunc (b ICMPv6) Sequence() uint16 {\nreturn binary.BigEndian.Uint16(b[icmpv6SequenceOffset:])\n@@ -284,3 +291,9 @@ func ICMPv6Checksum(params ICMPv6ChecksumParams) uint16 {\nreturn ^xsum\n}\n+\n+// UpdateChecksumPseudoHeaderAddress updates the checksum to reflect an\n+// updated address in the pseudo header.\n+func (b ICMPv6) UpdateChecksumPseudoHeaderAddress(old, new tcpip.Address) {\n+ b.SetChecksum(^checksumUpdate2ByteAlignedAddress(^b.Checksum(), old, new))\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/conntrack.go",
"new_path": "pkg/tcpip/stack/conntrack.go",
"diff": "@@ -300,14 +300,16 @@ func getHeaders(pkt *PacketBuffer) (netHdr header.Network, transHdr header.Trans\nif tcpHeader := header.TCP(pkt.TransportHeader().View()); len(tcpHeader) >= header.TCPMinimumSize {\nreturn pkt.Network(), tcpHeader, false, true\n}\n+ return nil, nil, false, false\ncase header.UDPProtocolNumber:\nif udpHeader := header.UDP(pkt.TransportHeader().View()); len(udpHeader) >= header.UDPMinimumSize {\nreturn pkt.Network(), udpHeader, false, true\n}\n+ return nil, nil, false, false\ncase header.ICMPv4ProtocolNumber:\nicmpHeader := header.ICMPv4(pkt.TransportHeader().View())\nif len(icmpHeader) < header.ICMPv4MinimumSize {\n- break\n+ return nil, nil, false, false\n}\nswitch icmpType := icmpHeader.Type(); icmpType {\n@@ -331,7 +333,21 @@ func getHeaders(pkt *PacketBuffer) (netHdr header.Network, transHdr header.Trans\nif netHdr, transHdr, ok := getEmbeddedNetAndTransHeaders(pkt, header.IPv4MinimumSize, v4NetAndTransHdr, pkt.tuple.id().transProto); ok {\nreturn netHdr, transHdr, true, true\n}\n+ return nil, nil, false, false\ncase header.ICMPv6ProtocolNumber:\n+ icmpHeader := header.ICMPv6(pkt.TransportHeader().View())\n+ if len(icmpHeader) < header.ICMPv6MinimumSize {\n+ return nil, nil, false, false\n+ }\n+\n+ switch icmpType := icmpHeader.Type(); icmpType {\n+ case header.ICMPv6EchoRequest, header.ICMPv6EchoReply:\n+ return pkt.Network(), icmpHeader, false, true\n+ case header.ICMPv6DstUnreachable, header.ICMPv6PacketTooBig, header.ICMPv6TimeExceeded, header.ICMPv6ParamProblem:\n+ default:\n+ panic(fmt.Sprintf(\"unexpected ICMPv6 type = %d\", icmpType))\n+ }\n+\nh, ok := pkt.Data().PullUp(header.IPv6MinimumSize)\nif !ok {\npanic(fmt.Sprintf(\"should have a valid IPv6 packet; only have %d bytes, want at least %d bytes\", pkt.Data().Size(), header.IPv6MinimumSize))\n@@ -349,9 +365,10 @@ func getHeaders(pkt *PacketBuffer) (netHdr header.Network, transHdr header.Trans\nif netHdr, transHdr, ok := getEmbeddedNetAndTransHeaders(pkt, header.IPv6MinimumSize, v6NetAndTransHdr, transProto); ok {\nreturn netHdr, transHdr, true, true\n}\n- }\n-\nreturn nil, nil, false, false\n+ default:\n+ panic(fmt.Sprintf(\"unexpected transport protocol = %d\", pkt.TransportProtocolNumber))\n+ }\n}\nfunc getTupleIDForRegularPacket(netHdr header.Network, netProto tcpip.NetworkProtocolNumber, transHdr header.Transport, transProto tcpip.TransportProtocolNumber) tupleID {\n@@ -458,6 +475,13 @@ func getTupleID(pkt *PacketBuffer) (tupleID, getTupleIDDisposition) {\n}\nswitch icmp.Type() {\n+ case header.ICMPv6EchoRequest:\n+ return getTupleIDForEchoPacket(pkt, icmp.Ident(), true /* request */), getTupleIDOKAndAllowNewConn\n+ case header.ICMPv6EchoReply:\n+ // Do not create a new connection in response to a reply packet as only\n+ // the first packet of a connection should create a conntrack entry but\n+ // a reply is never the first packet sent for a connection.\n+ return getTupleIDForEchoPacket(pkt, icmp.Ident(), false /* request */), getTupleIDOKAndDontAllowNewConn\ncase header.ICMPv6DstUnreachable, header.ICMPv6PacketTooBig, header.ICMPv6TimeExceeded, header.ICMPv6ParamProblem:\ndefault:\nreturn tupleID{}, getTupleIDNotOK\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables_targets.go",
"new_path": "pkg/tcpip/stack/iptables_targets.go",
"diff": "@@ -208,7 +208,7 @@ func snatAction(pkt *PacketBuffer, hook Hook, r *Route, port uint16, address tcp\nif port == 0 {\nportsOrIdents = targetPortRangeForTCPAndUDP(header.TCP(pkt.TransportHeader().View()).SourcePort())\n}\n- case header.ICMPv4ProtocolNumber:\n+ case header.ICMPv4ProtocolNumber, header.ICMPv6ProtocolNumber:\n// Allow NAT-ing to any 16-bit value for ICMP's Ident field to match Linux\n// behaviour.\n//\n@@ -289,20 +289,20 @@ func (mt *MasqueradeTarget) Action(pkt *PacketBuffer, hook Hook, r *Route, addre\nreturn snatAction(pkt, hook, r, 0 /* port */, address)\n}\n-func rewritePacket(n header.Network, t header.Transport, updateSRCFields, fullChecksum, updatePseudoHeader bool, newPort uint16, newAddr tcpip.Address) {\n+func rewritePacket(n header.Network, t header.Transport, updateSRCFields, fullChecksum, updatePseudoHeader bool, newPortOrIdent uint16, newAddr tcpip.Address) {\nswitch t := t.(type) {\ncase header.ChecksummableTransport:\nif updateSRCFields {\nif fullChecksum {\n- t.SetSourcePortWithChecksumUpdate(newPort)\n+ t.SetSourcePortWithChecksumUpdate(newPortOrIdent)\n} else {\n- t.SetSourcePort(newPort)\n+ t.SetSourcePort(newPortOrIdent)\n}\n} else {\nif fullChecksum {\n- t.SetDestinationPortWithChecksumUpdate(newPort)\n+ t.SetDestinationPortWithChecksumUpdate(newPortOrIdent)\n} else {\n- t.SetDestinationPort(newPort)\n+ t.SetDestinationPort(newPortOrIdent)\n}\n}\n@@ -320,15 +320,37 @@ func rewritePacket(n header.Network, t header.Transport, updateSRCFields, fullCh\nswitch icmpType := t.Type(); icmpType {\ncase header.ICMPv4Echo:\nif updateSRCFields {\n- t.SetIdentWithChecksumUpdate(newPort)\n+ t.SetIdentWithChecksumUpdate(newPortOrIdent)\n}\ncase header.ICMPv4EchoReply:\nif !updateSRCFields {\n- t.SetIdentWithChecksumUpdate(newPort)\n+ t.SetIdentWithChecksumUpdate(newPortOrIdent)\n}\ndefault:\npanic(fmt.Sprintf(\"unexpected ICMPv4 type = %d\", icmpType))\n}\n+ case header.ICMPv6:\n+ switch icmpType := t.Type(); icmpType {\n+ case header.ICMPv6EchoRequest:\n+ if updateSRCFields {\n+ t.SetIdentWithChecksumUpdate(newPortOrIdent)\n+ }\n+ case header.ICMPv6EchoReply:\n+ if !updateSRCFields {\n+ t.SetIdentWithChecksumUpdate(newPortOrIdent)\n+ }\n+ default:\n+ panic(fmt.Sprintf(\"unexpected ICMPv4 type = %d\", icmpType))\n+ }\n+\n+ var oldAddr tcpip.Address\n+ if updateSRCFields {\n+ oldAddr = n.SourceAddress()\n+ } else {\n+ oldAddr = n.DestinationAddress()\n+ }\n+\n+ t.UpdateChecksumPseudoHeaderAddress(oldAddr, newAddr)\ndefault:\npanic(fmt.Sprintf(\"unhandled transport = %#v\", t))\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tests/integration/iptables_test.go",
"new_path": "pkg/tcpip/tests/integration/iptables_test.go",
"diff": "@@ -1372,6 +1372,32 @@ func TestNATEcho(t *testing.T) {\n)\n}\n+ v6EchoPkt := func(srcAddr, dstAddr tcpip.Address, reply bool) buffer.View {\n+ icmpType := header.ICMPv6EchoRequest\n+ if reply {\n+ icmpType = header.ICMPv6EchoReply\n+ }\n+\n+ return icmpv6Packet(srcAddr, dstAddr, icmpType, ident)\n+ }\n+\n+ checkV6EchoPkt := func(t *testing.T, v buffer.View, srcAddr, dstAddr tcpip.Address, reply bool) {\n+ t.Helper()\n+\n+ icmpType := header.ICMPv6EchoRequest\n+ if reply {\n+ icmpType = header.ICMPv6EchoReply\n+ }\n+\n+ checker.IPv6(t, v,\n+ checker.SrcAddr(srcAddr),\n+ checker.DstAddr(dstAddr),\n+ checker.ICMPv6(\n+ checker.ICMPv6Type(icmpType),\n+ ),\n+ )\n+ }\n+\ntype natTypeTest struct {\nname string\nnatTypes []natType\n@@ -1422,6 +1448,40 @@ func TestNATEcho(t *testing.T) {\n},\n},\n},\n+ {\n+ name: \"IPv6\",\n+ netProto: header.IPv6ProtocolNumber,\n+ transProto: header.ICMPv6ProtocolNumber,\n+ echoPkt: v6EchoPkt,\n+ checkEchoPkt: checkV6EchoPkt,\n+\n+ natTypes: []natTypeTest{\n+ {\n+ name: \"SNAT\",\n+ natTypes: snatTypes,\n+ requestSrc: utils.Host2IPv6Addr.AddressWithPrefix.Address,\n+ requestDst: utils.Host1IPv6Addr.AddressWithPrefix.Address,\n+ expectedRequestSrc: utils.RouterNIC1IPv6Addr.AddressWithPrefix.Address,\n+ expectedRequestDst: utils.Host1IPv6Addr.AddressWithPrefix.Address,\n+ },\n+ {\n+ name: \"DNAT\",\n+ natTypes: []natType{dnatTarget},\n+ requestSrc: utils.Host2IPv6Addr.AddressWithPrefix.Address,\n+ requestDst: utils.RouterNIC2IPv6Addr.AddressWithPrefix.Address,\n+ expectedRequestSrc: utils.Host2IPv6Addr.AddressWithPrefix.Address,\n+ expectedRequestDst: utils.Host1IPv6Addr.AddressWithPrefix.Address,\n+ },\n+ {\n+ name: \"Twice-NAT\",\n+ natTypes: twiceNATTypes,\n+ requestSrc: utils.Host2IPv6Addr.AddressWithPrefix.Address,\n+ requestDst: utils.RouterNIC2IPv6Addr.AddressWithPrefix.Address,\n+ expectedRequestSrc: utils.RouterNIC1IPv6Addr.AddressWithPrefix.Address,\n+ expectedRequestDst: utils.Host1IPv6Addr.AddressWithPrefix.Address,\n+ },\n+ },\n+ },\n}\nfor _, test := range tests {\n@@ -2057,6 +2117,27 @@ func tcpv6Packet(srcAddr, dstAddr tcpip.Address, srcPort, dstPort uint16, dataSi\nreturn hdr.View()\n}\n+func icmpv6Packet(srcAddr, dstAddr tcpip.Address, icmpType header.ICMPv6Type, ident uint16) buffer.View {\n+ hdr := buffer.NewPrependable(header.IPv6MinimumSize + header.ICMPv6MinimumSize)\n+ icmp := header.ICMPv6(hdr.Prepend(header.ICMPv6MinimumSize))\n+ icmp.SetType(icmpType)\n+ icmp.SetIdent(ident)\n+ icmp.SetChecksum(0)\n+ icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{\n+ Header: icmp,\n+ Src: srcAddr,\n+ Dst: dstAddr,\n+ }))\n+ encodeIPv6Header(\n+ hdr.Prepend(header.IPv6MinimumSize),\n+ len(icmp),\n+ header.ICMPv6ProtocolNumber,\n+ srcAddr,\n+ dstAddr,\n+ )\n+ return hdr.View()\n+}\n+\nfunc TestNATICMPError(t *testing.T) {\nconst (\nsrcPort = 1234\n@@ -2655,6 +2736,27 @@ func TestSNATHandlePortOrIdentConflicts(t *testing.T) {\n},\nsrcPortOrIdentRanges: srcPortRanges,\n},\n+ {\n+ name: \"ICMP Echo\",\n+ proto: header.ICMPv6ProtocolNumber,\n+ buf: func(srcAddr tcpip.Address, ident uint16) buffer.View {\n+ return icmpv6Packet(srcAddr, utils.Host1IPv6Addr.AddressWithPrefix.Address, header.ICMPv6EchoRequest, ident)\n+ },\n+ checkNATed: func(t *testing.T, v buffer.View, originalIdent uint16, firstPacket bool, expectedRange portOrIdentRange) {\n+ checker.IPv6(t, v,\n+ checker.SrcAddr(utils.RouterNIC1IPv6Addr.AddressWithPrefix.Address),\n+ checker.DstAddr(utils.Host1IPv6Addr.AddressWithPrefix.Address),\n+ checker.ICMPv6(\n+ 
checker.ICMPv6Type(header.ICMPv6EchoRequest),\n+ ),\n+ )\n+\n+ if !t.Failed() {\n+ compareSrcPortOrIdent(t, header.ICMPv6(header.IPv6(v).Payload()).Ident(), originalIdent, firstPacket, expectedRange)\n+ }\n+ },\n+ srcPortOrIdentRanges: identRanges,\n+ },\n},\n},\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support NATing ICMPv6 Echo packets
Updates #5915.
PiperOrigin-RevId: 415367088 |
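Annotation: NATing ICMPv6 Echo treats the Ident field as the port analogue, and `SetIdentWithChecksumUpdate` in the diff avoids recomputing the whole checksum by applying an incremental update to the one field that changed. A standalone sketch of that arithmetic (RFC 1624, equation 3; the function name here is hypothetical, the in-tree helper is `checksumUpdate2ByteAlignedUint16`):

```go
package main

import "fmt"

// incrementalChecksum16 computes the new Internet checksum after a single
// aligned 16-bit field changes from old to new, per RFC 1624 eqn. 3:
//   HC' = ~(~HC + ~m + m')
func incrementalChecksum16(oldChecksum, old, new uint16) uint16 {
	sum := uint32(^oldChecksum) + uint32(^old) + uint32(new)
	// Fold the carries back into 16 bits (ones-complement addition).
	for sum>>16 != 0 {
		sum = (sum & 0xffff) + (sum >> 16)
	}
	return ^uint16(sum)
}

func main() {
	// Example: rewriting an Echo Ident from 0x1234 to 0xabcd under an
	// existing checksum of 0x2c1f (illustrative values).
	fmt.Printf("%#04x\n", incrementalChecksum16(0x2c1f, 0x1234, 0xabcd))
}
```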
259,853 | 09.12.2021 21:48:18 | 28,800 | 4d29819e13a14f0d14e79586851364a4939f0ffe | pipe: have separate notifiers for readers and writers
This change fixes a busy loop in the pipe code. VFSPipe.Open calls ctx.BlockOn
to wait for the opposite side, but waitQueue.EventRegister always triggers
EventInternal, so we never block.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/node.go",
"new_path": "pkg/sentry/kernel/pipe/node.go",
"diff": "@@ -75,7 +75,7 @@ func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.Fi\ncase flags.Read && !flags.Write: // O_RDONLY.\nr := i.p.Open(ctx, d, flags)\nfor i.p.isNamed && !flags.NonBlocking && !i.p.HasWriters() {\n- if !ctx.BlockOn((*waitQueue)(i.p), waiter.EventInternal) {\n+ if !ctx.BlockOn((*waitWriters)(i.p), waiter.EventInternal) {\nr.DecRef(ctx)\nreturn nil, linuxerr.ErrInterrupted\n}\n@@ -95,7 +95,7 @@ func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.Fi\nw.DecRef(ctx)\nreturn nil, linuxerr.ENXIO\n}\n- if !ctx.BlockOn((*waitQueue)(i.p), waiter.EventInternal) {\n+ if !ctx.BlockOn((*waitReaders)(i.p), waiter.EventInternal) {\nw.DecRef(ctx)\nreturn nil, linuxerr.ErrInterrupted\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/pipe.go",
"new_path": "pkg/sentry/kernel/pipe/pipe.go",
"diff": "@@ -49,23 +49,23 @@ const (\natomicIOBytes = 4096\n)\n-// waitQueue is a wrapper around Pipe.\n+// waitReaders is a wrapper around Pipe.\n//\n// This is used for ctx.Block operations that require the synchronization of\n// readers and writers, along with the careful grabbing and releasing of locks.\n-type waitQueue Pipe\n+type waitReaders Pipe\n// Readiness implements waiter.Waitable.Readiness.\n-func (wq *waitQueue) Readiness(mask waiter.EventMask) waiter.EventMask {\n+func (wq *waitReaders) Readiness(mask waiter.EventMask) waiter.EventMask {\nreturn ((*Pipe)(wq)).rwReadiness() & mask\n}\n// EventRegister implements waiter.Waitable.EventRegister.\n-func (wq *waitQueue) EventRegister(e *waiter.Entry) error {\n+func (wq *waitReaders) EventRegister(e *waiter.Entry) error {\n((*Pipe)(wq)).queue.EventRegister(e)\n// Notify synchronously.\n- if ((*Pipe)(wq)).HasReaders() || ((*Pipe)(wq)).HasWriters() {\n+ if ((*Pipe)(wq)).HasReaders() {\ne.NotifyEvent(waiter.EventInternal)\n}\n@@ -73,7 +73,35 @@ func (wq *waitQueue) EventRegister(e *waiter.Entry) error {\n}\n// EventUnregister implements waiter.Waitable.EventUnregister.\n-func (wq *waitQueue) EventUnregister(e *waiter.Entry) {\n+func (wq *waitReaders) EventUnregister(e *waiter.Entry) {\n+ ((*Pipe)(wq)).queue.EventUnregister(e)\n+}\n+\n+// waitWriters is a wrapper around Pipe.\n+//\n+// This is used for ctx.Block operations that require the synchronization of\n+// readers and writers, along with the careful grabbing and releasing of locks.\n+type waitWriters Pipe\n+\n+// Readiness implements waiter.Waitable.Readiness.\n+func (wq *waitWriters) Readiness(mask waiter.EventMask) waiter.EventMask {\n+ return ((*Pipe)(wq)).rwReadiness() & mask\n+}\n+\n+// EventRegister implements waiter.Waitable.EventRegister.\n+func (wq *waitWriters) EventRegister(e *waiter.Entry) error {\n+ ((*Pipe)(wq)).queue.EventRegister(e)\n+\n+ // Notify synchronously.\n+ if ((*Pipe)(wq)).HasWriters() {\n+ e.NotifyEvent(waiter.EventInternal)\n+ }\n+\n+ return nil\n+}\n+\n+// EventUnregister implements waiter.Waitable.EventUnregister.\n+func (wq *waitWriters) EventUnregister(e *waiter.Entry) {\n((*Pipe)(wq)).queue.EventUnregister(e)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/vfs.go",
"new_path": "pkg/sentry/kernel/pipe/vfs.go",
"diff": "@@ -99,7 +99,7 @@ func (vp *VFSPipe) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, s\n// If this pipe is being opened as blocking and there's no\n// writer, we have to wait for a writer to open the other end.\nfor vp.pipe.isNamed && statusFlags&linux.O_NONBLOCK == 0 && !vp.pipe.HasWriters() {\n- if !ctx.BlockOn((*waitQueue)(&vp.pipe), waiter.EventInternal) {\n+ if !ctx.BlockOn((*waitWriters)(&vp.pipe), waiter.EventInternal) {\nfd.DecRef(ctx)\nreturn nil, linuxerr.EINTR\n}\n@@ -113,7 +113,7 @@ func (vp *VFSPipe) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, s\nfd.DecRef(ctx)\nreturn nil, linuxerr.ENXIO\n}\n- if !ctx.BlockOn((*waitQueue)(&vp.pipe), waiter.EventInternal) {\n+ if !ctx.BlockOn((*waitReaders)(&vp.pipe), waiter.EventInternal) {\nfd.DecRef(ctx)\nreturn nil, linuxerr.EINTR\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | pipe: have separate notifiers for readers and writers
This change fixes a busy loop in the pipe code. VFSPipe.Open calls ctx.BlockOn
to wait for the opposite side, but waitQueue.EventRegister always triggers
EventInternal, so we never block.
Reported-by: [email protected]
PiperOrigin-RevId: 415428542 |
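Annotation: the bug pattern is general. A reader opening a named pipe already satisfies `HasReaders()`, so a register hook that notifies on `HasReaders() || HasWriters()` makes `for !HasWriters() { BlockOn(...) }` spin instead of sleep. A minimal, self-contained sketch of the fixed shape (all names hypothetical; the real fix splits Pipe into `waitReaders`/`waitWriters` wrappers):

```go
package main

import "sync"

// pipe is a stand-in for the real Pipe; only the shape of the fix matters.
type pipe struct {
	mu      sync.Mutex
	writers int
	waiters []chan struct{}
}

// registerForWriters mirrors waitWriters.EventRegister in the diff above: it
// notifies synchronously only when the condition the caller is waiting for
// (a writer already exists) actually holds. Notifying when *either* readers
// or writers existed is what kept the open path from ever blocking.
func (p *pipe) registerForWriters() <-chan struct{} {
	p.mu.Lock()
	defer p.mu.Unlock()
	ch := make(chan struct{}, 1)
	if p.writers > 0 {
		ch <- struct{}{}
	}
	p.waiters = append(p.waiters, ch)
	return ch
}

func main() {
	p := &pipe{}
	select {
	case <-p.registerForWriters():
		// Not reached: no writer exists, so the waiter is not pre-notified
		// and a real caller would genuinely block here.
	default:
	}
}
```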
259,909 | 10.12.2021 17:08:00 | 28,800 | 75bfc5e0e9cc68c87c0a4195b73cc786db2a9920 | Resolve to the last mount in the stack during umount.
This is done in Linux with the LOOKUP_MOUNTPOINT flag. We emulate this behavior
in gVisor with getMountAt(). | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/mount.go",
"new_path": "pkg/sentry/vfs/mount.go",
"diff": "@@ -297,10 +297,21 @@ func (vfs *VirtualFilesystem) UmountAt(ctx context.Context, creds *auth.Credenti\nif err != nil {\nreturn err\n}\n- defer vd.DecRef(ctx)\n- if vd.dentry != vd.mount.root {\n+ defer func() {\n+ vd.DecRef(ctx)\n+ }()\n+ // Linux passes the LOOKUP_MOUNPOINT flag to user_path_at in ksys_umount to resolve to the\n+ // toppmost mount in the stack located at the specified path. vfs.GetMountAt() imitiates this\n+ // behavior. See fs/namei.c:user_path_at(...) and fs/namespace.c:ksys_umount(...).\n+ if vd.dentry.isMounted() {\n+ if realmnt := vfs.getMountAt(ctx, vd.mount, vd.dentry); realmnt != nil {\n+ vd.mount.DecRef(ctx)\n+ vd.mount = realmnt\n+ }\n+ } else if vd.dentry != vd.mount.root {\nreturn linuxerr.EINVAL\n}\n+\nvfs.mountMu.Lock()\nif mntns := MountNamespaceFromContext(ctx); mntns != nil {\ndefer mntns.DecRef(ctx)\n@@ -346,8 +357,8 @@ func (vfs *VirtualFilesystem) UmountAt(ctx context.Context, creds *auth.Credenti\nfor _, vd := range vdsToDecRef {\nvd.DecRef(ctx)\n}\n- for _, mnt := range mountsToDecRef {\n- mnt.DecRef(ctx)\n+ for _, m := range mountsToDecRef {\n+ m.DecRef(ctx)\n}\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/mount.cc",
"new_path": "test/syscalls/linux/mount.cc",
"diff": "@@ -220,6 +220,28 @@ TEST(MountTest, UmountDetach) {\nOpenAt(mounted_dir.get(), \"..\", O_DIRECTORY | O_RDONLY));\n}\n+TEST(MountTest, UmountMountsStackedOnDot) {\n+ SKIP_IF(IsRunningWithVFS1());\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ // Verify that unmounting at \".\" properly unmounts the mount at the top of\n+ // mount stack.\n+ auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ TEST_CHECK_SUCCESS(chdir(dir.path().c_str()));\n+ const struct stat before = ASSERT_NO_ERRNO_AND_VALUE(Stat(\".\"));\n+\n+ TEST_CHECK_SUCCESS(mount(\"\", dir.path().c_str(), \"tmpfs\", 0, \"mode=0700\"));\n+ TEST_CHECK_SUCCESS(mount(\"\", dir.path().c_str(), \"tmpfs\", 0, \"mode=0700\"));\n+\n+ // Unmount the second mount at \".\"\n+ TEST_CHECK_SUCCESS(umount2(\".\", MNT_DETACH));\n+\n+ // Unmount the first mount at \".\"; this will fail if umount does not resolve\n+ // \".\" to the topmost mount.\n+ TEST_CHECK_SUCCESS(umount2(\".\", MNT_DETACH));\n+ const struct stat after2 = ASSERT_NO_ERRNO_AND_VALUE(Stat(\".\"));\n+ EXPECT_TRUE(before.st_dev == after2.st_dev && before.st_ino == after2.st_ino);\n+}\n+\nTEST(MountTest, ActiveSubmountBusy) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Resolve to the last mount in the stack during umount.
This is done in Linux with the LOOKUP_MOUNTPOINT flag. We emulate this behavior
in gVisor with getMountAt().
PiperOrigin-RevId: 415642095 |
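Annotation: the essence of `getMountAt` here is walking the stack of mounts keyed by (parent mount, mountpoint dentry) until nothing further is stacked, so `umount2(".")` removes the topmost mount. A hypothetical, self-contained sketch of that resolution; every type below is a stand-in, not the vfs package's:

```go
package main

import "fmt"

// Stand-in types: gVisor's real lookup keys are (mount, dentry) pairs.
type mount struct {
	id   int
	root string
}
type key struct {
	parent *mount
	point  string
}

// topmost follows mounts stacked on (m, point) until no further mount is
// found, mirroring what getMountAt does for UmountAt.
func topmost(mounts map[key]*mount, m *mount, point string) *mount {
	for {
		next, ok := mounts[key{parent: m, point: point}]
		if !ok {
			return m
		}
		m, point = next, next.root
	}
}

func main() {
	base := &mount{id: 0, root: "/"}
	first := &mount{id: 1, root: "/"}
	second := &mount{id: 2, root: "/"}
	mounts := map[key]*mount{
		{parent: base, point: "/tmp/dir"}: first,
		{parent: first, point: "/"}:       second,
	}
	// As in the new MountTest.UmountMountsStackedOnDot: two mounts stacked
	// on the same directory resolve to the second (topmost) one.
	fmt.Println(topmost(mounts, base, "/tmp/dir").id) // 2
}
```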
259,909 | 13.12.2021 12:01:15 | 28,800 | a9938e0f142c02f9a0eb11121102b50392d596ef | Document confusing defer behavior in mount.go. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/mount.go",
"new_path": "pkg/sentry/vfs/mount.go",
"diff": "@@ -297,6 +297,11 @@ func (vfs *VirtualFilesystem) UmountAt(ctx context.Context, creds *auth.Credenti\nif err != nil {\nreturn err\n}\n+ // This defer statement is encapsulated in a function because vd.mount can be\n+ // modified in the block below. The arguments to defer are evaluated during\n+ // the construction of a defer statement, so if vd.DecRef() was not\n+ // encapsulated, the vd structure and its underlying pointers _at this point_\n+ // would be copied and DecRefd at the end of this function.\ndefer func() {\nvd.DecRef(ctx)\n}()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Document confusing defer behavior in mount.go.
PiperOrigin-RevId: 416100664 |
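Annotation: the comment added by this commit describes standard Go semantics: a deferred call's receiver and arguments are evaluated when the `defer` statement executes, not when the deferred call runs, so `defer vd.DecRef(ctx)` would capture the pre-resolution `vd`. A standalone illustration with made-up names:

```go
package main

import "fmt"

type ref struct{ name string }

func (r ref) decRef() { fmt.Println("DecRef on:", r.name) }

func main() {
	vd := ref{name: "original mount"}
	// Receiver and arguments are evaluated at this point, so this deferred
	// call operates on a copy holding "original mount".
	defer vd.decRef()
	// The closure reads vd only when the defers run, so it sees the value
	// assigned below.
	defer func() { vd.decRef() }()
	vd = ref{name: "topmost mount"}
	// Deferred calls run LIFO: first "DecRef on: topmost mount",
	// then "DecRef on: original mount".
}
```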
259,898 | 13.12.2021 13:57:00 | 28,800 | ef0119d3d08079a735129d4c62c4d9aec64b9328 | Ignore Ethernet padding in parsing
The PacketImpact testbench did not take into account that Ethernet frames can
have padding, which can cause test failures. | [
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/testbench/layers.go",
"new_path": "test/packetimpact/testbench/layers.go",
"diff": "@@ -215,23 +215,34 @@ func NetworkProtocolNumber(v tcpip.NetworkProtocolNumber) *tcpip.NetworkProtocol\nreturn &v\n}\n+// bodySizeHint describes num of bytes left to parse for the rest of layers.\n+type bodySizeHint int\n+\n+const bodySizeUnknown bodySizeHint = -1\n+\n// layerParser parses the input bytes and returns a Layer along with the next\n// layerParser to run. If there is no more parsing to do, the returned\n// layerParser is nil.\n-type layerParser func([]byte) (Layer, layerParser)\n+type layerParser func([]byte) (Layer, bodySizeHint, layerParser)\n// parse parses bytes starting with the first layerParser and using successive\n// layerParsers until all the bytes are parsed.\nfunc parse(parser layerParser, b []byte) Layers {\nvar layers Layers\nfor {\n- var layer Layer\n- layer, parser = parser(b)\n+ layer, hint, next := parser(b)\nlayers = append(layers, layer)\nif parser == nil {\nbreak\n}\nb = b[layer.length():]\n+ if hint != bodySizeUnknown {\n+ b = b[:hint]\n+ }\n+ if next == nil {\n+ break\n+ }\n+ parser = next\n}\nlayers.linkLayers()\nreturn layers\n@@ -239,7 +250,7 @@ func parse(parser layerParser, b []byte) Layers {\n// parseEther parses the bytes assuming that they start with an ethernet header\n// and continues parsing further encapsulations.\n-func parseEther(b []byte) (Layer, layerParser) {\n+func parseEther(b []byte) (Layer, bodySizeHint, layerParser) {\nh := header.Ethernet(b)\nether := Ether{\nSrcAddr: LinkAddress(h.SourceAddress()),\n@@ -256,7 +267,7 @@ func parseEther(b []byte) (Layer, layerParser) {\n// Assume that the rest is a payload.\nnextParser = parsePayload\n}\n- return ðer, nextParser\n+ return ðer, bodySizeUnknown, nextParser\n}\nfunc (l *Ether) match(other Layer) bool {\n@@ -421,7 +432,7 @@ func Address(v tcpip.Address) *tcpip.Address {\n// parseIPv4 parses the bytes assuming that they start with an ipv4 header and\n// continues parsing further encapsulations.\n-func parseIPv4(b []byte) (Layer, layerParser) {\n+func parseIPv4(b []byte) (Layer, bodySizeHint, layerParser) {\nh := header.IPv4(b)\noptions := h.Options()\ntos, _ := h.TOS()\n@@ -442,7 +453,7 @@ func parseIPv4(b []byte) (Layer, layerParser) {\nvar nextParser layerParser\n// If it is a fragment, don't treat it as having a transport protocol.\nif h.FragmentOffset() != 0 || h.More() {\n- return &ipv4, parsePayload\n+ return &ipv4, bodySizeHint(h.PayloadLength()), parsePayload\n}\nswitch h.TransportProtocol() {\ncase header.TCPProtocolNumber:\n@@ -455,7 +466,7 @@ func parseIPv4(b []byte) (Layer, layerParser) {\n// Assume that the rest is a payload.\nnextParser = parsePayload\n}\n- return &ipv4, nextParser\n+ return &ipv4, bodySizeHint(h.PayloadLength()), nextParser\n}\nfunc (l *IPv4) match(other Layer) bool {\n@@ -555,7 +566,7 @@ func nextIPv6PayloadParser(nextHeader uint8) layerParser {\n// parseIPv6 parses the bytes assuming that they start with an ipv6 header and\n// continues parsing further encapsulations.\n-func parseIPv6(b []byte) (Layer, layerParser) {\n+func parseIPv6(b []byte) (Layer, bodySizeHint, layerParser) {\nh := header.IPv6(b)\ntos, flowLabel := h.TOS()\nipv6 := IPv6{\n@@ -568,7 +579,7 @@ func parseIPv6(b []byte) (Layer, layerParser) {\nDstAddr: Address(h.DestinationAddress()),\n}\nnextParser := nextIPv6PayloadParser(h.NextHeader())\n- return &ipv6, nextParser\n+ return &ipv6, bodySizeHint(h.PayloadLength()), nextParser\n}\nfunc (l *IPv6) match(other Layer) bool {\n@@ -727,16 +738,16 @@ func parseIPv6ExtHdr(b []byte) (header.IPv6ExtensionHeaderIdentifier, []byte, la\n// 
parseIPv6HopByHopOptionsExtHdr parses the bytes assuming that they start\n// with an IPv6 HopByHop Options Extension Header.\n-func parseIPv6HopByHopOptionsExtHdr(b []byte) (Layer, layerParser) {\n+func parseIPv6HopByHopOptionsExtHdr(b []byte) (Layer, bodySizeHint, layerParser) {\nnextHeader, options, nextParser := parseIPv6ExtHdr(b)\n- return &IPv6HopByHopOptionsExtHdr{NextHeader: &nextHeader, Options: options}, nextParser\n+ return &IPv6HopByHopOptionsExtHdr{NextHeader: &nextHeader, Options: options}, bodySizeUnknown, nextParser\n}\n// parseIPv6DestinationOptionsExtHdr parses the bytes assuming that they start\n// with an IPv6 Destination Options Extension Header.\n-func parseIPv6DestinationOptionsExtHdr(b []byte) (Layer, layerParser) {\n+func parseIPv6DestinationOptionsExtHdr(b []byte) (Layer, bodySizeHint, layerParser) {\nnextHeader, options, nextParser := parseIPv6ExtHdr(b)\n- return &IPv6DestinationOptionsExtHdr{NextHeader: &nextHeader, Options: options}, nextParser\n+ return &IPv6DestinationOptionsExtHdr{NextHeader: &nextHeader, Options: options}, bodySizeUnknown, nextParser\n}\n// Bool is a helper routine that allocates a new\n@@ -747,7 +758,7 @@ func Bool(v bool) *bool {\n// parseIPv6FragmentExtHdr parses the bytes assuming that they start\n// with an IPv6 Fragment Extension Header.\n-func parseIPv6FragmentExtHdr(b []byte) (Layer, layerParser) {\n+func parseIPv6FragmentExtHdr(b []byte) (Layer, bodySizeHint, layerParser) {\nnextHeader := b[0]\nvar extHdr header.IPv6FragmentExtHdr\ncopy(extHdr[:], b[2:])\n@@ -759,9 +770,9 @@ func parseIPv6FragmentExtHdr(b []byte) (Layer, layerParser) {\n}\n// If it is a fragment, we can't interpret it.\nif extHdr.FragmentOffset() != 0 || extHdr.More() {\n- return &fragLayer, parsePayload\n+ return &fragLayer, bodySizeUnknown, parsePayload\n}\n- return &fragLayer, nextIPv6PayloadParser(nextHeader)\n+ return &fragLayer, bodySizeUnknown, nextIPv6PayloadParser(nextHeader)\n}\nfunc (l *IPv6HopByHopOptionsExtHdr) length() int {\n@@ -894,7 +905,7 @@ func ICMPv6Code(v header.ICMPv6Code) *header.ICMPv6Code {\n}\n// parseICMPv6 parses the bytes assuming that they start with an ICMPv6 header.\n-func parseICMPv6(b []byte) (Layer, layerParser) {\n+func parseICMPv6(b []byte) (Layer, bodySizeHint, layerParser) {\nh := header.ICMPv6(b)\nmsgType := h.Type()\nicmpv6 := ICMPv6{\n@@ -909,7 +920,7 @@ func parseICMPv6(b []byte) (Layer, layerParser) {\ncase header.ICMPv6ParamProblem:\nicmpv6.Pointer = Uint32(h.TypeSpecific())\n}\n- return &icmpv6, nil\n+ return &icmpv6, bodySizeUnknown, nil\n}\nfunc (l *ICMPv6) match(other Layer) bool {\n@@ -995,7 +1006,7 @@ func (l *ICMPv4) ToBytes() ([]byte, error) {\n// parseICMPv4 parses the bytes as an ICMPv4 header, returning a Layer and a\n// parser for the encapsulated payload.\n-func parseICMPv4(b []byte) (Layer, layerParser) {\n+func parseICMPv4(b []byte) (Layer, bodySizeHint, layerParser) {\nh := header.ICMPv4(b)\nmsgType := h.Type()\n@@ -1012,7 +1023,7 @@ func parseICMPv4(b []byte) (Layer, layerParser) {\ncase header.ICMPv4ParamProblem:\nicmpv4.Pointer = Uint8(h.Pointer())\n}\n- return &icmpv4, nil\n+ return &icmpv4, bodySizeUnknown, nil\n}\nfunc (l *ICMPv4) match(other Layer) bool {\n@@ -1156,7 +1167,7 @@ func Uint32(v uint32) *uint32 {\n// parseTCP parses the bytes assuming that they start with a tcp header and\n// continues parsing further encapsulations.\n-func parseTCP(b []byte) (Layer, layerParser) {\n+func parseTCP(b []byte) (Layer, bodySizeHint, layerParser) {\nh := header.TCP(b)\ntcp := TCP{\nSrcPort: 
Uint16(h.SourcePort()),\n@@ -1170,7 +1181,7 @@ func parseTCP(b []byte) (Layer, layerParser) {\nUrgentPointer: Uint16(h.UrgentPointer()),\nOptions: b[header.TCPMinimumSize:h.DataOffset()],\n}\n- return &tcp, parsePayload\n+ return &tcp, bodySizeUnknown, parsePayload\n}\nfunc (l *TCP) match(other Layer) bool {\n@@ -1245,7 +1256,7 @@ func setUDPChecksum(h *header.UDP, udp *UDP) error {\n// parseUDP parses the bytes assuming that they start with a udp header and\n// returns the parsed layer and the next parser to use.\n-func parseUDP(b []byte) (Layer, layerParser) {\n+func parseUDP(b []byte) (Layer, bodySizeHint, layerParser) {\nh := header.UDP(b)\nudp := UDP{\nSrcPort: Uint16(h.SourcePort()),\n@@ -1253,7 +1264,7 @@ func parseUDP(b []byte) (Layer, layerParser) {\nLength: Uint16(h.Length()),\nChecksum: Uint16(h.Checksum()),\n}\n- return &udp, parsePayload\n+ return &udp, bodySizeUnknown, parsePayload\n}\nfunc (l *UDP) match(other Layer) bool {\n@@ -1281,11 +1292,11 @@ func (l *Payload) String() string {\n// parsePayload parses the bytes assuming that they start with a payload and\n// continue to the end. There can be no further encapsulations.\n-func parsePayload(b []byte) (Layer, layerParser) {\n+func parsePayload(b []byte) (Layer, bodySizeHint, layerParser) {\npayload := Payload{\nBytes: b,\n}\n- return &payload, nil\n+ return &payload, bodySizeUnknown, nil\n}\n// ToBytes implements Layer.ToBytes.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/testbench/layers_test.go",
"new_path": "test/packetimpact/testbench/layers_test.go",
"diff": "@@ -16,6 +16,7 @@ package testbench\nimport (\n\"bytes\"\n+ \"encoding/hex\"\n\"net\"\n\"testing\"\n@@ -726,3 +727,52 @@ func TestIPv6ExtHdrOptions(t *testing.T) {\n})\n}\n}\n+\n+func TestEthernetPadding(t *testing.T) {\n+ packet := []byte{\n+ 0x3a, 0xd6, 0x90, 0x36, 0x18, 0xce, 0x64, 0x4f, 0x16, 0x3f,\n+ 0x5f, 0x0f, 0x08, 0x00, 0x45, 0x00, 0x00, 0x2c, 0xf5, 0x0e,\n+ 0x00, 0x00, 0x40, 0x06, 0x2d, 0xba, 0xac, 0x00, 0x00, 0x02,\n+ 0xac, 0x00, 0x00, 0x01, 0x7c, 0x3e, 0xe3, 0x91, 0x2b, 0xe4,\n+ 0xb0, 0xe7, 0x9a, 0xcb, 0x04, 0x43, 0x60, 0x12, 0x72, 0x00,\n+ 0xf2, 0x67, 0x00, 0x00, 0x02, 0x04, 0x05, 0xb4, 0x00, 0x00,\n+ }\n+ parsed := parse(parseEther, packet)\n+ wanted := Layers{\n+ &Ether{\n+ SrcAddr: LinkAddress(tcpip.LinkAddress(\"\\x64\\x4f\\x16\\x3f\\x5f\\x0f\")),\n+ DstAddr: LinkAddress(tcpip.LinkAddress(\"\\x3a\\xd6\\x90\\x36\\x18\\xce\")),\n+ Type: NetworkProtocolNumber(header.IPv4ProtocolNumber),\n+ },\n+ &IPv4{\n+ IHL: Uint8(20),\n+ TOS: Uint8(0),\n+ TotalLength: Uint16(44),\n+ ID: Uint16(0xf50e),\n+ Flags: Uint8(0),\n+ FragmentOffset: Uint16(0),\n+ TTL: Uint8(64),\n+ Protocol: Uint8(uint8(header.TCPProtocolNumber)),\n+ Checksum: Uint16(0x2dba),\n+ SrcAddr: Address(tcpip.Address(\"\\xac\\x00\\x00\\x02\")),\n+ DstAddr: Address(tcpip.Address(\"\\xac\\x00\\x00\\x01\")),\n+ },\n+ &TCP{\n+ SrcPort: Uint16(31806),\n+ DstPort: Uint16(58257),\n+ SeqNum: Uint32(736407783),\n+ AckNum: Uint32(2596996163),\n+ DataOffset: Uint8(24),\n+ Flags: TCPFlags(header.TCPFlagSyn | header.TCPFlagAck),\n+ WindowSize: Uint16(29184),\n+ Checksum: Uint16(0xf267),\n+ UrgentPointer: Uint16(0),\n+ },\n+ &Payload{\n+ Bytes: []byte{},\n+ },\n+ }\n+ if !parsed.match(wanted) {\n+ t.Fatalf(\"parse(parseEther, %s) = %s, want %s)\", hex.Dump(packet), parsed, wanted)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Ignore Ethernet padding in parsing
The PacketImpact testbench did not take into account that Ethernet frames can
have padding, which can cause test failures.
PiperOrigin-RevId: 416127829 |
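Annotation: the `bodySizeHint` mechanism above trims the parse input to the length the network header claims, so trailing Ethernet padding never reaches the transport parser. The same idea in isolation, using gVisor's own header package (a sketch, not the testbench code):

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/header"
)

// trimEthernetPadding bounds an Ethernet frame's body by the IPv4 TotalLength
// field; anything past that length is link-layer padding, not payload.
func trimEthernetPadding(frame []byte) []byte {
	if len(frame) < header.EthernetMinimumSize+header.IPv4MinimumSize {
		return nil
	}
	body := frame[header.EthernetMinimumSize:]
	ip := header.IPv4(body)
	if n := int(ip.TotalLength()); n >= header.IPv4MinimumSize && n <= len(body) {
		return body[:n]
	}
	return body
}

func main() {
	frame := make([]byte, 64) // a minimum-size Ethernet frame, mostly padding
	ip := header.IPv4(frame[header.EthernetMinimumSize:])
	ip.SetTotalLength(header.IPv4MinimumSize) // a header-only IPv4 packet
	fmt.Println(len(trimEthernetPadding(frame))) // 20: the padding bytes are gone
}
```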
259,962 | 13.12.2021 23:36:02 | 28,800 | c06c9deb1c71f235f646c4dc644e848df748c2da | Add support for virtio net headers in sharedmem endpoint. | [
{
"change_type": "MODIFY",
"old_path": "pkg/eventfd/BUILD",
"new_path": "pkg/eventfd/BUILD",
"diff": "@@ -8,7 +8,10 @@ go_library(\n\"eventfd.go\",\n\"eventfd_unsafe.go\",\n],\n- visibility = [\"//:sandbox\"],\n+ visibility = [\n+ \"//:sandbox\",\n+ \"//cloud/cluster/node/network/client/go:__pkg__\",\n+ ],\ndeps = [\n\"//pkg/hostarch\",\n\"//pkg/tcpip/link/rawfile\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/BUILD",
"new_path": "pkg/tcpip/header/BUILD",
"diff": "@@ -26,6 +26,7 @@ go_library(\n\"ndpoptionidentifier_string.go\",\n\"tcp.go\",\n\"udp.go\",\n+ \"virtionet.go\",\n],\nvisibility = [\"//visibility:public\"],\ndeps = [\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/header/virtionet.go",
"diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package header\n+\n+import \"encoding/binary\"\n+\n+// These constants are declared in linux/virtio_net.h.\n+const (\n+ _VIRTIO_NET_HDR_F_NEEDS_CSUM = 1\n+ _VIRTIO_NET_HDR_GSO_NONE = 0\n+ _VIRTIO_NET_HDR_GSO_TCPV4 = 1\n+ _VIRTIO_NET_HDR_GSO_TCPV6 = 4\n+)\n+\n+const (\n+ // VirtioNetHeaderSize is the size of VirtioNetHeader in bytes.\n+ VirtioNetHeaderSize = 10\n+)\n+\n+// Offsets for fields in the virtio net header.\n+const (\n+ flags = 0\n+ gsoType = 1\n+ hdrLen = 2\n+ gsoSize = 4\n+ csumStart = 6\n+ csumOffset = 8\n+)\n+\n+// VirtioNetHeaderFields is the Go equivalent of the struct declared in\n+// linux/virtio_net.h.\n+type VirtioNetHeaderFields struct {\n+ Flags uint8\n+ GSOType uint8\n+ HdrLen uint16\n+ GSOSize uint16\n+ CSumStart uint16\n+ CSumOffset uint16\n+}\n+\n+// VirtioNetHeader represents a virtio net header stored in a byte array.\n+type VirtioNetHeader []byte\n+\n+// Flags returns the \"flags\" field of the virtio net header.\n+func (v VirtioNetHeader) Flags() uint8 {\n+ return uint8(v[flags])\n+}\n+\n+// GSOType returns the \"gsoType\" field of the virtio net header.\n+func (v VirtioNetHeader) GSOType() uint8 {\n+ return uint8(v[gsoType])\n+}\n+\n+// HdrLen returns the \"hdrLen\" field of the virtio net header.\n+func (v VirtioNetHeader) HdrLen() uint16 {\n+ return binary.BigEndian.Uint16(v[hdrLen:])\n+}\n+\n+// GSOSize returns the \"gsoSize\" field of the virtio net header.\n+func (v VirtioNetHeader) GSOSize() uint16 {\n+ return binary.BigEndian.Uint16(v[gsoSize:])\n+}\n+\n+// CSumStart returns the \"csumStart\" field of the virtio net header.\n+func (v VirtioNetHeader) CSumStart() uint16 {\n+ return binary.BigEndian.Uint16(v[csumStart:])\n+}\n+\n+// CSumOffset returns the \"csumOffset\" field of the virtio net header.\n+func (v VirtioNetHeader) CSumOffset() uint16 {\n+ return binary.BigEndian.Uint16(v[csumOffset:])\n+}\n+\n+// Encode encodes all the fields of the virtio net header.\n+func (v VirtioNetHeader) Encode(f *VirtioNetHeaderFields) {\n+ v[flags] = uint8(f.Flags)\n+ v[gsoType] = uint8(f.GSOType)\n+ binary.BigEndian.PutUint16(v[hdrLen:], f.HdrLen)\n+ binary.BigEndian.PutUint16(v[gsoSize:], f.GSOSize)\n+ binary.BigEndian.PutUint16(v[csumStart:], f.CSumStart)\n+ binary.BigEndian.PutUint16(v[csumOffset:], f.CSumOffset)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/BUILD",
"new_path": "pkg/tcpip/link/sharedmem/BUILD",
"diff": "@@ -14,7 +14,9 @@ go_library(\n\"sharedmem_unsafe.go\",\n\"tx.go\",\n],\n- visibility = [\"//visibility:public\"],\n+ visibility = [\n+ \"//visibility:public\",\n+ ],\ndeps = [\n\"//pkg/cleanup\",\n\"//pkg/eventfd\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem.go",
"diff": "@@ -129,6 +129,10 @@ type Options struct {\n// RXChecksumOffload if true, indicates that this endpoints capability\n// set should include CapabilityRXChecksumOffload.\nRXChecksumOffload bool\n+\n+ // VirtioNetHeaderRequired if true, indicates that all outbound packets should have\n+ // a virtio header and inbound packets should have a virtio header as well.\n+ VirtioNetHeaderRequired bool\n}\ntype endpoint struct {\n@@ -156,6 +160,10 @@ type endpoint struct {\n// hdrSize is immutable.\nhdrSize uint32\n+ // virtioNetHeaderRequired if true indicates that a virtio header is expected\n+ // in all inbound/outbound packets.\n+ virtioNetHeaderRequired bool\n+\n// rx is the receive queue.\nrx rx\n@@ -191,6 +199,7 @@ func New(opts Options) (stack.LinkEndpoint, error) {\naddr: opts.LinkAddress,\npeerFD: opts.PeerFD,\nonClosed: opts.OnClosed,\n+ virtioNetHeaderRequired: opts.VirtioNetHeaderRequired,\n}\nif err := e.tx.init(opts.BufferSize, &opts.TX); err != nil {\n@@ -215,6 +224,11 @@ func New(opts Options) (stack.LinkEndpoint, error) {\ne.hdrSize = header.EthernetMinimumSize\ne.caps |= stack.CapabilityResolutionRequired\n}\n+\n+ if opts.VirtioNetHeaderRequired {\n+ e.hdrSize += header.VirtioNetHeaderSize\n+ }\n+\nreturn e, nil\n}\n@@ -322,6 +336,11 @@ func (e *endpoint) AddHeader(local, remote tcpip.LinkAddress, protocol tcpip.Net\neth.Encode(ethHdr)\n}\n+func (e *endpoint) AddVirtioNetHeader(pkt *stack.PacketBuffer) {\n+ virtio := header.VirtioNetHeader(pkt.VirtioNetHeader().Push(header.VirtioNetHeaderSize))\n+ virtio.Encode(&header.VirtioNetHeaderFields{})\n+}\n+\n// WriteRawPacket implements stack.LinkEndpoint.\nfunc (*endpoint) WriteRawPacket(*stack.PacketBuffer) tcpip.Error { return &tcpip.ErrNotSupported{} }\n@@ -330,6 +349,9 @@ func (e *endpoint) writePacketLocked(r stack.RouteInfo, protocol tcpip.NetworkPr\nif e.addr != \"\" {\ne.AddHeader(r.LocalLinkAddress, r.RemoteLinkAddress, protocol, pkt)\n}\n+ if e.virtioNetHeaderRequired {\n+ e.AddVirtioNetHeader(pkt)\n+ }\nviews := pkt.Views()\n// Transmit the packet.\n@@ -414,6 +436,14 @@ func (e *endpoint) dispatchLoop(d stack.NetworkDispatcher) {\nData: buffer.View(b).ToVectorisedView(),\n})\n+ if e.virtioNetHeaderRequired {\n+ _, ok := pkt.VirtioNetHeader().Consume(header.VirtioNetHeaderSize)\n+ if !ok {\n+ pkt.DecRef()\n+ continue\n+ }\n+ }\n+\nvar src, dst tcpip.LinkAddress\nvar proto tcpip.NetworkProtocolNumber\nif e.addr != \"\" {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem_server.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem_server.go",
"diff": "@@ -63,6 +63,10 @@ type serverEndpoint struct {\n// hdrSize is immutable.\nhdrSize uint32\n+ // virtioNetHeaderRequired if true indicates that a virtio header is expected\n+ // in all inbound/outbound packets.\n+ virtioNetHeaderRequired bool\n+\n// onClosed is a function to be called when the FD's peer (if any) closes its\n// end of the communication pipe.\nonClosed func(tcpip.Error)\n@@ -218,6 +222,11 @@ func (e *serverEndpoint) AddHeader(local, remote tcpip.LinkAddress, protocol tcp\neth.Encode(ethHdr)\n}\n+func (e *serverEndpoint) AddVirtioNetHeader(pkt *stack.PacketBuffer) {\n+ virtio := header.VirtioNetHeader(pkt.VirtioNetHeader().Push(header.VirtioNetHeaderSize))\n+ virtio.Encode(&header.VirtioNetHeaderFields{})\n+}\n+\n// WriteRawPacket implements stack.LinkEndpoint.WriteRawPacket\nfunc (e *serverEndpoint) WriteRawPacket(pkt *stack.PacketBuffer) tcpip.Error {\nviews := pkt.Views()\n@@ -237,6 +246,10 @@ func (e *serverEndpoint) writePacketLocked(r stack.RouteInfo, protocol tcpip.Net\ne.AddHeader(r.LocalLinkAddress, r.RemoteLinkAddress, protocol, pkt)\n}\n+ if e.virtioNetHeaderRequired {\n+ e.AddVirtioNetHeader(pkt)\n+ }\n+\nviews := pkt.Views()\nok := e.tx.transmit(views)\nif !ok {\n@@ -306,6 +319,13 @@ func (e *serverEndpoint) dispatchLoop(d stack.NetworkDispatcher) {\npkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buffer.View(b).ToVectorisedView(),\n})\n+ if e.virtioNetHeaderRequired {\n+ _, ok := pkt.VirtioNetHeader().Consume(header.VirtioNetHeaderSize)\n+ if !ok {\n+ pkt.DecRef()\n+ continue\n+ }\n+ }\nvar src, dst tcpip.LinkAddress\nvar proto tcpip.NetworkProtocolNumber\nif e.addr != \"\" {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sniffer/sniffer.go",
"new_path": "pkg/tcpip/link/sniffer/sniffer.go",
"diff": "@@ -207,6 +207,7 @@ func logPacket(prefix string, dir direction, protocol tcpip.NetworkProtocolNumbe\n// We trim the link headers from the cloned buffer as the sniffer doesn't\n// handle link headers.\nvv := buffer.NewVectorisedView(pkt.Size(), pkt.Views())\n+ vv.TrimFront(len(pkt.VirtioNetHeader().View()))\nvv.TrimFront(len(pkt.LinkHeader().View()))\npkt = stack.NewPacketBuffer(stack.PacketBufferOptions{Data: vv})\ndefer pkt.DecRef()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/headertype_string.go",
"new_path": "pkg/tcpip/stack/headertype_string.go",
"diff": "@@ -21,13 +21,14 @@ func _() {\n// An \"invalid array index\" compiler error signifies that the constant values have changed.\n// Re-run the stringer command to generate them again.\nvar x [1]struct{}\n- _ = x[linkHeader-0]\n- _ = x[networkHeader-1]\n- _ = x[transportHeader-2]\n- _ = x[numHeaderType-3]\n+ _ = x[virtioNetHeader-0]\n+ _ = x[linkHeader-1]\n+ _ = x[networkHeader-2]\n+ _ = x[transportHeader-3]\n+ _ = x[numHeaderType-4]\n}\n-const _headerType_name = \"linkHeadernetworkHeadertransportHeadernumHeaderType\"\n+const _headerType_name = \"virtioNetHeaderlinkHeadernetworkHeadertransportHeadernumHeaderType\"\nvar _headerType_index = [...]uint8{0, 10, 23, 38, 51}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/packet_buffer.go",
"new_path": "pkg/tcpip/stack/packet_buffer.go",
"diff": "@@ -27,7 +27,8 @@ import (\ntype headerType int\nconst (\n- linkHeader headerType = iota\n+ virtioNetHeader headerType = iota\n+ linkHeader\nnetworkHeader\ntransportHeader\nnumHeaderType\n@@ -216,6 +217,14 @@ func (pk *PacketBuffer) AvailableHeaderBytes() int {\nreturn pk.reserved - pk.pushed\n}\n+// VirtioNetHeader returns the handle to virtio-layer header.\n+func (pk *PacketBuffer) VirtioNetHeader() PacketHeader {\n+ return PacketHeader{\n+ pk: pk,\n+ typ: virtioNetHeader,\n+ }\n+}\n+\n// LinkHeader returns the handle to link-layer header.\nfunc (pk *PacketBuffer) LinkHeader() PacketHeader {\nreturn PacketHeader{\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/BUILD",
"new_path": "pkg/tcpip/transport/icmp/BUILD",
"diff": "@@ -26,6 +26,7 @@ go_library(\nimports = [\"gvisor.dev/gvisor/pkg/tcpip/buffer\"],\nvisibility = [\"//visibility:public\"],\ndeps = [\n+ \"//pkg/log\",\n\"//pkg/sleep\",\n\"//pkg/sync\",\n\"//pkg/tcpip\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/endpoint.go",
"new_path": "pkg/tcpip/transport/icmp/endpoint.go",
"diff": "@@ -19,6 +19,7 @@ import (\n\"io\"\n\"time\"\n+ \"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n@@ -348,6 +349,7 @@ func (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) tcpip.Error {\nfunc send4(s *stack.Stack, ctx *network.WriteContext, ident uint16, data buffer.View, maxHeaderLength uint16) tcpip.Error {\nif len(data) < header.ICMPv4MinimumSize {\n+ log.Infof(\"len(data) is smaller than min size\")\nreturn &tcpip.ErrInvalidEndpointState{}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add support for virtio net headers in sharedmem endpoint.
PiperOrigin-RevId: 416221825 |
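Annotation: usage of the new codec is symmetric with the other types in pkg/tcpip/header: allocate VirtioNetHeaderSize bytes, Encode the fields, then read them back. A small sketch against the API added above (the sharedmem endpoints themselves encode the zero value, `&VirtioNetHeaderFields{}`, before transmit):

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	buf := make([]byte, header.VirtioNetHeaderSize)
	v := header.VirtioNetHeader(buf)
	// Round-trip two fields through the codec; values here are arbitrary.
	v.Encode(&header.VirtioNetHeaderFields{
		HdrLen:  header.EthernetMinimumSize,
		GSOSize: 1500,
	})
	fmt.Println(v.HdrLen(), v.GSOSize()) // 14 1500
}
```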
259,853 | 14.12.2021 00:03:19 | 28,800 | 7f72261ad1d0d6eaa5688d595983e6d7e64b1fa9 | test/fifo: check that open blocks and can be interrupted
and move fifo test cases to a separate test. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -306,6 +306,11 @@ syscall_test(\ntest = \"//test/syscalls/linux:mknod_test\",\n)\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:fifo_test\",\n+)\n+\nsyscall_test(\ntest = \"//test/syscalls/linux:mlock_test\",\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -1263,6 +1263,23 @@ cc_binary(\n],\n)\n+cc_binary(\n+ name = \"fifo_test\",\n+ testonly = 1,\n+ srcs = [\"fifo.cc\"],\n+ linkstatic = 1,\n+ deps = [\n+ \"//test/util:file_descriptor\",\n+ gtest,\n+ \"//test/util:signal_util\",\n+ \"//test/util:temp_path\",\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ \"//test/util:thread_util\",\n+ \"//test/util:timer_util\",\n+ ],\n+)\n+\ncc_binary(\nname = \"mlock_test\",\ntestonly = 1,\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/syscalls/linux/fifo.cc",
"diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <errno.h>\n+#include <fcntl.h>\n+#include <sys/socket.h>\n+#include <sys/stat.h>\n+#include <sys/types.h>\n+#include <sys/un.h>\n+#include <unistd.h>\n+\n+#include <vector>\n+\n+#include \"gtest/gtest.h\"\n+#include \"test/util/file_descriptor.h\"\n+#include \"test/util/signal_util.h\"\n+#include \"test/util/temp_path.h\"\n+#include \"test/util/test_util.h\"\n+#include \"test/util/thread_util.h\"\n+#include \"test/util/timer_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+namespace {\n+\n+PosixErrorOr<FileDescriptor> OpenRetryEINTR(std::string const& path, int flags,\n+ mode_t mode = 0) {\n+ while (true) {\n+ auto maybe_fd = Open(path, flags, mode);\n+ if (maybe_fd.ok() || maybe_fd.error().errno_value() != EINTR) {\n+ return maybe_fd;\n+ }\n+ }\n+}\n+\n+TEST(FifoTest, MknodAtFIFO) {\n+ const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ const std::string fifo_relpath = NewTempRelPath();\n+ const std::string fifo = JoinPath(dir.path(), fifo_relpath);\n+\n+ const FileDescriptor dirfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path().c_str(), O_RDONLY));\n+ ASSERT_THAT(mknodat(dirfd.get(), fifo_relpath.c_str(), S_IFIFO | S_IRUSR, 0),\n+ SyscallSucceeds());\n+\n+ struct stat st;\n+ ASSERT_THAT(stat(fifo.c_str(), &st), SyscallSucceeds());\n+ EXPECT_TRUE(S_ISFIFO(st.st_mode));\n+}\n+\n+TEST(FifoTest, Fifo) {\n+ const std::string fifo = NewTempAbsPath();\n+ ASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),\n+ SyscallSucceeds());\n+\n+ struct stat st;\n+ ASSERT_THAT(stat(fifo.c_str(), &st), SyscallSucceeds());\n+ EXPECT_TRUE(S_ISFIFO(st.st_mode));\n+\n+ std::string msg = \"some std::string\";\n+ std::vector<char> buf(512);\n+\n+ // Read-end of the pipe.\n+ ScopedThread t([&fifo, &buf, &msg]() {\n+ FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(OpenRetryEINTR(fifo.c_str(), O_RDONLY));\n+ EXPECT_THAT(ReadFd(fd.get(), buf.data(), buf.size()),\n+ SyscallSucceedsWithValue(msg.length()));\n+ EXPECT_EQ(msg, std::string(buf.data()));\n+ });\n+\n+ // Write-end of the pipe.\n+ FileDescriptor wfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(OpenRetryEINTR(fifo.c_str(), O_WRONLY));\n+ EXPECT_THAT(WriteFd(wfd.get(), msg.c_str(), msg.length()),\n+ SyscallSucceedsWithValue(msg.length()));\n+}\n+\n+TEST(FifoTest, FifoOtrunc) {\n+ const std::string fifo = NewTempAbsPath();\n+ ASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),\n+ SyscallSucceeds());\n+\n+ struct stat st = {};\n+ ASSERT_THAT(stat(fifo.c_str(), &st), SyscallSucceeds());\n+ EXPECT_TRUE(S_ISFIFO(st.st_mode));\n+\n+ std::string msg = \"some std::string\";\n+ std::vector<char> buf(512);\n+ // Read-end of the pipe.\n+ ScopedThread t([&fifo, &buf, &msg]() {\n+ FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(OpenRetryEINTR(fifo.c_str(), O_RDONLY));\n+ EXPECT_THAT(ReadFd(fd.get(), buf.data(), buf.size()),\n+ SyscallSucceedsWithValue(msg.length()));\n+ EXPECT_EQ(msg, 
std::string(buf.data()));\n+ });\n+\n+ // Write-end of the pipe.\n+ FileDescriptor wfd = ASSERT_NO_ERRNO_AND_VALUE(\n+ OpenRetryEINTR(fifo.c_str(), O_WRONLY | O_TRUNC));\n+ EXPECT_THAT(WriteFd(wfd.get(), msg.c_str(), msg.length()),\n+ SyscallSucceedsWithValue(msg.length()));\n+}\n+\n+TEST(FifoTest, FifoTruncNoOp) {\n+ const std::string fifo = NewTempAbsPath();\n+ ASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),\n+ SyscallSucceeds());\n+\n+ EXPECT_THAT(truncate(fifo.c_str(), 0), SyscallFailsWithErrno(EINVAL));\n+\n+ struct stat st = {};\n+ ASSERT_THAT(stat(fifo.c_str(), &st), SyscallSucceeds());\n+ EXPECT_TRUE(S_ISFIFO(st.st_mode));\n+\n+ std::string msg = \"some std::string\";\n+ std::vector<char> buf(512);\n+ // Read-end of the pipe.\n+ ScopedThread t([&fifo, &buf, &msg]() {\n+ FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(OpenRetryEINTR(fifo.c_str(), O_RDONLY));\n+ EXPECT_THAT(ReadFd(fd.get(), buf.data(), buf.size()),\n+ SyscallSucceedsWithValue(msg.length()));\n+ EXPECT_EQ(msg, std::string(buf.data()));\n+ });\n+\n+ FileDescriptor wfd = ASSERT_NO_ERRNO_AND_VALUE(\n+ OpenRetryEINTR(fifo.c_str(), O_WRONLY | O_TRUNC));\n+ EXPECT_THAT(ftruncate(wfd.get(), 0), SyscallFailsWithErrno(EINVAL));\n+ EXPECT_THAT(WriteFd(wfd.get(), msg.c_str(), msg.length()),\n+ SyscallSucceedsWithValue(msg.length()));\n+ EXPECT_THAT(ftruncate(wfd.get(), 0), SyscallFailsWithErrno(EINVAL));\n+}\n+\n+void TestSigHandler(int sig, siginfo_t* info, void* ucontext) {}\n+\n+TEST(FifoTest, OpenBlockedAndInterrupted) {\n+ constexpr int kSigno = SIGUSR1;\n+ constexpr int kSigvalue = 42;\n+\n+ // Install our signal handler.\n+ struct sigaction sa = {};\n+ sa.sa_sigaction = TestSigHandler;\n+ sigemptyset(&sa.sa_mask);\n+ sa.sa_flags = SA_SIGINFO;\n+ const auto scoped_sigaction =\n+ ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(kSigno, sa));\n+\n+ // Ensure that kSigno is unblocked on at least one thread.\n+ const auto scoped_sigmask =\n+ ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, kSigno));\n+\n+ struct sigevent sev = {};\n+ sev.sigev_notify = SIGEV_THREAD;\n+ sev.sigev_signo = kSigno;\n+ sev.sigev_value.sival_int = kSigvalue;\n+ auto timer = ASSERT_NO_ERRNO_AND_VALUE(TimerCreate(CLOCK_MONOTONIC, sev));\n+\n+ constexpr absl::Duration kPeriod = absl::Seconds(1);\n+ struct itimerspec its = {};\n+ its.it_value = its.it_interval = absl::ToTimespec(kPeriod);\n+ ASSERT_NO_ERRNO(timer.Set(0, its));\n+\n+ const std::string fifo = NewTempAbsPath();\n+ ASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),\n+ SyscallSucceeds());\n+\n+ EXPECT_THAT(open(fifo.c_str(), O_WRONLY), SyscallFailsWithErrno(EINTR));\n+ EXPECT_THAT(open(fifo.c_str(), O_RDONLY), SyscallFailsWithErrno(EINTR));\n+}\n+\n+TEST(FifoTest, FifoOpenRDWR) {\n+ const std::string fifo = NewTempAbsPath();\n+ ASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),\n+ SyscallSucceeds());\n+\n+ struct stat st;\n+ ASSERT_THAT(stat(fifo.c_str(), &st), SyscallSucceeds());\n+ EXPECT_TRUE(S_ISFIFO(st.st_mode));\n+\n+ std::string msg = \"some std::string\";\n+ std::vector<char> buf(msg.length() + 1);\n+\n+ FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(OpenRetryEINTR(fifo.c_str(), O_RDWR));\n+ EXPECT_THAT(WriteFd(fd.get(), msg.c_str(), msg.length()),\n+ SyscallSucceedsWithValue(msg.length()));\n+ EXPECT_THAT(ReadFd(fd.get(), buf.data(), msg.length()),\n+ SyscallSucceedsWithValue(msg.length()));\n+ EXPECT_EQ(msg, std::string(buf.data()));\n+}\n+\n+} // namespace\n+\n+} // namespace testing\n+} // namespace gvisor\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/mknod.cc",
"new_path": "test/syscalls/linux/mknod.cc",
"diff": "@@ -125,101 +125,6 @@ TEST(MknodTest, Socket) {\nASSERT_THAT(unlink(filename.c_str()), SyscallSucceeds());\n}\n-PosixErrorOr<FileDescriptor> OpenRetryEINTR(std::string const& path, int flags,\n- mode_t mode = 0) {\n- while (true) {\n- auto maybe_fd = Open(path, flags, mode);\n- if (maybe_fd.ok() || maybe_fd.error().errno_value() != EINTR) {\n- return maybe_fd;\n- }\n- }\n-}\n-\n-TEST(MknodTest, Fifo) {\n- const std::string fifo = NewTempAbsPath();\n- ASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),\n- SyscallSucceeds());\n-\n- struct stat st;\n- ASSERT_THAT(stat(fifo.c_str(), &st), SyscallSucceeds());\n- EXPECT_TRUE(S_ISFIFO(st.st_mode));\n-\n- std::string msg = \"some std::string\";\n- std::vector<char> buf(512);\n-\n- // Read-end of the pipe.\n- ScopedThread t([&fifo, &buf, &msg]() {\n- FileDescriptor fd =\n- ASSERT_NO_ERRNO_AND_VALUE(OpenRetryEINTR(fifo.c_str(), O_RDONLY));\n- EXPECT_THAT(ReadFd(fd.get(), buf.data(), buf.size()),\n- SyscallSucceedsWithValue(msg.length()));\n- EXPECT_EQ(msg, std::string(buf.data()));\n- });\n-\n- // Write-end of the pipe.\n- FileDescriptor wfd =\n- ASSERT_NO_ERRNO_AND_VALUE(OpenRetryEINTR(fifo.c_str(), O_WRONLY));\n- EXPECT_THAT(WriteFd(wfd.get(), msg.c_str(), msg.length()),\n- SyscallSucceedsWithValue(msg.length()));\n-}\n-\n-TEST(MknodTest, FifoOtrunc) {\n- const std::string fifo = NewTempAbsPath();\n- ASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),\n- SyscallSucceeds());\n-\n- struct stat st = {};\n- ASSERT_THAT(stat(fifo.c_str(), &st), SyscallSucceeds());\n- EXPECT_TRUE(S_ISFIFO(st.st_mode));\n-\n- std::string msg = \"some std::string\";\n- std::vector<char> buf(512);\n- // Read-end of the pipe.\n- ScopedThread t([&fifo, &buf, &msg]() {\n- FileDescriptor fd =\n- ASSERT_NO_ERRNO_AND_VALUE(OpenRetryEINTR(fifo.c_str(), O_RDONLY));\n- EXPECT_THAT(ReadFd(fd.get(), buf.data(), buf.size()),\n- SyscallSucceedsWithValue(msg.length()));\n- EXPECT_EQ(msg, std::string(buf.data()));\n- });\n-\n- // Write-end of the pipe.\n- FileDescriptor wfd = ASSERT_NO_ERRNO_AND_VALUE(\n- OpenRetryEINTR(fifo.c_str(), O_WRONLY | O_TRUNC));\n- EXPECT_THAT(WriteFd(wfd.get(), msg.c_str(), msg.length()),\n- SyscallSucceedsWithValue(msg.length()));\n-}\n-\n-TEST(MknodTest, FifoTruncNoOp) {\n- const std::string fifo = NewTempAbsPath();\n- ASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),\n- SyscallSucceeds());\n-\n- EXPECT_THAT(truncate(fifo.c_str(), 0), SyscallFailsWithErrno(EINVAL));\n-\n- struct stat st = {};\n- ASSERT_THAT(stat(fifo.c_str(), &st), SyscallSucceeds());\n- EXPECT_TRUE(S_ISFIFO(st.st_mode));\n-\n- std::string msg = \"some std::string\";\n- std::vector<char> buf(512);\n- // Read-end of the pipe.\n- ScopedThread t([&fifo, &buf, &msg]() {\n- FileDescriptor fd =\n- ASSERT_NO_ERRNO_AND_VALUE(OpenRetryEINTR(fifo.c_str(), O_RDONLY));\n- EXPECT_THAT(ReadFd(fd.get(), buf.data(), buf.size()),\n- SyscallSucceedsWithValue(msg.length()));\n- EXPECT_EQ(msg, std::string(buf.data()));\n- });\n-\n- FileDescriptor wfd = ASSERT_NO_ERRNO_AND_VALUE(\n- OpenRetryEINTR(fifo.c_str(), O_WRONLY | O_TRUNC));\n- EXPECT_THAT(ftruncate(wfd.get(), 0), SyscallFailsWithErrno(EINVAL));\n- EXPECT_THAT(WriteFd(wfd.get(), msg.c_str(), msg.length()),\n- SyscallSucceedsWithValue(msg.length()));\n- EXPECT_THAT(ftruncate(wfd.get(), 0), SyscallFailsWithErrno(EINVAL));\n-}\n-\nTEST(MknodTest, MknodAtEmptyPath) {\nauto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\nauto fd =\n"
}
] | Go | Apache License 2.0 | google/gvisor | test/fifo: check that open blocks and can be interrupted
and move fifo test cases to a separate test.
PiperOrigin-RevId: 416225961 |
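The tests removed above relied on an open helper that retried on EINTR, since opening one end of a FIFO blocks until the peer arrives and can be interrupted by signals. Below is a minimal standalone sketch of that retry pattern in plain Go; the FIFO path and the surrounding program are placeholders, not the gVisor test harness.

```go
// Sketch of the retry-on-EINTR open pattern from the removed fifo tests.
// The path is a placeholder; a real caller would create the FIFO first
// (e.g. with unix.Mkfifo) and arrange for a peer to open the other end.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// openRetryEINTR retries open(2) until it stops failing with EINTR.
// Opening the read end of a FIFO with O_RDONLY blocks until a writer
// appears, so the call may be interrupted by a signal in the meantime.
func openRetryEINTR(path string, flags int) (int, error) {
	for {
		fd, err := unix.Open(path, flags, 0)
		if err != unix.EINTR {
			return fd, err
		}
	}
}

func main() {
	fd, err := openRetryEINTR("/tmp/example.fifo", unix.O_RDONLY)
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer unix.Close(fd)
	fmt.Println("opened fd", fd)
}
```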
259,898 | 15.12.2021 12:31:05 | 28,800 | 164a2fe386c73d28bef0954c66046b4a2b05c879 | Remove unused docker image
Remove packetimpact docker image
Update Makefile to remove unnecessary steps
Remove unused runner flags | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -231,6 +231,10 @@ syscall-tests: ## Run all system call tests.\n@$(call test,$(PARTITIONS) test/syscalls/...)\n.PHONY: syscall-tests\n+packetimpact-tests:\n+ @$(call test,--jobs=HOST_CPUS*3 --local_test_jobs=HOST_CPUS*3 //test/packetimpact/tests:all_tests)\n+.PHONY: packetimpact-tests\n+\n%-runtime-tests: load-runtimes_% $(RUNTIME_BIN)\n@$(call install_runtime,$(RUNTIME),) # Ensure flags are cleared.\n@$(call test_runtime,$(RUNTIME),--test_timeout=10800 //test/runtimes:$*)\n@@ -302,13 +306,6 @@ packetdrill-tests: load-packetdrill $(RUNTIME_BIN)\n@$(call test_runtime,$(RUNTIME),//test/packetdrill:all_tests)\n.PHONY: packetdrill-tests\n-packetimpact-tests: load-packetimpact $(RUNTIME_BIN)\n- @sudo modprobe iptable_filter\n- @sudo modprobe ip6table_filter\n- @$(call install_runtime,$(RUNTIME),) # Clear flags.\n- @$(call test_runtime,$(RUNTIME),--jobs=HOST_CPUS*3 --local_test_jobs=HOST_CPUS*3 //test/packetimpact/tests:all_tests)\n-.PHONY: packetimpact-tests\n-\nfsstress-test: load-basic $(RUNTIME_BIN)\n@$(call install_runtime,$(RUNTIME),--vfs2)\n@$(call test_runtime,$(RUNTIME),//test/fsstress:fsstress_test)\n"
},
{
"change_type": "DELETE",
"old_path": "images/packetimpact/Dockerfile",
"new_path": null,
"diff": "-FROM ubuntu:focal\n-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \\\n- # iptables to disable OS native packet processing.\n- iptables \\\n- # nc to check that the posix_server is running.\n- netcat \\\n- # tcpdump to log brief packet sniffing.\n- tcpdump \\\n- # ip link show to display MAC addresses.\n- iproute2 \\\n- # tshark to log verbose packet sniffing.\n- tshark \\\n- # killall for cleanup.\n- psmisc \\\n- # qemu-system-x86 to emulate fuchsia.\n- qemu-system-x86 \\\n- # sha1sum to generate entropy.\n- libdigest-sha-perl\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/runner/main.go",
"new_path": "test/packetimpact/runner/main.go",
"diff": "@@ -100,9 +100,6 @@ func main() {\nexpectFailure bool\nnumDUTs int\nvariant string\n- runtime string\n- partition int\n- totalPartitions int\ndutArgs dutArgList\n)\nfs := flag.NewFlagSet(os.Args[0], flag.ContinueOnError)\n@@ -112,10 +109,6 @@ func main() {\nfs.IntVar(&numDUTs, \"num_duts\", 1, \"number of DUTs to create\")\nfs.StringVar(&variant, \"variant\", \"\", \"test variant could be native, gvisor or fuchsia\")\nfs.Var(&dutArgs, \"dut_arg\", \"argument to the DUT binary\")\n- // The following args are passed by CI environment which are not used by us.\n- fs.StringVar(&runtime, \"runtime\", \"\", \"docker runtime to use (unused)\")\n- fs.IntVar(&partition, \"partition\", 1, \"1-indexed partition (unused)\")\n- fs.IntVar(&totalPartitions, \"total_partitions\", 1, \"total partitions (unused)\")\nif err := fs.Parse(os.Args[1:]); err != nil {\nlog.Fatal(err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove unused docker image
- Remove packetimpact docker image
- Update Makefile to remove unnecessary steps
- Remove unused runner flags
PiperOrigin-RevId: 416621865 |
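For reference, the runner's remaining flag surface after this cleanup can be sketched with only the standard flag package. The flag names and the repeated-value `dut_arg` type mirror the diff above; the rest of the program is a hypothetical harness, not the actual runner.

```go
// Sketch of the packetimpact runner's flag parsing after the CI-only
// flags were dropped. dutArgList mirrors the repeated -dut_arg flag.
package main

import (
	"flag"
	"fmt"
	"log"
	"os"
)

// dutArgList collects repeated -dut_arg values into a slice.
type dutArgList []string

func (l *dutArgList) String() string { return fmt.Sprint(*l) }

func (l *dutArgList) Set(v string) error {
	*l = append(*l, v)
	return nil
}

func main() {
	var (
		numDUTs int
		variant string
		dutArgs dutArgList
	)
	fs := flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
	fs.IntVar(&numDUTs, "num_duts", 1, "number of DUTs to create")
	fs.StringVar(&variant, "variant", "", "test variant could be native, gvisor or fuchsia")
	fs.Var(&dutArgs, "dut_arg", "argument to the DUT binary")
	if err := fs.Parse(os.Args[1:]); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("duts=%d variant=%q dut_args=%v\n", numDUTs, variant, dutArgs)
}
```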
259,962 | 15.12.2021 12:48:40 | 28,800 | fe88fe67683ad3516425e8111c3074184e370b82 | Support custom socket options in hostinet. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/netdevice.go",
"new_path": "pkg/abi/linux/netdevice.go",
"diff": "@@ -86,3 +86,37 @@ type IFConf struct {\n_ [4]byte // Pad to sizeof(struct ifconf).\nPtr uint64\n}\n+\n+// EthtoolCmd is a marshallable type to be able to easily copyin the\n+// the command for an SIOCETHTOOL ioctl.\n+//\n+// +marshal\n+type EthtoolCmd uint32\n+\n+const (\n+ // ETHTOOL_GFEATURES is the command to SIOCETHTOOL to query device\n+ // features.\n+ // See: <linux/ethtool.h>\n+ ETHTOOL_GFEATURES EthtoolCmd = 0x3a\n+)\n+\n+// EthtoolGFeatures is used to return a list of device features.\n+// See: <linux/ethtool.h>\n+//\n+// +marshal\n+type EthtoolGFeatures struct {\n+ Cmd uint32\n+ Size uint32\n+}\n+\n+// EthtoolGetFeaturesBlock is used to return state of upto 32 device\n+// features.\n+// See: <linux/ethtool.h>\n+//\n+// +marshal\n+type EthtoolGetFeaturesBlock struct {\n+ Available uint32\n+ Requested uint32\n+ Active uint32\n+ NeverChanged uint32\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/inet/BUILD",
"new_path": "pkg/sentry/inet/BUILD",
"diff": "@@ -27,6 +27,7 @@ go_library(\n\"test_stack.go\",\n],\ndeps = [\n+ \"//pkg/abi/linux\",\n\"//pkg/context\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/stack\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/inet/inet.go",
"new_path": "pkg/sentry/inet/inet.go",
"diff": "package inet\nimport (\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n)\n@@ -120,6 +121,10 @@ type Interface struct {\n// MTU is the maximum transmission unit.\nMTU uint32\n+\n+ // Features are the device features queried from the host at\n+ // stack creation time. These are immutable after startup.\n+ Features []linux.EthtoolGetFeaturesBlock\n}\n// InterfaceAddr contains information about a network interface address.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/BUILD",
"new_path": "pkg/sentry/socket/hostinet/BUILD",
"diff": "@@ -13,6 +13,7 @@ go_library(\n\"socket_vfs2.go\",\n\"sockopt_impl.go\",\n\"stack.go\",\n+ \"stack_unsafe.go\",\n],\nvisibility = [\"//pkg/sentry:internal\"],\ndeps = [\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/socket.go",
"new_path": "pkg/sentry/socket/hostinet/socket.go",
"diff": "@@ -377,13 +377,13 @@ func (s *socketOpsCommon) Shutdown(_ *kernel.Task, how int) *syserr.Error {\n}\n// GetSockOpt implements socket.Socket.GetSockOpt.\n-func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, _ hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {\n+func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, optValAddr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {\nif outLen < 0 {\nreturn nil, syserr.ErrInvalidArgument\n}\n// Only allow known and safe options.\n- optlen := getSockOptLen(t, level, name)\n+ optlen, copyIn := getSockOptLen(t, level, name)\nswitch level {\ncase linux.SOL_IP:\nswitch name {\n@@ -418,10 +418,21 @@ func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, _ host\nreturn nil, syserr.ErrInvalidArgument\n}\n- opt, err := getsockopt(s.fd, level, name, optlen)\n+ opt := make([]byte, optlen)\n+ if copyIn {\n+ // This is non-intuitive as normally in getsockopt one assumes that the\n+ // parameter is purely an out parameter. But some custom options do require\n+ // copying in the optVal so we do it here only for those custom options.\n+ if _, err := t.CopyInBytes(optValAddr, opt); err != nil {\n+ return nil, syserr.FromError(err)\n+ }\n+ }\n+ var err error\n+ opt, err = getsockopt(s.fd, level, name, opt)\nif err != nil {\nreturn nil, syserr.FromError(err)\n}\n+ opt = postGetSockOpt(t, level, name, opt)\noptP := primitive.ByteSlice(opt)\nreturn &optP, nil\n}\n@@ -748,7 +759,9 @@ func translateIOSyscallError(err error) error {\n// State implements socket.Socket.State.\nfunc (s *socketOpsCommon) State() uint32 {\ninfo := linux.TCPInfo{}\n- buf, err := getsockopt(s.fd, unix.SOL_TCP, unix.TCP_INFO, linux.SizeOfTCPInfo)\n+ buf := make([]byte, linux.SizeOfTCPInfo)\n+ var err error\n+ buf, err = getsockopt(s.fd, unix.SOL_TCP, unix.TCP_INFO, buf)\nif err != nil {\nif err != unix.ENOPROTOOPT {\nlog.Warningf(\"Failed to get TCP socket info from %+v: %v\", s, err)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/socket_unsafe.go",
"new_path": "pkg/sentry/socket/hostinet/socket_unsafe.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n\"gvisor.dev/gvisor/pkg/hostarch\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n+ \"gvisor.dev/gvisor/pkg/sentry/inet\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/socket\"\n\"gvisor.dev/gvisor/pkg/syserr\"\n@@ -102,6 +103,76 @@ func ioctl(ctx context.Context, fd int, io usermem.IO, args arch.SyscallArgument\n}\n_, err := ifc.CopyOut(cc, args[2].Pointer())\nreturn 0, err\n+ case linux.SIOCETHTOOL:\n+ cc := &usermem.IOCopyContext{\n+ Ctx: ctx,\n+ IO: io,\n+ Opts: usermem.IOOpts{\n+ AddressSpaceActive: true,\n+ },\n+ }\n+ var ifr linux.IFReq\n+ if _, err := ifr.CopyIn(cc, args[2].Pointer()); err != nil {\n+ return 0, err\n+ }\n+ // SIOCETHTOOL commands specify the subcommand in the first 32 bytes pointed\n+ // to by ifr.ifr_data. We need to copy it in first to understand the actual\n+ // structure pointed by ifr.ifr_data.\n+ ifrData := hostarch.Addr(hostarch.ByteOrder.Uint64(ifr.Data[:8]))\n+ var ethtoolCmd linux.EthtoolCmd\n+ if _, err := ethtoolCmd.CopyIn(cc, ifrData); err != nil {\n+ return 0, err\n+ }\n+ // We only support ETHTOOL_GFEATURES.\n+ if ethtoolCmd != linux.ETHTOOL_GFEATURES {\n+ return 0, linuxerr.EOPNOTSUPP\n+ }\n+ var gfeatures linux.EthtoolGFeatures\n+ if _, err := gfeatures.CopyIn(cc, ifrData); err != nil {\n+ return 0, err\n+ }\n+\n+ // Find the requested device.\n+ stk := inet.StackFromContext(ctx)\n+ if stk == nil {\n+ return 0, linuxerr.ENODEV\n+ }\n+\n+ var (\n+ iface inet.Interface\n+ found bool\n+ )\n+ for _, iface = range stk.Interfaces() {\n+ if iface.Name == ifr.Name() {\n+ found = true\n+ break\n+ }\n+ }\n+ if !found {\n+ return 0, linuxerr.ENODEV\n+ }\n+\n+ // Copy out the feature blocks to the memory pointed to by ifrData.\n+ blksToCopy := int(gfeatures.Size)\n+ if blksToCopy > len(iface.Features) {\n+ blksToCopy = len(iface.Features)\n+ }\n+ gfeatures.Size = uint32(blksToCopy)\n+ if _, err := gfeatures.CopyOut(cc, ifrData); err != nil {\n+ return 0, err\n+ }\n+ next, ok := ifrData.AddLength(uint64(unsafe.Sizeof(linux.EthtoolGFeatures{})))\n+ for i := 0; i < blksToCopy; i++ {\n+ if !ok {\n+ return 0, linuxerr.EFAULT\n+ }\n+ if _, err := iface.Features[i].CopyOut(cc, next); err != nil {\n+ return 0, err\n+ }\n+ next, ok = next.AddLength(uint64(unsafe.Sizeof(linux.EthtoolGetFeaturesBlock{})))\n+ }\n+\n+ return 0, nil\ndefault:\nreturn 0, linuxerr.ENOTTY\n}\n@@ -115,8 +186,7 @@ func accept4(fd int, addr *byte, addrlen *uint32, flags int) (int, error) {\nreturn int(afd), nil\n}\n-func getsockopt(fd int, level, name int, optlen int) ([]byte, error) {\n- opt := make([]byte, optlen)\n+func getsockopt(fd int, level, name int, opt []byte) ([]byte, error) {\noptlen32 := int32(len(opt))\n_, _, errno := unix.Syscall6(unix.SYS_GETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(firstBytePtr(opt)), uintptr(unsafe.Pointer(&optlen32)), 0)\nif errno != 0 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/sockopt_impl.go",
"new_path": "pkg/sentry/socket/hostinet/sockopt_impl.go",
"diff": "@@ -21,10 +21,14 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n)\n-func getSockOptLen(t *kernel.Task, level, name int) int {\n- return 0 // No custom options.\n+func getSockOptLen(t *kernel.Task, level, name int) (len int, copyIn bool) {\n+ return 0, false // No custom options.\n}\nfunc setSockOptLen(t *kernel.Task, level, name int) int {\nreturn 0 // No custom options.\n}\n+\n+func postGetSockOpt(t *kernel.Task, level, name int, opt []byte) []byte {\n+ return opt // No custom changes to option value.\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/stack.go",
"new_path": "pkg/sentry/socket/hostinet/stack.go",
"diff": "@@ -23,7 +23,6 @@ import (\n\"reflect\"\n\"strconv\"\n\"strings\"\n-\n\"syscall\"\n\"golang.org/x/sys/unix\"\n@@ -54,7 +53,7 @@ var defaultSendBufSize = inet.TCPBufferSize{\n// Stack implements inet.Stack for host sockets.\ntype Stack struct {\n// Stack is immutable.\n- interfaces map[int32]inet.Interface\n+ interfaces map[int32]*inet.Interface\ninterfaceAddrs map[int32][]inet.InterfaceAddr\nroutes []inet.Route\nsupportsIPv6 bool\n@@ -69,7 +68,7 @@ type Stack struct {\n// NewStack returns an empty Stack containing no configuration.\nfunc NewStack() *Stack {\nreturn &Stack{\n- interfaces: make(map[int32]inet.Interface),\n+ interfaces: make(map[int32]*inet.Interface),\ninterfaceAddrs: make(map[int32][]inet.InterfaceAddr),\n}\n}\n@@ -129,7 +128,7 @@ func (s *Stack) Configure() error {\n// ExtractHostInterfaces will populate an interface map and\n// interfaceAddrs map with the results of the equivalent\n// netlink messages.\n-func ExtractHostInterfaces(links []syscall.NetlinkMessage, addrs []syscall.NetlinkMessage, interfaces map[int32]inet.Interface, interfaceAddrs map[int32][]inet.InterfaceAddr) error {\n+func ExtractHostInterfaces(links []syscall.NetlinkMessage, addrs []syscall.NetlinkMessage, interfaces map[int32]*inet.Interface, interfaceAddrs map[int32][]inet.InterfaceAddr) error {\nfor _, link := range links {\nif link.Header.Type != unix.RTM_NEWLINK {\ncontinue\n@@ -158,7 +157,7 @@ func ExtractHostInterfaces(links []syscall.NetlinkMessage, addrs []syscall.Netli\ninetIF.Name = string(attr.Value[:len(attr.Value)-1])\n}\n}\n- interfaces[ifinfo.Index] = inetIF\n+ interfaces[ifinfo.Index] = &inetIF\n}\nfor _, addr := range addrs {\n@@ -258,7 +257,15 @@ func addHostInterfaces(s *Stack) error {\nreturn fmt.Errorf(\"RTM_GETADDR failed: %v\", err)\n}\n- return ExtractHostInterfaces(links, addrs, s.interfaces, s.interfaceAddrs)\n+ if err := ExtractHostInterfaces(links, addrs, s.interfaces, s.interfaceAddrs); err != nil {\n+ return err\n+ }\n+\n+ // query interface features for each of the host interfaces.\n+ if err := queryInterfaceFeatures(s.interfaces); err != nil {\n+ return err\n+ }\n+ return nil\n}\nfunc addHostRoutes(s *Stack) error {\n@@ -304,7 +311,7 @@ func readTCPBufferSizeFile(filename string) (inet.TCPBufferSize, error) {\nfunc (s *Stack) Interfaces() map[int32]inet.Interface {\ninterfaces := make(map[int32]inet.Interface)\nfor k, v := range s.interfaces {\n- interfaces[k] = v\n+ interfaces[k] = *v\n}\nreturn interfaces\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/socket/hostinet/stack_unsafe.go",
"diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package hostinet\n+\n+import (\n+ \"runtime\"\n+ \"unsafe\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/hostarch\"\n+ \"gvisor.dev/gvisor/pkg/sentry/inet\"\n+)\n+\n+func queryInterfaceFeatures(interfaces map[int32]*inet.Interface) error {\n+ fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_STREAM, 0)\n+ if err != nil {\n+ return err\n+ }\n+ defer unix.Close(fd)\n+ for idx, nic := range interfaces {\n+ var ifr linux.IFReq\n+ copy(ifr.IFName[:], nic.Name)\n+ var gfeatures linux.EthtoolGFeatures\n+ // Each feature block is sufficient to query 32 features, the linux\n+ // kernel today supports upto 64 features per device. Technically it\n+ // can support more in the future but this is sufficient for our use\n+ // right now.\n+ const (\n+ numFeatureBlocks = 2\n+ ifrDataSz = unsafe.Sizeof(linux.EthtoolGFeatures{}) + numFeatureBlocks*unsafe.Sizeof(linux.EthtoolGetFeaturesBlock{})\n+ )\n+ featureBlocks := make([]linux.EthtoolGetFeaturesBlock, numFeatureBlocks)\n+ b := make([]byte, ifrDataSz)\n+ gfeatures.Cmd = uint32(linux.ETHTOOL_GFEATURES)\n+ gfeatures.Size = numFeatureBlocks\n+ gfeatures.MarshalBytes(b)\n+ next := b[unsafe.Sizeof(linux.EthtoolGFeatures{}):]\n+ for i := 0; i < numFeatureBlocks; i++ {\n+ featureBlocks[i].MarshalBytes(next)\n+ next = next[unsafe.Sizeof(linux.EthtoolGetFeaturesBlock{}):]\n+ }\n+\n+ // Technically the next two lines are not safe as Go GC can technically move\n+ // b to a new location and the pointer value stored in ifr.Data could point\n+ // to random memory. But the reality today is that Go GC is not a moving GC\n+ // so this is essentially safe as of today.\n+ //\n+ // TODO(b/209014118): Use Pin API when available in Go runtime to make this\n+ // safe.\n+ dataPtr := unsafe.Pointer(&b[0])\n+ hostarch.ByteOrder.PutUint64(ifr.Data[:8], uint64(uintptr(dataPtr)))\n+\n+ if _, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(fd), unix.SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))); errno != 0 {\n+ return errno\n+ }\n+\n+ // Unmarshall the features back.\n+ gfeatures.UnmarshalBytes(b)\n+ next = b[unsafe.Sizeof(linux.EthtoolGFeatures{}):]\n+ for i := 0; i < int(gfeatures.Size); i++ {\n+ featureBlocks[i].UnmarshalBytes(next)\n+ next = next[unsafe.Sizeof(linux.EthtoolGetFeaturesBlock{}):]\n+ }\n+ // Store the queried features.\n+ interfaces[idx].Features = make([]linux.EthtoolGetFeaturesBlock, gfeatures.Size)\n+ copy(interfaces[idx].Features, featureBlocks)\n+\n+ // This ensures b is not garbage collected before this point to ensure that\n+ // the slice is not collected before the syscall returns and we copy out the\n+ // data.\n+ runtime.KeepAlive(b)\n+ }\n+ return nil\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support custom socket options in hostinet.
PiperOrigin-RevId: 416625574 |
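The reshaped getsockopt helper above takes a caller-provided buffer so that custom options can pre-populate the value before the call, as SIOCETHTOOL-style options require. Here is a hedged sketch of that raw-syscall shape, exercised against the ordinary SO_RCVBUF option; it is Linux-only and not the sentry's actual code path.

```go
// Sketch of a getsockopt(2) wrapper that takes a caller-provided buffer,
// mirroring the shape of the helper in the diff. The buffer is assumed
// non-empty; error handling is minimal.
package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func getsockopt(fd, level, name int, opt []byte) ([]byte, error) {
	optlen := uint32(len(opt))
	_, _, errno := unix.Syscall6(unix.SYS_GETSOCKOPT,
		uintptr(fd), uintptr(level), uintptr(name),
		uintptr(unsafe.Pointer(&opt[0])), uintptr(unsafe.Pointer(&optlen)), 0)
	if errno != 0 {
		return nil, errno
	}
	// The kernel may shrink optlen; return only the written bytes.
	return opt[:optlen], nil
}

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Query SO_RCVBUF as a 4-byte option value.
	buf := make([]byte, 4)
	out, err := getsockopt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF, buf)
	fmt.Println(out, err)
}
```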
260,004 | 15.12.2021 13:06:52 | 28,800 | 7b6078e25245da877dc03656a9af35403abdad36 | Provide MTU for pipe LinkEndpoint
...so that callers can set different MTUs for different configurations. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/pipe/pipe.go",
"new_path": "pkg/tcpip/link/pipe/pipe.go",
"diff": "@@ -26,12 +26,14 @@ import (\nvar _ stack.LinkEndpoint = (*Endpoint)(nil)\n// New returns both ends of a new pipe.\n-func New(linkAddr1, linkAddr2 tcpip.LinkAddress) (*Endpoint, *Endpoint) {\n+func New(linkAddr1, linkAddr2 tcpip.LinkAddress, mtu uint32) (*Endpoint, *Endpoint) {\nep1 := &Endpoint{\nlinkAddr: linkAddr1,\n+ mtu: mtu,\n}\nep2 := &Endpoint{\nlinkAddr: linkAddr2,\n+ mtu: mtu,\n}\nep1.linked = ep2\nep2.linked = ep1\n@@ -43,6 +45,7 @@ type Endpoint struct {\ndispatcher stack.NetworkDispatcher\nlinked *Endpoint\nlinkAddr tcpip.LinkAddress\n+ mtu uint32\n}\nfunc (e *Endpoint) deliverPackets(r stack.RouteInfo, proto tcpip.NetworkProtocolNumber, pkts stack.PacketBufferList) {\n@@ -96,8 +99,8 @@ func (e *Endpoint) IsAttached() bool {\nfunc (*Endpoint) Wait() {}\n// MTU implements stack.LinkEndpoint.\n-func (*Endpoint) MTU() uint32 {\n- return header.IPv6MinimumMTU\n+func (e *Endpoint) MTU() uint32 {\n+ return e.mtu\n}\n// Capabilities implements stack.LinkEndpoint.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tests/integration/istio_test.go",
"new_path": "pkg/tcpip/tests/integration/istio_test.go",
"diff": "@@ -113,7 +113,7 @@ var (\nfunc newTestContext(t *testing.T) *testContext {\nt.Helper()\n- localNIC, remoteNIC := pipe.New(\"\" /* linkAddr1 */, \"\" /* linkAddr2 */)\n+ localNIC, remoteNIC := pipe.New(\"\" /* linkAddr1 */, \"\" /* linkAddr2 */, header.IPv4MinimumMTU)\nlocalStack := stack.New(stack.Options{\nNetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tests/integration/link_resolution_test.go",
"new_path": "pkg/tcpip/tests/integration/link_resolution_test.go",
"diff": "@@ -44,10 +44,12 @@ import (\n)\nfunc setupStack(t *testing.T, stackOpts stack.Options, host1NICID, host2NICID tcpip.NICID) (*stack.Stack, *stack.Stack) {\n+ const maxFrameSize = header.IPv6MinimumMTU + header.EthernetMinimumSize\n+\nhost1Stack := stack.New(stackOpts)\nhost2Stack := stack.New(stackOpts)\n- host1NIC, host2NIC := pipe.New(utils.LinkAddr1, utils.LinkAddr2)\n+ host1NIC, host2NIC := pipe.New(utils.LinkAddr1, utils.LinkAddr2, maxFrameSize)\nif err := host1Stack.CreateNIC(host1NICID, utils.NewEthernetEndpoint(host1NIC)); err != nil {\nt.Fatalf(\"host1Stack.CreateNIC(%d, _): %s\", host1NICID, err)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tests/utils/utils.go",
"new_path": "pkg/tcpip/tests/utils/utils.go",
"diff": "@@ -279,8 +279,9 @@ func SetupRouterStack(t *testing.T, s *stack.Stack, ep1, ep2 stack.LinkEndpoint)\n// SetupRoutedStacks creates the NICs, sets forwarding, adds addresses and sets\n// the route tables for the passed stacks.\nfunc SetupRoutedStacks(t *testing.T, host1Stack, routerStack, host2Stack *stack.Stack) {\n- host1NIC, routerNIC1 := pipe.New(LinkAddr1, LinkAddr2)\n- routerNIC2, host2NIC := pipe.New(LinkAddr3, LinkAddr4)\n+ const maxFrameSize = header.IPv6MinimumMTU + header.EthernetMinimumSize\n+ host1NIC, routerNIC1 := pipe.New(LinkAddr1, LinkAddr2, maxFrameSize)\n+ routerNIC2, host2NIC := pipe.New(LinkAddr3, LinkAddr4, maxFrameSize)\nSetupRouterStack(t, routerStack, NewEthernetEndpoint(routerNIC1), NewEthernetEndpoint(routerNIC2))\n"
}
] | Go | Apache License 2.0 | google/gvisor | Provide MTU for pipe LinkEndpoint
...so that callers can set different MTUs for different configurations.
PiperOrigin-RevId: 416629560 |
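The construction-time MTU is a small pattern worth isolating: both ends of the pipe share a caller-chosen value instead of a hard-coded constant. A self-contained sketch follows, with a stand-in interface rather than gVisor's stack.LinkEndpoint.

```go
// Minimal sketch of the pattern adopted by the pipe endpoint: the MTU is
// fixed at construction time. The interface below is a local stand-in.
package main

import "fmt"

type linkEndpoint interface {
	MTU() uint32
}

type pipeEndpoint struct {
	linked *pipeEndpoint
	mtu    uint32
}

// newPipe returns both ends of a pipe that share a caller-chosen MTU.
func newPipe(mtu uint32) (*pipeEndpoint, *pipeEndpoint) {
	ep1 := &pipeEndpoint{mtu: mtu}
	ep2 := &pipeEndpoint{mtu: mtu}
	ep1.linked, ep2.linked = ep2, ep1
	return ep1, ep2
}

func (e *pipeEndpoint) MTU() uint32 { return e.mtu }

func main() {
	a, b := newPipe(1280) // e.g. the IPv6 minimum MTU
	var _ linkEndpoint = a
	fmt.Println(a.MTU(), b.MTU())
}
```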
260,004 | 15.12.2021 14:18:00 | 28,800 | bb129b4becfa09346eb4e2a88dcaf96dbe9a2c0f | Consider ethernet header size when returning MTU
...so that upper layers do not send larger-than-expected packets. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/ethernet/ethernet.go",
"new_path": "pkg/tcpip/link/ethernet/ethernet.go",
"diff": "@@ -50,6 +50,14 @@ func (e *Endpoint) LinkAddress() tcpip.LinkAddress {\nreturn header.UnspecifiedEthernetAddress\n}\n+// MTU implements stack.LinkEndpoint.\n+func (e *Endpoint) MTU() uint32 {\n+ if mtu := e.Endpoint.MTU(); mtu > header.EthernetMinimumSize {\n+ return mtu - header.EthernetMinimumSize\n+ }\n+ return 0\n+}\n+\n// DeliverNetworkPacket implements stack.NetworkDispatcher.\nfunc (e *Endpoint) DeliverNetworkPacket(_, _ tcpip.LinkAddress, _ tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {\nhdr, ok := pkt.LinkHeader().Consume(header.EthernetMinimumSize)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/ethernet/ethernet_test.go",
"new_path": "pkg/tcpip/link/ethernet/ethernet_test.go",
"diff": "package ethernet_test\nimport (\n+ \"fmt\"\n\"testing\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n@@ -69,3 +70,52 @@ func TestDeliverNetworkPacket(t *testing.T) {\nt.Fatalf(\"got networkDispatcher.networkPackets = %d, want = 1\", networkDispatcher.networkPackets)\n}\n}\n+\n+type testLinkEndpoint struct {\n+ stack.LinkEndpoint\n+\n+ mtu uint32\n+}\n+\n+func (t *testLinkEndpoint) MTU() uint32 {\n+ return t.mtu\n+}\n+\n+func TestMTU(t *testing.T) {\n+ const maxFrameSize = 1500\n+\n+ tests := []struct {\n+ maxFrameSize uint32\n+ expectedMTU uint32\n+ }{\n+ {\n+ maxFrameSize: 0,\n+ expectedMTU: 0,\n+ },\n+ {\n+ maxFrameSize: header.EthernetMinimumSize - 1,\n+ expectedMTU: 0,\n+ },\n+ {\n+ maxFrameSize: header.EthernetMinimumSize,\n+ expectedMTU: 0,\n+ },\n+ {\n+ maxFrameSize: header.EthernetMinimumSize + 1,\n+ expectedMTU: 1,\n+ },\n+ {\n+ maxFrameSize: maxFrameSize,\n+ expectedMTU: maxFrameSize - header.EthernetMinimumSize,\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(fmt.Sprintf(\"MaxFrameSize=%d\", test.maxFrameSize), func(t *testing.T) {\n+ e := ethernet.New(&testLinkEndpoint{mtu: test.maxFrameSize})\n+ if got := e.MTU(); got != test.expectedMTU {\n+ t.Errorf(\"got e.MTU() = %d, want = %d\", got, test.expectedMTU)\n+ }\n+ })\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Consider ethernet header size when returning MTU
...so that upper layers do not send larger-than-expected packets.
PiperOrigin-RevId: 416645503 |
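The MTU computation here is just a guarded subtraction: take the lower endpoint's maximum frame size, remove the L2 header, and clamp at zero so the unsigned arithmetic cannot underflow. A sketch that reproduces it, with the 14-byte Ethernet header size written out as a local constant:

```go
// Sketch of the MTU computation from the ethernet endpoint. Clamping to
// zero guards against a frame size smaller than the header itself.
package main

import "fmt"

const ethernetMinimumSize = 14 // destination MAC + source MAC + EtherType

func mtu(maxFrameSize uint32) uint32 {
	if maxFrameSize > ethernetMinimumSize {
		return maxFrameSize - ethernetMinimumSize
	}
	return 0
}

func main() {
	for _, f := range []uint32{0, 13, 14, 15, 1500} {
		fmt.Printf("frame=%d mtu=%d\n", f, mtu(f))
	}
}
```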
259,898 | 15.12.2021 14:44:22 | 28,800 | 3ac6a70159af43177b12e71a66e2a6b2db6098a4 | Remove stale TODO
Fixes | [
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/dut/dut.go",
"new_path": "test/packetimpact/dut/dut.go",
"diff": "@@ -134,9 +134,9 @@ func Run(dut DUT) error {\n// WaitForServer waits for a pattern to occur in posix_server's logs.\nfunc WaitForServer(output io.Reader) error {\n- // TODO(gvisor.dev/issue/6835): waiting for the server via log output is\n- // fragile, a better way could be passing a file descriptor, which is not\n- // possible with docker, do that after the docker runner is removed.\n+ // Scanning log lines is not the most robust way, we could pass a file\n+ // descriptor to signal the event for native/runsc DUTs, however, it is not\n+ // possible for a fuchsia DUT as it lives inside a qemu instance.\nscanner := bufio.NewScanner(output)\nfor scanner.Scan() {\nif text := scanner.Text(); strings.HasPrefix(text, \"Server listening on\") {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove stale TODO
Fixes #6835.
PiperOrigin-RevId: 416651232 |
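The retained log-scanning wait is easy to reproduce with the standard library: read the subprocess's output line by line until the expected startup banner appears. A sketch, assuming only that the server prints the banner text shown in the diff:

```go
// Sketch of the WaitForServer log-scanning wait. The banner prefix
// matches the diff; any io.Reader can supply the log stream.
package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

func waitForServer(output io.Reader) error {
	scanner := bufio.NewScanner(output)
	for scanner.Scan() {
		if strings.HasPrefix(scanner.Text(), "Server listening on") {
			return nil
		}
	}
	if err := scanner.Err(); err != nil {
		return err
	}
	return fmt.Errorf("server exited without the expected banner")
}

func main() {
	logs := strings.NewReader("starting up\nServer listening on 0.0.0.0:40000\n")
	fmt.Println(waitForServer(logs)) // prints <nil>
}
```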
260,004 | 15.12.2021 17:08:59 | 28,800 | 5baf7212b778324cca324a60739e7733614425f9 | Fix error getting IPv6 option on IPv4 socket
Previously returned ENOPROTOOPT but should return EOPNOTSUPP to match
Linux. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -1302,7 +1302,7 @@ func getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name\nfamily, skType, _ := s.Type()\nif family != linux.AF_INET6 {\n- return nil, syserr.ErrUnknownProtocolOption\n+ return nil, syserr.ErrNotSupported\n}\nswitch name {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/raw_socket_icmp.cc",
"new_path": "test/syscalls/linux/raw_socket_icmp.cc",
"diff": "@@ -99,6 +99,16 @@ void RawSocketICMPTest::TearDown() {\n}\n}\n+TEST_F(RawSocketICMPTest, SockOptIPv6Checksum) {\n+ int v;\n+ EXPECT_THAT(setsockopt(s_, SOL_IPV6, IPV6_CHECKSUM, &v, sizeof(v)),\n+ SyscallFailsWithErrno(ENOPROTOOPT));\n+ socklen_t len = sizeof(v);\n+ EXPECT_THAT(getsockopt(s_, SOL_IPV6, IPV6_CHECKSUM, &v, &len),\n+ SyscallFailsWithErrno(EOPNOTSUPP));\n+ EXPECT_EQ(len, sizeof(v));\n+}\n+\n// We'll only read an echo in this case, as the kernel won't respond to the\n// malformed ICMP checksum.\nTEST_F(RawSocketICMPTest, SendAndReceiveBadChecksum) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix error getting IPv6 option on IPv4 socket
Previously returned ENOPROTOOPT but should return EOPNOTSUPP to match
Linux.
PiperOrigin-RevId: 416680620 |
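A sketch of how the asserted errno pair could be probed from Go. Note that the new test uses a raw ICMP socket; the plain TCP socket below avoids the privilege requirement, so the exact errnos it observes on a host kernel may differ from the test's. It only demonstrates issuing the IPV6_CHECKSUM calls against an IPv4 socket.

```go
// Sketch of probing an IPv6-level option (IPV6_CHECKSUM) on an IPv4
// socket. The test asserts ENOPROTOOPT on set and EOPNOTSUPP on get for
// raw ICMP sockets; a TCP socket is used here only for convenience and
// may report different errnos.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	setErr := unix.SetsockoptInt(fd, unix.SOL_IPV6, unix.IPV6_CHECKSUM, 0)
	_, getErr := unix.GetsockoptInt(fd, unix.SOL_IPV6, unix.IPV6_CHECKSUM)
	fmt.Println("set:", setErr, "get:", getErr)
}
```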
259,951 | 20.12.2021 10:08:15 | 28,800 | ec18c6bcf9760b8a6a8eaed61ee0f6cb915b9466 | Add default TTL test for UDP and TCP endpoints
Simplify the retrieval of the default TTL and include the multicast flows in
the UDP SetTTL tests.
Add IPv6 protocol coverage for the TCP TTL tests. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"new_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"diff": "@@ -3403,14 +3403,81 @@ func TestSendGreaterThanMTU(t *testing.T) {\ntestBrokenUpWrite(t, c, maxPayload)\n}\n+func TestDefaultTTL(t *testing.T) {\n+ for _, test := range []struct {\n+ name string\n+ protoNum tcpip.NetworkProtocolNumber\n+ addr tcpip.Address\n+ }{\n+ {\"ipv4\", ipv4.ProtocolNumber, context.TestAddr},\n+ {\"ipv6\", ipv6.ProtocolNumber, context.TestV6Addr},\n+ } {\n+ t.Run(fmt.Sprint(test.name), func(t *testing.T) {\n+ c := context.New(t, 65535)\n+ defer c.Cleanup()\n+\n+ var err tcpip.Error\n+ c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, test.protoNum, &waiter.Queue{})\n+ if err != nil {\n+ t.Fatalf(\"NewEndpoint failed: %s\", err)\n+ }\n+\n+ proto := c.Stack().NetworkProtocolInstance(test.protoNum)\n+ if proto == nil {\n+ t.Fatalf(\"c.s.NetworkProtocolInstance(flow.netProto()) did not return a protocol\")\n+ }\n+\n+ var initialDefaultTTL tcpip.DefaultTTLOption\n+ if err := proto.Option(&initialDefaultTTL); err != nil {\n+ t.Fatalf(\"proto.Option(&initialDefaultTTL) (%T) failed: %s\", initialDefaultTTL, err)\n+ }\n+\n+ {\n+ err := c.EP.Connect(tcpip.FullAddress{Addr: test.addr, Port: context.TestPort})\n+ if d := cmp.Diff(&tcpip.ErrConnectStarted{}, err); d != \"\" {\n+ t.Fatalf(\"c.EP.Connect(...) mismatch (-want +got):\\n%s\", d)\n+ }\n+ }\n+\n+ checkTTL := func(ttl uint8) {\n+ if test.protoNum == ipv4.ProtocolNumber {\n+ checker.IPv4(t, c.GetPacket(), checker.TTL(ttl))\n+ } else {\n+ checker.IPv6(t, c.GetV6Packet(), checker.TTL(ttl))\n+ }\n+ }\n+\n+ // Receive SYN packet.\n+ checkTTL(uint8(initialDefaultTTL))\n+\n+ newDefaultTTL := tcpip.DefaultTTLOption(initialDefaultTTL + 1)\n+ if err := proto.SetOption(&newDefaultTTL); err != nil {\n+ t.Fatalf(\"proto.SetOption(&%T(%d))) failed: %s\", newDefaultTTL, newDefaultTTL, err)\n+ }\n+\n+ // Receive retransmitted SYN packet.\n+ checkTTL(uint8(newDefaultTTL))\n+ })\n+ }\n+}\n+\nfunc TestSetTTL(t *testing.T) {\n+ for _, test := range []struct {\n+ name string\n+ protoNum tcpip.NetworkProtocolNumber\n+ addr tcpip.Address\n+ }{\n+ {\"ipv4\", ipv4.ProtocolNumber, context.TestAddr},\n+ {\"ipv6\", ipv6.ProtocolNumber, context.TestV6Addr},\n+ } {\n+ t.Run(fmt.Sprint(test.name), func(t *testing.T) {\nfor _, wantTTL := range []uint8{1, 2, 50, 64, 128, 254, 255} {\nt.Run(fmt.Sprintf(\"TTL:%d\", wantTTL), func(t *testing.T) {\nc := context.New(t, 65535)\ndefer c.Cleanup()\nvar err tcpip.Error\n- c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})\n+ c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, test.protoNum, &waiter.Queue{})\nif err != nil {\nt.Fatalf(\"NewEndpoint failed: %s\", err)\n}\n@@ -3420,16 +3487,20 @@ func TestSetTTL(t *testing.T) {\n}\n{\n- err := c.EP.Connect(tcpip.FullAddress{Addr: context.TestAddr, Port: context.TestPort})\n+ err := c.EP.Connect(tcpip.FullAddress{Addr: test.addr, Port: context.TestPort})\nif d := cmp.Diff(&tcpip.ErrConnectStarted{}, err); d != \"\" {\nt.Fatalf(\"c.EP.Connect(...) mismatch (-want +got):\\n%s\", d)\n}\n}\n// Receive SYN packet.\n- b := c.GetPacket()\n-\n- checker.IPv4(t, b, checker.TTL(wantTTL))\n+ if test.protoNum == ipv4.ProtocolNumber {\n+ checker.IPv4(t, c.GetPacket(), checker.TTL(wantTTL))\n+ } else {\n+ checker.IPv6(t, c.GetV6Packet(), checker.TTL(wantTTL))\n+ }\n+ })\n+ }\n})\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/udp_test.go",
"new_path": "pkg/tcpip/transport/udp/udp_test.go",
"diff": "@@ -287,6 +287,13 @@ func (flow testFlow) isReverseMulticast() bool {\n}\n}\n+func (flow testFlow) ttlOption() tcpip.SockOptInt {\n+ if flow.isMulticast() {\n+ return tcpip.MulticastTTLOption\n+ }\n+ return tcpip.TTLOption\n+}\n+\ntype testContext struct {\nt *testing.T\nlinkEP *channel.Endpoint\n@@ -1608,48 +1615,35 @@ func (*testInterface) Enabled() bool {\nreturn true\n}\n-func TestTTL(t *testing.T) {\n- for _, flow := range []testFlow{unicastV4, unicastV4in6, unicastV6, unicastV6Only, multicastV4, multicastV4in6, multicastV6, broadcast, broadcastIn6} {\n+func TestNonMulticastDefaultTTL(t *testing.T) {\n+ for _, flow := range []testFlow{unicastV4, unicastV4in6, unicastV6, unicastV6Only, broadcast, broadcastIn6} {\nt.Run(fmt.Sprintf(\"flow:%s\", flow), func(t *testing.T) {\nc := newDualTestContext(t, defaultMTU)\ndefer c.cleanup()\nc.createEndpointForFlow(flow)\n-\n- const multicastTTL = 42\n- if err := c.ep.SetSockOptInt(tcpip.MulticastTTLOption, multicastTTL); err != nil {\n- c.t.Fatalf(\"SetSockOptInt failed: %s\", err)\n+ proto := c.s.NetworkProtocolInstance(flow.netProto())\n+ if proto == nil {\n+ t.Fatalf(\"c.s.NetworkProtocolInstance(flow.netProto()) did not return a protocol\")\n}\n- var wantTTL uint8\n- if flow.isMulticast() {\n- wantTTL = multicastTTL\n- } else {\n- var p stack.NetworkProtocolFactory\n- var n tcpip.NetworkProtocolNumber\n- if flow.isV4() {\n- p = ipv4.NewProtocol\n- n = ipv4.ProtocolNumber\n- } else {\n- p = ipv6.NewProtocol\n- n = ipv6.ProtocolNumber\n- }\n- s := stack.New(stack.Options{\n- NetworkProtocols: []stack.NetworkProtocolFactory{p},\n- Clock: &faketime.NullClock{},\n- })\n- ep := s.NetworkProtocolInstance(n).NewEndpoint(&testInterface{}, nil)\n- wantTTL = ep.DefaultTTL()\n- ep.Close()\n+ var initialDefaultTTL tcpip.DefaultTTLOption\n+ if err := proto.Option(&initialDefaultTTL); err != nil {\n+ t.Fatalf(\"proto.Option(&initialDefaultTTL) (%T) failed: %s\", initialDefaultTTL, err)\n}\n+ testWrite(c, flow, checker.TTL(uint8(initialDefaultTTL)))\n- testWrite(c, flow, checker.TTL(wantTTL))\n+ newDefaultTTL := tcpip.DefaultTTLOption(initialDefaultTTL + 1)\n+ if err := proto.SetOption(&newDefaultTTL); err != nil {\n+ c.t.Fatalf(\"proto.SetOption(&%T(%d))) failed: %s\", newDefaultTTL, newDefaultTTL, err)\n+ }\n+ testWrite(c, flow, checker.TTL(uint8(newDefaultTTL)))\n})\n}\n}\nfunc TestSetTTL(t *testing.T) {\n- for _, flow := range []testFlow{unicastV4, unicastV4in6, unicastV6, unicastV6Only, broadcast, broadcastIn6} {\n+ for _, flow := range []testFlow{unicastV4, unicastV4in6, unicastV6, unicastV6Only, multicastV4, multicastV4in6, multicastV6, broadcast, broadcastIn6} {\nt.Run(fmt.Sprintf(\"flow:%s\", flow), func(t *testing.T) {\nfor _, wantTTL := range []uint8{1, 2, 50, 64, 128, 254, 255} {\nt.Run(fmt.Sprintf(\"TTL:%d\", wantTTL), func(t *testing.T) {\n@@ -1658,8 +1652,9 @@ func TestSetTTL(t *testing.T) {\nc.createEndpointForFlow(flow)\n- if err := c.ep.SetSockOptInt(tcpip.TTLOption, int(wantTTL)); err != nil {\n- c.t.Fatalf(\"SetSockOptInt(TTLOption, %d) failed: %s\", wantTTL, err)\n+ opt := flow.ttlOption()\n+ if err := c.ep.SetSockOptInt(opt, int(wantTTL)); err != nil {\n+ c.t.Fatalf(\"SetSockOptInt(%d, %d) failed: %s\", opt, wantTTL, err)\n}\ntestWrite(c, flow, checker.TTL(wantTTL))\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add default TTL test for UDP and TCP endpoints
Simplify the retrieval of the default TTL and include the multicast flows in
the UDP SetTTL tests.
Add IPv6 protocol coverage for the TCP TTL tests.
PiperOrigin-RevId: 417429943 |
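At the socket API level, the unicast/multicast split that the new ttlOption() helper encodes corresponds to two distinct IP-level options: IP_TTL for non-multicast destinations and IP_MULTICAST_TTL for multicast ones. A sketch using the host's Linux sockets rather than the netstack test context:

```go
// Sketch of the two TTL knobs the tests exercise, set and read back via
// plain Linux socket options on a UDP socket.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	const wantTTL = 64
	// Unicast/broadcast writes honor IP_TTL.
	if err := unix.SetsockoptInt(fd, unix.IPPROTO_IP, unix.IP_TTL, wantTTL); err != nil {
		panic(err)
	}
	// Multicast writes honor IP_MULTICAST_TTL.
	if err := unix.SetsockoptInt(fd, unix.IPPROTO_IP, unix.IP_MULTICAST_TTL, wantTTL); err != nil {
		panic(err)
	}

	ttl, _ := unix.GetsockoptInt(fd, unix.IPPROTO_IP, unix.IP_TTL)
	mttl, _ := unix.GetsockoptInt(fd, unix.IPPROTO_IP, unix.IP_MULTICAST_TTL)
	fmt.Println("ttl:", ttl, "multicast ttl:", mttl)
}
```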
260,004 | 20.12.2021 10:45:51 | 28,800 | b26b4610bfb7bf37821ab33ad51ace3c6ece81dd | Use available helpers to set fields
...and introduce a helper to set the Checksum. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/checksum.go",
"new_path": "pkg/tcpip/header/checksum.go",
"diff": "@@ -24,6 +24,11 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n)\n+// PutChecksum puts the checksum in the provided byte slice.\n+func PutChecksum(b []byte, xsum uint16) {\n+ binary.BigEndian.PutUint16(b, xsum)\n+}\n+\nfunc calculateChecksum(buf []byte, odd bool, initial uint32) (uint16, bool) {\nv := initial\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/icmpv4.go",
"new_path": "pkg/tcpip/header/icmpv4.go",
"diff": "@@ -139,7 +139,7 @@ func (b ICMPv4) Checksum() uint16 {\n// SetChecksum sets the ICMP checksum field.\nfunc (b ICMPv4) SetChecksum(checksum uint16) {\n- binary.BigEndian.PutUint16(b[icmpv4ChecksumOffset:], checksum)\n+ PutChecksum(b[icmpv4ChecksumOffset:], checksum)\n}\n// SourcePort implements Transport.SourcePort.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/icmpv6.go",
"new_path": "pkg/tcpip/header/icmpv6.go",
"diff": "@@ -199,7 +199,7 @@ func (b ICMPv6) Checksum() uint16 {\n// SetChecksum sets the ICMP checksum field.\nfunc (b ICMPv6) SetChecksum(checksum uint16) {\n- binary.BigEndian.PutUint16(b[icmpv6ChecksumOffset:], checksum)\n+ PutChecksum(b[icmpv6ChecksumOffset:], checksum)\n}\n// SourcePort implements Transport.SourcePort.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/ipv4.go",
"new_path": "pkg/tcpip/header/ipv4.go",
"diff": "@@ -380,7 +380,7 @@ func (b IPv4) SetTotalLength(totalLength uint16) {\n// SetChecksum sets the checksum field of the IPv4 header.\nfunc (b IPv4) SetChecksum(v uint16) {\n- binary.BigEndian.PutUint16(b[checksum:], v)\n+ PutChecksum(b[checksum:], v)\n}\n// SetFlagsFragmentOffset sets the \"flags\" and \"fragment offset\" fields of the\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/tcp.go",
"new_path": "pkg/tcpip/header/tcp.go",
"diff": "@@ -289,7 +289,7 @@ func (b TCP) SetDestinationPort(port uint16) {\n// SetChecksum sets the checksum field of the TCP header.\nfunc (b TCP) SetChecksum(checksum uint16) {\n- binary.BigEndian.PutUint16(b[TCPChecksumOffset:], checksum)\n+ PutChecksum(b[TCPChecksumOffset:], checksum)\n}\n// SetDataOffset sets the data offset field of the TCP header. headerLen should\n@@ -318,8 +318,8 @@ func (b TCP) SetWindowSize(rcvwnd uint16) {\nbinary.BigEndian.PutUint16(b[TCPWinSizeOffset:], rcvwnd)\n}\n-// SetUrgentPoiner sets the window size field of the TCP header.\n-func (b TCP) SetUrgentPoiner(urgentPointer uint16) {\n+// SetUrgentPointer sets the window size field of the TCP header.\n+func (b TCP) SetUrgentPointer(urgentPointer uint16) {\nbinary.BigEndian.PutUint16(b[TCPUrgentPtrOffset:], urgentPointer)\n}\n@@ -360,11 +360,11 @@ func (b TCP) encodeSubset(seq, ack uint32, flags TCPFlags, rcvwnd uint16) {\n// Encode encodes all the fields of the TCP header.\nfunc (b TCP) Encode(t *TCPFields) {\nb.encodeSubset(t.SeqNum, t.AckNum, t.Flags, t.WindowSize)\n- binary.BigEndian.PutUint16(b[TCPSrcPortOffset:], t.SrcPort)\n- binary.BigEndian.PutUint16(b[TCPDstPortOffset:], t.DstPort)\n- b[TCPDataOffset] = (t.DataOffset / 4) << 4\n- binary.BigEndian.PutUint16(b[TCPChecksumOffset:], t.Checksum)\n- binary.BigEndian.PutUint16(b[TCPUrgentPtrOffset:], t.UrgentPointer)\n+ b.SetSourcePort(t.SrcPort)\n+ b.SetDestinationPort(t.DstPort)\n+ b.SetDataOffset(t.DataOffset)\n+ b.SetChecksum(t.Checksum)\n+ b.SetUrgentPointer(t.UrgentPointer)\n}\n// EncodePartial updates a subset of the fields of the TCP header. It is useful\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/udp.go",
"new_path": "pkg/tcpip/header/udp.go",
"diff": "@@ -101,7 +101,7 @@ func (b UDP) SetDestinationPort(port uint16) {\n// SetChecksum sets the \"checksum\" field of the UDP header.\nfunc (b UDP) SetChecksum(checksum uint16) {\n- binary.BigEndian.PutUint16(b[udpChecksum:], checksum)\n+ PutChecksum(b[udpChecksum:], checksum)\n}\n// SetLength sets the \"length\" field of the UDP header.\n@@ -125,10 +125,10 @@ func (b UDP) IsChecksumValid(src, dst tcpip.Address, payloadChecksum uint16) boo\n// Encode encodes all the fields of the UDP header.\nfunc (b UDP) Encode(u *UDPFields) {\n- binary.BigEndian.PutUint16(b[udpSrcPort:], u.SrcPort)\n- binary.BigEndian.PutUint16(b[udpDstPort:], u.DstPort)\n- binary.BigEndian.PutUint16(b[udpLength:], u.Length)\n- binary.BigEndian.PutUint16(b[udpChecksum:], u.Checksum)\n+ b.SetSourcePort(u.SrcPort)\n+ b.SetDestinationPort(u.DstPort)\n+ b.SetLength(u.Length)\n+ b.SetChecksum(u.Checksum)\n}\n// SetSourcePortWithChecksumUpdate implements ChecksummableTransport.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/testbench/layers.go",
"new_path": "test/packetimpact/testbench/layers.go",
"diff": "@@ -1089,7 +1089,7 @@ func (l *TCP) ToBytes() ([]byte, error) {\nh.SetWindowSize(32768)\n}\nif l.UrgentPointer != nil {\n- h.SetUrgentPoiner(*l.UrgentPointer)\n+ h.SetUrgentPointer(*l.UrgentPointer)\n}\ncopy(b[header.TCPMinimumSize:], l.Options)\nheader.AddTCPOptionPadding(b[header.TCPMinimumSize:], len(l.Options))\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use available helpers to set fields
...and introduce a helper to set the Checksum.
PiperOrigin-RevId: 417437300 |
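The helper-based Encode style is straightforward to sketch outside the tcpip package: fields are written through small named setters instead of repeated binary.BigEndian calls, with the checksum setter shared via a PutChecksum-style helper. The UDP field offsets below are the real wire offsets; the lowercase names are local stand-ins for the header package's helpers.

```go
// Sketch of the PutChecksum helper and the setter-based Encode style.
package main

import (
	"encoding/binary"
	"fmt"
)

// putChecksum writes a 16-bit checksum in network byte order.
func putChecksum(b []byte, xsum uint16) {
	binary.BigEndian.PutUint16(b, xsum)
}

type udpHeader []byte

// UDP wire offsets: src port 0, dst port 2, length 4, checksum 6.
func (h udpHeader) setSourcePort(p uint16)      { binary.BigEndian.PutUint16(h[0:], p) }
func (h udpHeader) setDestinationPort(p uint16) { binary.BigEndian.PutUint16(h[2:], p) }
func (h udpHeader) setLength(l uint16)          { binary.BigEndian.PutUint16(h[4:], l) }
func (h udpHeader) setChecksum(x uint16)        { putChecksum(h[6:], x) }

func main() {
	h := make(udpHeader, 8)
	h.setSourcePort(1234)
	h.setDestinationPort(53)
	h.setLength(8)
	h.setChecksum(0xffff)
	fmt.Printf("% x\n", []byte(h))
}
```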
260,004 | 20.12.2021 17:02:59 | 28,800 | 7fe91395d24ca17e3c75de6e3b8d007cbeb781f9 | Check rules without holding locks
...so that IPTables is re-entrant.
This is to prepare for a later change where an IPTables target will
attempt to send a packet which will have to go through IPTables. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables.go",
"new_path": "pkg/tcpip/stack/iptables.go",
"diff": "@@ -219,6 +219,11 @@ func EmptyNATTable() Table {\nfunc (it *IPTables) GetTable(id TableID, ipv6 bool) Table {\nit.mu.RLock()\ndefer it.mu.RUnlock()\n+ return it.getTableRLocked(id, ipv6)\n+}\n+\n+// +checklocksread:it.mu\n+func (it *IPTables) getTableRLocked(id TableID, ipv6 bool) Table {\nif ipv6 {\nreturn it.v6Tables[id]\n}\n@@ -260,6 +265,40 @@ const (\nchainReturn\n)\n+type checkTable struct {\n+ fn checkTableFn\n+ tableID TableID\n+ table Table\n+}\n+\n+// shouldSkipOrPopulateTables returns true iff IPTables should be skipped.\n+//\n+// If IPTables should not be skipped, tables will be updated with the\n+// specified table.\n+func (it *IPTables) shouldSkipOrPopulateTables(tables []checkTable, pkt *PacketBuffer) bool {\n+ switch pkt.NetworkProtocolNumber {\n+ case header.IPv4ProtocolNumber, header.IPv6ProtocolNumber:\n+ default:\n+ // IPTables only supports IPv4/IPv6.\n+ return true\n+ }\n+\n+ it.mu.RLock()\n+ defer it.mu.RUnlock()\n+\n+ if !it.modified {\n+ // Many users never configure iptables. Spare them the cost of rule\n+ // traversal if rules have never been set.\n+ return true\n+ }\n+\n+ for i := range tables {\n+ table := &tables[i]\n+ table.table = it.getTableRLocked(table.tableID, pkt.NetworkProtocolNumber == header.IPv6ProtocolNumber)\n+ }\n+ return false\n+}\n+\n// CheckPrerouting performs the prerouting hook on the packet.\n//\n// Returns true iff the packet may continue traversing the stack; the packet\n@@ -267,20 +306,25 @@ const (\n//\n// Precondition: The packet's network and transport header must be set.\nfunc (it *IPTables) CheckPrerouting(pkt *PacketBuffer, addressEP AddressableEndpoint, inNicName string) bool {\n- it.mu.RLock()\n- defer it.mu.RUnlock()\n+ tables := [...]checkTable{\n+ {\n+ fn: it.check,\n+ tableID: MangleID,\n+ },\n+ {\n+ fn: it.checkNAT,\n+ tableID: NATID,\n+ },\n+ }\n- if it.shouldSkipRLocked(pkt.NetworkProtocolNumber) {\n+ if it.shouldSkipOrPopulateTables(tables[:], pkt) {\nreturn true\n}\npkt.tuple = it.connections.getConnAndUpdate(pkt)\n- for _, check := range [...]checkTableFn{\n- it.checkMangleRLocked,\n- it.checkNATRLocked,\n- } {\n- if !check(Prerouting, pkt, nil /* route */, addressEP, inNicName, \"\" /* outNicName */) {\n+ for _, table := range tables {\n+ if !table.fn(table.table, Prerouting, pkt, nil /* route */, addressEP, inNicName, \"\" /* outNicName */) {\nreturn false\n}\n}\n@@ -295,18 +339,23 @@ func (it *IPTables) CheckPrerouting(pkt *PacketBuffer, addressEP AddressableEndp\n//\n// Precondition: The packet's network and transport header must be set.\nfunc (it *IPTables) CheckInput(pkt *PacketBuffer, inNicName string) bool {\n- it.mu.RLock()\n- defer it.mu.RUnlock()\n+ tables := [...]checkTable{\n+ {\n+ fn: it.checkNAT,\n+ tableID: NATID,\n+ },\n+ {\n+ fn: it.check,\n+ tableID: FilterID,\n+ },\n+ }\n- if it.shouldSkipRLocked(pkt.NetworkProtocolNumber) {\n+ if it.shouldSkipOrPopulateTables(tables[:], pkt) {\nreturn true\n}\n- for _, check := range [...]checkTableFn{\n- it.checkNATRLocked,\n- it.checkFilterRLocked,\n- } {\n- if !check(Input, pkt, nil /* route */, nil /* addressEP */, inNicName, \"\" /* outNicName */) {\n+ for _, table := range tables {\n+ if !table.fn(table.table, Input, pkt, nil /* route */, nil /* addressEP */, inNicName, \"\" /* outNicName */) {\nreturn false\n}\n}\n@@ -325,14 +374,24 @@ func (it *IPTables) CheckInput(pkt *PacketBuffer, inNicName string) bool {\n//\n// Precondition: The packet's network and transport header must be set.\nfunc (it *IPTables) CheckForward(pkt *PacketBuffer, inNicName, 
outNicName string) bool {\n- it.mu.RLock()\n- defer it.mu.RUnlock()\n+ tables := [...]checkTable{\n+ {\n+ fn: it.check,\n+ tableID: FilterID,\n+ },\n+ }\n- if it.shouldSkipRLocked(pkt.NetworkProtocolNumber) {\n+ if it.shouldSkipOrPopulateTables(tables[:], pkt) {\nreturn true\n}\n- return it.checkFilterRLocked(Forward, pkt, nil /* route */, nil /* addressEP */, inNicName, outNicName)\n+ for _, table := range tables {\n+ if !table.fn(table.table, Forward, pkt, nil /* route */, nil /* addressEP */, inNicName, outNicName) {\n+ return false\n+ }\n+ }\n+\n+ return true\n}\n// CheckOutput performs the output hook on the packet.\n@@ -342,21 +401,29 @@ func (it *IPTables) CheckForward(pkt *PacketBuffer, inNicName, outNicName string\n//\n// Precondition: The packet's network and transport header must be set.\nfunc (it *IPTables) CheckOutput(pkt *PacketBuffer, r *Route, outNicName string) bool {\n- it.mu.RLock()\n- defer it.mu.RUnlock()\n+ tables := [...]checkTable{\n+ {\n+ fn: it.check,\n+ tableID: MangleID,\n+ },\n+ {\n+ fn: it.checkNAT,\n+ tableID: NATID,\n+ },\n+ {\n+ fn: it.check,\n+ tableID: FilterID,\n+ },\n+ }\n- if it.shouldSkipRLocked(pkt.NetworkProtocolNumber) {\n+ if it.shouldSkipOrPopulateTables(tables[:], pkt) {\nreturn true\n}\npkt.tuple = it.connections.getConnAndUpdate(pkt)\n- for _, check := range [...]checkTableFn{\n- it.checkMangleRLocked,\n- it.checkNATRLocked,\n- it.checkFilterRLocked,\n- } {\n- if !check(Output, pkt, r, nil /* addressEP */, \"\" /* inNicName */, outNicName) {\n+ for _, table := range tables {\n+ if !table.fn(table.table, Output, pkt, r, nil /* addressEP */, \"\" /* inNicName */, outNicName) {\nreturn false\n}\n}\n@@ -371,18 +438,23 @@ func (it *IPTables) CheckOutput(pkt *PacketBuffer, r *Route, outNicName string)\n//\n// Precondition: The packet's network and transport header must be set.\nfunc (it *IPTables) CheckPostrouting(pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, outNicName string) bool {\n- it.mu.RLock()\n- defer it.mu.RUnlock()\n+ tables := [...]checkTable{\n+ {\n+ fn: it.check,\n+ tableID: MangleID,\n+ },\n+ {\n+ fn: it.checkNAT,\n+ tableID: NATID,\n+ },\n+ }\n- if it.shouldSkipRLocked(pkt.NetworkProtocolNumber) {\n+ if it.shouldSkipOrPopulateTables(tables[:], pkt) {\nreturn true\n}\n- for _, check := range [...]checkTableFn{\n- it.checkMangleRLocked,\n- it.checkNATRLocked,\n- } {\n- if !check(Postrouting, pkt, r, addressEP, \"\" /* inNicName */, outNicName) {\n+ for _, table := range tables {\n+ if !table.fn(table.table, Postrouting, pkt, r, addressEP, \"\" /* inNicName */, outNicName) {\nreturn false\n}\n}\n@@ -394,43 +466,18 @@ func (it *IPTables) CheckPostrouting(pkt *PacketBuffer, r *Route, addressEP Addr\nreturn true\n}\n-// +checklocksread:it.mu\n-func (it *IPTables) shouldSkipRLocked(netProto tcpip.NetworkProtocolNumber) bool {\n- switch netProto {\n- case header.IPv4ProtocolNumber, header.IPv6ProtocolNumber:\n- default:\n- // IPTables only supports IPv4/IPv6.\n- return true\n- }\n+type checkTableFn func(table Table, hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool\n- // Many users never configure iptables. 
Spare them the cost of rule\n- // traversal if rules have never been set.\n- return !it.modified\n-}\n-\n-type checkTableFn func(hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool\n-\n-// checkMangleRLocked runs the packet through the mangle table.\n-//\n-// See checkRLocked.\n-//\n-// +checklocksread:it.mu\n-func (it *IPTables) checkMangleRLocked(hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool {\n- return it.checkRLocked(MangleID, hook, pkt, r, addressEP, inNicName, outNicName)\n-}\n-\n-// checkNATRLocked runs the packet through the NAT table.\n-//\n-// See checkRLocked.\n+// checkNAT runs the packet through the NAT table.\n//\n-// +checklocksread:it.mu\n-func (it *IPTables) checkNATRLocked(hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool {\n+// See check.\n+func (it *IPTables) checkNAT(table Table, hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool {\nt := pkt.tuple\nif t != nil && t.conn.handlePacket(pkt, hook, r) {\nreturn true\n}\n- if !it.checkRLocked(NATID, hook, pkt, r, addressEP, inNicName, outNicName) {\n+ if !it.check(table, hook, pkt, r, addressEP, inNicName, outNicName) {\nreturn false\n}\n@@ -462,29 +509,12 @@ func (it *IPTables) checkNATRLocked(hook Hook, pkt *PacketBuffer, r *Route, addr\nreturn true\n}\n-// checkFilterRLocked runs the packet through the filter table.\n-//\n-// See checkRLocked.\n-//\n-// +checklocksread:it.mu\n-func (it *IPTables) checkFilterRLocked(hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool {\n- return it.checkRLocked(FilterID, hook, pkt, r, addressEP, inNicName, outNicName)\n-}\n-\n-// checkRLocked runs the packet through the rules in the specified table for the\n+// check runs the packet through the rules in the specified table for the\n// hook. It returns true if the packet should continue to traverse through the\n// network stack or tables, or false when it must be dropped.\n//\n// Precondition: The packet's network and transport header must be set.\n-//\n-// +checklocksread:it.mu\n-func (it *IPTables) checkRLocked(tableID TableID, hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool {\n- var table Table\n- if pkt.NetworkProtocolNumber == header.IPv6ProtocolNumber {\n- table = it.v6Tables[tableID]\n- } else {\n- table = it.v4Tables[tableID]\n- }\n+func (it *IPTables) check(table Table, hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool {\nruleIdx := table.BuiltinChains[hook]\nswitch verdict := it.checkChain(hook, pkt, table, ruleIdx, r, addressEP, inNicName, outNicName); verdict {\n// If the table returns Accept, move on to the next table.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables_types.go",
"new_path": "pkg/tcpip/stack/iptables_types.go",
"diff": "@@ -90,8 +90,11 @@ type IPTables struct {\n// v4Tables and v6tables map tableIDs to tables. They hold builtin\n// tables only, not user tables.\n//\n+ // mu protects the array of tables, but not the tables themselves.\n// +checklocks:mu\nv4Tables [NumTables]Table\n+ //\n+ // mu protects the array of tables, but not the tables themselves.\n// +checklocks:mu\nv6Tables [NumTables]Table\n// modified is whether tables have been modified at least once. It is\n"
}
] | Go | Apache License 2.0 | google/gvisor | Check rules without holding locks
...so that IPTables is re-entrant.
This is to prepare for a later change where an IPTables target will
attempt to send a packet which will have to go through IPTables.
PiperOrigin-RevId: 417505430 |
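The locking change amounts to snapshot-under-read-lock, then evaluate-unlocked: the read lock is held only long enough to copy out the tables to run, and rule evaluation happens with no lock held, so a target that emits a packet can safely re-enter the check path. A sketch of that shape with stand-in types, not gVisor's:

```go
// Sketch of the re-entrancy-friendly locking pattern from the diff.
package main

import (
	"fmt"
	"sync"
)

type table struct{ name string }

type packet struct{ payload string }

type ipTables struct {
	mu       sync.RWMutex
	modified bool
	tables   map[string]table
}

// snapshot returns the tables to evaluate, or false if rules were never set.
// The read lock is only held while copying references, never during checks.
func (it *ipTables) snapshot(ids []string) ([]table, bool) {
	it.mu.RLock()
	defer it.mu.RUnlock()
	if !it.modified {
		return nil, false
	}
	out := make([]table, 0, len(ids))
	for _, id := range ids {
		out = append(out, it.tables[id])
	}
	return out, true
}

func (it *ipTables) checkOutput(pkt *packet) bool {
	tables, ok := it.snapshot([]string{"mangle", "nat", "filter"})
	if !ok {
		return true // No rules configured; accept the packet.
	}
	for _, t := range tables {
		// Rule evaluation runs with no lock held, so it may re-enter.
		fmt.Printf("evaluating %q for %q\n", t.name, pkt.payload)
	}
	return true
}

func main() {
	it := &ipTables{modified: true, tables: map[string]table{
		"mangle": {"mangle"}, "nat": {"nat"}, "filter": {"filter"},
	}}
	it.checkOutput(&packet{payload: "syn"})
}
```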
259,853 | 21.12.2021 10:16:59 | 28,800 | b76119a1e786b51d012d358ba8711fc9d709e046 | pipe: a reader has to wait when all writers will be notified
Otherwise, we can have a race when a reader closes a pipe before
a writer detects this reader. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/node.go",
"new_path": "pkg/sentry/kernel/pipe/node.go",
"diff": "package pipe\nimport (\n+ \"sync/atomic\"\n+\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n@@ -73,8 +75,10 @@ func NewInodeOperations(ctx context.Context, perms fs.FilePermissions, p *Pipe)\nfunc (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {\nswitch {\ncase flags.Read && !flags.Write: // O_RDONLY.\n+ tWriters := atomic.LoadInt32(&i.p.totalWriters)\nr := i.p.Open(ctx, d, flags)\n- for i.p.isNamed && !flags.NonBlocking && !i.p.HasWriters() {\n+ for i.p.isNamed && !flags.NonBlocking && !i.p.HasWriters() &&\n+ tWriters == atomic.LoadInt32(&i.p.totalWriters) {\nif !ctx.BlockOn((*waitWriters)(i.p), waiter.EventInternal) {\nr.DecRef(ctx)\nreturn nil, linuxerr.ErrInterrupted\n@@ -87,8 +91,10 @@ func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.Fi\nreturn r, nil\ncase flags.Write && !flags.Read: // O_WRONLY.\n+ tReaders := atomic.LoadInt32(&i.p.totalReaders)\nw := i.p.Open(ctx, d, flags)\n- for i.p.isNamed && !i.p.HasReaders() {\n+ for i.p.isNamed && !i.p.HasReaders() &&\n+ tReaders == atomic.LoadInt32(&i.p.totalReaders) {\n// On a nonblocking, write-only open, the open fails with ENXIO if the\n// read side isn't open yet.\nif flags.NonBlocking {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/pipe.go",
"new_path": "pkg/sentry/kernel/pipe/pipe.go",
"diff": "@@ -124,11 +124,21 @@ type Pipe struct {\n// Access atomically.\nreaders int32\n- // The number of active writes for this pipe.\n+ // The total number of readers for this pipe.\n+ //\n+ // Access atomically.\n+ totalReaders int32\n+\n+ // The number of active writers for this pipe.\n//\n// Access atomically.\nwriters int32\n+ // The total number of writers for this pipe.\n+ //\n+ // Access atomically.\n+ totalWriters int32\n+\n// mu protects all pipe internal state below.\nmu sync.Mutex `state:\"nosave\"`\n@@ -158,6 +168,13 @@ type Pipe struct {\n//\n// This is protected by mu.\nhadWriter bool\n+\n+ // waitingWriters is used to wait when writers are initialized after a\n+ // reader has opened the pipe.\n+ waitingWriters sync.WaitGroup `state:\"nosave\"`\n+ // waitingReaders is used to wait when readers are initialized after a\n+ // write has opened the pipe.\n+ waitingReaders sync.WaitGroup `state:\"nosave\"`\n}\n// NewPipe initializes and returns a pipe.\n@@ -373,6 +390,7 @@ func (p *Pipe) writeLocked(count int64, f func(safemem.BlockSeq) (uint64, error)\n// rOpen signals a new reader of the pipe.\nfunc (p *Pipe) rOpen() {\natomic.AddInt32(&p.readers, 1)\n+ atomic.AddInt32(&p.totalReaders, 1)\n// Notify for blocking openers.\np.queue.Notify(waiter.EventInternal)\n@@ -383,6 +401,7 @@ func (p *Pipe) wOpen() {\np.mu.Lock()\np.hadWriter = true\natomic.AddInt32(&p.writers, 1)\n+ atomic.AddInt32(&p.totalWriters, 1)\np.mu.Unlock()\n// Notify for blocking openers.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/vfs.go",
"new_path": "pkg/sentry/kernel/pipe/vfs.go",
"diff": "package pipe\nimport (\n+ \"sync/atomic\"\n+\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n@@ -55,11 +57,13 @@ func (vp *VFSPipe) ReaderWriterPair(ctx context.Context, mnt *vfs.Mount, vfsd *v\nif err != nil {\nreturn nil, nil, err\n}\n+ vp.pipe.rOpen()\nw, err := vp.newFD(mnt, vfsd, linux.O_WRONLY|statusFlags, locks)\nif err != nil {\nr.DecRef(ctx)\nreturn nil, nil, err\n}\n+ vp.pipe.wOpen()\nreturn r, w, nil\n}\n@@ -93,12 +97,17 @@ func (vp *VFSPipe) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, s\n// FIFO for writing while there are no readers available.\" - fifo(7)\nswitch {\ncase readable && writable:\n+ vp.pipe.rOpen()\n+ vp.pipe.wOpen()\n// Pipes opened for read-write always succeed without blocking.\ncase readable:\n+ tWriters := atomic.LoadInt32(&vp.pipe.totalWriters)\n+ vp.pipe.rOpen()\n// If this pipe is being opened as blocking and there's no\n// writer, we have to wait for a writer to open the other end.\n- for vp.pipe.isNamed && statusFlags&linux.O_NONBLOCK == 0 && !vp.pipe.HasWriters() {\n+ for vp.pipe.isNamed && statusFlags&linux.O_NONBLOCK == 0 && !vp.pipe.HasWriters() &&\n+ tWriters == atomic.LoadInt32(&vp.pipe.totalWriters) {\nif !ctx.BlockOn((*waitWriters)(&vp.pipe), waiter.EventInternal) {\nfd.DecRef(ctx)\nreturn nil, linuxerr.EINTR\n@@ -106,7 +115,10 @@ func (vp *VFSPipe) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, s\n}\ncase writable:\n- for vp.pipe.isNamed && !vp.pipe.HasReaders() {\n+ tReaders := atomic.LoadInt32(&vp.pipe.totalReaders)\n+ vp.pipe.wOpen()\n+ for vp.pipe.isNamed && !vp.pipe.HasReaders() &&\n+ tReaders == atomic.LoadInt32(&vp.pipe.totalReaders) {\n// Non-blocking, write-only opens fail with ENXIO when the read\n// side isn't open yet.\nif statusFlags&linux.O_NONBLOCK != 0 {\n@@ -140,18 +152,6 @@ func (vp *VFSPipe) newFD(mnt *vfs.Mount, vfsd *vfs.Dentry, statusFlags uint32, l\nreturn nil, err\n}\n- switch {\n- case fd.vfsfd.IsReadable() && fd.vfsfd.IsWritable():\n- vp.pipe.rOpen()\n- vp.pipe.wOpen()\n- case fd.vfsfd.IsReadable():\n- vp.pipe.rOpen()\n- case fd.vfsfd.IsWritable():\n- vp.pipe.wOpen()\n- default:\n- panic(\"invalid pipe flags: must be readable, writable, or both\")\n- }\n-\nreturn &fd.vfsfd, nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | pipe: a reader has to wait when all writers will be notified
Otherwise, we can have a race when a reader closes a pipe before
a writer detects this reader.
PiperOrigin-RevId: 417645683 |
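The fix's core idea is a generation counter: before blocking, a reader samples the total number of writers that have ever opened the pipe, and a writer that opens (even one that closes again immediately) both bumps the counter and notifies waiters. A sketch of that idea using sync.Cond as a stand-in for the sentry's waiter queue; the sleep merely makes the reader block first in this demo.

```go
// Sketch of the generation-counter wait: stop blocking once a writer is
// present or the total-writers counter has moved since we started waiting.
package main

import (
	"fmt"
	"sync"
	"time"
)

type fifo struct {
	mu           sync.Mutex
	cond         *sync.Cond
	writers      int // currently-open writers
	totalWriters int // writers ever opened
}

func newFIFO() *fifo {
	f := &fifo{}
	f.cond = sync.NewCond(&f.mu)
	return f
}

func (f *fifo) openWriter() {
	f.mu.Lock()
	f.writers++
	f.totalWriters++
	f.mu.Unlock()
	f.cond.Broadcast()
}

func (f *fifo) closeWriter() {
	f.mu.Lock()
	f.writers--
	f.mu.Unlock()
}

// openReader blocks until a writer has shown up since the reader arrived.
func (f *fifo) openReader() {
	f.mu.Lock()
	was := f.totalWriters
	for f.writers == 0 && f.totalWriters == was {
		f.cond.Wait()
	}
	f.mu.Unlock()
}

func main() {
	f := newFIFO()
	go func() {
		time.Sleep(10 * time.Millisecond)
		f.openWriter()
		f.closeWriter() // A writer that closed immediately still unblocks the reader.
	}()
	f.openReader()
	fmt.Println("reader unblocked")
}
```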
260,004 | 21.12.2021 10:18:40 | 28,800 | e0e530c1cde723b63e4e7dcc957350e7213358bb | Drop icmpReason.isForwarding
To prepare for later changes where IPTables can send any ICMP error
from the Input, Forward, or Output hook. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/icmp.go",
"new_path": "pkg/tcpip/network/ipv4/icmp.go",
"diff": "@@ -219,7 +219,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer) {\nif optProblem.NeedICMP {\n_ = e.protocol.returnError(&icmpReasonParamProblem{\npointer: optProblem.Pointer,\n- }, pkt)\n+ }, pkt, true /* deliveredLocally */)\ne.stats.ip.MalformedPacketsReceived.Increment()\n}\nreturn\n@@ -387,9 +387,6 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer) {\n// icmpReason is a marker interface for IPv4 specific ICMP errors.\ntype icmpReason interface {\nisICMPReason()\n- // isForwarding indicates whether or not the error arose while attempting to\n- // forward a packet.\n- isForwarding() bool\n}\n// icmpReasonPortUnreachable is an error where the transport protocol has no\n@@ -397,18 +394,12 @@ type icmpReason interface {\ntype icmpReasonPortUnreachable struct{}\nfunc (*icmpReasonPortUnreachable) isICMPReason() {}\n-func (*icmpReasonPortUnreachable) isForwarding() bool {\n- return false\n-}\n// icmpReasonProtoUnreachable is an error where the transport protocol is\n// not supported.\ntype icmpReasonProtoUnreachable struct{}\nfunc (*icmpReasonProtoUnreachable) isICMPReason() {}\n-func (*icmpReasonProtoUnreachable) isForwarding() bool {\n- return false\n-}\n// icmpReasonTTLExceeded is an error where a packet's time to live exceeded in\n// transit to its final destination, as per RFC 792 page 6, Time Exceeded\n@@ -416,15 +407,6 @@ func (*icmpReasonProtoUnreachable) isForwarding() bool {\ntype icmpReasonTTLExceeded struct{}\nfunc (*icmpReasonTTLExceeded) isICMPReason() {}\n-func (*icmpReasonTTLExceeded) isForwarding() bool {\n- // If we hit a TTL Exceeded error, then we know we are operating as a router.\n- // As per RFC 792 page 6, Time Exceeded Message,\n- //\n- // If the gateway processing a datagram finds the time to live field\n- // is zero it must discard the datagram. The gateway may also notify\n- // the source host via the time exceeded message.\n- return true\n-}\n// icmpReasonReassemblyTimeout is an error where insufficient fragments are\n// received to complete reassembly of a packet within a configured time after\n@@ -432,38 +414,20 @@ func (*icmpReasonTTLExceeded) isForwarding() bool {\ntype icmpReasonReassemblyTimeout struct{}\nfunc (*icmpReasonReassemblyTimeout) isICMPReason() {}\n-func (*icmpReasonReassemblyTimeout) isForwarding() bool {\n- return false\n-}\n// icmpReasonParamProblem is an error to use to request a Parameter Problem\n// message to be sent.\ntype icmpReasonParamProblem struct {\npointer byte\n- forwarding bool\n}\nfunc (*icmpReasonParamProblem) isICMPReason() {}\n-func (r *icmpReasonParamProblem) isForwarding() bool {\n- return r.forwarding\n-}\n// icmpReasonNetworkUnreachable is an error in which the network specified in\n// the internet destination field of the datagram is unreachable.\ntype icmpReasonNetworkUnreachable struct{}\nfunc (*icmpReasonNetworkUnreachable) isICMPReason() {}\n-func (*icmpReasonNetworkUnreachable) isForwarding() bool {\n- // If we hit a Net Unreachable error, then we know we are operating as\n- // a router. 
As per RFC 792 page 5, Destination Unreachable Message,\n- //\n- // If, according to the information in the gateway's routing tables,\n- // the network specified in the internet destination field of a\n- // datagram is unreachable, e.g., the distance to the network is\n- // infinity, the gateway may send a destination unreachable message to\n- // the internet source host of the datagram.\n- return true\n-}\n// icmpReasonFragmentationNeeded is an error where a packet requires\n// fragmentation while also having the Don't Fragment flag set, as per RFC 792\n@@ -471,38 +435,19 @@ func (*icmpReasonNetworkUnreachable) isForwarding() bool {\ntype icmpReasonFragmentationNeeded struct{}\nfunc (*icmpReasonFragmentationNeeded) isICMPReason() {}\n-func (*icmpReasonFragmentationNeeded) isForwarding() bool {\n- // If we hit a Don't Fragment error, then we know we are operating as a router.\n- // As per RFC 792 page 4, Destination Unreachable Message,\n- //\n- // Another case is when a datagram must be fragmented to be forwarded by a\n- // gateway yet the Don't Fragment flag is on. In this case the gateway must\n- // discard the datagram and may return a destination unreachable message.\n- return true\n-}\n// icmpReasonHostUnreachable is an error in which the host specified in the\n// internet destination field of the datagram is unreachable.\ntype icmpReasonHostUnreachable struct{}\nfunc (*icmpReasonHostUnreachable) isICMPReason() {}\n-func (*icmpReasonHostUnreachable) isForwarding() bool {\n- // If we hit a Host Unreachable error, then we know we are operating as a\n- // router. As per RFC 792 page 5, Destination Unreachable Message,\n- //\n- // In addition, in some networks, the gateway may be able to determine\n- // if the internet destination host is unreachable. Gateways in these\n- // networks may send destination unreachable messages to the source host\n- // when the destination host is unreachable.\n- return true\n-}\n// returnError takes an error descriptor and generates the appropriate ICMP\n// error packet for IPv4 and sends it back to the remote device that sent\n// the problematic packet. It incorporates as much of that packet as\n// possible as well as any error metadata as is available. returnError\n// expects pkt to hold a valid IPv4 packet as per the wire format.\n-func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer) tcpip.Error {\n+func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer, deliveredLocally bool) tcpip.Error {\norigIPHdr := header.IPv4(pkt.NetworkHeader().View())\norigIPHdrSrc := origIPHdr.SourceAddress()\norigIPHdrDst := origIPHdr.DestinationAddress()\n@@ -534,11 +479,11 @@ func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer) tcpip\nreturn nil\n}\n- // If we are operating as a router/gateway, don't use the packet's destination\n+ // If the packet wasn't delivered locally, do not use the packet's destination\n// address as the response's source address as we should not not own the\n// destination address of a packet we are forwarding.\nlocalAddr := origIPHdrDst\n- if reason.isForwarding() {\n+ if !deliveredLocally {\nlocalAddr = \"\"\n}\n@@ -704,6 +649,6 @@ func (p *protocol) OnReassemblyTimeout(pkt *stack.PacketBuffer) {\n// If fragment zero is not available then no time exceeded need be sent at\n// all.\nif pkt != nil {\n- p.returnError(&icmpReasonReassemblyTimeout{}, pkt)\n+ p.returnError(&icmpReasonReassemblyTimeout{}, pkt, true /* deliveredLocally */)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -109,7 +109,7 @@ func (e *endpoint) HandleLinkResolutionFailure(pkt *stack.PacketBuffer) {\nif pkt.NetworkPacketInfo.IsForwardedPacket {\n// TODO(gvisor.dev/issue/6005): Propagate asynchronously generated ICMP\n// errors to local endpoints.\n- e.protocol.returnError(&icmpReasonHostUnreachable{}, pkt)\n+ e.protocol.returnError(&icmpReasonHostUnreachable{}, pkt, false /* deliveredLocally */)\ne.stats.ip.Forwarding.Errors.Increment()\ne.stats.ip.Forwarding.HostUnreachable.Increment()\nreturn\n@@ -597,7 +597,7 @@ func (e *endpoint) forwardPacket(pkt *stack.PacketBuffer) ip.ForwardingError {\n// We return the original error rather than the result of returning\n// the ICMP packet because the original error is more relevant to\n// the caller.\n- _ = e.protocol.returnError(&icmpReasonTTLExceeded{}, pkt)\n+ _ = e.protocol.returnError(&icmpReasonTTLExceeded{}, pkt, false /* deliveredLocally */)\nreturn &ip.ErrTTLExceeded{}\n}\n@@ -607,8 +607,7 @@ func (e *endpoint) forwardPacket(pkt *stack.PacketBuffer) ip.ForwardingError {\nif optProblem.NeedICMP {\n_ = e.protocol.returnError(&icmpReasonParamProblem{\npointer: optProblem.Pointer,\n- forwarding: true,\n- }, pkt)\n+ }, pkt, false /* deliveredLocally */)\n}\nreturn &ip.ErrParameterProblem{}\n}\n@@ -651,7 +650,7 @@ func (e *endpoint) forwardPacket(pkt *stack.PacketBuffer) ip.ForwardingError {\n// We return the original error rather than the result of returning\n// the ICMP packet because the original error is more relevant to\n// the caller.\n- _ = e.protocol.returnError(&icmpReasonNetworkUnreachable{}, pkt)\n+ _ = e.protocol.returnError(&icmpReasonNetworkUnreachable{}, pkt, false /* deliveredLocally */)\nreturn &ip.ErrNoRoute{}\ndefault:\nreturn &ip.ErrOther{Err: err}\n@@ -705,7 +704,7 @@ func (e *endpoint) forwardPacket(pkt *stack.PacketBuffer) ip.ForwardingError {\n// WriteHeaderIncludedPacket checks for the presence of the Don't Fragment bit\n// while sending the packet and returns this error iff fragmentation is\n// necessary and the bit is also set.\n- _ = e.protocol.returnError(&icmpReasonFragmentationNeeded{}, pkt)\n+ _ = e.protocol.returnError(&icmpReasonFragmentationNeeded{}, pkt, false /* deliveredLocally */)\nreturn &ip.ErrMessageTooLong{}\ndefault:\nreturn &ip.ErrOther{Err: err}\n@@ -880,7 +879,7 @@ func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt *stack.PacketBuffer,\nif optProblem.NeedICMP {\n_ = e.protocol.returnError(&icmpReasonParamProblem{\npointer: optProblem.Pointer,\n- }, pkt)\n+ }, pkt, true /* deliveredLocally */)\ne.stats.ip.MalformedPacketsReceived.Increment()\n}\nreturn\n@@ -957,7 +956,7 @@ func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt *stack.PacketBuffer,\nif optProblem.NeedICMP {\n_ = e.protocol.returnError(&icmpReasonParamProblem{\npointer: optProblem.Pointer,\n- }, pkt)\n+ }, pkt, true /* deliveredLocally */)\nstats.ip.MalformedPacketsReceived.Increment()\n}\nreturn\n@@ -987,13 +986,13 @@ func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt *stack.PacketBuffer,\n// 3 (Port Unreachable), when the designated transport protocol\n// (e.g., UDP) is unable to demultiplex the datagram but has no\n// protocol mechanism to inform the sender.\n- _ = e.protocol.returnError(&icmpReasonPortUnreachable{}, pkt)\n+ _ = e.protocol.returnError(&icmpReasonPortUnreachable{}, pkt, true /* deliveredLocally */)\ncase stack.TransportPacketProtocolUnreachable:\n// As per RFC: 1122 Section 3.2.2.1\n// A host SHOULD generate Destination Unreachable messages with code:\n// 2 (Protocol Unreachable), when 
the designated transport protocol\n// is not supported\n- _ = e.protocol.returnError(&icmpReasonProtoUnreachable{}, pkt)\n+ _ = e.protocol.returnError(&icmpReasonProtoUnreachable{}, pkt, true /* deliveredLocally */)\ndefault:\npanic(fmt.Sprintf(\"unrecognized result from DeliverTransportPacket = %d\", res))\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/icmp.go",
"new_path": "pkg/tcpip/network/ipv6/icmp.go",
"diff": "@@ -928,9 +928,6 @@ func (*endpoint) ResolveStaticAddress(addr tcpip.Address) (tcpip.LinkAddress, bo\n// icmpReason is a marker interface for IPv6 specific ICMP errors.\ntype icmpReason interface {\nisICMPReason()\n- // isForwarding indicates whether or not the error arose while attempting to\n- // forward a packet.\n- isForwarding() bool\n// respondToMulticast indicates whether this error falls under the exception\n// outlined by RFC 4443 section 2.4 point e.3 exception 2:\n//\n@@ -958,15 +955,10 @@ type icmpReasonParameterProblem struct {\n// in the maximum size of an ICMPv6 error message.\npointer uint32\n- forwarding bool\n-\nrespondToMulticast bool\n}\nfunc (*icmpReasonParameterProblem) isICMPReason() {}\n-func (p *icmpReasonParameterProblem) isForwarding() bool {\n- return p.forwarding\n-}\nfunc (p *icmpReasonParameterProblem) respondsToMulticast() bool {\nreturn p.respondToMulticast\n@@ -978,10 +970,6 @@ type icmpReasonPortUnreachable struct{}\nfunc (*icmpReasonPortUnreachable) isICMPReason() {}\n-func (*icmpReasonPortUnreachable) isForwarding() bool {\n- return false\n-}\n-\nfunc (*icmpReasonPortUnreachable) respondsToMulticast() bool {\nreturn false\n}\n@@ -992,16 +980,6 @@ type icmpReasonNetUnreachable struct{}\nfunc (*icmpReasonNetUnreachable) isICMPReason() {}\n-func (*icmpReasonNetUnreachable) isForwarding() bool {\n- // If we hit a Network Unreachable error, then we also know we are\n- // operating as a router. As per RFC 4443 section 3.1:\n- //\n- // If the reason for the failure to deliver is lack of a matching\n- // entry in the forwarding node's routing table, the Code field is\n- // set to 0 (Network Unreachable).\n- return true\n-}\n-\nfunc (*icmpReasonNetUnreachable) respondsToMulticast() bool {\nreturn false\n}\n@@ -1011,16 +989,6 @@ func (*icmpReasonNetUnreachable) respondsToMulticast() bool {\ntype icmpReasonHostUnreachable struct{}\nfunc (*icmpReasonHostUnreachable) isICMPReason() {}\n-func (*icmpReasonHostUnreachable) isForwarding() bool {\n- // If we hit a Host Unreachable error, then we know we are operating as a\n- // router. As per RFC 4443 page 8, Destination Unreachable Message,\n- //\n- // If the reason for the failure to deliver cannot be mapped to any of\n- // other codes, the Code field is set to 3. Example of such cases are\n- // an inability to resolve the IPv6 destination address into a\n- // corresponding link address, or a link-specific problem of some sort.\n- return true\n-}\nfunc (*icmpReasonHostUnreachable) respondsToMulticast() bool {\nreturn false\n@@ -1032,16 +1000,6 @@ type icmpReasonPacketTooBig struct{}\nfunc (*icmpReasonPacketTooBig) isICMPReason() {}\n-func (*icmpReasonPacketTooBig) isForwarding() bool {\n- // If we hit a Packet Too Big error, then we know we are operating as a router.\n- // As per RFC 4443 section 3.2:\n- //\n- // A Packet Too Big MUST be sent by a router in response to a packet that it\n- // cannot forward because the packet is larger than the MTU of the outgoing\n- // link.\n- return true\n-}\n-\nfunc (*icmpReasonPacketTooBig) respondsToMulticast() bool {\nreturn true\n}\n@@ -1052,18 +1010,6 @@ type icmpReasonHopLimitExceeded struct{}\nfunc (*icmpReasonHopLimitExceeded) isICMPReason() {}\n-func (*icmpReasonHopLimitExceeded) isForwarding() bool {\n- // If we hit a Hop Limit Exceeded error, then we know we are operating\n- // as a router. 
As per RFC 4443 section 3.3:\n- //\n- // If a router receives a packet with a Hop Limit of zero, or if a\n- // router decrements a packet's Hop Limit to zero, it MUST discard\n- // the packet and originate an ICMPv6 Time Exceeded message with Code\n- // 0 to the source of the packet. This indicates either a routing\n- // loop or too small an initial Hop Limit value.\n- return true\n-}\n-\nfunc (*icmpReasonHopLimitExceeded) respondsToMulticast() bool {\nreturn false\n}\n@@ -1075,17 +1021,13 @@ type icmpReasonReassemblyTimeout struct{}\nfunc (*icmpReasonReassemblyTimeout) isICMPReason() {}\n-func (*icmpReasonReassemblyTimeout) isForwarding() bool {\n- return false\n-}\n-\nfunc (*icmpReasonReassemblyTimeout) respondsToMulticast() bool {\nreturn false\n}\n// returnError takes an error descriptor and generates the appropriate ICMP\n// error packet for IPv6 and sends it.\n-func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer) tcpip.Error {\n+func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer, deliveredLocally bool) tcpip.Error {\norigIPHdr := header.IPv6(pkt.NetworkHeader().View())\norigIPHdrSrc := origIPHdr.SourceAddress()\norigIPHdrDst := origIPHdr.DestinationAddress()\n@@ -1117,7 +1059,7 @@ func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer) tcpip\nreturn nil\n}\n- // If we are operating as a router, do not use the packet's destination\n+ // If the packet wasn't delivered locally, do not use the packet's destination\n// address as the response's source address as we should not own the\n// destination address of a packet we are forwarding.\n//\n@@ -1126,7 +1068,7 @@ func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer) tcpip\n// packet as \"multicast addresses must not be used as source addresses in IPv6\n// packets\", as per RFC 4291 section 2.7.\nlocalAddr := origIPHdrDst\n- if reason.isForwarding() || isOrigDstMulticast {\n+ if !deliveredLocally || isOrigDstMulticast {\nlocalAddr = \"\"\n}\n// Even if we were able to receive a packet from some remote, we may not have\n@@ -1255,6 +1197,6 @@ func (p *protocol) OnReassemblyTimeout(pkt *stack.PacketBuffer) {\n// been received, an ICMP Time Exceeded -- Fragment Reassembly Time Exceeded\n// message should be sent to the source of that fragment.\nif pkt != nil {\n- p.returnError(&icmpReasonReassemblyTimeout{}, pkt)\n+ p.returnError(&icmpReasonReassemblyTimeout{}, pkt, true /* deliveredLocally */)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ipv6.go",
"new_path": "pkg/tcpip/network/ipv6/ipv6.go",
"diff": "@@ -286,7 +286,7 @@ func (e *endpoint) HandleLinkResolutionFailure(pkt *stack.PacketBuffer) {\nif pkt.NetworkPacketInfo.IsForwardedPacket {\n// TODO(gvisor.dev/issue/6005): Propagate asynchronously generated ICMP\n// errors to local endpoints.\n- e.protocol.returnError(&icmpReasonHostUnreachable{}, pkt)\n+ e.protocol.returnError(&icmpReasonHostUnreachable{}, pkt, false /* deliveredLocally */)\ne.stats.ip.Forwarding.Errors.Increment()\ne.stats.ip.Forwarding.HostUnreachable.Increment()\nreturn\n@@ -891,7 +891,7 @@ func (e *endpoint) forwardPacket(pkt *stack.PacketBuffer) ip.ForwardingError {\n// We return the original error rather than the result of returning\n// the ICMP packet because the original error is more relevant to\n// the caller.\n- _ = e.protocol.returnError(&icmpReasonHopLimitExceeded{}, pkt)\n+ _ = e.protocol.returnError(&icmpReasonHopLimitExceeded{}, pkt, false /* deliveredLocally */)\nreturn &ip.ErrTTLExceeded{}\n}\n@@ -923,7 +923,7 @@ func (e *endpoint) forwardPacket(pkt *stack.PacketBuffer) ip.ForwardingError {\ncase *tcpip.ErrNoRoute, *tcpip.ErrNetworkUnreachable:\n// We return the original error rather than the result of returning the\n// ICMP packet because the original error is more relevant to the caller.\n- _ = e.protocol.returnError(&icmpReasonNetUnreachable{}, pkt)\n+ _ = e.protocol.returnError(&icmpReasonNetUnreachable{}, pkt, false /* deliveredLocally */)\nreturn &ip.ErrNoRoute{}\ndefault:\nreturn &ip.ErrOther{Err: err}\n@@ -965,7 +965,7 @@ func (e *endpoint) forwardPacket(pkt *stack.PacketBuffer) ip.ForwardingError {\n// A Packet Too Big MUST be sent by a router in response to a packet that\n// it cannot forward because the packet is larger than the MTU of the\n// outgoing link.\n- _ = e.protocol.returnError(&icmpReasonPacketTooBig{}, pkt)\n+ _ = e.protocol.returnError(&icmpReasonPacketTooBig{}, pkt, false /* deliveredLocally */)\nreturn &ip.ErrMessageTooLong{}\ndefault:\nreturn &ip.ErrOther{Err: err}\n@@ -1175,8 +1175,7 @@ func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt *stack.PacketBuffe\n_ = e.protocol.returnError(&icmpReasonParameterProblem{\ncode: header.ICMPv6UnknownHeader,\npointer: previousHeaderStart,\n- forwarding: forwarding,\n- }, pkt)\n+ }, pkt, !forwarding /* deliveredLocally */)\nreturn fmt.Errorf(\"found Hop-by-Hop header = %#v with non-zero previous header offset = %d\", extHdr, previousHeaderStart)\n}\n@@ -1227,8 +1226,7 @@ func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt *stack.PacketBuffe\ncode: header.ICMPv6UnknownOption,\npointer: it.ParseOffset() + optsIt.OptionOffset(),\nrespondToMulticast: true,\n- forwarding: forwarding,\n- }, pkt)\n+ }, pkt, !forwarding /* deliveredLocally */)\nreturn fmt.Errorf(\"found unknown hop-by-hop header option = %#v with discard action\", opt)\ndefault:\npanic(fmt.Sprintf(\"unrecognized action for an unrecognized Hop By Hop extension header option = %#v\", opt))\n@@ -1253,12 +1251,7 @@ func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt *stack.PacketBuffe\n_ = e.protocol.returnError(&icmpReasonParameterProblem{\ncode: header.ICMPv6ErroneousHeader,\npointer: it.ParseOffset(),\n- // For the sake of consistency, we're using the value of `forwarding`\n- // here, even though it should always be false if we've reached this\n- // point. 
If `forwarding` is true here, we're executing undefined\n- // behavior no matter what.\n- forwarding: forwarding,\n- }, pkt)\n+ }, pkt, true /* deliveredLocally */)\nreturn fmt.Errorf(\"found unrecognized routing type with non-zero segments left in header = %#v\", extHdr)\n}\n@@ -1348,7 +1341,7 @@ func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt *stack.PacketBuffe\n_ = e.protocol.returnError(&icmpReasonParameterProblem{\ncode: header.ICMPv6ErroneousHeader,\npointer: header.IPv6PayloadLenOffset,\n- }, pkt)\n+ }, pkt, true /* deliveredLocally */)\nreturn fmt.Errorf(\"found fragment length = %d that is not a multiple of 8 octets\", fragmentPayloadLen)\n}\n@@ -1370,7 +1363,7 @@ func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt *stack.PacketBuffe\n_ = e.protocol.returnError(&icmpReasonParameterProblem{\ncode: header.ICMPv6ErroneousHeader,\npointer: fragmentFieldOffset,\n- }, pkt)\n+ }, pkt, true /* deliveredLocally */)\nreturn fmt.Errorf(\"determined that reassembled packet length = %d would exceed allowed length = %d\", lengthAfterReassembly, header.IPv6MaximumPayloadSize)\n}\n@@ -1445,7 +1438,7 @@ func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt *stack.PacketBuffe\ncode: header.ICMPv6UnknownOption,\npointer: it.ParseOffset() + optsIt.OptionOffset(),\nrespondToMulticast: true,\n- }, pkt)\n+ }, pkt, true /* deliveredLocally */)\nreturn fmt.Errorf(\"found unknown destination header option %#v with discard action\", opt)\ndefault:\npanic(fmt.Sprintf(\"unrecognized action for an unrecognized Destination extension header option = %#v\", opt))\n@@ -1493,7 +1486,7 @@ func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt *stack.PacketBuffe\n// message with Code 4 in response to a packet for which the\n// transport protocol (e.g., UDP) has no listener, if that transport\n// protocol has no alternative means to inform the sender.\n- _ = e.protocol.returnError(&icmpReasonPortUnreachable{}, pkt)\n+ _ = e.protocol.returnError(&icmpReasonPortUnreachable{}, pkt, true /* deliveredLocally */)\nreturn fmt.Errorf(\"destination port unreachable\")\ncase stack.TransportPacketProtocolUnreachable:\n// As per RFC 8200 section 4. (page 7):\n@@ -1525,7 +1518,7 @@ func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt *stack.PacketBuffe\n_ = e.protocol.returnError(&icmpReasonParameterProblem{\ncode: header.ICMPv6UnknownHeader,\npointer: prevHdrIDOffset,\n- }, pkt)\n+ }, pkt, true /* deliveredLocally */)\nreturn fmt.Errorf(\"transport protocol unreachable\")\ndefault:\npanic(fmt.Sprintf(\"unrecognized result from DeliverTransportPacket = %d\", res))\n"
}
] | Go | Apache License 2.0 | google/gvisor | Drop icmpReason.isForwarding
To prepare for later changes where IPTables can send any ICMP error
from any of the Input, Forward, or Output hooks.
PiperOrigin-RevId: 417646060 |
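The mechanical pattern of this commit, dropping the per-type isForwarding() methods in favor of a deliveredLocally argument supplied by the call site, can be reduced to the following hypothetical Go sketch. It is a simplification of the diff above, not the full gVisor code:

```go
package main

// icmpReason stays a marker interface; the per-type isForwarding()
// methods are gone.
type icmpReason interface{ isICMPReason() }

type icmpReasonTTLExceeded struct{}

func (*icmpReasonTTLExceeded) isICMPReason() {}

// returnError now takes deliveredLocally from the call site, which
// already knows whether it is on the forwarding or local-delivery path.
func returnError(reason icmpReason, deliveredLocally bool) string {
	localAddr := "packet-destination" // placeholder for origIPHdrDst
	if !deliveredLocally {
		// A forwarding node does not own the packet's destination
		// address, so it must not use it as the response's source.
		localAddr = ""
	}
	return localAddr
}

func main() {
	// Forwarding paths pass false; local delivery paths pass true.
	_ = returnError(&icmpReasonTTLExceeded{}, false /* deliveredLocally */)
}
```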
260,004 | 21.12.2021 13:55:29 | 28,800 | e57939e24aa6ca1bf25e32dd5f591503f5ba8c56 | Update googletest to v1.11.0 | [
{
"change_type": "MODIFY",
"old_path": "WORKSPACE",
"new_path": "WORKSPACE",
"diff": "@@ -636,11 +636,11 @@ grpc_extra_deps()\nhttp_archive(\nname = \"com_google_googletest\",\n- sha256 = \"0a10bea96d8670e5eef948d79d824162b1577bb7889539e49ec786bfc3e48912\",\n- strip_prefix = \"googletest-565f1b848215b77c3732bca345fe76a0431d8b34\",\n+ sha256 = \"b4870bf121ff7795ba20d20bcdd8627b8e088f2d1dab299a031c1034eddc93d5\",\n+ strip_prefix = \"googletest-release-1.11.0\",\nurls = [\n- \"https://mirror.bazel.build/github.com/google/googletest/archive/565f1b848215b77c3732bca345fe76a0431d8b34.tar.gz\",\n- \"https://github.com/google/googletest/archive/565f1b848215b77c3732bca345fe76a0431d8b34.tar.gz\",\n+ \"https://mirror.bazel.build/github.com/google/googletest/archive/release-1.11.0.tar.gz\",\n+ \"https://github.com/google/googletest/archive/release-1.11.0.tar.gz\",\n],\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update googletest to v1.11.0
https://github.com/google/googletest/releases/tag/release-1.11.0.
PiperOrigin-RevId: 417685256 |
260,004 | 21.12.2021 15:00:16 | 28,800 | beaecd1e3cb00e1766e498f613adaaabe1f5ac37 | Support SOL_ICMPV6 -> ICMPV6_FILTER | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/ip.go",
"new_path": "pkg/abi/linux/ip.go",
"diff": "@@ -159,3 +159,8 @@ const (\nIPV6_RECVFRAGSIZE = 77\nIPV6_FREEBIND = 78\n)\n+\n+// Socket options from uapi/linux/icmpv6.h\n+const (\n+ ICMPV6_FILTER = 1\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/socket.go",
"new_path": "pkg/abi/linux/socket.go",
"diff": "@@ -586,3 +586,11 @@ const SCM_MAX_FD = 253\n// socket option for querying whether a socket is in a listening\n// state.\nconst SO_ACCEPTCON = 1 << 16\n+\n+// ICMP6Filter represents struct icmp6_filter from linux/icmpv6.h.\n+//\n+// +marshal\n+// +stateify savable\n+type ICMP6Filter struct {\n+ Filter [8]uint32\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -65,6 +65,8 @@ import (\n\"gvisor.dev/gvisor/pkg/waiter\"\n)\n+const bitsPerUint32 = 32\n+\nfunc mustCreateMetric(name, description string) *tcpip.StatCounter {\nvar cm tcpip.StatCounter\nmetric.MustRegisterCustomUint64Metric(name, true /* cumulative */, false /* sync */, description, cm.Value)\n@@ -858,8 +860,10 @@ func GetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, family in\ncase linux.SOL_IP:\nreturn getSockOptIP(t, s, ep, name, outPtr, outLen, family)\n+ case linux.SOL_ICMPV6:\n+ return getSockOptICMPv6(t, s, ep, name, outLen)\n+\ncase linux.SOL_UDP,\n- linux.SOL_ICMPV6,\nlinux.SOL_RAW,\nlinux.SOL_PACKET:\n@@ -1293,6 +1297,39 @@ func getSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name,\nreturn nil, syserr.ErrProtocolNotAvailable\n}\n+func getSockOptICMPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, outLen int) (marshal.Marshallable, *syserr.Error) {\n+ if _, ok := ep.(tcpip.Endpoint); !ok {\n+ log.Warningf(\"SOL_ICMPV6 options not supported on endpoints other than tcpip.Endpoint: option = %d\", name)\n+ return nil, syserr.ErrUnknownProtocolOption\n+ }\n+\n+ if family, _, _ := s.Type(); family != linux.AF_INET6 {\n+ return nil, syserr.ErrNotSupported\n+ }\n+\n+ switch name {\n+ case linux.ICMPV6_FILTER:\n+ var v tcpip.ICMPv6Filter\n+ if err := ep.GetSockOpt(&v); err != nil {\n+ return nil, syserr.TranslateNetstackError(err)\n+ }\n+\n+ filter := linux.ICMP6Filter{Filter: v.DenyType}\n+\n+ // Linux truncates the output to outLen.\n+ buf := t.CopyScratchBuffer(filter.SizeBytes())\n+ filter.MarshalUnsafe(buf)\n+ if len(buf) > outLen {\n+ buf = buf[:outLen]\n+ }\n+ bufP := primitive.ByteSlice(buf)\n+ return &bufP, nil\n+ default:\n+ t.Kernel().EmitUnimplementedEvent(t)\n+ }\n+ return nil, syserr.ErrProtocolNotAvailable\n+}\n+\n// getSockOptIPv6 implements GetSockOpt when level is SOL_IPV6.\nfunc getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {\nif _, ok := ep.(tcpip.Endpoint); !ok {\n@@ -1686,6 +1723,9 @@ func SetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, level int\ncase linux.SOL_TCP:\nreturn setSockOptTCP(t, s, ep, name, optVal)\n+ case linux.SOL_ICMPV6:\n+ return setSockOptICMPv6(t, s, ep, name, optVal)\n+\ncase linux.SOL_IPV6:\nreturn setSockOptIPv6(t, s, ep, name, optVal)\n@@ -1700,7 +1740,6 @@ func SetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, level int\nreturn syserr.ErrProtocolNotAvailable\ncase linux.SOL_UDP,\n- linux.SOL_ICMPV6,\nlinux.SOL_RAW:\nt.Kernel().EmitUnimplementedEvent(t)\n@@ -2051,6 +2090,32 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i\nreturn nil\n}\n+func setSockOptICMPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, optVal []byte) *syserr.Error {\n+ if _, ok := ep.(tcpip.Endpoint); !ok {\n+ log.Warningf(\"SOL_ICMPV6 options not supported on endpoints other than tcpip.Endpoint: option = %d\", name)\n+ return syserr.ErrUnknownProtocolOption\n+ }\n+\n+ if family, _, _ := s.Type(); family != linux.AF_INET6 {\n+ return syserr.ErrUnknownProtocolOption\n+ }\n+\n+ switch name {\n+ case linux.ICMPV6_FILTER:\n+ var req linux.ICMP6Filter\n+ if len(optVal) < req.SizeBytes() {\n+ return syserr.ErrInvalidArgument\n+ }\n+\n+ req.UnmarshalUnsafe(optVal)\n+ return syserr.TranslateNetstackError(ep.SetSockOpt(&tcpip.ICMPv6Filter{DenyType: req.Filter}))\n+ default:\n+ t.Kernel().EmitUnimplementedEvent(t)\n+ }\n+\n+ 
return nil\n+}\n+\n// setSockOptIPv6 implements SetSockOpt when level is SOL_IPV6.\nfunc setSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, optVal []byte) *syserr.Error {\nif _, ok := ep.(tcpip.Endpoint); !ok {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -897,6 +897,29 @@ type SettableSocketOption interface {\nisSettableSocketOption()\n}\n+// ICMPv6Filter specifes a filter for ICMPv6 types.\n+//\n+// +stateify savable\n+type ICMPv6Filter struct {\n+ // DenyType indicates if an ICMP type should be blocked.\n+ //\n+ // The ICMPv6 type field is 8 bits so there are up to 256 different ICMPv6\n+ // types.\n+ DenyType [8]uint32\n+}\n+\n+// ShouldDeny returns true iff the ICMPv6 Type should be denied.\n+func (f *ICMPv6Filter) ShouldDeny(icmpType uint8) bool {\n+ const bitsInUint32 = 32\n+ i := icmpType / bitsInUint32\n+ b := icmpType % bitsInUint32\n+ return f.DenyType[i]&(1<<b) != 0\n+}\n+\n+func (*ICMPv6Filter) isGettableSocketOption() {}\n+\n+func (*ICMPv6Filter) isSettableSocketOption() {}\n+\n// EndpointState represents the state of an endpoint.\ntype EndpointState uint8\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/endpoint.go",
"new_path": "pkg/tcpip/transport/raw/endpoint.go",
"diff": "@@ -85,6 +85,10 @@ type endpoint struct {\nrcvDisabled bool\nmu sync.RWMutex `state:\"nosave\"`\n+ // icmp6Filter holds the filter for ICMPv6 packets.\n+ //\n+ // +checklocks:mu\n+ icmpv6Filter tcpip.ICMPv6Filter\n}\n// NewEndpoint returns a raw endpoint for the given protocols.\n@@ -388,10 +392,23 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {\n// SetSockOpt implements tcpip.Endpoint.SetSockOpt.\nfunc (e *endpoint) SetSockOpt(opt tcpip.SettableSocketOption) tcpip.Error {\n- switch opt.(type) {\n+ switch opt := opt.(type) {\ncase *tcpip.SocketDetachFilterOption:\nreturn nil\n+ case *tcpip.ICMPv6Filter:\n+ if e.net.NetProto() != header.IPv6ProtocolNumber {\n+ return &tcpip.ErrUnknownProtocolOption{}\n+ }\n+\n+ if e.transProto != header.ICMPv6ProtocolNumber {\n+ return &tcpip.ErrInvalidOptionValue{}\n+ }\n+\n+ e.mu.Lock()\n+ defer e.mu.Unlock()\n+ e.icmpv6Filter = *opt\n+ return nil\ndefault:\nreturn e.net.SetSockOpt(opt)\n}\n@@ -403,8 +420,25 @@ func (e *endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) tcpip.Error {\n// GetSockOpt implements tcpip.Endpoint.GetSockOpt.\nfunc (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) tcpip.Error {\n+ switch opt := opt.(type) {\n+ case *tcpip.ICMPv6Filter:\n+ if e.net.NetProto() != header.IPv6ProtocolNumber {\n+ return &tcpip.ErrUnknownProtocolOption{}\n+ }\n+\n+ if e.transProto != header.ICMPv6ProtocolNumber {\n+ return &tcpip.ErrInvalidOptionValue{}\n+ }\n+\n+ e.mu.RLock()\n+ defer e.mu.RUnlock()\n+ *opt = e.icmpv6Filter\n+ return nil\n+\n+ default:\nreturn e.net.GetSockOpt(opt)\n}\n+}\n// GetSockOptInt implements tcpip.Endpoint.GetSockOptInt.\nfunc (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) {\n@@ -509,15 +543,29 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\n//\n// TODO(https://gvisor.dev/issue/6517): Avoid the copy once S/R supports\n// overlapping slices.\n+ transportHeader := pkt.TransportHeader().View()\nvar combinedVV buffer.VectorisedView\n- if info.NetProto == header.IPv4ProtocolNumber {\n- networkHeader, transportHeader := pkt.NetworkHeader().View(), pkt.TransportHeader().View()\n+ switch info.NetProto {\n+ case header.IPv4ProtocolNumber:\n+ networkHeader := pkt.NetworkHeader().View()\nheaders := make(buffer.View, 0, len(networkHeader)+len(transportHeader))\nheaders = append(headers, networkHeader...)\nheaders = append(headers, transportHeader...)\ncombinedVV = headers.ToVectorisedView()\n- } else {\n- combinedVV = append(buffer.View(nil), pkt.TransportHeader().View()...).ToVectorisedView()\n+ case header.IPv6ProtocolNumber:\n+ if e.transProto == header.ICMPv6ProtocolNumber {\n+ if len(transportHeader) < header.ICMPv6MinimumSize {\n+ return false\n+ }\n+\n+ if e.icmpv6Filter.ShouldDeny(uint8(header.ICMPv6(transportHeader).Type())) {\n+ return false\n+ }\n+ }\n+\n+ combinedVV = append(buffer.View(nil), transportHeader...).ToVectorisedView()\n+ default:\n+ panic(fmt.Sprintf(\"unrecognized protocol number = %d\", info.NetProto))\n}\ncombinedVV.Append(pkt.Data().ExtractVV())\npacket.data = combinedVV\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/raw_socket_icmp.cc",
"new_path": "test/syscalls/linux/raw_socket_icmp.cc",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+#include <netinet/icmp6.h>\n#include <netinet/in.h>\n#include <netinet/ip.h>\n#include <netinet/ip_icmp.h>\n@@ -34,6 +35,15 @@ namespace testing {\nnamespace {\n+using ::testing::_;\n+using ::testing::ElementsAre;\n+using ::testing::ElementsAreArray;\n+using ::testing::FieldsAre;\n+using ::testing::Not;\n+using ::testing::Test;\n+using ::testing::Values;\n+using ::testing::WithParamInterface;\n+\n// The size of an empty ICMP packet and IP header together.\nconstexpr size_t kEmptyICMPSize = 28;\n@@ -41,7 +51,7 @@ constexpr size_t kEmptyICMPSize = 28;\n// responds to ICMP echo requests, and thus a single echo request sent via\n// loopback leads to 2 received ICMP packets.\n-class RawSocketICMPTest : public ::testing::Test {\n+class RawSocketICMPTest : public Test {\nprotected:\n// Creates a socket to be used in tests.\nvoid SetUp() override;\n@@ -109,6 +119,16 @@ TEST_F(RawSocketICMPTest, SockOptIPv6Checksum) {\nEXPECT_EQ(len, sizeof(v));\n}\n+TEST_F(RawSocketICMPTest, ICMPv6FilterNotSupported) {\n+ icmp6_filter v;\n+ EXPECT_THAT(setsockopt(s_, SOL_ICMPV6, ICMP6_FILTER, &v, sizeof(v)),\n+ SyscallFailsWithErrno(ENOPROTOOPT));\n+ socklen_t len = sizeof(v);\n+ EXPECT_THAT(getsockopt(s_, SOL_ICMPV6, ICMP6_FILTER, &v, &len),\n+ SyscallFailsWithErrno(EOPNOTSUPP));\n+ EXPECT_EQ(len, sizeof(v));\n+}\n+\n// We'll only read an echo in this case, as the kernel won't respond to the\n// malformed ICMP checksum.\nTEST_F(RawSocketICMPTest, SendAndReceiveBadChecksum) {\n@@ -552,6 +572,146 @@ void RawSocketICMPTest::ReceiveICMPFrom(char* recv_buf, size_t recv_buf_len,\nSyscallSucceedsWithValue(expected_size + sizeof(struct iphdr)));\n}\n+class RawSocketICMPv6Test : public Test {\n+ public:\n+ void SetUp() override {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveRawIPSocketCapability()));\n+\n+ fd_ = ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(AF_INET6, SOCK_RAW | SOCK_NONBLOCK, IPPROTO_ICMPV6));\n+ }\n+\n+ void TearDown() override {\n+ if (!ASSERT_NO_ERRNO_AND_VALUE(HaveRawIPSocketCapability())) {\n+ return;\n+ }\n+\n+ EXPECT_THAT(close(fd_.release()), SyscallSucceeds());\n+ }\n+\n+ protected:\n+ const FileDescriptor& fd() { return fd_; }\n+\n+ private:\n+ FileDescriptor fd_;\n+};\n+\n+TEST_F(RawSocketICMPv6Test, InitialFilterPassesAll) {\n+ icmp6_filter got_filter;\n+ socklen_t got_filter_len = sizeof(got_filter);\n+ ASSERT_THAT(getsockopt(fd().get(), SOL_ICMPV6, ICMP6_FILTER, &got_filter,\n+ &got_filter_len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(got_filter_len, sizeof(got_filter));\n+ icmp6_filter expected_filter;\n+ ICMP6_FILTER_SETPASSALL(&expected_filter);\n+ EXPECT_THAT(got_filter,\n+ FieldsAre(ElementsAreArray(expected_filter.icmp6_filt)));\n+}\n+\n+TEST_F(RawSocketICMPv6Test, GetPartialFilterSucceeds) {\n+ icmp6_filter set_filter;\n+ ICMP6_FILTER_SETBLOCKALL(&set_filter);\n+ ASSERT_THAT(setsockopt(fd().get(), SOL_ICMPV6, ICMP6_FILTER, &set_filter,\n+ sizeof(set_filter)),\n+ SyscallSucceeds());\n+\n+ icmp6_filter got_filter = {};\n+ // We use a length smaller than a full filter length and expect that\n+ // only the bytes up to the provided length are modified. 
The last element\n+ // should be unmodified when getsockopt returns.\n+ constexpr socklen_t kShortFilterLen =\n+ sizeof(got_filter) - sizeof(got_filter.icmp6_filt[0]);\n+ socklen_t got_filter_len = kShortFilterLen;\n+ ASSERT_THAT(getsockopt(fd().get(), SOL_ICMPV6, ICMP6_FILTER, &got_filter,\n+ &got_filter_len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(got_filter_len, kShortFilterLen);\n+ icmp6_filter expected_filter = set_filter;\n+ expected_filter.icmp6_filt[std::size(expected_filter.icmp6_filt) - 1] = 0;\n+ EXPECT_THAT(got_filter,\n+ FieldsAre(ElementsAreArray(expected_filter.icmp6_filt)));\n+}\n+\n+class RawSocketICMPv6TypeTest : public RawSocketICMPv6Test,\n+ public WithParamInterface<uint8_t> {};\n+\n+TEST_P(RawSocketICMPv6TypeTest, FilterDeliveredPackets) {\n+ const sockaddr_in6 addr = {\n+ .sin6_family = AF_INET6,\n+ .sin6_addr = IN6ADDR_LOOPBACK_INIT,\n+ };\n+\n+ const uint8_t allowed_type = GetParam();\n+\n+ // Pass only the allowed type.\n+ {\n+ icmp6_filter set_filter;\n+ ICMP6_FILTER_SETBLOCKALL(&set_filter);\n+ ICMP6_FILTER_SETPASS(allowed_type, &set_filter);\n+ ASSERT_THAT(setsockopt(fd().get(), SOL_ICMPV6, ICMP6_FILTER, &set_filter,\n+ sizeof(set_filter)),\n+ SyscallSucceeds());\n+\n+ icmp6_filter got_filter;\n+ socklen_t got_filter_len = sizeof(got_filter);\n+ ASSERT_THAT(getsockopt(fd().get(), SOL_ICMPV6, ICMP6_FILTER, &got_filter,\n+ &got_filter_len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(got_filter_len, sizeof(got_filter));\n+ EXPECT_THAT(got_filter, FieldsAre(ElementsAreArray(set_filter.icmp6_filt)));\n+ }\n+\n+ // Send an ICMP packet for each type.\n+ uint8_t icmp_type = 0;\n+ constexpr uint8_t kUnusedICMPCode = 0;\n+ do {\n+ const icmp6_hdr packet = {\n+ .icmp6_type = icmp_type,\n+ .icmp6_code = kUnusedICMPCode,\n+ // The stack will calculate the checksum.\n+ .icmp6_cksum = 0,\n+ };\n+\n+ ASSERT_THAT(RetryEINTR(sendto)(fd().get(), &packet, sizeof(packet), 0,\n+ reinterpret_cast<const sockaddr*>(&addr),\n+ sizeof(addr)),\n+ SyscallSucceedsWithValue(sizeof(packet)));\n+ } while (icmp_type++ != std::numeric_limits<uint8_t>::max());\n+\n+ // Make sure only the allowed type was received.\n+ {\n+ icmp6_hdr got_packet;\n+ sockaddr_in6 sender;\n+ socklen_t sender_len = sizeof(sender);\n+ ASSERT_THAT(RetryEINTR(recvfrom)(\n+ fd().get(), &got_packet, sizeof(got_packet), 0 /* flags */,\n+ reinterpret_cast<sockaddr*>(&sender), &sender_len),\n+ SyscallSucceedsWithValue(sizeof(got_packet)));\n+ ASSERT_EQ(sender_len, sizeof(sender));\n+ EXPECT_EQ(memcmp(&sender, &addr, sizeof(addr)), 0);\n+ // The stack should have populated the checksum.\n+ if (IsRunningOnGvisor() && !IsRunningWithHostinet()) {\n+ // TODO(https://github.com/google/gvisor/pull/6957): Use same check as\n+ // Linux.\n+ EXPECT_THAT(got_packet,\n+ FieldsAre(allowed_type, kUnusedICMPCode, 0 /* icmp6_cksum */,\n+ _ /* icmp6_dataun */\n+ ));\n+ } else {\n+ EXPECT_THAT(got_packet,\n+ FieldsAre(allowed_type, kUnusedICMPCode,\n+ Not(0) /* icmp6_cksum */, _ /* icmp6_dataun */\n+ ));\n+ }\n+ EXPECT_THAT(got_packet.icmp6_data32, ElementsAre(0));\n+ }\n+}\n+\n+INSTANTIATE_TEST_SUITE_P(AllRawSocketTests, RawSocketICMPv6TypeTest,\n+ Values(uint8_t{0},\n+ std::numeric_limits<uint8_t>::max()));\n+\n} // namespace\n} // namespace testing\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support SOL_ICMPV6 -> ICMPV6_FILTER
PiperOrigin-RevId: 417696519 |
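For reference, the filter semantics introduced above (one deny bit per ICMPv6 type, 256 types packed into eight uint32 words, matching struct icmp6_filter) can be exercised with this standalone sketch. The blockAll and pass helpers are stand-ins for the ICMP6_FILTER_SETBLOCKALL and ICMP6_FILTER_SETPASS macros used by the new tests:

```go
package main

import "fmt"

const bitsPerUint32 = 32

// filter mirrors tcpip.ICMPv6Filter: a set bit means "deny this type".
type filter struct {
	denyType [8]uint32
}

func (f *filter) shouldDeny(icmpType uint8) bool {
	return f.denyType[icmpType/bitsPerUint32]&(1<<(icmpType%bitsPerUint32)) != 0
}

// blockAll sets every deny bit.
func (f *filter) blockAll() {
	for i := range f.denyType {
		f.denyType[i] = ^uint32(0)
	}
}

// pass clears the deny bit for one ICMPv6 type.
func (f *filter) pass(icmpType uint8) {
	f.denyType[icmpType/bitsPerUint32] &^= 1 << (icmpType % bitsPerUint32)
}

func main() {
	var f filter
	f.blockAll()
	f.pass(128) // ICMPv6 Echo Request
	fmt.Println(f.shouldDeny(128), f.shouldDeny(129)) // false true
}
```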
259,909 | 21.12.2021 16:07:13 | 28,800 | d54045f34aa51661116a62604be12dc77ce1d627 | Implement the pivot_root syscall. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -1844,3 +1844,30 @@ func (k *Kernel) ReleaseCgroupHierarchy(hid uint32) {\n})\nk.tasks.mu.RUnlock()\n}\n+\n+func (k *Kernel) ReplaceFSContextRoots(ctx context.Context, oldRoot vfs.VirtualDentry, newRoot vfs.VirtualDentry) {\n+ k.tasks.mu.RLock()\n+ oldRootDecRefs := 0\n+ k.tasks.forEachTaskLocked(func(t *Task) {\n+ t.mu.Lock()\n+ defer t.mu.Unlock()\n+ if fsc := t.fsContext; fsc != nil {\n+ fsc.mu.Lock()\n+ defer fsc.mu.Unlock()\n+ if fsc.rootVFS2 == oldRoot {\n+ newRoot.IncRef()\n+ oldRootDecRefs++\n+ fsc.rootVFS2 = newRoot\n+ }\n+ if fsc.cwdVFS2 == oldRoot {\n+ newRoot.IncRef()\n+ oldRootDecRefs++\n+ fsc.cwdVFS2 = newRoot\n+ }\n+ }\n+ })\n+ k.tasks.mu.RUnlock()\n+ for i := 0; i < oldRootDecRefs; i++ {\n+ oldRoot.DecRef(ctx)\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/fscontext.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/fscontext.go",
"diff": "@@ -129,3 +129,48 @@ func Chroot(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\nvd.DecRef(t)\nreturn 0, nil, nil\n}\n+\n+// PivotRoot implements Linux syscall pivot_root(2).\n+func PivotRoot(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n+ addr1 := args[0].Pointer()\n+ addr2 := args[1].Pointer()\n+\n+ if !t.HasCapability(linux.CAP_SYS_ADMIN) {\n+ return 0, nil, linuxerr.EPERM\n+ }\n+\n+ newRootPath, err := copyInPath(t, addr1)\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+ newRootTpop, err := getTaskPathOperation(t, linux.AT_FDCWD, newRootPath, disallowEmptyPath, followFinalSymlink)\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+ defer newRootTpop.Release(t)\n+ putOldPath, err := copyInPath(t, addr2)\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+ putOldTpop, err := getTaskPathOperation(t, linux.AT_FDCWD, putOldPath, disallowEmptyPath, followFinalSymlink)\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+ defer putOldTpop.Release(t)\n+\n+ oldRootVd := t.FSContext().RootDirectoryVFS2()\n+ defer oldRootVd.DecRef(t)\n+ newRootVd, err := t.Kernel().VFS().GetDentryAt(t, t.Credentials(), &newRootTpop.pop, &vfs.GetDentryOptions{\n+ CheckSearchable: true,\n+ })\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+ defer newRootVd.DecRef(t)\n+\n+ if err := t.Kernel().VFS().PivotRoot(t, t.Credentials(), &newRootTpop.pop, &putOldTpop.pop); err != nil {\n+ return 0, nil, err\n+ }\n+ t.Kernel().ReplaceFSContextRoots(t, oldRootVd, newRootVd)\n+ return 0, nil, nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/vfs2.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/vfs2.go",
"diff": "@@ -88,6 +88,7 @@ func Override() {\ns.Table[133] = syscalls.Supported(\"mknod\", Mknod)\ns.Table[137] = syscalls.Supported(\"statfs\", Statfs)\ns.Table[138] = syscalls.Supported(\"fstatfs\", Fstatfs)\n+ s.Table[155] = syscalls.Supported(\"pivot_root\", PivotRoot)\ns.Table[161] = syscalls.Supported(\"chroot\", Chroot)\ns.Table[162] = syscalls.Supported(\"sync\", Sync)\ns.Table[165] = syscalls.Supported(\"mount\", Mount)\n@@ -200,6 +201,7 @@ func Override() {\ns.Table[38] = syscalls.Supported(\"renameat\", Renameat)\ns.Table[39] = syscalls.Supported(\"umount2\", Umount2)\ns.Table[40] = syscalls.Supported(\"mount\", Mount)\n+ s.Table[41] = syscalls.Supported(\"pivot_root\", PivotRoot)\ns.Table[43] = syscalls.Supported(\"statfs\", Statfs)\ns.Table[44] = syscalls.Supported(\"fstatfs\", Fstatfs)\ns.Table[45] = syscalls.Supported(\"truncate\", Truncate)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/mount.go",
"new_path": "pkg/sentry/vfs/mount.go",
"diff": "@@ -134,7 +134,7 @@ type MountNamespace struct {\n// Owner is the usernamespace that owns this mount namespace.\nOwner *auth.UserNamespace\n- // root is the MountNamespace's root mount. root is immutable.\n+ // root is the MountNamespace's root mount.\nroot *Mount\n// mountpoints maps all Dentries which are mount points in this namespace\n@@ -263,7 +263,8 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr\n}\n// MountAt creates and mounts a Filesystem configured by the given arguments.\n-// The VirtualFilesystem will hold a reference to the Mount until it is unmounted.\n+// The VirtualFilesystem will hold a reference to the Mount until it is\n+// unmounted.\n//\n// This method returns the mounted Mount without a reference, for convenience\n// during VFS setup when there is no chance of racing with unmount.\n@@ -305,9 +306,10 @@ func (vfs *VirtualFilesystem) UmountAt(ctx context.Context, creds *auth.Credenti\ndefer func() {\nvd.DecRef(ctx)\n}()\n- // Linux passes the LOOKUP_MOUNPOINT flag to user_path_at in ksys_umount to resolve to the\n- // toppmost mount in the stack located at the specified path. vfs.GetMountAt() imitiates this\n- // behavior. See fs/namei.c:user_path_at(...) and fs/namespace.c:ksys_umount(...).\n+ // Linux passes the LOOKUP_MOUNPOINT flag to user_path_at in ksys_umount to\n+ // resolve to the toppmost mount in the stack located at the specified path.\n+ // vfs.GetMountAt() imitiates this behavior. See fs/namei.c:user_path_at(...)\n+ // and fs/namespace.c:ksys_umount(...).\nif vd.dentry.isMounted() {\nif realmnt := vfs.getMountAt(ctx, vd.mount, vd.dentry); realmnt != nil {\nvd.mount.DecRef(ctx)\n@@ -708,6 +710,79 @@ retryFirst:\nreturn VirtualDentry{mnt, d}\n}\n+// PivotRoot makes location pointed to by newRootPop the root of the current\n+// namespace, and moves the current root to the location pointed to by\n+// putOldPop.\n+func (vfs *VirtualFilesystem) PivotRoot(ctx context.Context, creds *auth.Credentials, newRootPop *PathOperation, putOldPop *PathOperation) error {\n+ newRootVd, err := vfs.GetDentryAt(ctx, creds, newRootPop, &GetDentryOptions{CheckSearchable: true})\n+ if err != nil {\n+ return err\n+ }\n+ defer newRootVd.DecRef(ctx)\n+ putOldVd, err := vfs.GetDentryAt(ctx, creds, putOldPop, &GetDentryOptions{CheckSearchable: true})\n+ if err != nil {\n+ return err\n+ }\n+ defer putOldVd.DecRef(ctx)\n+ rootVd := RootFromContext(ctx)\n+ defer rootVd.DecRef(ctx)\n+\n+ vfs.mountMu.Lock()\n+ defer vfs.mountMu.Unlock()\n+\n+ // Neither new_root nor put_old can be on the same mount as the current\n+ //root mount.\n+ if newRootVd.mount == rootVd.mount || putOldVd.mount == rootVd.mount {\n+ return linuxerr.EBUSY\n+ }\n+ // new_root must be a mountpoint.\n+ if newRootVd.mount.root != newRootVd.dentry {\n+ return linuxerr.EINVAL\n+ }\n+ // put_old must be at or underneath new_root.\n+ path, err := vfs.PathnameReachable(ctx, newRootVd, putOldVd)\n+ if err != nil || len(path) == 0 {\n+ return linuxerr.EINVAL\n+ }\n+ // The current root directory must be a mountpoint\n+ // (in the case it has been chrooted).\n+ if rootVd.mount.root != rootVd.dentry {\n+ return linuxerr.EINVAL\n+ }\n+ // The current root and the new root cannot be on the rootfs mount.\n+ if rootVd.mount.parent() == nil || newRootVd.mount.parent() == nil {\n+ return linuxerr.EINVAL\n+ }\n+ // The current root and the new root must be in the context's mount namespace.\n+ ns := MountNamespaceFromContext(ctx)\n+ defer ns.DecRef(ctx)\n+ if rootVd.mount.ns != ns || 
newRootVd.mount.ns != ns {\n+ return linuxerr.EINVAL\n+ }\n+ // TODO(gvisor.dev/issues/221): Update this function to disallow\n+ // pivot_root-ing new_root/put_old mounts with MS_SHARED propagation once it\n+ // is implemented in gVisor.\n+\n+ vfs.mounts.seq.BeginWrite()\n+ mp := vfs.disconnectLocked(newRootVd.mount)\n+ mp.DecRef(ctx)\n+ rootMp := vfs.disconnectLocked(rootVd.mount)\n+\n+ putOldVd.IncRef()\n+ putOldVd.dentry.mu.Lock()\n+ vfs.connectLocked(rootVd.mount, putOldVd, ns)\n+ putOldVd.dentry.mu.Unlock()\n+\n+ rootMp.dentry.mu.Lock()\n+ vfs.connectLocked(newRootVd.mount, rootMp, ns)\n+ rootMp.dentry.mu.Unlock()\n+ vfs.mounts.seq.EndWrite()\n+\n+ newRootVd.mount.DecRef(ctx)\n+ rootVd.mount.DecRef(ctx)\n+ return nil\n+}\n+\n// SetMountReadOnly sets the mount as ReadOnly.\nfunc (vfs *VirtualFilesystem) SetMountReadOnly(mnt *Mount, ro bool) error {\nvfs.mountMu.Lock()\n@@ -781,7 +856,8 @@ func (mnt *Mount) Root() *Dentry {\nreturn mnt.root\n}\n-// Root returns mntns' root. It does not take a reference on the returned Dentry.\n+// Root returns mntns' root. It does not take a reference on the returned\n+// Dentry.\nfunc (mntns *MountNamespace) Root() VirtualDentry {\nvd := VirtualDentry{\nmount: mntns.root,\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -83,6 +83,12 @@ syscall_test(\ntest = \"//test/syscalls/linux:chroot_test\",\n)\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:pivot_root_test\",\n+ use_tmpfs = True,\n+)\n+\nsyscall_test(\ntest = \"//test/syscalls/linux:clock_getres_test\",\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -474,6 +474,28 @@ cc_binary(\n],\n)\n+cc_binary(\n+ name = \"pivot_root_test\",\n+ testonly = 1,\n+ srcs = [\"pivot_root.cc\"],\n+ linkstatic = 1,\n+ deps = [\n+ \"//test/util:capability_util\",\n+ \"//test/util:cleanup\",\n+ \"//test/util:file_descriptor\",\n+ \"//test/util:fs_util\",\n+ \"@com_google_absl//absl/cleanup\",\n+ \"@com_google_absl//absl/strings\",\n+ gtest,\n+ \"//test/util:logging\",\n+ \"//test/util:mount_util\",\n+ \"//test/util:multiprocess_util\",\n+ \"//test/util:temp_path\",\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ ],\n+)\n+\ncc_binary(\nname = \"clock_getres_test\",\ntestonly = 1,\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/syscalls/linux/pivot_root.cc",
"diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <errno.h>\n+#include <fcntl.h>\n+#include <linux/capability.h>\n+#include <stddef.h>\n+#include <sys/mman.h>\n+#include <sys/mount.h>\n+#include <sys/stat.h>\n+#include <syscall.h>\n+#include <unistd.h>\n+\n+#include <algorithm>\n+#include <string>\n+#include <vector>\n+\n+#include \"gmock/gmock.h\"\n+#include \"gtest/gtest.h\"\n+#include \"absl/cleanup/cleanup.h\"\n+#include \"absl/strings/str_cat.h\"\n+#include \"absl/strings/str_split.h\"\n+#include \"absl/strings/string_view.h\"\n+#include \"test/util/capability_util.h\"\n+#include \"test/util/file_descriptor.h\"\n+#include \"test/util/fs_util.h\"\n+#include \"test/util/logging.h\"\n+#include \"test/util/mount_util.h\"\n+#include \"test/util/multiprocess_util.h\"\n+#include \"test/util/temp_path.h\"\n+#include \"test/util/test_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+namespace {\n+\n+TEST(PivotRootTest, Success) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ auto root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ EXPECT_THAT(mount(\"\", root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ auto new_root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path()));\n+ EXPECT_THAT(mount(\"\", new_root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ const std::string new_root_path =\n+ absl::StrCat(\"/\", Basename(new_root.path()));\n+ auto put_old =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(new_root.path()));\n+ const std::string put_old_path =\n+ absl::StrCat(new_root_path, \"/\", Basename(put_old.path()));\n+\n+ const auto rest = [&] {\n+ TEST_CHECK_SUCCESS(chroot(root.path().c_str()));\n+ TEST_CHECK_SUCCESS(\n+ syscall(__NR_pivot_root, new_root_path.c_str(), put_old_path.c_str()));\n+ };\n+ EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n+}\n+\n+TEST(PivotRootTest, CreatesNewRoot) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ auto root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ EXPECT_THAT(mount(\"\", root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ auto new_root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path()));\n+ EXPECT_THAT(mount(\"\", new_root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ const std::string new_root_path =\n+ absl::StrCat(\"/\", Basename(new_root.path()));\n+ auto put_old =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(new_root.path()));\n+ const std::string put_old_path =\n+ absl::StrCat(new_root_path, \"/\", Basename(put_old.path()));\n+ auto file_in_new_root =\n+ 
ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(new_root.path()));\n+ const std::string file_in_new_root_path = file_in_new_root.path();\n+ const std::string file_in_new_root_new_path =\n+ absl::StrCat(\"/\", Basename(file_in_new_root_path));\n+\n+ const auto rest = [&] {\n+ TEST_CHECK_SUCCESS(chroot(root.path().c_str()));\n+ // pivot_root and switch into new_root.\n+ TEST_CHECK_SUCCESS(\n+ syscall(__NR_pivot_root, new_root_path.c_str(), put_old_path.c_str()));\n+ TEST_CHECK_SUCCESS(chdir(\"/\"));\n+ // Should not be able to stat file by its full path.\n+ char buf[1024];\n+ struct stat statbuf;\n+ TEST_CHECK_ERRNO(stat(file_in_new_root_path.c_str(), &statbuf), ENOENT);\n+ // Should be able to stat file at new rooted path.\n+ TEST_CHECK_SUCCESS(stat(file_in_new_root_new_path.c_str(), &statbuf));\n+ // getcwd should return \"/\".\n+ TEST_CHECK_SUCCESS(syscall(__NR_getcwd, buf, sizeof(buf)));\n+ TEST_CHECK_SUCCESS(strcmp(buf, \"/\") == 0);\n+ // Statting '.', '..', '/', and '/..' all return the same dev and inode.\n+ struct stat statbuf_dot;\n+ TEST_CHECK_SUCCESS(stat(\".\", &statbuf_dot));\n+ struct stat statbuf_dotdot;\n+ TEST_CHECK_SUCCESS(stat(\"..\", &statbuf_dotdot));\n+ TEST_CHECK(statbuf_dot.st_dev == statbuf_dotdot.st_dev);\n+ TEST_CHECK(statbuf_dot.st_ino == statbuf_dotdot.st_ino);\n+ struct stat statbuf_slash;\n+ TEST_CHECK_SUCCESS(stat(\"/\", &statbuf_slash));\n+ TEST_CHECK(statbuf_dot.st_dev == statbuf_slash.st_dev);\n+ TEST_CHECK(statbuf_dot.st_ino == statbuf_slash.st_ino);\n+ struct stat statbuf_slashdotdot;\n+ TEST_CHECK_SUCCESS(stat(\"/..\", &statbuf_slashdotdot));\n+ TEST_CHECK(statbuf_dot.st_dev == statbuf_slashdotdot.st_dev);\n+ TEST_CHECK(statbuf_dot.st_ino == statbuf_slashdotdot.st_ino);\n+ };\n+ EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n+}\n+\n+TEST(PivotRootTest, MovesOldRoot) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ auto root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ EXPECT_THAT(mount(\"\", root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ auto new_root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path()));\n+ EXPECT_THAT(mount(\"\", new_root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ const std::string new_root_path =\n+ absl::StrCat(\"/\", Basename(new_root.path()));\n+ auto put_old =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(new_root.path()));\n+ const std::string put_old_path =\n+ absl::StrCat(new_root_path, \"/\", Basename(put_old.path()));\n+\n+ const std::string old_root_new_path =\n+ absl::StrCat(\"/\", Basename(put_old_path));\n+\n+ const auto rest = [&] {\n+ TEST_CHECK_SUCCESS(chroot(root.path().c_str()));\n+ struct stat statbuf_oldroot;\n+ TEST_CHECK_SUCCESS(stat(\"/\", &statbuf_oldroot));\n+ // pivot_root and switch into new_root.\n+ TEST_CHECK_SUCCESS(\n+ syscall(__NR_pivot_root, new_root_path.c_str(), put_old_path.c_str()));\n+ TEST_CHECK_SUCCESS(chdir(\"/\"));\n+ // Should not be able to stat file by its full path.\n+ struct stat statbuf;\n+ TEST_CHECK_ERRNO(stat(put_old_path.c_str(), &statbuf), ENOENT);\n+ // Should be able to chdir to old root.\n+ TEST_CHECK_SUCCESS(chdir(old_root_new_path.c_str()));\n+ // Statting the root dir from before pivot_root and the put_old location\n+ // should return the same inode and device.\n+ struct stat statbuf_dot;\n+ TEST_CHECK_SUCCESS(stat(\".\", &statbuf_dot));\n+ 
TEST_CHECK(statbuf_dot.st_ino == statbuf_oldroot.st_ino);\n+ TEST_CHECK(statbuf_dot.st_dev == statbuf_oldroot.st_dev);\n+ };\n+ EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n+}\n+\n+TEST(PivotRootTest, ChangesCwdForAllProcesses) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ auto root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ EXPECT_THAT(mount(\"\", root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ auto new_root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path()));\n+ EXPECT_THAT(mount(\"\", new_root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ const std::string new_root_path =\n+ absl::StrCat(\"/\", Basename(new_root.path()));\n+ auto put_old =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(new_root.path()));\n+ const std::string put_old_path =\n+ absl::StrCat(new_root_path, \"/\", Basename(put_old.path()));\n+ const std::string old_root_new_path =\n+ absl::StrCat(\"/\", Basename(put_old_path));\n+\n+ struct stat statbuf_newroot;\n+ TEST_CHECK_SUCCESS(stat(new_root.path().c_str(), &statbuf_newroot));\n+ // Change cwd to the root path.\n+ chdir(root.path().c_str());\n+ const auto rest = [&] {\n+ TEST_CHECK_SUCCESS(chroot(root.path().c_str()));\n+ TEST_CHECK_SUCCESS(\n+ syscall(__NR_pivot_root, new_root_path.c_str(), put_old_path.c_str()));\n+ };\n+ EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n+ // pivot_root should change the cwd/root directory all threads and processes\n+ // in the current mount namespace if they pointed the old root.\n+ struct stat statbuf_cwd_after_syscall;\n+ EXPECT_THAT(stat(\".\", &statbuf_cwd_after_syscall), SyscallSucceeds());\n+ EXPECT_EQ(statbuf_cwd_after_syscall.st_ino, statbuf_newroot.st_ino);\n+ EXPECT_EQ(statbuf_cwd_after_syscall.st_dev, statbuf_newroot.st_dev);\n+}\n+\n+TEST(PivotRootTest, DotDot) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ auto root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ EXPECT_THAT(mount(\"\", root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ auto new_root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path()));\n+ EXPECT_THAT(mount(\"\", new_root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ const std::string new_root_path =\n+ absl::StrCat(\"/\", Basename(new_root.path()));\n+\n+ auto file_in_new_root =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(new_root.path()));\n+ const std::string file_in_new_root_path =\n+ std::string(Basename(file_in_new_root.path()));\n+\n+ const auto rest = [&] {\n+ TEST_CHECK_SUCCESS(chroot(root.path().c_str()));\n+ TEST_CHECK_SUCCESS(chdir(new_root_path.c_str()));\n+ // pivot_root should be able to stack put_old ontop of new_root. 
This allows\n+ // users to pivot_root without creating a temp directory.\n+ TEST_CHECK_SUCCESS(syscall(__NR_pivot_root, \".\", \".\"));\n+ TEST_CHECK_SUCCESS(umount2(\".\", MNT_DETACH));\n+ struct stat statbuf;\n+ TEST_CHECK_SUCCESS(stat(file_in_new_root_path.c_str(), &statbuf));\n+ };\n+ EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n+}\n+\n+TEST(PivotRootTest, NotDir) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ auto file1 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+ auto file2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+ auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ EXPECT_THAT(\n+ syscall(__NR_pivot_root, file1.path().c_str(), file2.path().c_str()),\n+ SyscallFailsWithErrno(ENOTDIR));\n+ EXPECT_THAT(\n+ syscall(__NR_pivot_root, file1.path().c_str(), dir.path().c_str()),\n+ SyscallFailsWithErrno(ENOTDIR));\n+ EXPECT_THAT(\n+ syscall(__NR_pivot_root, dir.path().c_str(), file2.path().c_str()),\n+ SyscallFailsWithErrno(ENOTDIR));\n+}\n+\n+TEST(PivotRootTest, NotExist) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ EXPECT_THAT(syscall(__NR_pivot_root, \"/foo/bar\", \"/bar/baz\"),\n+ SyscallFailsWithErrno(ENOENT));\n+ EXPECT_THAT(syscall(__NR_pivot_root, dir.path().c_str(), \"/bar/baz\"),\n+ SyscallFailsWithErrno(ENOENT));\n+ EXPECT_THAT(syscall(__NR_pivot_root, \"/foo/bar\", dir.path().c_str()),\n+ SyscallFailsWithErrno(ENOENT));\n+}\n+\n+TEST(PivotRootTest, WithoutCapability) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETPCAP)));\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ auto new_root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ const std::string new_root_path = new_root.path();\n+ EXPECT_THAT(mount(\"\", new_root_path.c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ auto put_old =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(new_root_path));\n+ const std::string put_old_path = put_old.path();\n+\n+ AutoCapability cap(CAP_SYS_ADMIN, false);\n+ EXPECT_THAT(\n+ syscall(__NR_pivot_root, new_root_path.c_str(), put_old_path.c_str()),\n+ SyscallFailsWithErrno(EPERM));\n+}\n+\n+TEST(PivotRootTest, NewRootOnRootMount) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ auto root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ EXPECT_THAT(mount(\"\", root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ auto new_root =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path().c_str()));\n+ const std::string new_root_path =\n+ absl::StrCat(\"/\", Basename(new_root.path()));\n+\n+ auto put_old =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(new_root.path()));\n+ const std::string put_old_path =\n+ absl::StrCat(new_root_path, \"/\", Basename(put_old.path()));\n+\n+ const auto rest = [&] {\n+ TEST_CHECK_SUCCESS(chroot(root.path().c_str()));\n+ TEST_CHECK_ERRNO(\n+ syscall(__NR_pivot_root, new_root_path.c_str(), put_old_path.c_str()),\n+ EBUSY);\n+ };\n+ EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n+}\n+\n+TEST(PivotRootTest, NewRootNotAMountpoint) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));\n+ 
SKIP_IF(IsRunningWithVFS1());\n+\n+ auto root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ EXPECT_THAT(mount(\"\", root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ // Make sure new_root is on a separate mount, otherwise this is the same\n+ // as the NewRootOnRootMount test.\n+ auto mountpoint =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path().c_str()));\n+ EXPECT_THAT(mount(\"\", mountpoint.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ const std::string mountpoint_path =\n+ absl::StrCat(\"/\", Basename(mountpoint.path()));\n+ auto new_root = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateDirIn(mountpoint.path()));\n+ const std::string new_root_path =\n+ absl::StrCat(mountpoint_path, \"/\", Basename(new_root.path()));\n+ auto put_old =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(new_root.path()));\n+ const std::string put_old_path =\n+ absl::StrCat(new_root_path, \"/\", Basename(put_old.path()));\n+\n+ const auto rest = [&] {\n+ TEST_CHECK_SUCCESS(chroot(root.path().c_str()));\n+ TEST_CHECK_ERRNO(syscall(\n+ __NR_pivot_root, new_root_path.c_str(), put_old_path.c_str()), EINVAL);\n+ };\n+ EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n+}\n+\n+TEST(PivotRootTest, PutOldNotUnderNewRoot) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ auto root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ EXPECT_THAT(mount(\"\", root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ auto new_root =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path()));\n+ const std::string new_root_path =\n+ absl::StrCat(\"/\", Basename(new_root.path()));\n+ EXPECT_THAT(mount(\"\", new_root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ auto put_old =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path()));\n+ const std::string put_old_path =\n+ absl::StrCat(\"/\", Basename(put_old.path()));\n+ EXPECT_THAT(mount(\"\", put_old.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+\n+ const auto rest = [&] {\n+ TEST_CHECK_SUCCESS(chroot(root.path().c_str()));\n+ TEST_CHECK_ERRNO(\n+ syscall(__NR_pivot_root, new_root_path.c_str(), put_old_path.c_str()),\n+ EINVAL);\n+ };\n+ EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n+}\n+\n+TEST(PivotRootTest, CurrentRootNotAMountPoint) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ auto root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ auto new_root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(root.path()));\n+ EXPECT_THAT(mount(\"\", new_root.path().c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ const std::string new_root_path =\n+ absl::StrCat(\"/\", Basename(new_root.path()));\n+ auto put_old =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(new_root.path()));\n+ const std::string put_old_path =\n+ absl::StrCat(new_root_path, \"/\", Basename(put_old.path()));\n+\n+ const auto rest = [&] {\n+ TEST_CHECK_SUCCESS(chroot(root.path().c_str()));\n+ TEST_CHECK_ERRNO(syscall(\n+ __NR_pivot_root, new_root_path.c_str(), put_old_path.c_str()), EINVAL);\n+ };\n+ EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n+}\n+\n+TEST(PivotRootTest, OnRootFS) {\n+ 
SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_CHROOT)));\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ auto new_root = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ const std::string new_root_path = new_root.path();\n+ EXPECT_THAT(mount(\"\", new_root_path.c_str(), \"tmpfs\", 0, \"mode=0700\"),\n+ SyscallSucceeds());\n+ auto put_old =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(new_root_path));\n+ const std::string put_old_path = put_old.path();\n+\n+ const auto rest = [&] {\n+ TEST_CHECK_ERRNO(\n+ syscall(__NR_pivot_root, new_root_path.c_str(), put_old_path.c_str()),\n+ EINVAL);\n+ };\n+ EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n+}\n+\n+} // namespace\n+\n+} // namespace testing\n+} // namespace gvisor\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement the pivot_root syscall.
PiperOrigin-RevId: 417707787 |
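The pivot_root tests above lean on the classic "stacked" pivot trick — `pivot_root(".", ".")` with new_root and put_old being the same directory — which avoids a scratch directory for the old root. Below is a minimal Go sketch of that sequence using golang.org/x/sys/unix; it is illustrative only, not part of the commit, the path is an assumption, and it must run with CAP_SYS_ADMIN in a private mount namespace.

```go
package main

import (
	"log"

	"golang.org/x/sys/unix"
)

// pivotInto makes newRoot the process root using the stacked
// pivot_root(".", ".") sequence exercised by the DotDot test above:
// put_old is stacked on top of new_root, then lazily detached.
func pivotInto(newRoot string) error {
	// new_root must be a mount point; bind-mounting it onto itself
	// is the usual way to guarantee that.
	if err := unix.Mount(newRoot, newRoot, "", unix.MS_BIND|unix.MS_REC, ""); err != nil {
		return err
	}
	if err := unix.Chdir(newRoot); err != nil {
		return err
	}
	if err := unix.PivotRoot(".", "."); err != nil {
		return err
	}
	// The old root is now mounted on top of the cwd; detach it lazily.
	if err := unix.Unmount(".", unix.MNT_DETACH); err != nil {
		return err
	}
	return unix.Chdir("/")
}

func main() {
	// Hypothetical path for the example.
	if err := pivotInto("/tmp/newroot"); err != nil {
		log.Fatal(err)
	}
}
```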
259,853 | 21.12.2021 17:10:51 | 28,800 | 271e4f4ae6db20240ad5f2cb195e681a5ed5ca9f | kernel/pipe: clean up unused fields from the Pipe structure
They were added by mistake. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/pipe.go",
"new_path": "pkg/sentry/kernel/pipe/pipe.go",
"diff": "@@ -168,13 +168,6 @@ type Pipe struct {\n//\n// This is protected by mu.\nhadWriter bool\n-\n- // waitingWriters is used to wait when writers are initialized after a\n- // reader has opened the pipe.\n- waitingWriters sync.WaitGroup `state:\"nosave\"`\n- // waitingReaders is used to wait when readers are initialized after a\n- // write has opened the pipe.\n- waitingReaders sync.WaitGroup `state:\"nosave\"`\n}\n// NewPipe initializes and returns a pipe.\n"
}
] | Go | Apache License 2.0 | google/gvisor | kernel/pipe: clean up unused fields from the Pipe structure
They were added by mistake.
PiperOrigin-RevId: 417716586 |
260,004 | 21.12.2021 20:20:41 | 28,800 | c299f605d3704422d50b1f159ae6492ab36cad3e | Remove unused WritePackets helpers
Commit fd89c0892 dropped the WritePackets family of functions for layers above the
LinkEndpoint. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables.go",
"new_path": "pkg/tcpip/stack/iptables.go",
"diff": "@@ -572,56 +572,6 @@ func (it *IPTables) startReaper(interval time.Duration) {\n}()\n}\n-// CheckOutputPackets performs the output hook on the packets.\n-//\n-// Returns a map of packets that must be dropped.\n-//\n-// Precondition: The packets' network and transport header must be set.\n-func (it *IPTables) CheckOutputPackets(pkts PacketBufferList, r *Route, outNicName string) (drop map[*PacketBuffer]struct{}, natPkts map[*PacketBuffer]struct{}) {\n- return checkPackets(pkts, func(pkt *PacketBuffer) bool {\n- return it.CheckOutput(pkt, r, outNicName)\n- }, true /* dnat */)\n-}\n-\n-// CheckPostroutingPackets performs the postrouting hook on the packets.\n-//\n-// Returns a map of packets that must be dropped.\n-//\n-// Precondition: The packets' network and transport header must be set.\n-func (it *IPTables) CheckPostroutingPackets(pkts PacketBufferList, r *Route, addressEP AddressableEndpoint, outNicName string) (drop map[*PacketBuffer]struct{}, natPkts map[*PacketBuffer]struct{}) {\n- return checkPackets(pkts, func(pkt *PacketBuffer) bool {\n- return it.CheckPostrouting(pkt, r, addressEP, outNicName)\n- }, false /* dnat */)\n-}\n-\n-func getAddr(pkt *PacketBuffer, dnat bool) tcpip.Address {\n- net := pkt.Network()\n- if dnat {\n- return net.DestinationAddress()\n- }\n- return net.SourceAddress()\n-}\n-\n-func checkPackets(pkts PacketBufferList, f func(*PacketBuffer) bool, dnat bool) (drop map[*PacketBuffer]struct{}, natPkts map[*PacketBuffer]struct{}) {\n- for pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {\n- origAddr := getAddr(pkt, dnat)\n-\n- if ok := f(pkt); !ok {\n- if drop == nil {\n- drop = make(map[*PacketBuffer]struct{})\n- }\n- drop[pkt] = struct{}{}\n- }\n- if newAddr := getAddr(pkt, dnat); newAddr != origAddr {\n- if natPkts == nil {\n- natPkts = make(map[*PacketBuffer]struct{})\n- }\n- natPkts[pkt] = struct{}{}\n- }\n- }\n- return drop, natPkts\n-}\n-\n// Preconditions:\n// * pkt is a IPv4 packet of at least length header.IPv4MinimumSize.\n// * pkt.NetworkHeader is not nil.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove unused WritePackets helpers
https://github.com/google/gvisor/commit/fd89c0892ba7ff2c1e1d0bc91e181488f2f4ca8f
dropped the WritePackets family of functions for layers above the
LinkEndpoint.
PiperOrigin-RevId: 417737737 |
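For orientation, the deleted CheckOutputPackets/checkPackets helpers batched the Output hook over a whole packet list; with them gone, callers evaluate each packet individually. A hedged sketch of that per-packet pattern follows — it is not gVisor code, and it reuses only signatures visible in the removed helper (CheckOutput, PacketBufferList.Front/Next).

```go
package example

import "gvisor.dev/gvisor/pkg/tcpip/stack"

// filterOutput applies the per-packet Output hook to each packet in a
// list and keeps only the packets the hook accepts, mirroring what the
// deleted batch helper did internally.
func filterOutput(it *stack.IPTables, pkts stack.PacketBufferList, r *stack.Route, outNicName string) []*stack.PacketBuffer {
	var accepted []*stack.PacketBuffer
	for pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {
		if it.CheckOutput(pkt, r, outNicName) {
			accepted = append(accepted, pkt)
		}
	}
	return accepted
}
```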
260,004 | 27.12.2021 11:17:22 | 28,800 | 76776aad8b3c3de915e9307a8c436af0cdbcfa21 | Perform Output hook for ICMPv4 Reply
Before this change, the Output hook was not performed for locally
generated ICMPv4 replies, so NAT was never applied to them. This change
fixes that bug. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/icmp.go",
"new_path": "pkg/tcpip/network/ipv4/icmp.go",
"diff": "@@ -20,6 +20,7 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header/parse\"\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n)\n@@ -255,6 +256,12 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer) {\ne.dispatcher.DeliverTransportPacket(header.ICMPv4ProtocolNumber, pkt)\npkt = nil\n+ sent := e.stats.icmp.packetsSent\n+ if !e.protocol.allowICMPReply(header.ICMPv4EchoReply, header.ICMPv4UnusedCode) {\n+ sent.rateLimited.Increment()\n+ return\n+ }\n+\n// Take the base of the incoming request IP header but replace the options.\nreplyHeaderLength := uint8(header.IPv4MinimumSize + len(newOptions))\nreplyIPHdr := header.IPv4(append(iph[:header.IPv4MinimumSize:header.IPv4MinimumSize], newOptions...))\n@@ -275,9 +282,10 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer) {\n}\ndefer r.Release()\n- sent := e.stats.icmp.packetsSent\n- if !e.protocol.allowICMPReply(header.ICMPv4EchoReply, header.ICMPv4UnusedCode) {\n- sent.rateLimited.Increment()\n+ outgoingEP, ok := e.protocol.getEndpointForNIC(r.NICID())\n+ if !ok {\n+ // The outgoing NIC went away.\n+ sent.dropped.Increment()\nreturn\n}\n@@ -308,6 +316,9 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer) {\nreplyIPHdr.SetSourceAddress(r.LocalAddress())\nreplyIPHdr.SetDestinationAddress(r.RemoteAddress())\nreplyIPHdr.SetTTL(r.DefaultTTL())\n+ replyIPHdr.SetTotalLength(uint16(len(replyIPHdr) + len(replyData)))\n+ replyIPHdr.SetChecksum(0)\n+ replyIPHdr.SetChecksum(^replyIPHdr.CalculateChecksum())\nreplyICMPHdr := header.ICMPv4(replyData)\nreplyICMPHdr.SetType(header.ICMPv4EchoReply)\n@@ -321,9 +332,16 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer) {\nData: replyVV,\n})\ndefer replyPkt.DecRef()\n- replyPkt.TransportProtocolNumber = header.ICMPv4ProtocolNumber\n+ // Populate the network/transport headers in the packet buffer so the\n+ // ICMP packet goes through IPTables.\n+ if ok := parse.IPv4(replyPkt); !ok {\n+ panic(\"expected to parse IPv4 header we just created\")\n+ }\n+ if ok := parse.ICMPv4(replyPkt); !ok {\n+ panic(\"expected to parse ICMPv4 header we just created\")\n+ }\n- if err := r.WriteHeaderIncludedPacket(replyPkt); err != nil {\n+ if err := outgoingEP.writePacket(r, replyPkt); err != nil {\nsent.dropped.Increment()\nreturn\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -420,11 +420,17 @@ func (e *endpoint) handleFragments(_ *stack.Route, networkMTU uint32, pkt *stack\n// WritePacket writes a packet to the given destination address and protocol.\nfunc (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams, pkt *stack.PacketBuffer) tcpip.Error {\n- dstAddr := r.RemoteAddress()\n- if err := e.addIPHeader(r.LocalAddress(), dstAddr, pkt, params, nil /* options */); err != nil {\n+ if err := e.addIPHeader(r.LocalAddress(), r.RemoteAddress(), pkt, params, nil /* options */); err != nil {\nreturn err\n}\n+ return e.writePacket(r, pkt)\n+}\n+\n+func (e *endpoint) writePacket(r *stack.Route, pkt *stack.PacketBuffer) tcpip.Error {\n+ netHeader := header.IPv4(pkt.NetworkHeader().View())\n+ dstAddr := netHeader.DestinationAddress()\n+\n// iptables filtering. All packets that reach here are locally\n// generated.\noutNicName := e.protocol.stack.FindNICNameFromID(e.nic.ID())\n@@ -441,8 +447,8 @@ func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams,\n// We should do this for every packet, rather than only DNATted packets, but\n// removing this check short circuits broadcasts before they are sent out to\n// other hosts.\n- if netHeader := header.IPv4(pkt.NetworkHeader().View()); dstAddr != netHeader.DestinationAddress() {\n- if ep := e.protocol.findEndpointWithAddress(netHeader.DestinationAddress()); ep != nil {\n+ if newDstAddr := netHeader.DestinationAddress(); dstAddr != newDstAddr {\n+ if ep := e.protocol.findEndpointWithAddress(newDstAddr); ep != nil {\n// Since we rewrote the packet but it is being routed back to us, we\n// can safely assume the checksum is valid.\nep.handleLocalPacket(pkt, true /* canSkipRXChecksum */)\n@@ -450,10 +456,10 @@ func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams,\n}\n}\n- return e.writePacket(r, pkt, false /* headerIncluded */)\n+ return e.writePacketPostRouting(r, pkt, false /* headerIncluded */)\n}\n-func (e *endpoint) writePacket(r *stack.Route, pkt *stack.PacketBuffer, headerIncluded bool) tcpip.Error {\n+func (e *endpoint) writePacketPostRouting(r *stack.Route, pkt *stack.PacketBuffer, headerIncluded bool) tcpip.Error {\nif r.Loop()&stack.PacketLoop != 0 {\n// If the packet was generated by the stack (not a raw/packet endpoint\n// where a packet may be written with the header included), then we can\n@@ -561,7 +567,7 @@ func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt *stack.PacketBu\nreturn &tcpip.ErrMalformedHeader{}\n}\n- return e.writePacket(r, pkt, true /* headerIncluded */)\n+ return e.writePacketPostRouting(r, pkt, true /* headerIncluded */)\n}\n// forwardPacket attempts to forward a packet to its final destination.\n@@ -691,7 +697,7 @@ func (e *endpoint) forwardPacket(pkt *stack.PacketBuffer) ip.ForwardingError {\nreturn &ip.ErrOther{Err: &tcpip.ErrUnknownDevice{}}\n}\n- switch err := forwardToEp.writePacket(r, newPkt, true /* headerIncluded */); err.(type) {\n+ switch err := forwardToEp.writePacketPostRouting(r, newPkt, true /* headerIncluded */); err.(type) {\ncase nil:\nreturn nil\ncase *tcpip.ErrMessageTooLong:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tests/integration/iptables_test.go",
"new_path": "pkg/tcpip/tests/integration/iptables_test.go",
"diff": "@@ -971,7 +971,7 @@ func TestForwardingHook(t *testing.T) {\n}\n}\n-func TestInputHookWithLocalForwarding(t *testing.T) {\n+func TestFilteringEchoPacketsWithLocalForwarding(t *testing.T) {\nconst (\nnicID1 = 1\nnicID2 = 2\n@@ -1018,37 +1018,58 @@ func TestInputHookWithLocalForwarding(t *testing.T) {\n},\n}\n+ type droppedEcho int\n+ const (\n+ _ droppedEcho = iota\n+ noneDropped\n+ echoRequestDroppedAtInput\n+ echoRequestDroppedAtForward\n+ echoReplyDropped\n+ )\n+\nsubTests := []struct {\nname string\nsetupFilter func(*testing.T, *stack.Stack, tcpip.NetworkProtocolNumber)\n- expectDrop bool\n+ expectResult droppedEcho\n}{\n{\nname: \"Accept\",\nsetupFilter: func(*testing.T, *stack.Stack, tcpip.NetworkProtocolNumber) { /* no filter */ },\n- expectDrop: false,\n+ expectResult: noneDropped,\n},\n{\n- name: \"Drop\",\n+ name: \"Input Drop\",\nsetupFilter: setupDropFilter(stack.Input, stack.IPHeaderFilter{}),\n- expectDrop: true,\n+ expectResult: echoRequestDroppedAtInput,\n},\n{\n- name: \"Drop with input NIC filtering on arrival NIC\",\n+ name: \"Input Drop with input NIC filtering on arrival NIC\",\nsetupFilter: setupDropFilter(stack.Input, stack.IPHeaderFilter{InputInterface: nic1Name}),\n- expectDrop: true,\n+ expectResult: echoRequestDroppedAtInput,\n},\n{\n- name: \"Drop with input NIC filtering on delivered NIC\",\n+ name: \"Input Drop with input NIC filtering on delivered NIC\",\nsetupFilter: setupDropFilter(stack.Input, stack.IPHeaderFilter{InputInterface: nic2Name}),\n- expectDrop: false,\n+ expectResult: noneDropped,\n},\n{\n- name: \"Drop with input NIC filtering on other NIC\",\n+ name: \"Input Drop with input NIC filtering on other NIC\",\nsetupFilter: setupDropFilter(stack.Input, stack.IPHeaderFilter{InputInterface: otherNICName}),\n- expectDrop: false,\n+ expectResult: noneDropped,\n+ },\n+\n+ {\n+ name: \"Forward Drop\",\n+ setupFilter: setupDropFilter(stack.Forward, stack.IPHeaderFilter{}),\n+ expectResult: echoRequestDroppedAtForward,\n+ },\n+\n+ {\n+ name: \"Output Drop\",\n+ setupFilter: setupDropFilter(stack.Output, stack.IPHeaderFilter{}),\n+ expectResult: echoReplyDropped,\n},\n}\n@@ -1121,8 +1142,34 @@ func TestInputHookWithLocalForwarding(t *testing.T) {\nif got := ip1Stats.ValidPacketsReceived.Value(); got != 1 {\nt.Errorf(\"got ip1Stats.ValidPacketsReceived.Value() = %d, want = 1\", got)\n}\n- if got, want := ip1Stats.PacketsSent.Value(), boolToInt(!subTest.expectDrop); got != want {\n- t.Errorf(\"got ip1Stats.PacketsSent.Value() = %d, want = %d\", got, want)\n+\n+ expectedIP1StatIPTablesForawrdDropped := uint64(0)\n+ expectedIP1StatIPTablesOutputDropped := uint64(0)\n+ expectedIP1StatPacketsSent := uint64(0)\n+ expectedIP2StatValidPacketsReceived := uint64(1)\n+ expectedIP2StatIPTablesInputDropped := uint64(0)\n+ switch subTest.expectResult {\n+ case noneDropped:\n+ expectedIP1StatPacketsSent = 1\n+ case echoRequestDroppedAtInput:\n+ expectedIP2StatIPTablesInputDropped = 1\n+ case echoRequestDroppedAtForward:\n+ expectedIP1StatIPTablesForawrdDropped = 1\n+ expectedIP2StatValidPacketsReceived = 0\n+ case echoReplyDropped:\n+ expectedIP1StatIPTablesOutputDropped = 1\n+ default:\n+ t.Fatalf(\"unhandled expectResult = %d\", subTest.expectResult)\n+ }\n+\n+ if got := ip1Stats.IPTablesForwardDropped.Value(); got != expectedIP1StatIPTablesForawrdDropped {\n+ t.Errorf(\"got ip1Stats.IPTablesForwardDropped.Value() = %d, want = %d\", got, expectedIP1StatIPTablesForawrdDropped)\n+ }\n+ if got := ip1Stats.IPTablesOutputDropped.Value(); got != 
expectedIP1StatIPTablesOutputDropped {\n+ t.Errorf(\"got ip1Stats.IPTablesOutputDropped.Value() = %d, want = %d\", got, expectedIP1StatIPTablesOutputDropped)\n+ }\n+ if got := ip1Stats.PacketsSent.Value(); got != expectedIP1StatPacketsSent {\n+ t.Errorf(\"got ip1Stats.PacketsSent.Value() = %d, want = %d\", got, expectedIP1StatPacketsSent)\n}\nep2, err := s.GetNetworkEndpoint(nicID2, test.netProto)\n@@ -1138,19 +1185,20 @@ func TestInputHookWithLocalForwarding(t *testing.T) {\nif got := ip2Stats.PacketsReceived.Value(); got != 0 {\nt.Errorf(\"got ip2Stats.PacketsReceived.Value() = %d, want = 0\", got)\n}\n- if got := ip2Stats.ValidPacketsReceived.Value(); got != 1 {\n- t.Errorf(\"got ip2Stats.ValidPacketsReceived.Value() = %d, want = 1\", got)\n+ if got := ip2Stats.ValidPacketsReceived.Value(); got != expectedIP2StatValidPacketsReceived {\n+ t.Errorf(\"got ip2Stats.ValidPacketsReceived.Value() = %d, want = %d\", got, expectedIP2StatValidPacketsReceived)\n}\n- if got, want := ip2Stats.IPTablesInputDropped.Value(), boolToInt(subTest.expectDrop); got != want {\n- t.Errorf(\"got ip2Stats.IPTablesInputDropped.Value() = %d, want = %d\", got, want)\n+ if got := ip2Stats.IPTablesInputDropped.Value(); got != expectedIP2StatIPTablesInputDropped {\n+ t.Errorf(\"got ip2Stats.IPTablesInputDropped.Value() = %d, want = %d\", got, expectedIP2StatIPTablesInputDropped)\n}\nif got := ip2Stats.PacketsSent.Value(); got != 0 {\nt.Errorf(\"got ip2Stats.PacketsSent.Value() = %d, want = 0\", got)\n}\n- if p := e1.Read(); (p != nil) == subTest.expectDrop {\n- t.Errorf(\"got e1.Read() = %#v, want = (_ == nil) = %t\", p, !subTest.expectDrop)\n- } else if !subTest.expectDrop {\n+ expectPacket := subTest.expectResult == noneDropped\n+ if p := e1.Read(); (p != nil) != expectPacket {\n+ t.Errorf(\"got e1.Read() = %#v, want = (_ == nil) = %t\", p, expectPacket)\n+ } else if expectPacket {\ntest.checker(t, stack.PayloadSince(p.NetworkHeader()))\n}\nif p := e2.Read(); p != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Perform Output hook for ICMPv4 Reply
Before this change, the Output hook was not performed for locally
generated ICMPv4 replies, so NAT was never applied to them. This change
fixes that bug.
PiperOrigin-RevId: 418513563 |
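One detail worth noting in the icmp.go hunk above: the reply's IPv4 header checksum is zeroed with SetChecksum(0) before being recomputed, because the Internet checksum is defined over a header whose checksum field is zero. As background only, here is a standalone Go sketch of the RFC 1071 ones'-complement checksum — not the gVisor implementation, and the sample header bytes are made up.

```go
package main

import "fmt"

// checksum computes the RFC 1071 Internet checksum over b. The field
// being checksummed (bytes 10-11 of an IPv4 header) must be zeroed
// first, which is why the diff above zeroes it before recomputing.
func checksum(b []byte) uint16 {
	var sum uint32
	for len(b) >= 2 {
		sum += uint32(b[0])<<8 | uint32(b[1])
		b = b[2:]
	}
	if len(b) == 1 { // an odd trailing byte is padded with zero
		sum += uint32(b[0]) << 8
	}
	for sum > 0xffff { // fold the carries back into 16 bits
		sum = (sum >> 16) + (sum & 0xffff)
	}
	return ^uint16(sum)
}

func main() {
	// Minimal 20-byte IPv4 header with the checksum field (bytes
	// 10-11) zeroed; the values are illustrative only.
	hdr := []byte{
		0x45, 0x00, 0x00, 0x54, 0x00, 0x00, 0x40, 0x00,
		0x40, 0x01, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
		0xc0, 0xa8, 0x00, 0x02,
	}
	fmt.Printf("header checksum: %#04x\n", checksum(hdr))
}
```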
260,004 | 27.12.2021 11:19:12 | 28,800 | 9c9fdfa07542d264b39e017b3e7df9d4de7b4b83 | Populate ethernet header fields from packet buffer
...instead of the arguments provided in the call to WritePackets.
QDisc always passes 0 for the protocol for calls to WritePackets as
each packet may have a different protocol number.
A later change will remove the unnecessary arguments from WritePackets. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/ethernet/ethernet.go",
"new_path": "pkg/tcpip/link/ethernet/ethernet.go",
"diff": "@@ -91,7 +91,7 @@ func (e *Endpoint) WritePackets(r stack.RouteInfo, pkts stack.PacketBufferList,\nlinkAddr := e.LinkAddress()\nfor pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {\n- e.AddHeader(linkAddr, r.RemoteLinkAddress, proto, pkt)\n+ e.AddHeader(linkAddr, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\n}\nreturn e.Endpoint.WritePackets(r, pkts, proto)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/ethernet/ethernet_test.go",
"new_path": "pkg/tcpip/link/ethernet/ethernet_test.go",
"diff": "@@ -119,3 +119,49 @@ func TestMTU(t *testing.T) {\n})\n}\n}\n+\n+func TestWritePacketsAddHeader(t *testing.T) {\n+ const (\n+ localLinkAddr = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x06\")\n+ remoteLinkAddr = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x07\")\n+\n+ netProto = 55\n+ )\n+\n+ c := channel.New(1, header.EthernetMinimumSize, localLinkAddr)\n+ e := ethernet.New(c)\n+\n+ {\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ ReserveHeaderBytes: int(e.MaxHeaderLength()),\n+ })\n+ pkt.NetworkProtocolNumber = netProto\n+ pkt.EgressRoute.RemoteLinkAddress = remoteLinkAddr\n+\n+ var pkts stack.PacketBufferList\n+ pkts.PushFront(pkt)\n+ if n, err := e.WritePackets(stack.RouteInfo{}, pkts, 0 /* protocol */); err != nil {\n+ t.Fatalf(\"e.WritePackets({}, _, 0): %s\", err)\n+ } else if n != 1 {\n+ t.Fatalf(\"got e.WritePackets({}, _, 0) = %d, want = 1\", n)\n+ }\n+ }\n+\n+ {\n+ pkt := c.Read()\n+ if pkt == nil {\n+ t.Fatal(\"expected to read a packet\")\n+ }\n+\n+ eth := header.Ethernet(pkt.LinkHeader().View())\n+ if got := eth.SourceAddress(); got != localLinkAddr {\n+ t.Errorf(\"got eth.SourceAddress() = %s, want = %s\", got, localLinkAddr)\n+ }\n+ if got := eth.DestinationAddress(); got != remoteLinkAddr {\n+ t.Errorf(\"got eth.DestinationAddress() = %s, want = %s\", got, remoteLinkAddr)\n+ }\n+ if got := eth.Type(); got != netProto {\n+ t.Errorf(\"got eth.Type() = %d, want = %d\", got, netProto)\n+ }\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Populate ethernet header fields from packet buffer
...instead of the arguments provided in the call to WritePackets.
QDisc always passes 0 for the protocol for calls to WritePackets as
each packet may have a different protocol number.
A later change will remove the unnecessary arguments from WritePackets.
PiperOrigin-RevId: 418513699 |
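The ethernet.go hunk above switches AddHeader to per-packet values (pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber) because a batch may mix protocols. As background, an Ethernet II header is simply destination MAC, source MAC, and a big-endian EtherType; the following is a self-contained Go sketch, not gVisor code.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// ethHeader serializes an Ethernet II header: destination MAC, source
// MAC, then the 16-bit EtherType in network byte order. Each packet
// carries its own EtherType, which is why a batched write must read it
// per packet, as the commit above does.
func ethHeader(dst, src net.HardwareAddr, etherType uint16) []byte {
	b := make([]byte, 14)
	copy(b[0:6], dst)
	copy(b[6:12], src)
	binary.BigEndian.PutUint16(b[12:14], etherType)
	return b
}

func main() {
	// MACs borrowed from the test above; 0x86dd is the IPv6 EtherType.
	src, _ := net.ParseMAC("02:02:03:04:05:06")
	dst, _ := net.ParseMAC("02:02:03:04:05:07")
	fmt.Printf("% x\n", ethHeader(dst, src, 0x86dd))
}
```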
259,898 | 28.12.2021 12:38:32 | 28,800 | 6838e0fe246e124f3403373bbdd4e6696ba517f3 | Remove unused build target
The new packetimpact runner no longer requires static linking for posix_server | [
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/dut/BUILD",
"new_path": "test/packetimpact/dut/BUILD",
"diff": "load(\"//tools:defs.bzl\", \"cc_binary\", \"go_library\", \"grpcpp\")\npackage(\n- default_visibility = [\"//test/packetimpact:__subpackages__\"],\nlicenses = [\"notice\"],\n)\ncc_binary(\nname = \"posix_server\",\nsrcs = [\"posix_server.cc\"],\n- linkstatic = 1,\n- static = True, # This is needed for running in a docker container.\n- deps = [\n- grpcpp,\n- \"//test/packetimpact/proto:posix_server_cc_grpc_proto\",\n- \"//test/packetimpact/proto:posix_server_cc_proto\",\n- \"@com_google_absl//absl/strings:str_format\",\n- ],\n-)\n-\n-cc_binary(\n- name = \"posix_server_dynamic\",\n- srcs = [\"posix_server.cc\"],\n+ visibility = [\"//test/packetimpact:__subpackages__\"],\ndeps = [\ngrpcpp,\n\"//test/packetimpact/proto:posix_server_cc_grpc_proto\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove unused build target
The new packetimpact runner no longer requires static linking for posix_server
PiperOrigin-RevId: 418664824 |
259,951 | 29.12.2021 12:31:04 | 28,800 | 58b9bdfc21e792c5d529ec9f4ab0b2f2cd1ee082 | Use protocol-specific options for TTL/HopLimit
The new HopLimit matches the IPV6_UNICAST_HOPS socket option.
Updates #6389 | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -1351,6 +1351,26 @@ func getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name\nv := primitive.Int32(boolToInt32(ep.SocketOptions().GetV6Only()))\nreturn &v, nil\n+ case linux.IPV6_UNICAST_HOPS:\n+ if outLen < sizeOfInt32 {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ v, err := ep.GetSockOptInt(tcpip.IPv6HopLimitOption)\n+ if err != nil {\n+ return nil, syserr.TranslateNetstackError(err)\n+ }\n+\n+ // Fill in the default value, if needed.\n+ vP := primitive.Int32(v)\n+ if vP == -1 {\n+ // TODO(https://github.com/google/gvisor/issues/6973): Retrieve the\n+ // configured DefaultTTLOption of the IPv6 protocol.\n+ vP = DefaultTTL\n+ }\n+\n+ return &vP, nil\n+\ncase linux.IPV6_PATHMTU:\nt.Kernel().EmitUnimplementedEvent(t)\n@@ -1499,7 +1519,7 @@ func getSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name in\nreturn nil, syserr.ErrInvalidArgument\n}\n- v, err := ep.GetSockOptInt(tcpip.TTLOption)\n+ v, err := ep.GetSockOptInt(tcpip.IPv4TTLOption)\nif err != nil {\nreturn nil, syserr.TranslateNetstackError(err)\n}\n@@ -1507,6 +1527,8 @@ func getSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name in\n// Fill in the default value, if needed.\nvP := primitive.Int32(v)\nif vP == 0 {\n+ // TODO(https://github.com/google/gvisor/issues/6973): Retrieve the\n+ // configured DefaultTTLOption of the IPv4 protocol.\nvP = DefaultTTL\n}\n@@ -2200,6 +2222,16 @@ func setSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name\nep.SocketOptions().SetIPv6ReceivePacketInfo(v != 0)\nreturn nil\n+ case linux.IPV6_UNICAST_HOPS:\n+ if len(optVal) < sizeOfInt32 {\n+ return syserr.ErrInvalidArgument\n+ }\n+ v := int32(hostarch.ByteOrder.Uint32(optVal))\n+ if v < -1 || v > 255 {\n+ return syserr.ErrInvalidArgument\n+ }\n+ return syserr.TranslateNetstackError(ep.SetSockOptInt(tcpip.IPv6HopLimitOption, int(v)))\n+\ncase linux.IPV6_TCLASS:\nif len(optVal) < sizeOfInt32 {\nreturn syserr.ErrInvalidArgument\n@@ -2410,7 +2442,7 @@ func setSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name in\n} else if v < 1 || v > 255 {\nreturn syserr.ErrInvalidArgument\n}\n- return syserr.TranslateNetstackError(ep.SetSockOptInt(tcpip.TTLOption, int(v)))\n+ return syserr.TranslateNetstackError(ep.SetSockOptInt(tcpip.IPv4TTLOption, int(v)))\ncase linux.IP_TOS:\nif len(optVal) == 0 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -731,12 +731,19 @@ const (\n// number of unread bytes in the output buffer should be returned.\nSendQueueSizeOption\n- // TTLOption is used by SetSockOptInt/GetSockOptInt to control the\n- // default TTL/hop limit value for unicast messages. The default is\n- // protocol specific.\n+ // IPv4TTLOption is used by SetSockOptInt/GetSockOptInt to control the default\n+ // TTL value for unicast messages.\n//\n- // A zero value indicates the default.\n- TTLOption\n+ // The default is configured by DefaultTTLOption. A UseDefaultIPv4TTL value\n+ // configures the endpoint to use the default.\n+ IPv4TTLOption\n+\n+ // IPv6HopLimitOption is used by SetSockOptInt/GetSockOptInt to control the\n+ // default hop limit value for unicast messages.\n+ //\n+ // The default is configured by DefaultTTLOption. A UseDefaultIPv6HopLimit\n+ // value configures the endpoint to use the default.\n+ IPv6HopLimitOption\n// TCPSynCountOption is used by SetSockOptInt/GetSockOptInt to specify\n// the number of SYN retransmits that TCP should send before aborting\n@@ -752,6 +759,18 @@ const (\nTCPWindowClampOption\n)\n+const (\n+ // UseDefaultIPv4TTL is the IPv4TTLOption value that configures an endpoint to\n+ // use the default ttl currently configured by the IPv4 protocol (see\n+ // DefaultTTLOption).\n+ UseDefaultIPv4TTL = 0\n+\n+ // UseDefaultIPv6HopLimit is the IPv6HopLimitOption value that configures an\n+ // endpoint to use the default hop limit currently configured by the IPv6\n+ // protocol (see DefaultTTLOption).\n+ UseDefaultIPv6HopLimit = -1\n+)\n+\nconst (\n// PMTUDiscoveryWant is a setting of the MTUDiscoverOption to use\n// per-route settings.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/internal/network/endpoint.go",
"new_path": "pkg/tcpip/transport/internal/network/endpoint.go",
"diff": "@@ -53,9 +53,10 @@ type Endpoint struct {\nconnectedRoute *stack.Route `state:\"manual\"`\n// +checklocks:mu\nmulticastMemberships map[multicastMembership]struct{}\n- // TODO(https://gvisor.dev/issue/6389): Use different fields for IPv4/IPv6.\n// +checklocks:mu\n- ttl uint8\n+ ipv4TTL uint8\n+ // +checklocks:mu\n+ ipv6HopLimit int16\n// TODO(https://gvisor.dev/issue/6389): Use different fields for IPv4/IPv6.\n// +checklocks:mu\nmulticastTTL uint8\n@@ -131,6 +132,8 @@ func (e *Endpoint) Init(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, tr\nTransProto: transProto,\n},\neffectiveNetProto: netProto,\n+ ipv4TTL: tcpip.UseDefaultIPv4TTL,\n+ ipv6HopLimit: tcpip.UseDefaultIPv6HopLimit,\n// Linux defaults to TTL=1.\nmulticastTTL: 1,\nmulticastMemberships: make(map[multicastMembership]struct{}),\n@@ -191,16 +194,27 @@ func (e *Endpoint) SetOwner(owner tcpip.PacketOwner) {\ne.owner = owner\n}\n-func calculateTTL(route *stack.Route, ttl uint8, multicastTTL uint8) uint8 {\n- if header.IsV4MulticastAddress(route.RemoteAddress()) || header.IsV6MulticastAddress(route.RemoteAddress()) {\n- return multicastTTL\n+// +checklocksread:e.mu\n+func (e *Endpoint) calculateTTL(route *stack.Route) uint8 {\n+ remoteAddress := route.RemoteAddress()\n+ if header.IsV4MulticastAddress(remoteAddress) || header.IsV6MulticastAddress(remoteAddress) {\n+ return e.multicastTTL\n}\n- if ttl == 0 {\n+ switch netProto := route.NetProto(); netProto {\n+ case header.IPv4ProtocolNumber:\n+ if e.ipv4TTL == 0 {\nreturn route.DefaultTTL()\n}\n-\n- return ttl\n+ return e.ipv4TTL\n+ case header.IPv6ProtocolNumber:\n+ if e.ipv6HopLimit == -1 {\n+ return route.DefaultTTL()\n+ }\n+ return uint8(e.ipv6HopLimit)\n+ default:\n+ panic(fmt.Sprintf(\"invalid protocol number = %d\", netProto))\n+ }\n}\n// WriteContext holds the context for a write.\n@@ -327,7 +341,7 @@ func (e *Endpoint) AcquireContextForWrite(opts tcpip.WriteOptions) (WriteContext\nreturn WriteContext{\ntransProto: e.transProto,\nroute: route,\n- ttl: calculateTTL(route, e.ttl, e.multicastTTL),\n+ ttl: e.calculateTTL(route),\ntos: tos,\nowner: e.owner,\n}, nil\n@@ -602,9 +616,14 @@ func (e *Endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) tcpip.Error {\ne.multicastTTL = uint8(v)\ne.mu.Unlock()\n- case tcpip.TTLOption:\n+ case tcpip.IPv4TTLOption:\n+ e.mu.Lock()\n+ e.ipv4TTL = uint8(v)\n+ e.mu.Unlock()\n+\n+ case tcpip.IPv6HopLimitOption:\ne.mu.Lock()\n- e.ttl = uint8(v)\n+ e.ipv6HopLimit = int16(v)\ne.mu.Unlock()\ncase tcpip.IPv4TOSOption:\n@@ -634,9 +653,15 @@ func (e *Endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) {\ne.mu.Unlock()\nreturn v, nil\n- case tcpip.TTLOption:\n+ case tcpip.IPv4TTLOption:\n+ e.mu.Lock()\n+ v := int(e.ipv4TTL)\n+ e.mu.Unlock()\n+ return v, nil\n+\n+ case tcpip.IPv6HopLimitOption:\ne.mu.Lock()\n- v := int(e.ttl)\n+ v := int(e.ipv6HopLimit)\ne.mu.Unlock()\nreturn v, nil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/accept.go",
"new_path": "pkg/tcpip/transport/tcp/accept.go",
"diff": "@@ -435,7 +435,7 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) tcpip.Err\n// RFC 793 section 3.4 page 35 (figure 12) outlines that a RST\n// must be sent in response to a SYN-ACK while in the listen\n// state to prevent completing a handshake from an old SYN.\n- return replyWithReset(e.stack, s, e.sendTOS, e.ttl)\n+ return replyWithReset(e.stack, s, e.sendTOS, e.ipv4TTL, e.ipv6HopLimit)\n}\nswitch {\n@@ -569,7 +569,7 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) tcpip.Err\ncookie := ctx.createCookie(s.id, s.sequenceNumber, encodeMSS(opts.MSS))\nfields := tcpFields{\nid: s.id,\n- ttl: e.ttl,\n+ ttl: calculateTTL(route, e.ipv4TTL, e.ipv6HopLimit),\ntos: e.sendTOS,\nflags: header.TCPFlagSyn | header.TCPFlagAck,\nseq: cookie,\n@@ -616,7 +616,7 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) tcpip.Err\n// The only time we should reach here when a connection\n// was opened and closed really quickly and a delayed\n// ACK was received from the sender.\n- return replyWithReset(e.stack, s, e.sendTOS, e.ttl)\n+ return replyWithReset(e.stack, s, e.sendTOS, e.ipv4TTL, e.ipv6HopLimit)\n}\n// Keep hold of acceptMu until the new endpoint is in the accept queue (or\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/connect.go",
"new_path": "pkg/tcpip/transport/tcp/connect.go",
"diff": "@@ -283,7 +283,7 @@ func (h *handshake) synSentState(s *segment) tcpip.Error {\n// but resend our own SYN and wait for it to be acknowledged in the\n// SYN-RCVD state.\nh.state = handshakeSynRcvd\n- ttl := h.ep.ttl\n+ ttl := calculateTTL(h.ep.route, h.ep.ipv4TTL, h.ep.ipv6HopLimit)\namss := h.ep.amss\nh.ep.setEndpointState(StateSynRecv)\nsynOpts := header.TCPSynOptions{\n@@ -366,7 +366,7 @@ func (h *handshake) synRcvdState(s *segment) tcpip.Error {\n}\nh.ep.sendSynTCP(h.ep.route, tcpFields{\nid: h.ep.TransportEndpointInfo.ID,\n- ttl: h.ep.ttl,\n+ ttl: calculateTTL(h.ep.route, h.ep.ipv4TTL, h.ep.ipv6HopLimit),\ntos: h.ep.sendTOS,\nflags: h.flags,\nseq: h.iss,\n@@ -508,7 +508,7 @@ func (h *handshake) start() {\nh.sendSYNOpts = synOpts\nh.ep.sendSynTCP(h.ep.route, tcpFields{\nid: h.ep.TransportEndpointInfo.ID,\n- ttl: h.ep.ttl,\n+ ttl: calculateTTL(h.ep.route, h.ep.ipv4TTL, h.ep.ipv6HopLimit),\ntos: h.ep.sendTOS,\nflags: h.flags,\nseq: h.iss,\n@@ -556,7 +556,7 @@ func (h *handshake) complete() tcpip.Error {\nif h.active || !h.acked || h.deferAccept != 0 && h.ep.stack.Clock().NowMonotonic().Sub(h.startTime) > h.deferAccept {\nh.ep.sendSynTCP(h.ep.route, tcpFields{\nid: h.ep.TransportEndpointInfo.ID,\n- ttl: h.ep.ttl,\n+ ttl: calculateTTL(h.ep.route, h.ep.ipv4TTL, h.ep.ipv6HopLimit),\ntos: h.ep.sendTOS,\nflags: h.flags,\nseq: h.iss,\n@@ -847,9 +847,6 @@ func sendTCPBatch(r *stack.Route, tf tcpFields, data buffer.VectorisedView, gso\nbuildTCPHdr(r, tf, pkt, gso)\ntf.seq = tf.seq.Add(seqnum.Size(packetSize))\npkt.GSOOptions = gso\n- if tf.ttl == 0 {\n- tf.ttl = r.DefaultTTL()\n- }\nif err := r.WritePacket(stack.NetworkHeaderParams{Protocol: ProtocolNumber, TTL: tf.ttl, TOS: tf.tos}, pkt); err != nil {\nr.Stats().TCP.SegmentSendErrors.Increment()\npkt.DecRef()\n@@ -883,9 +880,6 @@ func sendTCP(r *stack.Route, tf tcpFields, data buffer.VectorisedView, gso stack\npkt.Owner = owner\nbuildTCPHdr(r, tf, pkt, gso)\n- if tf.ttl == 0 {\n- tf.ttl = r.DefaultTTL()\n- }\nif err := r.WritePacket(stack.NetworkHeaderParams{Protocol: ProtocolNumber, TTL: tf.ttl, TOS: tf.tos}, pkt); err != nil {\nr.Stats().TCP.SegmentSendErrors.Increment()\nreturn err\n@@ -945,7 +939,7 @@ func (e *endpoint) sendRaw(data buffer.VectorisedView, flags header.TCPFlags, se\noptions := e.makeOptions(sackBlocks)\nerr := e.sendTCP(e.route, tcpFields{\nid: e.TransportEndpointInfo.ID,\n- ttl: e.ttl,\n+ ttl: calculateTTL(e.route, e.ipv4TTL, e.ipv6HopLimit),\ntos: e.sendTOS,\nflags: flags,\nseq: seq,\n@@ -1049,7 +1043,7 @@ func (e *endpoint) tryDeliverSegmentFromClosedEndpoint(s *segment) {\n)\n}\nif ep == nil {\n- replyWithReset(e.stack, s, stack.DefaultTOS, 0 /* ttl */)\n+ replyWithReset(e.stack, s, stack.DefaultTOS, tcpip.UseDefaultIPv4TTL, tcpip.UseDefaultIPv6HopLimit)\ns.decRef()\nreturn\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -436,7 +436,8 @@ type endpoint struct {\nisRegistered bool `state:\"manual\"`\nboundNICID tcpip.NICID\nroute *stack.Route `state:\"manual\"`\n- ttl uint8\n+ ipv4TTL uint8\n+ ipv6HopLimit int16\nisConnectNotified bool\n// h stores a reference to the current handshake state if the endpoint is in\n@@ -783,6 +784,25 @@ func (e *endpoint) recentTimestamp() uint32 {\nreturn e.RecentTS\n}\n+// TODO(gvisor.dev/issue/6974): Remove once tcp endpoints are composed with a\n+// network.Endpoint, which also defines this function.\n+func calculateTTL(route *stack.Route, ipv4TTL uint8, ipv6HopLimit int16) uint8 {\n+ switch netProto := route.NetProto(); netProto {\n+ case header.IPv4ProtocolNumber:\n+ if ipv4TTL == tcpip.UseDefaultIPv4TTL {\n+ return route.DefaultTTL()\n+ }\n+ return ipv4TTL\n+ case header.IPv6ProtocolNumber:\n+ if ipv6HopLimit == tcpip.UseDefaultIPv6HopLimit {\n+ return route.DefaultTTL()\n+ }\n+ return uint8(ipv6HopLimit)\n+ default:\n+ panic(fmt.Sprintf(\"invalid protocol number = %d\", netProto))\n+ }\n+}\n+\n// keepalive is a synchronization wrapper used to appease stateify. See the\n// comment in endpoint, where it is used.\n//\n@@ -818,6 +838,8 @@ func newEndpoint(s *stack.Stack, protocol *protocol, netProto tcpip.NetworkProto\ncount: DefaultKeepaliveCount,\n},\nuniqueID: s.UniqueID(),\n+ ipv4TTL: tcpip.UseDefaultIPv4TTL,\n+ ipv6HopLimit: tcpip.UseDefaultIPv6HopLimit,\ntxHash: s.Rand().Uint32(),\nwindowClamp: DefaultReceiveBufferSize,\nmaxSynRetries: DefaultSynRetries,\n@@ -1775,9 +1797,14 @@ func (e *endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) tcpip.Error {\nreturn &tcpip.ErrNotSupported{}\n}\n- case tcpip.TTLOption:\n+ case tcpip.IPv4TTLOption:\ne.LockUser()\n- e.ttl = uint8(v)\n+ e.ipv4TTL = uint8(v)\n+ e.UnlockUser()\n+\n+ case tcpip.IPv6HopLimitOption:\n+ e.LockUser()\n+ e.ipv6HopLimit = int16(v)\ne.UnlockUser()\ncase tcpip.TCPSynCountOption:\n@@ -1960,9 +1987,15 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) {\ncase tcpip.ReceiveQueueSizeOption:\nreturn e.readyReceiveSize()\n- case tcpip.TTLOption:\n+ case tcpip.IPv4TTLOption:\n+ e.LockUser()\n+ v := int(e.ipv4TTL)\n+ e.UnlockUser()\n+ return v, nil\n+\n+ case tcpip.IPv6HopLimitOption:\ne.LockUser()\n- v := int(e.ttl)\n+ v := int(e.ipv6HopLimit)\ne.UnlockUser()\nreturn v, nil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/forwarder.go",
"new_path": "pkg/tcpip/transport/tcp/forwarder.go",
"diff": "@@ -132,7 +132,7 @@ func (r *ForwarderRequest) Complete(sendReset bool) {\nr.forwarder.mu.Unlock()\nif sendReset {\n- replyWithReset(r.forwarder.stack, r.segment, stack.DefaultTOS, 0 /* ttl */)\n+ replyWithReset(r.forwarder.stack, r.segment, stack.DefaultTOS, tcpip.UseDefaultIPv4TTL, tcpip.UseDefaultIPv6HopLimit)\n}\n// Release all resources.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/protocol.go",
"new_path": "pkg/tcpip/transport/tcp/protocol.go",
"diff": "@@ -165,7 +165,7 @@ func (p *protocol) HandleUnknownDestinationPacket(id stack.TransportEndpointID,\n}\nif !s.flags.Contains(header.TCPFlagRst) {\n- replyWithReset(p.stack, s, stack.DefaultTOS, 0)\n+ replyWithReset(p.stack, s, stack.DefaultTOS, tcpip.UseDefaultIPv4TTL, tcpip.UseDefaultIPv6HopLimit)\n}\nreturn stack.UnknownDestinationPacketHandled\n@@ -191,14 +191,17 @@ func (p *protocol) tsOffset(src, dst tcpip.Address) tcp.TSOffset {\n// replyWithReset replies to the given segment with a reset segment.\n//\n-// If the passed TTL is 0, then the route's default TTL will be used.\n-func replyWithReset(st *stack.Stack, s *segment, tos, ttl uint8) tcpip.Error {\n+// If the relevant TTL has its reset value (0 for ipv4TTL, -1 for ipv6HopLimit),\n+// then the route's default TTL will be used.\n+func replyWithReset(st *stack.Stack, s *segment, tos, ipv4TTL uint8, ipv6HopLimit int16) tcpip.Error {\nroute, err := st.FindRoute(s.nicID, s.dstAddr, s.srcAddr, s.netProto, false /* multicastLoop */)\nif err != nil {\nreturn err\n}\ndefer route.Release()\n+ ttl := calculateTTL(route, ipv4TTL, ipv6HopLimit)\n+\n// Get the seqnum from the packet if the ack flag is set.\nseq := seqnum.Value(0)\nack := seqnum.Value(0)\n@@ -221,10 +224,6 @@ func replyWithReset(st *stack.Stack, s *segment, tos, ttl uint8) tcpip.Error {\nack = s.sequenceNumber.Add(s.logicalLen())\n}\n- if ttl == 0 {\n- ttl = route.DefaultTTL()\n- }\n-\nreturn sendTCP(route, tcpFields{\nid: s.id,\nttl: ttl,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"new_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"diff": "@@ -3466,9 +3466,11 @@ func TestSetTTL(t *testing.T) {\nname string\nprotoNum tcpip.NetworkProtocolNumber\naddr tcpip.Address\n+ relevantOpt tcpip.SockOptInt\n+ irrelevantOpt tcpip.SockOptInt\n}{\n- {\"ipv4\", ipv4.ProtocolNumber, context.TestAddr},\n- {\"ipv6\", ipv6.ProtocolNumber, context.TestV6Addr},\n+ {\"ipv4\", ipv4.ProtocolNumber, context.TestAddr, tcpip.IPv4TTLOption, tcpip.IPv6HopLimitOption},\n+ {\"ipv6\", ipv6.ProtocolNumber, context.TestV6Addr, tcpip.IPv6HopLimitOption, tcpip.IPv4TTLOption},\n} {\nt.Run(fmt.Sprint(test.name), func(t *testing.T) {\nfor _, wantTTL := range []uint8{1, 2, 50, 64, 128, 254, 255} {\n@@ -3482,8 +3484,13 @@ func TestSetTTL(t *testing.T) {\nt.Fatalf(\"NewEndpoint failed: %s\", err)\n}\n- if err := c.EP.SetSockOptInt(tcpip.TTLOption, int(wantTTL)); err != nil {\n- t.Fatalf(\"SetSockOptInt(TTLOption, %d) failed: %s\", wantTTL, err)\n+ if err := c.EP.SetSockOptInt(test.relevantOpt, int(wantTTL)); err != nil {\n+ t.Fatalf(\"SetSockOptInt(%d, %d) failed: %s\", test.relevantOpt, wantTTL, err)\n+ }\n+ // Set a different ttl/hoplimit for the unused protocol, showing that\n+ // it does not affect the other protocol.\n+ if err := c.EP.SetSockOptInt(test.irrelevantOpt, int(wantTTL+1)); err != nil {\n+ t.Fatalf(\"SetSockOptInt(%d, %d) failed: %s\", test.irrelevantOpt, wantTTL, err)\n}\n{\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/udp_test.go",
"new_path": "pkg/tcpip/transport/udp/udp_test.go",
"diff": "@@ -287,13 +287,6 @@ func (flow testFlow) isReverseMulticast() bool {\n}\n}\n-func (flow testFlow) ttlOption() tcpip.SockOptInt {\n- if flow.isMulticast() {\n- return tcpip.MulticastTTLOption\n- }\n- return tcpip.TTLOption\n-}\n-\ntype testContext struct {\nt *testing.T\nlinkEP *channel.Endpoint\n@@ -1615,7 +1608,7 @@ func (*testInterface) Enabled() bool {\nreturn true\n}\n-func TestNonMulticastDefaultTTL(t *testing.T) {\n+func TestDefaultTTL(t *testing.T) {\nfor _, flow := range []testFlow{unicastV4, unicastV4in6, unicastV6, unicastV6Only, broadcast, broadcastIn6} {\nt.Run(fmt.Sprintf(\"flow:%s\", flow), func(t *testing.T) {\nc := newDualTestContext(t, defaultMTU)\n@@ -1642,8 +1635,43 @@ func TestNonMulticastDefaultTTL(t *testing.T) {\n}\n}\n-func TestSetTTL(t *testing.T) {\n- for _, flow := range []testFlow{unicastV4, unicastV4in6, unicastV6, unicastV6Only, multicastV4, multicastV4in6, multicastV6, broadcast, broadcastIn6} {\n+func TestNonMulticastDefaultTTL(t *testing.T) {\n+ for _, flow := range []testFlow{unicastV4, unicastV4in6, unicastV6, unicastV6Only, broadcast, broadcastIn6} {\n+ t.Run(fmt.Sprintf(\"flow:%s\", flow), func(t *testing.T) {\n+ for _, wantTTL := range []uint8{1, 2, 50, 64, 128, 254, 255} {\n+ t.Run(fmt.Sprintf(\"TTL:%d\", wantTTL), func(t *testing.T) {\n+ c := newDualTestContext(t, defaultMTU)\n+ defer c.cleanup()\n+\n+ c.createEndpointForFlow(flow)\n+\n+ var relevantOpt tcpip.SockOptInt\n+ var irrelevantOpt tcpip.SockOptInt\n+ if flow.isV4() {\n+ relevantOpt = tcpip.IPv4TTLOption\n+ irrelevantOpt = tcpip.IPv6HopLimitOption\n+ } else {\n+ relevantOpt = tcpip.IPv6HopLimitOption\n+ irrelevantOpt = tcpip.IPv4TTLOption\n+ }\n+ if err := c.ep.SetSockOptInt(relevantOpt, int(wantTTL)); err != nil {\n+ c.t.Fatalf(\"SetSockOptInt(%d, %d) failed: %s\", relevantOpt, wantTTL, err)\n+ }\n+ // Set a different ttl/hoplimit for the unused protocol, showing that\n+ // it does not affect the other protocol.\n+ if err := c.ep.SetSockOptInt(irrelevantOpt, int(wantTTL+1)); err != nil {\n+ c.t.Fatalf(\"SetSockOptInt(%d, %d) failed: %s\", irrelevantOpt, wantTTL, err)\n+ }\n+\n+ testWrite(c, flow, checker.TTL(wantTTL))\n+ })\n+ }\n+ })\n+ }\n+}\n+\n+func TestSetMulticastTTL(t *testing.T) {\n+ for _, flow := range []testFlow{multicastV4, multicastV4in6, multicastV6} {\nt.Run(fmt.Sprintf(\"flow:%s\", flow), func(t *testing.T) {\nfor _, wantTTL := range []uint8{1, 2, 50, 64, 128, 254, 255} {\nt.Run(fmt.Sprintf(\"TTL:%d\", wantTTL), func(t *testing.T) {\n@@ -1652,9 +1680,8 @@ func TestSetTTL(t *testing.T) {\nc.createEndpointForFlow(flow)\n- opt := flow.ttlOption()\n- if err := c.ep.SetSockOptInt(opt, int(wantTTL)); err != nil {\n- c.t.Fatalf(\"SetSockOptInt(%d, %d) failed: %s\", opt, wantTTL, err)\n+ if err := c.ep.SetSockOptInt(tcpip.MulticastTTLOption, int(wantTTL)); err != nil {\n+ c.t.Fatalf(\"SetSockOptInt failed: %s\", err)\n}\ntestWrite(c, flow, checker.TTL(wantTTL))\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -766,6 +766,11 @@ syscall_test(\ntest = \"//test/syscalls/linux:socket_ip_unbound_test\",\n)\n+syscall_test(\n+ shard_count = more_shards,\n+ test = \"//test/syscalls/linux:socket_ipv6_unbound_test\",\n+)\n+\nsyscall_test(\ntest = \"//test/syscalls/linux:socket_ip_unbound_netlink_test\",\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -3058,6 +3058,22 @@ cc_binary(\n],\n)\n+cc_binary(\n+ name = \"socket_ipv6_unbound_test\",\n+ testonly = 1,\n+ srcs = [\n+ \"socket_ipv6_unbound.cc\",\n+ ],\n+ linkstatic = 1,\n+ deps = [\n+ \":ip_socket_test_util\",\n+ \"//test/util:socket_util\",\n+ gtest,\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ ],\n+)\n+\ncc_binary(\nname = \"socket_ipv6_udp_unbound_loopback_test\",\ntestonly = 1,\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/syscalls/linux/socket_ipv6_unbound.cc",
"diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <netinet/in.h>\n+#ifdef __linux__\n+#include <linux/in6.h>\n+#endif // __linux__\n+#include <sys/socket.h>\n+#include <sys/types.h>\n+\n+#include \"gtest/gtest.h\"\n+#include \"test/syscalls/linux/ip_socket_test_util.h\"\n+#include \"test/util/socket_util.h\"\n+#include \"test/util/test_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+namespace {\n+\n+constexpr int kDefaultHopLimit = 64;\n+\n+using ::testing::ValuesIn;\n+using IPv6UnboundSocketTest = SimpleSocketTest;\n+\n+TEST_P(IPv6UnboundSocketTest, HopLimitDefault) {\n+ std::unique_ptr<FileDescriptor> socket =\n+ ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+\n+ int get = -1;\n+ socklen_t get_sz = sizeof(get);\n+ ASSERT_THAT(\n+ getsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS, &get, &get_sz),\n+ SyscallSucceedsWithValue(0));\n+ ASSERT_EQ(get_sz, sizeof(get));\n+ EXPECT_EQ(get, kDefaultHopLimit);\n+}\n+\n+TEST_P(IPv6UnboundSocketTest, SetHopLimit) {\n+ std::unique_ptr<FileDescriptor> socket =\n+ ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+\n+ int get1 = -1;\n+ socklen_t get1_sz = sizeof(get1);\n+ ASSERT_THAT(getsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS, &get1,\n+ &get1_sz),\n+ SyscallSucceedsWithValue(0));\n+ ASSERT_EQ(get1_sz, sizeof(get1));\n+ EXPECT_EQ(get1, kDefaultHopLimit);\n+\n+ const int set = (get1 % 255) + 1;\n+ ASSERT_THAT(setsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS, &set,\n+ sizeof(set)),\n+ SyscallSucceedsWithValue(0));\n+\n+ int get2 = -1;\n+ socklen_t get2_sz = sizeof(get2);\n+ ASSERT_THAT(getsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS, &get2,\n+ &get2_sz),\n+ SyscallSucceedsWithValue(0));\n+ ASSERT_EQ(get2_sz, sizeof(get2));\n+ EXPECT_EQ(get2, set);\n+}\n+\n+TEST_P(IPv6UnboundSocketTest, ResetHopLimitToDefault) {\n+ std::unique_ptr<FileDescriptor> socket =\n+ ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+\n+ int get1 = -1;\n+ socklen_t get1_sz = sizeof(get1);\n+ ASSERT_THAT(getsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS, &get1,\n+ &get1_sz),\n+ SyscallSucceedsWithValue(0));\n+ ASSERT_EQ(get1_sz, sizeof(get1));\n+ EXPECT_EQ(get1, kDefaultHopLimit);\n+\n+ const int set = (get1 % 255) + 1;\n+ ASSERT_THAT(setsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS, &set,\n+ sizeof(set)),\n+ SyscallSucceedsWithValue(0));\n+\n+ constexpr int kUseDefaultHopLimit = -1;\n+ ASSERT_THAT(setsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS,\n+ &kUseDefaultHopLimit, sizeof(kUseDefaultHopLimit)),\n+ SyscallSucceedsWithValue(0));\n+\n+ int get2 = -1;\n+ socklen_t get2_sz = sizeof(get2);\n+ ASSERT_THAT(getsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS, &get2,\n+ &get2_sz),\n+ SyscallSucceedsWithValue(0));\n+ ASSERT_EQ(get2_sz, sizeof(get2));\n+ EXPECT_EQ(get2, get1);\n+}\n+\n+TEST_P(IPv6UnboundSocketTest, ZeroHopLimit) {\n+ std::unique_ptr<FileDescriptor> socket =\n+ ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+\n+ constexpr int kZero = 
0;\n+ ASSERT_THAT(setsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS, &kZero,\n+ sizeof(kZero)),\n+ SyscallSucceedsWithValue(0));\n+\n+ int get = -1;\n+ socklen_t get_sz = sizeof(get);\n+ ASSERT_THAT(\n+ getsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS, &get, &get_sz),\n+ SyscallSucceedsWithValue(0));\n+ ASSERT_EQ(get, kZero);\n+ EXPECT_EQ(get_sz, sizeof(get));\n+}\n+\n+TEST_P(IPv6UnboundSocketTest, InvalidLargeHopLimit) {\n+ std::unique_ptr<FileDescriptor> socket =\n+ ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+\n+ constexpr int kInvalidLarge = 256;\n+ EXPECT_THAT(setsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS,\n+ &kInvalidLarge, sizeof(kInvalidLarge)),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\n+TEST_P(IPv6UnboundSocketTest, InvalidNegativeHopLimit) {\n+ std::unique_ptr<FileDescriptor> socket =\n+ ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+\n+ constexpr int kInvalidNegative = -2;\n+ EXPECT_THAT(setsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS,\n+ &kInvalidNegative, sizeof(kInvalidNegative)),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\n+TEST_P(IPv6UnboundSocketTest, SetTtlDoesNotAffectHopLimit) {\n+ std::unique_ptr<FileDescriptor> socket =\n+ ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+\n+ int get = -1;\n+ socklen_t get_sz = sizeof(get);\n+ ASSERT_THAT(\n+ getsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS, &get, &get_sz),\n+ SyscallSucceedsWithValue(0));\n+ ASSERT_EQ(get_sz, sizeof(get));\n+\n+ const int set = (get % 255) + 1;\n+ ASSERT_THAT(setsockopt(socket->get(), IPPROTO_IP, IP_TTL, &set, sizeof(set)),\n+ SyscallSucceedsWithValue(0));\n+\n+ int get2 = -1;\n+ socklen_t get2_sz = sizeof(get2);\n+ ASSERT_THAT(getsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS, &get2,\n+ &get2_sz),\n+ SyscallSucceedsWithValue(0));\n+ ASSERT_EQ(get2_sz, sizeof(get2));\n+ EXPECT_EQ(get2, get);\n+}\n+\n+TEST_P(IPv6UnboundSocketTest, SetHopLimitDoesNotAffectTtl) {\n+ std::unique_ptr<FileDescriptor> socket =\n+ ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+\n+ int get = -1;\n+ socklen_t get_sz = sizeof(get);\n+ ASSERT_THAT(getsockopt(socket->get(), IPPROTO_IP, IP_TTL, &get, &get_sz),\n+ SyscallSucceedsWithValue(0));\n+ ASSERT_EQ(get_sz, sizeof(get));\n+\n+ const int set = (get % 255) + 1;\n+ ASSERT_THAT(setsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS, &set,\n+ sizeof(set)),\n+ SyscallSucceedsWithValue(0));\n+\n+ int get2 = -1;\n+ socklen_t get2_sz = sizeof(get2);\n+ ASSERT_THAT(getsockopt(socket->get(), IPPROTO_IP, IP_TTL, &get2, &get2_sz),\n+ SyscallSucceedsWithValue(0));\n+ ASSERT_EQ(get2_sz, sizeof(get2));\n+ EXPECT_EQ(get2, get);\n+}\n+\n+INSTANTIATE_TEST_SUITE_P(\n+ IPv6UnboundSockets, IPv6UnboundSocketTest,\n+ ValuesIn(VecCat<SocketKind>(\n+ ApplyVec<SocketKind>(IPv6UDPUnboundSocket,\n+ std::vector<int>{0, SOCK_NONBLOCK}),\n+ ApplyVec<SocketKind>(IPv6TCPUnboundSocket,\n+ std::vector{0, SOCK_NONBLOCK}))));\n+\n+} // namespace\n+} // namespace testing\n+} // namespace gvisor\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use protocol-specific options for TTL/HopLimit
The new HopLimit matches the IPV6_UNICAST_HOPS socket option.
Updates #6389
PiperOrigin-RevId: 418831844 |
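From userspace, the split above surfaces as two independent socket options, which the new SetHopLimitDoesNotAffectTtl test verifies. A minimal Go sketch of the same behavior via golang.org/x/sys/unix — illustrative only, with error handling kept terse:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// Set the IPv6 unicast hop limit; the IPv4 TTL is untouched.
	if err := unix.SetsockoptInt(fd, unix.IPPROTO_IPV6, unix.IPV6_UNICAST_HOPS, 32); err != nil {
		log.Fatal(err)
	}
	hops, err := unix.GetsockoptInt(fd, unix.IPPROTO_IPV6, unix.IPV6_UNICAST_HOPS)
	if err != nil {
		log.Fatal(err)
	}
	ttl, err := unix.GetsockoptInt(fd, unix.IPPROTO_IP, unix.IP_TTL)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("hop limit = %d, ttl = %d (independent)\n", hops, ttl)

	// -1 restores the system default, matching UseDefaultIPv6HopLimit
	// in the diff above.
	if err := unix.SetsockoptInt(fd, unix.IPPROTO_IPV6, unix.IPV6_UNICAST_HOPS, -1); err != nil {
		log.Fatal(err)
	}
}
```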
259,888 | 29.12.2021 21:27:56 | 21,600 | 871aaa768d0a6357e044d113a46eaf3f0abbd86a | tools/checklocks: fix typo in readme | [
{
"change_type": "MODIFY",
"old_path": "tools/checklocks/README.md",
"new_path": "tools/checklocks/README.md",
"diff": "@@ -88,7 +88,7 @@ greater than 100%, if the lock is held multiple times. For example:\nfunc foo(ts1 *testStruct, ts2 *testStruct) {\nts1.Lock()\nts2.Lock()\n- ts1.gaurdedField = 1 // 200% locks held.\n+ ts1.guardedField = 1 // 200% locks held.\nts1.Unlock()\nts2.Unlock()\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | tools/checklocks: fix typo in readme |
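For context on the guardedField name being corrected above: the checklocks analyzer enforces +checklocks annotations like the ones appearing throughout the diffs in this file (e.g. raw/endpoint.go). A small Go sketch in the README's style follows; the names are invented for illustration.

```go
package main

import "sync"

// testStruct mirrors the README's example: guardedField may only be
// accessed with mu held, which checklocks verifies statically.
type testStruct struct {
	mu sync.Mutex

	// +checklocks:mu
	guardedField int
}

func (ts *testStruct) set(v int) {
	ts.mu.Lock()
	defer ts.mu.Unlock()
	ts.guardedField = v // OK: mu is held.
}

func (ts *testStruct) badSet(v int) {
	ts.guardedField = v // checklocks reports: mu is not held here.
}

func main() {
	var ts testStruct
	ts.set(42)
}
```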
260,004 | 31.12.2021 21:30:17 | 28,800 | b488df0a2f15ba21603610bdb13d8656b856218f | Support SOL_IPV6 -> IPV6_CHECKSUM | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -1343,6 +1343,19 @@ func getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name\n}\nswitch name {\n+ case linux.IPV6_CHECKSUM:\n+ if outLen < sizeOfInt32 {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ v, err := ep.GetSockOptInt(tcpip.IPv6Checksum)\n+ if err != nil {\n+ return nil, syserr.TranslateNetstackError(err)\n+ }\n+\n+ vP := primitive.Int32(v)\n+ return &vP, nil\n+\ncase linux.IPV6_V6ONLY:\nif outLen < sizeOfInt32 {\nreturn nil, syserr.ErrInvalidArgument\n@@ -2151,6 +2164,15 @@ func setSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name\n}\nswitch name {\n+ case linux.IPV6_CHECKSUM:\n+ if len(optVal) < sizeOfInt32 {\n+ return syserr.ErrInvalidArgument\n+ }\n+\n+ // int may not be 32-bits so we cast the uint32 to an int32 before casting\n+ // to an int.\n+ return syserr.TranslateNetstackError(ep.SetSockOptInt(tcpip.IPv6Checksum, int(int32(hostarch.ByteOrder.Uint32(optVal)))))\n+\ncase linux.IPV6_V6ONLY:\nif len(optVal) < sizeOfInt32 {\nreturn syserr.ErrInvalidArgument\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/checksum.go",
"new_path": "pkg/tcpip/header/checksum.go",
"diff": "@@ -24,6 +24,11 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n)\n+// ChecksumSize is the size of a checksum.\n+//\n+// The checksum is held in a uint16 which is 2 bytes.\n+const ChecksumSize = 2\n+\n// PutChecksum puts the checksum in the provided byte slice.\nfunc PutChecksum(b []byte, xsum uint16) {\nbinary.BigEndian.PutUint16(b, xsum)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/icmpv6.go",
"new_path": "pkg/tcpip/header/icmpv6.go",
"diff": "@@ -67,9 +67,9 @@ const (\n// packet-too-big packet.\nICMPv6PacketTooBigMinimumSize = ICMPv6MinimumSize\n- // icmpv6ChecksumOffset is the offset of the checksum field\n+ // ICMPv6ChecksumOffset is the offset of the checksum field\n// in an ICMPv6 message.\n- icmpv6ChecksumOffset = 2\n+ ICMPv6ChecksumOffset = 2\n// icmpv6PointerOffset is the offset of the pointer\n// in an ICMPv6 Parameter problem message.\n@@ -194,12 +194,12 @@ func (b ICMPv6) SetTypeSpecific(val uint32) {\n// Checksum is the ICMP checksum field.\nfunc (b ICMPv6) Checksum() uint16 {\n- return binary.BigEndian.Uint16(b[icmpv6ChecksumOffset:])\n+ return binary.BigEndian.Uint16(b[ICMPv6ChecksumOffset:])\n}\n// SetChecksum sets the ICMP checksum field.\nfunc (b ICMPv6) SetChecksum(checksum uint16) {\n- PutChecksum(b[icmpv6ChecksumOffset:], checksum)\n+ PutChecksum(b[ICMPv6ChecksumOffset:], checksum)\n}\n// SourcePort implements Transport.SourcePort.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -757,6 +757,10 @@ const (\n//\n// NOTE: This option is currently only stubed out and is a no-op\nTCPWindowClampOption\n+\n+ // IPv6Checksum is used to request the stack to populate and validate the IPv6\n+ // checksum for transport level headers.\n+ IPv6Checksum\n)\nconst (\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/endpoint.go",
"new_path": "pkg/tcpip/transport/raw/endpoint.go",
"diff": "@@ -85,6 +85,13 @@ type endpoint struct {\nrcvDisabled bool\nmu sync.RWMutex `state:\"nosave\"`\n+\n+ // ipv6ChecksumOffset indicates the offset to populate the IPv6 checksum at.\n+ //\n+ // A negative value indicates no checksum should be calculated.\n+ //\n+ // +checklocks:mu\n+ ipv6ChecksumOffset int\n// icmp6Filter holds the filter for ICMPv6 packets.\n//\n// +checklocks:mu\n@@ -97,11 +104,24 @@ func NewEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, trans\n}\nfunc newEndpoint(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, transProto tcpip.TransportProtocolNumber, waiterQueue *waiter.Queue, associated bool) (tcpip.Endpoint, tcpip.Error) {\n+ // Calculating the upper-layer checksum is disabled by default for raw IPv6\n+ // endpoints, unless the upper-layer protocol is ICMPv6.\n+ //\n+ // As per RFC 3542 section 3.1,\n+ //\n+ // The kernel will calculate and insert the ICMPv6 checksum for ICMPv6\n+ // raw sockets, since this checksum is mandatory.\n+ ipv6ChecksumOffset := -1\n+ if netProto == header.IPv6ProtocolNumber && transProto == header.ICMPv6ProtocolNumber {\n+ ipv6ChecksumOffset = header.ICMPv6ChecksumOffset\n+ }\n+\ne := &endpoint{\nstack: s,\ntransProto: transProto,\nwaiterQueue: waiterQueue,\nassociated: associated,\n+ ipv6ChecksumOffset: ipv6ChecksumOffset,\n}\ne.ops.InitHandler(e, e.stack, tcpip.GetStackSendBufferLimits, tcpip.GetStackReceiveBufferLimits)\ne.ops.SetHeaderIncluded(!associated)\n@@ -274,7 +294,10 @@ func (e *endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp\n}\nfunc (e *endpoint) write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcpip.Error) {\n+ e.mu.Lock()\nctx, err := e.net.AcquireContextForWrite(opts)\n+ ipv6ChecksumOffset := e.ipv6ChecksumOffset\n+ e.mu.Unlock()\nif err != nil {\nreturn 0, err\n}\n@@ -285,6 +308,18 @@ func (e *endpoint) write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp\nreturn 0, &tcpip.ErrBadBuffer{}\n}\n+ if packetInfo := ctx.PacketInfo(); packetInfo.NetProto == header.IPv6ProtocolNumber && ipv6ChecksumOffset >= 0 {\n+ // Make sure we can fit the checksum.\n+ if len(payloadBytes) < ipv6ChecksumOffset+header.ChecksumSize {\n+ return 0, &tcpip.ErrInvalidOptionValue{}\n+ }\n+\n+ xsum := header.PseudoHeaderChecksum(e.transProto, packetInfo.LocalAddress, packetInfo.RemoteAddress, uint16(len(payloadBytes)))\n+ header.PutChecksum(payloadBytes[ipv6ChecksumOffset:], 0)\n+ xsum = header.Checksum(payloadBytes, xsum)\n+ header.PutChecksum(payloadBytes[ipv6ChecksumOffset:], ^xsum)\n+ }\n+\npkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nReserveHeaderBytes: int(ctx.PacketInfo().MaxHeaderLength),\nData: buffer.View(payloadBytes).ToVectorisedView(),\n@@ -415,8 +450,32 @@ func (e *endpoint) SetSockOpt(opt tcpip.SettableSocketOption) tcpip.Error {\n}\nfunc (e *endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) tcpip.Error {\n+ switch opt {\n+ case tcpip.IPv6Checksum:\n+ if e.net.NetProto() != header.IPv6ProtocolNumber {\n+ return &tcpip.ErrUnknownProtocolOption{}\n+ }\n+\n+ if e.transProto == header.ICMPv6ProtocolNumber {\n+ // As per RFC 3542 section 3.1,\n+ //\n+ // An attempt to set IPV6_CHECKSUM for an ICMPv6 socket will fail.\n+ return &tcpip.ErrInvalidOptionValue{}\n+ }\n+\n+ // Make sure the offset is aligned properly if checksum is requested.\n+ if v > 0 && v%header.ChecksumSize != 0 {\n+ return &tcpip.ErrInvalidOptionValue{}\n+ }\n+\n+ e.mu.Lock()\n+ defer e.mu.Unlock()\n+ e.ipv6ChecksumOffset = v\n+ return nil\n+ default:\nreturn e.net.SetSockOptInt(opt, 
v)\n}\n+}\n// GetSockOpt implements tcpip.Endpoint.GetSockOpt.\nfunc (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) tcpip.Error {\n@@ -453,6 +512,15 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) {\ne.rcvMu.Unlock()\nreturn v, nil\n+ case tcpip.IPv6Checksum:\n+ if e.net.NetProto() != header.IPv6ProtocolNumber {\n+ return 0, &tcpip.ErrUnknownProtocolOption{}\n+ }\n+\n+ e.mu.Lock()\n+ defer e.mu.Unlock()\n+ return e.ipv6ChecksumOffset, nil\n+\ndefault:\nreturn e.net.GetSockOptInt(opt)\n}\n@@ -552,6 +620,7 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\nheaders = append(headers, networkHeader...)\nheaders = append(headers, transportHeader...)\ncombinedVV = headers.ToVectorisedView()\n+ combinedVV.Append(pkt.Data().ExtractVV())\ncase header.IPv6ProtocolNumber:\nif e.transProto == header.ICMPv6ProtocolNumber {\nif len(transportHeader) < header.ICMPv6MinimumSize {\n@@ -564,10 +633,26 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\n}\ncombinedVV = append(buffer.View(nil), transportHeader...).ToVectorisedView()\n+ combinedVV.Append(pkt.Data().ExtractVV())\n+\n+ if checksumOffset := e.ipv6ChecksumOffset; checksumOffset >= 0 {\n+ vvSize := combinedVV.Size()\n+ if vvSize < checksumOffset+header.ChecksumSize {\n+ // Message too small to fit checksum.\n+ return false\n+ }\n+\n+ xsum := header.PseudoHeaderChecksum(e.transProto, srcAddr, dstAddr, uint16(vvSize))\n+ xsum = header.ChecksumVV(combinedVV, xsum)\n+ if xsum != 0xFFFF {\n+ // Invalid checksum.\n+ return false\n+ }\n+ }\ndefault:\npanic(fmt.Sprintf(\"unrecognized protocol number = %d\", info.NetProto))\n}\n- combinedVV.Append(pkt.Data().ExtractVV())\n+\npacket.data = combinedVV\npacket.receivedAt = e.stack.Clock().Now()\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/raw_socket.cc",
"new_path": "test/syscalls/linux/raw_socket.cc",
"diff": "@@ -1186,6 +1186,179 @@ TEST(RawSocketTest, ReceiveIPv6PacketInfo) {\nEXPECT_THAT(CMSG_NXTHDR(&recv_msg, cmsg), IsNull());\n}\n+TEST(RawSocketTest, SetIPv6ChecksumError_MultipleOf2) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveRawIPSocketCapability()));\n+\n+ FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET6, SOCK_RAW, IPPROTO_UDP));\n+\n+ int intV = 3;\n+ ASSERT_THAT(\n+ setsockopt(fd.get(), SOL_IPV6, IPV6_CHECKSUM, &intV, sizeof(intV)),\n+ SyscallFailsWithErrno(EINVAL));\n+\n+ intV = 5;\n+ ASSERT_THAT(\n+ setsockopt(fd.get(), SOL_IPV6, IPV6_CHECKSUM, &intV, sizeof(intV)),\n+ SyscallFailsWithErrno(EINVAL));\n+\n+ intV = 2;\n+ ASSERT_THAT(\n+ setsockopt(fd.get(), SOL_IPV6, IPV6_CHECKSUM, &intV, sizeof(intV)),\n+ SyscallSucceeds());\n+\n+ intV = 4;\n+ ASSERT_THAT(\n+ setsockopt(fd.get(), SOL_IPV6, IPV6_CHECKSUM, &intV, sizeof(intV)),\n+ SyscallSucceeds());\n+}\n+\n+TEST(RawSocketTest, SetIPv6ChecksumError_ReadShort) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveRawIPSocketCapability()));\n+\n+ FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET6, SOCK_RAW, IPPROTO_UDP));\n+\n+ int intV = 2;\n+ if (IsRunningOnGvisor() && !IsRunningWithHostinet()) {\n+ // TODO(https://gvisor.dev/issue/6982): This is a deviation from Linux. We\n+ // should determine if we want to match the behaviour or handle the error\n+ // more gracefully.\n+ ASSERT_THAT(\n+ setsockopt(fd.get(), SOL_IPV6, IPV6_CHECKSUM, &intV, sizeof(intV) - 1),\n+ SyscallFailsWithErrno(EINVAL));\n+ return;\n+ }\n+\n+ intV = std::numeric_limits<int>::max();\n+ if (intV % 2) {\n+ intV--;\n+ }\n+\n+ if (const char* val = getenv(\"IPV6_CHECKSUM_SETSOCKOPT_SHORT_EXCEPTION\");\n+ val != nullptr && strcmp(val, \"1\") == 0) {\n+ // TODO(https://issuetracker.google.com/issues/212585236): As of writing, it\n+ // seems like at least one Linux environment considers optlen unlike a local\n+ // Linux environment. In this case we call setsockopt with the full int so\n+ // that the rest of the test passes. 
Once the root cause for this difference\n+ // is found, we can update this check.\n+ ASSERT_THAT(\n+ setsockopt(fd.get(), SOL_IPV6, IPV6_CHECKSUM, &intV, sizeof(intV)),\n+ SyscallSucceeds());\n+ } else {\n+ ASSERT_THAT(\n+ setsockopt(fd.get(), SOL_IPV6, IPV6_CHECKSUM, &intV, sizeof(intV) - 1),\n+ SyscallSucceeds());\n+ }\n+\n+ {\n+ int got;\n+ socklen_t got_len = sizeof(got);\n+ ASSERT_THAT(getsockopt(fd.get(), SOL_IPV6, IPV6_CHECKSUM, &got, &got_len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(got_len, sizeof(got));\n+ // Even though we called setsockopt with a length smaller than an int, Linux\n+ // seems to read the full int.\n+ EXPECT_EQ(got, intV);\n+ }\n+\n+ // If we have pass a pointer that points to memory less than the size of an\n+ // int, we get a bad address error.\n+ std::unique_ptr<uint8_t> u8V;\n+ // Linux seems to assume a full int but doesn't check the passed length.\n+ //\n+ // https://github.com/torvalds/linux/blob/a52a8e9eaf4a12dd58953fc622bb2bc08fd1d32c/net/ipv6/raw.c#L1023\n+ // shows that Linux copies optVal to an int without first checking optLen.\n+ ASSERT_THAT(\n+ setsockopt(fd.get(), SOL_IPV6, IPV6_CHECKSUM, u8V.get(), sizeof(*u8V)),\n+ SyscallFailsWithErrno(EFAULT));\n+}\n+\n+TEST(RawSocketTest, IPv6Checksum_ValidateAndCalculate) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveRawIPSocketCapability()));\n+\n+ FileDescriptor checksum_set =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET6, SOCK_RAW, IPPROTO_UDP));\n+\n+ FileDescriptor checksum_not_set =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET6, SOCK_RAW, IPPROTO_UDP));\n+\n+ const sockaddr_in6 addr = {\n+ .sin6_family = AF_INET6,\n+ .sin6_addr = IN6ADDR_LOOPBACK_INIT,\n+ };\n+\n+ auto bind_and_set_checksum = [&](const FileDescriptor& fd, int v) {\n+ ASSERT_THAT(\n+ bind(fd.get(), reinterpret_cast<const sockaddr*>(&addr), sizeof(addr)),\n+ SyscallSucceeds());\n+\n+ int got;\n+ socklen_t got_len = sizeof(got);\n+ ASSERT_THAT(getsockopt(fd.get(), SOL_IPV6, IPV6_CHECKSUM, &got, &got_len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(got_len, sizeof(got));\n+ EXPECT_EQ(got, -1);\n+\n+ ASSERT_THAT(setsockopt(fd.get(), SOL_IPV6, IPV6_CHECKSUM, &v, sizeof(v)),\n+ SyscallSucceeds());\n+ ASSERT_THAT(getsockopt(fd.get(), SOL_IPV6, IPV6_CHECKSUM, &got, &got_len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(got_len, sizeof(got));\n+ EXPECT_EQ(got, v);\n+ };\n+\n+ struct udp_packet {\n+ udphdr udp;\n+ uint32_t value;\n+ } ABSL_ATTRIBUTE_PACKED;\n+\n+ ASSERT_NO_FATAL_FAILURE(bind_and_set_checksum(\n+ checksum_set, offsetof(udp_packet, udp) + offsetof(udphdr, uh_sum)));\n+ ASSERT_NO_FATAL_FAILURE(bind_and_set_checksum(checksum_not_set, -1));\n+\n+ auto send = [&](const FileDescriptor& fd, uint32_t v) {\n+ const udp_packet packet = {\n+ .value = v,\n+ };\n+\n+ ASSERT_THAT(sendto(fd.get(), &packet, sizeof(packet), /*flags=*/0,\n+ reinterpret_cast<const sockaddr*>(&addr), sizeof(addr)),\n+ SyscallSucceedsWithValue(sizeof(packet)));\n+ };\n+\n+ auto expect_receive = [&](const FileDescriptor& fd, uint32_t v,\n+ bool should_check_xsum) {\n+ udp_packet packet;\n+ sockaddr_in6 sender;\n+ socklen_t sender_len = sizeof(sender);\n+ ASSERT_THAT(\n+ RetryEINTR(recvfrom)(fd.get(), &packet, sizeof(packet), /*flags=*/0,\n+ reinterpret_cast<sockaddr*>(&sender), &sender_len),\n+ SyscallSucceedsWithValue(sizeof(packet)));\n+ ASSERT_EQ(sender_len, sizeof(sender));\n+ EXPECT_EQ(memcmp(&sender, &addr, sizeof(addr)), 0);\n+ EXPECT_EQ(packet.value, v);\n+ if (should_check_xsum) {\n+ EXPECT_NE(packet.udp.uh_sum, 0);\n+ } else {\n+ EXPECT_EQ(packet.udp.uh_sum, 0);\n+ }\n+ };\n+\n+ 
uint32_t counter = 1;\n+ // Packets sent through checksum_not_set will not have a valid checksum set so\n+ // checksum_set should not accept those packets.\n+ ASSERT_NO_FATAL_FAILURE(send(checksum_not_set, counter));\n+ ASSERT_NO_FATAL_FAILURE(expect_receive(checksum_not_set, counter, false));\n+\n+ // Packets sent through checksum_set will have a valid checksum so both\n+ // sockets should accept them.\n+ ASSERT_NO_FATAL_FAILURE(send(checksum_set, ++counter));\n+ ASSERT_NO_FATAL_FAILURE(expect_receive(checksum_set, counter, true));\n+ ASSERT_NO_FATAL_FAILURE(expect_receive(checksum_not_set, counter, true));\n+}\n+\n} // namespace\n} // namespace testing\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/raw_socket_icmp.cc",
"new_path": "test/syscalls/linux/raw_socket_icmp.cc",
"diff": "@@ -109,7 +109,7 @@ void RawSocketICMPTest::TearDown() {\n}\n}\n-TEST_F(RawSocketICMPTest, SockOptIPv6Checksum) {\n+TEST_F(RawSocketICMPTest, IPv6ChecksumNotSupported) {\nint v;\nEXPECT_THAT(setsockopt(s_, SOL_IPV6, IPV6_CHECKSUM, &v, sizeof(v)),\nSyscallFailsWithErrno(ENOPROTOOPT));\n@@ -633,6 +633,92 @@ TEST_F(RawSocketICMPv6Test, GetPartialFilterSucceeds) {\nFieldsAre(ElementsAreArray(expected_filter.icmp6_filt)));\n}\n+TEST_F(RawSocketICMPv6Test, SetSockOptIPv6ChecksumFails) {\n+ int v = 2;\n+ EXPECT_THAT(setsockopt(fd().get(), SOL_IPV6, IPV6_CHECKSUM, &v, sizeof(v)),\n+ SyscallFailsWithErrno(EINVAL));\n+ socklen_t len = sizeof(v);\n+ EXPECT_THAT(getsockopt(fd().get(), SOL_IPV6, IPV6_CHECKSUM, &v, &len),\n+ SyscallSucceeds());\n+ ASSERT_EQ(len, sizeof(v));\n+ EXPECT_EQ(v, offsetof(icmp6_hdr, icmp6_cksum));\n+}\n+\n+TEST_F(RawSocketICMPv6Test, MsgTooSmallToFillChecksumFailsSend) {\n+ char buf[offsetof(icmp6_hdr, icmp6_cksum) +\n+ sizeof((icmp6_hdr{}).icmp6_cksum) - 1];\n+\n+ const sockaddr_in6 addr = {\n+ .sin6_family = AF_INET6,\n+ .sin6_addr = IN6ADDR_LOOPBACK_INIT,\n+ };\n+\n+ ASSERT_THAT(sendto(fd().get(), &buf, sizeof(buf), /*flags=*/0,\n+ reinterpret_cast<const sockaddr*>(&addr), sizeof(addr)),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\n+constexpr uint8_t kUnusedICMPCode = 0;\n+\n+TEST_F(RawSocketICMPv6Test, PingSuccessfully) {\n+ // Only observe echo packets.\n+ {\n+ icmp6_filter set_filter;\n+ ICMP6_FILTER_SETBLOCKALL(&set_filter);\n+ ICMP6_FILTER_SETPASS(ICMP6_ECHO_REQUEST, &set_filter);\n+ ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &set_filter);\n+ ASSERT_THAT(setsockopt(fd().get(), SOL_ICMPV6, ICMP6_FILTER, &set_filter,\n+ sizeof(set_filter)),\n+ SyscallSucceeds());\n+ }\n+\n+ const sockaddr_in6 addr = {\n+ .sin6_family = AF_INET6,\n+ .sin6_addr = IN6ADDR_LOOPBACK_INIT,\n+ };\n+\n+ auto send_with_checksum = [&](uint16_t checksum) {\n+ const icmp6_hdr echo_request = {\n+ .icmp6_type = ICMP6_ECHO_REQUEST,\n+ .icmp6_code = kUnusedICMPCode,\n+ .icmp6_cksum = checksum,\n+ };\n+\n+ ASSERT_THAT(RetryEINTR(sendto)(fd().get(), &echo_request,\n+ sizeof(echo_request), /*flags=*/0,\n+ reinterpret_cast<const sockaddr*>(&addr),\n+ sizeof(addr)),\n+ SyscallSucceedsWithValue(sizeof(echo_request)));\n+ };\n+\n+ auto check_recv = [&](uint8_t expected_type) {\n+ icmp6_hdr got_echo;\n+ sockaddr_in6 sender;\n+ socklen_t sender_len = sizeof(sender);\n+ ASSERT_THAT(RetryEINTR(recvfrom)(\n+ fd().get(), &got_echo, sizeof(got_echo), /*flags=*/0,\n+ reinterpret_cast<sockaddr*>(&sender), &sender_len),\n+ SyscallSucceedsWithValue(sizeof(got_echo)));\n+ ASSERT_EQ(sender_len, sizeof(sender));\n+ EXPECT_EQ(memcmp(&sender, &addr, sizeof(addr)), 0);\n+ EXPECT_THAT(got_echo,\n+ FieldsAre(expected_type, kUnusedICMPCode,\n+ // The stack should have populated the checksum.\n+ /*icmp6_cksum=*/Not(0), /*icmp6_dataun=*/_));\n+ EXPECT_THAT(got_echo.icmp6_data32, ElementsAre(0));\n+ };\n+\n+ // Send a request and observe the request followed by the response.\n+ ASSERT_NO_FATAL_FAILURE(send_with_checksum(0));\n+ ASSERT_NO_FATAL_FAILURE(check_recv(ICMP6_ECHO_REQUEST));\n+ ASSERT_NO_FATAL_FAILURE(check_recv(ICMP6_ECHO_REPLY));\n+\n+ // The stack ignores the checksum set by the user.\n+ ASSERT_NO_FATAL_FAILURE(send_with_checksum(1));\n+ ASSERT_NO_FATAL_FAILURE(check_recv(ICMP6_ECHO_REQUEST));\n+ ASSERT_NO_FATAL_FAILURE(check_recv(ICMP6_ECHO_REPLY));\n+}\n+\nclass RawSocketICMPv6TypeTest : public RawSocketICMPv6Test,\npublic WithParamInterface<uint8_t> {};\n@@ -664,7 +750,6 @@ TEST_P(RawSocketICMPv6TypeTest, 
FilterDeliveredPackets) {\n// Send an ICMP packet for each type.\nuint8_t icmp_type = 0;\n- constexpr uint8_t kUnusedICMPCode = 0;\ndo {\nconst icmp6_hdr packet = {\n.icmp6_type = icmp_type,\n@@ -685,25 +770,15 @@ TEST_P(RawSocketICMPv6TypeTest, FilterDeliveredPackets) {\nsockaddr_in6 sender;\nsocklen_t sender_len = sizeof(sender);\nASSERT_THAT(RetryEINTR(recvfrom)(\n- fd().get(), &got_packet, sizeof(got_packet), 0 /* flags */,\n+ fd().get(), &got_packet, sizeof(got_packet), /*flags=*/0,\nreinterpret_cast<sockaddr*>(&sender), &sender_len),\nSyscallSucceedsWithValue(sizeof(got_packet)));\nASSERT_EQ(sender_len, sizeof(sender));\nEXPECT_EQ(memcmp(&sender, &addr, sizeof(addr)), 0);\n// The stack should have populated the checksum.\n- if (IsRunningOnGvisor() && !IsRunningWithHostinet()) {\n- // TODO(https://github.com/google/gvisor/pull/6957): Use same check as\n- // Linux.\n- EXPECT_THAT(got_packet,\n- FieldsAre(allowed_type, kUnusedICMPCode, 0 /* icmp6_cksum */,\n- _ /* icmp6_dataun */\n- ));\n- } else {\nEXPECT_THAT(got_packet,\nFieldsAre(allowed_type, kUnusedICMPCode,\n- Not(0) /* icmp6_cksum */, _ /* icmp6_dataun */\n- ));\n- }\n+ /*icmp6_cksum=*/Not(0), /*icmp6_dataun=*/_));\nEXPECT_THAT(got_packet.icmp6_data32, ElementsAre(0));\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support SOL_IPV6 -> IPV6_CHECKSUM
PiperOrigin-RevId: 419164074 |
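A self-contained sketch of the checksum arithmetic the endpoint performs at `ipv6ChecksumOffset`: zero the field, fold the payload plus a pseudo-header contribution with the ones-complement sum, and store the complement. The payload bytes, offset, and pseudo-header value below are made up for illustration; the real code folds in the actual IPv6 pseudo-header (source, destination, length, protocol).

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// checksum computes the ones-complement sum used by Internet checksums,
// folding carries back into the low 16 bits.
func checksum(b []byte, initial uint32) uint16 {
	sum := initial
	for len(b) >= 2 {
		sum += uint32(binary.BigEndian.Uint16(b))
		b = b[2:]
	}
	if len(b) == 1 {
		sum += uint32(b[0]) << 8 // An odd trailing byte is padded with zero.
	}
	for sum>>16 != 0 {
		sum = (sum & 0xffff) + (sum >> 16)
	}
	return uint16(sum)
}

func main() {
	// Hypothetical transport payload with a 2-byte checksum field at offset 6.
	payload := []byte{0xde, 0xad, 0xbe, 0xef, 0x01, 0x02, 0x00, 0x00}
	const checksumOffset = 6
	pseudo := uint32(0x1234) // Stand-in for the IPv6 pseudo-header sum.

	// Send path: zero the field, sum, store the complement.
	binary.BigEndian.PutUint16(payload[checksumOffset:], 0)
	xsum := ^checksum(payload, pseudo)
	binary.BigEndian.PutUint16(payload[checksumOffset:], xsum)
	fmt.Printf("stored checksum: %#04x\n", xsum)

	// Receive path, as in the diff: summing the packet with the stored
	// checksum included must yield 0xffff.
	fmt.Printf("verify: %#04x\n", checksum(payload, pseudo))
}
```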
259,853 | 04.01.2022 16:23:33 | 28,800 | 52bee5297caf67ddb7bb6d23035255e09cb10861 | unix: call Listening under the endpoint lock | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/gofer/socket.go",
"new_path": "pkg/sentry/fs/gofer/socket.go",
"diff": "@@ -93,7 +93,7 @@ func (e *endpoint) BidirectionalConnect(ctx context.Context, ce transport.Connec\nce.Unlock()\nreturn syserr.ErrAlreadyConnected\n}\n- if ce.Listening() {\n+ if ce.ListeningLocked() {\nce.Unlock()\nreturn syserr.ErrInvalidEndpointState\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/net.go",
"new_path": "pkg/sentry/fs/proc/net.go",
"diff": "@@ -442,11 +442,13 @@ func (n *netUnix) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]s\nsockFlags := 0\nif ce, ok := sops.Endpoint().(transport.ConnectingEndpoint); ok {\n- if ce.Listening() {\n+ ce.Lock()\n+ if ce.ListeningLocked() {\n// For unix domain sockets, linux reports a single flag\n// value if the socket is listening, of __SO_ACCEPTCON.\nsockFlags = linux.SO_ACCEPTCON\n}\n+ ce.Unlock()\n}\n// In the socket entry below, the value for the 'Num' field requires\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/socket.go",
"new_path": "pkg/sentry/fsimpl/gofer/socket.go",
"diff": "@@ -66,7 +66,7 @@ func (e *endpoint) BidirectionalConnect(ctx context.Context, ce transport.Connec\nce.Unlock()\nreturn syserr.ErrAlreadyConnected\n}\n- if ce.Listening() {\n+ if ce.ListeningLocked() {\nce.Unlock()\nreturn syserr.ErrInvalidEndpointState\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/proc/task_net.go",
"new_path": "pkg/sentry/fsimpl/proc/task_net.go",
"diff": "@@ -226,11 +226,13 @@ func (n *netUnixData) Generate(ctx context.Context, buf *bytes.Buffer) error {\nsockFlags := 0\nif ce, ok := sops.Endpoint().(transport.ConnectingEndpoint); ok {\n- if ce.Listening() {\n+ ce.Lock()\n+ if ce.ListeningLocked() {\n// For unix domain sockets, linux reports a single flag\n// value if the socket is listening, of __SO_ACCEPTCON.\nsockFlags = linux.SO_ACCEPTCON\n}\n+ ce.Unlock()\n}\n// Get inode number.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/unix/transport/connectioned.go",
"new_path": "pkg/sentry/socket/unix/transport/connectioned.go",
"diff": "@@ -53,10 +53,11 @@ type ConnectingEndpoint interface {\n// so the connection attempt must be aborted if this returns true.\nConnected() bool\n- // Listening returns true iff the ConnectingEndpoint is in the listening\n- // state. ConnectingEndpoints cannot make connections while listening, so\n- // the connection attempt must be aborted if this returns true.\n- Listening() bool\n+ // ListeningLocked returns true iff the ConnectingEndpoint is in the\n+ // listening state. ConnectingEndpoints cannot make connections while\n+ // listening, so the connection attempt must be aborted if this returns\n+ // true.\n+ ListeningLocked() bool\n// WaiterQueue returns a pointer to the endpoint's waiter queue.\nWaiterQueue() *waiter.Queue\n@@ -199,6 +200,12 @@ func (e *connectionedEndpoint) isBound() bool {\n// Listening implements ConnectingEndpoint.Listening.\nfunc (e *connectionedEndpoint) Listening() bool {\n+ e.Lock()\n+ defer e.Unlock()\n+ return e.ListeningLocked()\n+}\n+\n+func (e *connectionedEndpoint) ListeningLocked() bool {\nreturn e.acceptedChan != nil\n}\n@@ -228,7 +235,7 @@ func (e *connectionedEndpoint) Close(ctx context.Context) {\ne.receiver = nil\ncase e.isBound():\ne.path = \"\"\n- case e.Listening():\n+ case e.ListeningLocked():\nclose(e.acceptedChan)\nacceptedChan = e.acceptedChan\ne.acceptedChan = nil\n@@ -276,14 +283,14 @@ func (e *connectionedEndpoint) BidirectionalConnect(ctx context.Context, ce Conn\nce.Unlock()\nreturn syserr.ErrAlreadyConnected\n}\n- if ce.Listening() {\n+ if ce.ListeningLocked() {\ne.Unlock()\nce.Unlock()\nreturn syserr.ErrInvalidEndpointState\n}\n// Check bound state.\n- if !e.Listening() {\n+ if !e.ListeningLocked() {\ne.Unlock()\nce.Unlock()\nreturn syserr.ErrConnectionRefused\n@@ -378,7 +385,7 @@ func (e *connectionedEndpoint) Connect(ctx context.Context, server BoundEndpoint\nfunc (e *connectionedEndpoint) Listen(backlog int) *syserr.Error {\ne.Lock()\ndefer e.Unlock()\n- if e.Listening() {\n+ if e.ListeningLocked() {\n// Adjust the size of the channel iff we can fix existing\n// pending connections into the new one.\nif len(e.acceptedChan) > backlog {\n@@ -405,7 +412,7 @@ func (e *connectionedEndpoint) Listen(backlog int) *syserr.Error {\nfunc (e *connectionedEndpoint) Accept(peerAddr *tcpip.FullAddress) (Endpoint, *syserr.Error) {\ne.Lock()\n- if !e.Listening() {\n+ if !e.ListeningLocked() {\ne.Unlock()\nreturn nil, syserr.ErrInvalidEndpointState\n}\n@@ -445,7 +452,7 @@ func (e *connectionedEndpoint) Accept(peerAddr *tcpip.FullAddress) (Endpoint, *s\nfunc (e *connectionedEndpoint) Bind(addr tcpip.FullAddress, commit func() *syserr.Error) *syserr.Error {\ne.Lock()\ndefer e.Unlock()\n- if e.isBound() || e.Listening() {\n+ if e.isBound() || e.ListeningLocked() {\nreturn syserr.ErrAlreadyBound\n}\nif addr.Addr == \"\" {\n@@ -490,7 +497,7 @@ func (e *connectionedEndpoint) Readiness(mask waiter.EventMask) waiter.EventMask\nif mask&waiter.WritableEvents != 0 && e.connected.Writable() {\nready |= waiter.WritableEvents\n}\n- case e.Listening():\n+ case e.ListeningLocked():\nif mask&waiter.ReadableEvents != 0 && len(e.acceptedChan) > 0 {\nready |= waiter.ReadableEvents\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | unix: call Listening under the endpoint lock
PiperOrigin-RevId: 419703575 |
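The shape of the change, reduced to a standalone sketch: the exported method acquires the lock and delegates to a `Locked` variant, which callers already holding the lock (such as `BidirectionalConnect`, which locks both endpoints to check several conditions atomically) can call directly. Types are simplified stand-ins.

```go
package example

import "sync"

type endpoint struct {
	mu sync.Mutex

	// acceptedChan is non-nil iff the endpoint is listening. The element
	// type is a stand-in; the real channel carries accepted endpoints.
	acceptedChan chan struct{}
}

// Listening takes the lock for callers that hold no locks.
func (e *endpoint) Listening() bool {
	e.mu.Lock()
	defer e.mu.Unlock()
	return e.ListeningLocked()
}

// ListeningLocked must be called with e.mu held.
func (e *endpoint) ListeningLocked() bool {
	return e.acceptedChan != nil
}
```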
259,853 | 04.01.2022 22:43:14 | 28,800 | 496ba59add3c7f1495c879d6fb79fedc19a8b33e | Deflake the socket_inet_loopback test
Don't close sockets for writing; otherwise sendto can fail with EPIPE. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_inet_loopback.cc",
"new_path": "test/syscalls/linux/socket_inet_loopback.cc",
"diff": "@@ -1562,7 +1562,7 @@ TEST_P(SocketInetReusePortTest, UdpPortReuseMultiThread) {\n// Shutdown all sockets to wake up other threads.\nfor (int j = 0; j < kThreadCount; j++)\n- shutdown(listener_fds[j].get(), SHUT_RDWR);\n+ shutdown(listener_fds[j].get(), SHUT_RD);\n});\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Deflake the socket_inet_loopback test
Don't close sockets for writing; otherwise sendto can fail with EPIPE.
PiperOrigin-RevId: 419751558 |
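A sketch of the difference the one-line fix exploits, assuming Linux semantics for connected datagram sockets: `SHUT_RD` wakes threads blocked in recv while leaving the socket writable, whereas `SHUT_RDWR` also closes the write side, so a later send fails with EPIPE. The self-connected loopback socket here is only a stand-in for the test's setup.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	if err := unix.Bind(fd, &unix.SockaddrInet4{Addr: [4]byte{127, 0, 0, 1}}); err != nil {
		panic(err)
	}
	// Connect the socket to itself so shutdown and send have a fixed peer.
	sa, err := unix.Getsockname(fd)
	if err != nil {
		panic(err)
	}
	if err := unix.Connect(fd, sa); err != nil {
		panic(err)
	}

	// SHUT_RD wakes blocked receivers but keeps the write side open.
	if err := unix.Shutdown(fd, unix.SHUT_RD); err != nil {
		panic(err)
	}
	_, err = unix.Write(fd, []byte("ping"))
	fmt.Printf("write after SHUT_RD: %v\n", err) // Expected: <nil>.

	// SHUT_RDWR additionally closes the write side; the next write fails
	// with EPIPE, which is the flake the commit removes.
	if err := unix.Shutdown(fd, unix.SHUT_RDWR); err != nil {
		panic(err)
	}
	_, err = unix.Write(fd, []byte("ping"))
	fmt.Printf("write after SHUT_RDWR: %v\n", err) // Expected: broken pipe.
}
```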
259,868 | 05.01.2022 13:20:41 | 28,800 | d057161dc81a0359e92fa86b4038fe72740020fe | Make `SyscallTable.lookup` be a fixed-size array rather than a slice.
Same for `SyscallFlagsTable.enable`.
This avoids a few extra memory reads during syscall execution, at the expense
of holding two slightly-larger-than-necessary-in-practice lookup arrays in
memory. Syscall latency overhead should be improved very slightly. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/syscalls.go",
"new_path": "pkg/sentry/kernel/syscalls.go",
"diff": "@@ -25,12 +25,14 @@ import (\n\"gvisor.dev/gvisor/pkg/sync\"\n)\n+const (\n// maxSyscallNum is the highest supported syscall number.\n//\n// The types below create fast lookup slices for all syscalls. This maximum\n// serves as a sanity check that we don't allocate huge slices for a very large\n// syscall. This is checked during registration.\n-const maxSyscallNum = 2000\n+ maxSyscallNum = 2000\n+)\n// SyscallSupportLevel is a syscall support levels.\ntype SyscallSupportLevel int\n@@ -119,7 +121,7 @@ type SyscallFlagsTable struct {\n//\n// missing syscalls have the same value in enable as missingEnable to\n// avoid an extra branch in Word.\n- enable []uint32\n+ enable [maxSyscallNum + 1]uint32\n// missingEnable contains the enable bits for missing syscalls.\nmissingEnable uint32\n@@ -128,8 +130,7 @@ type SyscallFlagsTable struct {\n// Init initializes the struct, with all syscalls in table set to enable.\n//\n// max is the largest syscall number in table.\n-func (e *SyscallFlagsTable) init(table map[uintptr]Syscall, max uintptr) {\n- e.enable = make([]uint32, max+1)\n+func (e *SyscallFlagsTable) init(table map[uintptr]Syscall) {\nfor num := range table {\ne.enable[num] = syscallPresent\n}\n@@ -137,7 +138,7 @@ func (e *SyscallFlagsTable) init(table map[uintptr]Syscall, max uintptr) {\n// Word returns the enable bitfield for sysno.\nfunc (e *SyscallFlagsTable) Word(sysno uintptr) uint32 {\n- if sysno < uintptr(len(e.enable)) {\n+ if sysno <= maxSyscallNum {\nreturn atomic.LoadUint32(&e.enable[sysno])\n}\n@@ -239,7 +240,7 @@ type SyscallTable struct {\n// lookup is a fixed-size array that holds the syscalls (indexed by\n// their numbers). It is used for fast look ups.\n- lookup []SyscallFn\n+ lookup [maxSyscallNum + 1]SyscallFn\n// Emulate is a collection of instruction addresses to emulate. The\n// keys are addresses, and the values are system call numbers.\n@@ -319,24 +320,20 @@ func (s *SyscallTable) Init() {\ns.Emulate = make(map[hostarch.Addr]uintptr)\n}\n- max := s.MaxSysno() // Checked during RegisterSyscallTable.\n-\n// Initialize the fast-lookup table.\n- s.lookup = make([]SyscallFn, max+1)\nfor num, sc := range s.Table {\ns.lookup[num] = sc.Fn\n}\n// Initialize all features.\n- s.FeatureEnable.init(s.Table, max)\n+ s.FeatureEnable.init(s.Table)\n}\n// Lookup returns the syscall implementation, if one exists.\nfunc (s *SyscallTable) Lookup(sysno uintptr) SyscallFn {\n- if sysno < uintptr(len(s.lookup)) {\n+ if sysno <= maxSyscallNum {\nreturn s.lookup[sysno]\n}\n-\nreturn nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make `SyscallTable.lookup` be a fixed-size array rather than a slice.
Same for `SyscallFlagsTable.enable`.
This avoids a few extra memory reads during syscall execution, at the expense
of holding two slightly-larger-than-necessary-in-practice lookup arrays in
memory. Syscall latency overhead should be improved very slightly.
PiperOrigin-RevId: 419898650 |
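The two lookup shapes, side by side as a sketch: the slice version bounds-checks against a runtime length loaded from the slice header, while the array version compares against a compile-time constant and keeps the data inline, at the cost of roughly 16 KiB per table (2001 eight-byte function pointers). `SyscallFn` is a stand-in signature.

```go
package example

const maxSyscallNum = 2000

type SyscallFn func()

// Slice-backed lookup: each call loads the slice header (pointer and
// length) before the bounds check and the indexed load.
type sliceTable struct {
	lookup []SyscallFn
}

func (t *sliceTable) Lookup(sysno uintptr) SyscallFn {
	if sysno < uintptr(len(t.lookup)) {
		return t.lookup[sysno]
	}
	return nil
}

// Array-backed lookup: the comparison is against a constant and the data
// lives inline in the table, saving a pointer dereference per call.
type arrayTable struct {
	lookup [maxSyscallNum + 1]SyscallFn
}

func (t *arrayTable) Lookup(sysno uintptr) SyscallFn {
	if sysno <= maxSyscallNum {
		return t.lookup[sysno]
	}
	return nil
}
```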
259,898 | 05.01.2022 16:44:54 | 28,800 | 66ad3a56574a393da544b5a9e97393d03525bc8b | Update PacketImpact README
Make the README accurate by not mentioning docker containers. | [
{
"change_type": "MODIFY",
"old_path": "test/packetimpact/README.md",
"new_path": "test/packetimpact/README.md",
"diff": "Packetimpact is a tool for platform-independent network testing. It is heavily\ninspired by [packetdrill](https://github.com/google/packetdrill). It creates two\n-docker containers connected by a network. One is for the test bench, which\n-operates the test. The other is for the device-under-test (DUT), which is the\n-software being tested. The test bench communicates over the network with the DUT\n-to check correctness of the network.\n+network namespaces. One is for the test bench, which operates the test. The\n+other is for the device-under-test (DUT), which is the software being tested.\n+The test bench communicates over the network with the DUT to check correctness\n+of the network.\n### Goals\n@@ -20,13 +20,6 @@ Packetimpact aims to provide:\n## How to run packetimpact tests?\n-Build the test container image by running the following at the root of the\n-repository:\n-\n-```bash\n-$ make load-packetimpact\n-```\n-\nRun a test, e.g. `fin_wait2_timeout`, against Linux:\n```bash\n@@ -118,19 +111,19 @@ design decisions below are made to mitigate that.\n+-------------------+ +------------------------+\n```\n-Two docker containers are created by a \"runner\" script, one for the testbench\n-and the other for the device under test (DUT). The script connects the two\n-containers with a control network and test network. It also does some other\n-tasks like waiting until the DUT is ready before starting the test and disabling\n-Linux networking that would interfere with the test bench.\n+Two network namespaces are created by the test runner, one for the testbench and\n+the other for the device under test (DUT). The runner connects the two\n+namespaces with a control veth pair and test veth pair. It also does some other\n+tasks like waiting until the DUT is ready before starting the test and\n+installing iptables rules so that RST won't be generated for TCP segments from\n+the DUT that the kernel has no knowledge about.\n### DUT\n-The DUT container runs a program called the \"posix_server\". The posix_server is\n-written in c++ for maximum portability. It is compiled on the host. The script\n-that starts the containers copies it into the DUT's container and runs it. It's\n-job is to receive directions from the test bench on what actions to take. For\n-this, the posix_server does three steps in a loop:\n+The DUT namespace runs a program called the \"posix_server\". The posix_server is\n+written in c++ for maximum portability. Its job is to receive directions from\n+the test bench on what actions to take. For this, the posix_server does three\n+steps in a loop:\n1. Listen for a request from the test bench.\n2. Execute a command.\n@@ -175,9 +168,9 @@ message SocketResponse {\n### Test Bench\nThe test bench does most of the work in a test. It is a Go program that compiles\n-on the host and is copied by the script into test bench's container. It is a\n-regular [go unit test](https://golang.org/pkg/testing/) that imports the test\n-bench framework. The test bench framework is based on three basic utilities:\n+on the host and is run inside the test bench's namespace. It is a regular\n+[go unit test](https://golang.org/pkg/testing/) that imports the test bench\n+framework. 
The test bench framework is based on three basic utilities:\n* Commanding the DUT to run POSIX commands and return responses.\n* Sending raw packets to the DUT on the test network.\n@@ -245,7 +238,8 @@ func (i *Injector) Send(b []byte) {...}\n* [gopacket](https://github.com/google/gopacket) pcap has raw socket support\nbut requires cgo. cgo is not guaranteed to be portable from the host to the\ncontainer and in practice, the container doesn't recognize binaries built on\n- the host if they use cgo.\n+ the host if they use cgo. Packetimpact used to be based on docker, so the\n+ library was not adopted, now we can start to consider using the library.\n* Both gVisor and gopacket have the ability to read and write pcap files\nwithout cgo but that is insufficient here because we can't just replay pcap\nfiles, we need a more dynamic solution.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update PacketImpact README
Make the README accurate by not mentioning docker containers.
PiperOrigin-RevId: 419940405 |
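A sketch of the raw-socket approach the README alludes to for the injector, using only `golang.org/x/sys/unix` (no cgo): an `AF_PACKET` socket bound to an interface sends complete ethernet frames as plain writes. The interface index and frame contents below are placeholders.

```go
package main

import "golang.org/x/sys/unix"

// htons converts to network byte order for the AF_PACKET protocol field.
func htons(v uint16) uint16 {
	return v<<8 | v>>8
}

func main() {
	// A raw AF_PACKET socket sees and sends complete frames, which lets a
	// test bench craft arbitrary packets. Requires CAP_NET_RAW.
	fd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_RAW, int(htons(unix.ETH_P_ALL)))
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Bind to a specific interface; index 1 is used here only as a
	// placeholder for the test veth device.
	sa := &unix.SockaddrLinklayer{
		Protocol: htons(unix.ETH_P_ALL),
		Ifindex:  1,
	}
	if err := unix.Bind(fd, sa); err != nil {
		panic(err)
	}

	// Sending a frame is a plain write of the raw bytes.
	frame := make([]byte, 64) // A hypothetical, zero-filled frame.
	if _, err := unix.Write(fd, frame); err != nil {
		panic(err)
	}
}
```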
259,853 | 06.01.2022 10:33:27 | 28,800 | aa06a0cc20fda0a5cd6cbe122d29dc0756448af9 | vdso: remove empty note section
An empty note section looks weird and can confuse some tools. For example,
we've seen this in https://reviews.llvm.org/D116639. | [
{
"change_type": "MODIFY",
"old_path": "vdso/vdso_amd64.lds",
"new_path": "vdso/vdso_amd64.lds",
"diff": "@@ -43,8 +43,6 @@ SECTIONS {\n.gnu.version_d : { *(.gnu.version_d) }\n.gnu.version_r : { *(.gnu.version_r) }\n- .note : { *(.note.*) } :text :note\n-\n.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr\n.eh_frame : { KEEP (*(.eh_frame)) } :text\n@@ -77,7 +75,6 @@ SECTIONS {\nPHDRS {\ntext PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R | PF_X */\ndynamic PT_DYNAMIC FLAGS(4); /* PF_R */\n- note PT_NOTE FLAGS(4); /* PF_R */\neh_frame_hdr PT_GNU_EH_FRAME;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "vdso/vdso_arm64.lds",
"new_path": "vdso/vdso_arm64.lds",
"diff": "@@ -46,8 +46,6 @@ SECTIONS {\n.gnu.version_d : { *(.gnu.version_d) }\n.gnu.version_r : { *(.gnu.version_r) }\n- .note : { *(.note.*) } :text :note\n-\n.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr\n.eh_frame : { KEEP (*(.eh_frame)) } :text\n@@ -80,7 +78,6 @@ SECTIONS {\nPHDRS {\ntext PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R | PF_X */\ndynamic PT_DYNAMIC FLAGS(4); /* PF_R */\n- note PT_NOTE FLAGS(4); /* PF_R */\neh_frame_hdr PT_GNU_EH_FRAME;\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | vdso: remove empty note section
An empty note section looks weird and can confuse some tools. For example,
we've seen this in https://reviews.llvm.org/D116639.
PiperOrigin-RevId: 420093551 |
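One way to confirm the effect of this change on a built vDSO image, sketched with the standard `debug/elf` package (the image path is a placeholder passed on the command line): after removing the `note` entry from `PHDRS`, no `PT_NOTE` program header should remain.

```go
package main

import (
	"debug/elf"
	"fmt"
	"os"
)

func main() {
	// Usage: go run . path/to/vdso.so
	f, err := elf.Open(os.Args[1])
	if err != nil {
		panic(err)
	}
	defer f.Close()

	found := false
	for _, p := range f.Progs {
		if p.Type == elf.PT_NOTE {
			found = true
			fmt.Printf("PT_NOTE at offset %#x, filesz %d\n", p.Off, p.Filesz)
		}
	}
	if !found {
		fmt.Println("no PT_NOTE program header (expected after this change)")
	}
}
```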
259,907 | 06.01.2022 14:51:24 | 28,800 | d7dbf65873e2e05723ea23fbfa414742342456e9 | Add Bind RPC to gvisor's 9P protocol and implement it in runsc/fsgofer.
This new RPC allows a client to bind (and hence create) a UDS on the
host filesystem. Subsequent changes will add the ability to listen and accept
on such a bound UDS. | [
{
"change_type": "MODIFY",
"old_path": "pkg/p9/client_file.go",
"new_path": "pkg/p9/client_file.go",
"diff": "@@ -348,6 +348,37 @@ func (c *clientFile) Open(flags OpenFlags) (*fd.FD, QID, uint32, error) {\nreturn rlopen.File, rlopen.QID, rlopen.IoUnit, nil\n}\n+func (c *clientFile) Bind(sockType uint32, sockName string, uid UID, gid GID) (File, QID, AttrMask, Attr, error) {\n+ if atomic.LoadUint32(&c.closed) != 0 {\n+ return nil, QID{}, AttrMask{}, Attr{}, unix.EBADF\n+ }\n+\n+ if !versionSupportsBind(c.client.version) {\n+ return nil, QID{}, AttrMask{}, Attr{}, unix.EOPNOTSUPP\n+ }\n+\n+ fid, ok := c.client.fidPool.Get()\n+ if !ok {\n+ return nil, QID{}, AttrMask{}, Attr{}, ErrOutOfFIDs\n+ }\n+\n+ tbind := Tbind{\n+ SockType: sockType,\n+ SockName: sockName,\n+ UID: uid,\n+ GID: gid,\n+ Directory: c.fid,\n+ NewFID: FID(fid),\n+ }\n+ rbind := Rbind{}\n+ if err := c.client.sendRecv(&tbind, &rbind); err != nil {\n+ c.client.fidPool.Put(fid)\n+ return nil, QID{}, AttrMask{}, Attr{}, err\n+ }\n+\n+ return c.client.newFile(FID(fid)), rbind.QID, rbind.Valid, rbind.Attr, nil\n+}\n+\n// Connect implements File.Connect.\nfunc (c *clientFile) Connect(flags ConnectFlags) (*fd.FD, error) {\nif atomic.LoadUint32(&c.closed) != 0 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/file.go",
"new_path": "pkg/p9/file.go",
"diff": "@@ -294,6 +294,17 @@ type File interface {\n// On the server, Flush has a read concurrency guarantee.\nFlush() error\n+ // Bind binds to a host unix domain socket. If successful, it creates a\n+ // socket file on the host filesystem and returns a File for the newly\n+ // created socket file. The File implementation must save the bound socket\n+ // FD so that subsequent Listen and Accept operations on the File can be\n+ // served.\n+ //\n+ // Bind is an extension to 9P2000.L, see version.go.\n+ //\n+ // On the server, UnlinkAt has a write concurrency guarantee.\n+ Bind(sockType uint32, sockName string, uid UID, gid GID) (File, QID, AttrMask, Attr, error)\n+\n// Connect establishes a new host-socket backed connection with a\n// socket. A File does not need to be opened before it can be connected\n// and it can be connected to multiple times resulting in a unique\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/handlers.go",
"new_path": "pkg/p9/handlers.go",
"diff": "@@ -1398,6 +1398,58 @@ func (t *Tumknod) handle(cs *connState) message {\nreturn &Rumknod{*rmknod}\n}\n+// handle implements handler.handle.\n+func (t *Tbind) handle(cs *connState) message {\n+ if err := checkSafeName(t.SockName); err != nil {\n+ return newErr(err)\n+ }\n+\n+ ref, ok := cs.LookupFID(t.Directory)\n+ if !ok {\n+ return newErr(unix.EBADF)\n+ }\n+ defer ref.DecRef()\n+\n+ var (\n+ sockRef *fidRef\n+ qid QID\n+ valid AttrMask\n+ attr Attr\n+ )\n+ if err := ref.safelyWrite(func() (err error) {\n+ // Don't allow creation from non-directories or deleted directories.\n+ if ref.isDeleted() || !ref.mode.IsDir() {\n+ return unix.EINVAL\n+ }\n+\n+ // Not allowed on open directories.\n+ if ref.opened {\n+ return unix.EINVAL\n+ }\n+\n+ var sockF File\n+ sockF, qid, valid, attr, err = ref.file.Bind(t.SockType, t.SockName, t.UID, t.GID)\n+ if err != nil {\n+ return err\n+ }\n+\n+ sockRef = &fidRef{\n+ server: cs.server,\n+ parent: ref,\n+ file: sockF,\n+ mode: ModeSocket,\n+ pathNode: ref.pathNode.pathNodeFor(t.SockName),\n+ }\n+ ref.pathNode.addChild(sockRef, t.SockName)\n+ ref.IncRef() // Acquire parent reference.\n+ return nil\n+ }); err != nil {\n+ return newErr(err)\n+ }\n+ cs.InsertFID(t.NewFID, sockRef)\n+ return &Rbind{QID: qid, Valid: valid, Attr: attr}\n+}\n+\n// handle implements handler.handle.\nfunc (t *Tlconnect) handle(cs *connState) message {\nref, ok := cs.LookupFID(t.FID)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/messages.go",
"new_path": "pkg/p9/messages.go",
"diff": "@@ -2438,6 +2438,95 @@ func (r *Rusymlink) String() string {\nreturn fmt.Sprintf(\"Rusymlink{%v}\", &r.Rsymlink)\n}\n+// Tbind is a bind request.\n+type Tbind struct {\n+ // Directory is the directory inside which the bound socket file should be\n+ // created.\n+ Directory FID\n+\n+ // SockType is the type of socket to be used. This is passed as an argument\n+ // to socket(2).\n+ SockType uint32\n+\n+ // SockName is the name of the socket file to be created.\n+ SockName string\n+\n+ // UID is the owning user.\n+ UID UID\n+\n+ // GID is the owning group.\n+ GID GID\n+\n+ // NewFID is the resulting FID for the socket file.\n+ NewFID FID\n+}\n+\n+// decode implements encoder.decode.\n+func (t *Tbind) decode(b *buffer) {\n+ t.Directory = b.ReadFID()\n+ t.SockType = b.Read32()\n+ t.SockName = b.ReadString()\n+ t.UID = b.ReadUID()\n+ t.GID = b.ReadGID()\n+ t.NewFID = b.ReadFID()\n+}\n+\n+// encode implements encoder.encode.\n+func (t *Tbind) encode(b *buffer) {\n+ b.WriteFID(t.Directory)\n+ b.Write32(t.SockType)\n+ b.WriteString(t.SockName)\n+ b.WriteUID(t.UID)\n+ b.WriteGID(t.GID)\n+ b.WriteFID(t.NewFID)\n+}\n+\n+// Type implements message.Type.\n+func (*Tbind) Type() MsgType {\n+ return MsgTbind\n+}\n+\n+// String implements fmt.Stringer.\n+func (t *Tbind) String() string {\n+ return fmt.Sprintf(\"Tbind{Directory: %d, SockType: %d, SockName: %s, UID: %d, GID: %d, NewFID: %d}\", t.Directory, t.SockType, t.SockName, t.UID, t.GID, t.NewFID)\n+}\n+\n+// Rbind is a bind response.\n+type Rbind struct {\n+ // QID is the resulting QID of the created socket file.\n+ QID QID\n+\n+ // Valid indicates which fields are valid.\n+ Valid AttrMask\n+\n+ // Attr is the set of attributes of the created socket file.\n+ Attr Attr\n+}\n+\n+// decode implements encoder.decode.\n+func (r *Rbind) decode(b *buffer) {\n+ r.QID.decode(b)\n+ r.Valid.decode(b)\n+ r.Attr.decode(b)\n+}\n+\n+// encode implements encoder.encode.\n+func (r *Rbind) encode(b *buffer) {\n+ r.QID.encode(b)\n+ r.Valid.encode(b)\n+ r.Attr.encode(b)\n+}\n+\n+// Type implements message.Type.\n+func (*Rbind) Type() MsgType {\n+ return MsgRbind\n+}\n+\n+// String implements fmt.Stringer.\n+func (r *Rbind) String() string {\n+ return fmt.Sprintf(\"Rbind{QID: %s, Valid: %v, Attr: %s}\", r.QID, r.Valid, r.Attr)\n+}\n+\n// Tlconnect is a connect request.\ntype Tlconnect struct {\n// FID is the FID to be connected.\n@@ -2785,6 +2874,8 @@ func init() {\nmsgRegistry.register(MsgRumknod, func() message { return &Rumknod{} })\nmsgRegistry.register(MsgTusymlink, func() message { return &Tusymlink{} })\nmsgRegistry.register(MsgRusymlink, func() message { return &Rusymlink{} })\n+ msgRegistry.register(MsgTbind, func() message { return &Tbind{} })\n+ msgRegistry.register(MsgRbind, func() message { return &Rbind{} })\nmsgRegistry.register(MsgTlconnect, func() message { return &Tlconnect{} })\nmsgRegistry.register(MsgRlconnect, func() message { return &Rlconnect{} })\nmsgRegistry.register(MsgTallocate, func() message { return &Tallocate{} })\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/messages_test.go",
"new_path": "pkg/p9/messages_test.go",
"diff": "@@ -118,6 +118,17 @@ func TestEncodeDecode(t *testing.T) {\nQID: QID{Type: 1},\nIoUnit: 2,\n},\n+ &Tbind{\n+ Directory: 1,\n+ SockType: 2,\n+ SockName: \"name\",\n+ GID: 3,\n+ UID: 4,\n+ NewFID: 5,\n+ },\n+ &Rbind{\n+ QID: QID{Type: 1},\n+ },\n&Tlconnect{\nFID: 1,\n},\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/p9.go",
"new_path": "pkg/p9/p9.go",
"diff": "@@ -404,6 +404,8 @@ const (\nMsgRsetattrclunk MsgType = 141\nMsgTmultigetattr MsgType = 142\nMsgRmultigetattr MsgType = 143\n+ MsgTbind MsgType = 144\n+ MsgRbind MsgType = 145\nMsgTchannel MsgType = 250\nMsgRchannel MsgType = 251\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/version.go",
"new_path": "pkg/p9/version.go",
"diff": "@@ -185,3 +185,9 @@ func versionSupportsTsetattrclunk(v uint32) bool {\nfunc versionSupportsTmultiGetAttr(v uint32) bool {\nreturn v >= 13\n}\n+\n+// versionSupportsBind returns true if version v supports the Tbind message.\n+func versionSupportsBind(v uint32) bool {\n+ // TODO(b/194709873): Bump version and gate with that.\n+ return false\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/config/config.go",
"new_path": "runsc/config/config.go",
"diff": "@@ -75,7 +75,8 @@ type Config struct {\n// Verity is whether there's one or more verity file system to mount.\nVerity bool `flag:\"verity\"`\n- // FSGoferHostUDS enables the gofer to mount a host UDS.\n+ // FSGoferHostUDS enables the gofer to mount a host UDS and connect to it or\n+ // bind (create) a host UDS and serve it.\nFSGoferHostUDS bool `flag:\"fsgofer-host-uds\"`\n// Network indicates what type of network to use.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer.go",
"new_path": "runsc/fsgofer/fsgofer.go",
"diff": "@@ -47,6 +47,9 @@ const (\nopenFlags = unix.O_NOFOLLOW | unix.O_CLOEXEC\nallowedOpenFlags = unix.O_TRUNC\n+\n+ // UNIX_PATH_MAX as defined in include/uapi/linux/un.h.\n+ unixPathMax = 108\n)\n// verityXattrs are the extended attributes used by verity file system.\n@@ -70,7 +73,8 @@ type Config struct {\n// PanicOnWrite panics on attempts to write to RO mounts.\nPanicOnWrite bool\n- // HostUDS signals whether the gofer can mount a host's UDS.\n+ // HostUDS signals whether the gofer can mount a host's UDS and connect to it\n+ // or bind (create) a host UDS and serve it.\nHostUDS bool\n// EnableVerityXattr allows access to extended attributes used by the\n@@ -1129,6 +1133,80 @@ func (l *localFile) Flush() error {\nreturn nil\n}\n+// Bind implements p9.File.Bind.\n+func (l *localFile) Bind(sockType uint32, sockName string, uid p9.UID, gid p9.GID) (p9.File, p9.QID, p9.AttrMask, p9.Attr, error) {\n+ if !l.attachPoint.conf.HostUDS {\n+ // Bind on host UDS is not allowed. As per mknod(2), which is invoked as\n+ // part of bind(2), if \"the filesystem containing pathname does not support\n+ // the type of node requested.\" then EPERM must be returned.\n+ return nil, p9.QID{}, p9.AttrMask{}, p9.Attr{}, unix.EPERM\n+ }\n+\n+ // TODO(gvisor.dev/issue/1003): Due to different app vs replacement\n+ // mappings, the app path may have fit in the sockaddr, but we can't\n+ // fit f.path in our sockaddr. We'd need to redirect through a shorter\n+ // path in order to actually connect to this socket.\n+ sockPath := path.Join(l.hostPath, sockName)\n+ if len(sockPath) >= unixPathMax {\n+ return nil, p9.QID{}, p9.AttrMask{}, p9.Attr{}, unix.EINVAL\n+ }\n+\n+ // Create socket only for supported types.\n+ switch sockType {\n+ case unix.SOCK_STREAM, unix.SOCK_DGRAM, unix.SOCK_SEQPACKET:\n+ default:\n+ return nil, p9.QID{}, p9.AttrMask{}, p9.Attr{}, unix.ENXIO\n+ }\n+ sock, err := unix.Socket(unix.AF_UNIX, int(sockType), 0)\n+ if err != nil {\n+ return nil, p9.QID{}, p9.AttrMask{}, p9.Attr{}, extractErrno(err)\n+ }\n+\n+ // Revert operations on error paths.\n+ didBind := false\n+ cu := cleanup.Make(func() {\n+ _ = unix.Close(sock)\n+ if didBind {\n+ if err := unix.Unlinkat(l.file.FD(), sockName, 0); err != nil {\n+ log.Warningf(\"error unlinking file %q after failure: %v\", sockPath, err)\n+ }\n+ }\n+ })\n+ defer cu.Clean()\n+\n+ // socket FD must be non blocking because RPC operations like Accept on this\n+ // socket must be non blocking.\n+ if err := unix.SetNonblock(sock, true); err != nil {\n+ return nil, p9.QID{}, p9.AttrMask{}, p9.Attr{}, extractErrno(err)\n+ }\n+\n+ // Bind at the given path which should create the socket file.\n+ if err := unix.Bind(sock, &unix.SockaddrUnix{Name: sockPath}); err != nil {\n+ return nil, p9.QID{}, p9.AttrMask{}, p9.Attr{}, extractErrno(err)\n+ }\n+ didBind = true\n+\n+ // Open socket to change ownership.\n+ tempSockFD, err := fd.OpenAt(l.file, sockName, unix.O_PATH|openFlags, 0)\n+ if err != nil {\n+ return nil, p9.QID{}, p9.AttrMask{}, p9.Attr{}, extractErrno(err)\n+ }\n+ defer tempSockFD.Close()\n+\n+ if _, err = setOwnerIfNeeded(tempSockFD.FD(), uid, gid); err != nil {\n+ return nil, p9.QID{}, p9.AttrMask{}, p9.Attr{}, extractErrno(err)\n+ }\n+\n+ // Generate file for this socket by walking on it.\n+ qid, sockF, valid, attr, err := l.WalkGetAttr([]string{sockName})\n+ if err != nil {\n+ return nil, p9.QID{}, p9.AttrMask{}, p9.Attr{}, err\n+ }\n+\n+ cu.Release()\n+ return &socketLocalFile{localFile: sockF.(*localFile), sock: sock}, qid[0], valid, attr, nil\n+}\n+\n// 
Connect implements p9.File.\nfunc (l *localFile) Connect(flags p9.ConnectFlags) (*fd.FD, error) {\nif !l.attachPoint.conf.HostUDS {\n@@ -1139,8 +1217,7 @@ func (l *localFile) Connect(flags p9.ConnectFlags) (*fd.FD, error) {\n// mappings, the app path may have fit in the sockaddr, but we can't\n// fit f.path in our sockaddr. We'd need to redirect through a shorter\n// path in order to actually connect to this socket.\n- const UNIX_PATH_MAX = 108 // defined in afunix.h\n- if len(l.hostPath) > UNIX_PATH_MAX {\n+ if len(l.hostPath) >= unixPathMax {\nreturn nil, unix.ECONNREFUSED\n}\n@@ -1175,7 +1252,7 @@ func (l *localFile) Connect(flags p9.ConnectFlags) (*fd.FD, error) {\nreturn fd.New(f), nil\n}\n-// Close implements p9.File.\n+// Close implements p9.File.Close.\nfunc (l *localFile) Close() error {\nl.mode = invalidMode\nerr := l.file.Close()\n@@ -1290,3 +1367,22 @@ func (l *localFile) MultiGetAttr(names []string) ([]p9.FullStat, error) {\n}\nreturn stats, nil\n}\n+\n+// socketLocalFile is an extension of localFile which is only created via Bind\n+// and additionally implements Listen and Accept. It also tracks the lifecycle\n+// of the socket FD created by socket(2) in addition to the FD opened on the\n+// socket file itself.\n+type socketLocalFile struct {\n+ *localFile\n+ sock int\n+}\n+\n+// Close implements p9.File.Close.\n+func (l *socketLocalFile) Close() error {\n+ err := l.localFile.Close()\n+ err2 := unix.Close(l.sock)\n+ if err != nil {\n+ return err\n+ }\n+ return err2\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer_test.go",
"new_path": "runsc/fsgofer/fsgofer_test.go",
"diff": "@@ -796,6 +796,47 @@ func TestReaddir(t *testing.T) {\n})\n}\n+func TestUDS(t *testing.T) {\n+ config := Config{ROMount: false, HostUDS: true}\n+ dir, err := ioutil.TempDir(\"\", \"root-\")\n+ if err != nil {\n+ t.Fatalf(\"ioutil.TempDir() failed, err: %v\", err)\n+ }\n+ defer os.RemoveAll(dir)\n+\n+ // First attach with writable configuration to setup tree.\n+ a, err := NewAttachPoint(dir, config)\n+ if err != nil {\n+ t.Fatalf(\"NewAttachPoint failed: %v\", err)\n+ }\n+ root, err := a.Attach()\n+ if err != nil {\n+ t.Fatalf(\"attach failed, err: %v\", err)\n+ }\n+ defer root.Close()\n+\n+ name := \"sock\"\n+ uid := p9.UID(os.Getuid())\n+ gid := p9.GID(os.Getgid())\n+ sockF, _, valid, attr, err := root.Bind(unix.SOCK_STREAM, name, uid, gid)\n+ if err != nil {\n+ t.Fatalf(\"Bind failed: %v\", err)\n+ }\n+ defer sockF.Close()\n+\n+ if valid.Mode && !attr.Mode.IsSocket() {\n+ t.Errorf(\"socket file mode is incorrect: want %d, got %d\", p9.ModeSocket, attr.Mode)\n+ }\n+ if valid.UID && attr.UID != uid {\n+ t.Errorf(\"socket file uid is incorrect: want %d, got %d\", uid, attr.UID)\n+ }\n+ if valid.GID && attr.GID != gid {\n+ t.Errorf(\"socket file gid is incorrect: want %d, got %d\", gid, attr.GID)\n+ }\n+ // TODO(b/194709873): Once listen and accept are implemented, test connecting\n+ // and accepting a connection using sockF.\n+}\n+\n// Test that attach point can be written to when it points to a file, e.g.\n// /etc/hosts.\nfunc TestAttachFile(t *testing.T) {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/lisafs.go",
"new_path": "runsc/fsgofer/lisafs.go",
"diff": "@@ -655,7 +655,7 @@ func (fd *controlFDLisa) Connect(c *lisafs.Connection, sockType uint32) (int, er\n// hostPath in our sockaddr. We'd need to redirect through a shorter path\n// in order to actually connect to this socket.\nhostPath := fd.FilePathLocked()\n- if len(hostPath) > 108 { // UNIX_PATH_MAX = 108 is defined in afunix.h.\n+ if len(hostPath) >= unixPathMax {\nreturn -1, unix.ECONNREFUSED\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add Bind RPC to gvisor's 9P protocol and implement it in runsc/fsgofer.
This new RPC allows a client to bind (and hence create) a UDS on the
host filesystem. Subsequent changes will add the ability to listen and accept
on such a bound UDS.
PiperOrigin-RevId: 420149313 |
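The host-side steps of `Bind`, reduced to a standalone sketch grounded in the fsgofer diff above: check that the path fits in `sun_path`, create the socket, set it non-blocking so later RPC-driven `Accept`s don't block, and bind, which creates the socket file. Error handling is simplified and the paths are placeholders.

```go
package main

import (
	"fmt"
	"path/filepath"

	"golang.org/x/sys/unix"
)

const unixPathMax = 108 // From include/uapi/linux/un.h, as in the commit.

// bindUDS mirrors the gofer-side steps in miniature.
func bindUDS(dir, name string, sockType int) (int, error) {
	sockPath := filepath.Join(dir, name)
	if len(sockPath) >= unixPathMax {
		return -1, unix.EINVAL // Path won't fit in sockaddr_un.
	}
	sock, err := unix.Socket(unix.AF_UNIX, sockType, 0)
	if err != nil {
		return -1, err
	}
	// Non-blocking, so Accept served over the protocol can't hang the gofer.
	if err := unix.SetNonblock(sock, true); err != nil {
		unix.Close(sock)
		return -1, err
	}
	// Bind creates the socket file on the host filesystem.
	if err := unix.Bind(sock, &unix.SockaddrUnix{Name: sockPath}); err != nil {
		unix.Close(sock)
		return -1, err
	}
	return sock, nil
}

func main() {
	sock, err := bindUDS("/tmp", "example.sock", unix.SOCK_STREAM)
	if err != nil {
		panic(err)
	}
	defer unix.Close(sock)
	fmt.Println("bound host UDS; Listen/Accept would be served next")
}
```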
260,004 | 06.01.2022 17:08:48 | 28,800 | 381a17d92329370c8807ba14962746debb06ae83 | Support REJECT hook | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/header/icmpv4.go",
"new_path": "pkg/tcpip/header/icmpv4.go",
"diff": "@@ -102,13 +102,17 @@ const (\nICMPv4ReassemblyTimeout ICMPv4Code = 1\n)\n-// ICMP codes for ICMPv4 Destination Unreachable messages as defined in RFC 792.\n+// ICMP codes for ICMPv4 Destination Unreachable messages as defined in RFC 792,\n+// RFC 1122 section 3.2.2.1 and RFC 1812 section 5.2.7.1.\nconst (\nICMPv4NetUnreachable ICMPv4Code = 0\nICMPv4HostUnreachable ICMPv4Code = 1\nICMPv4ProtoUnreachable ICMPv4Code = 2\nICMPv4PortUnreachable ICMPv4Code = 3\nICMPv4FragmentationNeeded ICMPv4Code = 4\n+ ICMPv4NetProhibited ICMPv4Code = 9\n+ ICMPv4HostProhibited ICMPv4Code = 10\n+ ICMPv4AdminProhibited ICMPv4Code = 13\n)\n// ICMPv4UnusedCode is a code to use in ICMP messages where no code is needed.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/icmp.go",
"new_path": "pkg/tcpip/network/ipv4/icmp.go",
"diff": "@@ -410,6 +410,24 @@ type icmpReason interface {\nisICMPReason()\n}\n+// icmpReasonNetworkProhibited is an error where the destination network is\n+// prohibited.\n+type icmpReasonNetworkProhibited struct{}\n+\n+func (*icmpReasonNetworkProhibited) isICMPReason() {}\n+\n+// icmpReasonHostProhibited is an error where the destination host is\n+// prohibited.\n+type icmpReasonHostProhibited struct{}\n+\n+func (*icmpReasonHostProhibited) isICMPReason() {}\n+\n+// icmpReasonAdministrativelyProhibited is an error where the destination is\n+// administratively prohibited.\n+type icmpReasonAdministrativelyProhibited struct{}\n+\n+func (*icmpReasonAdministrativelyProhibited) isICMPReason() {}\n+\n// icmpReasonPortUnreachable is an error where the transport protocol has no\n// listener and no alternative means to inform the sender.\ntype icmpReasonPortUnreachable struct{}\n@@ -560,6 +578,12 @@ func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer, deliv\nsent := netEP.stats.icmp.packetsSent\nicmpType, icmpCode, counter, pointer := func() (header.ICMPv4Type, header.ICMPv4Code, tcpip.MultiCounterStat, byte) {\nswitch reason := reason.(type) {\n+ case *icmpReasonNetworkProhibited:\n+ return header.ICMPv4DstUnreachable, header.ICMPv4NetProhibited, sent.dstUnreachable, 0\n+ case *icmpReasonHostProhibited:\n+ return header.ICMPv4DstUnreachable, header.ICMPv4HostProhibited, sent.dstUnreachable, 0\n+ case *icmpReasonAdministrativelyProhibited:\n+ return header.ICMPv4DstUnreachable, header.ICMPv4AdminProhibited, sent.dstUnreachable, 0\ncase *icmpReasonPortUnreachable:\nreturn header.ICMPv4DstUnreachable, header.ICMPv4PortUnreachable, sent.dstUnreachable, 0\ncase *icmpReasonProtoUnreachable:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -1129,6 +1129,7 @@ func (e *endpoint) Stats() stack.NetworkEndpointStats {\n}\nvar _ stack.NetworkProtocol = (*protocol)(nil)\n+var _ stack.RejectIPv4WithHandler = (*protocol)(nil)\nvar _ fragmentation.TimeoutHandler = (*protocol)(nil)\ntype protocol struct {\n@@ -1285,6 +1286,26 @@ func (p *protocol) allowICMPReply(icmpType header.ICMPv4Type, code header.ICMPv4\nreturn true\n}\n+// SendRejectionError implements stack.RejectIPv4WithHandler.\n+func (p *protocol) SendRejectionError(pkt *stack.PacketBuffer, rejectWith stack.RejectIPv4WithICMPType, inputHook bool) tcpip.Error {\n+ switch rejectWith {\n+ case stack.RejectIPv4WithICMPNetUnreachable:\n+ return p.returnError(&icmpReasonNetworkUnreachable{}, pkt, inputHook)\n+ case stack.RejectIPv4WithICMPHostUnreachable:\n+ return p.returnError(&icmpReasonHostUnreachable{}, pkt, inputHook)\n+ case stack.RejectIPv4WithICMPPortUnreachable:\n+ return p.returnError(&icmpReasonPortUnreachable{}, pkt, inputHook)\n+ case stack.RejectIPv4WithICMPNetProhibited:\n+ return p.returnError(&icmpReasonNetworkProhibited{}, pkt, inputHook)\n+ case stack.RejectIPv4WithICMPHostProhibited:\n+ return p.returnError(&icmpReasonHostProhibited{}, pkt, inputHook)\n+ case stack.RejectIPv4WithICMPAdminProhibited:\n+ return p.returnError(&icmpReasonAdministrativelyProhibited{}, pkt, inputHook)\n+ default:\n+ panic(fmt.Sprintf(\"unhandled %[1]T = %[1]d\", rejectWith))\n+ }\n+}\n+\n// calculateNetworkMTU calculates the network-layer payload MTU based on the\n// link-layer payload mtu.\nfunc calculateNetworkMTU(linkMTU, networkHeaderSize uint32) (uint32, tcpip.Error) {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/icmp.go",
"new_path": "pkg/tcpip/network/ipv6/icmp.go",
"diff": "@@ -678,7 +678,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r\n})\ndefer replyPkt.DecRef()\nicmp := header.ICMPv6(replyPkt.TransportHeader().Push(header.ICMPv6EchoMinimumSize))\n- pkt.TransportProtocolNumber = header.ICMPv6ProtocolNumber\n+ replyPkt.TransportProtocolNumber = header.ICMPv6ProtocolNumber\ncopy(icmp, h)\nicmp.SetType(header.ICMPv6EchoReply)\ndataRange := replyPkt.Data().AsRange()\n@@ -964,6 +964,16 @@ func (p *icmpReasonParameterProblem) respondsToMulticast() bool {\nreturn p.respondToMulticast\n}\n+// icmpReasonAdministrativelyProhibited is an error where the destination is\n+// administratively prohibited.\n+type icmpReasonAdministrativelyProhibited struct{}\n+\n+func (*icmpReasonAdministrativelyProhibited) isICMPReason() {}\n+\n+func (*icmpReasonAdministrativelyProhibited) respondsToMulticast() bool {\n+ return false\n+}\n+\n// icmpReasonPortUnreachable is an error where the transport protocol has no\n// listener and no alternative means to inform the sender.\ntype icmpReasonPortUnreachable struct{}\n@@ -1104,6 +1114,8 @@ func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer, deliv\nswitch reason := reason.(type) {\ncase *icmpReasonParameterProblem:\nreturn header.ICMPv6ParamProblem, reason.code, sent.paramProblem, reason.pointer\n+ case *icmpReasonAdministrativelyProhibited:\n+ return header.ICMPv6DstUnreachable, header.ICMPv6Prohibited, sent.dstUnreachable, 0\ncase *icmpReasonPortUnreachable:\nreturn header.ICMPv6DstUnreachable, header.ICMPv6PortUnreachable, sent.dstUnreachable, 0\ncase *icmpReasonNetUnreachable:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ipv6.go",
"new_path": "pkg/tcpip/network/ipv6/ipv6.go",
"diff": "@@ -1899,6 +1899,7 @@ func (e *endpoint) Stats() stack.NetworkEndpointStats {\n}\nvar _ stack.NetworkProtocol = (*protocol)(nil)\n+var _ stack.RejectIPv6WithHandler = (*protocol)(nil)\nvar _ fragmentation.TimeoutHandler = (*protocol)(nil)\ntype protocol struct {\n@@ -2124,6 +2125,22 @@ func (p *protocol) allowICMPReply(icmpType header.ICMPv6Type) bool {\nreturn true\n}\n+// SendRejectionError implements stack.RejectIPv6WithHandler.\n+func (p *protocol) SendRejectionError(pkt *stack.PacketBuffer, rejectWith stack.RejectIPv6WithICMPType, inputHook bool) tcpip.Error {\n+ switch rejectWith {\n+ case stack.RejectIPv6WithICMPNoRoute:\n+ return p.returnError(&icmpReasonNetUnreachable{}, pkt, inputHook)\n+ case stack.RejectIPv6WithICMPAddrUnreachable:\n+ return p.returnError(&icmpReasonHostUnreachable{}, pkt, inputHook)\n+ case stack.RejectIPv6WithICMPPortUnreachable:\n+ return p.returnError(&icmpReasonPortUnreachable{}, pkt, inputHook)\n+ case stack.RejectIPv6WithICMPAdminProhibited:\n+ return p.returnError(&icmpReasonAdministrativelyProhibited{}, pkt, inputHook)\n+ default:\n+ panic(fmt.Sprintf(\"unhandled %[1]T = %[1]d\", rejectWith))\n+ }\n+}\n+\n// calculateNetworkMTU calculates the network-layer payload MTU based on the\n// link-layer payload MTU and the length of every IPv6 header.\n// Note that this is different than the Payload Length field of the IPv6 header,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables_targets.go",
"new_path": "pkg/tcpip/stack/iptables_targets.go",
"diff": "@@ -45,6 +45,88 @@ func (*DropTarget) Action(*PacketBuffer, Hook, *Route, AddressableEndpoint) (Rul\nreturn RuleDrop, 0\n}\n+// RejectIPv4WithHandler handles rejecting a packet.\n+type RejectIPv4WithHandler interface {\n+ // SendRejectionError sends an error packet in response to the packet.\n+ SendRejectionError(pkt *PacketBuffer, rejectWith RejectIPv4WithICMPType, inputHook bool) tcpip.Error\n+}\n+\n+// RejectIPv4WithICMPType indicates the type of ICMP error that should be sent.\n+type RejectIPv4WithICMPType int\n+\n+// The types of errors that may be returned when rejecting IPv4 packets.\n+const (\n+ _ RejectIPv4WithICMPType = iota\n+ RejectIPv4WithICMPNetUnreachable\n+ RejectIPv4WithICMPHostUnreachable\n+ RejectIPv4WithICMPPortUnreachable\n+ RejectIPv4WithICMPNetProhibited\n+ RejectIPv4WithICMPHostProhibited\n+ RejectIPv4WithICMPAdminProhibited\n+)\n+\n+// RejectIPv4Target drops packets and sends back an error packet in response to the\n+// matched packet.\n+type RejectIPv4Target struct {\n+ Handler RejectIPv4WithHandler\n+ RejectWith RejectIPv4WithICMPType\n+}\n+\n+// Action implements Target.Action.\n+func (rt *RejectIPv4Target) Action(pkt *PacketBuffer, hook Hook, _ *Route, _ AddressableEndpoint) (RuleVerdict, int) {\n+ switch hook {\n+ case Input, Forward, Output:\n+ // There is nothing reasonable for us to do in response to an error here;\n+ // we already drop the packet.\n+ _ = rt.Handler.SendRejectionError(pkt, rt.RejectWith, hook == Input)\n+ return RuleDrop, 0\n+ case Prerouting, Postrouting:\n+ panic(fmt.Sprintf(\"%s hook not supported for REDIRECT\", hook))\n+ default:\n+ panic(fmt.Sprintf(\"unhandled hook = %s\", hook))\n+ }\n+}\n+\n+// RejectIPv6WithHandler handles rejecting a packet.\n+type RejectIPv6WithHandler interface {\n+ // SendRejectionError sends an error packet in response to the packet.\n+ SendRejectionError(pkt *PacketBuffer, rejectWith RejectIPv6WithICMPType, forwardingHook bool) tcpip.Error\n+}\n+\n+// RejectIPv6WithICMPType indicates the type of ICMP error that should be sent.\n+type RejectIPv6WithICMPType int\n+\n+// The types of errors that may be returned when rejecting IPv6 packets.\n+const (\n+ _ RejectIPv6WithICMPType = iota\n+ RejectIPv6WithICMPNoRoute\n+ RejectIPv6WithICMPAddrUnreachable\n+ RejectIPv6WithICMPPortUnreachable\n+ RejectIPv6WithICMPAdminProhibited\n+)\n+\n+// RejectIPv6Target drops packets and sends back an error packet in response to the\n+// matched packet.\n+type RejectIPv6Target struct {\n+ Handler RejectIPv6WithHandler\n+ RejectWith RejectIPv6WithICMPType\n+}\n+\n+// Action implements Target.Action.\n+func (rt *RejectIPv6Target) Action(pkt *PacketBuffer, hook Hook, _ *Route, _ AddressableEndpoint) (RuleVerdict, int) {\n+ switch hook {\n+ case Input, Forward, Output:\n+ // There is nothing reasonable for us to do in response to an error here;\n+ // we already drop the packet.\n+ _ = rt.Handler.SendRejectionError(pkt, rt.RejectWith, hook == Input)\n+ return RuleDrop, 0\n+ case Prerouting, Postrouting:\n+ panic(fmt.Sprintf(\"%s hook not supported for REDIRECT\", hook))\n+ default:\n+ panic(fmt.Sprintf(\"unhandled hook = %s\", hook))\n+ }\n+}\n+\n// ErrorTarget logs an error and drops the packet. It represents a target that\n// should be unreachable.\ntype ErrorTarget struct {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tests/integration/iptables_test.go",
"new_path": "pkg/tcpip/tests/integration/iptables_test.go",
"diff": "@@ -3037,3 +3037,295 @@ func TestLocallyRoutedPackets(t *testing.T) {\n})\n}\n}\n+\n+type icmpv4Matcher struct {\n+ icmpType header.ICMPv4Type\n+}\n+\n+func (m *icmpv4Matcher) Match(_ stack.Hook, pkt *stack.PacketBuffer, _, _ string) (matches bool, hotdrop bool) {\n+ if pkt.NetworkProtocolNumber != header.IPv4ProtocolNumber {\n+ return false, false\n+ }\n+\n+ if pkt.TransportProtocolNumber != header.ICMPv4ProtocolNumber {\n+ return false, false\n+ }\n+\n+ return header.ICMPv4(pkt.TransportHeader().View()).Type() == m.icmpType, false\n+}\n+\n+type icmpv6Matcher struct {\n+ icmpType header.ICMPv6Type\n+}\n+\n+func (m *icmpv6Matcher) Match(_ stack.Hook, pkt *stack.PacketBuffer, _, _ string) (matches bool, hotdrop bool) {\n+ if pkt.NetworkProtocolNumber != header.IPv6ProtocolNumber {\n+ return false, false\n+ }\n+\n+ if pkt.TransportProtocolNumber != header.ICMPv6ProtocolNumber {\n+ return false, false\n+ }\n+\n+ return header.ICMPv6(pkt.TransportHeader().View()).Type() == m.icmpType, false\n+}\n+\n+func TestRejectWith(t *testing.T) {\n+ type natHook struct {\n+ hook stack.Hook\n+ dstAddr tcpip.Address\n+ matcher stack.Matcher\n+\n+ errorICMPDstAddr tcpip.Address\n+ errorICMPPayload buffer.View\n+ }\n+\n+ type rejectWithVal struct {\n+ name string\n+ val int\n+ errorICMPCode uint8\n+ }\n+\n+ rxICMPv4EchoRequest := func(dst tcpip.Address) buffer.View {\n+ return utils.ICMPv4Echo(utils.Host1IPv4Addr.AddressWithPrefix.Address, dst, ttl, header.ICMPv4Echo)\n+ }\n+\n+ rxICMPv6EchoRequest := func(dst tcpip.Address) buffer.View {\n+ return utils.ICMPv6Echo(utils.Host1IPv6Addr.AddressWithPrefix.Address, dst, ttl, header.ICMPv6EchoRequest)\n+ }\n+\n+ tests := []struct {\n+ name string\n+ netProto tcpip.NetworkProtocolNumber\n+ rxICMPEchoRequest func(tcpip.Address) buffer.View\n+ icmpChecker func(*testing.T, buffer.View, tcpip.Address, uint8, uint8, buffer.View)\n+\n+ natHooks []natHook\n+\n+ rejectTarget func(*testing.T, stack.NetworkProtocol, int) stack.Target\n+ rejectWithVals []rejectWithVal\n+ errorICMPType uint8\n+ }{\n+ {\n+ name: \"IPv4\",\n+ netProto: header.IPv4ProtocolNumber,\n+ rxICMPEchoRequest: rxICMPv4EchoRequest,\n+\n+ icmpChecker: func(t *testing.T, v buffer.View, dstAddr tcpip.Address, icmpType, icmpCode uint8, origPayload buffer.View) {\n+ t.Helper()\n+\n+ checker.IPv4(t, v,\n+ checker.SrcAddr(utils.RouterNIC1IPv4Addr.AddressWithPrefix.Address),\n+ checker.DstAddr(dstAddr),\n+ checker.ICMPv4(\n+ checker.ICMPv4Checksum(),\n+ checker.ICMPv4Type(header.ICMPv4Type(icmpType)),\n+ checker.ICMPv4Code(header.ICMPv4Code(icmpCode)),\n+ checker.ICMPv4Payload(origPayload),\n+ ),\n+ )\n+ },\n+ natHooks: []natHook{\n+ {\n+ hook: stack.Input,\n+ dstAddr: utils.RouterNIC1IPv4Addr.AddressWithPrefix.Address,\n+ matcher: &icmpv4Matcher{icmpType: header.ICMPv4Echo},\n+ errorICMPDstAddr: utils.Host1IPv4Addr.AddressWithPrefix.Address,\n+ errorICMPPayload: rxICMPv4EchoRequest(utils.RouterNIC1IPv4Addr.AddressWithPrefix.Address),\n+ },\n+ {\n+ hook: stack.Forward,\n+ dstAddr: utils.Host2IPv4Addr.AddressWithPrefix.Address,\n+ matcher: &icmpv4Matcher{icmpType: header.ICMPv4Echo},\n+ errorICMPDstAddr: utils.Host1IPv4Addr.AddressWithPrefix.Address,\n+ errorICMPPayload: rxICMPv4EchoRequest(utils.Host2IPv4Addr.AddressWithPrefix.Address),\n+ },\n+ {\n+ hook: stack.Output,\n+ dstAddr: utils.RouterNIC1IPv4Addr.AddressWithPrefix.Address,\n+ matcher: &icmpv4Matcher{icmpType: header.ICMPv4EchoReply},\n+ errorICMPDstAddr: utils.RouterNIC1IPv4Addr.AddressWithPrefix.Address,\n+ errorICMPPayload: 
utils.ICMPv4Echo(utils.RouterNIC1IPv4Addr.AddressWithPrefix.Address, utils.Host1IPv4Addr.AddressWithPrefix.Address, ttl, header.ICMPv4EchoReply),\n+ },\n+ },\n+ rejectTarget: func(t *testing.T, netProto stack.NetworkProtocol, rejectWith int) stack.Target {\n+ handler, ok := netProto.(stack.RejectIPv4WithHandler)\n+ if !ok {\n+ t.Fatalf(\"expected %T to implement %T\", netProto, handler)\n+ }\n+\n+ return &stack.RejectIPv4Target{\n+ Handler: handler,\n+ RejectWith: stack.RejectIPv4WithICMPType(rejectWith),\n+ }\n+ },\n+ rejectWithVals: []rejectWithVal{\n+ {\n+ name: \"ICMP Network Unreachable\",\n+ val: int(stack.RejectIPv4WithICMPNetUnreachable),\n+ errorICMPCode: uint8(header.ICMPv4NetUnreachable),\n+ },\n+ {\n+ name: \"ICMP Host Unreachable\",\n+ val: int(stack.RejectIPv4WithICMPHostUnreachable),\n+ errorICMPCode: uint8(header.ICMPv4HostUnreachable),\n+ },\n+ {\n+ name: \"ICMP Port Unreachable\",\n+ val: int(stack.RejectIPv4WithICMPPortUnreachable),\n+ errorICMPCode: uint8(header.ICMPv4PortUnreachable),\n+ },\n+ {\n+ name: \"ICMP Network Prohibited\",\n+ val: int(stack.RejectIPv4WithICMPNetProhibited),\n+ errorICMPCode: uint8(header.ICMPv4NetProhibited),\n+ },\n+ {\n+ name: \"ICMP Host Prohibited\",\n+ val: int(stack.RejectIPv4WithICMPHostProhibited),\n+ errorICMPCode: uint8(header.ICMPv4HostProhibited),\n+ },\n+ {\n+ name: \"ICMP Administratively Prohibited\",\n+ val: int(stack.RejectIPv4WithICMPAdminProhibited),\n+ errorICMPCode: uint8(header.ICMPv4AdminProhibited),\n+ },\n+ },\n+ errorICMPType: uint8(header.ICMPv4DstUnreachable),\n+ },\n+ {\n+ name: \"IPv6\",\n+ netProto: header.IPv6ProtocolNumber,\n+ rxICMPEchoRequest: rxICMPv6EchoRequest,\n+\n+ icmpChecker: func(t *testing.T, v buffer.View, dstAddr tcpip.Address, icmpType, icmpCode uint8, origPayload buffer.View) {\n+ t.Helper()\n+\n+ checker.IPv6(t, v,\n+ checker.SrcAddr(utils.RouterNIC1IPv6Addr.AddressWithPrefix.Address),\n+ checker.DstAddr(dstAddr),\n+ checker.ICMPv6(\n+ checker.ICMPv6Type(header.ICMPv6Type(icmpType)),\n+ checker.ICMPv6Code(header.ICMPv6Code(icmpCode)),\n+ checker.ICMPv6Payload(origPayload),\n+ ),\n+ )\n+ },\n+ natHooks: []natHook{\n+ {\n+ hook: stack.Input,\n+ dstAddr: utils.RouterNIC1IPv6Addr.AddressWithPrefix.Address,\n+ matcher: &icmpv6Matcher{icmpType: header.ICMPv6EchoRequest},\n+ errorICMPDstAddr: utils.Host1IPv6Addr.AddressWithPrefix.Address,\n+ errorICMPPayload: rxICMPv6EchoRequest(utils.RouterNIC1IPv6Addr.AddressWithPrefix.Address),\n+ },\n+ {\n+ hook: stack.Forward,\n+ dstAddr: utils.Host2IPv6Addr.AddressWithPrefix.Address,\n+ matcher: &icmpv6Matcher{icmpType: header.ICMPv6EchoRequest},\n+ errorICMPDstAddr: utils.Host1IPv6Addr.AddressWithPrefix.Address,\n+ errorICMPPayload: rxICMPv6EchoRequest(utils.Host2IPv6Addr.AddressWithPrefix.Address),\n+ },\n+ {\n+ hook: stack.Output,\n+ dstAddr: utils.RouterNIC1IPv6Addr.AddressWithPrefix.Address,\n+ matcher: &icmpv6Matcher{icmpType: header.ICMPv6EchoReply},\n+ errorICMPDstAddr: utils.RouterNIC1IPv6Addr.AddressWithPrefix.Address,\n+ errorICMPPayload: utils.ICMPv6Echo(utils.RouterNIC1IPv6Addr.AddressWithPrefix.Address, utils.Host1IPv6Addr.AddressWithPrefix.Address, ttl, header.ICMPv6EchoReply),\n+ },\n+ },\n+ rejectTarget: func(t *testing.T, netProto stack.NetworkProtocol, rejectWith int) stack.Target {\n+ handler, ok := netProto.(stack.RejectIPv6WithHandler)\n+ if !ok {\n+ t.Fatalf(\"expected %T to implement %T\", netProto, handler)\n+ }\n+\n+ return &stack.RejectIPv6Target{\n+ Handler: handler,\n+ RejectWith: stack.RejectIPv6WithICMPType(rejectWith),\n+ }\n+ 
},\n+ rejectWithVals: []rejectWithVal{\n+ {\n+ name: \"ICMP No Route\",\n+ val: int(stack.RejectIPv6WithICMPNoRoute),\n+ errorICMPCode: uint8(header.ICMPv6NetworkUnreachable),\n+ },\n+ {\n+ name: \"ICMP Address Unreachable\",\n+ val: int(stack.RejectIPv6WithICMPAddrUnreachable),\n+ errorICMPCode: uint8(header.ICMPv6AddressUnreachable),\n+ },\n+ {\n+ name: \"ICMP Port Unreachable\",\n+ val: int(stack.RejectIPv6WithICMPPortUnreachable),\n+ errorICMPCode: uint8(header.ICMPv6PortUnreachable),\n+ },\n+ {\n+ name: \"ICMP Administratively Prohibited\",\n+ val: int(stack.RejectIPv6WithICMPAdminProhibited),\n+ errorICMPCode: uint8(header.ICMPv6Prohibited),\n+ },\n+ },\n+ errorICMPType: uint8(header.ICMPv6DstUnreachable),\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ for _, natHook := range test.natHooks {\n+ t.Run(natHook.hook.String(), func(t *testing.T) {\n+ for _, rejectWith := range test.rejectWithVals {\n+ t.Run(rejectWith.name, func(t *testing.T) {\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},\n+ TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol, tcp.NewProtocol},\n+ })\n+\n+ ep1 := channel.New(1, header.IPv6MinimumMTU, \"\")\n+ ep2 := channel.New(1, header.IPv6MinimumMTU, \"\")\n+ utils.SetupRouterStack(t, s, ep1, ep2)\n+\n+ {\n+ ipv6 := test.netProto == ipv6.ProtocolNumber\n+ ipt := s.IPTables()\n+ filter := ipt.GetTable(stack.FilterID, ipv6)\n+ ruleIdx := filter.BuiltinChains[natHook.hook]\n+ filter.Rules[ruleIdx].Matchers = []stack.Matcher{natHook.matcher}\n+ filter.Rules[ruleIdx].Target = test.rejectTarget(t, s.NetworkProtocolInstance(test.netProto), rejectWith.val)\n+ // Make sure the packet is not dropped by the next rule.\n+ filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n+ if err := ipt.ReplaceTable(stack.FilterID, filter, ipv6); err != nil {\n+ t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.FilterID, ipv6, err)\n+ }\n+ }\n+\n+ func() {\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: test.rxICMPEchoRequest(natHook.dstAddr).ToVectorisedView(),\n+ })\n+ defer pkt.DecRef()\n+ ep1.InjectInbound(test.netProto, pkt)\n+ }()\n+\n+ {\n+ pkt := ep1.Read()\n+ if pkt == nil {\n+ t.Fatal(\"expected to read a packet on ep1\")\n+ }\n+ test.icmpChecker(\n+ t,\n+ stack.PayloadSince(pkt.NetworkHeader()),\n+ natHook.errorICMPDstAddr,\n+ test.errorICMPType,\n+ rejectWith.errorICMPCode,\n+ natHook.errorICMPPayload,\n+ )\n+ }\n+ })\n+ }\n+ })\n+ }\n+ })\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tests/utils/utils.go",
"new_path": "pkg/tcpip/tests/utils/utils.go",
"diff": "@@ -353,7 +353,8 @@ func SetupRoutedStacks(t *testing.T, host1Stack, routerStack, host2Stack *stack.\n})\n}\n-func rxICMPv4Echo(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8, ty header.ICMPv4Type) {\n+// ICMPv4Echo returns an ICMPv4 echo packet.\n+func ICMPv4Echo(src, dst tcpip.Address, ttl uint8, ty header.ICMPv4Type) buffer.View {\ntotalLen := header.IPv4MinimumSize + header.ICMPv4MinimumSize\nhdr := buffer.NewPrependable(totalLen)\npkt := header.ICMPv4(hdr.Prepend(header.ICMPv4MinimumSize))\n@@ -370,27 +371,31 @@ func rxICMPv4Echo(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8, ty hea\nDstAddr: dst,\n})\nip.SetChecksum(^ip.CalculateChecksum())\n-\n- newPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n- Data: hdr.View().ToVectorisedView(),\n- })\n- defer newPkt.DecRef()\n- e.InjectInbound(header.IPv4ProtocolNumber, newPkt)\n+ return hdr.View()\n}\n// RxICMPv4EchoRequest constructs and injects an ICMPv4 echo request packet on\n// the provided endpoint.\nfunc RxICMPv4EchoRequest(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8) {\n- rxICMPv4Echo(e, src, dst, ttl, header.ICMPv4Echo)\n+ newPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: ICMPv4Echo(src, dst, ttl, header.ICMPv4Echo).ToVectorisedView(),\n+ })\n+ defer newPkt.DecRef()\n+ e.InjectInbound(header.IPv4ProtocolNumber, newPkt)\n}\n// RxICMPv4EchoReply constructs and injects an ICMPv4 echo reply packet on\n// the provided endpoint.\nfunc RxICMPv4EchoReply(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8) {\n- rxICMPv4Echo(e, src, dst, ttl, header.ICMPv4EchoReply)\n+ newPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: ICMPv4Echo(src, dst, ttl, header.ICMPv4EchoReply).ToVectorisedView(),\n+ })\n+ defer newPkt.DecRef()\n+ e.InjectInbound(header.IPv4ProtocolNumber, newPkt)\n}\n-func rxICMPv6Echo(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8, ty header.ICMPv6Type) {\n+// ICMPv6Echo returns an ICMPv6 echo packet.\n+func ICMPv6Echo(src, dst tcpip.Address, ttl uint8, ty header.ICMPv6Type) buffer.View {\ntotalLen := header.IPv6MinimumSize + header.ICMPv6MinimumSize\nhdr := buffer.NewPrependable(totalLen)\npkt := header.ICMPv6(hdr.Prepend(header.ICMPv6MinimumSize))\n@@ -410,22 +415,25 @@ func rxICMPv6Echo(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8, ty hea\nSrcAddr: src,\nDstAddr: dst,\n})\n-\n- newPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n- Data: hdr.View().ToVectorisedView(),\n- })\n- defer newPkt.DecRef()\n- e.InjectInbound(header.IPv6ProtocolNumber, newPkt)\n+ return hdr.View()\n}\n// RxICMPv6EchoRequest constructs and injects an ICMPv6 echo request packet on\n// the provided endpoint.\nfunc RxICMPv6EchoRequest(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8) {\n- rxICMPv6Echo(e, src, dst, ttl, header.ICMPv6EchoRequest)\n+ newPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: ICMPv6Echo(src, dst, ttl, header.ICMPv6EchoRequest).ToVectorisedView(),\n+ })\n+ defer newPkt.DecRef()\n+ e.InjectInbound(header.IPv6ProtocolNumber, newPkt)\n}\n// RxICMPv6EchoReply constructs and injects an ICMPv6 echo reply packet on\n// the provided endpoint.\nfunc RxICMPv6EchoReply(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8) {\n- rxICMPv6Echo(e, src, dst, ttl, header.ICMPv6EchoReply)\n+ newPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: ICMPv6Echo(src, dst, ttl, header.ICMPv6EchoReply).ToVectorisedView(),\n+ })\n+ defer newPkt.DecRef()\n+ e.InjectInbound(header.IPv6ProtocolNumber, newPkt)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support REJECT hook
PiperOrigin-RevId: 420174647 |
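For a sense of how the targets introduced above are wired up, here is a minimal sketch modeled on the commit's own TestRejectWith setup. It assumes an already-initialized *stack.Stack `s` with the IPv4 protocol registered, and it uses the ReplaceTable signature as it existed at this commit (still returning an error); real uses would also attach Matchers to the rule, as the commit's tests do:

```go
package example

import (
	"gvisor.dev/gvisor/pkg/tcpip/header"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

// installReject rewrites the first INPUT filter rule so that packets reaching
// it are dropped and answered with an ICMPv4 port-unreachable error.
func installReject(s *stack.Stack) {
	// As of this change, the IPv4 protocol instance implements
	// stack.RejectIPv4WithHandler; it builds and sends the ICMP error.
	handler, ok := s.NetworkProtocolInstance(header.IPv4ProtocolNumber).(stack.RejectIPv4WithHandler)
	if !ok {
		panic("IPv4 protocol does not implement stack.RejectIPv4WithHandler")
	}
	ipt := s.IPTables()
	filter := ipt.GetTable(stack.FilterID, false /* ipv6 */)
	ruleIdx := filter.BuiltinChains[stack.Input]
	filter.Rules[ruleIdx].Target = &stack.RejectIPv4Target{
		Handler:    handler,
		RejectWith: stack.RejectIPv4WithICMPPortUnreachable,
	}
	// Let packets that do not match fall through to an ACCEPT rule.
	filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
	if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {
		panic(err)
	}
}
```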
259,962 | 06.01.2022 17:30:26 | 28,800 | f2a57c9dac27bd6dcd1c53156d94bdcbdb7a542e | Fixes multiple bugs in server_rx implementation.
fillPacket was incorrectly setting the buffer sizes, causing
large packets to be egressed incorrectly and resulting in
packet drops when the MTU was > buffer size. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/BUILD",
"new_path": "pkg/tcpip/link/sharedmem/BUILD",
"diff": "@@ -56,15 +56,18 @@ go_test(\nsrcs = [\"sharedmem_server_test.go\"],\ndeps = [\n\":sharedmem\",\n+ \"//pkg/log\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/adapters/gonet\",\n\"//pkg/tcpip/header\",\n+ \"//pkg/tcpip/link/qdisc/fifo\",\n\"//pkg/tcpip/link/sniffer\",\n\"//pkg/tcpip/network/ipv4\",\n\"//pkg/tcpip/network/ipv6\",\n\"//pkg/tcpip/stack\",\n\"//pkg/tcpip/transport/tcp\",\n\"//pkg/tcpip/transport/udp\",\n+ \"@org_golang_x_sync//errgroup:go_default_library\",\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/pipe/pipe_test.go",
"new_path": "pkg/tcpip/link/sharedmem/pipe/pipe_test.go",
"diff": "@@ -461,7 +461,7 @@ func TestConcurrentReaderWriter(t *testing.T) {\ntr := rand.New(rand.NewSource(99))\nrr := rand.New(rand.NewSource(99))\n- b := make([]byte, 100)\n+ b := make([]byte, 4096)\nvar tx Tx\ntx.Init(b)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/pipe/rx.go",
"new_path": "pkg/tcpip/link/sharedmem/pipe/rx.go",
"diff": "@@ -87,6 +87,11 @@ func (r *Rx) Flush() {\nr.tail = r.head\n}\n+// Abort unpulls any pulled buffers.\n+func (r *Rx) Abort() {\n+ r.head = r.tail\n+}\n+\n// Bytes returns the byte slice on which the pipe operates.\nfunc (r *Rx) Bytes() []byte {\nreturn r.p.buffer\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/queuepair.go",
"new_path": "pkg/tcpip/link/sharedmem/queuepair.go",
"diff": "@@ -50,6 +50,12 @@ const (\n// defaultSharedDataSize is the size of the sharedData region used to\n// enable/disable notifications.\ndefaultSharedDataSize = 4 << 10 // 4KiB\n+\n+ // DefaultBufferSize is the size of each individual buffer that the data\n+ // region is broken down into to hold packet data. Should be larger than\n+ // 1500 + 14 (Ethernet header) + 10 (VirtIO header) to fit each packet\n+ // in a single buffer.\n+ DefaultBufferSize = 2048\n)\n// A QueuePair represents a pair of TX/RX queues.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/server_tx.go",
"new_path": "pkg/tcpip/link/sharedmem/server_tx.go",
"diff": "@@ -113,6 +113,27 @@ func (s *serverTx) cleanup() {\ns.eventFD.Close()\n}\n+// acquireBuffers acquires enough buffers to hold all the data in views or\n+// returns nil if not enough buffers are currently available.\n+func (s *serverTx) acquireBuffers(views []buffer.View, buffers []queue.RxBuffer) (acquiredBuffers []queue.RxBuffer) {\n+ acquiredBuffers = buffers[:0]\n+ wantBytes := 0\n+ for i := range views {\n+ wantBytes += len(views[i])\n+ }\n+ for wantBytes > 0 {\n+ var b []byte\n+ if b = s.fillPipe.Pull(); b == nil {\n+ s.fillPipe.Abort()\n+ return nil\n+ }\n+ rxBuffer := queue.DecodeRxBufferHeader(b)\n+ acquiredBuffers = append(acquiredBuffers, rxBuffer)\n+ wantBytes -= int(rxBuffer.Size)\n+ }\n+ return acquiredBuffers\n+}\n+\n// fillPacket copies the data in the provided views into buffers pulled from the\n// fillPipe and returns a slice of RxBuffers that contain the copied data as\n// well as the total number of bytes copied.\n@@ -120,50 +141,48 @@ func (s *serverTx) cleanup() {\n// To avoid allocations the filledBuffers are appended to the buffers slice\n// which will be grown as required.\nfunc (s *serverTx) fillPacket(views []buffer.View, buffers []queue.RxBuffer) (filledBuffers []queue.RxBuffer, totalCopied uint32) {\n- filledBuffers = buffers[:0]\n// fillBuffer copies as much of the views as possible into the provided buffer\n// and returns any left over views (if any).\nfillBuffer := func(buffer *queue.RxBuffer, views []buffer.View) (left []buffer.View) {\nif len(views) == 0 {\nreturn nil\n}\n- availBytes := buffer.Size\ncopied := uint64(0)\n+ availBytes := buffer.Size\nfor availBytes > 0 && len(views) > 0 {\nn := copy(s.data[buffer.Offset+copied:][:uint64(buffer.Size)-copied], views[0])\n+ copied += uint64(n)\n+ availBytes -= uint32(n)\nviews[0].TrimFront(n)\nif !views[0].IsEmpty() {\nbreak\n}\nviews = views[1:]\n- copied += uint64(n)\n- availBytes -= uint32(n)\n}\nbuffer.Size = uint32(copied)\nreturn views\n}\n-\n- for len(views) > 0 {\n- var b []byte\n- // Spin till we get a free buffer reposted by the peer.\n- for {\n- if b = s.fillPipe.Pull(); b != nil {\n- break\n- }\n+ bufs := s.acquireBuffers(views, buffers)\n+ if bufs == nil {\n+ return nil, 0\n}\n- rxBuffer := queue.DecodeRxBufferHeader(b)\n+ for i := 0; len(views) > 0 && i < len(bufs); i++ {\n// Copy the packet into the posted buffer.\n- views = fillBuffer(&rxBuffer, views)\n- totalCopied += rxBuffer.Size\n- filledBuffers = append(filledBuffers, rxBuffer)\n+ views = fillBuffer(&bufs[i], views)\n+ totalCopied += bufs[i].Size\n}\n- return filledBuffers, totalCopied\n+ return bufs, totalCopied\n}\nfunc (s *serverTx) transmit(views []buffer.View) bool {\nbuffers := make([]queue.RxBuffer, 8)\nbuffers, totalCopied := s.fillPacket(views, buffers)\n+ if totalCopied == 0 {\n+ // drop the packet as not enough buffers were probably available\n+ // to send.\n+ return false\n+ }\nb := s.completionPipe.Push(queue.RxCompletionSize(len(buffers)))\nif b == nil {\nreturn false\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem_server_test.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem_server_test.go",
"diff": "@@ -22,13 +22,17 @@ import (\n\"io\"\n\"net\"\n\"net/http\"\n+ \"strings\"\n\"syscall\"\n\"testing\"\n+ \"golang.org/x/sync/errgroup\"\n\"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/link/qdisc/fifo\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/sharedmem\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/sniffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/network/ipv4\"\n@@ -45,13 +49,18 @@ const (\nremoteIPv4Address = tcpip.Address(\"\\x0a\\x00\\x00\\x02\")\nserverPort = 10001\n- defaultMTU = 1500\n+ defaultMTU = 65536\ndefaultBufferSize = 1500\n+\n+ // qDisc options\n+ numQueues = 1\n+ queueLen = 1000\n)\ntype stackOptions struct {\nep stack.LinkEndpoint\naddr tcpip.Address\n+ enablePacketLogs bool\n}\nfunc newStackWithOptions(stackOpts stackOptions) (*stack.Stack, error) {\n@@ -67,9 +76,16 @@ func newStackWithOptions(stackOpts stackOptions) (*stack.Stack, error) {\nTransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol, udp.NewProtocol},\n})\nnicID := tcpip.NICID(1)\n- sniffEP := sniffer.New(stackOpts.ep)\n- opts := stack.NICOptions{Name: \"eth0\"}\n- if err := st.CreateNICWithOptions(nicID, sniffEP, opts); err != nil {\n+ ep := stackOpts.ep\n+ if stackOpts.enablePacketLogs {\n+ ep = sniffer.New(stackOpts.ep)\n+ }\n+ qDisc := fifo.New(ep, int(numQueues), int(queueLen))\n+ opts := stack.NICOptions{\n+ Name: \"eth0\",\n+ QDisc: qDisc,\n+ }\n+ if err := st.CreateNICWithOptions(nicID, ep, opts); err != nil {\nreturn nil, fmt.Errorf(\"method CreateNICWithOptions(%d, _, %v) failed: %s\", nicID, opts, err)\n}\n@@ -106,7 +122,7 @@ func newClientStack(t *testing.T, qPair *sharedmem.QueuePair, peerFD int) (*stac\nif err != nil {\nreturn nil, fmt.Errorf(\"failed to create sharedmem endpoint: %s\", err)\n}\n- st, err := newStackWithOptions(stackOptions{ep: ep, addr: localIPv4Address})\n+ st, err := newStackWithOptions(stackOptions{ep: ep, addr: localIPv4Address, enablePacketLogs: true})\nif err != nil {\nreturn nil, fmt.Errorf(\"failed to create client stack: %s\", err)\n}\n@@ -125,7 +141,7 @@ func newServerStack(t *testing.T, qPair *sharedmem.QueuePair, peerFD int) (*stac\nif err != nil {\nreturn nil, fmt.Errorf(\"failed to create sharedmem endpoint: %s\", err)\n}\n- st, err := newStackWithOptions(stackOptions{ep: ep, addr: remoteIPv4Address})\n+ st, err := newStackWithOptions(stackOptions{ep: ep, addr: remoteIPv4Address, enablePacketLogs: true})\nif err != nil {\nreturn nil, fmt.Errorf(\"failed to create client stack: %s\", err)\n}\n@@ -185,7 +201,7 @@ func TestServerRoundTrip(t *testing.T) {\nt.Fatalf(\"failed to start TCP Listener: %s\", err)\n}\ndefer l.Close()\n- var responseString = \"response\"\n+ var responseString = strings.Repeat(\"response\", 8<<10)\ngo func() {\nhttp.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\nw.Write([]byte(responseString))\n@@ -218,3 +234,56 @@ func TestServerRoundTrip(t *testing.T) {\nt.Fatalf(\"unexpected response got: %s, want: %s\", got, want)\n}\n}\n+\n+func TestServerRoundTripStress(t *testing.T) {\n+ ctx := newTestContext(t)\n+ defer ctx.cleanup()\n+ listenAddr := tcpip.FullAddress{Addr: remoteIPv4Address, Port: serverPort}\n+ l, err := gonet.ListenTCP(ctx.serverStk, listenAddr, ipv4.ProtocolNumber)\n+ if err != nil {\n+ t.Fatalf(\"failed to start TCP Listener: %s\", err)\n+ }\n+ defer l.Close()\n+ var responseString = strings.Repeat(\"response\", 8<<10)\n+ go func() {\n+ 
http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n+ w.Write([]byte(responseString))\n+ }))\n+ }()\n+\n+ dialFunc := func(address, protocol string) (net.Conn, error) {\n+ return gonet.DialTCP(ctx.clientStk, listenAddr, ipv4.ProtocolNumber)\n+ }\n+\n+ serverURL := fmt.Sprintf(\"http://[%s]:%d/\", net.IP(remoteIPv4Address), serverPort)\n+ var errs errgroup.Group\n+ for i := 0; i < 1000; i++ {\n+ errs.Go(func() error {\n+ httpClient := &http.Client{\n+ Transport: &http.Transport{\n+ Dial: dialFunc,\n+ },\n+ }\n+ response, err := httpClient.Get(serverURL)\n+ if err != nil {\n+ return fmt.Errorf(\"httpClient.Get(\\\"/\\\") failed: %s\", err)\n+ }\n+ if got, want := response.StatusCode, http.StatusOK; got != want {\n+ return fmt.Errorf(\"unexpected status code got: %d, want: %d\", got, want)\n+ }\n+ body, err := io.ReadAll(response.Body)\n+ if err != nil {\n+ return fmt.Errorf(\"io.ReadAll(response.Body) failed: %s\", err)\n+ }\n+ response.Body.Close()\n+ if got, want := string(body), responseString; got != want {\n+ return fmt.Errorf(\"unexpected response got: %s, want: %s\", got, want)\n+ }\n+ log.Infof(\"worker: %d read %d bytes\", len(body))\n+ return nil\n+ })\n+ }\n+ if err := errs.Wait(); err != nil {\n+ t.Fatalf(\"request failed: %s\", err)\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/tx.go",
"new_path": "pkg/tcpip/link/sharedmem/tx.go",
"diff": "@@ -43,7 +43,7 @@ type tx struct {\n//\n// The caller always retains ownership of all file descriptors passed in. The\n// queue implementation will duplicate any that it may need in the future.\n-func (t *tx) init(mtu uint32, c *QueueConfig) error {\n+func (t *tx) init(bufferSize uint32, c *QueueConfig) error {\n// Map in all buffers.\ntxPipe, err := getBuffer(c.TxPipeFD)\nif err != nil {\n@@ -73,7 +73,7 @@ func (t *tx) init(mtu uint32, c *QueueConfig) error {\n// Initialize state based on buffers.\nt.q.Init(txPipe, rxPipe, sharedDataPointer(sharedData))\nt.ids.init()\n- t.bufs.init(0, len(data), int(mtu))\n+ t.bufs.init(0, len(data), int(bufferSize))\nt.data = data\nt.eventFD = c.EventFD\nt.sharedDataFD = c.SharedDataFD\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sniffer/sniffer.go",
"new_path": "pkg/tcpip/link/sniffer/sniffer.go",
"diff": "@@ -166,7 +166,7 @@ func (e *endpoint) dumpPacket(dir direction, protocol tcpip.NetworkProtocolNumbe\n// forwards the request to the lower endpoint.\nfunc (e *endpoint) WritePackets(r stack.RouteInfo, pkts stack.PacketBufferList, protocol tcpip.NetworkProtocolNumber) (int, tcpip.Error) {\nfor pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {\n- e.dumpPacket(directionSend, protocol, pkt)\n+ e.dumpPacket(directionSend, pkt.NetworkProtocolNumber, pkt)\n}\nreturn e.Endpoint.WritePackets(r, pkts, protocol)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fixes multiple bugs in server_rx implementation.
fillPacket was incorrectly setting the buffer sizes and
causing large packets to be egressed incorrectly resulting
in packet drops when the MTU was > buffer size.
PiperOrigin-RevId: 420178122 |
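The all-or-nothing acquisition policy that acquireBuffers introduces can be illustrated in isolation. The sketch below is not part of the commit; `buffer` is a hypothetical stand-in for queue.RxBuffer with a fixed 2048-byte size (DefaultBufferSize above):

```go
package main

import "fmt"

// buffer stands in for a posted shared-memory receive buffer.
type buffer struct{ size int }

// acquire pulls buffers from the free pool until they can hold want bytes.
// If the pool runs dry it returns nil, and the caller drops the packet
// instead of spinning, which is the policy the commit adopts.
func acquire(free []buffer, want int) []buffer {
	var got []buffer
	for want > 0 {
		if len(free) == 0 {
			return nil
		}
		got = append(got, free[0])
		want -= free[0].size
		free = free[1:]
	}
	return got
}

func main() {
	pool := []buffer{{2048}, {2048}, {2048}}
	// A 5000-byte packet spans three 2048-byte buffers.
	fmt.Println(len(acquire(pool, 5000))) // 3
	// A 9000-byte packet cannot fit in the pool, so the pull is abandoned.
	fmt.Println(acquire(pool, 9000) == nil) // true
}
```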
259,853 | 13.01.2022 11:42:46 | 28,800 | 6b2178dd644657396811d3ffb6e996c27f586d89 | buildkite: skip cgroupv2 tests
Skip them for now to not block new changes.
We need to find out why buildkite doesn't have cgroupv2 agents. | [
{
"change_type": "MODIFY",
"old_path": ".buildkite/pipeline.yaml",
"new_path": ".buildkite/pipeline.yaml",
"diff": "@@ -116,6 +116,7 @@ steps:\ncommand: make unit-tests\nagents:\nqueue: \"cgroupv2\"\n+ skip: \"Currently broken\"\n- <<: *common\nlabel: \":test_tube: Container tests\"\ncommand: make container-tests\n@@ -124,6 +125,7 @@ steps:\ncommand: make container-tests\nagents:\nqueue: \"cgroupv2\"\n+ skip: \"Currently broken\"\n# All system call tests.\n- <<: *common\n@@ -140,6 +142,7 @@ steps:\ncommand: make docker-tests\nagents:\nqueue: \"cgroupv2\"\n+ skip: \"Currently broken\"\n- <<: *common\nlabel: \":goggles: Overlay tests\"\ncommand: make overlay-tests\n@@ -172,6 +175,7 @@ steps:\ncommand: make containerd-test-1.5.4\nagents:\nqueue: \"cgroupv2\"\n+ skip: \"Currently broken\"\n# Check the website builds.\n- <<: *common\n"
}
] | Go | Apache License 2.0 | google/gvisor | buildkite: skip cgroupv2 tests
Skip them for now to not block new changes.
We need to find out why buildkite doesn't have cgroupv2 agents.
PiperOrigin-RevId: 421620826 |
259,907 | 14.01.2022 01:43:24 | 28,800 | 0a22b6c29ff587841fbbca3e4fe65ed5f8a499ff | Deleted should be a property of pathNode, not fidRef.
This reduces the work required when a file is deleted. Just mark the path
node deleted; all fidRefs pointing to it will then read the correct value.
{
"change_type": "MODIFY",
"old_path": "pkg/p9/handlers.go",
"new_path": "pkg/p9/handlers.go",
"diff": "@@ -1249,11 +1249,6 @@ func doWalk(cs *connState, ref *fidRef, names []string, getattr bool) (qids []QI\nfile: sf,\nmode: ref.mode,\npathNode: ref.pathNode,\n-\n- // For the clone case, the cloned fid must\n- // preserve the deleted property of the\n- // original FID.\n- deleted: ref.deleted,\n}\nif !ref.isRoot() {\nif !newRef.isDeleted() {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/path_tree.go",
"new_path": "pkg/p9/path_tree.go",
"diff": "@@ -33,10 +33,15 @@ import (\ntype pathNode struct {\n// opMu synchronizes high-level, sematic operations, such as the\n// simultaneous creation and deletion of a file.\n- //\n- // opMu does not directly protect any fields in pathNode.\nopMu sync.RWMutex\n+ // deleted indicates that the backing file has been deleted. We stop many\n+ // operations at the API level if they are incompatible with a file that has\n+ // already been unlinked. deleted is protected by opMu. However, it may be\n+ // changed without opMu if this node is deleted as part of an entire subtree\n+ // on unlink. So deleted must only be accessed/mutated using atomics.\n+ deleted uint32\n+\n// childMu protects the fields below.\nchildMu sync.RWMutex\n@@ -211,9 +216,11 @@ func (p *pathNode) removeWithName(name string, fn func(ref *fidRef)) *pathNode {\nfor ref := range m {\ndelete(m, ref)\ndelete(p.childRefNames, ref)\n+ if fn != nil {\nfn(ref)\n}\n}\n+ }\n// Return the original path node, if it exists.\norigPathNode := p.childNodes[name]\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/server.go",
"new_path": "pkg/p9/server.go",
"diff": "@@ -179,11 +179,6 @@ type fidRef struct {\n// isRoot should be used to check for root over looking at parent\n// directly.\nparent *fidRef\n-\n- // deleted indicates that the backing file has been deleted. We stop\n- // many operations at the API level if they are incompatible with a\n- // file that has already been unlinked.\n- deleted uint32\n}\n// IncRef increases the references on a fid.\n@@ -211,8 +206,11 @@ func (f *fidRef) DecRef() {\n}\n// isDeleted returns true if this fidRef has been deleted.\n+//\n+// Precondition: this must be called via safelyRead, safelyWrite or\n+// safelyGlobal.\nfunc (f *fidRef) isDeleted() bool {\n- return atomic.LoadUint32(&f.deleted) != 0\n+ return atomic.LoadUint32(&f.pathNode.deleted) != 0\n}\n// isRoot indicates whether this is a root fid.\n@@ -232,10 +230,7 @@ func (f *fidRef) maybeParent() *fidRef {\n//\n// Precondition: this must be called via safelyWrite or safelyGlobal.\nfunc notifyDelete(pn *pathNode) {\n- // Call on all local references.\n- pn.forEachChildRef(func(ref *fidRef, _ string) {\n- atomic.StoreUint32(&ref.deleted, 1)\n- })\n+ atomic.StoreUint32(&pn.deleted, 1)\n// Call on all subtrees.\npn.forEachChildNode(func(pn *pathNode) {\n@@ -247,11 +242,7 @@ func notifyDelete(pn *pathNode) {\n//\n// Precondition: this must be called via safelyWrite or safelyGlobal.\nfunc (f *fidRef) markChildDeleted(name string) {\n- origPathNode := f.pathNode.removeWithName(name, func(ref *fidRef) {\n- atomic.StoreUint32(&ref.deleted, 1)\n- })\n-\n- if origPathNode != nil {\n+ if origPathNode := f.pathNode.removeWithName(name, nil); origPathNode != nil {\n// Mark all children as deleted.\nnotifyDelete(origPathNode)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Deleted should be a property of pathNode, not fidRef.
This reduces the work required to do when a file is deleted. Just mark the path
node deleted. All fidRefs pointing to it will read the correct value then.
PiperOrigin-RevId: 421766678 |
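The invariant behind this change can be shown with simplified stand-ins for the p9 types (this sketch is illustrative, not the actual implementation): deletion is recorded once on the shared pathNode, and every fidRef observes it through an atomic load.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// pathNode carries the shared deletion flag for a path.
type pathNode struct{ deleted uint32 }

// fidRef points at a pathNode; many fids can share one node.
type fidRef struct{ pathNode *pathNode }

func (f *fidRef) isDeleted() bool {
	return atomic.LoadUint32(&f.pathNode.deleted) != 0
}

func main() {
	pn := &pathNode{}
	a, b := &fidRef{pn}, &fidRef{pn} // two fids walked to the same path
	atomic.StoreUint32(&pn.deleted, 1) // unlink marks the node exactly once
	fmt.Println(a.isDeleted(), b.isDeleted()) // true true
}
```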
259,909 | 14.01.2022 12:16:45 | 28,800 | b5355d8bf967efc15d505fed78e151aff954d356 | Add a panic mode to refs and refsvfs2.
This mode causes a panic rather than just a warning
when leaks are detected during a call to DoLeakCheck(). | [
{
"change_type": "MODIFY",
"old_path": "pkg/refs/refcounter.go",
"new_path": "pkg/refs/refcounter.go",
"diff": "@@ -238,6 +238,9 @@ const (\n// LeaksLogTraces indicates that a trace collected during allocation\n// should be logged when leaks are found.\nLeaksLogTraces\n+\n+ // LeaksPanic indidcates that a panic should be issued when leaks are found.\n+ LeaksPanic\n)\n// Set implements flag.Value.\n@@ -249,6 +252,8 @@ func (l *LeakMode) Set(v string) error {\n*l = LeaksLogWarning\ncase \"log-traces\":\n*l = LeaksLogTraces\n+ case \"panic\":\n+ *l = LeaksPanic\ndefault:\nreturn fmt.Errorf(\"invalid ref leak mode %q\", v)\n}\n@@ -271,9 +276,12 @@ func (l LeakMode) String() string {\nreturn \"log-names\"\ncase LeaksLogTraces:\nreturn \"log-traces\"\n- }\n+ case LeaksPanic:\n+ return \"panic\"\n+ default:\npanic(fmt.Sprintf(\"invalid ref leak mode %d\", l))\n}\n+}\n// leakMode stores the current mode for the reference leak checker.\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/refsvfs2/refs_map.go",
"new_path": "pkg/refsvfs2/refs_map.go",
"diff": "@@ -53,6 +53,12 @@ func leakCheckEnabled() bool {\nreturn refs_vfs1.GetLeakMode() != refs_vfs1.NoLeakChecking\n}\n+// leakCheckPanicEnabled returns whether DoLeakCheck() should panic when leaks\n+// are detected.\n+func leakCheckPanicEnabled() bool {\n+ return refs_vfs1.GetLeakMode() == refs_vfs1.LeaksPanic\n+}\n+\n// Register adds obj to the live object map.\nfunc Register(obj CheckedObject) {\nif leakCheckEnabled() {\n@@ -131,6 +137,9 @@ func DoLeakCheck() {\nfor obj := range liveObjects {\nmsg += obj.LeakMessage() + \"\\n\"\n}\n+ if leakCheckPanicEnabled() {\n+ panic(msg)\n+ }\nlog.Warningf(msg)\n}\n})\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add a panic mode to refs and refsvfs2.
This mode causes a panic rather than just a warning
when leaks are detected during a call to DoLeakCheck().
PiperOrigin-RevId: 421885173 |
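A quick sketch of selecting the new mode; it assumes the package's existing SetLeakMode helper, which is not shown in the diff, and otherwise relies only on the Set and String behavior added above:

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/refs"
)

func main() {
	// Set implements flag.Value, so "panic" is what a command-line flag
	// would pass through for the new LeaksPanic mode.
	var mode refs.LeakMode
	if err := mode.Set("panic"); err != nil {
		panic(err)
	}
	refs.SetLeakMode(mode)
	fmt.Println(mode) // prints "panic"
}
```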
260,004 | 14.01.2022 12:55:20 | 28,800 | c6de0ac8026c77cfd7f68ced7b59480dbf956e7d | Drop return from IPTables.ReplaceTable
We always return the same thing. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netfilter/netfilter.go",
"new_path": "pkg/sentry/socket/netfilter/netfilter.go",
"diff": "@@ -289,7 +289,8 @@ func SetEntries(task *kernel.Task, stk *stack.Stack, optVal []byte, ipv6 bool) *\n// - There are no chains without an unconditional final rule.\n// - There are no chains without an unconditional underflow rule.\n- return syserr.TranslateNetstackError(stk.IPTables().ReplaceTable(nameToID[replace.Name.String()], table, ipv6))\n+ stk.IPTables().ReplaceTable(nameToID[replace.Name.String()], table, ipv6)\n+ return nil\n}\n// parseMatchers parses 0 or more matchers from optVal. optVal should contain\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4_test.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4_test.go",
"diff": "@@ -2774,9 +2774,7 @@ func TestWriteStats(t *testing.T) {\nfilter := ipt.GetTable(stack.FilterID, false /* ipv6 */)\nruleIdx := filter.BuiltinChains[stack.Output]\nfilter.Rules[ruleIdx].Target = &stack.DropTarget{}\n- if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {\n- t.Fatalf(\"failed to replace table: %s\", err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */)\n},\nallowPackets: math.MaxInt32,\nexpectSent: 0,\n@@ -2790,9 +2788,7 @@ func TestWriteStats(t *testing.T) {\nfilter := ipt.GetTable(stack.NATID, false /* ipv6 */)\nruleIdx := filter.BuiltinChains[stack.Postrouting]\nfilter.Rules[ruleIdx].Target = &stack.DropTarget{}\n- if err := ipt.ReplaceTable(stack.NATID, filter, false /* ipv6 */); err != nil {\n- t.Fatalf(\"failed to replace table: %s\", err)\n- }\n+ ipt.ReplaceTable(stack.NATID, filter, false /* ipv6 */)\n},\nallowPackets: math.MaxInt32,\nexpectSent: 0,\n@@ -2812,9 +2808,7 @@ func TestWriteStats(t *testing.T) {\nfilter.Rules[ruleIdx].Matchers = []stack.Matcher{&limitedMatcher{nPackets - 1}}\n// Make sure the next rule is ACCEPT.\nfilter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {\n- t.Fatalf(\"failed to replace table: %s\", err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */)\n},\nallowPackets: math.MaxInt32,\nexpectSent: nPackets - 1,\n@@ -2834,9 +2828,7 @@ func TestWriteStats(t *testing.T) {\nfilter.Rules[ruleIdx].Matchers = []stack.Matcher{&limitedMatcher{nPackets - 1}}\n// Make sure the next rule is ACCEPT.\nfilter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.NATID, filter, false /* ipv6 */); err != nil {\n- t.Fatalf(\"failed to replace table: %s\", err)\n- }\n+ ipt.ReplaceTable(stack.NATID, filter, false /* ipv6 */)\n},\nallowPackets: math.MaxInt32,\nexpectSent: nPackets - 1,\n@@ -3184,9 +3176,7 @@ func TestCloseLocking(t *testing.T) {\nstack.Postrouting: 3,\n},\n}\n- if err := s.IPTables().ReplaceTable(stack.NATID, table, false /* ipv6 */); err != nil {\n- t.Fatalf(\"s.IPTables().ReplaceTable(...): %s\", err)\n- }\n+ s.IPTables().ReplaceTable(stack.NATID, table, false /* ipv6 */)\ne := channel.New(0, defaultMTU, \"\")\nif err := s.CreateNIC(nicID1, e); err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ipv6_test.go",
"new_path": "pkg/tcpip/network/ipv6/ipv6_test.go",
"diff": "@@ -2530,9 +2530,7 @@ func TestWriteStats(t *testing.T) {\nfilter := ipt.GetTable(stack.FilterID, true /* ipv6 */)\nruleIdx := filter.BuiltinChains[stack.Output]\nfilter.Rules[ruleIdx].Target = &stack.DropTarget{}\n- if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {\n- t.Fatalf(\"failed to replace table: %v\", err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */)\n},\nallowPackets: math.MaxInt32,\nexpectSent: 0,\n@@ -2547,9 +2545,7 @@ func TestWriteStats(t *testing.T) {\nfilter := ipt.GetTable(stack.NATID, true /* ipv6 */)\nruleIdx := filter.BuiltinChains[stack.Postrouting]\nfilter.Rules[ruleIdx].Target = &stack.DropTarget{}\n- if err := ipt.ReplaceTable(stack.NATID, filter, true /* ipv6 */); err != nil {\n- t.Fatalf(\"failed to replace table: %v\", err)\n- }\n+ ipt.ReplaceTable(stack.NATID, filter, true /* ipv6 */)\n},\nallowPackets: math.MaxInt32,\nexpectSent: 0,\n@@ -2569,9 +2565,7 @@ func TestWriteStats(t *testing.T) {\nfilter.Rules[ruleIdx].Matchers = []stack.Matcher{&limitedMatcher{nPackets - 1}}\n// Make sure the next rule is ACCEPT.\nfilter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {\n- t.Fatalf(\"failed to replace table: %v\", err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */)\n},\nallowPackets: math.MaxInt32,\nexpectSent: nPackets - 1,\n@@ -2591,9 +2585,7 @@ func TestWriteStats(t *testing.T) {\nfilter.Rules[ruleIdx].Matchers = []stack.Matcher{&limitedMatcher{nPackets - 1}}\n// Make sure the next rule is ACCEPT.\nfilter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.NATID, filter, true /* ipv6 */); err != nil {\n- t.Fatalf(\"failed to replace table: %v\", err)\n- }\n+ ipt.ReplaceTable(stack.NATID, filter, true /* ipv6 */)\n},\nallowPackets: math.MaxInt32,\nexpectSent: nPackets - 1,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables.go",
"new_path": "pkg/tcpip/stack/iptables.go",
"diff": "@@ -231,7 +231,7 @@ func (it *IPTables) getTableRLocked(id TableID, ipv6 bool) Table {\n// ReplaceTable replaces or inserts table by name. It panics when an invalid id\n// is provided.\n-func (it *IPTables) ReplaceTable(id TableID, table Table, ipv6 bool) tcpip.Error {\n+func (it *IPTables) ReplaceTable(id TableID, table Table, ipv6 bool) {\nit.mu.Lock()\ndefer it.mu.Unlock()\n// If iptables is being enabled, initialize the conntrack table and\n@@ -246,7 +246,6 @@ func (it *IPTables) ReplaceTable(id TableID, table Table, ipv6 bool) tcpip.Error\n} else {\nit.v4Tables[id] = table\n}\n- return nil\n}\n// A chainVerdict is what a table decides should be done with a packet.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/iptables_test.go",
"new_path": "pkg/tcpip/stack/iptables_test.go",
"diff": "@@ -115,9 +115,7 @@ func TestNATedConnectionReap(t *testing.T) {\nPostrouting: 5,\n},\n}\n- if err := iptables.ReplaceTable(NATID, table, ipv6); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, true): %s\", NATID, err)\n- }\n+ iptables.ReplaceTable(NATID, table, ipv6)\n// Stop the reaper if it is running so we can reap manually as it is started\n// on the first change to IPTables.\n@@ -299,9 +297,7 @@ func TestNATAlwaysPerformed(t *testing.T) {\niptables := DefaultTables(clock, rand.New(rand.NewSource(0 /* seed */)))\n// Just to make sure the iptables is not short circuited.\n- if err := iptables.ReplaceTable(NATID, iptables.GetTable(NATID, ipv6), ipv6); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, true): %s\", NATID, err)\n- }\n+ iptables.ReplaceTable(NATID, iptables.GetTable(NATID, ipv6), ipv6)\npkt := v6PacketBuffer()\n@@ -414,9 +410,7 @@ func TestNATConflict(t *testing.T) {\nPostrouting: 5,\n},\n}\n- if err := iptables.ReplaceTable(NATID, table, ipv6); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, true): %s\", NATID, err)\n- }\n+ iptables.ReplaceTable(NATID, table, ipv6)\n// Create and finalize the connection.\ntest.checkIPTables(t, iptables, v6PacketBufferWithSrcAddr(srcAddr), true /* lastHookOK */)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tests/integration/iptables_test.go",
"new_path": "pkg/tcpip/tests/integration/iptables_test.go",
"diff": "@@ -182,9 +182,7 @@ func TestIPTablesStatsForInput(t *testing.T) {\nfilter.Rules[ruleIdx].Matchers = []stack.Matcher{&inputIfNameMatcher{nicName}}\n// Make sure the packet is not dropped by the next rule.\nfilter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.FilterID, true, err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */)\n},\ngenPacket: genPacketV6,\nproto: header.IPv6ProtocolNumber,\n@@ -203,9 +201,7 @@ func TestIPTablesStatsForInput(t *testing.T) {\nfilter.Rules[ruleIdx].Target = &stack.DropTarget{}\nfilter.Rules[ruleIdx].Matchers = []stack.Matcher{&inputIfNameMatcher{nicName}}\nfilter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.FilterID, false, err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */)\n},\ngenPacket: genPacketV4,\nproto: header.IPv4ProtocolNumber,\n@@ -223,9 +219,7 @@ func TestIPTablesStatsForInput(t *testing.T) {\nfilter.Rules[ruleIdx].Filter = stack.IPHeaderFilter{InputInterface: anotherNicName}\nfilter.Rules[ruleIdx].Target = &stack.DropTarget{}\nfilter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.FilterID, true, err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */)\n},\ngenPacket: genPacketV6,\nproto: header.IPv6ProtocolNumber,\n@@ -243,9 +237,7 @@ func TestIPTablesStatsForInput(t *testing.T) {\nfilter.Rules[ruleIdx].Filter = stack.IPHeaderFilter{InputInterface: anotherNicName}\nfilter.Rules[ruleIdx].Target = &stack.DropTarget{}\nfilter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.FilterID, false, err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */)\n},\ngenPacket: genPacketV4,\nproto: header.IPv4ProtocolNumber,\n@@ -266,9 +258,7 @@ func TestIPTablesStatsForInput(t *testing.T) {\n}\nfilter.Rules[ruleIdx].Target = &stack.DropTarget{}\nfilter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.FilterID, true, err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */)\n},\ngenPacket: genPacketV6,\nproto: header.IPv6ProtocolNumber,\n@@ -289,9 +279,7 @@ func TestIPTablesStatsForInput(t *testing.T) {\n}\nfilter.Rules[ruleIdx].Target = &stack.DropTarget{}\nfilter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.FilterID, false, err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */)\n},\ngenPacket: genPacketV4,\nproto: header.IPv4ProtocolNumber,\n@@ -309,9 +297,7 @@ func TestIPTablesStatsForInput(t *testing.T) {\nfilter.Rules[ruleIdx].Target = &stack.DropTarget{}\nfilter.Rules[ruleIdx].Matchers = []stack.Matcher{&inputIfNameMatcher{anotherNicName}}\nfilter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): 
%s\", stack.FilterID, true, err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */)\n},\ngenPacket: genPacketV6,\nproto: header.IPv6ProtocolNumber,\n@@ -329,9 +315,7 @@ func TestIPTablesStatsForInput(t *testing.T) {\nfilter.Rules[ruleIdx].Target = &stack.DropTarget{}\nfilter.Rules[ruleIdx].Matchers = []stack.Matcher{&inputIfNameMatcher{anotherNicName}}\nfilter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.FilterID, false, err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */)\n},\ngenPacket: genPacketV4,\nproto: header.IPv4ProtocolNumber,\n@@ -474,9 +458,7 @@ func TestIPTableWritePackets(t *testing.T) {\n},\n}\n- if err := s.IPTables().ReplaceTable(stack.FilterID, table, false /* ipv4 */); err != nil {\n- t.Fatalf(\"ReplaceTable(%d, _, false): %s\", stack.FilterID, err)\n- }\n+ s.IPTables().ReplaceTable(stack.FilterID, table, false /* ipv4 */)\n},\ngenPacket: func(r *stack.Route) stack.PacketBufferList {\nvar pkts stack.PacketBufferList\n@@ -565,9 +547,7 @@ func TestIPTableWritePackets(t *testing.T) {\n},\n}\n- if err := s.IPTables().ReplaceTable(stack.FilterID, table, true /* ipv6 */); err != nil {\n- t.Fatalf(\"ReplaceTable(%d, _, true): %s\", stack.FilterID, err)\n- }\n+ s.IPTables().ReplaceTable(stack.FilterID, table, true /* ipv6 */)\n},\ngenPacket: func(r *stack.Route) stack.PacketBufferList {\nvar pkts stack.PacketBufferList\n@@ -722,9 +702,7 @@ func setupDropFilter(hook stack.Hook, f stack.IPHeaderFilter) func(*testing.T, *\nfilter.Rules[ruleIdx].Target = &stack.DropTarget{NetworkProtocol: netProto}\n// Make sure the packet is not dropped by the next rule.\nfilter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{NetworkProtocol: netProto}\n- if err := ipt.ReplaceTable(stack.FilterID, filter, ipv6); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.FilterID, ipv6, err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, ipv6)\n}\n}\n@@ -1221,9 +1199,7 @@ func setupNAT(t *testing.T, s *stack.Stack, netProto tcpip.NetworkProtocolNumber\ntable.Rules[ruleIdx].Target = target\n// Make sure the packet is not dropped by the next rule.\ntable.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.NATID, table, ipv6); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.NATID, ipv6, err)\n- }\n+ ipt.ReplaceTable(stack.NATID, table, ipv6)\n}\nfunc setupDNAT(t *testing.T, s *stack.Stack, netProto tcpip.NetworkProtocolNumber, transProto tcpip.TransportProtocolNumber, target stack.Target) {\n@@ -1316,9 +1292,7 @@ func setupTwiceNAT(t *testing.T, s *stack.Stack, netProto tcpip.NetworkProtocolN\n},\n}\n- if err := ipt.ReplaceTable(stack.NATID, table, ipv6); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.NATID, ipv6, err)\n- }\n+ ipt.ReplaceTable(stack.NATID, table, ipv6)\n}\ntype natType struct {\n@@ -2523,9 +2497,7 @@ func TestNATICMPError(t *testing.T) {\n},\n}\n- if err := ipt.ReplaceTable(stack.NATID, table, ipv6); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.NATID, ipv6, err)\n- }\n+ ipt.ReplaceTable(stack.NATID, table, ipv6)\nbuf := transportType.buf\n@@ -2890,9 +2862,7 @@ func TestSNATHandlePortOrIdentConflicts(t *testing.T) {\n},\n}\n- if err := ipt.ReplaceTable(stack.NATID, table, ipv6); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.NATID, ipv6, err)\n- }\n+ ipt.ReplaceTable(stack.NATID, table, 
ipv6)\nfor i, srcAddr := range test.srcAddrs {\nt.Run(fmt.Sprintf(\"Packet#%d\", i), func(t *testing.T) {\n@@ -2970,9 +2940,7 @@ func TestLocallyRoutedPackets(t *testing.T) {\nipv6 := test.netProto == ipv6.ProtocolNumber\nipt := s.IPTables()\nfilter := ipt.GetTable(stack.FilterID, ipv6)\n- if err := ipt.ReplaceTable(stack.FilterID, filter, ipv6); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.FilterID, ipv6, err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, ipv6)\n}\nvar wq waiter.Queue\n@@ -3295,9 +3263,7 @@ func TestRejectWith(t *testing.T) {\nfilter.Rules[ruleIdx].Target = test.rejectTarget(t, s.NetworkProtocolInstance(test.netProto), rejectWith.val)\n// Make sure the packet is not dropped by the next rule.\nfilter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.FilterID, filter, ipv6); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, %t): %s\", stack.FilterID, ipv6, err)\n- }\n+ ipt.ReplaceTable(stack.FilterID, filter, ipv6)\n}\nfunc() {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tests/integration/istio_test.go",
"new_path": "pkg/tcpip/tests/integration/istio_test.go",
"diff": "@@ -314,9 +314,7 @@ func TestOutboundNATRedirect(t *testing.T) {\nNetworkProtocol: ipv4.ProtocolNumber,\n}\ntbl.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}\n- if err := ipt.ReplaceTable(stack.NATID, tbl, false /* ipv6 */); err != nil {\n- t.Fatalf(\"ipt.ReplaceTable(%d, _, false): %s\", stack.NATID, err)\n- }\n+ ipt.ReplaceTable(stack.NATID, tbl, false /* ipv6 */)\ndialFunc := func(protocol, address string) (net.Conn, error) {\nhost, port, err := net.SplitHostPort(address)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Drop return from IPTables.ReplaceTable
We always return the same thing.
PiperOrigin-RevId: 421893067 |
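After this change a call site reduces to a plain statement; a minimal sketch, assuming an already-initialized *stack.Stack:

```go
package example

import "gvisor.dev/gvisor/pkg/tcpip/stack"

// refreshFilter shows the new call shape: no always-nil error to check.
func refreshFilter(s *stack.Stack) {
	ipt := s.IPTables()
	filter := ipt.GetTable(stack.FilterID, false /* ipv6 */)
	ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */)
}
```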
259,868 | 14.01.2022 14:32:51 | 28,800 | 027eb1492c2a6b87fdcd79821b0852a2fec976bd | BigQuery metrics: Add ability to output `benchstat`-formatted benchmark data. | [
{
"change_type": "MODIFY",
"old_path": "tools/bigquery/bigquery.go",
"new_path": "tools/bigquery/bigquery.go",
"diff": "@@ -21,6 +21,8 @@ package bigquery\nimport (\n\"context\"\n\"fmt\"\n+ \"regexp\"\n+ \"sort\"\n\"strconv\"\n\"strings\"\n\"time\"\n@@ -81,6 +83,31 @@ func (s *Suite) debugString(sb *strings.Builder, prefix string) {\nsb.WriteString(fmt.Sprintf(\"End of data for benchmark suite %s.\", s.Name))\n}\n+// Benchstat returns a benchstat-formatted output string.\n+// See https://pkg.go.dev/golang.org/x/perf/cmd/benchstat\n+// `includeConditions` contains names of `Condition`s that should be included\n+// as part of the benchmark name.\n+func (s *Suite) Benchstat(includeConditions []string) string {\n+ var sb strings.Builder\n+ benchmarkNames := make([]string, 0, len(s.Benchmarks))\n+ benchmarks := make(map[string]*Benchmark, len(s.Benchmarks))\n+ for _, bm := range s.Benchmarks {\n+ if _, found := benchmarks[bm.Name]; !found {\n+ benchmarkNames = append(benchmarkNames, bm.Name)\n+ benchmarks[bm.Name] = bm\n+ }\n+ }\n+ sort.Strings(benchmarkNames)\n+ includeConditionsMap := make(map[string]bool, len(includeConditions))\n+ for _, condName := range includeConditions {\n+ includeConditionsMap[condName] = true\n+ }\n+ for _, bmName := range benchmarkNames {\n+ benchmarks[bmName].benchstat(&sb, s.Name, includeConditionsMap, s.Conditions)\n+ }\n+ return sb.String()\n+}\n+\n// Benchmark represents an individual benchmark in a suite.\ntype Benchmark struct {\nName string `bq:\"name\"`\n@@ -117,6 +144,69 @@ func (bm *Benchmark) debugString(sb *strings.Builder, prefix string) {\n}\n}\n+// noSpaceRe is used to remove whitespace characters in `noSpace`.\n+var noSpaceRe = regexp.MustCompile(\"\\\\s+\")\n+\n+// noSpace replaces whitespace characters from `s` with \"_\".\n+func noSpace(s string) string {\n+ return noSpaceRe.ReplaceAllString(s, \"_\")\n+}\n+\n+// benchstat produces benchmark-formatted output for this Benchmark.\n+func (bm *Benchmark) benchstat(sb *strings.Builder, suiteName string, includeConditions map[string]bool, suiteConditions []*Condition) {\n+ var conditionsStr string\n+ conditionNames := make([]string, 0, len(suiteConditions)+len(bm.Condition))\n+ conditionMap := make(map[string]string, len(suiteConditions)+len(bm.Condition))\n+ for _, c := range suiteConditions {\n+ cName := noSpace(c.Name)\n+ if _, found := conditionMap[cName]; !found && includeConditions[cName] {\n+ conditionNames = append(conditionNames, cName)\n+ conditionMap[cName] = noSpace(c.Value)\n+ }\n+ }\n+ for _, c := range bm.Condition {\n+ cName := noSpace(c.Name)\n+ if _, found := conditionMap[cName]; !found && includeConditions[cName] {\n+ conditionNames = append(conditionNames, cName)\n+ conditionMap[cName] = noSpace(c.Value)\n+ }\n+ }\n+ sort.Strings(conditionNames)\n+ var conditionsBuilder strings.Builder\n+ if len(conditionNames) > 0 {\n+ conditionsBuilder.WriteByte('{')\n+ for i, condName := range conditionNames {\n+ if i != 0 {\n+ conditionsBuilder.WriteByte(',')\n+ }\n+ conditionsBuilder.WriteString(condName)\n+ conditionsBuilder.WriteByte('=')\n+ conditionsBuilder.WriteString(conditionMap[condName])\n+ }\n+ conditionsBuilder.WriteByte('}')\n+ }\n+ conditionsStr = conditionsBuilder.String()\n+ for _, m := range bm.Metric {\n+ if !strings.HasPrefix(suiteName, \"Benchmark\") {\n+ // benchstat format requires all benchmark names to start with \"Benchmark\".\n+ sb.WriteString(\"Benchmark\")\n+ }\n+ sb.WriteString(noSpace(suiteName))\n+ if suiteName != bm.Name {\n+ sb.WriteByte('/')\n+ sb.WriteString(noSpace(bm.Name))\n+ }\n+ sb.WriteString(conditionsStr)\n+ sb.WriteByte('/')\n+ sb.WriteString(noSpace(m.Name))\n+ 
sb.WriteString(\" 1 \") // 1 sample\n+ sb.WriteString(fmt.Sprintf(\"%f\", m.Sample))\n+ sb.WriteByte(' ')\n+ sb.WriteString(noSpace(m.Unit))\n+ sb.WriteByte('\\n')\n+ }\n+}\n+\n// AddMetric adds a metric to an existing Benchmark.\nfunc (bm *Benchmark) AddMetric(metricName, unit string, sample float64) {\nm := &Metric{\n"
}
] | Go | Apache License 2.0 | google/gvisor | BigQuery metrics: Add ability to output `benchstat`-formatted benchmark data.
PiperOrigin-RevId: 421914403 |
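Each metric in the record above becomes one line of benchstat's text format. As a rough, self-contained sketch of that format — all identifiers below are illustrative, not the dataset's own code:

```go
// benchstat consumes lines of the form:
//   Benchmark<suite>[/<bench>][{cond=val,...}]/<metric> <iterations> <value> <unit>
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var ws = regexp.MustCompile(`\s+`) // benchstat names may not contain spaces

func benchstatLine(suite, bench, metric, unit string, sample float64) string {
	var sb strings.Builder
	if !strings.HasPrefix(suite, "Benchmark") {
		sb.WriteString("Benchmark") // required prefix for every result line
	}
	sb.WriteString(ws.ReplaceAllString(suite, "_"))
	if suite != bench {
		sb.WriteByte('/')
		sb.WriteString(ws.ReplaceAllString(bench, "_"))
	}
	sb.WriteByte('/')
	sb.WriteString(ws.ReplaceAllString(metric, "_"))
	// The "1" records a single sample for this line.
	fmt.Fprintf(&sb, " 1 %f %s", sample, ws.ReplaceAllString(unit, "_"))
	return sb.String()
}

func main() {
	fmt.Println(benchstatLine("fio", "rand read", "bandwidth", "MB/s", 1234.5))
	// Benchmarkfio/rand_read/bandwidth 1 1234.500000 MB/s
}
```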
259,907 | 14.01.2022 15:44:37 | 28,800 | 0ab91dbf4e865d4d48a375b880f1bbeb64b04686 | Do not check the stability of certain /proc/cpuinfo fields. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/proc.cc",
"new_path": "test/syscalls/linux/proc.cc",
"diff": "@@ -395,6 +395,25 @@ int ReadlinkWhileExited(std::string const& basename, char* buf, size_t count) {\nreturn ret;\n}\n+void RemoveUnstableCPUInfoFields(std::vector<std::string>& cpu_info_fields) {\n+ const std::vector<std::string> unstable_fields{\"cpu MHz\", \"bogomips\"};\n+ auto it = cpu_info_fields.begin();\n+ while (it != cpu_info_fields.end()) {\n+ bool found = false;\n+ for (const std::string& unstable_field : unstable_fields) {\n+ if (it->find(unstable_field) != std::string::npos) {\n+ found = true;\n+ break;\n+ }\n+ }\n+ if (found) {\n+ it = cpu_info_fields.erase(it);\n+ } else {\n+ ++it;\n+ }\n+ }\n+}\n+\nTEST(ProcTest, NotFoundInRoot) {\nstruct stat s;\nEXPECT_THAT(stat(\"/proc/foobar\", &s), SyscallFailsWithErrno(ENOENT));\n@@ -1217,7 +1236,6 @@ TEST(ProcCpuinfo, RequiredFieldsArePresent) {\nstd::string proc_cpuinfo =\nASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/cpuinfo\"));\nASSERT_FALSE(proc_cpuinfo.empty());\n- std::vector<std::string> cpuinfo_fields = absl::StrSplit(proc_cpuinfo, '\\n');\n// Check that the usual fields are there. We don't really care about the\n// contents.\n@@ -1271,7 +1289,14 @@ TEST(ProcCpuinfo, Stable) {\nMaybeSave();\nstd::string output_after;\nASSERT_NO_ERRNO(GetContents(\"/proc/cpuinfo\", &output_after));\n- EXPECT_THAT(output_before, Eq(output_after));\n+\n+ std::vector<std::string> before_fields = absl::StrSplit(output_before, '\\n');\n+ std::vector<std::string> after_fields = absl::StrSplit(output_before, '\\n');\n+ RemoveUnstableCPUInfoFields(before_fields);\n+ RemoveUnstableCPUInfoFields(after_fields);\n+\n+ EXPECT_THAT(absl::StrJoin(before_fields, \"\\n\"),\n+ Eq(absl::StrJoin(after_fields, \"\\n\")));\n}\n// Sanity checks that uptime is present.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Do not check the stability of certain /proc/cpuinfo fields.
PiperOrigin-RevId: 421928736 |
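The C++ test above compares two /proc/cpuinfo snapshots only after dropping fields whose values legitimately drift between reads. A hedged Go rendering of the same filtering idea — the field names come from the commit, everything else is illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

var unstableFields = []string{"cpu MHz", "bogomips"}

// removeUnstable drops lines whose values may change between reads.
func removeUnstable(lines []string) []string {
	out := make([]string, 0, len(lines))
	for _, l := range lines {
		keep := true
		for _, f := range unstableFields {
			if strings.Contains(l, f) {
				keep = false
				break
			}
		}
		if keep {
			out = append(out, l)
		}
	}
	return out
}

func main() {
	before := []string{"model name : X", "cpu MHz : 2400.011"}
	after := []string{"model name : X", "cpu MHz : 2400.072"}
	same := strings.Join(removeUnstable(before), "\n") ==
		strings.Join(removeUnstable(after), "\n")
	fmt.Println(same) // true: only the volatile field differed
}
```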
259,909 | 15.01.2022 16:33:32 | 28,800 | 6d15b0ee64f191a3c5282058fee88bc2b99697e8 | Fix packet buffer reference counting in IP fragmentation/reassembly.
This change also adds a reference counting check to the
fragmentation/reassembly unit tests. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/fragmentation/BUILD",
"new_path": "pkg/tcpip/network/internal/fragmentation/BUILD",
"diff": "@@ -41,10 +41,13 @@ go_test(\nsize = \"small\",\nsrcs = [\n\"fragmentation_test.go\",\n+ \"main_test.go\",\n\"reassembler_test.go\",\n],\nlibrary = \":fragmentation\",\ndeps = [\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip/buffer\",\n\"//pkg/tcpip/faketime\",\n\"//pkg/tcpip/network/internal/testutil\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/fragmentation/fragmentation.go",
"new_path": "pkg/tcpip/network/internal/fragmentation/fragmentation.go",
"diff": "@@ -237,6 +237,14 @@ func (f *Fragmentation) release(r *reassembler, timedOut bool) {\nif h := f.timeoutHandler; timedOut && h != nil {\nh.OnReassemblyTimeout(r.pkt)\n}\n+ if r.pkt != nil {\n+ r.pkt.DecRef()\n+ }\n+ for _, h := range r.holes {\n+ if h.pkt != nil {\n+ h.pkt.DecRef()\n+ }\n+ }\n}\n// releaseReassemblersLocked releases already-expired reassemblers, then\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/fragmentation/fragmentation_test.go",
"new_path": "pkg/tcpip/network/internal/fragmentation/fragmentation_test.go",
"diff": "-// Copyright 2018 The gVisor Authors.\n+// Copyright 2022 The gVisor Authors.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n@@ -61,6 +61,7 @@ type processOutput struct {\ndone bool\n}\n+func TestFragmentationProcess(t *testing.T) {\nvar processTestCases = []struct {\ncomment string\nin []processInput\n@@ -104,13 +105,12 @@ var processTestCases = []struct {\n},\n},\n}\n-\n-func TestFragmentationProcess(t *testing.T) {\nfor _, c := range processTestCases {\nt.Run(c.comment, func(t *testing.T) {\nf := NewFragmentation(minBlockSize, 1024, 512, reassembleTimeout, &faketime.NullClock{}, nil)\nfirstFragmentProto := c.in[0].proto\nfor i, in := range c.in {\n+ defer in.pkt.DecRef()\nresPkt, proto, done, err := f.Process(in.id, in.first, in.last, in.more, in.proto, in.pkt)\nif err != nil {\nt.Fatalf(\"f.Process(%+v, %d, %d, %t, %d, %#v) failed: %s\",\n@@ -180,7 +180,9 @@ func TestReassemblingTimeout(t *testing.T) {\nmemSizeOfFrags := func(frags ...*fragment) int {\nvar size int\nfor _, frag := range frags {\n- size += pkt(len(frag.data), frag.data).MemSize()\n+ p := pkt(len(frag.data), frag.data)\n+ size += p.MemSize()\n+ p.DecRef()\n}\nreturn size\n}\n@@ -254,7 +256,9 @@ func TestReassemblingTimeout(t *testing.T) {\nfor _, event := range test.events {\nclock.Advance(event.clockAdvance)\nif frag := event.fragment; frag != nil {\n- _, _, done, err := f.Process(FragmentID{}, frag.first, frag.last, frag.more, protocol, pkt(len(frag.data), frag.data))\n+ p := pkt(len(frag.data), frag.data)\n+ defer p.DecRef()\n+ _, _, done, err := f.Process(FragmentID{}, frag.first, frag.last, frag.more, protocol, p)\nif err != nil {\nt.Fatalf(\"%s: f.Process failed: %s\", event.name, err)\n}\n@@ -271,25 +275,41 @@ func TestReassemblingTimeout(t *testing.T) {\n}\nfunc TestMemoryLimits(t *testing.T) {\n- lowLimit := pkt(1, \"0\").MemSize()\n+ p := pkt(1, \"0\")\n+ defer p.DecRef()\n+ lowLimit := p.MemSize()\nhighLimit := 3 * lowLimit // Allow at most 3 such packets.\n- f := NewFragmentation(minBlockSize, highLimit, lowLimit, reassembleTimeout, &faketime.NullClock{}, nil)\n+ // Using a manual clock here and below because the fragmentation object\n+ // cleans up its reassemblers with a job that's scheduled with the clock\n+ // argument. If the clock does not schedule jobs, the reassemblers are not\n+ // released and the fragmentation object leaks packets.\n+ c := faketime.NewManualClock()\n+ defer c.Advance(reassembleTimeout)\n+ f := NewFragmentation(minBlockSize, highLimit, lowLimit, reassembleTimeout, c, nil)\n// Send first fragment with id = 0.\n- if _, _, _, err := f.Process(FragmentID{ID: 0}, 0, 0, true, 0xFF, pkt(1, \"0\")); err != nil {\n+ p0 := pkt(1, \"0\")\n+ defer p0.DecRef()\n+ if _, _, _, err := f.Process(FragmentID{ID: 0}, 0, 0, true, 0xFF, p0); err != nil {\nt.Fatal(err)\n}\n// Send first fragment with id = 1.\n- if _, _, _, err := f.Process(FragmentID{ID: 1}, 0, 0, true, 0xFF, pkt(1, \"1\")); err != nil {\n+ p1 := pkt(1, \"1\")\n+ defer p1.DecRef()\n+ if _, _, _, err := f.Process(FragmentID{ID: 1}, 0, 0, true, 0xFF, p1); err != nil {\nt.Fatal(err)\n}\n// Send first fragment with id = 2.\n- if _, _, _, err := f.Process(FragmentID{ID: 2}, 0, 0, true, 0xFF, pkt(1, \"2\")); err != nil {\n+ p2 := pkt(1, \"2\")\n+ defer p2.DecRef()\n+ if _, _, _, err := f.Process(FragmentID{ID: 2}, 0, 0, true, 0xFF, p2); err != nil {\nt.Fatal(err)\n}\n// Send first fragment with id = 3. 
This should caused id = 0 and id = 1 to be\n// evicted.\n- if _, _, _, err := f.Process(FragmentID{ID: 3}, 0, 0, true, 0xFF, pkt(1, \"3\")); err != nil {\n+ p3 := pkt(1, \"3\")\n+ defer p3.DecRef()\n+ if _, _, _, err := f.Process(FragmentID{ID: 3}, 0, 0, true, 0xFF, p3); err != nil {\nt.Fatal(err)\n}\n@@ -305,14 +325,22 @@ func TestMemoryLimits(t *testing.T) {\n}\nfunc TestMemoryLimitsIgnoresDuplicates(t *testing.T) {\n- memSize := pkt(1, \"0\").MemSize()\n- f := NewFragmentation(minBlockSize, memSize, 0, reassembleTimeout, &faketime.NullClock{}, nil)\n+ p0 := pkt(1, \"0\")\n+ defer p0.DecRef()\n+ memSize := p0.MemSize()\n+ c := faketime.NewManualClock()\n+ defer c.Advance(reassembleTimeout)\n+ f := NewFragmentation(minBlockSize, memSize, 0, reassembleTimeout, c, nil)\n// Send first fragment with id = 0.\n- if _, _, _, err := f.Process(FragmentID{}, 0, 0, true, 0xFF, pkt(1, \"0\")); err != nil {\n+ p1 := pkt(1, \"0\")\n+ defer p1.DecRef()\n+ if _, _, _, err := f.Process(FragmentID{}, 0, 0, true, 0xFF, p1); err != nil {\nt.Fatal(err)\n}\n// Send the same packet again.\n- if _, _, _, err := f.Process(FragmentID{}, 0, 0, true, 0xFF, pkt(1, \"0\")); err != nil {\n+ p1dup := pkt(1, \"0\")\n+ defer p1dup.DecRef()\n+ if _, _, _, err := f.Process(FragmentID{}, 0, 0, true, 0xFF, p1dup); err != nil {\nt.Fatal(err)\n}\n@@ -404,8 +432,16 @@ func TestErrors(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- f := NewFragmentation(test.blockSize, HighFragThreshold, LowFragThreshold, reassembleTimeout, &faketime.NullClock{}, nil)\n- _, _, done, err := f.Process(FragmentID{}, test.first, test.last, test.more, 0, pkt(len(test.data), test.data))\n+ p0 := pkt(len(test.data), test.data)\n+ defer p0.DecRef()\n+ c := faketime.NewManualClock()\n+ defer c.Advance(reassembleTimeout)\n+ f := NewFragmentation(test.blockSize, HighFragThreshold, LowFragThreshold, reassembleTimeout, c, nil)\n+ resPkt, _, done, err := f.Process(FragmentID{}, test.first, test.last, test.more, 0, p0)\n+\n+ if resPkt != nil {\n+ resPkt.DecRef()\n+ }\nif !errors.Is(err, test.err) {\nt.Errorf(\"got Process(_, %d, %d, %t, _, %q) = (_, _, _, %v), want = (_, _, _, %v)\", test.first, test.last, test.more, test.data, err, test.err)\n}\n@@ -482,11 +518,13 @@ func TestPacketFragmenter(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\npkt := testutil.MakeRandPkt(test.transportHeaderLen, reserve, []int{test.payloadSize}, proto)\n+ defer pkt.DecRef()\noriginalPayload := stack.PayloadSince(pkt.TransportHeader())\nvar reassembledPayload buffer.VectorisedView\npf := MakePacketFragmenter(pkt, test.fragmentPayloadLen, reserve)\nfor i := 0; ; i++ {\nfragPkt, offset, copied, more := pf.BuildNextFragment()\n+ defer fragPkt.DecRef()\nwantFragment := test.wantFragments[i]\nif got := pf.RemainingFragmentCount(); got != wantFragment.remaining {\nt.Errorf(\"(fragment #%d) got pf.RemainingFragmentCount() = %d, want = %d\", i, got, wantFragment.remaining)\n@@ -538,7 +576,9 @@ func TestTimeoutHandler(t *testing.T) {\n)\npk1 := pkt(1, \"1\")\n+ defer pk1.DecRef()\npk2 := pkt(1, \"2\")\n+ defer pk2.DecRef()\ntype processParam struct {\nfirst uint16\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/network/internal/fragmentation/main_test.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package fragmentation\n+\n+import (\n+ \"os\"\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n+)\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/fragmentation/reassembler.go",
"new_path": "pkg/tcpip/network/internal/fragmentation/reassembler.go",
"diff": "@@ -135,6 +135,7 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *s\nfinal: currentHole.final,\npkt: pkt,\n}\n+ pkt.IncRef()\nr.filled++\n// For IPv6, it is possible to have different Protocol values between\n// fragments of a packet (because, unlike IPv4, the Protocol is not used to\n@@ -145,11 +146,13 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *s\n// options received in the first fragment should be used - and they should\n// override options from following fragments.\nif first == 0 {\n+ if r.pkt != nil {\n+ r.pkt.DecRef()\n+ }\nr.pkt = pkt\n+ pkt.IncRef()\nr.proto = proto\n}\n-\n- pkt.IncRef()\nbreak\n}\nif !holeFound {\n@@ -167,7 +170,6 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *s\n})\nresPkt := r.holes[0].pkt\n- resPkt.DecRef()\nfor i := 1; i < len(r.holes); i++ {\nstack.MergeFragment(resPkt, r.holes[i].pkt)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/fragmentation/reassembler_test.go",
"new_path": "pkg/tcpip/network/internal/fragmentation/reassembler_test.go",
"diff": "@@ -186,6 +186,19 @@ func TestReassemblerProcess(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\nr := newReassembler(FragmentID{}, &faketime.NullClock{})\n+ // Emulate a call to (*Fragmentation).release(), which always happens\n+ // after reassembler error or completion. Without release(), the\n+ // reassembler will leak PacketBuffers.\n+ defer func() {\n+ for _, h := range r.holes {\n+ if h.pkt != nil {\n+ h.pkt.DecRef()\n+ }\n+ }\n+ if r.pkt != nil {\n+ r.pkt.DecRef()\n+ }\n+ }()\nvar resPkt *stack.PacketBuffer\nvar isDone bool\nfor _, param := range test.params {\n@@ -229,5 +242,18 @@ func TestReassemblerProcess(t *testing.T) {\n}\n}\n})\n+ for _, p := range test.params {\n+ if p.pkt != nil {\n+ p.pkt.DecRef()\n+ }\n+ }\n+ for _, w := range test.want {\n+ if w.pkt != nil {\n+ w.pkt.DecRef()\n+ }\n+ }\n+ if test.wantPkt != nil {\n+ test.wantPkt.DecRef()\n+ }\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -409,10 +409,12 @@ func (e *endpoint) handleFragments(_ *stack.Route, networkMTU uint32, pkt *stack\nfor {\nfragPkt, more := buildNextFragment(&pf, networkHeader)\nif err := handler(fragPkt); err != nil {\n+ fragPkt.DecRef()\nreturn n, pf.RemainingFragmentCount() + 1, err\n}\nn++\nif !more {\n+ fragPkt.DecRef()\nreturn n, pf.RemainingFragmentCount(), nil\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ipv6.go",
"new_path": "pkg/tcpip/network/ipv6/ipv6.go",
"diff": "@@ -731,10 +731,12 @@ func (e *endpoint) handleFragments(r *stack.Route, networkMTU uint32, pkt *stack\nfor {\nfragPkt, more := buildNextFragment(&pf, networkHeader, transProto, id)\nif err := handler(fragPkt); err != nil {\n+ fragPkt.DecRef()\nreturn n, pf.RemainingFragmentCount() + 1, err\n}\nn++\nif !more {\n+ fragPkt.DecRef()\nreturn n, pf.RemainingFragmentCount(), nil\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix packet buffer reference counting in IP fragmentation/reassembly.
This change also adds a reference counting check to the
fragmentation/reassembly unit tests.
PiperOrigin-RevId: 422097740 |
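The fix above boils down to an ownership rule for reference-counted packets. Here is a toy model of it — the packet and reassembler types are stand-ins, not the netstack ones: every reference the reassembler stores takes its own IncRef, and release() drops each stored reference exactly once on every exit path (completion, error, or timeout).

```go
package main

import "fmt"

type packet struct{ refs int }

func (p *packet) IncRef() { p.refs++ }

func (p *packet) DecRef() {
	p.refs--
	if p.refs == 0 {
		fmt.Println("packet freed")
	}
}

type reassembler struct {
	first *packet   // fragment at offset 0; its options win
	holes []*packet // one stored packet per filled hole
}

func (r *reassembler) process(offset int, pkt *packet) {
	pkt.IncRef() // reference held via r.holes
	r.holes = append(r.holes, pkt)
	if offset == 0 {
		if r.first != nil {
			r.first.DecRef() // replace any previously seen first fragment
		}
		pkt.IncRef() // independent reference held via r.first
		r.first = pkt
	}
}

// release drops every stored reference, so nothing leaks even when
// reassembly never completes.
func (r *reassembler) release() {
	if r.first != nil {
		r.first.DecRef()
	}
	for _, p := range r.holes {
		p.DecRef()
	}
}

func main() {
	p := &packet{refs: 1} // the caller's own reference
	r := &reassembler{}
	r.process(0, p)
	r.release()
	p.DecRef() // caller drops its reference; prints "packet freed"
}
```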
259,962 | 18.01.2022 10:38:50 | 28,800 | 82fa6d300bf0a4a0ec06aadcd279cde108dc46c7 | Add Leak checking to UDP/ICMP tests. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/BUILD",
"new_path": "pkg/tcpip/transport/icmp/BUILD",
"diff": "@@ -48,6 +48,8 @@ go_test(\nsrcs = [\"icmp_test.go\"],\ndeps = [\n\":icmp\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n\"//pkg/tcpip/checker\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/icmp/icmp_test.go",
"new_path": "pkg/tcpip/transport/icmp/icmp_test.go",
"diff": "package icmp_test\nimport (\n+ \"os\"\n\"testing\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/checker\"\n@@ -237,3 +240,10 @@ func TestWriteUnboundWithBindToDevice(t *testing.T) {\n}\n}\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/BUILD",
"new_path": "pkg/tcpip/transport/udp/BUILD",
"diff": "@@ -49,6 +49,8 @@ go_test(\nsrcs = [\"udp_test.go\"],\ndeps = [\n\":udp\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n\"//pkg/tcpip/checker\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/udp_test.go",
"new_path": "pkg/tcpip/transport/udp/udp_test.go",
"diff": "@@ -19,10 +19,13 @@ import (\n\"fmt\"\n\"io/ioutil\"\n\"math/rand\"\n+ \"os\"\n\"testing\"\n\"github.com/google/go-cmp/cmp\"\n\"golang.org/x/time/rate\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/checker\"\n@@ -442,9 +445,11 @@ func (c *testContext) injectPacket(flow testFlow, payload []byte, badChecksum bo\n}\n}\n}\n- c.linkEP.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n- }))\n+ })\n+ defer pkt.DecRef()\n+ c.linkEP.InjectInbound(ipv4.ProtocolNumber, pkt)\n} else {\nbuf := c.buildV6Packet(payload, &h)\nif badChecksum {\n@@ -453,9 +458,11 @@ func (c *testContext) injectPacket(flow testFlow, payload []byte, badChecksum bo\nu := header.UDP(buf[header.IPv6MinimumSize:])\nu.SetChecksum(u.Checksum() + 1)\n}\n- c.linkEP.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n- }))\n+ })\n+ defer pkt.DecRef()\n+ c.linkEP.InjectInbound(ipv6.ProtocolNumber, pkt)\n}\n}\n@@ -851,9 +858,11 @@ func TestV4ReadSelfSource(t *testing.T) {\nh.srcAddr = h.dstAddr\nbuf := c.buildV4Packet(payload, &h)\n- c.linkEP.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n- }))\n+ })\n+ defer pkt.DecRef()\n+ c.linkEP.InjectInbound(ipv4.ProtocolNumber, pkt)\nif got := c.s.Stats().IP.InvalidSourceAddressesReceived.Value(); got != tt.wantInvalidSource {\nt.Errorf(\"c.s.Stats().IP.InvalidSourceAddressesReceived got %d, want %d\", got, tt.wantInvalidSource)\n@@ -2108,10 +2117,11 @@ func TestIncrementMalformedPacketsReceived(t *testing.T) {\n// Invalidate the UDP header length field.\nu := header.UDP(buf[header.IPv6MinimumSize:])\nu.SetLength(u.Length() + 1)\n-\n- c.linkEP.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n- }))\n+ })\n+ defer pkt.DecRef()\n+ c.linkEP.InjectInbound(ipv6.ProtocolNumber, pkt)\nconst want = 1\nif got := c.s.Stats().UDP.MalformedPacketsReceived.Value(); got != want {\n@@ -2164,9 +2174,11 @@ func TestShortHeader(t *testing.T) {\ncopy(buf[header.IPv6MinimumSize:], udpHdr)\n// Inject packet.\n- c.linkEP.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n- }))\n+ })\n+ defer pkt.DecRef()\n+ c.linkEP.InjectInbound(ipv6.ProtocolNumber, pkt)\nif got, want := c.s.Stats().NICs.MalformedL4RcvdPackets.Value(), uint64(1); got != want {\nt.Errorf(\"got c.s.Stats().NIC.MalformedL4RcvdPackets.Value() = %d, want = %d\", got, want)\n@@ -2219,9 +2231,12 @@ func TestPayloadModifiedV4(t *testing.T) {\n// Modify the payload so that the checksum value in the UDP header will be\n// incorrect.\nbuf[len(buf)-1]++\n- c.linkEP.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n- }))\n+ })\n+ defer pkt.DecRef()\n+ c.linkEP.InjectInbound(ipv4.ProtocolNumber, pkt)\nconst want = 1\nif got := c.s.Stats().UDP.ChecksumErrors.Value(); got != want {\n@@ 
-2250,9 +2265,11 @@ func TestPayloadModifiedV6(t *testing.T) {\n// Modify the payload so that the checksum value in the UDP header will be\n// incorrect.\nbuf[len(buf)-1]++\n- c.linkEP.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n- }))\n+ })\n+ defer pkt.DecRef()\n+ c.linkEP.InjectInbound(ipv6.ProtocolNumber, pkt)\nconst want = 1\nif got := c.s.Stats().UDP.ChecksumErrors.Value(); got != want {\n@@ -2281,9 +2298,11 @@ func TestChecksumZeroV4(t *testing.T) {\n// Set the checksum field in the UDP header to zero.\nu := header.UDP(buf[header.IPv4MinimumSize:])\nu.SetChecksum(0)\n- c.linkEP.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n- }))\n+ })\n+ defer pkt.DecRef()\n+ c.linkEP.InjectInbound(ipv4.ProtocolNumber, pkt)\nconst want = 0\nif got := c.s.Stats().UDP.ChecksumErrors.Value(); got != want {\n@@ -2312,9 +2331,11 @@ func TestChecksumZeroV6(t *testing.T) {\n// Set the checksum field in the UDP header to zero.\nu := header.UDP(buf[header.IPv6MinimumSize:])\nu.SetChecksum(0)\n- c.linkEP.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n- }))\n+ })\n+ defer pkt.DecRef()\n+ c.linkEP.InjectInbound(ipv6.ProtocolNumber, pkt)\nconst want = 1\nif got := c.s.Stats().UDP.ChecksumErrors.Value(); got != want {\n@@ -2620,3 +2641,10 @@ func TestOutgoingSubnetBroadcast(t *testing.T) {\n})\n}\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add Leak checking to UDP/ICMP tests.
PiperOrigin-RevId: 422593497 |
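The TestMain shape added here recurs across the next several records. Copied from the diff above with explanatory comments added:

```go
package udp_test

import (
	"os"
	"testing"

	"gvisor.dev/gvisor/pkg/refs"
	"gvisor.dev/gvisor/pkg/refsvfs2"
)

func TestMain(m *testing.M) {
	refs.SetLeakMode(refs.LeaksPanic) // any leaked reference panics
	code := m.Run()
	refsvfs2.DoLeakCheck() // final sweep for still-live tracked objects
	os.Exit(code)
}
```

This is why every test in the diff now pairs object creation with a `defer ...DecRef()`: with the leak mode set to panic, any forgotten reference fails the whole package rather than passing silently.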
259,909 | 18.01.2022 11:27:12 | 28,800 | 2f6454681c56d730d9c48ca750bf421a798b712e | Add leak checker to waitable tests. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/waitable/BUILD",
"new_path": "pkg/tcpip/link/waitable/BUILD",
"diff": "@@ -23,6 +23,8 @@ go_test(\n],\nlibrary = \":waitable\",\ndeps = [\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/header\",\n\"//pkg/tcpip/stack\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/waitable/waitable_test.go",
"new_path": "pkg/tcpip/link/waitable/waitable_test.go",
"diff": "package waitable\nimport (\n+ \"os\"\n\"testing\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n@@ -110,6 +113,7 @@ func TestWaitWrite(t *testing.T) {\nif want := 1; ep.writeCount != want {\nt.Fatalf(\"Unexpected writeCount: got=%v, want=%v\", ep.writeCount, want)\n}\n+ pkts.DecRef()\n}\n{\nvar pkts stack.PacketBufferList\n@@ -124,6 +128,7 @@ func TestWaitWrite(t *testing.T) {\nif want := 2; ep.writeCount != want {\nt.Fatalf(\"Unexpected writeCount: got=%v, want=%v\", ep.writeCount, want)\n}\n+ pkts.DecRef()\n}\n{\n@@ -139,6 +144,7 @@ func TestWaitWrite(t *testing.T) {\nif want := 2; ep.writeCount != want {\nt.Fatalf(\"Unexpected writeCount: got=%v, want=%v\", ep.writeCount, want)\n}\n+ pkts.DecRef()\n}\n}\n@@ -153,24 +159,36 @@ func TestWaitDispatch(t *testing.T) {\n}\n// Dispatch and check that it goes through.\n- ep.dispatcher.DeliverNetworkPacket(\"\", \"\", 0, stack.NewPacketBuffer(stack.PacketBufferOptions{}))\n+ {\n+ p := stack.NewPacketBuffer(stack.PacketBufferOptions{})\n+ ep.dispatcher.DeliverNetworkPacket(\"\", \"\", 0, p)\nif want := 1; ep.dispatchCount != want {\nt.Fatalf(\"Unexpected dispatchCount: got=%v, want=%v\", ep.dispatchCount, want)\n}\n+ p.DecRef()\n+ }\n// Wait on writes, then try to dispatch. It must go through.\n+ {\nwep.WaitWrite()\n- ep.dispatcher.DeliverNetworkPacket(\"\", \"\", 0, stack.NewPacketBuffer(stack.PacketBufferOptions{}))\n+ p := stack.NewPacketBuffer(stack.PacketBufferOptions{})\n+ ep.dispatcher.DeliverNetworkPacket(\"\", \"\", 0, p)\nif want := 2; ep.dispatchCount != want {\nt.Fatalf(\"Unexpected dispatchCount: got=%v, want=%v\", ep.dispatchCount, want)\n}\n+ p.DecRef()\n+ }\n// Wait on dispatches, then try to dispatch. It must not go through.\n+ {\nwep.WaitDispatch()\n- ep.dispatcher.DeliverNetworkPacket(\"\", \"\", 0, stack.NewPacketBuffer(stack.PacketBufferOptions{}))\n+ p := stack.NewPacketBuffer(stack.PacketBufferOptions{})\n+ ep.dispatcher.DeliverNetworkPacket(\"\", \"\", 0, p)\nif want := 2; ep.dispatchCount != want {\nt.Fatalf(\"Unexpected dispatchCount: got=%v, want=%v\", ep.dispatchCount, want)\n}\n+ p.DecRef()\n+ }\n}\nfunc TestOtherMethods(t *testing.T) {\n@@ -204,3 +222,10 @@ func TestOtherMethods(t *testing.T) {\nt.Fatalf(\"Unexpected LinkAddress: got=%q, want=%q\", v, linkAddr)\n}\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add leak checker to waitable tests.
PiperOrigin-RevId: 422605449 |
259,909 | 18.01.2022 16:03:26 | 28,800 | 8c9dc0babfd64d5f053039c898401f94c6be6d0d | Update PacketBuffer to hold a Buffer struct instead of a Buffer pointer.
The extra pointer indirection is not necessary and allows for a nil buffer.
This change bumps the PacketBuffer struct size from 296 to 792 bytes. | [
{
"change_type": "MODIFY",
"old_path": "pkg/buffer/view.go",
"new_path": "pkg/buffer/view.go",
"diff": "@@ -380,8 +380,8 @@ func (v *View) Copy() (other View) {\n// Clone makes a more shallow copy compared to Copy. The underlying payload\n// slice (buffer.data) is shared but the buffers themselves are copied.\n-func (v *View) Clone() *View {\n- other := &View{\n+func (v *View) Clone() View {\n+ other := View{\nsize: v.size,\n}\nfor buf := v.data.Front(); buf != nil; buf = buf.Next() {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/fragmentation/fragmentation_test.go",
"new_path": "pkg/tcpip/network/internal/fragmentation/fragmentation_test.go",
"diff": "@@ -107,7 +107,7 @@ func TestFragmentationProcess(t *testing.T) {\n}\nfor _, c := range processTestCases {\nt.Run(c.comment, func(t *testing.T) {\n- f := NewFragmentation(minBlockSize, 1024, 512, reassembleTimeout, &faketime.NullClock{}, nil)\n+ f := NewFragmentation(minBlockSize, 2048, 512, reassembleTimeout, &faketime.NullClock{}, nil)\nfirstFragmentProto := c.in[0].proto\nfor i, in := range c.in {\ndefer in.pkt.DecRef()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/packet_buffer.go",
"new_path": "pkg/tcpip/stack/packet_buffer.go",
"diff": "@@ -106,7 +106,7 @@ type PacketBuffer struct {\n// buf is the underlying buffer for the packet. See struct level docs for\n// details.\n- buf *buffer.Buffer\n+ buf buffer.Buffer\nreserved int\npushed int\nconsumed int\n@@ -169,7 +169,6 @@ type PacketBuffer struct {\nfunc NewPacketBuffer(opts PacketBufferOptions) *PacketBuffer {\npk := pkPool.Get().(*PacketBuffer)\npk.reset()\n- pk.buf = &buffer.Buffer{}\nif opts.ReserveHeaderBytes != 0 {\npk.buf.AppendOwned(make([]byte, opts.ReserveHeaderBytes))\npk.reserved = opts.ReserveHeaderBytes\n@@ -558,7 +557,7 @@ func (d PacketData) AppendView(v tcpipbuffer.View) {\n// frag and frag should not be used again.\nfunc MergeFragment(dst, frag *PacketBuffer) {\nfrag.buf.TrimFront(int64(frag.dataOffset()))\n- dst.buf.Merge(frag.buf)\n+ dst.buf.Merge(&frag.buf)\n}\n// ReadFromVV moves at most count bytes from the beginning of srcVV to the end\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update PacketBuffer to hold a Buffer struct instead of a Buffer pointer.
The extra pointer indirection is not necessary and allows for a nil buffer.
This change bumps the PacketBuffer struct size from 296 to 792 bytes.
PiperOrigin-RevId: 422669812 |
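A quick illustration of the trade-off the message describes — this is not gVisor code, and the printed sizes assume a 64-bit platform:

```go
package main

import (
	"fmt"
	"unsafe"
)

type buffer struct{ data [64]byte } // stand-in for a large Buffer struct

type withPointer struct{ buf *buffer } // 8 bytes, but buf may be nil

type withValue struct{ buf buffer } // 64 bytes, always directly usable

func main() {
	fmt.Println(unsafe.Sizeof(withPointer{})) // 8
	fmt.Println(unsafe.Sizeof(withValue{}))   // 64
	var v withValue
	v.buf.data[0] = 1 // no nil check or allocation needed before first use
	fmt.Println(v.buf.data[0])
}
```

Embedding by value removes one pointer chase per access and makes the nil state unrepresentable, at the cost of the inner type's full size counting toward the outer struct — the 296-to-792-byte bump noted above.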
259,909 | 18.01.2022 17:07:03 | 28,800 | 30f36c9c8789d0fcebf6371fc3d8820a873b2c49 | Enable leak checker in sharedmem tests.
This change also adds packet cleanup after the qDisc queue is closed. This is
required for sharedmem_server_test to pass. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/qdisc/fifo/fifo.go",
"new_path": "pkg/tcpip/link/qdisc/fifo/fifo.go",
"diff": "@@ -69,6 +69,13 @@ func New(lower stack.LinkWriter, n int, queueLen int) stack.QueueingDiscipline {\ngo func() {\ndefer d.wg.Done()\nqd.dispatchLoop()\n+ qd.mu.Lock()\n+ for qd.queue.Front() != nil {\n+ p := qd.queue.Front()\n+ qd.queue.Remove(p)\n+ p.DecRef()\n+ }\n+ qd.mu.Unlock()\n}()\n}\nreturn d\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/BUILD",
"new_path": "pkg/tcpip/link/sharedmem/BUILD",
"diff": "@@ -39,6 +39,8 @@ go_test(\nsrcs = [\"sharedmem_test.go\"],\nlibrary = \":sharedmem\",\ndeps = [\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/sync\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n@@ -57,6 +59,8 @@ go_test(\ndeps = [\n\":sharedmem\",\n\"//pkg/log\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/adapters/gonet\",\n\"//pkg/tcpip/header\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem_server_test.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem_server_test.go",
"diff": "@@ -22,6 +22,7 @@ import (\n\"io\"\n\"net\"\n\"net/http\"\n+ \"os\"\n\"strings\"\n\"syscall\"\n\"testing\"\n@@ -29,6 +30,8 @@ import (\n\"golang.org/x/sync/errgroup\"\n\"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/log\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n@@ -287,3 +290,10 @@ func TestServerRoundTripStress(t *testing.T) {\nt.Fatalf(\"request failed: %s\", err)\n}\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem_test.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem_test.go",
"diff": "@@ -20,11 +20,14 @@ package sharedmem\nimport (\n\"bytes\"\n\"math/rand\"\n+ \"os\"\n\"strings\"\n\"testing\"\n\"time\"\n\"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n@@ -235,6 +238,7 @@ func TestSimpleSend(t *testing.T) {\npkt.NetworkProtocolNumber = proto\nvar pkts stack.PacketBufferList\npkts.PushBack(pkt)\n+ defer pkts.DecRef()\nif _, err := c.ep.WritePackets(pkts); err != nil {\nt.Fatalf(\"WritePackets failed: %s\", err)\n}\n@@ -310,6 +314,7 @@ func TestPreserveSrcAddressInSend(t *testing.T) {\npkt.NetworkProtocolNumber = proto\nvar pkts stack.PacketBufferList\n+ defer pkts.DecRef()\npkts.PushBack(pkt)\nif _, err := c.ep.WritePackets(pkts); err != nil {\nt.Fatalf(\"WritePackets failed: %s\", err)\n@@ -360,6 +365,8 @@ func TestFillTxQueue(t *testing.T) {\n// Each packet is uses no more than 40 bytes, so write that many packets\n// until the tx queue if full.\n+ // Each packet uses no more than 40 bytes, so write that many packets\n+ // until the tx queue if full.\nids := make(map[uint64]struct{})\nfor i := queuePipeSize / 40; i > 0; i-- {\npkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n@@ -372,8 +379,10 @@ func TestFillTxQueue(t *testing.T) {\nvar pkts stack.PacketBufferList\npkts.PushBack(pkt)\nif _, err := c.ep.WritePackets(pkts); err != nil {\n+ pkts.DecRef()\nt.Fatalf(\"WritePackets failed unexpectedly: %s\", err)\n}\n+ pkts.DecRef()\n// Check that they have different IDs.\ndesc := c.txq.tx.Pull()\n@@ -398,6 +407,7 @@ func TestFillTxQueue(t *testing.T) {\nif _, ok := err.(*tcpip.ErrWouldBlock); !ok {\nt.Fatalf(\"got WritePackets(...) = %s, want %s\", err, &tcpip.ErrWouldBlock{})\n}\n+ pkts.DecRef()\n}\n// TestFillTxQueueAfterBadCompletion sends a bad completion, then sends packets\n@@ -431,6 +441,7 @@ func TestFillTxQueueAfterBadCompletion(t *testing.T) {\nif _, err := c.ep.WritePackets(pkts); err != nil {\nt.Fatalf(\"WritePackets failed unexpectedly: %s\", err)\n}\n+ pkts.DecRef()\n}\n// Complete the two writes twice.\n@@ -458,6 +469,7 @@ func TestFillTxQueueAfterBadCompletion(t *testing.T) {\nif _, err := c.ep.WritePackets(pkts); err != nil {\nt.Fatalf(\"WritePackets failed unexpectedly: %s\", err)\n}\n+ pkts.DecRef()\n// Check that they have different IDs.\ndesc := c.txq.tx.Pull()\n@@ -481,6 +493,7 @@ func TestFillTxQueueAfterBadCompletion(t *testing.T) {\nif _, ok := err.(*tcpip.ErrWouldBlock); !ok {\nt.Fatalf(\"got WritePackets(...) = %s, want %s\", err, &tcpip.ErrWouldBlock{})\n}\n+ pkts.DecRef()\n}\n// TestFillTxMemory sends packets until the we run out of shared memory.\n@@ -510,6 +523,7 @@ func TestFillTxMemory(t *testing.T) {\nif _, err := c.ep.WritePackets(pkts); err != nil {\nt.Fatalf(\"WritePackets failed unexpectedly: %s\", err)\n}\n+ pkts.DecRef()\n// Check that they have different IDs.\ndesc := c.txq.tx.Pull()\n@@ -534,6 +548,7 @@ func TestFillTxMemory(t *testing.T) {\nif _, ok := err.(*tcpip.ErrWouldBlock); !ok {\nt.Fatalf(\"got WritePackets(...) 
= %s, want %s\", err, &tcpip.ErrWouldBlock{})\n}\n+ pkts.DecRef()\n}\n// TestFillTxMemoryWithMultiBuffer sends packets until the we run out of\n@@ -564,6 +579,7 @@ func TestFillTxMemoryWithMultiBuffer(t *testing.T) {\nif _, err := c.ep.WritePackets(pkts); err != nil {\nt.Fatalf(\"WritePackets failed unexpectedly: %s\", err)\n}\n+ pkts.DecRef()\n// Pull the posted buffer.\nc.txq.tx.Pull()\n@@ -584,6 +600,7 @@ func TestFillTxMemoryWithMultiBuffer(t *testing.T) {\nif _, ok := err.(*tcpip.ErrWouldBlock); !ok {\nt.Fatalf(\"got WritePackets(...) = %s, want %s\", err, &tcpip.ErrWouldBlock{})\n}\n+ pkts.DecRef()\n}\n// Attempt to write the one-buffer packet again. It must succeed.\n@@ -599,6 +616,7 @@ func TestFillTxMemoryWithMultiBuffer(t *testing.T) {\nif _, err := c.ep.WritePackets(pkts); err != nil {\nt.Fatalf(\"WritePackets failed unexpectedly: %s\", err)\n}\n+ pkts.DecRef()\n}\n}\n@@ -815,3 +833,10 @@ func TestCloseWhileWaitingToPost(t *testing.T) {\ncleaned = true\nc.ep.Wait()\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable leak checker in sharedmem tests.
This change also adds packet cleanup after the qDisc queue is closed. This is
required for sharedmem_server_test to pass.
PiperOrigin-RevId: 422682841 |
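The fifo.go hunk above drains whatever is still queued once the dispatch loop exits, so queued packets cannot outlive the qdisc. A minimal sketch of that pattern, assuming a reference-counted packet type — all types below are illustrative, not the real fifo code:

```go
package main

import (
	"fmt"
	"sync"
)

type packet struct{ id int }

func (p *packet) DecRef() { fmt.Println("released", p.id) }

type queueDisc struct {
	mu    sync.Mutex
	queue []*packet // stand-in for the intrusive list in the real code
}

// drainAfterClose runs once the dispatch loop has exited: anything still
// queued will never be written, so its reference must be dropped here.
func (qd *queueDisc) drainAfterClose() {
	qd.mu.Lock()
	defer qd.mu.Unlock()
	for len(qd.queue) > 0 {
		p := qd.queue[0]
		qd.queue = qd.queue[1:]
		p.DecRef()
	}
}

func main() {
	qd := &queueDisc{queue: []*packet{{1}, {2}}}
	qd.drainAfterClose() // releases both queued packets
}
```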
259,909 | 18.01.2022 17:21:03 | 28,800 | 4ce5c43a309facd82a521facd545c463a34ba7cb | Add leak checking to qdisc and nested tests. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/nested/BUILD",
"new_path": "pkg/tcpip/link/nested/BUILD",
"diff": "@@ -23,6 +23,8 @@ go_test(\n\"nested_test.go\",\n],\ndeps = [\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/header\",\n\"//pkg/tcpip/link/nested\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/nested/nested_test.go",
"new_path": "pkg/tcpip/link/nested/nested_test.go",
"diff": "package nested_test\nimport (\n+ \"os\"\n\"testing\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/nested\"\n@@ -87,10 +90,14 @@ func TestNestedLinkEndpoint(t *testing.T) {\nt.Error(\"After attach, nestedEP.IsAttached() = false, want = true\")\n}\n- nestedEP.DeliverNetworkPacket(emptyAddress, emptyAddress, header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{}))\n+ {\n+ p := stack.NewPacketBuffer(stack.PacketBufferOptions{})\n+ nestedEP.DeliverNetworkPacket(emptyAddress, emptyAddress, header.IPv4ProtocolNumber, p)\n+ p.DecRef()\nif disp.count != 1 {\nt.Errorf(\"After first packet with dispatcher attached, got disp.count = %d, want = 1\", disp.count)\n}\n+ }\nnestedEP.Attach(nil)\nif childEP.IsAttached() {\n@@ -100,10 +107,20 @@ func TestNestedLinkEndpoint(t *testing.T) {\nt.Error(\"After detach, nestedEP.IsAttached() = true, want = false\")\n}\n+ {\ndisp.count = 0\n- nestedEP.DeliverNetworkPacket(emptyAddress, emptyAddress, header.IPv4ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{}))\n+ p := stack.NewPacketBuffer(stack.PacketBufferOptions{})\n+ nestedEP.DeliverNetworkPacket(emptyAddress, emptyAddress, header.IPv4ProtocolNumber, p)\n+ p.DecRef()\nif disp.count != 0 {\nt.Errorf(\"After second packet with dispatcher detached, got disp.count = %d, want = 0\", disp.count)\n}\n+ }\n+}\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/qdisc/fifo/BUILD",
"new_path": "pkg/tcpip/link/qdisc/fifo/BUILD",
"diff": "@@ -22,6 +22,8 @@ go_test(\nsrcs = [\"qdisc_test.go\"],\ndeps = [\n\":fifo\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/sync\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/qdisc/fifo/qdisc_test.go",
"new_path": "pkg/tcpip/link/qdisc/fifo/qdisc_test.go",
"diff": "@@ -16,8 +16,11 @@ package qdisc_test\nimport (\n\"math/rand\"\n+ \"os\"\n\"testing\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n@@ -63,3 +66,10 @@ func TestFastSimultaneousWrites(t *testing.T) {\n}()\n}\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add leak checking to qdisc and nested tests.
PiperOrigin-RevId: 422685326 |
259,909 | 18.01.2022 18:47:34 | 28,800 | 3d578afc8da96bf87e40b27423d063db45e5e4cc | Add leak checking to ethernet and muxed tests. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/ethernet/BUILD",
"new_path": "pkg/tcpip/link/ethernet/BUILD",
"diff": "@@ -20,6 +20,8 @@ go_test(\nsrcs = [\"ethernet_test.go\"],\ndeps = [\n\":ethernet\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n\"//pkg/tcpip/header\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/ethernet/ethernet_test.go",
"new_path": "pkg/tcpip/link/ethernet/ethernet_test.go",
"diff": "@@ -16,8 +16,11 @@ package ethernet_test\nimport (\n\"fmt\"\n+ \"os\"\n\"testing\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n@@ -63,9 +66,9 @@ func TestDeliverNetworkPacket(t *testing.T) {\nDstAddr: otherLinkAddr2,\nType: header.IPv4ProtocolNumber,\n})\n- e.DeliverNetworkPacket(\"\", \"\", 0, stack.NewPacketBuffer(stack.PacketBufferOptions{\n- Data: eth.ToVectorisedView(),\n- }))\n+ p := stack.NewPacketBuffer(stack.PacketBufferOptions{Data: eth.ToVectorisedView()})\n+ defer p.DecRef()\n+ e.DeliverNetworkPacket(\"\", \"\", 0, p)\nif networkDispatcher.networkPackets != 1 {\nt.Fatalf(\"got networkDispatcher.networkPackets = %d, want = 1\", networkDispatcher.networkPackets)\n}\n@@ -135,6 +138,7 @@ func TestWritePacketsAddHeader(t *testing.T) {\npkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nReserveHeaderBytes: int(e.MaxHeaderLength()),\n})\n+ defer pkt.DecRef()\npkt.NetworkProtocolNumber = netProto\npkt.EgressRoute.RemoteLinkAddress = remoteLinkAddr\n@@ -165,3 +169,10 @@ func TestWritePacketsAddHeader(t *testing.T) {\n}\n}\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/muxed/BUILD",
"new_path": "pkg/tcpip/link/muxed/BUILD",
"diff": "@@ -19,6 +19,8 @@ go_test(\nsrcs = [\"injectable_test.go\"],\nlibrary = \":muxed\",\ndeps = [\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n\"//pkg/tcpip/link/fdbased\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/muxed/injectable_test.go",
"new_path": "pkg/tcpip/link/muxed/injectable_test.go",
"diff": "@@ -21,6 +21,8 @@ import (\n\"testing\"\n\"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/fdbased\"\n@@ -50,6 +52,7 @@ func TestInjectableEndpointDispatch(t *testing.T) {\nReserveHeaderBytes: 1,\nData: buffer.NewViewFromBytes([]byte{0xFB}).ToVectorisedView(),\n})\n+ defer pkt.DecRef()\npkt.TransportHeader().Push(1)[0] = 0xFA\npkt.EgressRoute.RemoteAddress = dstIP\npkt.NetworkProtocolNumber = ipv4.ProtocolNumber\n@@ -77,6 +80,7 @@ func TestInjectableEndpointDispatchHdrOnly(t *testing.T) {\nReserveHeaderBytes: 1,\nData: buffer.NewView(0).ToVectorisedView(),\n})\n+ defer pkt.DecRef()\npkt.TransportHeader().Push(1)[0] = 0xFA\npkt.EgressRoute.RemoteAddress = dstIP\npkt.NetworkProtocolNumber = ipv4.ProtocolNumber\n@@ -108,3 +112,10 @@ func makeTestInjectableEndpoint(t *testing.T) (*InjectableEndpoint, *os.File, tc\nendpoint := NewInjectableEndpoint(routes)\nreturn endpoint, os.NewFile(uintptr(pair[0]), \"test route end\"), dstIP\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add leak checking to ethernet and muxed tests.
PiperOrigin-RevId: 422698888 |
259,885 | 19.01.2022 13:44:18 | 28,800 | 8e22ce50198bef6ce182cb1426b012400a348ef3 | Consistently order Pipe.mu before other file mutexes and MM.activeMu. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/pipe/pipe_util.go",
"new_path": "pkg/sentry/kernel/pipe/pipe_util.go",
"diff": "@@ -43,21 +43,26 @@ func (p *Pipe) Release(context.Context) {\n// Read reads from the Pipe into dst.\nfunc (p *Pipe) Read(ctx context.Context, dst usermem.IOSequence) (int64, error) {\n- n, err := dst.CopyOutFrom(ctx, p)\n+ n, err := p.read(dst.NumBytes(), func(srcs safemem.BlockSeq) (uint64, error) {\n+ var done uint64\n+ for !srcs.IsEmpty() {\n+ src := srcs.Head()\n+ n, err := dst.CopyOut(ctx, src.ToSlice())\n+ done += uint64(n)\n+ if err != nil {\n+ return done, err\n+ }\n+ dst = dst.DropFirst(n)\n+ srcs = srcs.Tail()\n+ }\n+ return done, nil\n+ }, true /* removeFromSrc */)\nif n > 0 {\np.queue.Notify(waiter.WritableEvents)\n}\nreturn n, err\n}\n-// ReadToBlocks implements safemem.Reader.ReadToBlocks for Pipe.Read.\n-func (p *Pipe) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {\n- n, err := p.read(int64(dsts.NumBytes()), func(srcs safemem.BlockSeq) (uint64, error) {\n- return safemem.CopySeq(dsts, srcs)\n- }, true /* removeFromSrc */)\n- return uint64(n), err\n-}\n-\nfunc (p *Pipe) read(count int64, f func(srcs safemem.BlockSeq) (uint64, error), removeFromSrc bool) (int64, error) {\np.mu.Lock()\ndefer p.mu.Unlock()\n@@ -81,7 +86,20 @@ func (p *Pipe) WriteTo(ctx context.Context, w io.Writer, count int64, dup bool)\n// Write writes to the Pipe from src.\nfunc (p *Pipe) Write(ctx context.Context, src usermem.IOSequence) (int64, error) {\n- n, err := src.CopyInTo(ctx, p)\n+ n, err := p.write(src.NumBytes(), func(dsts safemem.BlockSeq) (uint64, error) {\n+ var done uint64\n+ for !dsts.IsEmpty() {\n+ dst := dsts.Head()\n+ n, err := src.CopyIn(ctx, dst.ToSlice())\n+ done += uint64(n)\n+ if err != nil {\n+ return done, err\n+ }\n+ src = src.DropFirst(n)\n+ dsts = dsts.Tail()\n+ }\n+ return done, nil\n+ })\nif n > 0 {\np.queue.Notify(waiter.ReadableEvents)\n}\n@@ -94,14 +112,6 @@ func (p *Pipe) Write(ctx context.Context, src usermem.IOSequence) (int64, error)\nreturn n, err\n}\n-// WriteFromBlocks implements safemem.Writer.WriteFromBlocks for Pipe.Write.\n-func (p *Pipe) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) {\n- n, err := p.write(int64(srcs.NumBytes()), func(dsts safemem.BlockSeq) (uint64, error) {\n- return safemem.CopySeq(dsts, srcs)\n- })\n- return uint64(n), err\n-}\n-\nfunc (p *Pipe) write(count int64, f func(safemem.BlockSeq) (uint64, error)) (int64, error) {\np.mu.Lock()\ndefer p.mu.Unlock()\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -2310,9 +2310,11 @@ cc_binary(\nlinkstatic = 1,\ndeps = [\n\"//test/util:file_descriptor\",\n+ \"@com_google_absl//absl/cleanup\",\n\"@com_google_absl//absl/strings\",\n\"@com_google_absl//absl/time\",\ngtest,\n+ \"//test/util:memory_util\",\n\"//test/util:signal_util\",\n\"//test/util:temp_path\",\n\"//test/util:test_main\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/splice.cc",
"new_path": "test/syscalls/linux/splice.cc",
"diff": "#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n+#include \"absl/cleanup/cleanup.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/time/clock.h\"\n#include \"absl/time/time.h\"\n#include \"test/util/file_descriptor.h\"\n+#include \"test/util/memory_util.h\"\n#include \"test/util/signal_util.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n@@ -830,6 +832,100 @@ TEST(SpliceTest, ToPipeWithSmallCapacityDoesNotSpin) {\nEXPECT_EQ(signaled, 1);\n}\n+// Regression test for b/208679047.\n+TEST(SpliceTest, FromPipeWithConcurrentIo) {\n+ // Create a file containing two copies of the same byte. Two bytes are\n+ // necessary because both the read() and splice() loops below advance the file\n+ // offset by one byte before lseek(); use of the file offset is required since\n+ // the mutex protecting the file offset is implicated in the circular lock\n+ // ordering that this test attempts to reproduce.\n+ //\n+ // This can't use memfd_create() because, in Linux, memfd_create(2) creates a\n+ // struct file using alloc_file_pseudo() without going through\n+ // do_dentry_open(), so FMODE_ATOMIC_POS is not set despite the created file\n+ // having type S_IFREG (\"regular file\").\n+ const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));\n+ constexpr char kSplicedByte = 0x01;\n+ for (int i = 0; i < 2; i++) {\n+ ASSERT_THAT(WriteFd(fd.get(), &kSplicedByte, 1),\n+ SyscallSucceedsWithValue(1));\n+ }\n+\n+ // Create a pipe.\n+ int pipe_fds[2];\n+ ASSERT_THAT(pipe(pipe_fds), SyscallSucceeds());\n+ const FileDescriptor rfd(pipe_fds[0]);\n+ FileDescriptor wfd(pipe_fds[1]);\n+\n+ DisableSave ds;\n+ std::atomic<bool> done(false);\n+\n+ // Create a thread that reads from fd until the end of the test.\n+ ScopedThread memfd_reader([&] {\n+ char file_buf;\n+ while (!done.load()) {\n+ ASSERT_THAT(lseek(fd.get(), 0, SEEK_SET), SyscallSucceeds());\n+ int n = ReadFd(fd.get(), &file_buf, 1);\n+ if (n == 0) {\n+ // fd was at offset 2 (EOF). In Linux, this is possible even after\n+ // lseek(0) because splice() doesn't attempt atomicity with respect to\n+ // concurrent lseek(), so the effect of lseek() may be lost.\n+ continue;\n+ }\n+ ASSERT_THAT(n, SyscallSucceedsWithValue(1));\n+ ASSERT_EQ(file_buf, kSplicedByte);\n+ }\n+ });\n+\n+ // Create a thread that reads from the pipe until the end of the test.\n+ ScopedThread pipe_reader([&] {\n+ char pipe_buf;\n+ while (!done.load()) {\n+ int n = ReadFd(rfd.get(), &pipe_buf, 1);\n+ if (n == 0) {\n+ // This should only happen due to cleanup_threads (below) closing wfd.\n+ EXPECT_TRUE(done.load());\n+ return;\n+ }\n+ ASSERT_THAT(n, SyscallSucceedsWithValue(1));\n+ ASSERT_EQ(pipe_buf, kSplicedByte);\n+ }\n+ });\n+\n+ // Create a thread that repeatedly invokes madvise(MADV_DONTNEED) on the same\n+ // page of memory. 
(Having a thread attempt to lock MM.activeMu for writing is\n+ // necessary to create a deadlock from the circular lock ordering, since\n+ // otherwise both uses of MM.activeMu are for reading and may proceed\n+ // concurrently.)\n+ ScopedThread mm_locker([&] {\n+ const Mapping m = ASSERT_NO_ERRNO_AND_VALUE(\n+ MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE));\n+ while (!done.load()) {\n+ madvise(m.ptr(), kPageSize, MADV_DONTNEED);\n+ }\n+ });\n+\n+ // This must come after the ScopedThreads since its destructor must run before\n+ // theirs.\n+ const absl::Cleanup cleanup_threads = [&] {\n+ done.store(true);\n+ // Ensure that pipe_reader is unblocked after setting done, so that it will\n+ // be able to observe done being true.\n+ wfd.reset();\n+ };\n+\n+ // Repeatedly splice from memfd to the pipe. The test passes if this does not\n+ // deadlock.\n+ const int kIterations = 5000;\n+ for (int i = 0; i < kIterations; i++) {\n+ ASSERT_THAT(lseek(fd.get(), 0, SEEK_SET), SyscallSucceeds());\n+ ASSERT_THAT(splice(fd.get(), nullptr, wfd.get(), nullptr, 1, 0),\n+ SyscallSucceedsWithValue(1));\n+ }\n+}\n+\n} // namespace\n} // namespace testing\n"
}
] | Go | Apache License 2.0 | google/gvisor | Consistently order Pipe.mu before other file mutexes and MM.activeMu.
PiperOrigin-RevId: 422894869 |
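A toy model of the rule this commit enforces — the names are illustrative, not the sentry's real types. As long as every path takes the pipe lock before the memory lock, the circular wait the regression test above provokes cannot form:

```go
package main

import (
	"fmt"
	"sync"
)

var (
	pipeMu   sync.Mutex // always acquired first
	activeMu sync.Mutex // always acquired second
)

// spliceFromFile copies into the pipe: pipe lock, then memory lock.
func spliceFromFile() {
	pipeMu.Lock()
	defer pipeMu.Unlock()
	activeMu.Lock() // e.g. resolving the source buffer in user memory
	defer activeMu.Unlock()
	fmt.Println("spliced")
}

// readPipe copies out of the pipe using the same global order.
func readPipe() {
	pipeMu.Lock()
	defer pipeMu.Unlock()
	activeMu.Lock() // e.g. faulting in the reader's destination buffer
	defer activeMu.Unlock()
	fmt.Println("read")
}

func main() {
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); spliceFromFile() }()
	go func() { defer wg.Done(); readPipe() }()
	wg.Wait()
}
```

If one path instead took activeMu first, the two goroutines could each hold one lock while waiting for the other — the deadlock the new splice test reproduces.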
259,885 | 19.01.2022 15:59:28 | 28,800 | 04ddb203afd244ff67b51c12fbab3e9de9718066 | Add caller for the ExitNotifyParent checkpoint. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_exit.go",
"new_path": "pkg/sentry/kernel/task_exit.go",
"diff": "@@ -32,6 +32,7 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n+ \"gvisor.dev/gvisor/pkg/sentry/seccheck\"\n\"gvisor.dev/gvisor/pkg/waiter\"\n)\n@@ -641,6 +642,10 @@ func (t *Task) exitNotifyLocked(fromPtraceDetach bool) {\n// should return ECHILD).\nt.parent.tg.eventQueue.Notify(EventExit | EventChildGroupStop | EventGroupContinue)\n}\n+ if seccheck.Global.Enabled(seccheck.PointExitNotifyParent) {\n+ mask, info := getExitNotifyParentSeccheckInfo(t)\n+ seccheck.Global.ExitNotifyParent(t, mask, &info)\n+ }\n}\n}\nif t.exitTracerAcked && t.exitParentAcked {\n@@ -694,6 +699,18 @@ func (t *Task) exitNotificationSignal(sig linux.Signal, receiver *Task) *linux.S\nreturn info\n}\n+// Preconditions: The TaskSet mutex must be locked.\n+func getExitNotifyParentSeccheckInfo(t *Task) (seccheck.ExitNotifyParentFieldSet, seccheck.ExitNotifyParentInfo) {\n+ req := seccheck.Global.ExitNotifyParentReq()\n+ info := seccheck.ExitNotifyParentInfo{\n+ ExitStatus: t.tg.exitStatus,\n+ }\n+ var mask seccheck.ExitNotifyParentFieldSet\n+ mask.Add(seccheck.ExitNotifyParentFieldExitStatus)\n+ t.loadSeccheckInfoLocked(req.Exiter, &mask.Exiter, &info.Exiter)\n+ return mask, info\n+}\n+\n// ExitStatus returns t's exit status, which is only guaranteed to be\n// meaningful if t.ExitState() != TaskExitNone.\nfunc (t *Task) ExitStatus() linux.WaitStatus {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add caller for the ExitNotifyParent checkpoint.
PiperOrigin-RevId: 422925140 |
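The hunk follows the common two-phase checkpoint shape: a cheap Enabled query gates the comparatively expensive gathering of checkpoint info. A toy version with stand-in types, not the real seccheck API:

```go
package main

import "fmt"

type checkPoint int

const pointExitNotifyParent checkPoint = iota

type checkerState struct{ enabled map[checkPoint]bool }

func (s *checkerState) Enabled(p checkPoint) bool { return s.enabled[p] }

func (s *checkerState) ExitNotifyParent(status int) {
	fmt.Println("checker observed exit status:", status)
}

var global = &checkerState{enabled: map[checkPoint]bool{pointExitNotifyParent: true}}

func exitNotify(status int) {
	if global.Enabled(pointExitNotifyParent) {
		// Only now gather and marshal the (possibly costly) exit info.
		global.ExitNotifyParent(status)
	}
}

func main() { exitNotify(0) }
```

Tasks exiting with no checker registered pay only the lookup, which matters on a path as hot as task exit.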
259,907 | 19.01.2022 17:38:09 | 28,800 | 5fb52763235857427fef62ef227b26f1fad4de2c | Handle 0-sized writes to /dev/net/tun.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/devices/tundev/BUILD",
"new_path": "pkg/sentry/devices/tundev/BUILD",
"diff": "@@ -20,5 +20,6 @@ go_library(\n\"//pkg/tcpip/link/tun\",\n\"//pkg/usermem\",\n\"//pkg/waiter\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/devices/tundev/tundev.go",
"new_path": "pkg/sentry/devices/tundev/tundev.go",
"diff": "package tundev\nimport (\n+ \"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n@@ -139,6 +140,9 @@ func (fd *tunFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int6\n// Write implements vfs.FileDescriptionImpl.Write.\nfunc (fd *tunFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {\n+ if src.NumBytes() == 0 {\n+ return 0, unix.EINVAL\n+ }\ndata := make([]byte, src.NumBytes())\nif _, err := src.CopyIn(ctx, data); err != nil {\nreturn 0, err\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dev/BUILD",
"new_path": "pkg/sentry/fs/dev/BUILD",
"diff": "@@ -37,5 +37,6 @@ go_library(\n\"//pkg/tcpip/link/tun\",\n\"//pkg/usermem\",\n\"//pkg/waiter\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dev/net_tun.go",
"new_path": "pkg/sentry/fs/dev/net_tun.go",
"diff": "package dev\nimport (\n+ \"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n@@ -131,6 +132,9 @@ func (n *netTunFileOperations) Ioctl(ctx context.Context, file *fs.File, io user\n// Write implements fs.FileOperations.Write.\nfunc (n *netTunFileOperations) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) {\n+ if src.NumBytes() == 0 {\n+ return 0, unix.EINVAL\n+ }\ndata := make([]byte, src.NumBytes())\nif _, err := src.CopyIn(ctx, data); err != nil {\nreturn 0, err\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/tuntap.cc",
"new_path": "test/syscalls/linux/tuntap.cc",
"diff": "@@ -269,6 +269,17 @@ TEST_F(TuntapTest, InvalidReadWrite) {\nEXPECT_THAT(write(fd.get(), buf, sizeof(buf)), SyscallFailsWithErrno(EBADFD));\n}\n+TEST_F(TuntapTest, ZeroWrite) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));\n+\n+ FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(kDevNetTun, O_RDWR));\n+ struct ifreq ifr_set = {};\n+ ifr_set.ifr_flags = IFF_TUN | IFF_NO_PI;\n+ strncpy(ifr_set.ifr_name, kTunName, IFNAMSIZ);\n+ EXPECT_THAT(ioctl(fd.get(), TUNSETIFF, &ifr_set), SyscallSucceeds());\n+ EXPECT_THAT(write(fd.get(), nullptr, 0), SyscallFailsWithErrno(EINVAL));\n+}\n+\nTEST_F(TuntapTest, WriteToDownDevice) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_ADMIN)));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Handle 0-sized writes to /dev/net/tun.
Reported-by: [email protected]
PiperOrigin-RevId: 422944054 |
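Both device files gain the same guard. A minimal sketch of the idea, with illustrative types — a zero-length write cannot carry a packet, so it is rejected before any buffer is allocated:

```go
package main

import (
	"errors"
	"fmt"
)

var errInvalid = errors.New("EINVAL")

// tunWrite mimics the guard added above: an empty write to a TUN/TAP
// device is an error, matching Linux behavior.
func tunWrite(src []byte) (int, error) {
	if len(src) == 0 {
		return 0, errInvalid
	}
	// ... copy src and inject it into the network stack ...
	return len(src), nil
}

func main() {
	if _, err := tunWrite(nil); err != nil {
		fmt.Println("zero write rejected:", err)
	}
}
```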
259,907 | 20.01.2022 20:13:43 | 28,800 | 2a62f437960641b655f790cfb13ca14ca6a7478d | Avoid uint32 to int cast in lisafs unmarshalling code.
On 32-bit systems, 0xffffffff will be cast to -1, which can trip the bounds
checks in the unmarshalling code. A compromised sentry (client) will be able
to panic the gofer server. | [
{
"change_type": "MODIFY",
"old_path": "pkg/lisafs/fd.go",
"new_path": "pkg/lisafs/fd.go",
"diff": "@@ -344,11 +344,11 @@ type ControlFDImpl interface {\nSymlink(c *Connection, name string, target string, uid UID, gid GID) (Inode, error)\nLink(c *Connection, dir ControlFDImpl, name string) (Inode, error)\nStatFS(c *Connection) (StatFS, error)\n- Readlink(c *Connection, getLinkBuf func(uint32) []byte) (uint32, error)\n+ Readlink(c *Connection, getLinkBuf func(uint32) []byte) (uint16, error)\nConnect(c *Connection, sockType uint32) (int, error)\nUnlink(c *Connection, name string, flags uint32) error\nRenameLocked(c *Connection, newDir ControlFDImpl, newName string) (func(ControlFDImpl), func(), error)\n- GetXattr(c *Connection, name string, dataBuf []byte) (uint32, error)\n+ GetXattr(c *Connection, name string, dataBuf []byte) (uint16, error)\nSetXattr(c *Connection, name string, value string, flags uint32) error\nListXattr(c *Connection, size uint64) (StringArray, error)\nRemoveXattr(c *Connection, name string) error\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/lisafs/handlers.go",
"new_path": "pkg/lisafs/handlers.go",
"diff": "@@ -251,7 +251,7 @@ func WalkHandler(c *Connection, comm Communicator, payloadLen uint32) (uint32, e\n// the slice allocation. The memory format should be WalkResp's.\nvar (\nstatus WalkStatus\n- numInodes primitive.Uint32\n+ numInodes primitive.Uint16\n)\nmaxPayloadSize := status.SizeBytes() + numInodes.SizeBytes() + (len(req.Path) * (*Inode)(nil).SizeBytes())\nif maxPayloadSize > math.MaxUint32 {\n@@ -308,7 +308,7 @@ func WalkStatHandler(c *Connection, comm Communicator, payloadLen uint32) (uint3\n// We will manually marshal the statx results into the payload buffer as they\n// are generated to avoid the slice allocation. The memory format should be\n// the same as WalkStatResp's.\n- var numStats primitive.Uint32\n+ var numStats primitive.Uint16\nmaxPayloadSize := numStats.SizeBytes() + (len(req.Path) * linux.SizeOfStatx)\nif maxPayloadSize > math.MaxUint32 {\n// Too much to walk, can't do.\n@@ -730,7 +730,7 @@ func ReadLinkAtHandler(c *Connection, comm Communicator, payloadLen uint32) (uin\n// We will manually marshal ReadLinkAtResp, which just contains a\n// SizedString. Let Readlinkat directly write into the payload buffer and\n// manually write the string size before it.\n- var linkLen primitive.Uint32\n+ var linkLen primitive.Uint16\nrespMetaSize := uint32(linkLen.SizeBytes())\nn, err := fd.impl.Readlink(c, func(dataLen uint32) []byte {\nreturn comm.PayloadBuf(dataLen + respMetaSize)[respMetaSize:]\n@@ -738,9 +738,9 @@ func ReadLinkAtHandler(c *Connection, comm Communicator, payloadLen uint32) (uin\nif err != nil {\nreturn 0, err\n}\n- linkLen = primitive.Uint32(n)\n+ linkLen = primitive.Uint16(n)\nlinkLen.MarshalUnsafe(comm.PayloadBuf(respMetaSize))\n- return respMetaSize + n, nil\n+ return respMetaSize + uint32(n), nil\n}\n// FlushHandler handles the Flush RPC.\n@@ -923,7 +923,7 @@ func Getdents64Handler(c *Connection, comm Communicator, payloadLen uint32) (uin\n// We will manually marshal the response Getdents64Resp.\n// numDirents is the number of dirents marshalled into the payload.\n- var numDirents primitive.Uint32\n+ var numDirents primitive.Uint16\n// The payload starts with numDirents, dirents go right after that.\n// payloadBufPos represents the position at which to write the next dirent.\npayloadBufPos := uint32(numDirents.SizeBytes())\n@@ -964,16 +964,16 @@ func FGetXattrHandler(c *Connection, comm Communicator, payloadLen uint32) (uint\n// Manually marshal FGetXattrResp to avoid allocations and copying.\n// FGetXattrResp simply is a wrapper around SizedString.\n- var valueLen primitive.Uint32\n+ var valueLen primitive.Uint16\nrespMetaSize := uint32(valueLen.SizeBytes())\npayloadBuf := comm.PayloadBuf(respMetaSize + uint32(req.BufSize))\nn, err := fd.impl.GetXattr(c, string(req.Name), payloadBuf[respMetaSize:])\nif err != nil {\nreturn 0, err\n}\n- valueLen = primitive.Uint32(n)\n+ valueLen = primitive.Uint16(n)\nvalueLen.MarshalBytes(payloadBuf)\n- return respMetaSize + n, nil\n+ return respMetaSize + uint32(n), nil\n}\n// FSetXattrHandler handles the FSetXattr RPC.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/lisafs/message.go",
"new_path": "pkg/lisafs/message.go",
"diff": "@@ -181,17 +181,17 @@ func NoopMarshal(b []byte) []byte { return b }\nfunc NoopUnmarshal(b []byte) ([]byte, bool) { return b, true }\n// SizedString represents a string in memory. The marshalled string bytes are\n-// preceded by a uint32 signifying the string length.\n+// preceded by a uint16 signifying the string length.\ntype SizedString string\n// SizeBytes implements marshal.Marshallable.SizeBytes.\nfunc (s *SizedString) SizeBytes() int {\n- return (*primitive.Uint32)(nil).SizeBytes() + len(*s)\n+ return (*primitive.Uint16)(nil).SizeBytes() + len(*s)\n}\n// MarshalBytes implements marshal.Marshallable.MarshalBytes.\nfunc (s *SizedString) MarshalBytes(dst []byte) []byte {\n- strLen := primitive.Uint32(len(*s))\n+ strLen := primitive.Uint16(len(*s))\ndst = strLen.MarshalUnsafe(dst)\n// Copy without any allocation.\nreturn dst[copy(dst[:strLen], *s):]\n@@ -199,7 +199,7 @@ func (s *SizedString) MarshalBytes(dst []byte) []byte {\n// CheckedUnmarshal implements marshal.CheckedMarshallable.CheckedUnmarshal.\nfunc (s *SizedString) CheckedUnmarshal(src []byte) ([]byte, bool) {\n- var strLen primitive.Uint32\n+ var strLen primitive.Uint16\nsrcRemain, ok := strLen.CheckedUnmarshal(src)\nif !ok || len(srcRemain) < int(strLen) {\nreturn src, false\n@@ -210,12 +210,12 @@ func (s *SizedString) CheckedUnmarshal(src []byte) ([]byte, bool) {\n}\n// StringArray represents an array of SizedStrings in memory. The marshalled\n-// array data is preceded by a uint32 signifying the array length.\n+// array data is preceded by a uint16 signifying the array length.\ntype StringArray []string\n// SizeBytes implements marshal.Marshallable.SizeBytes.\nfunc (s *StringArray) SizeBytes() int {\n- size := (*primitive.Uint32)(nil).SizeBytes()\n+ size := (*primitive.Uint16)(nil).SizeBytes()\nfor _, str := range *s {\nsstr := SizedString(str)\nsize += sstr.SizeBytes()\n@@ -225,7 +225,7 @@ func (s *StringArray) SizeBytes() int {\n// MarshalBytes implements marshal.Marshallable.MarshalBytes.\nfunc (s *StringArray) MarshalBytes(dst []byte) []byte {\n- arrLen := primitive.Uint32(len(*s))\n+ arrLen := primitive.Uint16(len(*s))\ndst = arrLen.MarshalUnsafe(dst)\nfor _, str := range *s {\nsstr := SizedString(str)\n@@ -236,7 +236,7 @@ func (s *StringArray) MarshalBytes(dst []byte) []byte {\n// CheckedUnmarshal implements marshal.CheckedMarshallable.CheckedUnmarshal.\nfunc (s *StringArray) CheckedUnmarshal(src []byte) ([]byte, bool) {\n- var arrLen primitive.Uint32\n+ var arrLen primitive.Uint16\nsrcRemain, ok := arrLen.CheckedUnmarshal(src)\nif !ok {\nreturn src, false\n@@ -248,7 +248,7 @@ func (s *StringArray) CheckedUnmarshal(src []byte) ([]byte, bool) {\n*s = (*s)[:arrLen]\n}\n- for i := primitive.Uint32(0); i < arrLen; i++ {\n+ for i := primitive.Uint16(0); i < arrLen; i++ {\nvar sstr SizedString\nsrcRemain, ok = sstr.CheckedUnmarshal(srcRemain)\nif !ok {\n@@ -436,7 +436,8 @@ const (\nWalkComponentSymlink\n)\n-// WalkResp is used to communicate the inodes walked by the server.\n+// WalkResp is used to communicate the inodes walked by the server. 
In memory,\n+// the inode array is preceded by a uint16 integer denoting array length.\ntype WalkResp struct {\nStatus WalkStatus\nInodes []Inode\n@@ -445,14 +446,14 @@ type WalkResp struct {\n// SizeBytes implements marshal.Marshallable.SizeBytes.\nfunc (w *WalkResp) SizeBytes() int {\nreturn w.Status.SizeBytes() +\n- (*primitive.Uint32)(nil).SizeBytes() + (len(w.Inodes) * (*Inode)(nil).SizeBytes())\n+ (*primitive.Uint16)(nil).SizeBytes() + (len(w.Inodes) * (*Inode)(nil).SizeBytes())\n}\n// MarshalBytes implements marshal.Marshallable.MarshalBytes.\nfunc (w *WalkResp) MarshalBytes(dst []byte) []byte {\ndst = w.Status.MarshalUnsafe(dst)\n- numInodes := primitive.Uint32(len(w.Inodes))\n+ numInodes := primitive.Uint16(len(w.Inodes))\ndst = numInodes.MarshalUnsafe(dst)\nreturn MarshalUnsafeInodeSlice(w.Inodes, dst)\n@@ -466,7 +467,7 @@ func (w *WalkResp) CheckedUnmarshal(src []byte) ([]byte, bool) {\n}\nsrcRemain := w.Status.UnmarshalUnsafe(src)\n- var numInodes primitive.Uint32\n+ var numInodes primitive.Uint16\nsrcRemain = numInodes.UnmarshalUnsafe(srcRemain)\nif int(numInodes)*(*Inode)(nil).SizeBytes() > len(srcRemain) {\nreturn src, false\n@@ -479,19 +480,20 @@ func (w *WalkResp) CheckedUnmarshal(src []byte) ([]byte, bool) {\nreturn UnmarshalUnsafeInodeSlice(w.Inodes, srcRemain), true\n}\n-// WalkStatResp is used to communicate stat results for WalkStat.\n+// WalkStatResp is used to communicate stat results for WalkStat. In memory,\n+// the array data is preceded by a uint16 denoting the array length.\ntype WalkStatResp struct {\nStats []linux.Statx\n}\n// SizeBytes implements marshal.Marshallable.SizeBytes.\nfunc (w *WalkStatResp) SizeBytes() int {\n- return (*primitive.Uint32)(nil).SizeBytes() + (len(w.Stats) * linux.SizeOfStatx)\n+ return (*primitive.Uint16)(nil).SizeBytes() + (len(w.Stats) * linux.SizeOfStatx)\n}\n// MarshalBytes implements marshal.Marshallable.MarshalBytes.\nfunc (w *WalkStatResp) MarshalBytes(dst []byte) []byte {\n- numStats := primitive.Uint32(len(w.Stats))\n+ numStats := primitive.Uint16(len(w.Stats))\ndst = numStats.MarshalUnsafe(dst)\nreturn linux.MarshalUnsafeStatxSlice(w.Stats, dst)\n@@ -503,7 +505,7 @@ func (w *WalkStatResp) CheckedUnmarshal(src []byte) ([]byte, bool) {\nif w.SizeBytes() > len(src) {\nreturn src, false\n}\n- var numStats primitive.Uint32\n+ var numStats primitive.Uint16\nsrcRemain := numStats.UnmarshalUnsafe(src)\nif int(numStats)*linux.SizeOfStatx > len(srcRemain) {\n@@ -585,17 +587,17 @@ type OpenCreateAtResp struct {\n// FdArray is a utility struct which implements a marshallable type for\n// communicating an array of FDIDs. 
In memory, the array data is preceded by a\n-// uint32 denoting the array length.\n+// uint16 denoting the array length.\ntype FdArray []FDID\n// SizeBytes implements marshal.Marshallable.SizeBytes.\nfunc (f *FdArray) SizeBytes() int {\n- return (*primitive.Uint32)(nil).SizeBytes() + (len(*f) * (*FDID)(nil).SizeBytes())\n+ return (*primitive.Uint16)(nil).SizeBytes() + (len(*f) * (*FDID)(nil).SizeBytes())\n}\n// MarshalBytes implements marshal.Marshallable.MarshalBytes.\nfunc (f *FdArray) MarshalBytes(dst []byte) []byte {\n- arrLen := primitive.Uint32(len(*f))\n+ arrLen := primitive.Uint16(len(*f))\ndst = arrLen.MarshalUnsafe(dst)\nreturn MarshalUnsafeFDIDSlice(*f, dst)\n}\n@@ -606,7 +608,7 @@ func (f *FdArray) CheckedUnmarshal(src []byte) ([]byte, bool) {\nif f.SizeBytes() > len(src) {\nreturn src, false\n}\n- var arrLen primitive.Uint32\n+ var arrLen primitive.Uint16\nsrcRemain := arrLen.UnmarshalUnsafe(src)\nif int(arrLen)*(*FDID)(nil).SizeBytes() > len(srcRemain) {\nreturn src, false\n@@ -688,7 +690,7 @@ func (r *PReadResp) MarshalBytes(dst []byte) []byte {\n// CheckedUnmarshal implements marshal.CheckedMarshallable.CheckedUnmarshal.\nfunc (r *PReadResp) CheckedUnmarshal(src []byte) ([]byte, bool) {\nsrcRemain, ok := r.NumBytes.CheckedUnmarshal(src)\n- if !ok || int(r.NumBytes) > len(srcRemain) || int(r.NumBytes) > len(r.Buf) {\n+ if !ok || uint32(r.NumBytes) > uint32(len(srcRemain)) || uint32(r.NumBytes) > uint32(len(r.Buf)) {\nreturn src, false\n}\n@@ -730,7 +732,7 @@ func (w *PWriteReq) CheckedUnmarshal(src []byte) ([]byte, bool) {\n// This is an optimization. Assuming that the server is making this call, it\n// is safe to just point to src rather than allocating and copying.\n- if int(w.NumBytes) > len(srcRemain) {\n+ if uint32(w.NumBytes) > uint32(len(srcRemain)) {\nreturn src, false\n}\nw.Buf = srcRemain[:w.NumBytes]\n@@ -1112,14 +1114,15 @@ func (d *Dirent64) CheckedUnmarshal(src []byte) ([]byte, bool) {\nreturn src, false\n}\n-// Getdents64Resp is used to communicate getdents64 results.\n+// Getdents64Resp is used to communicate getdents64 results. In memory, the\n+// dirents array is preceded by a uint16 integer denoting array length.\ntype Getdents64Resp struct {\nDirents []Dirent64\n}\n// SizeBytes implements marshal.Marshallable.SizeBytes.\nfunc (g *Getdents64Resp) SizeBytes() int {\n- ret := (*primitive.Uint32)(nil).SizeBytes()\n+ ret := (*primitive.Uint16)(nil).SizeBytes()\nfor i := range g.Dirents {\nret += g.Dirents[i].SizeBytes()\n}\n@@ -1128,7 +1131,7 @@ func (g *Getdents64Resp) SizeBytes() int {\n// MarshalBytes implements marshal.Marshallable.MarshalBytes.\nfunc (g *Getdents64Resp) MarshalBytes(dst []byte) []byte {\n- numDirents := primitive.Uint32(len(g.Dirents))\n+ numDirents := primitive.Uint16(len(g.Dirents))\ndst = numDirents.MarshalUnsafe(dst)\nfor i := range g.Dirents {\ndst = g.Dirents[i].MarshalBytes(dst)\n@@ -1142,7 +1145,7 @@ func (g *Getdents64Resp) CheckedUnmarshal(src []byte) ([]byte, bool) {\nif g.SizeBytes() > len(src) {\nreturn src, false\n}\n- var numDirents primitive.Uint32\n+ var numDirents primitive.Uint16\nsrcRemain := numDirents.UnmarshalUnsafe(src)\nif cap(g.Dirents) < int(numDirents) {\ng.Dirents = make([]Dirent64, numDirents)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/lisafs.go",
"new_path": "runsc/fsgofer/lisafs.go",
"diff": "@@ -16,6 +16,7 @@ package fsgofer\nimport (\n\"io\"\n+ \"math\"\n\"path\"\n\"strconv\"\n\"sync/atomic\"\n@@ -622,17 +623,16 @@ func (fd *controlFDLisa) StatFS(c *lisafs.Connection) (lisafs.StatFS, error) {\n}\n// Readlink implements lisafs.ControlFDImpl.Readlink.\n-func (fd *controlFDLisa) Readlink(c *lisafs.Connection, getLinkBuf func(uint32) []byte) (uint32, error) {\n+func (fd *controlFDLisa) Readlink(c *lisafs.Connection, getLinkBuf func(uint32) []byte) (uint16, error) {\n// This is similar to what os.Readlink does.\n- const limit = uint32(1024 * 1024)\n- for linkLen := uint32(128); linkLen < limit; linkLen *= 2 {\n- b := getLinkBuf(linkLen)\n+ for linkLen := 128; linkLen < math.MaxUint16; linkLen *= 2 {\n+ b := getLinkBuf(uint32(linkLen))\nn, err := unix.Readlinkat(fd.hostFD, \"\", b)\nif err != nil {\nreturn 0, err\n}\nif n < int(linkLen) {\n- return uint32(n), nil\n+ return uint16(n), nil\n}\n}\nreturn 0, unix.ENOMEM\n@@ -693,7 +693,7 @@ func (fd *controlFDLisa) RenameLocked(c *lisafs.Connection, newDir lisafs.Contro\n}\n// GetXattr implements lisafs.ControlFDImpl.GetXattr.\n-func (fd *controlFDLisa) GetXattr(c *lisafs.Connection, name string, dataBuf []byte) (uint32, error) {\n+func (fd *controlFDLisa) GetXattr(c *lisafs.Connection, name string, dataBuf []byte) (uint16, error) {\nif !c.ServerImpl().(*LisafsServer).config.EnableVerityXattr {\nreturn 0, unix.EOPNOTSUPP\n}\n@@ -701,7 +701,7 @@ func (fd *controlFDLisa) GetXattr(c *lisafs.Connection, name string, dataBuf []b\nreturn 0, unix.EOPNOTSUPP\n}\nn, err := unix.Fgetxattr(fd.hostFD, name, dataBuf)\n- return uint32(n), err\n+ return uint16(n), err\n}\n// SetXattr implements lisafs.ControlFDImpl.SetXattr.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Avoid uint32 to int cast in lisafs unmarshalling code.
On 32-bit systems, 0xffffffff will be cast to -1, which can trip the bounds
checks in the unmarshalling code. A compromised sentry (client) would be able
to panic the gofer server.
PiperOrigin-RevId: 423221118 |
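A minimal standalone sketch of the cast hazard this commit fixes — illustrative names only, not the lisafs API. On a 32-bit platform int is 32 bits wide, so int(uint32(0xffffffff)) wraps to -1 and a signed comparison lets an oversized length through; comparing in unsigned space, as the patch does, rejects it.

package main

import "fmt"

// checkedSliceBroken mirrors the old check: it converts an
// attacker-controlled uint32 length to int. On a 32-bit platform,
// int(0xffffffff) == -1, so "int(n) > len(buf)" is false and the
// slicing below would panic.
func checkedSliceBroken(buf []byte, n uint32) ([]byte, bool) {
	if int(n) > len(buf) { // broken on 32-bit: int(n) may be negative
		return nil, false
	}
	return buf[:n], true
}

// checkedSliceFixed compares in unsigned space, as the patched
// handlers do, so 0xffffffff is rejected instead of wrapping.
func checkedSliceFixed(buf []byte, n uint32) ([]byte, bool) {
	if n > uint32(len(buf)) {
		return nil, false
	}
	return buf[:n], true
}

func main() {
	buf := make([]byte, 8)
	_, ok := checkedSliceFixed(buf, 0xffffffff)
	fmt.Println(ok) // false: the oversized length is rejected
}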
259,903 | 23.01.2022 13:05:38 | 18,000 | e66d177645c07372fa76d010464a95f0191efa0b | Allocate auxv with better capacity while loading stack | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/arch/stack.go",
"new_path": "pkg/sentry/arch/stack.go",
"diff": "@@ -190,7 +190,7 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error)\n// NOTE: We need an extra zero here per spec.\n// The Push function will automatically terminate\n// strings and arrays with a single null value.\n- auxv := make([]hostarch.Addr, 0, len(aux))\n+ auxv := make([]hostarch.Addr, 0, len(aux)*2+1)\nfor _, a := range aux {\nauxv = append(auxv, hostarch.Addr(a.Key), a.Value)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allocate auxv with better capacity while loading stack |
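The capacity arithmetic behind this one-line change, as a self-contained sketch (plain uint64 stands in for hostarch.Addr): every auxv entry contributes two words — key and value — and one trailing null word terminates the array, so len(aux)*2+1 sizes the slice exactly, while the old len(aux) capacity forced append to reallocate on every load.

package main

import "fmt"

type auxEntry struct{ Key, Value uint64 }

func main() {
	aux := []auxEntry{{6, 4096}, {25, 0x1234}} // e.g. AT_PAGESZ, AT_RANDOM

	// Two words per entry plus one terminator, as in the patched code.
	auxv := make([]uint64, 0, len(aux)*2+1)
	for _, a := range aux {
		auxv = append(auxv, a.Key, a.Value)
	}
	auxv = append(auxv, 0) // the terminating null word

	fmt.Println(len(auxv), cap(auxv)) // 5 5: no reallocation was needed
}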
259,875 | 24.01.2022 12:44:00 | 28,800 | 2b2f9ea914bd52b7155f5643d7975f38b3331b58 | Use network protocol default ttl instead of a hardcoded one. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netstack/netstack.go",
"new_path": "pkg/sentry/socket/netstack/netstack.go",
"diff": "@@ -1330,6 +1330,15 @@ func getSockOptICMPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam\nreturn nil, syserr.ErrProtocolNotAvailable\n}\n+func defaultTTL(t *kernel.Task, network tcpip.NetworkProtocolNumber) (primitive.Int32, tcpip.Error) {\n+ var opt tcpip.DefaultTTLOption\n+ stack := inet.StackFromContext(t)\n+ if err := stack.(*Stack).Stack.NetworkProtocolOption(network, &opt); err != nil {\n+ return 0, err\n+ }\n+ return primitive.Int32(opt), nil\n+}\n+\n// getSockOptIPv6 implements GetSockOpt when level is SOL_IPV6.\nfunc getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {\nif _, ok := ep.(tcpip.Endpoint); !ok {\n@@ -1377,9 +1386,10 @@ func getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name\n// Fill in the default value, if needed.\nvP := primitive.Int32(v)\nif vP == -1 {\n- // TODO(https://github.com/google/gvisor/issues/6973): Retrieve the\n- // configured DefaultTTLOption of the IPv6 protocol.\n- vP = DefaultTTL\n+ vP, err = defaultTTL(t, header.IPv6ProtocolNumber)\n+ if err != nil {\n+ return nil, syserr.TranslateNetstackError(err)\n+ }\n}\nreturn &vP, nil\n@@ -1540,9 +1550,10 @@ func getSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name in\n// Fill in the default value, if needed.\nvP := primitive.Int32(v)\nif vP == 0 {\n- // TODO(https://github.com/google/gvisor/issues/6973): Retrieve the\n- // configured DefaultTTLOption of the IPv4 protocol.\n- vP = DefaultTTL\n+ vP, err = defaultTTL(t, header.IPv4ProtocolNumber)\n+ if err != nil {\n+ return nil, syserr.TranslateNetstackError(err)\n+ }\n}\nreturn &vP, nil\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_ip_unbound.cc",
"new_path": "test/syscalls/linux/socket_ip_unbound.cc",
"diff": "@@ -96,6 +96,7 @@ TEST_P(IPUnboundSocketTest, ResetTtlToDefault) {\nEXPECT_THAT(getsockopt(socket->get(), IPPROTO_IP, IP_TTL, &get2, &get2_sz),\nSyscallSucceedsWithValue(0));\nEXPECT_EQ(get2_sz, sizeof(get2));\n+ EXPECT_TRUE(get2 == 64 || get2 == 127);\nEXPECT_EQ(get2, get1);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_ipv6_unbound.cc",
"new_path": "test/syscalls/linux/socket_ipv6_unbound.cc",
"diff": "@@ -29,6 +29,7 @@ namespace testing {\nnamespace {\nconstexpr int kDefaultHopLimit = 64;\n+constexpr int kDefaultTtl = 64;\nusing ::testing::ValuesIn;\nusing IPv6UnboundSocketTest = SimpleSocketTest;\n@@ -37,13 +38,18 @@ TEST_P(IPv6UnboundSocketTest, HopLimitDefault) {\nstd::unique_ptr<FileDescriptor> socket =\nASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n+ const int set = -1;\n+ ASSERT_THAT(setsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS, &set,\n+ sizeof(set)),\n+ SyscallSucceedsWithValue(0));\n+\nint get = -1;\nsocklen_t get_sz = sizeof(get);\nASSERT_THAT(\ngetsockopt(socket->get(), IPPROTO_IPV6, IPV6_UNICAST_HOPS, &get, &get_sz),\nSyscallSucceedsWithValue(0));\nASSERT_EQ(get_sz, sizeof(get));\n- EXPECT_EQ(get, kDefaultHopLimit);\n+ EXPECT_EQ(get, kDefaultTtl);\n}\nTEST_P(IPv6UnboundSocketTest, SetHopLimit) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use network protocol default ttl instead of a hardcoded one.
PiperOrigin-RevId: 423886874 |
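The pattern in this change is a per-socket sentinel (-1 for the IPv6 hop limit, 0 for the IPv4 TTL) meaning "use the protocol default", resolved when the option is read back. A hypothetical, self-contained sketch of that resolution logic — not the netstack API:

package main

import "fmt"

// protocolDefaultTTL stands in for the stack-wide DefaultTTLOption,
// which is configurable in the real stack (64 is a common default).
const protocolDefaultTTL = 64

// effectiveTTL resolves a per-socket value, where sentinel means
// "inherit the protocol default" (-1 for IPv6, 0 for IPv4).
func effectiveTTL(perSocket, sentinel int32) int32 {
	if perSocket == sentinel {
		return protocolDefaultTTL
	}
	return perSocket
}

func main() {
	fmt.Println(effectiveTTL(-1, -1)) // 64: IPv6 socket falls back to default
	fmt.Println(effectiveTTL(5, -1))  // 5: explicit per-socket hop limit
}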
259,909 | 24.01.2022 13:47:32 | 28,800 | e7091facd206e23cfe243c46c49906daf68c97b0 | Add leak checking to fdbased_test. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/BUILD",
"new_path": "pkg/tcpip/link/fdbased/BUILD",
"diff": "@@ -30,6 +30,8 @@ go_test(\nsrcs = [\"endpoint_test.go\"],\nlibrary = \":fdbased\",\ndeps = [\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n\"//pkg/tcpip/header\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint_test.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint_test.go",
"diff": "@@ -21,6 +21,7 @@ import (\n\"bytes\"\n\"fmt\"\n\"math/rand\"\n+ \"os\"\n\"reflect\"\n\"testing\"\n\"time\"\n@@ -28,6 +29,8 @@ import (\n\"github.com/google/go-cmp/cmp\"\n\"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n@@ -203,6 +206,7 @@ func testWritePacket(t *testing.T, plen int, eth bool, gsoMaxSize uint32, hash u\n// See nic.writePacket.\npkt.EgressRoute = r\npkt.NetworkProtocolNumber = proto\n+ defer pkt.DecRef()\n// Build header.\nb := pkt.NetworkHeader().Push(netHdrLen)\n@@ -339,6 +343,7 @@ func TestPreserveSrcAddress(t *testing.T) {\nReserveHeaderBytes: header.EthernetMinimumSize,\nData: buffer.VectorisedView{},\n})\n+ defer pkt.DecRef()\n// Every PacketBuffer must have these set:\n// See nic.writePacket.\npkt.NetworkProtocolNumber = proto\n@@ -385,6 +390,7 @@ func TestDeliverPacket(t *testing.T) {\nReserveHeaderBytes: header.EthernetMinimumSize,\nData: buffer.NewViewFromBytes(all).ToVectorisedView(),\n})\n+ defer wantPkt.DecRef()\nif eth {\nhdr := header.Ethernet(wantPkt.LinkHeader().Push(header.EthernetMinimumSize))\nhdr.Encode(&header.EthernetFields{\n@@ -634,3 +640,10 @@ func TestDispatchPacketFormat(t *testing.T) {\n})\n}\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add leak checking to fdbased_test.
PiperOrigin-RevId: 423901655 |
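The TestMain hook added above is the standard Go pattern for a package-wide check that must run after every test: wrap m.Run, verify, then exit with a possibly overridden status. A generic sketch of the shape — the counter is a placeholder, not the refs/refsvfs2 API:

package mypkg

import (
	"os"
	"testing"
)

// liveObjects is a stand-in for a reference-counting leak detector:
// incremented on acquire, decremented on release.
var liveObjects int

func TestMain(m *testing.M) {
	code := m.Run() // run all tests in the package first
	if liveObjects != 0 {
		println("leaked objects:", liveObjects)
		code = 1 // surface the leak even if every test passed
	}
	// Deferred functions do not run past os.Exit, so any cleanup
	// must happen before this point.
	os.Exit(code)
}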
259,853 | 24.01.2022 17:15:29 | 28,800 | 8b98da43f265d707acbfe1f9591ffc455e0fb45e | cpuid: deflake cpuid_test
xsavec, xgetbv1 are in Sub-leaf (EAX = 0DH, ECX = 1). | [
{
"change_type": "MODIFY",
"old_path": "pkg/cpuid/features_amd64.go",
"new_path": "pkg/cpuid/features_amd64.go",
"diff": "@@ -81,13 +81,14 @@ func (f *Feature) set(s Static, on bool) {\nout := s.Query(In{Eax: uint32(featureInfo)})\nout.Ecx |= (1 << 26)\ns[In{Eax: uint32(featureInfo)}] = out\n- out = s.Query(In{Eax: uint32(xSaveInfo)})\n+\n+ out = s.Query(In{Eax: xSaveInfoSub.eax(), Ecx: xSaveInfoSub.ecx()})\nif on {\nout.Eax |= f.bit()\n} else {\nout.Eax &^= f.bit()\n}\n- s[In{Eax: uint32(xSaveInfo)}] = out\n+ s[In{Eax: xSaveInfoSub.eax(), Ecx: xSaveInfoSub.ecx()}] = out\ncase 5, 6:\n// Need to enable extended features.\nout := s.Query(In{Eax: uint32(extendedFunctionInfo)})\n@@ -136,7 +137,7 @@ func (f *Feature) check(fs FeatureSet) bool {\nif (cx & (1 << 26)) == 0 {\nreturn false\n}\n- ax, _, _, _ := fs.query(xSaveInfo)\n+ ax, _, _, _ := fs.query(xSaveInfoSub)\nreturn (ax & f.bit()) != 0\ncase 5, 6:\n// eax=0x80000000 gets supported extended levels. We use this\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/cpuid/native_amd64.go",
"new_path": "pkg/cpuid/native_amd64.go",
"diff": "@@ -25,8 +25,16 @@ import (\n\"gvisor.dev/gvisor/pkg/log\"\n)\n-// cpuididFunction is a useful type wrapper.\n-type cpuidFunction uint32\n+// cpuididFunction is a useful type wrapper. The format is eax | (ecx << 32).\n+type cpuidFunction uint64\n+\n+func (f cpuidFunction) eax() uint32 {\n+ return uint32(f)\n+}\n+\n+func (f cpuidFunction) ecx() uint32 {\n+ return uint32(f >> 32)\n+}\n// The constants below are the lower or \"standard\" cpuid functions, ordered as\n// defined by the hardware. Note that these may not be included in the standard\n@@ -47,8 +55,11 @@ const (\nintelX2APICInfo cpuidFunction = 0xb // Returns core/logical processor topology. Intel only.\n_ // Function 0xc is reserved.\nxSaveInfo cpuidFunction = 0xd // Returns information about extended state management.\n+ xSaveInfoSub cpuidFunction = 0xd | (0x1 << 32) // Returns information about extended state management (Sub-leaf).\n)\n+const xSaveInfoNumLeaves = 64 // Maximum number of xSaveInfo leaves.\n+\n// The \"extended\" functions.\nconst (\nextendedStart cpuidFunction = 0x80000000\n@@ -135,7 +146,7 @@ func (*Native) Query(in In) Out {\n//\n//go:nosplit\nfunc (fs FeatureSet) query(fn cpuidFunction) (uint32, uint32, uint32, uint32) {\n- out := fs.Query(In{Eax: uint32(fn)})\n+ out := fs.Query(In{Eax: fn.eax(), Ecx: fn.ecx()})\nreturn out.Eax, out.Ebx, out.Ecx, out.Edx\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/cpuid/static_amd64.go",
"new_path": "pkg/cpuid/static_amd64.go",
"diff": "@@ -56,6 +56,12 @@ func (fs FeatureSet) ToStatic() Static {\nfeature.set(s, fs.HasFeature(feature))\n}\n+ // Processor Extended State Enumeration.\n+ for i := uint32(0); i < xSaveInfoNumLeaves; i++ {\n+ in := In{Eax: uint32(xSaveInfo), Ecx: i}\n+ s[in] = fs.Query(in)\n+ }\n+\n// Save all cache information.\nout := fs.Query(In{Eax: uint32(featureInfo)})\nfor i := uint32(0); i < out.Ecx; i++ {\n"
}
] | Go | Apache License 2.0 | google/gvisor | cpuid: deflake cpuid_test
xsavec, xgetbv1 are in Sub-leaf (EAX = 0DH, ECX = 1).
PiperOrigin-RevId: 423948954 |
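The eax | (ecx << 32) packing introduced above is a compact way to key a CPUID query by both leaf (EAX) and sub-leaf (ECX). A standalone round-trip sketch using the same helpers and constants as the diff:

package main

import "fmt"

// cpuidFunction packs the CPUID leaf into the low 32 bits and the
// sub-leaf into the high 32 bits, as in pkg/cpuid/native_amd64.go.
type cpuidFunction uint64

func (f cpuidFunction) eax() uint32 { return uint32(f) }
func (f cpuidFunction) ecx() uint32 { return uint32(f >> 32) }

const (
	xSaveInfo    cpuidFunction = 0xd               // leaf 0xD, sub-leaf 0
	xSaveInfoSub cpuidFunction = 0xd | (0x1 << 32) // leaf 0xD, sub-leaf 1
)

func main() {
	// Sub-leaf 1 of leaf 0xD carries the XSAVEC/XGETBV1 feature bits,
	// which is why querying only the main leaf made the test flaky.
	fmt.Printf("eax=%#x ecx=%#x\n", xSaveInfoSub.eax(), xSaveInfoSub.ecx())
}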
259,853 | 24.01.2022 17:38:42 | 28,800 | 4cfb21a86a7d81669235326fd2e2fb854c0f406c | Fix deadlock caused by calling absl::Now() after fork
absl::Now() may acquire a lock. The problem happens if fork()
is called while another thread holds the lock. In this case,
absl::Now() gets stuck in the child process. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/concurrency.cc",
"new_path": "test/syscalls/linux/concurrency.cc",
"diff": "// limitations under the License.\n#include <signal.h>\n+#include <time.h>\n#include <atomic>\n@@ -78,9 +79,12 @@ TEST(ConcurrencyTest, MultiProcessMultithreaded) {\npid_t child_pid = fork();\nif (child_pid == 0) {\n+ struct timespec now;\n+ TEST_CHECK(clock_gettime(CLOCK_MONOTONIC, &now) == 0);\n// Busy wait without making any blocking syscalls.\n- auto end = absl::Now() + absl::Seconds(5);\n- while (absl::Now() < end) {\n+ auto end = now.tv_sec + 5;\n+ while (now.tv_sec < end) {\n+ TEST_CHECK(clock_gettime(CLOCK_MONOTONIC, &now) == 0);\n}\n_exit(0);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/sigstop.cc",
"new_path": "test/syscalls/linux/sigstop.cc",
"diff": "#include <signal.h>\n#include <stdlib.h>\n#include <sys/select.h>\n+#include <time.h>\n#include \"gtest/gtest.h\"\n#include \"absl/flags/flag.h\"\n@@ -126,21 +127,22 @@ void SleepIgnoreStopped(absl::Duration d) {\nTEST(SigstopTest, RestartSyscall) {\npid_t pid;\nconstexpr absl::Duration kStopDelay = absl::Seconds(5);\n- constexpr absl::Duration kSleepDelay = absl::Seconds(15);\nconstexpr absl::Duration kStartupDelay = absl::Seconds(5);\n- constexpr absl::Duration kErrorDelay = absl::Seconds(3);\n+ constexpr uint64_t kSleepDelay = 15;\n+ constexpr uint64_t kErrorDelay = 3;\nconst DisableSave ds; // Timing-related.\npid = fork();\nif (pid == 0) {\n- struct timespec ts = {.tv_sec = kSleepDelay / absl::Seconds(1)};\n- auto start = absl::Now();\n+ struct timespec ts = {.tv_sec = kSleepDelay};\n+ struct timespec start, finish;\n+ TEST_CHECK(clock_gettime(CLOCK_MONOTONIC, &start) == 0);\nTEST_CHECK(nanosleep(&ts, nullptr) == 0);\n- auto finish = absl::Now();\n+ TEST_CHECK(clock_gettime(CLOCK_MONOTONIC, &finish) == 0);\n// Check that time spent stopped is counted as time spent sleeping.\n- TEST_CHECK(finish - start >= kSleepDelay);\n- TEST_CHECK(finish - start < kSleepDelay + kErrorDelay);\n+ TEST_CHECK(finish.tv_sec - start.tv_sec >= kSleepDelay);\n+ TEST_CHECK(finish.tv_sec - start.tv_sec < kSleepDelay + kErrorDelay);\n_exit(kChildMainThreadExitCode);\n}\nASSERT_THAT(pid, SyscallSucceeds());\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix deadlock caused by calling absl::Now() after fork
absl::Now() may acquire a lock. The problem happens if fork()
is called while another thread holds the lock. In this case,
absl::Now() gets stuck in the child process.
PiperOrigin-RevId: 423952981 |
259,885 | 24.01.2022 18:09:17 | 28,800 | d894748026b128ea09593729676dddd25eef2cca | Add missing fields to seccheck.ExecveInfo. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/seccheck/execve.go",
"new_path": "pkg/sentry/seccheck/execve.go",
"diff": "@@ -42,6 +42,12 @@ type ExecveInfo struct {\n// BinaryMode is the executable binary file's mode.\nBinaryMode uint16\n+ // BinaryUID is the executable binary file's owner.\n+ BinaryUID auth.KUID\n+\n+ // BinaryGID is the executable binary file's group.\n+ BinaryGID auth.KGID\n+\n// BinarySHA256 is the SHA-256 hash of the executable binary file.\n//\n// Note that this requires reading the entire file into memory, which is\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add missing fields to seccheck.ExecveInfo.
PiperOrigin-RevId: 423958103 |
259,858 | 24.01.2022 18:16:30 | 28,800 | cbc2aa94820f1b9e3a00bbda0ddb35e6c0ce08b4 | Add checklocks annotations for ipv4 types. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/icmp.go",
"new_path": "pkg/tcpip/network/ipv4/icmp.go",
"diff": "@@ -538,7 +538,7 @@ func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer, deliv\n// which it arrived, which isn't necessarily the same as the NIC on which it\n// will be transmitted. On the other hand, the route's NIC *is* guaranteed\n// to be the NIC on which the packet will be transmitted.\n- netEP, ok := p.mu.eps[route.NICID()]\n+ netEP, ok := p.eps[route.NICID()]\np.mu.Unlock()\nif !ok {\nreturn &tcpip.ErrNotConnected{}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/igmp.go",
"new_path": "pkg/tcpip/network/ipv4/igmp.go",
"diff": "@@ -102,7 +102,7 @@ func (igmp *igmpState) Enabled() bool {\n// SendReport implements ip.MulticastGroupProtocol.\n//\n-// Precondition: igmp.ep.mu must be read locked.\n+// +checklocksread:igmp.ep.mu\nfunc (igmp *igmpState) SendReport(groupAddress tcpip.Address) (bool, tcpip.Error) {\nigmpType := header.IGMPv2MembershipReport\nif igmp.v1Present() {\n@@ -113,7 +113,7 @@ func (igmp *igmpState) SendReport(groupAddress tcpip.Address) (bool, tcpip.Error\n// SendLeave implements ip.MulticastGroupProtocol.\n//\n-// Precondition: igmp.ep.mu must be read locked.\n+// +checklocksread:igmp.ep.mu\nfunc (igmp *igmpState) SendLeave(groupAddress tcpip.Address) tcpip.Error {\n// As per RFC 2236 Section 6, Page 8: \"If the interface state says the\n// Querier is running IGMPv1, this action SHOULD be skipped. If the flag\n@@ -143,7 +143,7 @@ func (igmp *igmpState) ShouldPerformProtocol(groupAddress tcpip.Address) bool {\n// Must only be called once for the lifetime of igmp.\nfunc (igmp *igmpState) init(ep *endpoint) {\nigmp.ep = ep\n- igmp.genericMulticastProtocol.Init(&ep.mu.RWMutex, ip.GenericMulticastProtocolOptions{\n+ igmp.genericMulticastProtocol.Init(&ep.mu, ip.GenericMulticastProtocolOptions{\nRand: ep.protocol.stack.Rand(),\nClock: ep.protocol.stack.Clock(),\nProtocol: igmp,\n@@ -155,7 +155,7 @@ func (igmp *igmpState) init(ep *endpoint) {\n})\n}\n-// Precondition: igmp.ep.mu must be locked.\n+// +checklocks:igmp.ep.mu\nfunc (igmp *igmpState) isSourceIPValidLocked(src tcpip.Address, messageType header.IGMPType) bool {\nif messageType == header.IGMPMembershipQuery {\n// RFC 2236 does not require the IGMP implementation to check the source IP\n@@ -175,7 +175,7 @@ func (igmp *igmpState) isSourceIPValidLocked(src tcpip.Address, messageType head\n//\n// Note: this rule applies to both V1 and V2 Membership Reports.\nvar isSourceIPValid bool\n- igmp.ep.mu.addressableEndpointState.ForEachPrimaryEndpoint(func(addressEndpoint stack.AddressEndpoint) bool {\n+ igmp.ep.addressableEndpointState.ForEachPrimaryEndpoint(func(addressEndpoint stack.AddressEndpoint) bool {\nif subnet := addressEndpoint.Subnet(); subnet.Contains(src) {\nisSourceIPValid = true\nreturn false\n@@ -186,7 +186,7 @@ func (igmp *igmpState) isSourceIPValidLocked(src tcpip.Address, messageType head\nreturn isSourceIPValid\n}\n-// Precondition: igmp.ep.mu must be locked.\n+// +checklocks:igmp.ep.mu\nfunc (igmp *igmpState) isPacketValidLocked(pkt *stack.PacketBuffer, messageType header.IGMPType, hasRouterAlertOption bool) bool {\n// We can safely assume that the IP header is valid if we got this far.\niph := header.IPv4(pkt.NetworkHeader().View())\n@@ -204,7 +204,7 @@ func (igmp *igmpState) isPacketValidLocked(pkt *stack.PacketBuffer, messageType\n// handleIGMP handles an IGMP packet.\n//\n-// Precondition: igmp.ep.mu must be locked.\n+// +checklocks:igmp.ep.mu\nfunc (igmp *igmpState) handleIGMP(pkt *stack.PacketBuffer, hasRouterAlertOption bool) {\nreceived := igmp.ep.stats.igmp.packetsReceived\nheaderView, ok := pkt.Data().PullUp(header.IGMPMinimumSize)\n@@ -287,7 +287,7 @@ func (igmp *igmpState) resetV1Present() {\n// handleMembershipQuery handles a membership query.\n//\n-// Precondition: igmp.ep.mu must be locked.\n+// +checklocks:igmp.ep.mu\nfunc (igmp *igmpState) handleMembershipQuery(groupAddress tcpip.Address, maxRespTime time.Duration) {\n// As per RFC 2236 Section 6, Page 10: If the maximum response time is zero\n// then change the state to note that an IGMPv1 router is present and\n@@ -304,14 +304,14 @@ func (igmp *igmpState) 
handleMembershipQuery(groupAddress tcpip.Address, maxResp\n// handleMembershipReport handles a membership report.\n//\n-// Precondition: igmp.ep.mu must be locked.\n+// +checklocks:igmp.ep.mu\nfunc (igmp *igmpState) handleMembershipReport(groupAddress tcpip.Address) {\nigmp.genericMulticastProtocol.HandleReportLocked(groupAddress)\n}\n// writePacket assembles and sends an IGMP packet.\n//\n-// Precondition: igmp.ep.mu must be read locked.\n+// +checklocksread:igmp.ep.mu\nfunc (igmp *igmpState) writePacket(destAddress tcpip.Address, groupAddress tcpip.Address, igmpType header.IGMPType) (bool, tcpip.Error) {\nigmpData := header.IGMP(buffer.NewView(header.IGMPReportMinimumSize))\nigmpData.SetType(igmpType)\n@@ -366,14 +366,14 @@ func (igmp *igmpState) writePacket(destAddress tcpip.Address, groupAddress tcpip\n// If the group already exists in the membership map, returns\n// *tcpip.ErrDuplicateAddress.\n//\n-// Precondition: igmp.ep.mu must be locked.\n+// +checklocks:igmp.ep.mu\nfunc (igmp *igmpState) joinGroup(groupAddress tcpip.Address) {\nigmp.genericMulticastProtocol.JoinGroupLocked(groupAddress)\n}\n// isInGroup returns true if the specified group has been joined locally.\n//\n-// Precondition: igmp.ep.mu must be read locked.\n+// +checklocksread:igmp.ep.mu\nfunc (igmp *igmpState) isInGroup(groupAddress tcpip.Address) bool {\nreturn igmp.genericMulticastProtocol.IsLocallyJoinedRLocked(groupAddress)\n}\n@@ -382,7 +382,7 @@ func (igmp *igmpState) isInGroup(groupAddress tcpip.Address) bool {\n// delay timers associated with that group, and sends the Leave Group message\n// if required.\n//\n-// Precondition: igmp.ep.mu must be locked.\n+// +checklocks:igmp.ep.mu\nfunc (igmp *igmpState) leaveGroup(groupAddress tcpip.Address) tcpip.Error {\n// LeaveGroup returns false only if the group was not joined.\nif igmp.genericMulticastProtocol.LeaveGroupLocked(groupAddress) {\n@@ -395,7 +395,7 @@ func (igmp *igmpState) leaveGroup(groupAddress tcpip.Address) tcpip.Error {\n// softLeaveAll leaves all groups from the perspective of IGMP, but remains\n// joined locally.\n//\n-// Precondition: igmp.ep.mu must be locked.\n+// +checklocks:igmp.ep.mu\nfunc (igmp *igmpState) softLeaveAll() {\nigmp.genericMulticastProtocol.MakeAllNonMemberLocked()\n}\n@@ -403,14 +403,14 @@ func (igmp *igmpState) softLeaveAll() {\n// initializeAll attemps to initialize the IGMP state for each group that has\n// been joined locally.\n//\n-// Precondition: igmp.ep.mu must be locked.\n+// +checklocks:igmp.ep.mu\nfunc (igmp *igmpState) initializeAll() {\nigmp.genericMulticastProtocol.InitializeGroupsLocked()\n}\n// sendQueuedReports attempts to send any reports that are queued for sending.\n//\n-// Precondition: igmp.ep.mu must be locked.\n+// +checklocksread:igmp.ep.mu\nfunc (igmp *igmpState) sendQueuedReports() {\nigmp.genericMulticastProtocol.SendQueuedReportsLocked()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -85,22 +85,24 @@ type endpoint struct {\n// enabled is set to 1 when the endpoint is enabled and 0 when it is\n// disabled.\n//\n- // Must be accessed using atomic operations.\n+ // +checkatomic\nenabled uint32\n// forwarding is set to forwardingEnabled when the endpoint has forwarding\n// enabled and forwardingDisabled when it is disabled.\n//\n- // Must be accessed using atomic operations.\n+ // +checkatomic\nforwarding uint32\n- mu struct {\n- sync.RWMutex\n+ // mu protects below.\n+ mu sync.RWMutex\n+ // +checklocks:mu\naddressableEndpointState stack.AddressableEndpointState\n+\n+ // +checklocks:mu\nigmp igmpState\n}\n-}\n// HandleLinkResolutionFailure implements stack.LinkResolvableNetworkEndpoint.\nfunc (e *endpoint) HandleLinkResolutionFailure(pkt *stack.PacketBuffer) {\n@@ -136,8 +138,8 @@ func (p *protocol) NewEndpoint(nic stack.NetworkInterface, dispatcher stack.Tran\nprotocol: p,\n}\ne.mu.Lock()\n- e.mu.addressableEndpointState.Init(e)\n- e.mu.igmp.init(e)\n+ e.addressableEndpointState.Init(e)\n+ e.igmp.init(e)\ne.mu.Unlock()\ntcpip.InitStatCounters(reflect.ValueOf(&e.stats.localStats).Elem())\n@@ -148,7 +150,7 @@ func (p *protocol) NewEndpoint(nic stack.NetworkInterface, dispatcher stack.Tran\ne.stats.igmp.init(&e.stats.localStats.IGMP, &stackStats.IGMP)\np.mu.Lock()\n- p.mu.eps[nic.ID()] = e\n+ p.eps[nic.ID()] = e\np.mu.Unlock()\nreturn e\n@@ -158,7 +160,7 @@ func (p *protocol) findEndpointWithAddress(addr tcpip.Address) *endpoint {\np.mu.RLock()\ndefer p.mu.RUnlock()\n- for _, e := range p.mu.eps {\n+ for _, e := range p.eps {\nif addressEndpoint := e.AcquireAssignedAddress(addr, false /* allowTemp */, stack.NeverPrimaryEndpoint); addressEndpoint != nil {\naddressEndpoint.DecRef()\nreturn e\n@@ -171,14 +173,14 @@ func (p *protocol) findEndpointWithAddress(addr tcpip.Address) *endpoint {\nfunc (p *protocol) getEndpointForNIC(id tcpip.NICID) (*endpoint, bool) {\np.mu.RLock()\ndefer p.mu.RUnlock()\n- ep, ok := p.mu.eps[id]\n+ ep, ok := p.eps[id]\nreturn ep, ok\n}\nfunc (p *protocol) forgetEndpoint(nicID tcpip.NICID) {\np.mu.Lock()\ndefer p.mu.Unlock()\n- delete(p.mu.eps, nicID)\n+ delete(p.eps, nicID)\n}\n// Forwarding implements stack.ForwardingNetworkEndpoint.\n@@ -235,7 +237,12 @@ func (e *endpoint) SetForwarding(forwarding bool) {\nfunc (e *endpoint) Enable() tcpip.Error {\ne.mu.Lock()\ndefer e.mu.Unlock()\n+ return e.enableLocked()\n+}\n+// +checklocks:e.mu\n+// +checklocksalias:e.igmp.ep.mu=e.mu\n+func (e *endpoint) enableLocked() tcpip.Error {\n// If the NIC is not enabled, the endpoint can't do anything meaningful so\n// don't enable the endpoint.\nif !e.nic.Enabled() {\n@@ -248,7 +255,7 @@ func (e *endpoint) Enable() tcpip.Error {\n}\n// Create an endpoint to receive broadcast packets on this interface.\n- ep, err := e.mu.addressableEndpointState.AddAndAcquirePermanentAddress(ipv4BroadcastAddr, stack.AddressProperties{PEB: stack.NeverPrimaryEndpoint})\n+ ep, err := e.addressableEndpointState.AddAndAcquirePermanentAddress(ipv4BroadcastAddr, stack.AddressProperties{PEB: stack.NeverPrimaryEndpoint})\nif err != nil {\nreturn err\n}\n@@ -259,7 +266,7 @@ func (e *endpoint) Enable() tcpip.Error {\n// endpoint may have left groups from the perspective of IGMP when the\n// endpoint was disabled. Either way, we need to let routers know to\n// send us multicast traffic.\n- e.mu.igmp.initializeAll()\n+ e.igmp.initializeAll()\n// As per RFC 1122 section 3.3.7, all hosts should join the all-hosts\n// multicast group. 
Note, the IANA calls the all-hosts multicast group the\n@@ -301,6 +308,8 @@ func (e *endpoint) Disable() {\ne.disableLocked()\n}\n+// +checklocks:e.mu\n+// +checklocksalias:e.igmp.ep.mu=e.mu\nfunc (e *endpoint) disableLocked() {\nif !e.isEnabled() {\nreturn\n@@ -315,10 +324,10 @@ func (e *endpoint) disableLocked() {\n// Leave groups from the perspective of IGMP so that routers know that\n// we are no longer interested in the group.\n- e.mu.igmp.softLeaveAll()\n+ e.igmp.softLeaveAll()\n// The address may have already been removed.\n- switch err := e.mu.addressableEndpointState.RemovePermanentAddress(ipv4BroadcastAddr.Address); err.(type) {\n+ switch err := e.addressableEndpointState.RemovePermanentAddress(ipv4BroadcastAddr.Address); err.(type) {\ncase nil, *tcpip.ErrBadLocalAddress:\ndefault:\npanic(fmt.Sprintf(\"unexpected error when removing address = %s: %s\", ipv4BroadcastAddr.Address, err))\n@@ -328,7 +337,7 @@ func (e *endpoint) disableLocked() {\n//\n// If the node comes back up on the same network, it will re-learn that it\n// needs to perform IGMPv1.\n- e.mu.igmp.resetV1Present()\n+ e.igmp.resetV1Present()\nif !e.setEnabled(false) {\npanic(\"should have only done work to disable the endpoint if it was enabled\")\n@@ -981,7 +990,7 @@ func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt *stack.PacketBuffer,\n}\nif p == header.IGMPProtocolNumber {\ne.mu.Lock()\n- e.mu.igmp.handleIGMP(pkt, hasRouterAlertOption)\n+ e.igmp.handleIGMP(pkt, hasRouterAlertOption) // +checklocksforce: e == e.igmp.ep.\ne.mu.Unlock()\nreturn\n}\n@@ -1010,7 +1019,7 @@ func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt *stack.PacketBuffer,\nfunc (e *endpoint) Close() {\ne.mu.Lock()\ne.disableLocked()\n- e.mu.addressableEndpointState.Cleanup()\n+ e.addressableEndpointState.Cleanup()\ne.mu.Unlock()\ne.protocol.forgetEndpoint(e.nic.ID())\n@@ -1021,25 +1030,33 @@ func (e *endpoint) AddAndAcquirePermanentAddress(addr tcpip.AddressWithPrefix, p\ne.mu.RLock()\ndefer e.mu.RUnlock()\n- ep, err := e.mu.addressableEndpointState.AddAndAcquirePermanentAddress(addr, properties)\n+ ep, err := e.addressableEndpointState.AddAndAcquirePermanentAddress(addr, properties)\nif err == nil {\n- e.mu.igmp.sendQueuedReports()\n+ e.sendQueuedReports()\n}\nreturn ep, err\n}\n+// sendQueuedReports sends queued igmp reports.\n+//\n+// +checklocksread:e.mu\n+// +checklocksalias:e.igmp.ep.mu=e.mu\n+func (e *endpoint) sendQueuedReports() {\n+ e.igmp.sendQueuedReports()\n+}\n+\n// RemovePermanentAddress implements stack.AddressableEndpoint.\nfunc (e *endpoint) RemovePermanentAddress(addr tcpip.Address) tcpip.Error {\ne.mu.RLock()\ndefer e.mu.RUnlock()\n- return e.mu.addressableEndpointState.RemovePermanentAddress(addr)\n+ return e.addressableEndpointState.RemovePermanentAddress(addr)\n}\n// MainAddress implements stack.AddressableEndpoint.\nfunc (e *endpoint) MainAddress() tcpip.AddressWithPrefix {\ne.mu.RLock()\ndefer e.mu.RUnlock()\n- return e.mu.addressableEndpointState.MainAddress()\n+ return e.addressableEndpointState.MainAddress()\n}\n// AcquireAssignedAddress implements stack.AddressableEndpoint.\n@@ -1048,7 +1065,7 @@ func (e *endpoint) AcquireAssignedAddress(localAddr tcpip.Address, allowTemp boo\ndefer e.mu.RUnlock()\nloopback := e.nic.IsLoopback()\n- return e.mu.addressableEndpointState.AcquireAssignedAddressOrMatching(localAddr, func(addressEndpoint stack.AddressEndpoint) bool {\n+ return e.addressableEndpointState.AcquireAssignedAddressOrMatching(localAddr, func(addressEndpoint stack.AddressEndpoint) bool {\nsubnet 
:= addressEndpoint.Subnet()\n// IPv4 has a notion of a subnet broadcast address and considers the\n// loopback interface bound to an address's whole subnet (on linux).\n@@ -1066,23 +1083,23 @@ func (e *endpoint) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allow\n// acquireOutgoingPrimaryAddressRLocked is like AcquireOutgoingPrimaryAddress\n// but with locking requirements\n//\n-// Precondition: igmp.ep.mu must be read locked.\n+// +checklocksread:e.mu\nfunc (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint {\n- return e.mu.addressableEndpointState.AcquireOutgoingPrimaryAddress(remoteAddr, allowExpired)\n+ return e.addressableEndpointState.AcquireOutgoingPrimaryAddress(remoteAddr, allowExpired)\n}\n// PrimaryAddresses implements stack.AddressableEndpoint.\nfunc (e *endpoint) PrimaryAddresses() []tcpip.AddressWithPrefix {\ne.mu.RLock()\ndefer e.mu.RUnlock()\n- return e.mu.addressableEndpointState.PrimaryAddresses()\n+ return e.addressableEndpointState.PrimaryAddresses()\n}\n// PermanentAddresses implements stack.AddressableEndpoint.\nfunc (e *endpoint) PermanentAddresses() []tcpip.AddressWithPrefix {\ne.mu.RLock()\ndefer e.mu.RUnlock()\n- return e.mu.addressableEndpointState.PermanentAddresses()\n+ return e.addressableEndpointState.PermanentAddresses()\n}\n// JoinGroup implements stack.GroupAddressableEndpoint.\n@@ -1094,13 +1111,14 @@ func (e *endpoint) JoinGroup(addr tcpip.Address) tcpip.Error {\n// joinGroupLocked is like JoinGroup but with locking requirements.\n//\n-// Precondition: e.mu must be locked.\n+// +checklocks:e.mu\n+// +checklocksalias:e.igmp.ep.mu=e.mu\nfunc (e *endpoint) joinGroupLocked(addr tcpip.Address) tcpip.Error {\nif !header.IsV4MulticastAddress(addr) {\nreturn &tcpip.ErrBadAddress{}\n}\n- e.mu.igmp.joinGroup(addr)\n+ e.igmp.joinGroup(addr)\nreturn nil\n}\n@@ -1113,16 +1131,17 @@ func (e *endpoint) LeaveGroup(addr tcpip.Address) tcpip.Error {\n// leaveGroupLocked is like LeaveGroup but with locking requirements.\n//\n-// Precondition: e.mu must be locked.\n+// +checklocks:e.mu\n+// +checklocksalias:e.igmp.ep.mu=e.mu\nfunc (e *endpoint) leaveGroupLocked(addr tcpip.Address) tcpip.Error {\n- return e.mu.igmp.leaveGroup(addr)\n+ return e.igmp.leaveGroup(addr)\n}\n// IsInGroup implements stack.GroupAddressableEndpoint.\nfunc (e *endpoint) IsInGroup(addr tcpip.Address) bool {\ne.mu.RLock()\ndefer e.mu.RUnlock()\n- return e.mu.igmp.isInGroup(addr)\n+ return e.igmp.isInGroup(addr) // +checklocksforce: e.mu==e.igmp.ep.mu.\n}\n// Stats implements stack.NetworkEndpoint.\n@@ -1137,21 +1156,22 @@ var _ fragmentation.TimeoutHandler = (*protocol)(nil)\ntype protocol struct {\nstack *stack.Stack\n- mu struct {\n- sync.RWMutex\n+ // mu protects annotated fields below.\n+ mu sync.RWMutex\n// eps is keyed by NICID to allow protocol methods to retrieve an endpoint\n// when handling a packet, by looking at which NIC handled the packet.\n+ // +checklocks:mu\neps map[tcpip.NICID]*endpoint\n// ICMP types for which the stack's global rate limiting must apply.\n+ // +checklocks:mu\nicmpRateLimitedTypes map[header.ICMPv4Type]struct{}\n- }\n// defaultTTL is the current default TTL for the protocol. 
Only the\n// uint8 portion of it is meaningful.\n//\n- // Must be accessed using atomic operations.\n+ // +checkatomic\ndefaultTTL uint32\nids []uint32\n@@ -1282,7 +1302,7 @@ func (p *protocol) allowICMPReply(icmpType header.ICMPv4Type, code header.ICMPv4\np.mu.RLock()\ndefer p.mu.RUnlock()\n- if _, ok := p.mu.icmpRateLimitedTypes[icmpType]; ok {\n+ if _, ok := p.icmpRateLimitedTypes[icmpType]; ok {\nreturn p.stack.AllowICMPMessage()\n}\nreturn true\n@@ -1386,10 +1406,10 @@ func NewProtocolWithOptions(opts Options) stack.NetworkProtocolFactory {\noptions: opts,\n}\np.fragmentation = fragmentation.NewFragmentation(fragmentblockSize, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, ReassembleTimeout, s.Clock(), p)\n- p.mu.eps = make(map[tcpip.NICID]*endpoint)\n+ p.eps = make(map[tcpip.NICID]*endpoint)\n// Set ICMP rate limiting to Linux defaults.\n// See https://man7.org/linux/man-pages/man7/icmp.7.html.\n- p.mu.icmpRateLimitedTypes = map[header.ICMPv4Type]struct{}{\n+ p.icmpRateLimitedTypes = map[header.ICMPv4Type]struct{}{\nheader.ICMPv4DstUnreachable: struct{}{},\nheader.ICMPv4SrcQuench: struct{}{},\nheader.ICMPv4TimeExceeded: struct{}{},\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/stats_test.go",
"new_path": "pkg/tcpip/network/ipv4/stats_test.go",
"diff": "@@ -34,10 +34,11 @@ func (t *testInterface) ID() tcpip.NICID {\nreturn t.nicID\n}\n+// +checklocks:proto.mu\nfunc knownNICIDs(proto *protocol) []tcpip.NICID {\nvar nicIDs []tcpip.NICID\n- for k := range proto.mu.eps {\n+ for k := range proto.eps {\nnicIDs = append(nicIDs, k)\n}\n@@ -54,7 +55,7 @@ func TestClearEndpointFromProtocolOnClose(t *testing.T) {\nvar nicIDs []tcpip.NICID\nproto.mu.Lock()\n- foundEP, hasEndpointBeforeClose := proto.mu.eps[nic.ID()]\n+ foundEP, hasEndpointBeforeClose := proto.eps[nic.ID()]\nnicIDs = knownNICIDs(proto)\nproto.mu.Unlock()\n@@ -68,7 +69,7 @@ func TestClearEndpointFromProtocolOnClose(t *testing.T) {\nep.Close()\nproto.mu.Lock()\n- _, hasEP := proto.mu.eps[nic.ID()]\n+ _, hasEP := proto.eps[nic.ID()]\nnicIDs = knownNICIDs(proto)\nproto.mu.Unlock()\nif hasEP {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add checklocks annotations for ipv4 types.
PiperOrigin-RevId: 423959261 |
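For readers following this series of checklocks changes: the annotations replace prose preconditions ("Precondition: e.mu must be locked.") with comments a static analyzer can verify — +checklocks:mu on a field or method for exclusive access, +checklocksread:mu for read-locked access, and +checklocksforce/+checklocksalias as escape hatches where aliasing defeats the analysis, all visible in the diffs above. A minimal sketch of the convention on a hypothetical type:

package counter

import "sync"

type counter struct {
	mu sync.Mutex

	// +checklocks:mu
	value int // any access without mu held is flagged statically
}

func (c *counter) inc() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.incLocked()
}

// incLocked encodes its locking precondition in machine-checked form.
// +checklocks:c.mu
func (c *counter) incLocked() {
	c.value++
}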
259,858 | 24.01.2022 19:13:11 | 28,800 | 7b6fc15705541555f0fcf0d72e1f2088d27b712c | Add checklocks annotations for stack/nic.go. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -68,20 +68,22 @@ type nic struct {\n// complete.\nlinkResQueue packetsPendingLinkResolution\n- mu struct {\n- sync.RWMutex\n+ // mu protects annotated fields below.\n+ mu sync.RWMutex\n+\n+ // +checklocks:mu\nspoofing bool\n+\n+ // +checklocks:mu\npromiscuous bool\n- }\n- packetEPs struct {\n- mu sync.RWMutex\n+ // packetEPsMu protects annotated fields below.\n+ packetEPsMu sync.RWMutex\n// eps is protected by the mutex, but the values contained in it are not.\n//\n- // +checklocks:mu\n- eps map[tcpip.NetworkProtocolNumber]*packetEndpointList\n- }\n+ // +checklocks:packetEPsMu\n+ packetEPs map[tcpip.NetworkProtocolNumber]*packetEndpointList\nqDisc QueueingDiscipline\nrawLinkEP LinkRawWriter\n@@ -184,10 +186,10 @@ func newNIC(stack *Stack, id tcpip.NICID, ep LinkEndpoint, opts NICOptions) *nic\n}\nnic.linkResQueue.init(nic)\n- nic.packetEPs.mu.Lock()\n- defer nic.packetEPs.mu.Unlock()\n+ nic.packetEPsMu.Lock()\n+ defer nic.packetEPsMu.Unlock()\n- nic.packetEPs.eps = make(map[tcpip.NetworkProtocolNumber]*packetEndpointList)\n+ nic.packetEPs = make(map[tcpip.NetworkProtocolNumber]*packetEndpointList)\nresolutionRequired := ep.Capabilities()&CapabilityResolutionRequired != 0\n@@ -324,14 +326,14 @@ func (n *nic) remove() tcpip.Error {\n// setPromiscuousMode enables or disables promiscuous mode.\nfunc (n *nic) setPromiscuousMode(enable bool) {\nn.mu.Lock()\n- n.mu.promiscuous = enable\n+ n.promiscuous = enable\nn.mu.Unlock()\n}\n// Promiscuous implements NetworkInterface.\nfunc (n *nic) Promiscuous() bool {\nn.mu.RLock()\n- rv := n.mu.promiscuous\n+ rv := n.promiscuous\nn.mu.RUnlock()\nreturn rv\n}\n@@ -401,7 +403,7 @@ func (n *nic) writePacket(pkt *PacketBuffer) tcpip.Error {\n// setSpoofing enables or disables address spoofing.\nfunc (n *nic) setSpoofing(enable bool) {\nn.mu.Lock()\n- n.mu.spoofing = enable\n+ n.spoofing = enable\nn.mu.Unlock()\n}\n@@ -409,7 +411,7 @@ func (n *nic) setSpoofing(enable bool) {\nfunc (n *nic) Spoofing() bool {\nn.mu.RLock()\ndefer n.mu.RUnlock()\n- return n.mu.spoofing\n+ return n.spoofing\n}\n// primaryAddress returns an address that can be used to communicate with\n@@ -426,7 +428,7 @@ func (n *nic) primaryEndpoint(protocol tcpip.NetworkProtocolNumber, remoteAddr t\n}\nn.mu.RLock()\n- spoofing := n.mu.spoofing\n+ spoofing := n.spoofing\nn.mu.RUnlock()\nreturn addressableEndpoint.AcquireOutgoingPrimaryAddress(remoteAddr, spoofing)\n@@ -477,9 +479,9 @@ func (n *nic) getAddressOrCreateTemp(protocol tcpip.NetworkProtocolNumber, addre\nvar spoofingOrPromiscuous bool\nswitch tempRef {\ncase spoofing:\n- spoofingOrPromiscuous = n.mu.spoofing\n+ spoofingOrPromiscuous = n.spoofing\ncase promiscuous:\n- spoofingOrPromiscuous = n.mu.promiscuous\n+ spoofingOrPromiscuous = n.promiscuous\n}\nn.mu.RUnlock()\nreturn n.getAddressOrCreateTempInner(protocol, address, spoofingOrPromiscuous, peb)\n@@ -758,12 +760,12 @@ func (n *nic) DeliverNetworkPacket(remote, local tcpip.LinkAddress, protocol tcp\nep.HandlePacket(n.id, local, protocol, clone)\n}\n- n.packetEPs.mu.Lock()\n+ n.packetEPsMu.Lock()\n// Are any packet type sockets listening for this network protocol?\n- protoEPs, protoEPsOK := n.packetEPs.eps[protocol]\n+ protoEPs, protoEPsOK := n.packetEPs[protocol]\n// Other packet type sockets that are listening for all protocols.\n- anyEPs, anyEPsOK := n.packetEPs.eps[header.EthernetProtocolAll]\n- n.packetEPs.mu.Unlock()\n+ anyEPs, anyEPsOK := n.packetEPs[header.EthernetProtocolAll]\n+ n.packetEPsMu.Unlock()\nif protoEPsOK {\nprotoEPs.forEach(deliverPacketEPs)\n@@ 
-777,12 +779,12 @@ func (n *nic) DeliverNetworkPacket(remote, local tcpip.LinkAddress, protocol tcp\n// deliverOutboundPacket delivers outgoing packets to interested endpoints.\nfunc (n *nic) deliverOutboundPacket(remote tcpip.LinkAddress, pkt *PacketBuffer) {\n- n.packetEPs.mu.RLock()\n- defer n.packetEPs.mu.RUnlock()\n+ n.packetEPsMu.RLock()\n+ defer n.packetEPsMu.RUnlock()\n// We do not deliver to protocol specific packet endpoints as on Linux\n// only ETH_P_ALL endpoints get outbound packets.\n// Add any other packet sockets that maybe listening for all protocols.\n- eps, ok := n.packetEPs.eps[header.EthernetProtocolAll]\n+ eps, ok := n.packetEPs[header.EthernetProtocolAll]\nif !ok {\nreturn\n}\n@@ -956,13 +958,13 @@ func (n *nic) setNUDConfigs(protocol tcpip.NetworkProtocolNumber, c NUDConfigura\n}\nfunc (n *nic) registerPacketEndpoint(netProto tcpip.NetworkProtocolNumber, ep PacketEndpoint) tcpip.Error {\n- n.packetEPs.mu.Lock()\n- defer n.packetEPs.mu.Unlock()\n+ n.packetEPsMu.Lock()\n+ defer n.packetEPsMu.Unlock()\n- eps, ok := n.packetEPs.eps[netProto]\n+ eps, ok := n.packetEPs[netProto]\nif !ok {\neps = new(packetEndpointList)\n- n.packetEPs.eps[netProto] = eps\n+ n.packetEPs[netProto] = eps\n}\neps.add(ep)\n@@ -970,16 +972,16 @@ func (n *nic) registerPacketEndpoint(netProto tcpip.NetworkProtocolNumber, ep Pa\n}\nfunc (n *nic) unregisterPacketEndpoint(netProto tcpip.NetworkProtocolNumber, ep PacketEndpoint) {\n- n.packetEPs.mu.Lock()\n- defer n.packetEPs.mu.Unlock()\n+ n.packetEPsMu.Lock()\n+ defer n.packetEPsMu.Unlock()\n- eps, ok := n.packetEPs.eps[netProto]\n+ eps, ok := n.packetEPs[netProto]\nif !ok {\nreturn\n}\neps.remove(ep)\nif eps.len() == 0 {\n- delete(n.packetEPs.eps, netProto)\n+ delete(n.packetEPs, netProto)\n}\n}\n@@ -988,7 +990,7 @@ func (n *nic) unregisterPacketEndpoint(netProto tcpip.NetworkProtocolNumber, ep\n// has been removed) unless the NIC is in spoofing mode, or temporary.\nfunc (n *nic) isValidForOutgoing(ep AssignableAddressEndpoint) bool {\nn.mu.RLock()\n- spoofing := n.mu.spoofing\n+ spoofing := n.spoofing\nn.mu.RUnlock()\nreturn n.Enabled() && ep.IsAssigned(spoofing)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add checklocks annotations for stack/nic.go.
PiperOrigin-RevId: 423966906 |
259,853 | 24.01.2022 19:18:37 | 28,800 | 836731eb725425deb23fb516320f655079255429 | kvm: enable supervisor-mode access prevention (SMAP)
SMAP can be temporarily disabled for explicit memory accesses by setting
the EFLAGS.AC (Alignment Check) flag. But in our case, EFLAGS.AC is in
KernelFlagsClear.
Sentry never reads/writes app memory from app address space. | [
{
"change_type": "MODIFY",
"old_path": "pkg/ring0/kernel_amd64.go",
"new_path": "pkg/ring0/kernel_amd64.go",
"diff": "@@ -194,6 +194,9 @@ func (c *CPU) CR4() uint64 {\nif hasSMEP {\ncr4 |= _CR4_SMEP\n}\n+ if hasSMAP {\n+ cr4 |= _CR4_SMAP\n+ }\nif hasFSGSBASE {\ncr4 |= _CR4_FSGSBASE\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/ring0/lib_amd64.go",
"new_path": "pkg/ring0/lib_amd64.go",
"diff": "@@ -67,6 +67,7 @@ func rdmsr(reg uintptr) uintptr\n// Mostly-constants set by Init.\nvar (\nhasSMEP bool\n+ hasSMAP bool\nhasPCID bool\nhasXSAVEOPT bool\nhasXSAVE bool\n@@ -90,6 +91,7 @@ func Init(fs cpuid.FeatureSet) {\n// Initialize all functions.\nhasSMEP = fs.HasFeature(cpuid.X86FeatureSMEP)\n+ hasSMAP = fs.HasFeature(cpuid.X86FeatureSMAP)\nhasPCID = fs.HasFeature(cpuid.X86FeaturePCID)\nhasXSAVEOPT = fs.UseXsaveopt()\nhasXSAVE = fs.UseXsave()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/ring0/x86.go",
"new_path": "pkg/ring0/x86.go",
"diff": "@@ -34,6 +34,7 @@ const (\n_CR4_PCIDE = 1 << 17\n_CR4_OSXSAVE = 1 << 18\n_CR4_SMEP = 1 << 20\n+ _CR4_SMAP = 1 << 21\n_RFLAGS_AC = 1 << 18\n_RFLAGS_NT = 1 << 14\n"
}
] | Go | Apache License 2.0 | google/gvisor | kvm: enable supervisor-mode access prevention (SMAP)
SMAP can be temporarily disabled for explicit memory accesses by setting
the EFLAGS.AC (Alignment Check) flag. But in our case, EFLAGS.AC is in
KernelFlagsClear.
Sentry never reads/writes app memory from app address space.
PiperOrigin-RevId: 423967732 |
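For reference, SMAP occupies bit 21 of CR4 (the _CR4_SMAP constant above), next to SMEP at bit 20. A standalone sketch of the feature-gated CR4 construction mirrored from the CPU.CR4() change, with an abbreviated set of base bits:

package main

import "fmt"

const (
	_CR4_PSE  = 1 << 4  // page size extensions
	_CR4_PAE  = 1 << 5  // physical address extension
	_CR4_SMEP = 1 << 20 // supervisor-mode execution prevention
	_CR4_SMAP = 1 << 21 // supervisor-mode access prevention
)

// cr4 enables each protection only when the CPU advertises the
// corresponding CPUID feature bit, as recorded by ring0.Init.
func cr4(hasSMEP, hasSMAP bool) uint64 {
	v := uint64(_CR4_PSE | _CR4_PAE)
	if hasSMEP {
		v |= _CR4_SMEP
	}
	if hasSMAP {
		v |= _CR4_SMAP
	}
	return v
}

func main() {
	fmt.Printf("%#x\n", cr4(true, true)) // 0x300030
}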
259,858 | 24.01.2022 19:51:35 | 28,800 | 7482e48e35ca75d24ffc03957d2ad38951143649 | Add checklocks annotations for stack/route.go. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/route.go",
"new_path": "pkg/tcpip/stack/route.go",
"diff": "@@ -33,16 +33,16 @@ type Route struct {\n// address's assigned status without the NIC.\nlocalAddressNIC *nic\n- mu struct {\n- sync.RWMutex\n+ // mu protects annotated fields below.\n+ mu sync.RWMutex\n// localAddressEndpoint is the local address this route is associated with.\n+ // +checklocks:mu\nlocalAddressEndpoint AssignableAddressEndpoint\n- // remoteLinkAddress is the link-layer (MAC) address of the next hop in the\n- // route.\n+ // remoteLinkAddress is the link-layer (MAC) address of the next hop.\n+ // +checklocks:mu\nremoteLinkAddress tcpip.LinkAddress\n- }\n// outgoingNIC is the interface this route uses to write packets.\noutgoingNIC *nic\n@@ -119,10 +119,11 @@ func (r *Route) Fields() RouteInfo {\nreturn r.fieldsLocked()\n}\n+// +checklocksread:r.mu\nfunc (r *Route) fieldsLocked() RouteInfo {\nreturn RouteInfo{\nrouteInfo: r.routeInfo,\n- RemoteLinkAddress: r.mu.remoteLinkAddress,\n+ RemoteLinkAddress: r.remoteLinkAddress,\n}\n}\n@@ -242,7 +243,7 @@ func makeRouteInner(netProto tcpip.NetworkProtocolNumber, localAddr, remoteAddr\n}\nr.mu.Lock()\n- r.mu.localAddressEndpoint = localAddressEndpoint\n+ r.localAddressEndpoint = localAddressEndpoint\nr.mu.Unlock()\nreturn r\n@@ -268,7 +269,7 @@ func makeLocalRoute(netProto tcpip.NetworkProtocolNumber, localAddr, remoteAddr\nfunc (r *Route) RemoteLinkAddress() tcpip.LinkAddress {\nr.mu.RLock()\ndefer r.mu.RUnlock()\n- return r.mu.remoteLinkAddress\n+ return r.remoteLinkAddress\n}\n// NICID returns the id of the NIC from which this route originates.\n@@ -340,7 +341,7 @@ func (r *Route) GSOMaxSize() uint32 {\nfunc (r *Route) ResolveWith(addr tcpip.LinkAddress) {\nr.mu.Lock()\ndefer r.mu.Unlock()\n- r.mu.remoteLinkAddress = addr\n+ r.remoteLinkAddress = addr\n}\n// ResolvedFieldsResult is the result of a route resolution attempt.\n@@ -427,8 +428,9 @@ func (r *Route) IsResolutionRequired() bool {\nreturn r.isResolutionRequiredRLocked()\n}\n+// +checklocksread:r.mu\nfunc (r *Route) isResolutionRequiredRLocked() bool {\n- return len(r.mu.remoteLinkAddress) == 0 && r.linkRes != nil && r.isValidForOutgoingRLocked() && !r.local()\n+ return len(r.remoteLinkAddress) == 0 && r.linkRes != nil && r.isValidForOutgoingRLocked() && !r.local()\n}\nfunc (r *Route) isValidForOutgoing() bool {\n@@ -437,12 +439,13 @@ func (r *Route) isValidForOutgoing() bool {\nreturn r.isValidForOutgoingRLocked()\n}\n+// +checklocksread:r.mu\nfunc (r *Route) isValidForOutgoingRLocked() bool {\nif !r.outgoingNIC.Enabled() {\nreturn false\n}\n- localAddressEndpoint := r.mu.localAddressEndpoint\n+ localAddressEndpoint := r.localAddressEndpoint\nif localAddressEndpoint == nil || !r.localAddressNIC.isValidForOutgoing(localAddressEndpoint) {\nreturn false\n}\n@@ -491,7 +494,7 @@ func (r *Route) Release() {\nr.mu.Lock()\ndefer r.mu.Unlock()\n- if ep := r.mu.localAddressEndpoint; ep != nil {\n+ if ep := r.localAddressEndpoint; ep != nil {\nep.DecRef()\n}\n}\n@@ -504,8 +507,9 @@ func (r *Route) Acquire() {\nr.acquireLocked()\n}\n+// +checklocksread:r.mu\nfunc (r *Route) acquireLocked() {\n- if ep := r.mu.localAddressEndpoint; ep != nil {\n+ if ep := r.localAddressEndpoint; ep != nil {\nif !ep.IncRef() {\npanic(fmt.Sprintf(\"failed to increment reference count for local address endpoint = %s\", r.LocalAddress()))\n}\n@@ -523,7 +527,7 @@ func (r *Route) isV4Broadcast(addr tcpip.Address) bool {\n}\nr.mu.RLock()\n- localAddressEndpoint := r.mu.localAddressEndpoint\n+ localAddressEndpoint := r.localAddressEndpoint\nr.mu.RUnlock()\nif localAddressEndpoint == nil {\nreturn 
false\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -66,6 +66,8 @@ func (u *uniqueIDGenerator) UniqueID() uint64 {\n// Stack is a networking stack, with all supported protocols, NICs, and route\n// table.\n+//\n+// LOCK ORDERING: mu > routeMu.\ntype Stack struct {\ntransportProtocols map[tcpip.TransportProtocolNumber]*transportProtocolState\nnetworkProtocols map[tcpip.NetworkProtocolNumber]NetworkProtocol\n@@ -79,14 +81,11 @@ type Stack struct {\nstats tcpip.Stats\n- // LOCK ORDERING: mu > route.mu.\n- route struct {\n- mu struct {\n- sync.RWMutex\n+ // routeMu protects annotated fields below.\n+ routeMu sync.RWMutex\n- table []tcpip.Route\n- }\n- }\n+ // +checklocks:routeMu\n+ routeTable []tcpip.Route\nmu sync.RWMutex\nnics map[tcpip.NICID]*nic\n@@ -579,37 +578,37 @@ func (s *Stack) SetPortRange(start uint16, end uint16) tcpip.Error {\n//\n// This method takes ownership of the table.\nfunc (s *Stack) SetRouteTable(table []tcpip.Route) {\n- s.route.mu.Lock()\n- defer s.route.mu.Unlock()\n- s.route.mu.table = table\n+ s.routeMu.Lock()\n+ defer s.routeMu.Unlock()\n+ s.routeTable = table\n}\n// GetRouteTable returns the route table which is currently in use.\nfunc (s *Stack) GetRouteTable() []tcpip.Route {\n- s.route.mu.RLock()\n- defer s.route.mu.RUnlock()\n- return append([]tcpip.Route(nil), s.route.mu.table...)\n+ s.routeMu.RLock()\n+ defer s.routeMu.RUnlock()\n+ return append([]tcpip.Route(nil), s.routeTable...)\n}\n// AddRoute appends a route to the route table.\nfunc (s *Stack) AddRoute(route tcpip.Route) {\n- s.route.mu.Lock()\n- defer s.route.mu.Unlock()\n- s.route.mu.table = append(s.route.mu.table, route)\n+ s.routeMu.Lock()\n+ defer s.routeMu.Unlock()\n+ s.routeTable = append(s.routeTable, route)\n}\n// RemoveRoutes removes matching routes from the route table.\nfunc (s *Stack) RemoveRoutes(match func(tcpip.Route) bool) {\n- s.route.mu.Lock()\n- defer s.route.mu.Unlock()\n+ s.routeMu.Lock()\n+ defer s.routeMu.Unlock()\nvar filteredRoutes []tcpip.Route\n- for _, route := range s.route.mu.table {\n+ for _, route := range s.routeTable {\nif !match(route) {\nfilteredRoutes = append(filteredRoutes, route)\n}\n}\n- s.route.mu.table = filteredRoutes\n+ s.routeTable = filteredRoutes\n}\n// NewEndpoint creates a new transport layer endpoint of the given protocol.\n@@ -797,18 +796,18 @@ func (s *Stack) removeNICLocked(id tcpip.NICID) tcpip.Error {\ndelete(s.nics, id)\n// Remove routes in-place. n tracks the number of routes written.\n- s.route.mu.Lock()\n+ s.routeMu.Lock()\nn := 0\n- for i, r := range s.route.mu.table {\n- s.route.mu.table[i] = tcpip.Route{}\n+ for i, r := range s.routeTable {\n+ s.routeTable[i] = tcpip.Route{}\nif r.NIC != id {\n// Keep this route.\n- s.route.mu.table[n] = r\n+ s.routeTable[n] = r\nn++\n}\n}\n- s.route.mu.table = s.route.mu.table[:n]\n- s.route.mu.Unlock()\n+ s.routeTable = s.routeTable[:n]\n+ s.routeMu.Unlock()\nreturn nic.remove()\n}\n@@ -1135,10 +1134,10 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n\n// Find a route to the remote with the route table.\nvar chosenRoute tcpip.Route\nif r := func() *Route {\n- s.route.mu.RLock()\n- defer s.route.mu.RUnlock()\n+ s.routeMu.RLock()\n+ defer s.routeMu.RUnlock()\n- for _, route := range s.route.mu.table {\n+ for _, route := range s.routeTable {\nif len(remoteAddr) != 0 && !route.Destination.Contains(remoteAddr) {\ncontinue\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add checklocks annotations for stack/route.go.
PiperOrigin-RevId: 423973318 |
259,858 | 24.01.2022 20:45:54 | 28,800 | 06ffd8eaa600eb34be63957dcb170cda5650896d | Add checklocks annotations for arp. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/arp/arp.go",
"new_path": "pkg/tcpip/network/arp/arp.go",
"diff": "@@ -57,25 +57,25 @@ type endpoint struct {\nnic stack.NetworkInterface\nstats sharedStats\n- mu struct {\n- sync.Mutex\n+ // mu protects annotated fields below.\n+ mu sync.Mutex\n+ // +checklocks:mu\ndad ip.DAD\n}\n-}\n// CheckDuplicateAddress implements stack.DuplicateAddressDetector.\nfunc (e *endpoint) CheckDuplicateAddress(addr tcpip.Address, h stack.DADCompletionHandler) stack.DADCheckAddressDisposition {\ne.mu.Lock()\ndefer e.mu.Unlock()\n- return e.mu.dad.CheckDuplicateAddressLocked(addr, h)\n+ return e.dad.CheckDuplicateAddressLocked(addr, h)\n}\n// SetDADConfigurations implements stack.DuplicateAddressDetector.\nfunc (e *endpoint) SetDADConfigurations(c stack.DADConfigurations) {\ne.mu.Lock()\ndefer e.mu.Unlock()\n- e.mu.dad.SetConfigsLocked(c)\n+ e.dad.SetConfigsLocked(c)\n}\n// DuplicateAddressProtocol implements stack.DuplicateAddressDetector.\n@@ -230,7 +230,7 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\nlinkAddr := tcpip.LinkAddress(h.HardwareAddressSender())\ne.mu.Lock()\n- e.mu.dad.StopLocked(addr, &stack.DADDupAddrDetected{HolderLinkAddress: linkAddr})\n+ e.dad.StopLocked(addr, &stack.DADDupAddrDetected{HolderLinkAddress: linkAddr})\ne.mu.Unlock()\n// The solicited, override, and isRouter flags are not available for ARP;\n@@ -280,7 +280,7 @@ func (p *protocol) NewEndpoint(nic stack.NetworkInterface, _ stack.TransportDisp\n}\ne.mu.Lock()\n- e.mu.dad.Init(&e.mu, p.options.DADConfigs, ip.DADOptions{\n+ e.dad.Init(&e.mu, p.options.DADConfigs, ip.DADOptions{\nClock: p.stack.Clock(),\nSecureRNG: p.stack.SecureRNG(),\n// ARP does not support sending nonce values.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add checklocks annotations for arp.
PiperOrigin-RevId: 423980993 |
259,907 | 24.01.2022 21:58:50 | 28,800 | f3ff82093eb55b5b53ad99576f8181867ecfedd0 | Wrap server-side panics in EREMOTEIO.
EREMOTEIO is a more appropriate generic error for a remote procedure call (RPC)
failure on the gofer. EFAULT means bad address and can be misleading to the
application as it will denote a MM layer related issue. | [
{
"change_type": "MODIFY",
"old_path": "pkg/lisafs/connection.go",
"new_path": "pkg/lisafs/connection.go",
"diff": "package lisafs\nimport (\n+ \"runtime/debug\"\n+\n\"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/flipcall\"\n\"gvisor.dev/gvisor/pkg/log\"\n@@ -156,7 +158,7 @@ func (c *Connection) respondError(comm Communicator, err unix.Errno) (MID, uint3\nreturn Error, respLen, nil\n}\n-func (c *Connection) handleMsg(comm Communicator, m MID, payloadLen uint32) (MID, uint32, []int) {\n+func (c *Connection) handleMsg(comm Communicator, m MID, payloadLen uint32) (retM MID, retPayloadLen uint32, retFDs []int) {\nif payloadLen > c.maxMessageSize {\nlog.Warningf(\"received payload is too large: %d bytes\", payloadLen)\nreturn c.respondError(comm, unix.EIO)\n@@ -165,7 +167,20 @@ func (c *Connection) handleMsg(comm Communicator, m MID, payloadLen uint32) (MID\n// c.close() has been called; the connection is shutting down.\nreturn c.respondError(comm, unix.ECONNRESET)\n}\n- defer c.reqGate.Leave()\n+ defer func() {\n+ c.reqGate.Leave()\n+\n+ // Don't allow a panic to propagate.\n+ if err := recover(); err != nil {\n+ // Include a useful log message.\n+ log.Warningf(\"panic in handler: %v\\n%s\", err, debug.Stack())\n+\n+ // Wrap in an EREMOTEIO error; we don't really have a better way to\n+ // describe this kind of error. EREMOTEIO is appropriate for a generic\n+ // failed RPC message.\n+ retM, retPayloadLen, retFDs = c.respondError(comm, unix.EREMOTEIO)\n+ }\n+ }()\nif !c.mounted && m != Mount {\nlog.Warningf(\"connection must first be mounted\")\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/p9test/client_test.go",
"new_path": "pkg/p9/p9test/client_test.go",
"diff": "@@ -45,8 +45,8 @@ func TestPanic(t *testing.T) {\n})\n// Attach to the client.\n- if _, err := c.Attach(\"/\"); err != unix.EFAULT {\n- t.Fatalf(\"got attach err %v, want EFAULT\", err)\n+ if _, err := c.Attach(\"/\"); err != unix.EREMOTEIO {\n+ t.Fatalf(\"got attach err %v, want EREMOTEIO\", err)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/server.go",
"new_path": "pkg/p9/server.go",
"diff": "@@ -494,10 +494,10 @@ func (cs *connState) handle(m message) (r message) {\n// Include a useful log message.\nlog.Warningf(\"panic in handler: %v\\n%s\", err, debug.Stack())\n- // Wrap in an EFAULT error; we don't really have a\n+ // Wrap in an EREMOTEIO error; we don't really have a\n// better way to describe this kind of error. It will\n// usually manifest as a result of the test framework.\n- r = newErrFromLinuxerr(linuxerr.EFAULT)\n+ r = newErrFromLinuxerr(linuxerr.EREMOTEIO)\n}\n}()\nif handler, ok := m.(handler); ok {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Wrap server-side panics in EREMOTEIO.
EREMOTEIO is a more appropriate generic error for a remote procedure call (RPC)
failure on the gofer. EFAULT means bad address and can be misleading to the
application as it will denote an MM layer related issue.
PiperOrigin-RevId: 423990374 |
259,962 | 25.01.2022 12:22:37 | 28,800 | 434e58d261210ebfd60fe133c1b01f6804fbe06f | Remove NICs on stack shutdown to ensure qdisc cleanup. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -1530,9 +1530,13 @@ func (s *Stack) Wait() {\np.Wait()\n}\n- s.mu.RLock()\n- defer s.mu.RUnlock()\n- for _, n := range s.nics {\n+ s.mu.Lock()\n+ defer s.mu.Unlock()\n+\n+ for id, n := range s.nics {\n+ // Remove NIC to ensure that qDisc goroutines are correctly\n+ // terminated on stack teardown.\n+ s.removeNICLocked(id)\nn.NetworkLinkEndpoint.Wait()\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove NICs on stack shutdown to ensure qdisc cleanup.
PiperOrigin-RevId: 424149803 |
259,858 | 25.01.2022 12:30:32 | 28,800 | b92bb28e6a87d49d1551a85b82476e792e1c2cbb | Fix release tags generation. | [
{
"change_type": "MODIFY",
"old_path": "tools/nogo/check/build.go",
"new_path": "tools/nogo/check/build.go",
"diff": "@@ -16,6 +16,7 @@ package check\nimport (\n\"fmt\"\n+ \"go/build\"\n\"io\"\n\"os\"\n@@ -39,7 +40,7 @@ var findStdPkg = func(path string) (io.ReadCloser, error) {\nreturn os.Open(fmt.Sprintf(\"%s/pkg/%s_%s/%s.a\", root, flags.GOOS, flags.GOARCH, path))\n}\n-// releaseTags returns nil, indicating that the defaults should be used.\n+// releaseTags returns the default release tags.\nvar releaseTags = func() ([]string, error) {\n- return nil, nil\n+ return build.Default.ReleaseTags, nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/check/check.go",
"new_path": "tools/nogo/check/check.go",
"diff": "@@ -33,8 +33,6 @@ import (\n\"path/filepath\"\n\"reflect\"\n\"regexp\"\n- \"runtime\"\n- \"strconv\"\n\"strings\"\n\"sync\"\n\"sync/atomic\"\n@@ -69,46 +67,12 @@ var (\nreleaseTagsErr error\n)\n-// versionTags generates all version tags.\n-//\n-// This function will panic if passed an invalid version.\n-func versionTags(v string) (tags []string) {\n- if len(v) < 2 || string(v[:2]) != \"go\" {\n- panic(fmt.Errorf(\"version %q is not valid\", v))\n- }\n- v = v[2:] // Strip go prefix.\n- v = strings.Split(v, \" \")[0]\n- v = strings.Split(v, \"-\")[0]\n- parts := strings.Split(v, \".\")\n- if len(parts) < 2 {\n- panic(fmt.Errorf(\"version %q lacks major and minor number\", v))\n- }\n- major, err := strconv.ParseInt(parts[0], 10, 64)\n- if err != nil {\n- panic(fmt.Errorf(\"version %q contains invalid major: %w\", v, err))\n- }\n- minor, err := strconv.ParseInt(parts[1], 10, 64)\n- if err != nil {\n- panic(fmt.Errorf(\"version %q contains invalid minor: %w\", v, err))\n- }\n- // Generate all compliant tags.\n- for i := int64(0); i <= minor; i++ {\n- tags = append(tags, fmt.Sprintf(\"go%d.%d\", major, i))\n- }\n- return tags\n-}\n-\n// shouldInclude indicates whether the file should be included.\nfunc shouldInclude(path string) (bool, error) {\ntagsOnce.Do(func() {\nif len(flags.BuildTags) > 0 {\nbuildTags = strings.Split(flags.BuildTags, \",\")\n}\n- if v, err := flags.Env(\"GOVERSION\"); err == nil {\n- buildTags = append(buildTags, versionTags(v)...)\n- } else {\n- buildTags = append(buildTags, versionTags(runtime.Version())...)\n- }\nreleaseTagsVal, releaseTagsErr = releaseTags()\n})\nif releaseTagsErr != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix release tags generation.
PiperOrigin-RevId: 424151576 |
259,885 | 25.01.2022 14:12:37 | 28,800 | edb6bd399e13d958515151cd69230863d7e92dc3 | Add caller for the Execve checkpoint.
Binary fields are VFS2-only. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -1014,6 +1014,7 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, ThreadID,\nargs.File = nil\ncase args.File != nil:\n// If File is set, take the File provided directly.\n+ args.Filename = args.File.PathnameWithDeleted(ctx)\ndefault:\n// Otherwise look at Argv and see if the first argument is a valid path.\nif len(args.Argv) == 0 {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_exec.go",
"new_path": "pkg/sentry/kernel/task_exec.go",
"diff": "@@ -68,7 +68,10 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n+ \"gvisor.dev/gvisor/pkg/sentry/fsbridge\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/sentry/mm\"\n+ \"gvisor.dev/gvisor/pkg/sentry/seccheck\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n)\n@@ -85,9 +88,21 @@ func (*execStop) Killable() bool { return true }\n// thread group and switching to newImage. Execve always takes ownership of\n// newImage.\n//\n+// If executable is not nil, it is the first executable file that was loaded in\n+// the process of obtaining newImage, and pathname is a path to it.\n+//\n// Preconditions: The caller must be running Task.doSyscallInvoke on the task\n// goroutine.\n-func (t *Task) Execve(newImage *TaskImage) (*SyscallControl, error) {\n+func (t *Task) Execve(newImage *TaskImage, argv, env []string, executable fsbridge.File, pathname string) (*SyscallControl, error) {\n+ // We can't clearly hold kernel package locks while stat'ing executable.\n+ if seccheck.Global.Enabled(seccheck.PointExecve) {\n+ mask, info := getExecveSeccheckInfo(t, argv, env, executable, pathname)\n+ if err := seccheck.Global.Execve(t, mask, &info); err != nil {\n+ newImage.release()\n+ return nil, err\n+ }\n+ }\n+\nt.tg.pidns.owner.mu.Lock()\ndefer t.tg.pidns.owner.mu.Unlock()\nt.tg.signalHandlers.mu.Lock()\n@@ -286,3 +301,54 @@ func (t *Task) promoteLocked() {\n}\noldLeader.exitNotifyLocked(false)\n}\n+\n+func getExecveSeccheckInfo(t *Task, argv, env []string, executable fsbridge.File, pathname string) (seccheck.ExecveFieldSet, seccheck.ExecveInfo) {\n+ req := seccheck.Global.ExecveReq()\n+ info := seccheck.ExecveInfo{\n+ Credentials: t.Credentials(),\n+ Argv: argv,\n+ Env: env,\n+ }\n+ var mask seccheck.ExecveFieldSet\n+ mask.Add(seccheck.ExecveFieldCredentials)\n+ mask.Add(seccheck.ExecveFieldArgv)\n+ mask.Add(seccheck.ExecveFieldEnv)\n+ if executable != nil {\n+ info.BinaryPath = pathname\n+ mask.Add(seccheck.ExecveFieldBinaryPath)\n+ if vfs2bridgeFile, ok := executable.(*fsbridge.VFSFile); ok {\n+ if req.Contains(seccheck.ExecveFieldBinaryMode) || req.Contains(seccheck.ExecveFieldBinaryUID) || req.Contains(seccheck.ExecveFieldBinaryGID) {\n+ var statOpts vfs.StatOptions\n+ if req.Contains(seccheck.ExecveFieldBinaryMode) {\n+ statOpts.Mask |= linux.STATX_TYPE | linux.STATX_MODE\n+ }\n+ if req.Contains(seccheck.ExecveFieldBinaryUID) {\n+ statOpts.Mask |= linux.STATX_UID\n+ }\n+ if req.Contains(seccheck.ExecveFieldBinaryGID) {\n+ statOpts.Mask |= linux.STATX_GID\n+ }\n+ if stat, err := vfs2bridgeFile.FileDescription().Stat(t, statOpts); err == nil {\n+ if stat.Mask&(linux.STATX_TYPE|linux.STATX_MODE) == (linux.STATX_TYPE | linux.STATX_MODE) {\n+ info.BinaryMode = stat.Mode\n+ mask.Add(seccheck.ExecveFieldBinaryMode)\n+ }\n+ if stat.Mask&linux.STATX_UID != 0 {\n+ info.BinaryUID = auth.KUID(stat.UID)\n+ mask.Add(seccheck.ExecveFieldBinaryUID)\n+ }\n+ if stat.Mask&linux.STATX_GID != 0 {\n+ info.BinaryGID = auth.KGID(stat.GID)\n+ mask.Add(seccheck.ExecveFieldBinaryGID)\n+ }\n+ }\n+ }\n+ // TODO(b/202293325): Decide if we actually want to offer binary\n+ // SHA256, which is very expensive.\n+ }\n+ }\n+ t.k.tasks.mu.RLock()\n+ defer t.k.tasks.mu.RUnlock()\n+ t.loadSeccheckInfoLocked(req.Invoker, &mask.Invoker, &info.Invoker)\n+ return mask, info\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_image.go",
"new_path": "pkg/sentry/kernel/task_image.go",
"diff": "@@ -137,11 +137,6 @@ func (t *Task) Stack() *arch.Stack {\n//\n// args.MemoryManager does not need to be set by the caller.\nfunc (k *Kernel) LoadTaskImage(ctx context.Context, args loader.LoadArgs) (*TaskImage, *syserr.Error) {\n- // If File is not nil, we should load that instead of resolving Filename.\n- if args.File != nil {\n- args.Filename = args.File.PathnameWithDeleted(ctx)\n- }\n-\n// Prepare a new user address space to load into.\nm := mm.NewMemoryManager(k, k, k.SleepForAddressSpaceActivation)\ndefer m.DecUsers(ctx)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/loader.go",
"new_path": "pkg/sentry/loader/loader.go",
"diff": "@@ -64,6 +64,10 @@ type LoadArgs struct {\n// Opener is used to open the executable file when 'File' is nil.\nOpener fsbridge.Lookup\n+ // If AfterOpen is not nil, it is called after every successful call to\n+ // Opener.OpenPath().\n+ AfterOpen func(f fsbridge.File)\n+\n// CloseOnExec indicates that the executable (or one of its parent\n// directories) was opened with O_CLOEXEC. If the executable is an\n// interpreter script, then cause an ENOENT error to occur, since the\n@@ -102,7 +106,14 @@ func openPath(ctx context.Context, args LoadArgs) (fsbridge.File, error) {\nFlags: linux.O_RDONLY,\nFileExec: true,\n}\n- return args.Opener.OpenPath(ctx, args.Filename, opts, args.RemainingTraversals, args.ResolveFinal)\n+ f, err := args.Opener.OpenPath(ctx, args.Filename, opts, args.RemainingTraversals, args.ResolveFinal)\n+ if err != nil {\n+ return f, err\n+ }\n+ if args.AfterOpen != nil {\n+ args.AfterOpen(f)\n+ }\n+ return f, nil\n}\n// checkIsRegularFile prevents us from trying to execute a directory, pipe, etc.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_thread.go",
"new_path": "pkg/sentry/syscalls/linux/sys_thread.go",
"diff": "@@ -144,6 +144,7 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr host\nreturn 0, nil, err\n}\nexecutable = fsbridge.NewFSFile(f)\n+ pathname = executable.PathnameWithDeleted(t)\n} else {\nwd = f.Dirent\nwd.IncRef()\n@@ -175,7 +176,7 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr host\nreturn 0, nil, se.ToError()\n}\n- ctrl, err := t.Execve(image)\n+ ctrl, err := t.Execve(image, argv, envv, nil, \"\")\nreturn 0, ctrl, err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/BUILD",
"new_path": "pkg/sentry/syscalls/linux/vfs2/BUILD",
"diff": "@@ -68,6 +68,7 @@ go_library(\n\"//pkg/sentry/loader\",\n\"//pkg/sentry/memmap\",\n\"//pkg/sentry/mm\",\n+ \"//pkg/sentry/seccheck\",\n\"//pkg/sentry/socket\",\n\"//pkg/sentry/socket/control\",\n\"//pkg/sentry/socket/unix/transport\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/execve.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/execve.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/fsbridge\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/loader\"\n+ \"gvisor.dev/gvisor/pkg/sentry/seccheck\"\nslinux \"gvisor.dev/gvisor/pkg/sentry/syscalls/linux\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n)\n@@ -73,6 +74,11 @@ func execveat(t *kernel.Task, dirfd int32, pathnameAddr, argvAddr, envvAddr host\nroot := t.FSContext().RootDirectoryVFS2()\ndefer root.DecRef(t)\nvar executable fsbridge.File\n+ defer func() {\n+ if executable != nil {\n+ executable.DecRef(t)\n+ }\n+ }()\ncloseOnExec := false\nif path := fspath.Parse(pathname); dirfd != linux.AT_FDCWD && !path.Absolute {\n// We must open the executable ourselves since dirfd is used as the\n@@ -105,8 +111,8 @@ func execveat(t *kernel.Task, dirfd int32, pathnameAddr, argvAddr, envvAddr host\nif err != nil {\nreturn 0, nil, err\n}\n- defer file.DecRef(t)\nexecutable = fsbridge.NewVFSFile(file)\n+ pathname = executable.PathnameWithDeleted(t)\n}\n// Load the new TaskImage.\n@@ -125,12 +131,25 @@ func execveat(t *kernel.Task, dirfd int32, pathnameAddr, argvAddr, envvAddr host\nEnvv: envv,\nFeatures: t.Kernel().FeatureSet(),\n}\n+ if seccheck.Global.Enabled(seccheck.PointExecve) {\n+ // Retain the first executable file that is opened (which may open\n+ // multiple executable files while resolving interpreter scripts).\n+ if executable == nil {\n+ loadArgs.AfterOpen = func(f fsbridge.File) {\n+ if executable == nil {\n+ f.IncRef()\n+ executable = f\n+ pathname = executable.PathnameWithDeleted(t)\n+ }\n+ }\n+ }\n+ }\nimage, se := t.Kernel().LoadTaskImage(t, loadArgs)\nif se != nil {\nreturn 0, nil, se.ToError()\n}\n- ctrl, err := t.Execve(image)\n+ ctrl, err := t.Execve(image, argv, envv, executable, pathname)\nreturn 0, ctrl, err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add caller for the Execve checkpoint.
Binary fields are VFS2-only.
PiperOrigin-RevId: 424176912 |
259,853 | 25.01.2022 16:28:20 | 28,800 | 18dca1bf998524f53ee03bc413382bfcc880f967 | kvm: fix a race condition between seccompMMapHandler and machine.Destroy
A machine file descriptor has to be closed only when we are sure that
it isn't used by seccompMMapHandler. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine.go",
"new_path": "pkg/sentry/platform/kvm/machine.go",
"diff": "@@ -369,10 +369,6 @@ func (m *machine) mapPhysical(physical, length uintptr, phyRegions []physicalReg\nfunc (m *machine) Destroy() {\nruntime.SetFinalizer(m, nil)\n- machinePoolMu.Lock()\n- machinePool[m.machinePoolIndex].Store(nil)\n- machinePoolMu.Unlock()\n-\n// Destroy vCPUs.\nfor _, c := range m.vCPUsByID {\nif c == nil {\n@@ -396,6 +392,9 @@ func (m *machine) Destroy() {\n}\n}\n+ machinePool[m.machinePoolIndex].Store(nil)\n+ seccompMmapSync()\n+\n// vCPUs are gone: teardown machine state.\nif err := unix.Close(m.fd); err != nil {\npanic(fmt.Sprintf(\"error closing VM fd: %v\", err))\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine_unsafe.go",
"new_path": "pkg/sentry/platform/kvm/machine_unsafe.go",
"diff": "@@ -24,6 +24,7 @@ package kvm\nimport (\n\"fmt\"\n\"math\"\n+ \"runtime\"\n\"sync/atomic\"\n\"unsafe\"\n@@ -172,6 +173,26 @@ func (c *vCPU) setSignalMask() error {\nreturn nil\n}\n+// seccompMmapHandlerCnt is a number of currently running seccompMmapHandler\n+// instances.\n+var seccompMmapHandlerCnt int64\n+\n+// seccompMmapSync waits for all currently runnuing seccompMmapHandler\n+// instances.\n+//\n+// The standard locking primitives can't be used in this case since\n+// seccompMmapHandler is executed in a signal handler context.\n+//\n+// It can be implemented by using FUTEX calls, but it will require to call\n+// FUTEX_WAKE from seccompMmapHandler. Consider machine.Destroy is called only\n+// once, and the probability is racing with seccompMmapHandler is very low the\n+// spinlock-like way looks more reasonable.\n+func seccompMmapSync() {\n+ for atomic.LoadInt64(&seccompMmapHandlerCnt) != 0 {\n+ runtime.Gosched()\n+ }\n+}\n+\n// seccompMmapHandler is a signal handler for runtime mmap system calls\n// that are trapped by seccomp.\n//\n@@ -185,6 +206,7 @@ func seccompMmapHandler(context unsafe.Pointer) {\nreturn\n}\n+ atomic.AddInt64(&seccompMmapHandlerCnt, 1)\nfor i := uint32(0); i < atomic.LoadUint32(&machinePoolLen); i++ {\nm := machinePool[i].Load()\nif m == nil {\n@@ -213,4 +235,5 @@ func seccompMmapHandler(context unsafe.Pointer) {\nvirtual += length\n}\n}\n+ atomic.AddInt64(&seccompMmapHandlerCnt, -1)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | kvm: fix a race condition between seccompMMapHandler and machine.Destroy
A machine file descriptor has to be closed only when we are sure that
it isn't used by seccompMMapHandler.
PiperOrigin-RevId: 424207803 |
259,853 | 25.01.2022 17:39:02 | 28,800 | e3c34d68d345d8142489de7e08e93b744627881d | Disable VMX and SVM in guest CPUID.
The original version converts KVM cpuid entries into cpuid.Static
to be able to modify them. But such a conversion has side effects
that we need to avoid, and so it looks reasonable to modify KVM
cpuid entries directly. | [
{
"change_type": "MODIFY",
"old_path": "pkg/cpuid/features_amd64.go",
"new_path": "pkg/cpuid/features_amd64.go",
"diff": "@@ -32,17 +32,33 @@ func featureID(b block, bit int) Feature {\n}\n// block returns the block associated with the feature.\n-func (f *Feature) block() block {\n- return block((*f) / blockSize)\n+func (f Feature) block() block {\n+ return block(f / blockSize)\n}\n// Bit returns the bit associated with the feature.\n-func (f *Feature) bit() uint32 {\n- return uint32(1 << ((*f) % blockSize))\n+func (f Feature) bit() uint32 {\n+ return uint32(1 << (f % blockSize))\n+}\n+\n+// ChangeableSet is a feature set that can allows changes.\n+type ChangeableSet interface {\n+ Query(in In) Out\n+ Set(in In, out Out)\n+}\n+\n+// Set sets the given feature.\n+func (f Feature) Set(s ChangeableSet) {\n+ f.set(s, true)\n+}\n+\n+// Unset unsets the given feature.\n+func (f Feature) Unset(s ChangeableSet) {\n+ f.set(s, false)\n}\n// set sets the given feature.\n-func (f *Feature) set(s Static, on bool) {\n+func (f Feature) set(s ChangeableSet, on bool) {\nswitch f.block() {\ncase 0:\nout := s.Query(In{Eax: uint32(featureInfo)})\n@@ -51,7 +67,7 @@ func (f *Feature) set(s Static, on bool) {\n} else {\nout.Ecx &^= f.bit()\n}\n- s[In{Eax: uint32(featureInfo)}] = out\n+ s.Set(In{Eax: uint32(featureInfo)}, out)\ncase 1:\nout := s.Query(In{Eax: uint32(featureInfo)})\nif on {\n@@ -59,7 +75,7 @@ func (f *Feature) set(s Static, on bool) {\n} else {\nout.Edx &^= f.bit()\n}\n- s[In{Eax: uint32(featureInfo)}] = out\n+ s.Set(In{Eax: uint32(featureInfo)}, out)\ncase 2:\nout := s.Query(In{Eax: uint32(extendedFeatureInfo)})\nif on {\n@@ -67,7 +83,7 @@ func (f *Feature) set(s Static, on bool) {\n} else {\nout.Ebx &^= f.bit()\n}\n- s[In{Eax: uint32(extendedFeatureInfo)}] = out\n+ s.Set(In{Eax: uint32(extendedFeatureInfo)}, out)\ncase 3:\nout := s.Query(In{Eax: uint32(extendedFeatureInfo)})\nif on {\n@@ -75,12 +91,12 @@ func (f *Feature) set(s Static, on bool) {\n} else {\nout.Ecx &^= f.bit()\n}\n- s[In{Eax: uint32(extendedFeatureInfo)}] = out\n+ s.Set(In{Eax: uint32(extendedFeatureInfo)}, out)\ncase 4:\n// Need to turn on the bit in block 0.\nout := s.Query(In{Eax: uint32(featureInfo)})\nout.Ecx |= (1 << 26)\n- s[In{Eax: uint32(featureInfo)}] = out\n+ s.Set(In{Eax: uint32(featureInfo)}, out)\nout = s.Query(In{Eax: xSaveInfoSub.eax(), Ecx: xSaveInfoSub.ecx()})\nif on {\n@@ -88,14 +104,14 @@ func (f *Feature) set(s Static, on bool) {\n} else {\nout.Eax &^= f.bit()\n}\n- s[In{Eax: xSaveInfoSub.eax(), Ecx: xSaveInfoSub.ecx()}] = out\n+ s.Set(In{Eax: xSaveInfoSub.eax(), Ecx: xSaveInfoSub.ecx()}, out)\ncase 5, 6:\n// Need to enable extended features.\nout := s.Query(In{Eax: uint32(extendedFunctionInfo)})\nif out.Eax < uint32(extendedFeatures) {\nout.Eax = uint32(extendedFeatures)\n}\n- s[In{Eax: uint32(extendedFunctionInfo)}] = out\n+ s.Set(In{Eax: uint32(extendedFunctionInfo)}, out)\nout = s.Query(In{Eax: uint32(extendedFeatures)})\nif f.block() == 5 {\nif on {\n@@ -110,14 +126,14 @@ func (f *Feature) set(s Static, on bool) {\nout.Edx &^= f.bit()\n}\n}\n- s[In{Eax: uint32(extendedFeatures)}] = out\n+ s.Set(In{Eax: uint32(extendedFeatures)}, out)\n}\n}\n// check checks for the given feature.\n//\n//go:nosplit\n-func (f *Feature) check(fs FeatureSet) bool {\n+func (f Feature) check(fs FeatureSet) bool {\nswitch f.block() {\ncase 0:\n_, _, cx, _ := fs.query(featureInfo)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/cpuid/static_amd64.go",
"new_path": "pkg/cpuid/static_amd64.go",
"diff": "@@ -118,6 +118,11 @@ func (s Static) Remove(feature Feature) Static {\nreturn s\n}\n+// Set implements ChangeableSet.Set.\n+func (s Static) Set(in In, out Out) {\n+ s[in] = out\n+}\n+\n// Query implements Function.Query.\nfunc (s Static) Query(in In) Out {\nin.normalize()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm_amd64.go",
"new_path": "pkg/sentry/platform/kvm/kvm_amd64.go",
"diff": "@@ -196,17 +196,44 @@ func (c *cpuidEntries) Query(in cpuid.In) (out cpuid.Out) {\nreturn\n}\n+// Set implements cpuid.ChangeableSet.Set.\n+func (c *cpuidEntries) Set(in cpuid.In, out cpuid.Out) {\n+ i := 0\n+ for ; i < int(c.nr); i++ {\n+ if c.entries[i].function == in.Eax && c.entries[i].index == in.Ecx {\n+ break\n+ }\n+ }\n+ if i == _KVM_NR_CPUID_ENTRIES {\n+ panic(\"exceede KVM_NR_CPUID_ENTRIES\")\n+ }\n+\n+ c.entries[i].eax = out.Eax\n+ c.entries[i].ebx = out.Ebx\n+ c.entries[i].ecx = out.Ecx\n+ c.entries[i].edx = out.Edx\n+ if i == int(c.nr) {\n+ c.nr++\n+ }\n+}\n+\n// updateGlobalOnce does global initialization. It has to be called only once.\nfunc updateGlobalOnce(fd int) error {\nerr := updateSystemValues(int(fd))\n- // Create a static feature set from the KVM entries. Then, we\n- // explicitly set OSXSAVE, since this does not come in the feature\n- // entries, but can be provided when the relevant CR4 bit is set.\nfs := cpuid.FeatureSet{\nFunction: &cpuidSupported,\n}\n- s := fs.ToStatic()\n- s.Add(cpuid.X86FeatureOSXSAVE)\n+ // Calculate whether guestPCID is supported.\n+ hasGuestPCID = fs.HasFeature(cpuid.X86FeaturePCID)\n+ // Create a static feature set from the KVM entries. Then, we\n+ // explicitly set OSXSAVE, since this does not come in the feature\n+ // entries, but can be provided when the relevant CR4 bit is set.\n+ s := &cpuidSupported\n+ cpuid.X86FeatureOSXSAVE.Set(s)\n+ // Explicitly disable nested virtualization. Since we don't provide\n+ // any virtualization APIs, there is no need to enable this feature.\n+ cpuid.X86FeatureVMX.Unset(s)\n+ cpuid.X86FeatureSVM.Unset(s)\nring0.Init(cpuid.FeatureSet{\nFunction: s,\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm_amd64_unsafe.go",
"new_path": "pkg/sentry/platform/kvm/kvm_amd64_unsafe.go",
"diff": "@@ -62,18 +62,6 @@ func updateSystemValues(fd int) error {\nreturn fmt.Errorf(\"getting supported CPUID (2nd attempt): %v\", errno)\n}\n- // Calculate whether guestPCID is supported.\n- //\n- // FIXME(ascannell): These should go through the much more pleasant\n- // cpuid package interfaces, once a way to accept raw kvm CPUID entries\n- // is plumbed (or some rough equivalent).\n- for i := 0; i < int(cpuidSupported.nr); i++ {\n- entry := cpuidSupported.entries[i]\n- if entry.function == 1 && entry.index == 0 && entry.ecx&(1<<17) != 0 {\n- hasGuestPCID = true // Found matching PCID in guest feature set.\n- }\n- }\n-\n// Success.\nreturn nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Disable VMX and SVM in guest CPUID.
The original version converts KVM cpuid entries into cpuid.Static
to be able to modify them. But such a conversion has side effects
that we need to avoid, and so it looks reasonable to modify KVM
cpuid entries directly.
PiperOrigin-RevId: 424221630 |
259,962 | 25.01.2022 22:02:35 | 28,800 | c89b6d26118c3c20d1a2c5ea75584e78288ee117 | Add leak checking to tcp tests. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/BUILD",
"new_path": "pkg/tcpip/transport/tcp/BUILD",
"diff": "@@ -82,11 +82,14 @@ go_test(\nname = \"tcp_test\",\nsize = \"small\",\nsrcs = [\n+ \"main_test.go\",\n\"segment_test.go\",\n\"timer_test.go\",\n],\nlibrary = \":tcp\",\ndeps = [\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/sleep\",\n\"//pkg/tcpip/buffer\",\n\"//pkg/tcpip/faketime\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/transport/tcp/main_test.go",
"diff": "+// Copyright 2022 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package tcp\n+\n+import (\n+ \"os\"\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n+)\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/test/e2e/BUILD",
"new_path": "pkg/tcpip/transport/tcp/test/e2e/BUILD",
"diff": "@@ -10,6 +10,8 @@ go_test(\ndeps = [\n\":e2e\",\n\"//pkg/rand\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/sync\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/checker\",\n@@ -36,6 +38,8 @@ go_test(\nsrcs = [\"dual_stack_test.go\"],\ndeps = [\n\":e2e\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/checker\",\n\"//pkg/tcpip/header\",\n@@ -70,6 +74,8 @@ go_test(\nsrcs = [\"forwarder_test.go\"],\ndeps = [\n\":e2e\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/header\",\n\"//pkg/tcpip/transport/tcp\",\n@@ -92,6 +98,8 @@ go_test(\nsize = \"small\",\nsrcs = [\"sack_scoreboard_test.go\"],\ndeps = [\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip/header\",\n\"//pkg/tcpip/seqnum\",\n\"//pkg/tcpip/transport/tcp\",\n@@ -104,6 +112,8 @@ go_test(\nsrcs = [\"tcp_timestamp_test.go\"],\ndeps = [\n\":e2e\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/checker\",\n\"//pkg/tcpip/header\",\n@@ -120,6 +130,8 @@ go_test(\nsrcs = [\"tcp_rack_test.go\"],\ndeps = [\n\":e2e\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/header\",\n\"//pkg/tcpip/seqnum\",\n@@ -135,6 +147,8 @@ go_test(\nsrcs = [\"tcp_sack_test.go\"],\ndeps = [\n\":e2e\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/checker\",\n\"//pkg/tcpip/header\",\n@@ -152,6 +166,8 @@ go_test(\nsrcs = [\"tcp_noracedetector_test.go\"],\ndeps = [\n\":e2e\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/header\",\n\"//pkg/tcpip/transport/tcp\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/test/e2e/dual_stack_test.go",
"new_path": "pkg/tcpip/transport/tcp/test/e2e/dual_stack_test.go",
"diff": "package dual_stack_test\nimport (\n+ \"os\"\n\"strings\"\n\"testing\"\n\"time\"\n\"github.com/google/go-cmp/cmp\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/checker\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n@@ -545,3 +548,13 @@ func TestV4ListenCloseOnV4(t *testing.T) {\n// Test acceptance.\ntestV4ListenClose(t, c)\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ // Allow TCP async work to complete to avoid false reports of leaks.\n+ // TODO(gvisor.dev/issue/5940): Use fake clock in tests.\n+ time.Sleep(1 * time.Second)\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/test/e2e/forwarder_test.go",
"new_path": "pkg/tcpip/transport/tcp/test/e2e/forwarder_test.go",
"diff": "package forwarder_test\nimport (\n+ \"os\"\n\"testing\"\n\"time\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/transport/tcp\"\n@@ -100,3 +103,13 @@ func TestForwarderDoesNotRejectECNFlags(t *testing.T) {\n})\n}\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ // Allow TCP async work to complete to avoid false reports of leaks.\n+ // TODO(gvisor.dev/issue/5940): Use fake clock in tests.\n+ time.Sleep(1 * time.Second)\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/test/e2e/sack_scoreboard_test.go",
"new_path": "pkg/tcpip/transport/tcp/test/e2e/sack_scoreboard_test.go",
"diff": "package sack_scoreboard_test\nimport (\n+ \"os\"\n\"testing\"\n+ \"time\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/seqnum\"\n\"gvisor.dev/gvisor/pkg/tcpip/transport/tcp\"\n@@ -247,3 +251,13 @@ func TestSACKScoreboardDelete(t *testing.T) {\nt.Fatalf(\"incorrect sacked bytes in scoreboard got: %v, want: %v\", got, want)\n}\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ // Allow TCP async work to complete to avoid false reports of leaks.\n+ // TODO(gvisor.dev/issue/5940): Use fake clock in tests.\n+ time.Sleep(1 * time.Second)\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_noracedetector_test.go",
"new_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_noracedetector_test.go",
"diff": "@@ -25,9 +25,12 @@ import (\n\"bytes\"\n\"fmt\"\n\"math\"\n+ \"os\"\n\"testing\"\n\"time\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/transport/tcp\"\n@@ -558,3 +561,13 @@ func TestRetransmit(t *testing.T) {\nc.CheckNoPacketTimeout(\"More packets received than expected for this cwnd.\", 50*time.Millisecond)\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ // Allow TCP async work to complete to avoid false reports of leaks.\n+ // TODO(gvisor.dev/issue/5940): Use fake clock in tests.\n+ time.Sleep(1 * time.Second)\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_rack_test.go",
"new_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_rack_test.go",
"diff": "@@ -17,9 +17,12 @@ package tcp_rack_test\nimport (\n\"bytes\"\n\"fmt\"\n+ \"os\"\n\"testing\"\n\"time\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/seqnum\"\n@@ -1061,3 +1064,13 @@ func TestRACKWithWindowFull(t *testing.T) {\n// No packet should be received as the receive window size is zero.\nc.CheckNoPacket(\"unexpected packet received after userTimeout has expired\")\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ // Allow TCP async work to complete to avoid false reports of leaks.\n+ // TODO(gvisor.dev/issue/5940): Use fake clock in tests.\n+ time.Sleep(1 * time.Second)\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_sack_test.go",
"new_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_sack_test.go",
"diff": "@@ -18,10 +18,13 @@ import (\n\"bytes\"\n\"fmt\"\n\"log\"\n+ \"os\"\n\"reflect\"\n\"testing\"\n\"time\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/checker\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n@@ -942,3 +945,13 @@ func TestNoSpuriousRecoveryWithDSACK(t *testing.T) {\nverifySpuriousRecoveryMetric(t, c, 0 /* numSpuriousRecovery */, 0 /* numSpuriousRTO */)\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ // Allow TCP async work to complete to avoid false reports of leaks.\n+ // TODO(gvisor.dev/issue/5940): Use fake clock in tests.\n+ time.Sleep(1 * time.Second)\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_test.go",
"new_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_test.go",
"diff": "@@ -19,12 +19,15 @@ import (\n\"fmt\"\n\"io/ioutil\"\n\"math\"\n+ \"os\"\n\"strings\"\n\"testing\"\n\"time\"\n\"github.com/google/go-cmp/cmp\"\n\"gvisor.dev/gvisor/pkg/rand\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/checker\"\n@@ -8623,3 +8626,13 @@ func TestECNFlagsAccept(t *testing.T) {\n})\n}\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ // Allow TCP async work to complete to avoid false reports of leaks.\n+ // TODO(gvisor.dev/issue/5940): Use fake clock in tests.\n+ time.Sleep(1 * time.Second)\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_timestamp_test.go",
"new_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_timestamp_test.go",
"diff": "@@ -17,10 +17,13 @@ package tcp_timestamp_test\nimport (\n\"bytes\"\n\"math/rand\"\n+ \"os\"\n\"testing\"\n\"time\"\n\"github.com/google/go-cmp/cmp\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/checker\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n@@ -310,3 +313,13 @@ func TestSegmentNotDroppedWhenTimestampMissing(t *testing.T) {\nt.Fatalf(\"Data is different: got: %v, want: %v\", got, want)\n}\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ // Allow TCP async work to complete to avoid false reports of leaks.\n+ // TODO(gvisor.dev/issue/5940): Use fake clock in tests.\n+ time.Sleep(1 * time.Second)\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/testing/context/context.go",
"new_path": "pkg/tcpip/transport/tcp/testing/context/context.go",
"diff": "@@ -499,6 +499,7 @@ func (c *Context) SendPacketWithAddrs(payload []byte, h *Headers, src, dst tcpip\npkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: c.BuildSegmentWithAddrs(payload, h, src, dst),\n})\n+ defer pkt.DecRef()\nc.linkEP.InjectInbound(ipv4.ProtocolNumber, pkt)\n}\n@@ -672,6 +673,7 @@ func (c *Context) SendV6PacketWithAddrs(payload []byte, h *Headers, src, dst tcp\npkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nData: buf.ToVectorisedView(),\n})\n+ defer pkt.DecRef()\nc.linkEP.InjectInbound(ipv6.ProtocolNumber, pkt)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add leak checking to tcp tests.
PiperOrigin-RevId: 424258607 |
259,962 | 25.01.2022 23:14:16 | 28,800 | ee08e56d4ce6ca0dfa23f75b6a10800b15c35ea4 | Add leak checking to transport internal network tests. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/internal/network/BUILD",
"new_path": "pkg/tcpip/transport/internal/network/BUILD",
"diff": "@@ -28,6 +28,8 @@ go_test(\nsrcs = [\"endpoint_test.go\"],\ndeps = [\n\":network\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n\"//pkg/tcpip/checker\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/internal/network/endpoint_test.go",
"new_path": "pkg/tcpip/transport/internal/network/endpoint_test.go",
"diff": "@@ -16,9 +16,12 @@ package network_test\nimport (\n\"fmt\"\n+ \"os\"\n\"testing\"\n\"github.com/google/go-cmp/cmp\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/checker\"\n@@ -198,10 +201,12 @@ func TestEndpointStateTransitions(t *testing.T) {\n}, info); diff != \"\" {\nt.Errorf(\"write packet info mismatch (-want +got):\\n%s\", diff)\n}\n- if err := ctx.WritePacket(stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ injectPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\nReserveHeaderBytes: int(info.MaxHeaderLength),\nData: data.ToVectorisedView(),\n- }), false /* headerIncluded */); err != nil {\n+ })\n+ defer injectPkt.DecRef()\n+ if err := ctx.WritePacket(injectPkt, false /* headerIncluded */); err != nil {\nt.Fatalf(\"ctx.WritePacket(_, false): %s\", err)\n}\nif pkt := e.Read(); pkt == nil {\n@@ -316,3 +321,10 @@ func TestBindNICID(t *testing.T) {\n})\n}\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add leak checking to transport internal network tests.
PiperOrigin-RevId: 424267559 |
259,853 | 25.01.2022 23:14:19 | 28,800 | daea5b76742c7f999c096fa1009dd46b71e53cbb | kvm: Check that VMX and SVM are disabled in guest CPUID | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/BUILD",
"new_path": "pkg/sentry/platform/kvm/BUILD",
"diff": "@@ -131,6 +131,7 @@ go_test(\n],\ndeps = [\n\"//pkg/abi/linux\",\n+ \"//pkg/cpuid\",\n\"//pkg/hostarch\",\n\"//pkg/memutil\",\n\"//pkg/ring0\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm_amd64_test.go",
"new_path": "pkg/sentry/platform/kvm/kvm_amd64_test.go",
"diff": "@@ -21,6 +21,7 @@ import (\n\"testing\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/cpuid\"\n\"gvisor.dev/gvisor/pkg/ring0\"\n\"gvisor.dev/gvisor/pkg/ring0/pagetables\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n@@ -88,3 +89,19 @@ func TestMXCSR(t *testing.T) {\nreturn false\n})\n}\n+\n+//go:nosplit\n+func nestedVirtIsOn(c *vCPU, fs *cpuid.FeatureSet) bool {\n+ bluepill(c)\n+ return fs.HasFeature(cpuid.X86FeatureVMX) || fs.HasFeature(cpuid.X86FeatureSVM)\n+\n+}\n+\n+func TestKernelCPUID(t *testing.T) {\n+ bluepillTest(t, func(c *vCPU) {\n+ fs := cpuid.HostFeatureSet()\n+ if nestedVirtIsOn(c, &fs) {\n+ t.Fatalf(\"Nested virtualization is enabled\")\n+ }\n+ })\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | kvm: Check that VMX and SVM are disabled in guest CPUID
PiperOrigin-RevId: 424267563 |
259,962 | 26.01.2022 09:55:27 | 28,800 | b57e94c30307300263820b0ed49bbb546984a0bd | Enable leak checker for tcp_conntrack tests. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcpconntrack/BUILD",
"new_path": "pkg/tcpip/transport/tcpconntrack/BUILD",
"diff": "@@ -18,6 +18,8 @@ go_test(\nsrcs = [\"tcp_conntrack_test.go\"],\ndeps = [\n\":tcpconntrack\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/tcpip/header\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcpconntrack/tcp_conntrack_test.go",
"new_path": "pkg/tcpip/transport/tcpconntrack/tcp_conntrack_test.go",
"diff": "package tcpconntrack_test\nimport (\n+ \"os\"\n\"testing\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack\"\n)\n@@ -515,3 +518,10 @@ func TestIgnoreBadResetOnSynSent(t *testing.T) {\nfunc dataLen(tcp header.TCP) int {\nreturn len(tcp) - int(tcp.DataOffset())\n}\n+\n+func TestMain(m *testing.M) {\n+ refs.SetLeakMode(refs.LeaksPanic)\n+ code := m.Run()\n+ refsvfs2.DoLeakCheck()\n+ os.Exit(code)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable leak checker for tcp_conntrack tests.
PiperOrigin-RevId: 424374964 |
259,858 | 26.01.2022 11:05:19 | 28,800 | c18ec0b53cf2ef47ae5660bd2ae392c48047229f | Fix race build error.
This adds a test to smoke-tests to ensure that the race build does not
break again. In debugging this issue, a race in the nogo tool itself
was discovered, and a related fix is included. | [
{
"change_type": "MODIFY",
"old_path": "Makefile",
"new_path": "Makefile",
"diff": "@@ -187,6 +187,7 @@ debian: ## Builds the debian packages.\nsmoke-tests: ## Runs a simple smoke test after build runsc.\n@$(call run,//runsc,--alsologtostderr --network none --debug --TESTONLY-unsafe-nonroot=true --rootless do true)\n+ @$(call run,$(RACE_FLAGS) //runsc:runsc-race,--alsologtostderr --network none --debug --TESTONLY-unsafe-nonroot=true --rootless do true)\n.PHONY: smoke-tests\nnogo-tests:\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/bazel.mk",
"new_path": "tools/bazel.mk",
"diff": "@@ -40,6 +40,7 @@ BRANCH_NAME := $(shell (git branch --show-current 2>/dev/null || \\\ngit rev-parse --abbrev-ref HEAD 2>/dev/null) | \\\nxargs -n 1 basename 2>/dev/null)\nBUILD_ROOTS := bazel-bin/ bazel-out/\n+RACE_FLAGS := --@io_bazel_rules_go//go/config:race\n# Bazel container configuration (see below).\nUSER := $(shell whoami)\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/bazeldefs/defs.bzl",
"new_path": "tools/bazeldefs/defs.bzl",
"diff": "@@ -34,6 +34,7 @@ def select_system(linux = [\"__linux__\"], darwin = [], **kwargs):\n})\narch_config = [\n+ \"@io_bazel_rules_go//go/config:race\",\n\"//command_line_option:cpu\",\n\"//command_line_option:crosstool_top\",\n\"//command_line_option:platforms\",\n@@ -41,6 +42,9 @@ arch_config = [\ndef arm64_config(settings, attr):\nreturn {\n+ # Race builds are always disabled for cross-architecture generation. We\n+ # can't run it locally anyways, what value can this provide?\n+ \"@io_bazel_rules_go//go/config:race\": False,\n\"//command_line_option:cpu\": \"aarch64\",\n\"//command_line_option:crosstool_top\": \"@crosstool//:toolchains\",\n\"//command_line_option:platforms\": \"@io_bazel_rules_go//go/toolchain:linux_arm64\",\n@@ -48,6 +52,8 @@ def arm64_config(settings, attr):\ndef amd64_config(settings, attr):\nreturn {\n+ # See above.\n+ \"@io_bazel_rules_go//go/config:race\": False,\n\"//command_line_option:cpu\": \"k8\",\n\"//command_line_option:crosstool_top\": \"@crosstool//:toolchains\",\n\"//command_line_option:platforms\": \"@io_bazel_rules_go//go/toolchain:linux_amd64\",\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/nogo/defs.bzl",
"new_path": "tools/nogo/defs.bzl",
"diff": "@@ -112,7 +112,7 @@ nogo_stdlib = go_rule(\nattrs = {\n\"_nogo\": attr.label(\ndefault = \"//tools/nogo:nogo\",\n- cfg = \"host\",\n+ cfg = \"exec\",\n),\n\"_target\": attr.label(\ndefault = \"//tools/nogo:target\",\n@@ -319,7 +319,7 @@ nogo_aspect = go_rule(\nattrs = {\n\"_nogo\": attr.label(\ndefault = \"//tools/nogo:nogo\",\n- cfg = \"host\",\n+ cfg = \"exec\",\n),\n\"_target\": attr.label(\ndefault = \"//tools/nogo:target\",\n@@ -329,7 +329,7 @@ nogo_aspect = go_rule(\n# appears to be reserved for some internal bazel use.\n\"_nogo_stdlib\": attr.label(\ndefault = \"//tools/nogo:stdlib\",\n- cfg = \"host\",\n+ cfg = \"target\",\n),\n},\n)\n@@ -411,7 +411,7 @@ nogo_test = rule(\n),\n\"_nogo\": attr.label(\ndefault = \"//tools/nogo:nogo\",\n- cfg = \"host\",\n+ cfg = \"exec\",\n),\n\"_target\": attr.label(\ndefault = \"//tools/nogo:target\",\n@@ -493,12 +493,12 @@ nogo_facts = go_rule(\n),\n\"_nogo\": attr.label(\ndefault = \"//tools/nogo:nogo\",\n- cfg = \"host\",\n+ cfg = \"exec\",\n),\n# See _nogo_aspect, above.\n\"_nogo_stdlib\": attr.label(\ndefault = \"//tools/nogo:stdlib\",\n- cfg = \"host\",\n+ cfg = \"target\",\n),\n\"_target\": attr.label(\ndefault = \"//tools/nogo:target\",\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/worker/worker.go",
"new_path": "tools/worker/worker.go",
"diff": "@@ -32,7 +32,6 @@ import (\n\"sort\"\n\"strings\"\n\"sync\"\n- \"time\"\n_ \"net/http/pprof\" // For profiling.\n@@ -243,6 +242,19 @@ func allCacheStats() string {\nreturn sb.String()\n}\n+// safeBuffer is a trivial wrapper around bytes.Buffer.\n+type safeBuffer struct {\n+ mu sync.Mutex\n+ bytes.Buffer\n+}\n+\n+// Write implements io.Writer.Write.\n+func (s *safeBuffer) Write(p []byte) (int, error) {\n+ s.mu.Lock()\n+ defer s.mu.Unlock()\n+ return s.Buffer.Write(p)\n+}\n+\n// Work invokes the main function.\nfunc Work(run func([]string) int) {\nflag.CommandLine.Parse(os.Args[1:])\n@@ -298,8 +310,6 @@ func Work(run func([]string) int) {\nlog.Fatalf(\"unable to move stdout: %v\", err)\n}\n}\n-\n- // Best-effort: collect logs.\nrPipe, wPipe, err := os.Pipe()\nif err != nil {\nlog.Fatalf(\"unable to create pipe: %v\", err)\n@@ -310,8 +320,8 @@ func Work(run func([]string) int) {\nif err := unix.Dup2(int(wPipe.Fd()), 2); err != nil {\nlog.Fatalf(\"error duping over stderr: %v\", err)\n}\n- wPipe.Close()\n- defer rPipe.Close()\n+ wPipe.Close() // Still open at stdout, stderr.\n+ rPipe.Close() // Read end of pipe is now closed.\n// Read requests from stdin.\ninput := bufio.NewReader(os.NewFile(0, \"input\"))\n@@ -348,30 +358,16 @@ func Work(run func([]string) int) {\n}\n// Prepare logging.\n- outputBuffer := bytes.NewBuffer(nil)\n+ var outputBuffer safeBuffer\noutputBuffer.WriteString(listenHeader)\n- log.SetOutput(outputBuffer)\n+ log.SetOutput(&outputBuffer)\n// Parse all arguments.\nflag.CommandLine.Parse(wreq.GetArguments())\n- var exitCode int\n- exitChan := make(chan int)\n- go func() { exitChan <- run(flag.CommandLine.Args()) }()\n- for running := true; running; {\n- select {\n- case exitCode = <-exitChan:\n- running = false\n- default:\n- }\n- // N.B. rPipe is given a read deadline of 1ms. We expect\n- // this to turn a copy error after 1ms, and we just keep\n- // flushing this buffer while the task is running.\n- rPipe.SetReadDeadline(time.Now().Add(time.Millisecond))\n- outputBuffer.ReadFrom(rPipe)\n- }\n+ exitCode := run(flag.CommandLine.Args())\n- if *workerDebug {\n// Attach all cache stats.\n+ if *workerDebug {\noutputBuffer.WriteString(allCacheStats())\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix race build error.
This adds a test to smoke-tests to ensure that the race build does not
break again. In debugging this issue, a race in the nogo tool itself
was discovered, and a related fix is included.
PiperOrigin-RevId: 424393624 |
260,004 | 26.01.2022 14:27:06 | 28,800 | ad021f48c08e78a2dbd2223525fbb80990cfaf0f | Add link-layer headers in nic
This removes the need for the stack to add a link header outside of the
write path when delivering outbound packets to a packet socket. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/ethernet/ethernet.go",
"new_path": "pkg/tcpip/link/ethernet/ethernet.go",
"diff": "@@ -79,17 +79,6 @@ func (e *Endpoint) Capabilities() stack.LinkEndpointCapabilities {\nreturn c\n}\n-// WritePackets implements stack.LinkEndpoint.\n-func (e *Endpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) {\n- linkAddr := e.LinkAddress()\n-\n- for pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {\n- e.AddHeader(linkAddr, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\n- }\n-\n- return e.Endpoint.WritePackets(pkts)\n-}\n-\n// MaxHeaderLength implements stack.LinkEndpoint.\nfunc (e *Endpoint) MaxHeaderLength() uint16 {\nreturn header.EthernetMinimumSize + e.Endpoint.MaxHeaderLength()\n@@ -113,8 +102,3 @@ func (*Endpoint) AddHeader(local, remote tcpip.LinkAddress, proto tcpip.NetworkP\n}\neth.Encode(&fields)\n}\n-\n-// WriteRawPacket implements stack.LinkEndpoint.\n-func (e *Endpoint) WriteRawPacket(pkt *stack.PacketBuffer) tcpip.Error {\n- return e.Endpoint.WriteRawPacket(pkt)\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/ethernet/ethernet_test.go",
"new_path": "pkg/tcpip/link/ethernet/ethernet_test.go",
"diff": "@@ -120,32 +120,24 @@ func TestMTU(t *testing.T) {\n}\n}\n-func TestWritePacketsAddHeader(t *testing.T) {\n+func TestWritePacketToRemoteAddHeader(t *testing.T) {\nconst (\nlocalLinkAddr = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x06\")\nremoteLinkAddr = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x07\")\nnetProto = 55\n+ nicID = 1\n)\nc := channel.New(1, header.EthernetMinimumSize, localLinkAddr)\n- e := ethernet.New(c)\n- {\n- pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n- ReserveHeaderBytes: int(e.MaxHeaderLength()),\n- })\n- defer pkt.DecRef()\n- pkt.NetworkProtocolNumber = netProto\n- pkt.EgressRoute.RemoteLinkAddress = remoteLinkAddr\n-\n- var pkts stack.PacketBufferList\n- pkts.PushFront(pkt)\n- if n, err := e.WritePackets(pkts); err != nil {\n- t.Fatalf(\"e.WritePackets(_): %s\", err)\n- } else if n != 1 {\n- t.Fatalf(\"got e.WritePackets(_) = %d, want = 1\", n)\n+ s := stack.New(stack.Options{})\n+ if err := s.CreateNIC(nicID, ethernet.New(c)); err != nil {\n+ t.Fatalf(\"s.CreateNIC(%d, _): %s\", nicID, err)\n}\n+\n+ if err := s.WritePacketToRemote(nicID, remoteLinkAddr, netProto, buffer.VectorisedView{}); err != nil {\n+ t.Fatalf(\"s.WritePacketToRemote(%d, %s, _): %s\", nicID, remoteLinkAddr, err)\n}\n{\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint.go",
"diff": "@@ -510,11 +510,7 @@ func (*endpoint) WriteRawPacket(*stack.PacketBuffer) tcpip.Error { return &tcpip\n// writePacket writes outbound packets to the file descriptor. If it is not\n// currently writable, the packet is dropped.\n-func (e *endpoint) writePacket(r stack.RouteInfo, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) tcpip.Error {\n- if e.hdrSize > 0 {\n- e.AddHeader(r.LocalLinkAddress, r.RemoteLinkAddress, protocol, pkt)\n- }\n-\n+func (e *endpoint) writePacket(pkt *stack.PacketBuffer) tcpip.Error {\nfd := e.fds[pkt.Hash%uint32(len(e.fds))]\nvar vnetHdrBuf []byte\nif e.gsoKind == stack.HWGSOSupported {\n@@ -572,10 +568,6 @@ func (e *endpoint) sendBatch(batchFD int, pkts []*stack.PacketBuffer) (int, tcpi\nbatch := pkts[packets:]\nsyscallHeaderBytes := uintptr(0)\nfor _, pkt := range batch {\n- if e.hdrSize > 0 {\n- e.AddHeader(pkt.EgressRoute.LocalLinkAddress, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\n- }\n-\nvar vnetHdrBuf []byte\nif e.gsoKind == stack.HWGSOSupported {\nvnetHdr := virtioNetHdr{}\n@@ -641,7 +633,7 @@ func (e *endpoint) sendBatch(batchFD int, pkts []*stack.PacketBuffer) (int, tcpi\n// if necessary (by using e.writevMaxIovs instead of\n// rawfile.MaxIovs).\npkt := batch[0]\n- if err := e.writePacket(pkt.EgressRoute, pkt.NetworkProtocolNumber, pkt); err != nil {\n+ if err := e.writePacket(pkt); err != nil {\nreturn packets, err\n}\npackets++\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint_test.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint_test.go",
"diff": "@@ -181,9 +181,6 @@ func testWritePacket(t *testing.T, plen int, eth bool, gsoMaxSize uint32, hash u\nc := newContext(t, &Options{Address: laddr, MTU: mtu, EthernetHeader: eth, GSOMaxSize: gsoMaxSize})\ndefer c.cleanup()\n- var r stack.RouteInfo\n- r.RemoteLinkAddress = raddr\n-\n// Build payload.\npayload := buffer.NewView(plen)\nif _, err := rand.Read(payload); err != nil {\n@@ -199,7 +196,8 @@ func testWritePacket(t *testing.T, plen int, eth bool, gsoMaxSize uint32, hash u\npkt.Hash = hash\n// Every PacketBuffer must have these set:\n// See nic.writePacket.\n- pkt.EgressRoute = r\n+ pkt.EgressRoute.LocalLinkAddress = laddr\n+ pkt.EgressRoute.RemoteLinkAddress = raddr\npkt.NetworkProtocolNumber = proto\ndefer pkt.DecRef()\n@@ -221,6 +219,9 @@ func testWritePacket(t *testing.T, plen int, eth bool, gsoMaxSize uint32, hash u\nL3HdrLen: l3HdrLen,\n}\n}\n+\n+ c.ep.AddHeader(pkt.EgressRoute.LocalLinkAddress, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\n+\nvar pkts stack.PacketBufferList\npkts.PushBack(pkt)\nif _, err := c.ep.WritePackets(pkts); err != nil {\n@@ -326,11 +327,6 @@ func TestPreserveSrcAddress(t *testing.T) {\nc := newContext(t, &Options{Address: laddr, MTU: mtu, EthernetHeader: true})\ndefer c.cleanup()\n- // Set LocalLinkAddress in route to the value of the bridged address.\n- var r stack.RouteInfo\n- r.LocalLinkAddress = baddr\n- r.RemoteLinkAddress = raddr\n-\npkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n// WritePacket panics given a prependable with anything less than\n// the minimum size of the ethernet header.\n@@ -342,7 +338,11 @@ func TestPreserveSrcAddress(t *testing.T) {\n// Every PacketBuffer must have these set:\n// See nic.writePacket.\npkt.NetworkProtocolNumber = proto\n- pkt.EgressRoute = r\n+ // Set LocalLinkAddress in route to the value of the bridged address.\n+ pkt.EgressRoute.LocalLinkAddress = baddr\n+ pkt.EgressRoute.RemoteLinkAddress = raddr\n+ c.ep.AddHeader(pkt.EgressRoute.LocalLinkAddress, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\n+\nvar pkts stack.PacketBufferList\npkts.PushBack(pkt)\nif _, err := c.ep.WritePackets(pkts); err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem.go",
"diff": "@@ -321,6 +321,10 @@ func (e *endpoint) LinkAddress() tcpip.LinkAddress {\n// AddHeader implements stack.LinkEndpoint.AddHeader.\nfunc (e *endpoint) AddHeader(local, remote tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {\n// Add ethernet header if needed.\n+ if len(e.addr) == 0 {\n+ return\n+ }\n+\neth := header.Ethernet(pkt.LinkHeader().Push(header.EthernetMinimumSize))\nethHdr := &header.EthernetFields{\nDstAddr: remote,\n@@ -346,9 +350,6 @@ func (*endpoint) WriteRawPacket(*stack.PacketBuffer) tcpip.Error { return &tcpip\n// +checklocks:e.mu\nfunc (e *endpoint) writePacketLocked(r stack.RouteInfo, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) tcpip.Error {\n- if e.addr != \"\" {\n- e.AddHeader(r.LocalLinkAddress, r.RemoteLinkAddress, protocol, pkt)\n- }\nif e.virtioNetHeaderRequired {\ne.AddVirtioNetHeader(pkt)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem_server.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem_server.go",
"diff": "@@ -207,6 +207,10 @@ func (e *serverEndpoint) LinkAddress() tcpip.LinkAddress {\n// AddHeader implements stack.LinkEndpoint.AddHeader.\nfunc (e *serverEndpoint) AddHeader(local, remote tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {\n// Add ethernet header if needed.\n+ if len(e.addr) == 0 {\n+ return\n+ }\n+\neth := header.Ethernet(pkt.LinkHeader().Push(header.EthernetMinimumSize))\nethHdr := &header.EthernetFields{\nDstAddr: remote,\n@@ -242,10 +246,6 @@ func (e *serverEndpoint) WriteRawPacket(pkt *stack.PacketBuffer) tcpip.Error {\n// +checklocks:e.mu\nfunc (e *serverEndpoint) writePacketLocked(r stack.RouteInfo, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) tcpip.Error {\n- if e.addr != \"\" {\n- e.AddHeader(r.LocalLinkAddress, r.RemoteLinkAddress, protocol, pkt)\n- }\n-\nif e.virtioNetHeaderRequired {\ne.AddVirtioNetHeader(pkt)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem_test.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem_test.go",
"diff": "@@ -204,11 +204,6 @@ func TestSimpleSend(t *testing.T) {\nc := newTestContext(t, 20000, 1500, localLinkAddr)\ndefer c.cleanup()\n- // Prepare route.\n- var r stack.RouteInfo\n- r.RemoteLinkAddress = remoteLinkAddr\n- r.LocalLinkAddress = localLinkAddr\n-\nfor iters := 1000; iters > 0; iters-- {\nfunc() {\nhdrLen, dataLen := rand.Intn(10000), rand.Intn(10000)\n@@ -228,8 +223,10 @@ func TestSimpleSend(t *testing.T) {\nproto := tcpip.NetworkProtocolNumber(rand.Intn(0x10000))\n// Every PacketBuffer must have these set:\n// See nic.writePacket.\n- pkt.EgressRoute = r\n+ pkt.EgressRoute.RemoteLinkAddress = remoteLinkAddr\n+ pkt.EgressRoute.LocalLinkAddress = localLinkAddr\npkt.NetworkProtocolNumber = proto\n+ c.ep.AddHeader(pkt.EgressRoute.LocalLinkAddress, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\nvar pkts stack.PacketBufferList\npkts.PushBack(pkt)\ndefer pkts.DecRef()\n@@ -291,10 +288,6 @@ func TestPreserveSrcAddressInSend(t *testing.T) {\ndefer c.cleanup()\nnewLocalLinkAddress := tcpip.LinkAddress(strings.Repeat(\"0xFE\", 6))\n- // Set both remote and local link address in route.\n- var r stack.RouteInfo\n- r.LocalLinkAddress = newLocalLinkAddress\n- r.RemoteLinkAddress = remoteLinkAddr\npkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n// WritePacket panics given a prependable with anything less than\n@@ -304,8 +297,10 @@ func TestPreserveSrcAddressInSend(t *testing.T) {\nproto := tcpip.NetworkProtocolNumber(rand.Intn(0x10000))\n// Every PacketBuffer must have these set:\n// See nic.writePacket.\n- pkt.EgressRoute = r\n+ pkt.EgressRoute.LocalLinkAddress = newLocalLinkAddress\n+ pkt.EgressRoute.RemoteLinkAddress = remoteLinkAddr\npkt.NetworkProtocolNumber = proto\n+ c.ep.AddHeader(pkt.EgressRoute.LocalLinkAddress, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\nvar pkts stack.PacketBufferList\ndefer pkts.DecRef()\n@@ -351,10 +346,6 @@ func TestFillTxQueue(t *testing.T) {\nc := newTestContext(t, 20000, 1500, localLinkAddr)\ndefer c.cleanup()\n- // Prepare to send a packet.\n- var r stack.RouteInfo\n- r.RemoteLinkAddress = remoteLinkAddr\n-\nbuf := buffer.NewView(100)\n// Each packet is uses no more than 40 bytes, so write that many packets\n@@ -367,8 +358,9 @@ func TestFillTxQueue(t *testing.T) {\nReserveHeaderBytes: int(c.ep.MaxHeaderLength()),\nData: buf.ToVectorisedView(),\n})\n- pkt.EgressRoute = r\n+ pkt.EgressRoute.RemoteLinkAddress = remoteLinkAddr\npkt.NetworkProtocolNumber = header.IPv4ProtocolNumber\n+ c.ep.AddHeader(pkt.EgressRoute.LocalLinkAddress, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\nvar pkts stack.PacketBufferList\npkts.PushBack(pkt)\n@@ -392,8 +384,9 @@ func TestFillTxQueue(t *testing.T) {\nReserveHeaderBytes: int(c.ep.MaxHeaderLength()),\nData: buf.ToVectorisedView(),\n})\n- pkt.EgressRoute = r\n+ pkt.EgressRoute.RemoteLinkAddress = remoteLinkAddr\npkt.NetworkProtocolNumber = header.IPv4ProtocolNumber\n+ c.ep.AddHeader(pkt.EgressRoute.LocalLinkAddress, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\nvar pkts stack.PacketBufferList\npkts.PushBack(pkt)\n@@ -414,10 +407,6 @@ func TestFillTxQueueAfterBadCompletion(t *testing.T) {\nqueue.EncodeTxCompletion(c.txq.rx.Push(8), 1)\nc.txq.rx.Flush()\n- // Prepare to send a packet.\n- var r stack.RouteInfo\n- r.RemoteLinkAddress = remoteLinkAddr\n-\nbuf := buffer.NewView(100)\n// Send two packets so that the id slice has at least two slots.\n@@ -429,8 +418,9 @@ func TestFillTxQueueAfterBadCompletion(t *testing.T) {\nData: 
buf.ToVectorisedView(),\n})\npkts.PushBack(pkt)\n- pkt.EgressRoute = r\n+ pkt.EgressRoute.RemoteLinkAddress = remoteLinkAddr\npkt.NetworkProtocolNumber = header.IPv4ProtocolNumber\n+ c.ep.AddHeader(pkt.EgressRoute.LocalLinkAddress, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\n}\nif _, err := c.ep.WritePackets(pkts); err != nil {\nt.Fatalf(\"WritePackets failed unexpectedly: %s\", err)\n@@ -456,8 +446,10 @@ func TestFillTxQueueAfterBadCompletion(t *testing.T) {\nReserveHeaderBytes: int(c.ep.MaxHeaderLength()),\nData: buf.ToVectorisedView(),\n})\n- pkt.EgressRoute = r\n+ pkt.EgressRoute.RemoteLinkAddress = remoteLinkAddr\npkt.NetworkProtocolNumber = header.IPv4ProtocolNumber\n+ c.ep.AddHeader(pkt.EgressRoute.LocalLinkAddress, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\n+\nvar pkts stack.PacketBufferList\npkts.PushBack(pkt)\nif _, err := c.ep.WritePackets(pkts); err != nil {\n@@ -479,8 +471,10 @@ func TestFillTxQueueAfterBadCompletion(t *testing.T) {\nReserveHeaderBytes: int(c.ep.MaxHeaderLength()),\nData: buf.ToVectorisedView(),\n})\n- pkt.EgressRoute = r\n+ pkt.EgressRoute.RemoteLinkAddress = remoteLinkAddr\npkt.NetworkProtocolNumber = header.IPv4ProtocolNumber\n+ c.ep.AddHeader(pkt.EgressRoute.LocalLinkAddress, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\n+\nvar pkts stack.PacketBufferList\npkts.PushBack(pkt)\n_, err := c.ep.WritePackets(pkts)\n@@ -496,10 +490,6 @@ func TestFillTxMemory(t *testing.T) {\nc := newTestContext(t, 20000, bufferSize, localLinkAddr)\ndefer c.cleanup()\n- // Prepare to send a packet.\n- var r stack.RouteInfo\n- r.RemoteLinkAddress = remoteLinkAddr\n-\nbuf := buffer.NewView(100)\n// Each packet is uses up one buffer, so write as many as possible until\n@@ -510,8 +500,10 @@ func TestFillTxMemory(t *testing.T) {\nReserveHeaderBytes: int(c.ep.MaxHeaderLength()),\nData: buf.ToVectorisedView(),\n})\n- pkt.EgressRoute = r\n+ pkt.EgressRoute.RemoteLinkAddress = remoteLinkAddr\npkt.NetworkProtocolNumber = header.IPv4ProtocolNumber\n+ c.ep.AddHeader(pkt.EgressRoute.LocalLinkAddress, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\n+\nvar pkts stack.PacketBufferList\npkts.PushBack(pkt)\nif _, err := c.ep.WritePackets(pkts); err != nil {\n@@ -535,7 +527,7 @@ func TestFillTxMemory(t *testing.T) {\nData: buf.ToVectorisedView(),\n})\npkt.NetworkProtocolNumber = header.IPv4ProtocolNumber\n- pkt.EgressRoute = r\n+ pkt.EgressRoute.RemoteLinkAddress = remoteLinkAddr\nvar pkts stack.PacketBufferList\npkts.PushBack(pkt)\n_, err := c.ep.WritePackets(pkts)\n@@ -553,10 +545,6 @@ func TestFillTxMemoryWithMultiBuffer(t *testing.T) {\nc := newTestContext(t, 20000, bufferSize, localLinkAddr)\ndefer c.cleanup()\n- // Prepare to send a packet.\n- var r stack.RouteInfo\n- r.RemoteLinkAddress = remoteLinkAddr\n-\nbuf := buffer.NewView(100)\n// Each packet is uses up one buffer, so write as many as possible\n@@ -567,7 +555,7 @@ func TestFillTxMemoryWithMultiBuffer(t *testing.T) {\nData: buf.ToVectorisedView(),\n})\nvar pkts stack.PacketBufferList\n- pkt.EgressRoute = r\n+ pkt.EgressRoute.RemoteLinkAddress = remoteLinkAddr\npkt.NetworkProtocolNumber = header.IPv4ProtocolNumber\npkts.PushBack(pkt)\nif _, err := c.ep.WritePackets(pkts); err != nil {\n@@ -587,8 +575,10 @@ func TestFillTxMemoryWithMultiBuffer(t *testing.T) {\nReserveHeaderBytes: int(c.ep.MaxHeaderLength()),\nData: buffer.NewView(bufferSize).ToVectorisedView(),\n})\n- pkt.EgressRoute = r\n+ pkt.EgressRoute.RemoteLinkAddress = 
remoteLinkAddr\npkt.NetworkProtocolNumber = header.IPv4ProtocolNumber\n+ c.ep.AddHeader(pkt.EgressRoute.LocalLinkAddress, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\n+\npkts.PushBack(pkt)\n_, err := c.ep.WritePackets(pkts)\nif _, ok := err.(*tcpip.ErrWouldBlock); !ok {\n@@ -604,7 +594,7 @@ func TestFillTxMemoryWithMultiBuffer(t *testing.T) {\nReserveHeaderBytes: int(c.ep.MaxHeaderLength()),\nData: buf.ToVectorisedView(),\n})\n- pkt.EgressRoute = r\n+ pkt.EgressRoute.RemoteLinkAddress = remoteLinkAddr\npkt.NetworkProtocolNumber = header.IPv4ProtocolNumber\npkts.PushBack(pkt)\nif _, err := c.ep.WritePackets(pkts); err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/tun/device.go",
"new_path": "pkg/tcpip/link/tun/device.go",
"diff": "@@ -266,20 +266,7 @@ func (d *Device) encodePkt(pkt *stack.PacketBuffer) (buffer.View, bool) {\nvv.AppendView(buffer.View(hdr))\n}\n- // Ethernet header (TAP only).\n- if d.flags.TAP {\n- // Add ethernet header if not provided.\n- if pkt.LinkHeader().View().IsEmpty() {\n- d.endpoint.AddHeader(pkt.EgressRoute.LocalLinkAddress, pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\n- }\n- vv.AppendView(pkt.LinkHeader().View())\n- }\n-\n- // Append upper headers.\n- vv.AppendView(pkt.NetworkHeader().View())\n- vv.AppendView(pkt.TransportHeader().View())\n- // Append data payload.\n- vv.Append(pkt.Data().ExtractVV())\n+ vv.AppendViews(pkt.Views())\nreturn vv.ToView(), true\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/icmp_test.go",
"new_path": "pkg/tcpip/network/ipv6/icmp_test.go",
"diff": "@@ -83,6 +83,9 @@ func (*stubLinkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.E\nfunc (*stubLinkEndpoint) Attach(stack.NetworkDispatcher) {}\n+func (*stubLinkEndpoint) AddHeader(_, _ tcpip.LinkAddress, _ tcpip.NetworkProtocolNumber, _ *stack.PacketBuffer) {\n+}\n+\ntype stubDispatcher struct {\nstack.TransportDispatcher\n}\n@@ -1290,6 +1293,7 @@ func TestLinkAddressRequest(t *testing.T) {\nvar want stack.RouteInfo\nwant.NetProto = ProtocolNumber\n+ want.LocalLinkAddress = linkAddr0\nwant.RemoteLinkAddress = test.expectedRemoteLinkAddr\nif diff := cmp.Diff(want, pkt.EgressRoute, cmp.AllowUnexported(want)); diff != \"\" {\nt.Errorf(\"route info mismatch (-want +got):\\n%s\", diff)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ndp_test.go",
"new_path": "pkg/tcpip/network/ipv6/ndp_test.go",
"diff": "@@ -473,6 +473,7 @@ func TestNeighborSolicitationResponse(t *testing.T) {\nrespNSDst := header.SolicitedNodeAddr(test.nsSrc)\nvar want stack.RouteInfo\nwant.NetProto = ProtocolNumber\n+ want.LocalLinkAddress = nicLinkAddr\nwant.RemoteLinkAddress = header.EthernetAddressFromMulticastIPv6Address(respNSDst)\nif diff := cmp.Diff(want, p.EgressRoute, cmp.AllowUnexported(want)); diff != \"\" {\nt.Errorf(\"route info mismatch (-want +got):\\n%s\", diff)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/forwarding_test.go",
"new_path": "pkg/tcpip/stack/forwarding_test.go",
"diff": "@@ -331,7 +331,6 @@ func (*fwdTestLinkEndpoint) ARPHardwareType() header.ARPHardwareType {\n// AddHeader implements stack.LinkEndpoint.AddHeader.\nfunc (e *fwdTestLinkEndpoint) AddHeader(tcpip.LinkAddress, tcpip.LinkAddress, tcpip.NetworkProtocolNumber, *PacketBuffer) {\n- panic(\"not implemented\")\n}\nfunc fwdTestNetFactory(t *testing.T, proto *fwdTestNetworkProtocol) (*faketime.ManualClock, *fwdTestLinkEndpoint, *fwdTestLinkEndpoint) {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -381,7 +381,13 @@ func (n *nic) WritePacket(r *Route, pkt *PacketBuffer) tcpip.Error {\n// WritePacketToRemote implements NetworkInterface.\nfunc (n *nic) WritePacketToRemote(remoteLinkAddr tcpip.LinkAddress, pkt *PacketBuffer) tcpip.Error {\n- pkt.EgressRoute = RouteInfo{routeInfo: routeInfo{NetProto: pkt.NetworkProtocolNumber}, RemoteLinkAddress: remoteLinkAddr}\n+ pkt.EgressRoute = RouteInfo{\n+ routeInfo: routeInfo{\n+ NetProto: pkt.NetworkProtocolNumber,\n+ LocalLinkAddress: n.LinkAddress(),\n+ },\n+ RemoteLinkAddress: remoteLinkAddr,\n+ }\nreturn n.writePacket(pkt)\n}\n@@ -389,7 +395,9 @@ func (n *nic) writePacket(pkt *PacketBuffer) tcpip.Error {\n// WritePacket modifies pkt, calculate numBytes first.\nnumBytes := pkt.Size()\n- n.deliverOutboundPacket(pkt.EgressRoute.RemoteLinkAddress, pkt)\n+ n.NetworkLinkEndpoint.AddHeader(n.LinkAddress(), pkt.EgressRoute.RemoteLinkAddress, pkt.NetworkProtocolNumber, pkt)\n+\n+ n.deliverLinkPacket(pkt.NetworkProtocolNumber, pkt, false /* incoming */)\nif err := n.qDisc.WritePacket(pkt); err != nil {\nreturn err\n@@ -722,6 +730,12 @@ func (n *nic) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *Pa\npkt.RXTransportChecksumValidated = n.NetworkLinkEndpoint.Capabilities()&CapabilityRXChecksumOffload != 0\n+ n.deliverLinkPacket(protocol, pkt, true /* incoming */)\n+\n+ networkEndpoint.HandlePacket(pkt)\n+}\n+\n+func (n *nic) deliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer, incoming bool) {\n// Deliver to interested packet endpoints without holding NIC lock.\nvar packetEPPkt *PacketBuffer\ndefer func() {\n@@ -747,7 +761,12 @@ func (n *nic) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *Pa\n// populate it in the packet buffer we provide to packet endpoints as\n// packet endpoints inspect link headers.\npacketEPPkt.LinkHeader().Consume(pkt.LinkHeader().View().Size())\n+\n+ if incoming {\npacketEPPkt.PktType = tcpip.PacketHost\n+ } else {\n+ packetEPPkt.PktType = tcpip.PacketOutgoing\n+ }\n}\nclone := packetEPPkt.Clone()\n@@ -762,61 +781,13 @@ func (n *nic) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *Pa\nanyEPs, anyEPsOK := n.packetEPs[header.EthernetProtocolAll]\nn.packetEPsMu.Unlock()\n- if protoEPsOK {\n+ // On Linux, only ETH_P_ALL endpoints get outbound packets.\n+ if incoming && protoEPsOK {\nprotoEPs.forEach(deliverPacketEPs)\n}\nif anyEPsOK {\nanyEPs.forEach(deliverPacketEPs)\n}\n-\n- networkEndpoint.HandlePacket(pkt)\n-}\n-\n-// deliverOutboundPacket delivers outgoing packets to interested endpoints.\n-func (n *nic) deliverOutboundPacket(remote tcpip.LinkAddress, pkt *PacketBuffer) {\n- n.packetEPsMu.RLock()\n- defer n.packetEPsMu.RUnlock()\n- // We do not deliver to protocol specific packet endpoints as on Linux\n- // only ETH_P_ALL endpoints get outbound packets.\n- // Add any other packet sockets that maybe listening for all protocols.\n- eps, ok := n.packetEPs[header.EthernetProtocolAll]\n- if !ok {\n- return\n- }\n-\n- local := n.LinkAddress()\n-\n- var packetEPPkt *PacketBuffer\n- defer func() {\n- if packetEPPkt != nil {\n- packetEPPkt.DecRef()\n- }\n- }()\n- eps.forEach(func(ep PacketEndpoint) {\n- if packetEPPkt == nil {\n- // Packet endpoints hold the full packet.\n- //\n- // We perform a deep copy because higher-level endpoints may point to\n- // the middle of a view that is held by a packet endpoint. 
Save/Restore\n- // does not support overlapping slices and will panic in this case.\n- //\n- // TODO(https://gvisor.dev/issue/6517): Avoid this copy once S/R supports\n- // overlapping slices (e.g. by passing a shallow copy of pkt to the packet\n- // endpoint).\n- packetEPPkt = NewPacketBuffer(PacketBufferOptions{\n- ReserveHeaderBytes: pkt.AvailableHeaderBytes(),\n- Data: PayloadSince(pkt.NetworkHeader()).ToVectorisedView(),\n- })\n- // Add the link layer header as outgoing packets are intercepted before\n- // the link layer header is created and packet endpoints are interested\n- // in the link header.\n- n.NetworkLinkEndpoint.AddHeader(local, remote, pkt.NetworkProtocolNumber, packetEPPkt)\n- packetEPPkt.PktType = tcpip.PacketOutgoing\n- }\n- clone := packetEPPkt.Clone()\n- defer clone.DecRef()\n- ep.HandlePacket(n.id, pkt.NetworkProtocolNumber, clone)\n- })\n}\n// DeliverTransportPacket delivers the packets to the appropriate transport\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add link-layer headers in nic
This removes the need for the stack to add a link header out-of-line in the
write path when delivering outbound packets to a packet socket.
PiperOrigin-RevId: 424444109 |
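
The commit above moves link-header construction into nic.writePacket, so outbound frames reach packet endpoints with the header already in place instead of being rebuilt in a separate, out-of-line copy. The Go sketch below is a toy model of that pattern; `nic`, `packet`, and `packetEndpoint` are illustrative stand-ins, not gVisor's real types.

```go
// Toy model (not gVisor's real types) of the pattern in this commit: the
// NIC prepends the link-layer header once, on the write path, so the same
// fully formed frame is what packet endpoints (e.g. AF_PACKET sniffers) see.
package main

import "fmt"

type packet struct {
	linkHdr []byte
	payload []byte
}

// packetEndpoint stands in for a packet socket registered on the NIC.
type packetEndpoint func(pkt *packet)

type nic struct {
	linkAddr string
	eps      []packetEndpoint
}

// addHeader plays the role of LinkEndpoint.AddHeader: it builds the header
// in place instead of in a separate, out-of-line copy of the packet.
func (n *nic) addHeader(remote string, pkt *packet) {
	pkt.linkHdr = []byte("eth " + n.linkAddr + "->" + remote)
}

// writePacket mirrors nic.writePacket after the change: header first, then
// a single delivery step that is shared with the inbound path.
func (n *nic) writePacket(remote string, pkt *packet) {
	n.addHeader(remote, pkt)
	n.deliverLinkPacket(pkt, false /* incoming */)
	// ...hand the frame to the link endpoint's WritePackets...
}

func (n *nic) deliverLinkPacket(pkt *packet, incoming bool) {
	for _, ep := range n.eps {
		ep(pkt) // endpoints now observe the link header on outgoing packets too
	}
}

func main() {
	n := &nic{linkAddr: "aa:bb", eps: []packetEndpoint{func(p *packet) {
		fmt.Printf("sniffed frame: hdr=%q payload=%q\n", p.linkHdr, p.payload)
	}}}
	n.writePacket("cc:dd", &packet{payload: []byte("hello")})
}
```
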
259,858 | 26.01.2022 16:28:04 | 28,800 | b5962471e17d31f509094b21a330db789df477a8 | Increase buildkite parallelism.
Since there is very little wasted work for Buildkite, increasing the
parallelism will decrease throw-away work on cancelation or failure.
This aims to achieve ~3 minutes per individual test instance. | [
{
"change_type": "MODIFY",
"old_path": ".buildkite/pipeline.yaml",
"new_path": ".buildkite/pipeline.yaml",
"diff": "@@ -129,7 +129,7 @@ steps:\n- <<: *common\nlabel: \":toolbox: System call tests\"\ncommand: make syscall-tests\n- parallelism: 20\n+ parallelism: 60\n# Integration tests.\n- <<: *common\n@@ -193,15 +193,15 @@ steps:\n- <<: *common\nlabel: \":php: PHP runtime tests\"\ncommand: make php7.3.6-runtime-tests_vfs2\n- parallelism: 10\n+ parallelism: 20\n- <<: *common\nlabel: \":java: Java runtime tests\"\ncommand: make java11-runtime-tests_vfs2\n- parallelism: 40\n+ parallelism: 120\n- <<: *common\nlabel: \":golang: Go runtime tests\"\ncommand: make go1.12-runtime-tests_vfs2\n- parallelism: 10\n+ parallelism: 20\n- <<: *common\nlabel: \":node: NodeJS runtime tests\"\ncommand: make nodejs12.4.0-runtime-tests_vfs2\n@@ -209,23 +209,23 @@ steps:\n- <<: *common\nlabel: \":python: Python runtime tests\"\ncommand: make python3.7.3-runtime-tests_vfs2\n- parallelism: 10\n+ parallelism: 20\n# Runtime tests (VFS1).\n- <<: *common\nlabel: \":php: PHP runtime tests (VFS1)\"\ncommand: make php7.3.6-runtime-tests\n- parallelism: 10\n+ parallelism: 20\nif: build.message =~ /VFS1/ || build.branch == \"master\"\n- <<: *common\nlabel: \":java: Java runtime tests (VFS1)\"\ncommand: make java11-runtime-tests\n- parallelism: 40\n+ parallelism: 120\nif: build.message =~ /VFS1/ || build.branch == \"master\"\n- <<: *common\nlabel: \":golang: Go runtime tests (VFS1)\"\ncommand: make go1.12-runtime-tests\n- parallelism: 10\n+ parallelism: 20\nif: build.message =~ /VFS1/ || build.branch == \"master\"\n- <<: *common\nlabel: \":node: NodeJS runtime tests (VFS1)\"\n@@ -235,7 +235,7 @@ steps:\n- <<: *common\nlabel: \":python: Python runtime tests (VFS1)\"\ncommand: make python3.7.3-runtime-tests\n- parallelism: 10\n+ parallelism: 20\nif: build.message =~ /VFS1/ || build.branch == \"master\"\n# ARM tests.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Increase buildkite parallelism.
Since there is very little wasted work for Buildkite, increasing the
parallelism will decrease throw-away work on cancelation or failure.
This aims to achieve ~3 minutes per individual test instance.
PiperOrigin-RevId: 424469351 |
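
A back-of-the-envelope view of the new parallelism values (60 shards for syscall tests, 120 for Java, 20 for most others): given the stated goal of ~3 minutes per shard, the shard count is just total serial runtime divided by the target. The runtimes in this sketch are hypothetical placeholders, not measurements from the CI.

```go
// Rough shard-count arithmetic for the ~3-minutes-per-shard target.
// Total runtimes below are made-up examples, not measured values.
package main

import "fmt"

// shards rounds totalMinutes/targetMinutesPerShard to the nearest count.
func shards(totalMinutes, targetMinutesPerShard float64) int {
	n := int(totalMinutes/targetMinutesPerShard + 0.5)
	if n < 1 {
		return 1
	}
	return n
}

func main() {
	for _, s := range []struct {
		name         string
		totalMinutes float64 // hypothetical serial runtime
	}{
		{"syscall-tests", 180},        // 180/3 = 60, matching the new value
		{"java11-runtime-tests", 360}, // 360/3 = 120
		{"php7.3.6-runtime-tests", 60} // 60/3 = 20
	} {
		fmt.Printf("%s: parallelism ~ %d\n", s.name, shards(s.totalMinutes, 3))
	}
}
```
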
259,909 | 26.01.2022 17:24:05 | 28,800 | 6a28dc7c59632b4007a095377073b8b74df85bea | Correct fragmentation reference counting.
Before this change the only reference on the packet after reassembly
processing was held by the reassembler in the holes array. This meant that
after the reassembly cleanup job, there were no references left on the
packet, leading to use after free bugs. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/fragmentation/fragmentation_test.go",
"new_path": "pkg/tcpip/network/internal/fragmentation/fragmentation_test.go",
"diff": "@@ -112,6 +112,9 @@ func TestFragmentationProcess(t *testing.T) {\nfor i, in := range c.in {\ndefer in.pkt.DecRef()\nresPkt, proto, done, err := f.Process(in.id, in.first, in.last, in.more, in.proto, in.pkt)\n+ if resPkt != nil {\n+ defer resPkt.DecRef()\n+ }\nif err != nil {\nt.Fatalf(\"f.Process(%+v, %d, %d, %t, %d, %#v) failed: %s\",\nin.id, in.first, in.last, in.more, in.proto, in.pkt, err)\n@@ -258,7 +261,10 @@ func TestReassemblingTimeout(t *testing.T) {\nif frag := event.fragment; frag != nil {\np := pkt(len(frag.data), frag.data)\ndefer p.DecRef()\n- _, _, done, err := f.Process(FragmentID{}, frag.first, frag.last, frag.more, protocol, p)\n+ pkt, _, done, err := f.Process(FragmentID{}, frag.first, frag.last, frag.more, protocol, p)\n+ if pkt != nil {\n+ pkt.DecRef()\n+ }\nif err != nil {\nt.Fatalf(\"%s: f.Process failed: %s\", event.name, err)\n}\n@@ -686,3 +692,18 @@ func TestTimeoutHandler(t *testing.T) {\n})\n}\n}\n+\n+func TestFragmentSurvivesReleaseJob(t *testing.T) {\n+ handler := &testTimeoutHandler{pkt: nil}\n+ c := faketime.NewManualClock()\n+ f := NewFragmentation(minBlockSize, HighFragThreshold, LowFragThreshold, reassembleTimeout, c, handler)\n+ pkt := pkt(2, \"01\")\n+ // Values to Process don't matter except for pkt.\n+ resPkt, _, _, _ := f.Process(FragmentID{ID: 0}, 0, 1, false, 0, pkt)\n+ pkt.DecRef()\n+ // This clears out the references held by the reassembler.\n+ c.Advance(reassembleTimeout)\n+ // If Process doesn't give the returned packet its own reference, this will\n+ // fail.\n+ resPkt.DecRef()\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/fragmentation/reassembler.go",
"new_path": "pkg/tcpip/network/internal/fragmentation/reassembler.go",
"diff": "@@ -170,6 +170,7 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *s\n})\nresPkt := r.holes[0].pkt\n+ resPkt.IncRef()\nfor i := 1; i < len(r.holes); i++ {\nstack.MergeFragment(resPkt, r.holes[i].pkt)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/fragmentation/reassembler_test.go",
"new_path": "pkg/tcpip/network/internal/fragmentation/reassembler_test.go",
"diff": "@@ -203,6 +203,9 @@ func TestReassemblerProcess(t *testing.T) {\nvar isDone bool\nfor _, param := range test.params {\npkt, _, done, _, err := r.process(param.first, param.last, param.more, proto, param.pkt)\n+ if pkt != nil {\n+ defer pkt.DecRef()\n+ }\nif done != param.wantDone || err != param.wantError {\nt.Errorf(\"got r.process(%d, %d, %t, %d, _) = (_, _, %t, _, %v), want = (%t, %v)\", param.first, param.last, param.more, proto, done, err, param.wantDone, param.wantError)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv4/ipv4.go",
"new_path": "pkg/tcpip/network/ipv4/ipv4.go",
"diff": "@@ -941,6 +941,7 @@ func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt *stack.PacketBuffer,\nif !ready {\nreturn\n}\n+ defer resPkt.DecRef()\npkt = resPkt\nh = header.IPv4(pkt.NetworkHeader().View())\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/ipv6/ipv6.go",
"new_path": "pkg/tcpip/network/ipv6/ipv6.go",
"diff": "@@ -1136,7 +1136,16 @@ func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt *stack.PacketBuffe\nvar (\nhasFragmentHeader bool\nrouterAlert *header.IPv6RouterAlertOption\n+ // Create an extra packet buffer reference to keep track of the packet to\n+ // DecRef so that we do not incur a memory allocation for deferring a DecRef\n+ // within the loop.\n+ resPktToDecRef *stack.PacketBuffer\n)\n+ defer func() {\n+ if resPktToDecRef != nil {\n+ resPktToDecRef.DecRef()\n+ }\n+ }()\nfor {\n// Keep track of the start of the previous header so we can report the\n@@ -1392,6 +1401,7 @@ func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt *stack.PacketBuffe\n}\nif ready {\n+ resPktToDecRef = resPkt\npkt = resPkt\n// We create a new iterator with the reassembled packet because we could\n"
}
] | Go | Apache License 2.0 | google/gvisor | Correct fragmentation reference counting.
Before this change the only reference on the packet after reassembly
processing was held by the reassembler in the holes array. This meant that
after the reassembly cleanup job, there were no references left on the
packet, leading to use after free bugs.
PiperOrigin-RevId: 424479461 |
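
The core of the fix is the added resPkt.IncRef() in reassembler.process: the returned packet must carry a reference of its own, because the references held in the holes array are dropped by the timeout cleanup job. Below is a minimal sketch of that rule using a hand-rolled refcounted buffer rather than gVisor's PacketBuffer.

```go
// Minimal sketch of the reference-counting rule this commit enforces,
// with a toy refcounted buffer standing in for stack.PacketBuffer.
package main

import "fmt"

type buf struct{ refs int }

func (b *buf) IncRef() { b.refs++ }
func (b *buf) DecRef() {
	b.refs--
	if b.refs == 0 {
		fmt.Println("buffer freed")
	}
}

// reassembler holds one reference per fragment in its holes slice.
type reassembler struct{ holes []*buf }

// process returns the reassembled packet. The fix is the IncRef: without
// it, the caller shares the holes' references, which the timeout cleanup
// job drops, leaving the caller with a freed buffer (use-after-free).
func (r *reassembler) process() *buf {
	res := r.holes[0]
	res.IncRef()
	return res
}

// releaseJob models the reassembly timeout dropping the holes' references.
func (r *reassembler) releaseJob() {
	for _, b := range r.holes {
		b.DecRef()
	}
	r.holes = nil
}

func main() {
	b := &buf{refs: 1} // the reference held by holes[0]
	r := &reassembler{holes: []*buf{b}}
	res := r.process()
	r.releaseJob() // timeout fires; only the caller's reference remains
	res.DecRef()   // last reference: the buffer is freed here, not earlier
}
```

This is the same scenario the new TestFragmentSurvivesReleaseJob exercises: advance the clock past the reassembly timeout, then DecRef the returned packet.
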
260,004 | 26.01.2022 18:31:27 | 28,800 | f54fcc6e11ae42289e4e8d3e4df35f33c458c7f0 | Drop LinkEndpoint.WriteRawPacket | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/channel/channel.go",
"new_path": "pkg/tcpip/link/channel/channel.go",
"diff": "@@ -268,13 +268,3 @@ func (*Endpoint) ARPHardwareType() header.ARPHardwareType {\n// AddHeader implements stack.LinkEndpoint.AddHeader.\nfunc (*Endpoint) AddHeader(*stack.PacketBuffer) {}\n-\n-// WriteRawPacket implements stack.LinkEndpoint.\n-func (e *Endpoint) WriteRawPacket(pkt *stack.PacketBuffer) tcpip.Error {\n- // Write returns false if the queue is full. A full queue is not an error\n- // from the perspective of a LinkEndpoint so we ignore Write's return\n- // value and always return nil from this method.\n- _ = e.q.Write(pkt)\n-\n- return nil\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint.go",
"diff": "@@ -498,9 +498,6 @@ func (e *endpoint) AddHeader(pkt *stack.PacketBuffer) {\n}\n}\n-// WriteRawPacket implements stack.LinkEndpoint.\n-func (*endpoint) WriteRawPacket(*stack.PacketBuffer) tcpip.Error { return &tcpip.ErrNotSupported{} }\n-\n// writePacket writes outbound packets to the file descriptor. If it is not\n// currently writable, the packet is dropped.\nfunc (e *endpoint) writePacket(pkt *stack.PacketBuffer) tcpip.Error {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/loopback/loopback.go",
"new_path": "pkg/tcpip/link/loopback/loopback.go",
"diff": "@@ -76,14 +76,14 @@ func (*endpoint) Wait() {}\n// WritePackets implements stack.LinkEndpoint.WritePackets.\nfunc (e *endpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) {\n- n := 0\n- for p := pkts.Front(); p != nil; p = p.Next() {\n- if err := e.WriteRawPacket(p); err != nil {\n- return n, err\n- }\n- n++\n+ for pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {\n+ newPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: buffer.NewVectorisedView(pkt.Size(), pkt.Views()),\n+ })\n+ e.dispatcher.DeliverNetworkPacket(pkt.NetworkProtocolNumber, newPkt)\n+ newPkt.DecRef()\n}\n- return n, nil\n+ return pkts.Len(), nil\n}\n// ARPHardwareType implements stack.LinkEndpoint.ARPHardwareType.\n@@ -92,20 +92,3 @@ func (*endpoint) ARPHardwareType() header.ARPHardwareType {\n}\nfunc (*endpoint) AddHeader(*stack.PacketBuffer) {}\n-\n-// WriteRawPacket implements stack.LinkEndpoint.\n-func (e *endpoint) WriteRawPacket(pkt *stack.PacketBuffer) tcpip.Error {\n- // Construct data as the unparsed portion for the loopback packet.\n- data := buffer.NewVectorisedView(pkt.Size(), pkt.Views())\n-\n- // Because we're immediately turning around and writing the packet back\n- // to the rx path, we intentionally don't preserve the remote and local\n- // link addresses from the stack.Route we're passed.\n- newPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n- Data: data,\n- })\n- defer newPkt.DecRef()\n- e.dispatcher.DeliverNetworkPacket(pkt.NetworkProtocolNumber, newPkt)\n-\n- return nil\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/muxed/injectable.go",
"new_path": "pkg/tcpip/link/muxed/injectable.go",
"diff": "@@ -137,11 +137,6 @@ func (*InjectableEndpoint) ARPHardwareType() header.ARPHardwareType {\n// AddHeader implements stack.LinkEndpoint.AddHeader.\nfunc (*InjectableEndpoint) AddHeader(*stack.PacketBuffer) {}\n-// WriteRawPacket implements stack.LinkEndpoint.\n-func (*InjectableEndpoint) WriteRawPacket(*stack.PacketBuffer) tcpip.Error {\n- return &tcpip.ErrNotSupported{}\n-}\n-\n// NewInjectableEndpoint creates a new multi-endpoint injectable endpoint.\nfunc NewInjectableEndpoint(routes map[tcpip.Address]stack.InjectableLinkEndpoint) *InjectableEndpoint {\nreturn &InjectableEndpoint{\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/nested/nested.go",
"new_path": "pkg/tcpip/link/nested/nested.go",
"diff": "@@ -137,8 +137,3 @@ func (e *Endpoint) ARPHardwareType() header.ARPHardwareType {\nfunc (e *Endpoint) AddHeader(pkt *stack.PacketBuffer) {\ne.child.AddHeader(pkt)\n}\n-\n-// WriteRawPacket implements stack.LinkEndpoint.\n-func (e *Endpoint) WriteRawPacket(pkt *stack.PacketBuffer) tcpip.Error {\n- return e.child.WriteRawPacket(pkt)\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/pipe/pipe.go",
"new_path": "pkg/tcpip/link/pipe/pipe.go",
"diff": "@@ -109,11 +109,3 @@ func (*Endpoint) ARPHardwareType() header.ARPHardwareType {\n// AddHeader implements stack.LinkEndpoint.\nfunc (*Endpoint) AddHeader(*stack.PacketBuffer) {}\n-\n-// WriteRawPacket implements stack.LinkEndpoint.\n-func (e *Endpoint) WriteRawPacket(pkt *stack.PacketBuffer) tcpip.Error {\n- var pkts stack.PacketBufferList\n- pkts.PushBack(pkt)\n- _, err := e.WritePackets(pkts)\n- return err\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem.go",
"diff": "@@ -338,9 +338,6 @@ func (e *endpoint) AddVirtioNetHeader(pkt *stack.PacketBuffer) {\nvirtio.Encode(&header.VirtioNetHeaderFields{})\n}\n-// WriteRawPacket implements stack.LinkEndpoint.\n-func (*endpoint) WriteRawPacket(*stack.PacketBuffer) tcpip.Error { return &tcpip.ErrNotSupported{} }\n-\n// +checklocks:e.mu\nfunc (e *endpoint) writePacketLocked(r stack.RouteInfo, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) tcpip.Error {\nif e.virtioNetHeaderRequired {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem_server.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem_server.go",
"diff": "@@ -224,19 +224,6 @@ func (e *serverEndpoint) AddVirtioNetHeader(pkt *stack.PacketBuffer) {\nvirtio.Encode(&header.VirtioNetHeaderFields{})\n}\n-// WriteRawPacket implements stack.LinkEndpoint.WriteRawPacket\n-func (e *serverEndpoint) WriteRawPacket(pkt *stack.PacketBuffer) tcpip.Error {\n- views := pkt.Views()\n- e.mu.Lock()\n- defer e.mu.Unlock()\n- ok := e.tx.transmit(views)\n- if !ok {\n- return &tcpip.ErrWouldBlock{}\n- }\n- e.tx.notify()\n- return nil\n-}\n-\n// +checklocks:e.mu\nfunc (e *serverEndpoint) writePacketLocked(r stack.RouteInfo, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) tcpip.Error {\nif e.virtioNetHeaderRequired {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/waitable/waitable.go",
"new_path": "pkg/tcpip/link/waitable/waitable.go",
"diff": "@@ -133,6 +133,3 @@ func (e *Endpoint) ARPHardwareType() header.ARPHardwareType {\nfunc (e *Endpoint) AddHeader(pkt *stack.PacketBuffer) {\ne.lower.AddHeader(pkt)\n}\n-\n-// WriteRawPacket implements stack.LinkEndpoint.\n-func (*Endpoint) WriteRawPacket(*stack.PacketBuffer) tcpip.Error { return &tcpip.ErrNotSupported{} }\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/waitable/waitable_test.go",
"new_path": "pkg/tcpip/link/waitable/waitable_test.go",
"diff": "@@ -76,11 +76,6 @@ func (e *countedEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.\nreturn pkts.Len(), nil\n}\n-// WriteRawPacket implements stack.LinkEndpoint.\n-func (*countedEndpoint) WriteRawPacket(*stack.PacketBuffer) tcpip.Error {\n- return &tcpip.ErrNotSupported{}\n-}\n-\n// ARPHardwareType implements stack.LinkEndpoint.ARPHardwareType.\nfunc (*countedEndpoint) ARPHardwareType() header.ARPHardwareType {\npanic(\"unimplemented\")\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/internal/testutil/testutil.go",
"new_path": "pkg/tcpip/network/internal/testutil/testutil.go",
"diff": "@@ -90,11 +90,6 @@ func (*MockLinkEndpoint) ARPHardwareType() header.ARPHardwareType { return heade\n// AddHeader implements LinkEndpoint.AddHeader.\nfunc (*MockLinkEndpoint) AddHeader(*stack.PacketBuffer) {}\n-// WriteRawPacket implements stack.LinkEndpoint.\n-func (*MockLinkEndpoint) WriteRawPacket(*stack.PacketBuffer) tcpip.Error {\n- return &tcpip.ErrNotSupported{}\n-}\n-\n// MakeRandPkt generates a randomized packet. transportHeaderLength indicates\n// how many random bytes will be copied in the Transport Header.\n// extraHeaderReserveLength indicates how much extra space will be reserved for\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/forwarding_test.go",
"new_path": "pkg/tcpip/stack/forwarding_test.go",
"diff": "@@ -317,10 +317,6 @@ func (e *fwdTestLinkEndpoint) WritePackets(pkts PacketBufferList) (int, tcpip.Er\nreturn n, nil\n}\n-func (*fwdTestLinkEndpoint) WriteRawPacket(*PacketBuffer) tcpip.Error {\n- return &tcpip.ErrNotSupported{}\n-}\n-\n// Wait implements stack.LinkEndpoint.Wait.\nfunc (*fwdTestLinkEndpoint) Wait() {}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -86,7 +86,6 @@ type nic struct {\npacketEPs map[tcpip.NetworkProtocolNumber]*packetEndpointList\nqDisc QueueingDiscipline\n- rawLinkEP LinkRawWriter\n}\n// makeNICStats initializes the NIC statistics and associates them to the global\n@@ -182,7 +181,6 @@ func newNIC(stack *Stack, id tcpip.NICID, ep LinkEndpoint, opts NICOptions) *nic\nlinkAddrResolvers: make(map[tcpip.NetworkProtocolNumber]*linkResolver),\nduplicateAddressDetectors: make(map[tcpip.NetworkProtocolNumber]DuplicateAddressDetector),\nqDisc: qDisc,\n- rawLinkEP: ep,\n}\nnic.linkResQueue.init(nic)\n@@ -343,11 +341,6 @@ func (n *nic) IsLoopback() bool {\nreturn n.NetworkLinkEndpoint.Capabilities()&CapabilityLoopback != 0\n}\n-// WriteRawPacket implements LinkRawWriter.\n-func (n *nic) WriteRawPacket(pkt *PacketBuffer) tcpip.Error {\n- return n.rawLinkEP.WriteRawPacket(pkt)\n-}\n-\n// WritePacket implements NetworkEndpoint.\nfunc (n *nic) WritePacket(r *Route, pkt *PacketBuffer) tcpip.Error {\nrouteInfo, _, err := r.resolvedFields(nil)\n@@ -392,11 +385,11 @@ func (n *nic) WritePacketToRemote(remoteLinkAddr tcpip.LinkAddress, pkt *PacketB\n}\nfunc (n *nic) writePacket(pkt *PacketBuffer) tcpip.Error {\n- // WritePacket modifies pkt, calculate numBytes first.\n- numBytes := pkt.Size()\n-\nn.NetworkLinkEndpoint.AddHeader(pkt)\n+ return n.writeRawPacket(pkt)\n+}\n+func (n *nic) writeRawPacket(pkt *PacketBuffer) tcpip.Error {\nn.deliverLinkPacket(pkt.NetworkProtocolNumber, pkt, false /* incoming */)\nif err := n.qDisc.WritePacket(pkt); err != nil {\n@@ -404,7 +397,7 @@ func (n *nic) writePacket(pkt *PacketBuffer) tcpip.Error {\n}\nn.stats.tx.packets.Increment()\n- n.stats.tx.bytes.IncrementBy(uint64(numBytes))\n+ n.stats.tx.bytes.IncrementBy(uint64(pkt.Size()))\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/registration.go",
"new_path": "pkg/tcpip/stack/registration.go",
"diff": "@@ -771,18 +771,6 @@ type LinkWriter interface {\nWritePackets(PacketBufferList) (int, tcpip.Error)\n}\n-// LinkRawWriter is an interface that must be implemented by all Link endpoints\n-// to support emitting pre-formed packets which include the Link header.\n-type LinkRawWriter interface {\n- // WriteRawPacket writes a packet directly to the link.\n- //\n- // If the link-layer has its own header, the payload must already include the\n- // header.\n- //\n- // WriteRawPacket may modify the packet.\n- WriteRawPacket(*PacketBuffer) tcpip.Error\n-}\n-\n// NetworkLinkEndpoint is a data-link layer that supports sending network\n// layer packets.\ntype NetworkLinkEndpoint interface {\n@@ -860,7 +848,6 @@ type QueueingDiscipline interface {\ntype LinkEndpoint interface {\nNetworkLinkEndpoint\nLinkWriter\n- LinkRawWriter\n}\n// InjectableLinkEndpoint is a LinkEndpoint where inbound packets are\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -1646,7 +1646,7 @@ func (s *Stack) WriteRawPacket(nicID tcpip.NICID, proto tcpip.NetworkProtocolNum\n})\ndefer pkt.DecRef()\npkt.NetworkProtocolNumber = proto\n- return nic.WriteRawPacket(pkt)\n+ return nic.writeRawPacket(pkt)\n}\n// NetworkProtocolInstance returns the protocol instance in the stack for the\n"
}
] | Go | Apache License 2.0 | google/gvisor | Drop LinkEndpoint.WriteRawPacket
PiperOrigin-RevId: 424490855 |
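
After this commit a LinkEndpoint is just NetworkLinkEndpoint plus LinkWriter; there is no separate raw-write entry point, and loopback simply re-dispatches written packets as inbound ones. A stripped-down sketch of that contract follows; the types are stand-ins for illustration, not the real stack package.

```go
// Sketch of the slimmed-down link endpoint contract after this commit:
// egress goes through WritePackets only, with no WriteRawPacket method.
package main

import "fmt"

type packetBuffer struct{ data []byte }

// linkWriter mirrors the remaining write-side interface.
type linkWriter interface {
	WritePackets(pkts []*packetBuffer) (int, error)
}

// loopback turns each written packet straight back into an inbound one,
// echoing how loopback's WritePackets re-dispatches in the diff above.
type loopback struct {
	deliver func(*packetBuffer) // stands in for DeliverNetworkPacket
}

func (e *loopback) WritePackets(pkts []*packetBuffer) (int, error) {
	for _, pkt := range pkts {
		in := &packetBuffer{data: append([]byte(nil), pkt.data...)}
		e.deliver(in)
	}
	return len(pkts), nil
}

func main() {
	var lw linkWriter = &loopback{deliver: func(p *packetBuffer) {
		fmt.Printf("received %q\n", p.data)
	}}
	n, err := lw.WritePackets([]*packetBuffer{{data: []byte("ping")}})
	fmt.Println(n, err)
}
```
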
259,897 | 26.01.2022 00:48:59 | -7,200 | b7ccfa5084e252f3b159aa3d44c007b889cd4287 | Fixes #7086,#6964,#3413,#7001.
Also adds fuse fsync, rename, flock support. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/fuse.go",
"new_path": "pkg/abi/linux/fuse.go",
"diff": "@@ -232,6 +232,43 @@ type FUSEInitOut struct {\n_ [8]uint32\n}\n+// FUSEStatfsOut is the reply sent by the daemon to the kernel\n+// for FUSE_STATFS.\n+// from https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/fuse.h#L252\n+//\n+// +marshal\n+type FUSEStatfsOut struct {\n+ // Blocks is the maximum number of data blocks the filesystem may store, in\n+ // units of BlockSize.\n+ Blocks uint64\n+\n+ // BlocksFree is the number of free data blocks, in units of BlockSize.\n+ BlocksFree uint64\n+\n+ // BlocksAvailable is the number of data blocks free for use by\n+ // unprivileged users, in units of BlockSize.\n+ BlocksAvailable uint64\n+\n+ // Files is the number of used file nodes on the filesystem.\n+ Files uint64\n+\n+ // FileFress is the number of free file nodes on the filesystem.\n+ FilesFree uint64\n+\n+ // BlockSize is the optimal transfer block size in bytes.\n+ BlockSize uint32\n+\n+ // NameLength is the maximum file name length.\n+ NameLength uint32\n+\n+ // FragmentSize is equivalent to BlockSize.\n+ FragmentSize uint32\n+\n+ _ uint32\n+\n+ Spare [6]uint32\n+}\n+\n// FUSE_GETATTR_FH is currently the only flag of FUSEGetAttrIn.GetAttrFlags.\n// If it is set, the file handle (FUSEGetAttrIn.Fh) is used to indicate the\n// object instead of the node id attribute in the request header.\n@@ -436,6 +473,15 @@ type FUSEOpenOut struct {\n_ uint32\n}\n+// FUSECreateOut is the reply sent by the daemon to the kernel\n+// for FUSECreateMeta.\n+//\n+// +marshal\n+type FUSECreateOut struct {\n+ FUSEEntryOut\n+ FUSEOpenOut\n+}\n+\n// FUSE_READ flags, consistent with the ones in include/uapi/linux/fuse.h.\nconst (\nFUSE_READ_LOCKOWNER = 1 << 1\n@@ -474,6 +520,7 @@ type FUSEReadIn struct {\n//\n// The second part of the payload is the\n// binary bytes of the data to be written.\n+// See FUSEWritePayloadIn that combines header & payload.\n//\n// +marshal\ntype FUSEWriteIn struct {\n@@ -498,6 +545,36 @@ type FUSEWriteIn struct {\n_ uint32\n}\n+// FUSEWritePayloadIn combines header - FUSEWriteIn and payload\n+// in a single marshallable struct when sending request by the\n+// kernel to the daemon\n+//\n+// +marshal dynamic\n+type FUSEWritePayloadIn struct {\n+ Header FUSEWriteIn\n+ Payload primitive.ByteSlice\n+}\n+\n+// SizeBytes implements marshal.Marshallable.SizeBytes.\n+func (r *FUSEWritePayloadIn) SizeBytes() int {\n+ if r == nil {\n+ return (*FUSEWriteIn)(nil).SizeBytes()\n+ }\n+ return r.Header.SizeBytes() + r.Payload.SizeBytes()\n+}\n+\n+// MarshalBytes implements marshal.Marshallable.MarshalBytes.\n+func (r *FUSEWritePayloadIn) MarshalBytes(dst []byte) []byte {\n+ dst = r.Header.MarshalUnsafe(dst)\n+ dst = r.Payload.MarshalUnsafe(dst)\n+ return dst\n+}\n+\n+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.\n+func (r *FUSEWritePayloadIn) UnmarshalBytes(src []byte) []byte {\n+ panic(\"Unimplemented, FUSEWritePayloadIn is never unmarshalled\")\n+}\n+\n// FUSEWriteOut is the payload of the reply sent by the daemon to the kernel\n// for a FUSE_WRITE request.\n//\n@@ -543,6 +620,32 @@ type FUSECreateMeta struct {\n_ uint32\n}\n+// FUSERenameIn sent by the kernel for FUSE_RENAME\n+//\n+// +marshal dynamic\n+type FUSERenameIn struct {\n+ Newdir primitive.Uint64\n+ Oldname CString\n+ Newname CString\n+}\n+\n+// MarshalBytes implements marshal.Marshallable.MarshalBytes.\n+func (r *FUSERenameIn) MarshalBytes(dst []byte) []byte {\n+ dst = r.Newdir.MarshalBytes(dst)\n+ dst = r.Oldname.MarshalBytes(dst)\n+ return r.Newname.MarshalBytes(dst)\n+}\n+\n+// UnmarshalBytes 
implements marshal.Marshallable.UnmarshalBytes.\n+func (r *FUSERenameIn) UnmarshalBytes(buf []byte) []byte {\n+ panic(\"Unimplemented, FUSERmDirIn is never unmarshalled\")\n+}\n+\n+// SizeBytes implements marshal.Marshallable.SizeBytes.\n+func (r *FUSERenameIn) SizeBytes() int {\n+ return r.Newdir.SizeBytes() + r.Oldname.SizeBytes() + r.Newname.SizeBytes()\n+}\n+\n// FUSECreateIn contains all the arguments sent by the kernel to the daemon, to\n// atomically create and open a new regular file.\n//\n@@ -929,3 +1032,16 @@ func (r *FUSEUnlinkIn) UnmarshalBytes(buf []byte) []byte {\nfunc (r *FUSEUnlinkIn) SizeBytes() int {\nreturn r.Name.SizeBytes()\n}\n+\n+// FUSEFsyncIn is the request sent by the kernel to the daemon\n+// when trying to fsync a file.\n+//\n+// +marshal\n+type FUSEFsyncIn struct {\n+ Fh uint64\n+\n+ FsyncFlags uint32\n+\n+ // padding\n+ _ uint32\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/BUILD",
"new_path": "pkg/sentry/fsimpl/fuse/BUILD",
"diff": "@@ -51,6 +51,7 @@ go_library(\n\"//pkg/hostarch\",\n\"//pkg/log\",\n\"//pkg/marshal\",\n+ \"//pkg/marshal/primitive\",\n\"//pkg/refs\",\n\"//pkg/refsvfs2\",\n\"//pkg/safemem\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/dev.go",
"new_path": "pkg/sentry/fsimpl/fuse/dev.go",
"diff": "@@ -168,7 +168,7 @@ func (fd *DeviceFD) readLocked(ctx context.Context, dst usermem.IOSequence, opts\nfor !fd.queue.Empty() {\nreq = fd.queue.Front()\n- if int64(req.hdr.Len)+int64(len(req.payload)) <= dst.NumBytes() {\n+ if int64(req.hdr.Len) <= dst.NumBytes() {\nbreak\n}\n@@ -207,17 +207,6 @@ func (fd *DeviceFD) readLocked(ctx context.Context, dst usermem.IOSequence, opts\nreturn 0, linuxerr.EIO\n}\n- if req.hdr.Opcode == linux.FUSE_WRITE {\n- written, err := dst.DropFirst(n).CopyOut(ctx, req.payload)\n- if err != nil {\n- return 0, err\n- }\n- if written != len(req.payload) {\n- return 0, linuxerr.EIO\n- }\n- n += int(written)\n- }\n-\n// Fully done with this req, remove it from the queue.\nfd.queue.Remove(req)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/file.go",
"new_path": "pkg/sentry/fsimpl/fuse/file.go",
"diff": "@@ -17,6 +17,7 @@ package fuse\nimport (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n@@ -29,7 +30,7 @@ type fileDescription struct {\nvfsfd vfs.FileDescription\nvfs.FileDescriptionDefaultImpl\nvfs.DentryMetadataFileDescriptionImpl\n- vfs.NoLockFD\n+ vfs.LockFD\n// the file handle used in userspace.\nFh uint64\n@@ -127,3 +128,26 @@ func (fd *fileDescription) SetStat(ctx context.Context, opts vfs.SetStatOptions)\ncreds := auth.CredentialsFromContext(ctx)\nreturn fd.inode().setAttr(ctx, fs, creds, opts, true, fd.Fh)\n}\n+\n+// Sync implements vfs.FileDescriptionImpl.Sync.\n+func (fd *fileDescription) Sync(ctx context.Context) error {\n+ if fd.inode().Mode().IsDir() {\n+ return linuxerr.EPERM\n+ }\n+ conn := fd.inode().fs.conn\n+ // no need to proceed if FUSE server doesn't implement Open.\n+ if conn.noOpen {\n+ return linuxerr.EINVAL\n+ }\n+ kernelTask := kernel.TaskFromContext(ctx)\n+\n+ in := linux.FUSEFsyncIn{\n+ Fh: fd.Fh,\n+ FsyncFlags: fd.statusFlags(),\n+ }\n+ // Ignoring errors and FUSE server reply is analogous to Linux's behavior.\n+ req := conn.NewRequest(auth.CredentialsFromContext(ctx), uint32(kernelTask.ThreadID()), fd.inode().nodeID, linux.FUSE_FSYNC, &in)\n+ // The reply will be ignored since no callback is defined in asyncCallBack().\n+ conn.CallAsync(kernelTask, req)\n+ return nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/fusefs.go",
"new_path": "pkg/sentry/fsimpl/fuse/fusefs.go",
"diff": "@@ -26,6 +26,7 @@ import (\n\"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/marshal\"\n+ \"gvisor.dev/gvisor/pkg/marshal/primitive\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n@@ -299,6 +300,15 @@ func (fs *filesystem) MountOptions() string {\nreturn fs.opts.mopts\n}\n+// Fh data returned by newEntry\n+type NewFhData struct {\n+ // file handler\n+ fh uint64\n+\n+ // Flags of the file.\n+ flags uint32\n+}\n+\n// inode implements kernfs.Inode.\n//\n// +stateify savable\n@@ -334,6 +344,10 @@ type inode struct {\n// link is result of following a symbolic link.\nlink string\n+\n+ // if newEntry got a new Fh from server it saves it here, until returned by Open\n+ isNewFh bool\n+ newFhData NewFhData\n}\nfunc (fs *filesystem) newRoot(ctx context.Context, creds *auth.Credentials, mode linux.FileMode) *kernfs.Dentry {\n@@ -410,11 +424,17 @@ func (i *inode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.Dentr\nfd = &(regularFD.fileDescription)\nfdImpl = regularFD\n}\n+ fd.LockFD.Init(&i.locks)\n// FOPEN_KEEP_CACHE is the defualt flag for noOpen.\nfd.OpenFlag = linux.FOPEN_KEEP_CACHE\n// Only send open request when FUSE server support open or is opening a directory.\n- if !i.fs.conn.noOpen || isDir {\n+ if i.isNewFh {\n+ // use Fh from NewEntry\n+ fd.OpenFlag = i.newFhData.flags\n+ fd.Fh = i.newFhData.fh\n+ i.isNewFh = false\n+ } else if !i.fs.conn.noOpen || isDir {\nkernelTask := kernel.TaskFromContext(ctx)\nif kernelTask == nil {\nlog.Warningf(\"fusefs.Inode.Open: couldn't get kernel task from context\")\n@@ -452,13 +472,12 @@ func (i *inode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.Dentr\n// Process the reply.\nfd.OpenFlag = out.OpenFlag\n- if isDir {\n- fd.OpenFlag &= ^uint32(linux.FOPEN_DIRECT_IO)\n- }\n-\nfd.Fh = out.Fh\n}\n}\n+ if isDir {\n+ fd.OpenFlag &= ^uint32(linux.FOPEN_DIRECT_IO)\n+ }\n// TODO(gvisor.dev/issue/3234): invalidate mmap after implemented it for FUSE Inode\nfd.DirectIO = fd.OpenFlag&linux.FOPEN_DIRECT_IO != 0\n@@ -590,6 +609,28 @@ func (i *inode) RmDir(ctx context.Context, name string, child kernfs.Inode) erro\nreturn res.Error()\n}\n+func (i *inode) Rename(ctx context.Context, oldname, newname string, child, dstDir kernfs.Inode) error {\n+ fusefs := i.fs\n+ task, creds := kernel.TaskFromContext(ctx), auth.CredentialsFromContext(ctx)\n+\n+ dstDirInode, ok := dstDir.(*inode)\n+ if !ok {\n+ return linuxerr.EXDEV\n+ }\n+\n+ in := linux.FUSERenameIn{\n+ Newdir: primitive.Uint64(dstDirInode.nodeID),\n+ Oldname: linux.CString(oldname),\n+ Newname: linux.CString(newname),\n+ }\n+ req := fusefs.conn.NewRequest(creds, uint32(task.ThreadID()), i.nodeID, linux.FUSE_RENAME, &in)\n+ res, err := i.fs.conn.Call(task, req)\n+ if err != nil {\n+ return err\n+ }\n+ return res.Error()\n+}\n+\n// newEntry calls FUSE server for entry creation and allocates corresponding entry according to response.\n// Shared by FUSE_MKNOD, FUSE_MKDIR, FUSE_SYMLINK, FUSE_LINK and FUSE_LOOKUP.\nfunc (i *inode) newEntry(ctx context.Context, name string, fileType linux.FileMode, opcode linux.FUSEOpcode, payload marshal.Marshallable) (kernfs.Inode, error) {\n@@ -606,14 +647,31 @@ func (i *inode) newEntry(ctx context.Context, name string, fileType linux.FileMo\nif err := res.Error(); err != nil {\nreturn nil, err\n}\n- out := linux.FUSEEntryOut{}\n+ out := linux.FUSECreateOut{}\n+ if opcode == linux.FUSE_CREATE {\nif err := 
res.UnmarshalPayload(&out); err != nil {\nreturn nil, err\n}\n+ } else {\n+ if err := res.UnmarshalPayload(&out.FUSEEntryOut); err != nil {\n+ return nil, err\n+ }\n+ }\nif opcode != linux.FUSE_LOOKUP && ((out.Attr.Mode&linux.S_IFMT)^uint32(fileType) != 0 || out.NodeID == 0 || out.NodeID == linux.FUSE_ROOT_ID) {\nreturn nil, linuxerr.EIO\n}\nchild := i.fs.newInode(ctx, out.NodeID, out.Attr)\n+ if opcode == linux.FUSE_CREATE {\n+ // File handler is returned by fuse server at a time of file create.\n+ // Save it temporary in a created child, so Open could return it when invoked\n+ // to be sure after fh is consumed reset 'isNewFh' flag of inode\n+ childI, ok := child.(*inode)\n+ if ok {\n+ childI.isNewFh = true\n+ childI.newFhData.fh = out.FUSEOpenOut.Fh\n+ childI.newFhData.flags = out.FUSEOpenOut.OpenFlag\n+ }\n+ }\nreturn child, nil\n}\n@@ -798,8 +856,39 @@ func (i *inode) DecRef(ctx context.Context) {\n// StatFS implements kernfs.Inode.StatFS.\nfunc (i *inode) StatFS(ctx context.Context, fs *vfs.Filesystem) (linux.Statfs, error) {\n- // TODO(gvisor.dev/issues/3413): Complete the implementation of statfs.\n- return vfs.GenericStatFS(linux.FUSE_SUPER_MAGIC), nil\n+ task := kernel.TaskFromContext(ctx)\n+ if task == nil {\n+ log.Warningf(\"couldn't get kernel task from context\")\n+ return linux.Statfs{}, linuxerr.EINVAL\n+ }\n+\n+ req := i.fs.conn.NewRequest(auth.CredentialsFromContext(ctx), uint32(task.ThreadID()), i.nodeID,\n+ linux.FUSE_STATFS, &linux.FUSEEmptyIn{},\n+ )\n+ res, err := i.fs.conn.Call(task, req)\n+ if err != nil {\n+ return linux.Statfs{}, err\n+ }\n+ if err := res.Error(); err != nil {\n+ return linux.Statfs{}, err\n+ }\n+\n+ var out linux.FUSEStatfsOut\n+ if err := res.UnmarshalPayload(&out); err != nil {\n+ return linux.Statfs{}, err\n+ }\n+\n+ return linux.Statfs{\n+ Type: linux.FUSE_SUPER_MAGIC,\n+ Blocks: uint64(out.Blocks),\n+ BlocksFree: out.BlocksFree,\n+ BlocksAvailable: out.BlocksAvailable,\n+ Files: out.Files,\n+ FilesFree: out.FilesFree,\n+ BlockSize: int64(out.BlockSize),\n+ NameLength: uint64(out.NameLength),\n+ FragmentSize: int64(out.FragmentSize),\n+ }, nil\n}\n// fattrMaskFromStats converts vfs.SetStatOptions.Stat.Mask to linux stats mask\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/read_write.go",
"new_path": "pkg/sentry/fsimpl/fuse/read_write.go",
"diff": "@@ -165,7 +165,8 @@ func (fs *filesystem) Write(ctx context.Context, fd *regularFileFD, off uint64,\n}\n// Reuse the same struct for unmarshalling to avoid unnecessary memory allocation.\n- in := linux.FUSEWriteIn{\n+ in := linux.FUSEWritePayloadIn {\n+ Header: linux.FUSEWriteIn{\nFh: fd.Fh,\n// TODO(gvisor.dev/issue/3245): file lock\nLockOwner: 0,\n@@ -173,6 +174,7 @@ func (fs *filesystem) Write(ctx context.Context, fd *regularFileFD, off uint64,\n// TODO(gvisor.dev/issue/3237): |= linux.FUSE_WRITE_CACHE (not added yet)\nWriteFlags: 0,\nFlags: fd.statusFlags(),\n+ },\n}\ninode := fd.inode()\n@@ -197,11 +199,11 @@ func (fs *filesystem) Write(ctx context.Context, fd *regularFileFD, off uint64,\ntoWrite = maxWrite\n}\n- in.Offset = off + uint64(written)\n- in.Size = toWrite\n+ in.Header.Offset = off + uint64(written)\n+ in.Header.Size = toWrite\n+ in.Payload = data[written : written+toWrite]\nreq := fs.conn.NewRequest(auth.CredentialsFromContext(ctx), uint32(t.ThreadID()), inode.nodeID, linux.FUSE_WRITE, &in)\n- req.payload = data[written : written+toWrite]\n// TODO(gvisor.dev/issue/3247): support async write.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/request_response.go",
"new_path": "pkg/sentry/fsimpl/fuse/request_response.go",
"diff": "@@ -95,10 +95,6 @@ type Request struct {\nhdr *linux.FUSEHeaderIn\ndata []byte\n- // payload for this request: extra bytes to write after\n- // the data slice. Used by FUSE_WRITE.\n- payload []byte\n-\n// If this request is async.\nasync bool\n// If we don't care its response.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/vfs/filesystem_type.go",
"new_path": "pkg/sentry/vfs/filesystem_type.go",
"diff": "@@ -17,6 +17,7 @@ package vfs\nimport (\n\"bytes\"\n\"fmt\"\n+ \"strings\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n@@ -102,7 +103,13 @@ func (vfs *VirtualFilesystem) MustRegisterFilesystemType(name string, fsType Fil\nfunc (vfs *VirtualFilesystem) getFilesystemType(name string) *registeredFilesystemType {\nvfs.fsTypesMu.RLock()\ndefer vfs.fsTypesMu.RUnlock()\n- return vfs.fsTypes[name]\n+ fsname := name\n+ // Fetch a meaningful part of name if there is a dot in the name\n+ // and use left part of a string as fname.\n+ if strings.Index(name, \".\") != -1 {\n+ fsname = strings.Split(name, \".\")[0]\n+ }\n+ return vfs.fsTypes[fsname]\n}\n// GenerateProcFilesystems emits the contents of /proc/filesystems for vfs to\n"
},
{
"change_type": "MODIFY",
"old_path": "test/fuse/BUILD",
"new_path": "test/fuse/BUILD",
"diff": "@@ -8,6 +8,10 @@ package(licenses = [\"notice\"])\n# fuse = \"True\",\n# test = \"//test/fuse/linux:stat_test\",\n# )\n+# syscall_test(\n+# fuse = \"True\",\n+# test = \"//test/fuse/linux:statfs_test\",\n+# )\n#\n# syscall_test(\n# fuse = \"True\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/fuse/linux/BUILD",
"new_path": "test/fuse/linux/BUILD",
"diff": "@@ -20,6 +20,21 @@ cc_binary(\n],\n)\n+cc_binary(\n+ name = \"statfs_test\",\n+ testonly = 1,\n+ srcs = [\"statfs_test.cc\"],\n+ deps = [\n+ gtest,\n+ \":fuse_fd_util\",\n+ \"//test/util:cleanup\",\n+ \"//test/util:fs_util\",\n+ \"//test/util:fuse_util\",\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ ],\n+)\n+\ncc_binary(\nname = \"open_test\",\ntestonly = 1,\n"
},
{
"change_type": "MODIFY",
"old_path": "test/fuse/linux/mount_test.cc",
"new_path": "test/fuse/linux/mount_test.cc",
"diff": "@@ -40,6 +40,19 @@ TEST(FuseMount, Success) {\nASSERT_NO_ERRNO_AND_VALUE(Mount(\"\", dir.path(), \"fuse\", 0, mopts, 0));\n}\n+TEST(FuseMount, SuccessFstype) {\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(\"/dev/fuse\", O_WRONLY));\n+ std::string mopts =\n+ absl::StrFormat(\"fd=%d,user_id=%d,group_id=%d,rootmode=0777\", fd.get(),\n+ getuid(), getgid());\n+\n+ const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+\n+ const auto mount =\n+ ASSERT_NO_ERRNO_AND_VALUE(Mount(\"\", dir.path(), \"fuse.testfs\", 0, mopts, 0));\n+}\n+\nTEST(FuseMount, FDNotParsable) {\nint devfd;\nEXPECT_THAT(devfd = open(\"/dev/fuse\", O_RDWR), SyscallSucceeds());\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/fuse/linux/statfs_test.cc",
"diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <errno.h>\n+#include <fcntl.h>\n+#include <linux/fuse.h>\n+#include <linux/magic.h>\n+#include <sys/statfs.h>\n+#include <sys/types.h>\n+#include <sys/uio.h>\n+#include <sys/vfs.h>\n+#include <unistd.h>\n+\n+#include <vector>\n+#include <iostream>\n+\n+#include \"gtest/gtest.h\"\n+#include \"test/fuse/linux/fuse_fd_util.h\"\n+#include \"test/util/cleanup.h\"\n+#include \"test/util/fs_util.h\"\n+#include \"test/util/fuse_util.h\"\n+#include \"test/util/test_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+namespace {\n+\n+#define FUSE_SUPER_MAGIC 0x65735546\n+\n+class StatfsTest : public FuseFdTest {\n+ public:\n+ void SetUp() override {\n+ FuseFdTest::SetUp();\n+ }\n+\n+ protected:\n+ const mode_t dir_mode_ = S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO;\n+ bool StatsfsAreEqual(struct statfs expected, struct statfs actual) {\n+ return memcmp(&expected, &actual, sizeof(struct statfs)) == 0;\n+ }\n+\n+ const mode_t expected_mode = S_IFREG | S_IRUSR | S_IWUSR;\n+ const uint64_t fh = 23;\n+};\n+\n+TEST_F(StatfsTest, StatfsNormal) {\n+ SetServerInodeLookup(mount_point_.path(), dir_mode_);\n+\n+ struct fuse_out_header out_header = {\n+ .len = sizeof(struct fuse_out_header) + sizeof(struct fuse_statfs_out),\n+ };\n+ struct fuse_statfs_out out_payload = {\n+ .st = fuse_kstatfs {\n+ .blocks = 0x6000,\n+ .bfree = 0x6000,\n+ .bavail = 0x6000,\n+ .bsize = 4096,\n+ .namelen = 0x10000,\n+ },\n+ };\n+ auto iov_out = FuseGenerateIovecs(out_header, out_payload);\n+ SetServerResponse(FUSE_STATFS, iov_out);\n+\n+ // Make syscall.\n+ struct statfs st;\n+ EXPECT_THAT(statfs(mount_point_.path().c_str(), &st), SyscallSucceeds());\n+\n+ // Check filesystem operation result.\n+ struct statfs expected_stat = {\n+ .f_type = FUSE_SUPER_MAGIC,\n+ .f_bsize = out_payload.st.bsize,\n+ .f_blocks = out_payload.st.blocks,\n+ .f_bfree = out_payload.st.bfree,\n+ .f_bavail = out_payload.st.bavail,\n+ .f_namelen = out_payload.st.namelen,\n+ };\n+ EXPECT_TRUE(StatsfsAreEqual(st, expected_stat));\n+\n+ // Check FUSE request.\n+ struct fuse_in_header in_header;\n+ auto iov_in = FuseGenerateIovecs(in_header);\n+\n+ GetServerActualRequest(iov_in);\n+ EXPECT_EQ(in_header.opcode, FUSE_STATFS);\n+}\n+\n+TEST_F(StatfsTest, NotFound) {\n+ struct fuse_out_header out_header = {\n+ .len = sizeof(struct fuse_out_header),\n+ .error = -ENOENT,\n+ };\n+ auto iov_out = FuseGenerateIovecs(out_header);\n+ SetServerResponse(FUSE_STATFS, iov_out);\n+\n+ // Make syscall.\n+ struct statfs statfs_buf;\n+ EXPECT_THAT(statfs(mount_point_.path().c_str(), &statfs_buf),\n+ SyscallFailsWithErrno(ENOENT));\n+\n+ // Check FUSE request.\n+ struct fuse_in_header in_header;\n+ auto iov_in = FuseGenerateIovecs(in_header);\n+\n+ GetServerActualRequest(iov_in);\n+ EXPECT_EQ(in_header.opcode, FUSE_STATFS);\n+}\n+\n+} // namespace\n+\n+} // namespace testing\n+} // namespace gvisor\n"
},
{
"change_type": "MODIFY",
"old_path": "test/fuse/linux/write_test.cc",
"new_path": "test/fuse/linux/write_test.cc",
"diff": "@@ -120,7 +120,7 @@ TEST_F(WriteTest, WriteNormal) {\nEXPECT_EQ(in_payload_write.fh, test_fh_);\nEXPECT_EQ(in_header_write.len,\n- sizeof(in_header_write) + sizeof(in_payload_write));\n+ sizeof(in_header_write) + sizeof(in_payload_write) + n_write);\nEXPECT_EQ(in_header_write.opcode, FUSE_WRITE);\nEXPECT_EQ(in_payload_write.offset, 0);\nEXPECT_EQ(in_payload_write.size, n_write);\n@@ -157,7 +157,7 @@ TEST_F(WriteTest, WriteShort) {\nEXPECT_EQ(in_payload_write.fh, test_fh_);\nEXPECT_EQ(in_header_write.len,\n- sizeof(in_header_write) + sizeof(in_payload_write));\n+ sizeof(in_header_write) + sizeof(in_payload_write) + n_write);\nEXPECT_EQ(in_header_write.opcode, FUSE_WRITE);\nEXPECT_EQ(in_payload_write.offset, 0);\nEXPECT_EQ(in_payload_write.size, n_write);\n@@ -193,7 +193,7 @@ TEST_F(WriteTest, WriteShortZero) {\nEXPECT_EQ(in_payload_write.fh, test_fh_);\nEXPECT_EQ(in_header_write.len,\n- sizeof(in_header_write) + sizeof(in_payload_write));\n+ sizeof(in_header_write) + sizeof(in_payload_write) + n_write);\nEXPECT_EQ(in_header_write.opcode, FUSE_WRITE);\nEXPECT_EQ(in_payload_write.offset, 0);\nEXPECT_EQ(in_payload_write.size, n_write);\n@@ -240,7 +240,7 @@ TEST_F(WriteTest, PWrite) {\nEXPECT_EQ(in_payload_write.fh, test_fh_);\nEXPECT_EQ(in_header_write.len,\n- sizeof(in_header_write) + sizeof(in_payload_write));\n+ sizeof(in_header_write) + sizeof(in_payload_write) + n_write);\nEXPECT_EQ(in_header_write.opcode, FUSE_WRITE);\nEXPECT_EQ(in_payload_write.offset, offset_write);\nEXPECT_EQ(in_payload_write.size, n_write);\n@@ -287,7 +287,7 @@ TEST_F(WriteTestSmallMaxWrite, WriteSmallMaxWrie) {\nEXPECT_EQ(in_payload_write.fh, test_fh_);\nEXPECT_EQ(in_header_write.len,\n- sizeof(in_header_write) + sizeof(in_payload_write));\n+ sizeof(in_header_write) + sizeof(in_payload_write) + size_fragment);\nEXPECT_EQ(in_header_write.opcode, FUSE_WRITE);\nEXPECT_EQ(in_payload_write.offset, i * size_fragment);\nEXPECT_EQ(in_payload_write.size, size_fragment);\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fixes #7086, #6964, #3413, #7001.
Also adds fuse fsync, rename, flock support. |
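The write_test changes in this record assert that a FUSE_WRITE request's `fuse_in_header.len` counts the payload bytes, not just the two fixed structs. A minimal sketch of that length accounting, written in Go rather than the C++ of the tests; the struct sizes are hard-coded from the Linux FUSE ABI and `writeRequestLen` is an illustrative name, not a gVisor function:

```go
package main

import "fmt"

// Fixed wire sizes from the Linux FUSE ABI (v7.9+):
// struct fuse_in_header and struct fuse_write_in are 40 bytes each.
const (
	fuseInHeaderLen = 40
	fuseWriteInLen  = 40
)

// writeRequestLen computes the value expected in fuse_in_header.len for a
// FUSE_WRITE carrying n payload bytes: header + write-in struct + the data
// itself, which is what the updated tests now check.
func writeRequestLen(n uint32) uint32 {
	return fuseInHeaderLen + fuseWriteInLen + n
}

func main() {
	fmt.Println(writeRequestLen(512)) // 40 + 40 + 512 = 592
}
```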
259,962 | 27.01.2022 17:14:39 | 28,800 | 2083e858ad8de9652b2e5625442899dd1c4cb459 | Add support to provide a sharedmem path to sharedmem. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/queuepair.go",
"new_path": "pkg/tcpip/link/sharedmem/queuepair.go",
"diff": "@@ -26,11 +26,11 @@ import (\n)\nconst (\n- // defaultQueueDataSize is the size of the shared memory data region that\n+ // DefaultQueueDataSize is the size of the shared memory data region that\n// holds the scatter/gather buffers.\n- defaultQueueDataSize = 1 << 20 // 1MiB\n+ DefaultQueueDataSize = 1 << 20 // 1MiB\n- // defaultQueuePipeSize is the size of the pipe that holds the packet descriptors.\n+ // DefaultQueuePipeSize is the size of the pipe that holds the packet descriptors.\n//\n// Assuming each packet data is approximately 1280 bytes (IPv6 Minimum MTU)\n// then we can hold approximately 1024*1024/1280 ~ 819 packets in the data\n@@ -45,17 +45,21 @@ const (\n// descriptors. We could go with a 32 KiB pipe but to give it some slack in\n// how the upper layer may make use of the scatter gather buffers we double\n// this to hold enough descriptors.\n- defaultQueuePipeSize = 64 << 10 // 64KiB\n+ DefaultQueuePipeSize = 64 << 10 // 64KiB\n- // defaultSharedDataSize is the size of the sharedData region used to\n+ // DefaultSharedDataSize is the size of the sharedData region used to\n// enable/disable notifications.\n- defaultSharedDataSize = 4 << 10 // 4KiB\n+ DefaultSharedDataSize = 4 << 10 // 4KiB\n// DefaultBufferSize is the size of each individual buffer that the data\n// region is broken down into to hold packet data. Should be larger than\n// 1500 + 14 (Ethernet header) + 10 (VirtIO header) to fit each packet\n// in a single buffer.\nDefaultBufferSize = 2048\n+\n+ // DefaultTmpDir is the path used to create the memory files if a path\n+ // is not provided.\n+ DefaultTmpDir = \"/dev/shm\"\n)\n// A QueuePair represents a pair of TX/RX queues.\n@@ -67,24 +71,34 @@ type QueuePair struct {\nrxCfg QueueConfig\n}\n+// QueueOptions allows queue specific configuration to be specified when\n+// creating a QueuePair.\n+type QueueOptions struct {\n+ // sharedMemPath is the path to use to create the shared memory backing\n+ // files for the queue.\n+ //\n+ // If unspecified it defaults to \"/dev/shm\".\n+ sharedMemPath string\n+}\n+\n// NewQueuePair creates a shared memory QueuePair.\n-func NewQueuePair() (*QueuePair, error) {\n- txCfg, err := createQueueFDs(queueSizes{\n- dataSize: defaultQueueDataSize,\n- txPipeSize: defaultQueuePipeSize,\n- rxPipeSize: defaultQueuePipeSize,\n- sharedDataSize: defaultSharedDataSize,\n+func NewQueuePair(opts QueueOptions) (*QueuePair, error) {\n+ txCfg, err := createQueueFDs(opts.sharedMemPath, queueSizes{\n+ dataSize: DefaultQueueDataSize,\n+ txPipeSize: DefaultQueuePipeSize,\n+ rxPipeSize: DefaultQueuePipeSize,\n+ sharedDataSize: DefaultSharedDataSize,\n})\nif err != nil {\nreturn nil, fmt.Errorf(\"failed to create tx queue: %s\", err)\n}\n- rxCfg, err := createQueueFDs(queueSizes{\n- dataSize: defaultQueueDataSize,\n- txPipeSize: defaultQueuePipeSize,\n- rxPipeSize: defaultQueuePipeSize,\n- sharedDataSize: defaultSharedDataSize,\n+ rxCfg, err := createQueueFDs(opts.sharedMemPath, queueSizes{\n+ dataSize: DefaultQueueDataSize,\n+ txPipeSize: DefaultQueuePipeSize,\n+ rxPipeSize: DefaultQueuePipeSize,\n+ sharedDataSize: DefaultSharedDataSize,\n})\nif err != nil {\n@@ -121,7 +135,7 @@ type queueSizes struct {\nsharedDataSize int64\n}\n-func createQueueFDs(s queueSizes) (QueueConfig, error) {\n+func createQueueFDs(sharedMemPath string, s queueSizes) (QueueConfig, error) {\nsuccess := false\nvar eventFD eventfd.Eventfd\nvar dataFD, txPipeFD, rxPipeFD, sharedDataFD int\n@@ -141,19 +155,19 @@ func createQueueFDs(s queueSizes) (QueueConfig, error) {\nif err 
!= nil {\nreturn QueueConfig{}, fmt.Errorf(\"eventfd failed: %v\", err)\n}\n- dataFD, err = createFile(s.dataSize, false)\n+ dataFD, err = createFile(sharedMemPath, s.dataSize, false)\nif err != nil {\nreturn QueueConfig{}, fmt.Errorf(\"failed to create dataFD: %s\", err)\n}\n- txPipeFD, err = createFile(s.txPipeSize, true)\n+ txPipeFD, err = createFile(sharedMemPath, s.txPipeSize, true)\nif err != nil {\nreturn QueueConfig{}, fmt.Errorf(\"failed to create txPipeFD: %s\", err)\n}\n- rxPipeFD, err = createFile(s.rxPipeSize, true)\n+ rxPipeFD, err = createFile(sharedMemPath, s.rxPipeSize, true)\nif err != nil {\nreturn QueueConfig{}, fmt.Errorf(\"failed to create rxPipeFD: %s\", err)\n}\n- sharedDataFD, err = createFile(s.sharedDataSize, false)\n+ sharedDataFD, err = createFile(sharedMemPath, s.sharedDataSize, false)\nif err != nil {\nreturn QueueConfig{}, fmt.Errorf(\"failed to create sharedDataFD: %s\", err)\n}\n@@ -167,8 +181,11 @@ func createQueueFDs(s queueSizes) (QueueConfig, error) {\n}, nil\n}\n-func createFile(size int64, initQueue bool) (fd int, err error) {\n- const tmpDir = \"/dev/shm/\"\n+func createFile(sharedMemPath string, size int64, initQueue bool) (fd int, err error) {\n+ var tmpDir = DefaultTmpDir\n+ if sharedMemPath != \"\" {\n+ tmpDir = sharedMemPath\n+ }\nf, err := ioutil.TempFile(tmpDir, \"sharedmem_test\")\nif err != nil {\nreturn -1, fmt.Errorf(\"TempFile failed: %v\", err)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem_server_test.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem_server_test.go",
"diff": "@@ -162,7 +162,7 @@ func newTestContext(t *testing.T) *testContext {\nif err != nil {\nt.Fatalf(\"failed to create peerFDs: %s\", err)\n}\n- q, err := sharedmem.NewQueuePair()\n+ q, err := sharedmem.NewQueuePair(sharedmem.QueueOptions{})\nif err != nil {\nt.Fatalf(\"failed to create sharedmem queue: %s\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem_test.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem_test.go",
"diff": "@@ -104,7 +104,7 @@ func newTestContext(t *testing.T, mtu, bufferSize uint32, addr tcpip.LinkAddress\nt: t,\npacketCh: make(chan struct{}, 1000000),\n}\n- c.txCfg, err = createQueueFDs(queueSizes{\n+ c.txCfg, err = createQueueFDs(\"\" /* sharedMemPath */, queueSizes{\ndataSize: queueDataSize,\ntxPipeSize: queuePipeSize,\nrxPipeSize: queuePipeSize,\n@@ -113,7 +113,7 @@ func newTestContext(t *testing.T, mtu, bufferSize uint32, addr tcpip.LinkAddress\nif err != nil {\nt.Fatalf(\"createQueueFDs for tx failed: %s\", err)\n}\n- c.rxCfg, err = createQueueFDs(queueSizes{\n+ c.rxCfg, err = createQueueFDs(\"\" /* sharedMemPath */, queueSizes{\ndataSize: queueDataSize,\ntxPipeSize: queuePipeSize,\nrxPipeSize: queuePipeSize,\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add support to provide a sharedmem path to sharedmem.
PiperOrigin-RevId: 424744312 |
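A minimal sketch of the fallback this commit adds in `createFile`: honor a caller-supplied directory for the shared-memory backing files, otherwise use `/dev/shm`. Note that `QueueOptions.sharedMemPath` is unexported, so callers outside the package get the default; `createBackingFile` below is a hypothetical helper that only mirrors the pattern (the real code also initializes pipe contents and unlinks the file).

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// defaultTmpDir mirrors sharedmem.DefaultTmpDir introduced above.
const defaultTmpDir = "/dev/shm"

// createBackingFile creates and sizes a shared-memory backing file,
// preferring the configured path and defaulting to /dev/shm.
func createBackingFile(sharedMemPath string, size int64) (*os.File, error) {
	dir := defaultTmpDir
	if sharedMemPath != "" {
		dir = sharedMemPath
	}
	f, err := ioutil.TempFile(dir, "sharedmem_example")
	if err != nil {
		return nil, fmt.Errorf("TempFile failed: %v", err)
	}
	if err := f.Truncate(size); err != nil {
		f.Close()
		return nil, err
	}
	return f, nil
}

func main() {
	f, err := createBackingFile("" /* sharedMemPath */, 1<<20)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	fmt.Println("backing file:", f.Name())
}
```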
259,858 | 28.01.2022 14:05:00 | 28,800 | 62665f881df71583fcfb22657f5af0ec8f9790c9 | Remove unnecessary use of _impl pkg rules.
Updates bazelbuild/rules_pkg#263 | [
{
"change_type": "MODIFY",
"old_path": "WORKSPACE",
"new_path": "WORKSPACE",
"diff": "@@ -585,8 +585,11 @@ rbe_autoconfig(name = \"rbe_default\")\nhttp_archive(\nname = \"rules_pkg\",\n- sha256 = \"353b20e8b093d42dd16889c7f918750fb8701c485ac6cceb69a5236500507c27\",\n- url = \"https://github.com/bazelbuild/rules_pkg/releases/download/0.5.0/rules_pkg-0.5.0.tar.gz\",\n+ urls = [\n+ \"https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.6.0/rules_pkg-0.6.0.tar.gz\",\n+ \"https://github.com/bazelbuild/rules_pkg/releases/download/0.6.0/rules_pkg-0.6.0.tar.gz\",\n+ ],\n+ sha256 = \"62eeb544ff1ef41d786e329e1536c1d541bb9bcad27ae984d57f18f314018e66\",\n)\nload(\"@rules_pkg//:deps.bzl\", \"rules_pkg_dependencies\")\n"
},
{
"change_type": "MODIFY",
"old_path": "debian/BUILD",
"new_path": "debian/BUILD",
"diff": "@@ -23,7 +23,6 @@ pkg_tar(\npkg_deb(\nname = \"debian\",\n- out = \"runsc-latest.deb\",\narchitecture = select_arch(\namd64 = \"amd64\",\narm64 = \"arm64\",\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/bazeldefs/pkg.bzl",
"new_path": "tools/bazeldefs/pkg.bzl",
"diff": "\"\"\"Packaging rules.\"\"\"\n-# N.B. We refer to pkg_deb_impl to avoid the macro, which cannot use select.\n-load(\"@rules_pkg//:pkg.bzl\", _pkg_deb = \"pkg_deb_impl\", _pkg_tar = \"pkg_tar\")\n+load(\"@rules_pkg//pkg:pkg.bzl\", _pkg_deb = \"pkg_deb\", _pkg_tar = \"pkg_tar\")\npkg_deb = _pkg_deb\npkg_tar = _pkg_tar\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove unnecessary use of _impl pkg rules.
Updates bazelbuild/rules_pkg#263
PiperOrigin-RevId: 424953384 |
259,907 | 28.01.2022 15:34:38 | 28,800 | e29fd32d0a4ef4b8d1386e8d3b12b79e10749fb4 | Get rid of unnecessary lisafs.Inode allocations.
lisafs.Inode is a heavy struct with linux.Statx in it. However the cost of
copying it on return is lower than that of an allocation.
Additionally unclutter the filesystem.doCreateAt function signature. It already
is quite complex. lisafs had added more complexity earlier. Revert that. | [
{
"change_type": "MODIFY",
"old_path": "pkg/lisafs/client.go",
"new_path": "pkg/lisafs/client.go",
"diff": "@@ -72,7 +72,7 @@ type Client struct {\n// the server and creates channels for fast IPC. NewClient takes ownership over\n// the passed socket. On success, it returns the initialized client along with\n// the root Inode.\n-func NewClient(sock *unet.Socket) (*Client, *Inode, error) {\n+func NewClient(sock *unet.Socket) (*Client, Inode, error) {\nmaxChans := maxChannels()\nc := &Client{\nsockComm: newSockComm(sock),\n@@ -99,7 +99,7 @@ func NewClient(sock *unet.Socket) (*Client, *Inode, error) {\nc.supported[Mount] = true\nvar mountResp MountResp\nif err := c.SndRcvMessage(Mount, 0, NoopMarshal, mountResp.CheckedUnmarshal, nil); err != nil {\n- return nil, nil, err\n+ return nil, Inode{}, err\n}\n// Initialize client.\n@@ -142,12 +142,12 @@ func NewClient(sock *unet.Socket) (*Client, *Inode, error) {\nfor _, channelErr := range channelErrs {\n// Return the first non-nil channel creation error.\nif channelErr != nil {\n- return nil, nil, channelErr\n+ return nil, Inode{}, channelErr\n}\n}\ncu.Release()\n- return c, &mountResp.Root, nil\n+ return c, mountResp.Root, nil\n}\nfunc (c *Client) watchdog() {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/lisafs/client_file.go",
"new_path": "pkg/lisafs/client_file.go",
"diff": "@@ -212,7 +212,7 @@ func (f *ClientFD) Write(ctx context.Context, src []byte, offset uint64) (uint64\n}\n// MkdirAt makes the MkdirAt RPC.\n-func (f *ClientFD) MkdirAt(ctx context.Context, name string, mode linux.FileMode, uid UID, gid GID) (*Inode, error) {\n+func (f *ClientFD) MkdirAt(ctx context.Context, name string, mode linux.FileMode, uid UID, gid GID) (Inode, error) {\nvar req MkdirAtReq\nreq.DirFD = f.fd\nreq.Name = SizedString(name)\n@@ -224,11 +224,11 @@ func (f *ClientFD) MkdirAt(ctx context.Context, name string, mode linux.FileMode\nctx.UninterruptibleSleepStart(false)\nerr := f.client.SndRcvMessage(MkdirAt, uint32(req.SizeBytes()), req.MarshalBytes, resp.CheckedUnmarshal, nil)\nctx.UninterruptibleSleepFinish(false)\n- return &resp.ChildDir, err\n+ return resp.ChildDir, err\n}\n// SymlinkAt makes the SymlinkAt RPC.\n-func (f *ClientFD) SymlinkAt(ctx context.Context, name, target string, uid UID, gid GID) (*Inode, error) {\n+func (f *ClientFD) SymlinkAt(ctx context.Context, name, target string, uid UID, gid GID) (Inode, error) {\nreq := SymlinkAtReq{\nDirFD: f.fd,\nName: SizedString(name),\n@@ -241,11 +241,11 @@ func (f *ClientFD) SymlinkAt(ctx context.Context, name, target string, uid UID,\nctx.UninterruptibleSleepStart(false)\nerr := f.client.SndRcvMessage(SymlinkAt, uint32(req.SizeBytes()), req.MarshalBytes, resp.CheckedUnmarshal, nil)\nctx.UninterruptibleSleepFinish(false)\n- return &resp.Symlink, err\n+ return resp.Symlink, err\n}\n// LinkAt makes the LinkAt RPC.\n-func (f *ClientFD) LinkAt(ctx context.Context, targetFD FDID, name string) (*Inode, error) {\n+func (f *ClientFD) LinkAt(ctx context.Context, targetFD FDID, name string) (Inode, error) {\nreq := LinkAtReq{\nDirFD: f.fd,\nTarget: targetFD,\n@@ -256,11 +256,11 @@ func (f *ClientFD) LinkAt(ctx context.Context, targetFD FDID, name string) (*Ino\nctx.UninterruptibleSleepStart(false)\nerr := f.client.SndRcvMessage(LinkAt, uint32(req.SizeBytes()), req.MarshalBytes, resp.CheckedUnmarshal, nil)\nctx.UninterruptibleSleepFinish(false)\n- return &resp.Link, err\n+ return resp.Link, err\n}\n// MknodAt makes the MknodAt RPC.\n-func (f *ClientFD) MknodAt(ctx context.Context, name string, mode linux.FileMode, uid UID, gid GID, minor, major uint32) (*Inode, error) {\n+func (f *ClientFD) MknodAt(ctx context.Context, name string, mode linux.FileMode, uid UID, gid GID, minor, major uint32) (Inode, error) {\nvar req MknodAtReq\nreq.DirFD = f.fd\nreq.Name = SizedString(name)\n@@ -274,7 +274,7 @@ func (f *ClientFD) MknodAt(ctx context.Context, name string, mode linux.FileMode\nctx.UninterruptibleSleepStart(false)\nerr := f.client.SndRcvMessage(MknodAt, uint32(req.SizeBytes()), req.MarshalBytes, resp.CheckedUnmarshal, nil)\nctx.UninterruptibleSleepFinish(false)\n- return &resp.Child, err\n+ return resp.Child, err\n}\n// SetStat makes the SetStat RPC.\n@@ -318,7 +318,7 @@ func (f *ClientFD) WalkMultiple(ctx context.Context, names []string) (WalkStatus\n}\n// Walk makes the Walk RPC with just one path component to walk.\n-func (f *ClientFD) Walk(ctx context.Context, name string) (*Inode, error) {\n+func (f *ClientFD) Walk(ctx context.Context, name string) (Inode, error) {\nreq := WalkReq{\nDirFD: f.fd,\nPath: []string{name},\n@@ -330,15 +330,15 @@ func (f *ClientFD) Walk(ctx context.Context, name string) (*Inode, error) {\nerr := f.client.SndRcvMessage(Walk, uint32(req.SizeBytes()), req.MarshalBytes, resp.CheckedUnmarshal, nil)\nctx.UninterruptibleSleepFinish(false)\nif err != nil {\n- return nil, err\n+ return Inode{}, 
err\n}\nswitch resp.Status {\ncase WalkComponentDoesNotExist:\n- return nil, unix.ENOENT\n+ return Inode{}, unix.ENOENT\ncase WalkComponentSymlink:\n// f is not a directory which can be walked on.\n- return nil, unix.ENOTDIR\n+ return Inode{}, unix.ENOTDIR\n}\nif n := len(resp.Inodes); n > 1 {\n@@ -346,12 +346,12 @@ func (f *ClientFD) Walk(ctx context.Context, name string) (*Inode, error) {\nf.client.CloseFDBatched(ctx, resp.Inodes[i].ControlFD)\n}\nlog.Warningf(\"requested to walk one component, but got %d results\", n)\n- return nil, unix.EIO\n+ return Inode{}, unix.EIO\n} else if n == 0 {\nlog.Warningf(\"walk has success status but no results returned\")\n- return nil, unix.ENOENT\n+ return Inode{}, unix.ENOENT\n}\n- return &inode[0], err\n+ return inode[0], err\n}\n// WalkStat makes the WalkStat RPC with multiple path components to walk.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/directory.go",
"new_path": "pkg/sentry/fsimpl/gofer/directory.go",
"diff": "@@ -22,6 +22,7 @@ import (\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n\"gvisor.dev/gvisor/pkg/hostarch\"\n+ \"gvisor.dev/gvisor/pkg/lisafs\"\n\"gvisor.dev/gvisor/pkg/p9\"\n\"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n@@ -35,6 +36,25 @@ func (d *dentry) isDir() bool {\nreturn d.fileType() == linux.S_IFDIR\n}\n+// Preconditions:\n+// - filesystem.renameMu must be locked.\n+// - d.dirMu must be locked.\n+// - d.isDir().\n+// - child must be a newly-created dentry that has never had a parent.\n+func (d *dentry) insertCreatedChildLocked(ctx context.Context, childIno *lisafs.Inode, childName string, updateChild func(child *dentry), ds **[]*dentry) error {\n+ child, err := d.fs.newDentryLisa(ctx, childIno)\n+ if err != nil {\n+ d.fs.clientLisa.CloseFDBatched(ctx, childIno.ControlFD)\n+ return err\n+ }\n+ d.cacheNewChildLocked(child, childName)\n+ appendNewChildDentry(ds, d, child)\n+ if updateChild != nil {\n+ updateChild(child)\n+ }\n+ return nil\n+}\n+\n// Preconditions:\n// * filesystem.renameMu must be locked.\n// * d.dirMu must be locked.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"new_path": "pkg/sentry/fsimpl/gofer/filesystem.go",
"diff": "@@ -388,7 +388,7 @@ func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name s\nreturn nil, err\n}\n// Create a new dentry representing the file.\n- child, err = fs.newDentryLisa(ctx, childInode)\n+ child, err = fs.newDentryLisa(ctx, &childInode)\nif err != nil {\nfs.clientLisa.CloseFDBatched(ctx, childInode.ControlFD)\nreturn nil, err\n@@ -482,7 +482,7 @@ func (fs *filesystem) resolveLocked(ctx context.Context, rp *vfs.ResolvingPath,\n// Preconditions:\n// * !rp.Done().\n// * For the final path component in rp, !rp.ShouldFollowSymlink().\n-func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir bool, createInRemoteDir func(parent *dentry, name string, ds **[]*dentry) (*lisafs.Inode, error), createInSyntheticDir func(parent *dentry, name string) error, updateChild func(child *dentry)) error {\n+func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir bool, createInRemoteDir func(parent *dentry, name string, ds **[]*dentry) error, createInSyntheticDir func(parent *dentry, name string) error) error {\nvar ds *[]*dentry\nfs.renameMu.RLock()\ndefer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)\n@@ -569,26 +569,9 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir\n// No cached dentry exists; however, in InteropModeShared there might still be\n// an existing file at name. Just attempt the file creation RPC anyways. If a\n// file does exist, the RPC will fail with EEXIST like we would have.\n- lisaInode, err := createInRemoteDir(parent, name, &ds)\n- if err != nil {\n- return err\n- }\n- // lisafs may aggresively cache newly created inodes. This has helped reduce\n- // Walk RPCs in practice.\n- if lisaInode != nil {\n- child, err := fs.newDentryLisa(ctx, lisaInode)\n- if err != nil {\n- fs.clientLisa.CloseFDBatched(ctx, lisaInode.ControlFD)\n+ if err := createInRemoteDir(parent, name, &ds); err != nil {\nreturn err\n}\n- parent.cacheNewChildLocked(child, name)\n- appendNewChildDentry(&ds, parent, child)\n-\n- // lisafs may update dentry properties upon successful creation.\n- if updateChild != nil {\n- updateChild(child)\n- }\n- }\nif fs.opts.interop != InteropModeShared {\nif child, ok := parent.children[name]; ok && child == nil {\n// Delete the now-stale negative dentry.\n@@ -833,31 +816,35 @@ func (fs *filesystem) GetParentDentryAt(ctx context.Context, rp *vfs.ResolvingPa\n// LinkAt implements vfs.FilesystemImpl.LinkAt.\nfunc (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.VirtualDentry) error {\n- err := fs.doCreateAt(ctx, rp, false /* dir */, func(parent *dentry, childName string, ds **[]*dentry) (*lisafs.Inode, error) {\n+ err := fs.doCreateAt(ctx, rp, false /* dir */, func(parent *dentry, childName string, ds **[]*dentry) error {\nif rp.Mount() != vd.Mount() {\n- return nil, linuxerr.EXDEV\n+ return linuxerr.EXDEV\n}\nd := vd.Dentry().Impl().(*dentry)\nif d.isDir() {\n- return nil, linuxerr.EPERM\n+ return linuxerr.EPERM\n}\ngid := auth.KGID(atomic.LoadUint32(&d.gid))\nuid := auth.KUID(atomic.LoadUint32(&d.uid))\nmode := linux.FileMode(atomic.LoadUint32(&d.mode))\nif err := vfs.MayLink(rp.Credentials(), mode, uid, gid); err != nil {\n- return nil, err\n+ return err\n}\nif d.nlink == 0 {\n- return nil, linuxerr.ENOENT\n+ return linuxerr.ENOENT\n}\nif d.nlink == math.MaxUint32 {\n- return nil, linuxerr.EMLINK\n+ return linuxerr.EMLINK\n}\nif fs.opts.lisaEnabled {\n- return parent.controlFDLisa.LinkAt(ctx, d.controlFDLisa.ID(), childName)\n+ linkInode, err 
:= parent.controlFDLisa.LinkAt(ctx, d.controlFDLisa.ID(), childName)\n+ if err != nil {\n+ return err\n+ }\n+ return parent.insertCreatedChildLocked(ctx, &linkInode, childName, nil, ds)\n}\n- return nil, parent.file.link(ctx, d.file, childName)\n- }, nil, nil)\n+ return parent.file.link(ctx, d.file, childName)\n+ }, nil)\nif err == nil {\n// Success!\n@@ -869,7 +856,7 @@ func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.\n// MkdirAt implements vfs.FilesystemImpl.MkdirAt.\nfunc (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MkdirOptions) error {\ncreds := rp.Credentials()\n- return fs.doCreateAt(ctx, rp, true /* dir */, func(parent *dentry, name string, ds **[]*dentry) (*lisafs.Inode, error) {\n+ return fs.doCreateAt(ctx, rp, true /* dir */, func(parent *dentry, name string, ds **[]*dentry) error {\n// If the parent is a setgid directory, use the parent's GID\n// rather than the caller's and enable setgid.\nkgid := creds.EffectiveKGID\n@@ -878,12 +865,15 @@ func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\nkgid = auth.KGID(atomic.LoadUint32(&parent.gid))\nmode |= linux.S_ISGID\n}\n- var (\n- childDirInode *lisafs.Inode\n- err error\n- )\n+ var err error\nif fs.opts.lisaEnabled {\n+ var childDirInode lisafs.Inode\nchildDirInode, err = parent.controlFDLisa.MkdirAt(ctx, name, mode, lisafs.UID(creds.EffectiveKUID), lisafs.GID(kgid))\n+ if err == nil {\n+ if err = parent.insertCreatedChildLocked(ctx, &childDirInode, name, nil, ds); err != nil {\n+ return err\n+ }\n+ }\n} else {\n_, err = parent.file.mkdir(ctx, name, p9.FileMode(mode), (p9.UID)(creds.EffectiveKUID), p9.GID(kgid))\n}\n@@ -891,11 +881,11 @@ func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\nif fs.opts.interop != InteropModeShared {\nparent.incLinks()\n}\n- return childDirInode, nil\n+ return nil\n}\nif !opts.ForSyntheticMountpoint || linuxerr.Equals(linuxerr.EEXIST, err) {\n- return nil, err\n+ return err\n}\nctx.Infof(\"Failed to create remote directory %q: %v; falling back to synthetic directory\", name, err)\nparent.createSyntheticChildLocked(&createSyntheticOpts{\n@@ -908,7 +898,7 @@ func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\nif fs.opts.interop != InteropModeShared {\nparent.incLinks()\n}\n- return nil, nil\n+ return nil\n}, func(parent *dentry, name string) error {\nif !opts.ForSyntheticMountpoint {\n// Can't create non-synthetic files in synthetic directories.\n@@ -922,26 +912,27 @@ func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\n})\nparent.incLinks()\nreturn nil\n- }, nil)\n+ })\n}\n// MknodAt implements vfs.FilesystemImpl.MknodAt.\nfunc (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MknodOptions) error {\n- return fs.doCreateAt(ctx, rp, false /* dir */, func(parent *dentry, name string, ds **[]*dentry) (*lisafs.Inode, error) {\n+ return fs.doCreateAt(ctx, rp, false /* dir */, func(parent *dentry, name string, ds **[]*dentry) error {\ncreds := rp.Credentials()\n- var (\n- childInode *lisafs.Inode\n- err error\n- )\n+ var err error\nif fs.opts.lisaEnabled {\n+ var childInode lisafs.Inode\nchildInode, err = parent.controlFDLisa.MknodAt(ctx, name, opts.Mode, lisafs.UID(creds.EffectiveKUID), lisafs.GID(creds.EffectiveKGID), opts.DevMinor, opts.DevMajor)\n+ if err == nil {\n+ return parent.insertCreatedChildLocked(ctx, &childInode, name, nil, ds)\n+ }\n} else {\n_, err = parent.file.mknod(ctx, name, 
(p9.FileMode)(opts.Mode), opts.DevMajor, opts.DevMinor, (p9.UID)(creds.EffectiveKUID), (p9.GID)(creds.EffectiveKGID))\n}\nif err == nil {\n- return childInode, nil\n+ return nil\n} else if !linuxerr.Equals(linuxerr.EPERM, err) {\n- return nil, err\n+ return err\n}\n// EPERM means that gofer does not allow creating a socket or pipe. Fallback\n@@ -952,10 +943,10 @@ func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\nswitch {\ncase err == nil:\n// Step succeeded, another file exists.\n- return nil, linuxerr.EEXIST\n+ return linuxerr.EEXIST\ncase !linuxerr.Equals(linuxerr.ENOENT, err):\n// Unexpected error.\n- return nil, err\n+ return err\n}\nswitch opts.Mode.FileType() {\n@@ -968,7 +959,7 @@ func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\nendpoint: opts.Endpoint,\n})\n*ds = appendDentry(*ds, parent)\n- return nil, nil\n+ return nil\ncase linux.S_IFIFO:\nparent.createSyntheticChildLocked(&createSyntheticOpts{\nname: name,\n@@ -978,11 +969,11 @@ func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\npipe: pipe.NewVFSPipe(true /* isNamed */, pipe.DefaultPipeSize),\n})\n*ds = appendDentry(*ds, parent)\n- return nil, nil\n+ return nil\n}\n// Retain error from gofer if synthetic file cannot be created internally.\n- return nil, linuxerr.EPERM\n- }, nil, nil)\n+ return linuxerr.EPERM\n+ }, nil)\n}\n// OpenAt implements vfs.FilesystemImpl.OpenAt.\n@@ -1740,21 +1731,25 @@ func (fs *filesystem) StatFSAt(ctx context.Context, rp *vfs.ResolvingPath) (linu\n// SymlinkAt implements vfs.FilesystemImpl.SymlinkAt.\nfunc (fs *filesystem) SymlinkAt(ctx context.Context, rp *vfs.ResolvingPath, target string) error {\n- return fs.doCreateAt(ctx, rp, false /* dir */, func(parent *dentry, name string, ds **[]*dentry) (*lisafs.Inode, error) {\n+ return fs.doCreateAt(ctx, rp, false /* dir */, func(parent *dentry, name string, ds **[]*dentry) error {\ncreds := rp.Credentials()\nif fs.opts.lisaEnabled {\n- return parent.controlFDLisa.SymlinkAt(ctx, name, target, lisafs.UID(creds.EffectiveKUID), lisafs.GID(creds.EffectiveKGID))\n+ symlinkInode, err := parent.controlFDLisa.SymlinkAt(ctx, name, target, lisafs.UID(creds.EffectiveKUID), lisafs.GID(creds.EffectiveKGID))\n+ if err != nil {\n+ return err\n}\n- _, err := parent.file.symlink(ctx, target, name, (p9.UID)(creds.EffectiveKUID), (p9.GID)(creds.EffectiveKGID))\n- return nil, err\n- }, nil, func(child *dentry) {\n+ return parent.insertCreatedChildLocked(ctx, &symlinkInode, name, func(child *dentry) {\nif fs.opts.interop != InteropModeShared {\n// lisafs caches the symlink target on creation. In practice, this\n// helps avoid a lot of ReadLink RPCs.\nchild.haveTarget = true\nchild.target = target\n}\n- })\n+ }, ds)\n+ }\n+ _, err := parent.file.symlink(ctx, target, name, (p9.UID)(creds.EffectiveKUID), (p9.GID)(creds.EffectiveKGID))\n+ return err\n+ }, nil)\n}\n// UnlinkAt implements vfs.FilesystemImpl.UnlinkAt.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"new_path": "pkg/sentry/fsimpl/gofer/gofer.go",
"diff": "@@ -502,12 +502,12 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nfunc (fs *filesystem) initClientAndRoot(ctx context.Context) error {\nvar err error\nif fs.opts.lisaEnabled {\n- var rootInode *lisafs.Inode\n+ var rootInode lisafs.Inode\nrootInode, err = fs.initClientLisa(ctx)\nif err != nil {\nreturn err\n}\n- fs.root, err = fs.newDentryLisa(ctx, rootInode)\n+ fs.root, err = fs.newDentryLisa(ctx, &rootInode)\nif err != nil {\nfs.clientLisa.CloseFDBatched(ctx, rootInode.ControlFD)\n}\n@@ -524,18 +524,18 @@ func (fs *filesystem) initClientAndRoot(ctx context.Context) error {\nreturn err\n}\n-func (fs *filesystem) initClientLisa(ctx context.Context) (*lisafs.Inode, error) {\n+func (fs *filesystem) initClientLisa(ctx context.Context) (lisafs.Inode, error) {\nsock, err := unet.NewSocket(fs.opts.fd)\nif err != nil {\n- return nil, err\n+ return lisafs.Inode{}, err\n}\n- var rootInode *lisafs.Inode\n+ var rootInode lisafs.Inode\nctx.UninterruptibleSleepStart(false)\nfs.clientLisa, rootInode, err = lisafs.NewClient(sock)\nctx.UninterruptibleSleepFinish(false)\nif err != nil {\n- return nil, err\n+ return lisafs.Inode{}, err\n}\nif fs.opts.aname == \"/\" {\nreturn rootInode, nil\n@@ -546,7 +546,7 @@ func (fs *filesystem) initClientLisa(ctx context.Context) (*lisafs.Inode, error)\nstatus, inodes, err := rootFD.WalkMultiple(ctx, strings.Split(fs.opts.aname, \"/\"))\nrootFD.CloseBatched(ctx)\nif err != nil {\n- return nil, err\n+ return lisafs.Inode{}, err\n}\n// Close all intermediate FDs to the attach point.\n@@ -558,12 +558,12 @@ func (fs *filesystem) initClientLisa(ctx context.Context) (*lisafs.Inode, error)\nswitch status {\ncase lisafs.WalkSuccess:\n- return &inodes[numInodes-1], nil\n+ return inodes[numInodes-1], nil\ndefault:\nlast := fs.clientLisa.NewFD(inodes[numInodes-1].ControlFD)\nlast.CloseBatched(ctx)\nlog.Warningf(\"initClientLisa failed because walk to attach point %q failed: lisafs.WalkStatus = %v\", fs.opts.aname, status)\n- return nil, unix.ENOENT\n+ return lisafs.Inode{}, unix.ENOENT\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/gofer/save_restore.go",
"new_path": "pkg/sentry/fsimpl/gofer/save_restore.go",
"diff": "@@ -195,7 +195,7 @@ func (fs *filesystem) CompleteRestore(ctx context.Context, opts vfs.CompleteRest\nif err != nil {\nreturn err\n}\n- if err := fs.root.restoreFileLisa(ctx, rootInode, &opts); err != nil {\n+ if err := fs.root.restoreFileLisa(ctx, &rootInode, &opts); err != nil {\nreturn err\n}\n} else {\n@@ -381,7 +381,7 @@ func (d *dentry) restoreRecursive(ctx context.Context, opts *vfs.CompleteRestore\nif err != nil {\nreturn err\n}\n- if err := d.restoreFileLisa(ctx, inode, opts); err != nil {\n+ if err := d.restoreFileLisa(ctx, &inode, opts); err != nil {\nreturn err\n}\n} else {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Get rid of unnecessary lisafs.Inode allocations.
lisafs.Inode is a heavy struct with linux.Statx in it. However the cost of
copying it on return is lower than that of an allocation.
Additionally unclutter the filesystem.doCreateAt function signature. It already
is quite complex. lisafs had added more complexity earlier. Revert that.
PiperOrigin-RevId: 424972317 |
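The rationale above — returning `lisafs.Inode` by value instead of `*Inode` — trades a struct copy for avoiding a heap allocation: a returned pointer to a local forces the value to escape, while a value return is copied into the caller's frame. A hedged illustration (the field layout is a stand-in, not the real `lisafs.Inode`):

```go
package main

import "fmt"

// Inode stands in for lisafs.Inode: large enough that a copy is not free,
// but still cheaper than a heap allocation plus GC pressure.
type Inode struct {
	ControlFD uint32
	Stat      [256]byte // placeholder for the embedded linux.Statx
}

// byPointer forces the result to escape to the heap: one allocation per call.
func byPointer() *Inode {
	return &Inode{ControlFD: 1}
}

// byValue copies the struct into the caller's frame: no allocation.
func byValue() Inode {
	return Inode{ControlFD: 1}
}

func main() {
	p := byPointer()
	v := byValue()
	fmt.Println(p.ControlFD, v.ControlFD)
}
```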
259,962 | 28.01.2022 17:10:32 | 28,800 | 44f0f2dc400aab0cfa9881604bbe297f5249f611 | Exclude tcp_noracedetector_test.go from gotsan runs. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/test/e2e/BUILD",
"new_path": "pkg/tcpip/transport/tcp/test/e2e/BUILD",
"diff": "@@ -164,6 +164,9 @@ go_test(\nname = \"tcp_noracedetector_test\",\nsize = \"small\",\nsrcs = [\"tcp_noracedetector_test.go\"],\n+ # These tests can be extremely slow/flaky when run under gotsan,\n+ # so exclude them from gotsan runs.\n+ tags = [\"nogotsan\"],\ndeps = [\n\":e2e\",\n\"//pkg/refs\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_noracedetector_test.go",
"new_path": "pkg/tcpip/transport/tcp/test/e2e/tcp_noracedetector_test.go",
"diff": "// These tests are flaky when run under the go race detector due to some\n// iterations taking long enough that the retransmit timer can kick in causing\n// the congestion window measurements to fail due to extra packets etc.\n-//\n-//go:build !race\n-// +build !race\npackage tcp_noracedetector_test\n"
}
] | Go | Apache License 2.0 | google/gvisor | Exclude tcp_noracedetector_test.go from gotsan runs.
PiperOrigin-RevId: 424989750 |
259,907 | 28.01.2022 19:11:56 | 28,800 | 2a3d59997fb9731bae6f0dd3b23d4b17455fed29 | Enable reference count leak checking for lisafs.
Also add DoRepeatedLeakCheck() to refsvfs2 package.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/lisafs/connection_test.go",
"new_path": "pkg/lisafs/connection_test.go",
"diff": "@@ -104,6 +104,7 @@ func runServerClient(t testing.TB, clientFn func(c *lisafs.Client)) {\nc.Close() // This should trigger client and server shutdown.\nts.Wait()\n+ ts.Server.Destroy()\n}\n// TestStartUp tests that the server and client can be started up correctly.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/lisafs/server.go",
"new_path": "pkg/lisafs/server.go",
"diff": "@@ -103,6 +103,11 @@ func (s *Server) Wait() {\ns.connWg.Wait()\n}\n+// Destroy releases resources being used by this server.\n+func (s *Server) Destroy() {\n+ s.root.DecRef(nil)\n+}\n+\n// ServerImpl contains the implementation details for a Server.\n// Implementations of ServerImpl should contain their associated Server by\n// value as their first field.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/lisafs/testsuite/BUILD",
"new_path": "pkg/lisafs/testsuite/BUILD",
"diff": "@@ -13,6 +13,8 @@ go_library(\n\"//pkg/abi/linux\",\n\"//pkg/context\",\n\"//pkg/lisafs\",\n+ \"//pkg/refs\",\n+ \"//pkg/refsvfs2\",\n\"//pkg/unet\",\n\"@com_github_syndtr_gocapability//capability:go_default_library\",\n\"@org_golang_x_sys//unix:go_default_library\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/lisafs/testsuite/testsuite.go",
"new_path": "pkg/lisafs/testsuite/testsuite.go",
"diff": "@@ -30,6 +30,8 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/lisafs\"\n+ \"gvisor.dev/gvisor/pkg/refs\"\n+ \"gvisor.dev/gvisor/pkg/refsvfs2\"\n\"gvisor.dev/gvisor/pkg/unet\"\n)\n@@ -49,10 +51,9 @@ type Tester interface {\n// RunAllLocalFSTests runs all local FS tests as subtests.\nfunc RunAllLocalFSTests(t *testing.T, tester Tester) {\n+ refs.SetLeakMode(refs.LeaksPanic)\nfor name, testFn := range localFSTests {\n- t.Run(name, func(t *testing.T) {\n- runServerClient(t, tester, testFn)\n- })\n+ runServerClient(t, tester, name, testFn)\n}\n}\n@@ -74,7 +75,7 @@ var localFSTests map[string]testFunc = map[string]testFunc{\n\"Getdents\": testGetdents,\n}\n-func runServerClient(t *testing.T, tester Tester, testFn testFunc) {\n+func runServerClient(t *testing.T, tester Tester, testName string, testFn testFunc) {\nmountPath, err := ioutil.TempDir(os.Getenv(\"TEST_TMPDIR\"), \"\")\nif err != nil {\nt.Fatalf(\"creation of temporary mountpoint failed: %v\", err)\n@@ -109,9 +110,16 @@ func runServerClient(t *testing.T, tester Tester, testFn testFunc) {\nrootFile := c.NewFD(root.ControlFD)\nctx := context.Background()\n+ t.Run(testName, func(t *testing.T) {\ntestFn(ctx, t, tester, rootFile)\n+ })\ncloseFD(ctx, t, rootFile)\n+ // Release server resources and check for leaks. Note that leak check must\n+ // happen before c.Close() because server cleans up resources on shutdown.\n+ server.Destroy()\n+ refsvfs2.DoRepeatedLeakCheck()\n+\nc.Close() // This should trigger client and server shutdown.\nserver.Wait()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/refsvfs2/refs_map.go",
"new_path": "pkg/refsvfs2/refs_map.go",
"diff": "@@ -124,11 +124,24 @@ func logEvent(obj CheckedObject, msg string) {\nvar checkOnce sync.Once\n// DoLeakCheck iterates through the live object map and logs a message for each\n-// object. It is called once no reference-counted objects should be reachable\n-// anymore, at which point anything left in the map is considered a leak.\n+// object. It should be called when no reference-counted objects are reachable\n+// anymore, at which point anything left in the map is considered a leak. On\n+// multiple calls, only the first call will perform the leak check.\nfunc DoLeakCheck() {\nif leakCheckEnabled() {\n- checkOnce.Do(func() {\n+ checkOnce.Do(doLeakCheck)\n+ }\n+}\n+\n+// DoRepeatedLeakCheck is the same as DoLeakCheck except that it can be called\n+// multiple times by the caller to incrementally perform leak checking.\n+func DoRepeatedLeakCheck() {\n+ if leakCheckEnabled() {\n+ doLeakCheck()\n+ }\n+}\n+\n+func doLeakCheck() {\nliveObjectsMu.Lock()\ndefer liveObjectsMu.Unlock()\nleaked := len(liveObjects)\n@@ -142,6 +155,4 @@ func DoLeakCheck() {\n}\nlog.Warningf(msg)\n}\n- })\n- }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/gofer.go",
"new_path": "runsc/cmd/gofer.go",
"diff": "@@ -245,6 +245,7 @@ func (g *Gofer) serveLisafs(spec *specs.Spec, conf *config.Config, root string)\nserver.StartConnection(conn)\n}\nserver.Wait()\n+ server.Destroy()\nlog.Infof(\"All lisafs servers exited.\")\nreturn subcommands.ExitSuccess\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable reference count leak checking for lisafs.
Also add DoRepeatedLeakCheck() to refsvfs2 package.
Updates #5466
PiperOrigin-RevId: 425004987 |
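A sketch of how the new `DoRepeatedLeakCheck` differs from `DoLeakCheck` in practice: it is not gated by `sync.Once`, so a harness can re-scan the live-object map after every iteration, as the lisafs testsuite now does. The package name and test body here are hypothetical placeholders.

```go
package mypkg_test

import (
	"testing"

	"gvisor.dev/gvisor/pkg/refs"
	"gvisor.dev/gvisor/pkg/refsvfs2"
)

// TestPerCaseLeakCheck runs a leak check after every subtest instead of once
// at process exit, which is what DoRepeatedLeakCheck enables.
func TestPerCaseLeakCheck(t *testing.T) {
	refs.SetLeakMode(refs.LeaksPanic)
	for _, name := range []string{"open", "walk", "getdents"} {
		t.Run(name, func(t *testing.T) {
			// ... exercise reference-counted objects, then release them ...
		})
		// Unlike DoLeakCheck, this is not guarded by sync.Once, so each
		// iteration re-scans the live-object map for leaked objects.
		refsvfs2.DoRepeatedLeakCheck()
	}
}
```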
259,885 | 31.01.2022 12:22:36 | 28,800 | 5ef032d4df2810ec9dfe2d8c9ed13f8cd4eb4a33 | Implement faccessat2. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/file.go",
"new_path": "pkg/abi/linux/file.go",
"diff": "@@ -96,6 +96,11 @@ const (\nAT_EMPTY_PATH = 0x1000\n)\n+// Constants for faccessat2(2).\n+const (\n+ AT_EACCESS = 0x200\n+)\n+\n// Constants for all file-related ...at(2) syscalls.\nconst (\nAT_FDCWD = -100\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/stat.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/stat.go",
"diff": "@@ -245,26 +245,29 @@ func Access(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\naddr := args[0].Pointer()\nmode := args[1].ModeT()\n- return 0, nil, accessAt(t, linux.AT_FDCWD, addr, mode)\n+ return 0, nil, accessAt(t, linux.AT_FDCWD, addr, mode, 0 /* flags */)\n}\n// Faccessat implements Linux syscall faccessat(2).\n-//\n-// Note that the faccessat() system call does not take a flags argument:\n-// \"The raw faccessat() system call takes only the first three arguments. The\n-// AT_EACCESS and AT_SYMLINK_NOFOLLOW flags are actually implemented within\n-// the glibc wrapper function for faccessat(). If either of these flags is\n-// specified, then the wrapper function employs fstatat(2) to determine access\n-// permissions.\" - faccessat(2)\nfunc Faccessat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\ndirfd := args[0].Int()\naddr := args[1].Pointer()\nmode := args[2].ModeT()\n- return 0, nil, accessAt(t, dirfd, addr, mode)\n+ return 0, nil, accessAt(t, dirfd, addr, mode, 0 /* flags */)\n+}\n+\n+// Faccessat2 implements Linux syscall faccessat2(2).\n+func Faccessat2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n+ dirfd := args[0].Int()\n+ addr := args[1].Pointer()\n+ mode := args[2].ModeT()\n+ flags := args[3].Int()\n+\n+ return 0, nil, accessAt(t, dirfd, addr, mode, flags)\n}\n-func accessAt(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, mode uint) error {\n+func accessAt(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, mode uint, flags int32) error {\nconst rOK = 4\nconst wOK = 2\nconst xOK = 1\n@@ -274,16 +277,23 @@ func accessAt(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, mode uint) er\nreturn linuxerr.EINVAL\n}\n+ // faccessat2(2) isn't documented as supporting AT_EMPTY_PATH, but it does.\n+ if flags&^(linux.AT_EACCESS|linux.AT_SYMLINK_NOFOLLOW|linux.AT_EMPTY_PATH) != 0 {\n+ return linuxerr.EINVAL\n+ }\n+\npath, err := copyInPath(t, pathAddr)\nif err != nil {\nreturn err\n}\n- tpop, err := getTaskPathOperation(t, dirfd, path, disallowEmptyPath, followFinalSymlink)\n+ tpop, err := getTaskPathOperation(t, dirfd, path, shouldAllowEmptyPath(flags&linux.AT_EMPTY_PATH != 0), shouldFollowFinalSymlink(flags&linux.AT_SYMLINK_NOFOLLOW == 0))\nif err != nil {\nreturn err\n}\ndefer tpop.Release(t)\n+ creds := t.Credentials()\n+ if flags&linux.AT_EACCESS == 0 {\n// access(2) and faccessat(2) check permissions using real\n// UID/GID, not effective UID/GID.\n//\n@@ -291,7 +301,7 @@ func accessAt(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, mode uint) er\n// uid/gid. We do this by temporarily clearing all FS-related\n// capabilities and switching the fsuid/fsgid around to the\n// real ones.\" -fs/open.c:faccessat\n- creds := t.Credentials().Fork()\n+ creds = creds.Fork()\ncreds.EffectiveKUID = creds.RealKUID\ncreds.EffectiveKGID = creds.RealKGID\nif creds.EffectiveKUID.In(creds.UserNamespace) == auth.RootUID {\n@@ -299,6 +309,7 @@ func accessAt(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, mode uint) er\n} else {\ncreds.EffectiveCaps = 0\n}\n+ }\nreturn t.Kernel().VFS().AccessAt(t, creds, vfs.AccessTypes(mode), &tpop.pop)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/vfs2/vfs2.go",
"new_path": "pkg/sentry/syscalls/linux/vfs2/vfs2.go",
"diff": "@@ -162,6 +162,7 @@ func Override() {\ns.Table[327] = syscalls.Supported(\"preadv2\", Preadv2)\ns.Table[328] = syscalls.Supported(\"pwritev2\", Pwritev2)\ns.Table[332] = syscalls.Supported(\"statx\", Statx)\n+ s.Table[439] = syscalls.Supported(\"faccessat2\", Faccessat2)\ns.Table[441] = syscalls.Supported(\"epoll_pwait2\", EpollPwait2)\ns.Init()\n@@ -276,6 +277,7 @@ func Override() {\ns.Table[286] = syscalls.Supported(\"preadv2\", Preadv2)\ns.Table[287] = syscalls.Supported(\"pwritev2\", Pwritev2)\ns.Table[291] = syscalls.Supported(\"statx\", Statx)\n+ s.Table[439] = syscalls.Supported(\"faccessat2\", Faccessat2)\ns.Table[441] = syscalls.Supported(\"epoll_pwait2\", EpollPwait2)\ns.Init()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/access.cc",
"new_path": "test/syscalls/linux/access.cc",
"diff": "#include <fcntl.h>\n#include <stdlib.h>\n#include <sys/stat.h>\n+#include <sys/syscall.h>\n#include <sys/types.h>\n#include <unistd.h>\n@@ -164,6 +165,90 @@ TEST_F(AccessTest, UsrReadWriteExec) {\nEXPECT_THAT(unlink(filename.c_str()), SyscallSucceeds());\n}\n+// glibc faccessat() is a wrapper around either the faccessat syscall that tries\n+// to implement flags in userspace, or the faccessat2 syscall. We want to test\n+// syscalls specifically, so use syscall(2) directly.\n+int sys_faccessat(int dirfd, const char* pathname, int mode) {\n+ return syscall(SYS_faccessat, dirfd, pathname, mode);\n+}\n+\n+#ifndef SYS_faccessat2\n+#define SYS_faccessat2 439\n+#endif // SYS_faccessat2\n+\n+int sys_faccessat2(int dirfd, const char* pathname, int mode, int flags) {\n+ return syscall(SYS_faccessat2, dirfd, pathname, mode, flags);\n+}\n+\n+TEST(FaccessatTest, SymlinkFollowed) {\n+ const std::string target_path = NewTempAbsPath();\n+ const std::string symlink_path = NewTempAbsPath();\n+ ASSERT_THAT(symlink(target_path.c_str(), symlink_path.c_str()),\n+ SyscallSucceeds());\n+\n+ // faccessat() should initially fail with ENOENT since it follows the symlink\n+ // to a file that doesn't exist.\n+ EXPECT_THAT(sys_faccessat(-1, symlink_path.c_str(), F_OK),\n+ SyscallFailsWithErrno(ENOENT));\n+\n+ // After creating the symlink target, faccessat() should succeed.\n+ int fd;\n+ ASSERT_THAT(fd = open(target_path.c_str(), O_CREAT | O_EXCL, 0644),\n+ SyscallSucceeds());\n+ close(fd);\n+ EXPECT_THAT(sys_faccessat(-1, symlink_path.c_str(), F_OK), SyscallSucceeds());\n+}\n+\n+PosixErrorOr<bool> Faccessat2Supported() {\n+ if (IsRunningOnGvisor() && !IsRunningWithVFS1()) {\n+ // faccessat2 support is expected on VFS2.\n+ return true;\n+ }\n+ int ret = sys_faccessat2(-1, \"/\", F_OK, 0);\n+ if (ret == 0) {\n+ return true;\n+ }\n+ if (errno == ENOSYS) {\n+ return false;\n+ }\n+ return PosixError(errno, \"unexpected errno from faccessat2(/)\");\n+}\n+\n+TEST(Faccessat2Test, SymlinkFollowedByDefault) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(Faccessat2Supported()));\n+\n+ const std::string target_path = NewTempAbsPath();\n+ const std::string symlink_path = NewTempAbsPath();\n+ ASSERT_THAT(symlink(target_path.c_str(), symlink_path.c_str()),\n+ SyscallSucceeds());\n+\n+ // faccessat2() should initially fail with ENOENT since, by default, it\n+ // follows the symlink to a file that doesn't exist.\n+ EXPECT_THAT(sys_faccessat2(-1, symlink_path.c_str(), F_OK, 0 /* flags */),\n+ SyscallFailsWithErrno(ENOENT));\n+\n+ // After creating the symlink target, faccessat2() should succeed.\n+ int fd;\n+ ASSERT_THAT(fd = open(target_path.c_str(), O_CREAT | O_EXCL, 0644),\n+ SyscallSucceeds());\n+ close(fd);\n+ EXPECT_THAT(sys_faccessat2(-1, symlink_path.c_str(), F_OK, 0 /* flags */),\n+ SyscallSucceeds());\n+}\n+\n+TEST(Faccessat2Test, SymlinkNofollow) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(Faccessat2Supported()));\n+\n+ const std::string target_path = NewTempAbsPath();\n+ const std::string symlink_path = NewTempAbsPath();\n+ ASSERT_THAT(symlink(target_path.c_str(), symlink_path.c_str()),\n+ SyscallSucceeds());\n+\n+ EXPECT_THAT(\n+ sys_faccessat2(-1, symlink_path.c_str(), F_OK, AT_SYMLINK_NOFOLLOW),\n+ SyscallSucceeds());\n+}\n+\n} // namespace\n} // namespace testing\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement faccessat2.
PiperOrigin-RevId: 425432076 |
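As the test comment notes, glibc's `faccessat()` wrapper may emulate flags in userspace, so exercising the new syscall requires invoking it directly. A sketch in Go, assuming `golang.org/x/sys/unix` exposes `SYS_FACCESSAT2` (the syscall itself needs Linux 5.8+ or a runtime like gVisor that implements it; older kernels return ENOSYS):

```go
//go:build linux

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// checkAccess invokes faccessat2(2) directly so that flags such as
// AT_EACCESS and AT_SYMLINK_NOFOLLOW are handled by the kernel (or by
// gVisor's sentry) rather than emulated by libc.
func checkAccess(path string, mode uint32, flags int) error {
	p, err := unix.BytePtrFromString(path)
	if err != nil {
		return err
	}
	dirfd := unix.AT_FDCWD // resolve path relative to the cwd
	_, _, errno := unix.Syscall6(unix.SYS_FACCESSAT2,
		uintptr(dirfd), uintptr(unsafe.Pointer(p)),
		uintptr(mode), uintptr(flags), 0, 0)
	if errno != 0 {
		return errno
	}
	return nil
}

func main() {
	// AT_EACCESS: check with effective rather than real credentials.
	fmt.Println(checkAccess("/etc/passwd", unix.R_OK, unix.AT_EACCESS))
}
```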
259,909 | 31.01.2022 16:05:18 | 28,800 | 0f8db423e2706e01d932ee1bf3c444bf26654b08 | Fix cgroupv2 bug that set the wrong iops throttle. | [
{
"change_type": "MODIFY",
"old_path": "runsc/cgroup/cgroup_v2.go",
"new_path": "runsc/cgroup/cgroup_v2.go",
"diff": "@@ -513,7 +513,7 @@ func (*io2) set(spec *specs.LinuxResources, path string) error {\nreturn err\n}\n- if err := setThrottle2(path, \"riops\", blkio.ThrottleWriteIOPSDevice); err != nil {\n+ if err := setThrottle2(path, \"wiops\", blkio.ThrottleWriteIOPSDevice); err != nil {\nreturn err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cgroup/cgroup_v2_test.go",
"new_path": "runsc/cgroup/cgroup_v2_test.go",
"diff": "package cgroup\nimport (\n+ \"io/ioutil\"\n+ \"os\"\n+ \"path/filepath\"\n+ \"strconv\"\n\"strings\"\n\"testing\"\n+\n+ specs \"github.com/opencontainers/runtime-spec/specs-go\"\n+ \"gvisor.dev/gvisor/pkg/test/testutil\"\n)\nvar cgroupv2MountInfo = `29 22 0:26 / /sys/fs/cgroup rw shared:4 - cgroup2 cgroup2 rw,seclabel,nsdelegate`\n+func TestIO(t *testing.T) {\n+ for _, tc := range []struct {\n+ name string\n+ spec *specs.LinuxBlockIO\n+ path string\n+ wants string\n+ }{\n+ {\n+ name: \"simple\",\n+ spec: &specs.LinuxBlockIO{\n+ Weight: uint16Ptr(1),\n+ },\n+ path: \"io.weight\",\n+ wants: strconv.FormatUint(convertBlkIOToIOWeightValue(1), 10),\n+ },\n+ {\n+ name: \"throttlereadbps\",\n+ spec: &specs.LinuxBlockIO{\n+ ThrottleReadBpsDevice: []specs.LinuxThrottleDevice{\n+ makeLinuxThrottleDevice(1, 2, 3),\n+ },\n+ },\n+ path: \"io.max\",\n+ wants: \"1:2 rbps=3\",\n+ },\n+ {\n+ name: \"throttlewritebps\",\n+ spec: &specs.LinuxBlockIO{\n+ ThrottleWriteBpsDevice: []specs.LinuxThrottleDevice{\n+ makeLinuxThrottleDevice(4, 5, 6),\n+ },\n+ },\n+ path: \"io.max\",\n+ wants: \"4:5 wbps=6\",\n+ },\n+ {\n+ name: \"throttlereadiops\",\n+ spec: &specs.LinuxBlockIO{\n+ ThrottleReadIOPSDevice: []specs.LinuxThrottleDevice{\n+ makeLinuxThrottleDevice(7, 8, 9),\n+ },\n+ },\n+ path: \"io.max\",\n+ wants: \"7:8 riops=9\",\n+ },\n+ {\n+ name: \"throttlewriteiops\",\n+ spec: &specs.LinuxBlockIO{\n+ ThrottleWriteIOPSDevice: []specs.LinuxThrottleDevice{\n+ makeLinuxThrottleDevice(10, 11, 12),\n+ },\n+ },\n+ path: \"io.max\",\n+ wants: \"10:11 wiops=12\",\n+ },\n+ {\n+ name: \"nil_values\",\n+ spec: &specs.LinuxBlockIO{},\n+ path: \"not_used\",\n+ wants: \"\",\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ testutil.TmpDir()\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"cgroup\")\n+ if err != nil {\n+ t.Fatalf(\"error creating temporary directory: %v\", err)\n+ }\n+ defer os.RemoveAll(dir)\n+\n+ fd, err := os.Create(filepath.Join(dir, tc.path))\n+ if err != nil {\n+ t.Fatalf(\"os.CreatTemp(): %v\", err)\n+ }\n+ fd.Close()\n+\n+ spec := &specs.LinuxResources{\n+ BlockIO: tc.spec,\n+ }\n+ ctrlr := io2{}\n+ if err := ctrlr.set(spec, dir); err != nil {\n+ t.Fatalf(\"ctrlr.set(): %v\", err)\n+ }\n+\n+ gotBytes, err := ioutil.ReadFile(filepath.Join(dir, tc.path))\n+ if err != nil {\n+ t.Fatal(err.Error())\n+ }\n+ got := strings.TrimSuffix(string(gotBytes), \"\\n\")\n+ if got != tc.wants {\n+ t.Errorf(\"wrong file content, file: %q, want: %q, got: %q\", tc.path, tc.wants, got)\n+ }\n+ })\n+ }\n+}\n+\nfunc TestLoadPathsCgroupv2(t *testing.T) {\nfor _, tc := range []struct {\nname string\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix cgroupv2 bug that set the wrong iops throttle.
PiperOrigin-RevId: 425483482 |
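The bug was a copy/paste of the `riops` key when writing write-IOPS limits: each throttle must land in cgroupv2's `io.max` under its own key (`rbps`, `wbps`, `riops`, `wiops`). A minimal formatter matching the expectations in the new test (e.g. "10:11 wiops=12"); the helper name is illustrative, not gVisor's:

```go
package main

import "fmt"

// formatIOMax builds one line of cgroupv2's io.max file for a device
// identified by major:minor, e.g. "10:11 wiops=12".
func formatIOMax(major, minor uint64, key string, value uint64) string {
	return fmt.Sprintf("%d:%d %s=%d", major, minor, key, value)
}

func main() {
	fmt.Println(formatIOMax(10, 11, "wiops", 12)) // write IOPS limit
	fmt.Println(formatIOMax(7, 8, "riops", 9))    // read IOPS limit
}
```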
259,992 | 01.02.2022 14:00:10 | 28,800 | 5a642df6b32622af8495ee659cd671f2dae213f3 | Reorder cmd groups
Also fixed gofer which was added twice. | [
{
"change_type": "MODIFY",
"old_path": "runsc/cli/main.go",
"new_path": "runsc/cli/main.go",
"diff": "@@ -59,47 +59,46 @@ var (\nfunc Main(version string) {\n// Help and flags commands are generated automatically.\nhelp := cmd.NewHelp(subcommands.DefaultCommander)\n- help.Register(new(cmd.Syscalls))\nhelp.Register(new(cmd.Platforms))\n+ help.Register(new(cmd.Syscalls))\nsubcommands.Register(help, \"\")\nsubcommands.Register(subcommands.FlagsCommand(), \"\")\n- // Installation helpers.\n- const helperGroup = \"helpers\"\n- subcommands.Register(new(cmd.Install), helperGroup)\n- subcommands.Register(new(cmd.Uninstall), helperGroup)\n-\n- // Register user-facing runsc commands.\n+ // Register OCI user-facing runsc commands.\nsubcommands.Register(new(cmd.Checkpoint), \"\")\nsubcommands.Register(new(cmd.Create), \"\")\nsubcommands.Register(new(cmd.Delete), \"\")\nsubcommands.Register(new(cmd.Do), \"\")\nsubcommands.Register(new(cmd.Events), \"\")\nsubcommands.Register(new(cmd.Exec), \"\")\n- subcommands.Register(new(cmd.Gofer), \"\")\nsubcommands.Register(new(cmd.Kill), \"\")\nsubcommands.Register(new(cmd.List), \"\")\n- subcommands.Register(new(cmd.Pause), \"\")\nsubcommands.Register(new(cmd.PS), \"\")\n+ subcommands.Register(new(cmd.Pause), \"\")\nsubcommands.Register(new(cmd.Restore), \"\")\nsubcommands.Register(new(cmd.Resume), \"\")\nsubcommands.Register(new(cmd.Run), \"\")\nsubcommands.Register(new(cmd.Spec), \"\")\n- subcommands.Register(new(cmd.State), \"\")\nsubcommands.Register(new(cmd.Start), \"\")\n- subcommands.Register(new(cmd.Symbolize), \"\")\n- subcommands.Register(new(cmd.Wait), \"\")\n- subcommands.Register(new(cmd.Mitigate), \"\")\n+ subcommands.Register(new(cmd.State), \"\")\nsubcommands.Register(new(cmd.VerityPrepare), \"\")\n+ subcommands.Register(new(cmd.Wait), \"\")\n+\n+ // Installation helpers.\n+ const helperGroup = \"helpers\"\n+ subcommands.Register(new(cmd.Install), helperGroup)\n+ subcommands.Register(new(cmd.Mitigate), helperGroup)\n+ subcommands.Register(new(cmd.Uninstall), helperGroup)\n+\n+ const debugGroup = \"debug\"\n+ subcommands.Register(new(cmd.Debug), debugGroup)\n+ subcommands.Register(new(cmd.Statefile), debugGroup)\n+ subcommands.Register(new(cmd.Symbolize), debugGroup)\n- // Register internal commands with the internal group name. This causes\n- // them to be sorted below the user-facing commands with empty group.\n- // The string below will be printed above the commands.\n+ // Internal commands.\nconst internalGroup = \"internal use only\"\nsubcommands.Register(new(cmd.Boot), internalGroup)\n- subcommands.Register(new(cmd.Debug), internalGroup)\nsubcommands.Register(new(cmd.Gofer), internalGroup)\n- subcommands.Register(new(cmd.Statefile), internalGroup)\nconfig.RegisterFlags()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/boot.go",
"new_path": "runsc/cmd/boot.go",
"diff": "@@ -115,7 +115,7 @@ func (*Boot) Name() string {\n// Synopsis implements subcommands.Command.Synopsis.\nfunc (*Boot) Synopsis() string {\n- return \"launch a sandbox process (internal use only)\"\n+ return \"launch a sandbox process\"\n}\n// Usage implements subcommands.Command.Usage.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/gofer.go",
"new_path": "runsc/cmd/gofer.go",
"diff": "@@ -72,7 +72,7 @@ func (*Gofer) Name() string {\n// Synopsis implements subcommands.Command.\nfunc (g *Gofer) Synopsis() string {\n- return fmt.Sprintf(\"launch a gofer process that serves files over the protocol (9P or lisafs) defined in the config (internal use only)\")\n+ return fmt.Sprintf(\"launch a gofer process that proxies access to container files\")\n}\n// Usage implements subcommands.Command.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/list.go",
"new_path": "runsc/cmd/list.go",
"diff": "@@ -30,7 +30,7 @@ import (\n\"gvisor.dev/gvisor/runsc/flag\"\n)\n-// List implements subcommands.Command for the \"list\" command for the \"list\" command.\n+// List implements subcommands.Command for the \"list\" command.\ntype List struct {\nquiet bool\nformat string\n"
}
] | Go | Apache License 2.0 | google/gvisor | Reorder cmd groups
Also fixed gofer which was added twice.
PiperOrigin-RevId: 425709601 |
259,962 | 01.02.2022 22:34:52 | 28,800 | 404b90fa4ad81c50a8d84f23eaa249d66269ba52 | Fix stale comment on Endpoint.ReadContext.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/channel/channel.go",
"new_path": "pkg/tcpip/link/channel/channel.go",
"diff": "@@ -167,7 +167,7 @@ func (e *Endpoint) Read() *stack.PacketBuffer {\n}\n// ReadContext does blocking read for one packet from the outbound packet queue.\n-// It can be cancelled by ctx, and in this case, it returns false.\n+// It can be cancelled by ctx, and in this case, it returns nil.\nfunc (e *Endpoint) ReadContext(ctx context.Context) *stack.PacketBuffer {\nreturn e.q.ReadContext(ctx)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix stale comment on Endpoint.ReadContext.
Fixes #7109
PiperOrigin-RevId: 425799066 |
259,962 | 01.02.2022 22:38:54 | 28,800 | bcba5136d0d02b7a9b3c8979a886a452a9035ff7 | Remove stale nic.DeliverNetworkPacket comment.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -699,9 +699,6 @@ func (n *nic) isInGroup(addr tcpip.Address) bool {\n// DeliverNetworkPacket finds the appropriate network protocol endpoint and\n// hands the packet over for further processing. This function is called when\n// the NIC receives a packet from the link endpoint.\n-// Note that the ownership of the slice backing vv is retained by the caller.\n-// This rule applies only to the slice itself, not to the items of the slice;\n-// the ownership of the items is not retained by the caller.\nfunc (n *nic) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer) {\nenabled := n.Enabled()\n// If the NIC is not yet enabled, don't receive any packets.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove stale nic.DeliverNetworkPacket comment.
Fixes #7080
PiperOrigin-RevId: 425799488 |
259,868 | 02.02.2022 20:17:30 | 28,800 | a5ce865145c718f26c7a8f305f6c8262e992051c | fuse: Attempt to fix five data races.
I am not fully familiar with this code, but I added some `checklocks`
annotations wherever it seemed appropriate and obvious from existing comments. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/connection.go",
"new_path": "pkg/sentry/fsimpl/fuse/connection.go",
"diff": "@@ -48,7 +48,7 @@ const (\ntype connection struct {\nfd *DeviceFD\n- // mu protects access to struct memebers.\n+ // mu protects access to struct members.\nmu sync.Mutex `state:\"nosave\"`\n// attributeVersion is the version of connection's attributes.\n@@ -84,15 +84,18 @@ type connection struct {\n// umount,\n// connection abort,\n// device release.\n+ // +checklocks:mu\nconnected bool\n// connInitError if FUSE_INIT encountered error (major version mismatch).\n// Only set in INIT.\n+ // +checklocks:mu\nconnInitError bool\n// connInitSuccess if FUSE_INIT is successful.\n// Only set in INIT.\n- // Used for destory (not yet implemented).\n+ // Used for destroy (not yet implemented).\n+ // +checklocks:mu\nconnInitSuccess bool\n// aborted via sysfs, and will send ECONNABORTED to read after disconnection (instead of ENODEV).\n@@ -100,7 +103,7 @@ type connection struct {\n// TODO(gvisor.dev/issue/3525): set this to true when user aborts.\naborted bool\n- // numWating is the number of requests waiting to be\n+ // numWaiting is the number of requests waiting to be\n// sent to FUSE device or being processed by FUSE daemon.\nnumWaiting uint32\n@@ -118,19 +121,19 @@ type connection struct {\nasyncMu sync.Mutex `state:\"nosave\"`\n// asyncNum is the number of async requests.\n- // Protected by asyncMu.\n+ // +checklocks:asyncMu\nasyncNum uint16\n// asyncCongestionThreshold the number of async requests.\n// Negotiated in FUSE_INIT as \"CongestionThreshold\".\n// TODO(gvisor.dev/issue/3529): add congestion control.\n- // Protected by asyncMu.\n+ // +checklocks:asyncMu\nasyncCongestionThreshold uint16\n// asyncNumMax is the maximum number of asyncNum.\n// Connection blocks the async requests when it is reached.\n// Negotiated in FUSE_INIT as \"MaxBackground\".\n- // Protected by asyncMu.\n+ // +checklocks:asyncMu\nasyncNumMax uint16\n// maxRead is the maximum size of a read buffer in in bytes.\n@@ -201,10 +204,12 @@ func newFUSEConnection(_ context.Context, fuseFD *DeviceFD, opts *filesystemOpti\n// Create the writeBuf for the header to be stored in.\nhdrLen := uint32((*linux.FUSEHeaderOut)(nil).SizeBytes())\n+ fuseFD.mu.Lock()\nfuseFD.writeBuf = make([]byte, hdrLen)\nfuseFD.completions = make(map[linux.FUSEOpID]*futureResponse)\nfuseFD.fullQueueCh = make(chan struct{}, opts.maxActiveRequests)\nfuseFD.writeCursor = 0\n+ fuseFD.mu.Unlock()\nreturn &connection{\nfd: fuseFD,\n@@ -251,15 +256,24 @@ func (conn *connection) Call(t *kernel.Task, r *Request) (*Response, error) {\n}\n}\n- if !conn.connected {\n+ conn.fd.mu.Lock()\n+ conn.mu.Lock()\n+ connected := conn.connected\n+ connInitError := conn.connInitError\n+ conn.mu.Unlock()\n+\n+ if !connected {\n+ conn.fd.mu.Unlock()\nreturn nil, linuxerr.ENOTCONN\n}\n- if conn.connInitError {\n+ if connInitError {\n+ conn.fd.mu.Unlock()\nreturn nil, linuxerr.ECONNREFUSED\n}\nfut, err := conn.callFuture(t, r)\n+ conn.fd.mu.Unlock()\nif err != nil {\nreturn nil, err\n}\n@@ -269,10 +283,8 @@ func (conn *connection) Call(t *kernel.Task, r *Request) (*Response, error) {\n// callFuture makes a request to the server and returns a future response.\n// Call resolve() when the response needs to be fulfilled.\n+// +checklocks:conn.fd.mu\nfunc (conn *connection) callFuture(t *kernel.Task, r *Request) (*futureResponse, error) {\n- conn.fd.mu.Lock()\n- defer conn.fd.mu.Unlock()\n-\n// Is the queue full?\n//\n// We must busy wait here until the request can be queued. 
We don't\n@@ -299,6 +311,7 @@ func (conn *connection) callFuture(t *kernel.Task, r *Request) (*futureResponse,\n}\n// callFutureLocked makes a request to the server and returns a future response.\n+// +checklocks:conn.fd.mu\nfunc (conn *connection) callFutureLocked(t *kernel.Task, r *Request) (*futureResponse, error) {\n// Check connected again holding conn.mu.\nconn.mu.Lock()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/connection_control.go",
"new_path": "pkg/sentry/fsimpl/fuse/connection_control.go",
"diff": "@@ -109,9 +109,13 @@ func (conn *connection) InitRecv(res *Response, hasSysAdminCap bool) error {\n// Process the FUSE_INIT reply from the FUSE server.\n// It tries to acquire the conn.asyncMu lock if minor version is newer than 13.\nfunc (conn *connection) initProcessReply(out *linux.FUSEInitOut, hasSysAdminCap bool) error {\n+ conn.mu.Lock()\n// No matter error or not, always set initialzied.\n// to unblock the blocked requests.\n- defer conn.SetInitialized()\n+ defer func() {\n+ conn.SetInitialized()\n+ conn.mu.Unlock()\n+ }()\n// No support for old major fuse versions.\nif out.Major != linux.FUSE_KERNEL_VERSION {\n@@ -219,7 +223,7 @@ func (conn *connection) Abort(ctx context.Context) {\nconn.asyncMu.Unlock()\nconn.mu.Unlock()\n- // 1. The requets blocked before initialization.\n+ // 1. The request blocked before initialization.\n// Will reach call() `connected` check and return.\nif !conn.Initialized() {\nconn.SetInitialized()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/dev.go",
"new_path": "pkg/sentry/fsimpl/fuse/dev.go",
"diff": "@@ -118,10 +118,17 @@ func (fd *DeviceFD) Release(ctx context.Context) {\n}\n}\n+// filesystemIsInitialized returns true if fd.fs is set and the connection is\n+// initialized.\n+func (fd *DeviceFD) filesystemIsInitialized() bool {\n+ // FIXME(gvisor.dev/issue/4813): Access to fd.fs should be synchronized.\n+ return fd.fs != nil\n+}\n+\n// PRead implements vfs.FileDescriptionImpl.PRead.\nfunc (fd *DeviceFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {\n// Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.\n- if fd.fs == nil {\n+ if !fd.filesystemIsInitialized() {\nreturn 0, linuxerr.EPERM\n}\n@@ -131,7 +138,7 @@ func (fd *DeviceFD) PRead(ctx context.Context, dst usermem.IOSequence, offset in\n// Read implements vfs.FileDescriptionImpl.Read.\nfunc (fd *DeviceFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {\n// Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.\n- if fd.fs == nil {\n+ if !fd.filesystemIsInitialized() {\nreturn 0, linuxerr.EPERM\n}\n@@ -142,7 +149,9 @@ func (fd *DeviceFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.R\nminBuffSize := linux.FUSE_MIN_READ_BUFFER\ninHdrLen := uint32((*linux.FUSEHeaderIn)(nil).SizeBytes())\nwriteHdrLen := uint32((*linux.FUSEWriteIn)(nil).SizeBytes())\n+ fd.fs.conn.mu.Lock()\nnegotiatedMinBuffSize := inHdrLen + writeHdrLen + fd.fs.conn.maxWrite\n+ fd.fs.conn.mu.Unlock()\nif minBuffSize < negotiatedMinBuffSize {\nminBuffSize = negotiatedMinBuffSize\n}\n@@ -160,6 +169,7 @@ func (fd *DeviceFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.R\n// readLocked implements the reading of the fuse device while locked with DeviceFD.mu.\n//\n// Preconditions: dst is large enough for any reasonable request.\n+// +checklocks:fd.mu\nfunc (fd *DeviceFD) readLocked(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {\nvar req *Request\n@@ -233,7 +243,7 @@ func (fd *DeviceFD) readLocked(ctx context.Context, dst usermem.IOSequence, opts\n// PWrite implements vfs.FileDescriptionImpl.PWrite.\nfunc (fd *DeviceFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {\n// Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.\n- if fd.fs == nil {\n+ if !fd.filesystemIsInitialized() {\nreturn 0, linuxerr.EPERM\n}\n@@ -248,9 +258,10 @@ func (fd *DeviceFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.\n}\n// writeLocked implements writing to the fuse device while locked with DeviceFD.mu.\n+// +checklocks:fd.mu\nfunc (fd *DeviceFD) writeLocked(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {\n// Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.\n- if fd.fs == nil {\n+ if !fd.filesystemIsInitialized() {\nreturn 0, linuxerr.EPERM\n}\n@@ -359,10 +370,11 @@ func (fd *DeviceFD) Readiness(mask waiter.EventMask) waiter.EventMask {\n// readinessLocked implements checking the readiness of the fuse device while\n// locked with DeviceFD.mu.\n+// +checklocks:fd.mu\nfunc (fd *DeviceFD) readinessLocked(mask waiter.EventMask) waiter.EventMask {\nvar ready waiter.EventMask\n- if fd.fs == nil || fd.fs.umounted {\n+ if !fd.filesystemIsInitialized() || fd.fs.umounted {\nready |= waiter.EventErr\nreturn ready & mask\n}\n@@ -391,7 +403,7 @@ func (fd *DeviceFD) EventUnregister(e *waiter.Entry) {\n// Seek implements 
vfs.FileDescriptionImpl.Seek.\nfunc (fd *DeviceFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {\n// Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.\n- if fd.fs == nil {\n+ if !fd.filesystemIsInitialized() {\nreturn 0, linuxerr.EPERM\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/dev_test.go",
"new_path": "pkg/sentry/fsimpl/fuse/dev_test.go",
"diff": "@@ -209,7 +209,11 @@ func ReadTest(serverTask *kernel.Task, fd *vfs.FileDescription, inIOseq usermem.\n// a header, a payload, calls the server, waits for the response, and processes\n// the response.\nfunc fuseClientRun(t *testing.T, s *testutil.System, k *kernel.Kernel, conn *connection, creds *auth.Credentials, pid uint32, inode uint64, clientDone chan struct{}) {\n- defer func() { clientDone <- struct{}{} }()\n+ defer func() {\n+ if !t.Failed() {\n+ clientDone <- struct{}{}\n+ }\n+ }()\ntc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())\nclientTask, err := testutil.CreateTask(s.Ctx, fmt.Sprintf(\"fuse-client-%v\", pid), tc, s.MntNs, s.Root, s.Root)\n@@ -251,7 +255,11 @@ func fuseClientRun(t *testing.T, s *testutil.System, k *kernel.Kernel, conn *con\n// that simply reads a request and echos the same struct back as a response using the\n// appropriate headers.\nfunc fuseServerRun(t *testing.T, s *testutil.System, k *kernel.Kernel, fd *vfs.FileDescription, serverDone, killServer chan struct{}) {\n- defer func() { serverDone <- struct{}{} }()\n+ defer func() {\n+ if !t.Failed() {\n+ serverDone <- struct{}{}\n+ }\n+ }()\n// Create the tasks that the server will be using.\ntc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/fusefs.go",
"new_path": "pkg/sentry/fsimpl/fuse/fusefs.go",
"diff": "@@ -272,9 +272,12 @@ func newFUSEFilesystem(ctx context.Context, vfsObj *vfs.VirtualFilesystem, fsTyp\n// reference on fuseFD, since conn uses fuseFD for communication with the\n// server? Wouldn't doing so create a circular reference?\nfs.VFSFilesystem().IncRef() // for fuseFD.fs\n- // FIXME(gvisor.dev/issue/4813): fuseFD.fs is accessed without\n- // synchronization.\n+\n+ fuseFD.mu.Lock()\n+ fs.conn.mu.Lock()\nfuseFD.fs = fs\n+ fs.conn.mu.Unlock()\n+ fuseFD.mu.Unlock()\nreturn fs, nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | fuse: Attempt to fix five data races.
I am not fully familiar with this code, but I added some `checklocks`
annotations wherever it seemed appropriate and obvious from existing comments.
PiperOrigin-RevId: 426042114 |
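For context on the `+checklocks` annotations this commit introduces: the checklocks analyzer verifies at build time that annotated fields are only touched with their mutex held. A minimal hedged sketch of the convention (the struct and field names here are illustrative, not the actual fuse types):

```go
package example

import "sync"

type conn struct {
	mu sync.Mutex

	// connected may only be accessed with mu held; checklocks
	// reports any unguarded access at build time.
	// +checklocks:mu
	connected bool
}

// setConnectedLocked documents (and checklocks verifies) that the
// caller already holds c.mu.
// +checklocks:c.mu
func (c *conn) setConnectedLocked(v bool) {
	c.connected = v
}

// isConnected takes the lock itself, so it carries no annotation.
func (c *conn) isConnected() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.connected
}
```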
259,909 | 02.02.2022 21:48:57 | 28,800 | 66da66de302bbb1d0fe5da853055e76369abba61 | Fix incorrect behavior for qdisc wakeups.
Without this fix, the qdisc dispatch loop writes at most one batch,
then waits for another wakeup before writing any more packets. All
packets in the queue should be written to the link endpoint after
a wakeup. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/qdisc/fifo/fifo.go",
"new_path": "pkg/tcpip/link/qdisc/fifo/fifo.go",
"diff": "@@ -28,6 +28,13 @@ import (\nvar _ stack.QueueingDiscipline = (*discipline)(nil)\n+const (\n+ // BatchSize represents the number of packets written to the\n+ // lower link endpoint during calls to WritePackets.\n+ BatchSize = 32\n+ qDiscClosed = 1\n+)\n+\n// discipline represents a QueueingDiscipline which implements a FIFO queue for\n// all outgoing packets. discipline can have 1 or more underlying\n// queueDispatchers. All outgoing packets are consistenly hashed to a single\n@@ -41,8 +48,6 @@ type discipline struct {\nclosed int32\n}\n-const qDiscClosed = 1\n-\n// queueDispatcher is responsible for dispatching all outbound packets in its\n// queue. It will also smartly batch packets when possible and write them\n// through the lower LinkWriter.\n@@ -87,7 +92,6 @@ func (qd *queueDispatcher) dispatchLoop() {\ns.AddWaker(&qd.closeWaker)\ndefer s.Done()\n- const batchSize = 32\nvar batch stack.PacketBufferList\nfor {\nswitch w := s.Fetch(true); w {\n@@ -106,21 +110,20 @@ func (qd *queueDispatcher) dispatchLoop() {\npanic(\"unknown waker\")\n}\nqd.mu.Lock()\n- for batch.Len() < batchSize {\n- pkt := qd.queue.Front()\n- if pkt == nil {\n- break\n- }\n-\n+ for pkt := qd.queue.Front(); pkt != nil; pkt = qd.queue.Front() {\nqd.queue.Remove(pkt)\nqd.used--\nbatch.PushBack(pkt)\n+ if batch.Len() < BatchSize && qd.used != 0 {\n+ continue\n}\nqd.mu.Unlock()\n-\n_, _ = qd.lower.WritePackets(batch)\nbatch.DecRef()\nbatch.Reset()\n+ qd.mu.Lock()\n+ }\n+ qd.mu.Unlock()\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/qdisc/fifo/qdisc_test.go",
"new_path": "pkg/tcpip/link/qdisc/fifo/qdisc_test.go",
"diff": "@@ -18,6 +18,7 @@ import (\n\"math/rand\"\n\"os\"\n\"testing\"\n+ \"time\"\n\"gvisor.dev/gvisor/pkg/refs\"\n\"gvisor.dev/gvisor/pkg/refsvfs2\"\n@@ -28,20 +29,31 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n)\n-var _ stack.LinkWriter = (*discardWriter)(nil)\n+var _ stack.LinkWriter = (*countWriter)(nil)\n-// discardWriter implements LinkWriter.\n-type discardWriter struct {\n+// countWriter implements LinkWriter.\n+type countWriter struct {\n+ mu sync.Mutex\n+ packetsWritten int\n+ packetsWanted int\n+ done chan struct{}\n}\n-func (*discardWriter) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) {\n+func (cw *countWriter) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) {\n+ cw.mu.Lock()\n+ defer cw.mu.Unlock()\n+ cw.packetsWritten += pkts.Len()\n+ // Opt out of using the done channel if packetsWanted is not set.\n+ if cw.packetsWanted > 0 && cw.packetsWritten == cw.packetsWanted {\n+ close(cw.done)\n+ }\nreturn pkts.Len(), nil\n}\n// In b/209690936, fast simultaneous writes on qdisc will cause panics. This test\n// reproduces the behavior shown in that bug.\nfunc TestFastSimultaneousWrites(t *testing.T) {\n- lower := &discardWriter{}\n+ lower := &countWriter{}\nlinkEP := fifo.New(lower, 16, 1000)\nv := make(buffer.View, 1)\n@@ -50,7 +62,6 @@ func TestFastSimultaneousWrites(t *testing.T) {\nnWriters := 100\nnWrites := 100\nvar wg sync.WaitGroup\n- defer wg.Done()\nfor i := 0; i < nWriters; i++ {\nwg.Add(1)\ngo func() {\n@@ -65,6 +76,8 @@ func TestFastSimultaneousWrites(t *testing.T) {\n}\n}()\n}\n+ wg.Wait()\n+ linkEP.Close()\n}\nfunc TestWriteRefusedAfterClosed(t *testing.T) {\n@@ -78,6 +91,30 @@ func TestWriteRefusedAfterClosed(t *testing.T) {\n}\n}\n+func TestWriteMorePacketsThanBatchSize(t *testing.T) {\n+ tc := []int{fifo.BatchSize + 1, fifo.BatchSize*2 + 1}\n+ v := make(buffer.View, 1)\n+\n+ for _, want := range tc {\n+ done := make(chan struct{})\n+ lower := &countWriter{done: done, packetsWanted: want}\n+ linkEp := fifo.New(lower, 1, 1000)\n+ for i := 0; i < want; i++ {\n+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n+ Data: v.ToVectorisedView(),\n+ })\n+ linkEp.WritePacket(pkt)\n+ pkt.DecRef()\n+ }\n+ select {\n+ case <-done:\n+ case <-time.After(1 * time.Second):\n+ t.Fatalf(\"expected %d packets, but got only %d\", want, lower.packetsWritten)\n+ }\n+ linkEp.Close()\n+ }\n+}\n+\nfunc TestMain(m *testing.M) {\nrefs.SetLeakMode(refs.LeaksPanic)\ncode := m.Run()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix incorrect behavior for qdisc wakeups.
Without this fix, the qdisc dispatch loop writes at most one batch,
then waits for another wakeup before writing any more packets. All
packets in the queue should be written to the link endpoint after
a wakeup.
PiperOrigin-RevId: 426053587 |
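The core of the fix, stripped of the stack-specific types, is a drain loop that keeps flushing fixed-size batches until the queue is empty, instead of writing one batch and waiting for the next wakeup. A hedged sketch with simplified stand-in types (the real code uses stack.PacketBufferList and manages packet references):

```go
package example

import "sync"

type packet struct{ next *packet }

// fifo is a minimal intrusive queue standing in for the packet queue.
type fifo struct{ head, tail *packet }

func (q *fifo) push(p *packet) {
	if q.tail == nil {
		q.head, q.tail = p, p
		return
	}
	q.tail.next = p
	q.tail = p
}

func (q *fifo) empty() bool { return q.head == nil }

func (q *fifo) pop() *packet {
	p := q.head
	if p == nil {
		return nil
	}
	q.head = p.next
	if q.head == nil {
		q.tail = nil
	}
	p.next = nil
	return p
}

type writer interface{ writePackets([]*packet) }

type dispatcher struct {
	mu    sync.Mutex // guards queue
	queue fifo
	lower writer
}

// drain flushes everything queued at wakeup time, at most batchSize
// packets per writePackets call, so one wakeup empties the queue.
func (d *dispatcher) drain() {
	const batchSize = 32
	var batch []*packet
	d.mu.Lock()
	for p := d.queue.pop(); p != nil; p = d.queue.pop() {
		batch = append(batch, p)
		if len(batch) < batchSize && !d.queue.empty() {
			continue
		}
		d.mu.Unlock() // don't hold the lock across the write
		d.lower.writePackets(batch)
		batch = batch[:0]
		d.mu.Lock()
	}
	d.mu.Unlock()
}
```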
259,868 | 03.02.2022 01:43:48 | 28,800 | 95d883a92e31909b13511d34c107f22f04012172 | Refactor task start and exit from a PID namespace into separate functions. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/BUILD",
"new_path": "pkg/sentry/kernel/BUILD",
"diff": "@@ -200,6 +200,7 @@ go_library(\n\"task_work.go\",\n\"thread_group.go\",\n\"threads.go\",\n+ \"threads_impl.go\",\n\"timekeeper.go\",\n\"timekeeper_state.go\",\n\"tty.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_exit.go",
"new_path": "pkg/sentry/kernel/task_exit.go",
"diff": "@@ -651,12 +651,7 @@ func (t *Task) exitNotifyLocked(fromPtraceDetach bool) {\nif t.exitTracerAcked && t.exitParentAcked {\nt.advanceExitStateLocked(TaskExitZombie, TaskExitDead)\nfor ns := t.tg.pidns; ns != nil; ns = ns.parent {\n- tid := ns.tids[t]\n- delete(ns.tasks, tid)\n- delete(ns.tids, t)\n- if t == t.tg.leader {\n- delete(ns.tgids, t.tg)\n- }\n+ ns.deleteTask(t)\n}\nt.userCounters.decRLimitNProc()\nt.tg.exitedCPUStats.Accumulate(t.CPUStats())\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_start.go",
"new_path": "pkg/sentry/kernel/task_start.go",
"diff": "@@ -248,28 +248,25 @@ func (ts *TaskSet) assignTIDsLocked(t *Task) error {\ntid ThreadID\n}\nvar allocatedTIDs []allocatedTID\n+ var tid ThreadID\n+ var err error\nfor ns := t.tg.pidns; ns != nil; ns = ns.parent {\n- tid, err := ns.allocateTID()\n+ if tid, err = ns.allocateTID(); err != nil {\n+ break\n+ }\n+ if err = ns.addTask(t, tid); err != nil {\n+ break\n+ }\n+ allocatedTIDs = append(allocatedTIDs, allocatedTID{ns, tid})\n+ }\nif err != nil {\n// Failure. Remove the tids we already allocated in descendant\n// namespaces.\nfor _, a := range allocatedTIDs {\n- delete(a.ns.tasks, a.tid)\n- delete(a.ns.tids, t)\n- if t.tg.leader == nil {\n- delete(a.ns.tgids, t.tg)\n- }\n+ a.ns.deleteTask(t)\n}\nreturn err\n}\n- ns.tasks[tid] = t\n- ns.tids[t] = tid\n- if t.tg.leader == nil {\n- // New thread group.\n- ns.tgids[t.tg] = tid\n- }\n- allocatedTIDs = append(allocatedTIDs, allocatedTID{ns, tid})\n- }\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/threads.go",
"new_path": "pkg/sentry/kernel/threads.go",
"diff": "@@ -187,6 +187,9 @@ type PIDNamespace struct {\n// exiting indicates that the namespace's init process is exiting or has\n// exited.\nexiting bool\n+\n+ // pidNamespaceData contains additional per-PID-namespace data.\n+ extra pidNamespaceData\n}\nfunc newPIDNamespace(ts *TaskSet, parent *PIDNamespace, userns *auth.UserNamespace) *PIDNamespace {\n@@ -201,6 +204,7 @@ func newPIDNamespace(ts *TaskSet, parent *PIDNamespace, userns *auth.UserNamespa\nsids: make(map[*Session]SessionID),\nprocessGroups: make(map[ProcessGroupID]*ProcessGroup),\npgids: make(map[*ProcessGroup]ProcessGroupID),\n+ extra: newPIDNamespaceData(),\n}\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/kernel/threads_impl.go",
"diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+//go:build go1.1\n+// +build go1.1\n+\n+package kernel\n+\n+// pidNamespaceData may contain extra per-PID-namespace data.\n+// +stateify savable\n+type pidNamespaceData struct {\n+}\n+\n+// newPIDNamespaceData returns a new `pidNamespaceData` struct.\n+func newPIDNamespaceData() pidNamespaceData {\n+ return pidNamespaceData{}\n+}\n+\n+// addTask adds a Task into this PIDNamespace.\n+// It is always performed under TaskSet lock.\n+func (ns *PIDNamespace) addTask(t *Task, tid ThreadID) error {\n+ ns.tasks[tid] = t\n+ ns.tids[t] = tid\n+ if t.tg.leader == nil {\n+ // New thread group.\n+ ns.tgids[t.tg] = tid\n+ }\n+ return nil\n+}\n+\n+// deleteTask deletes a Task from this PIDNamespace.\n+// It is always performed under TaskSet lock.\n+func (ns *PIDNamespace) deleteTask(t *Task) {\n+ delete(ns.tasks, ns.tids[t])\n+ delete(ns.tids, t)\n+ if t == t.tg.leader || t.tg.leader == nil {\n+ delete(ns.tgids, t.tg)\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -1016,6 +1016,7 @@ func TestKillPid(t *testing.T) {\nif err != nil {\nt.Fatalf(\"failed to get process list: %v\", err)\n}\n+ t.Logf(\"current process list: %v\", procs)\nvar pid int32\nfor _, p := range procs {\nif pid < int32(p.PID) {\n@@ -1028,7 +1029,8 @@ func TestKillPid(t *testing.T) {\n// Verify that one process is gone.\nif err := waitForProcessCount(cont, nProcs-1); err != nil {\n- t.Fatalf(\"error waiting for processes: %v\", err)\n+ procs, procsErr := cont.Processes()\n+ t.Fatalf(\"error waiting for processes: %v; current processes: %v / %v\", err, procs, procsErr)\n}\nprocs, err = cont.Processes()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Refactor task start and exit from a PID namespace into separate functions.
PiperOrigin-RevId: 426083905 |
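The rollback pattern this refactor tidies up in assignTIDsLocked — allocate a TID in each namespace up the chain, and on any failure undo every prior allocation — is worth seeing in isolation. A hedged sketch with simplified types, not the real kernel structures:

```go
package example

import "errors"

type threadID int32

type task struct{}

// namespace is a simplified stand-in for PIDNamespace.
type namespace struct {
	parent *namespace
	tids   map[*task]threadID
	tasks  map[threadID]*task
	next   threadID
}

func newNamespace(parent *namespace) *namespace {
	return &namespace{
		parent: parent,
		tids:   make(map[*task]threadID),
		tasks:  make(map[threadID]*task),
	}
}

func (ns *namespace) allocateTID() (threadID, error) {
	ns.next++
	if ns.next <= 0 {
		return 0, errors.New("tid space exhausted")
	}
	return ns.next, nil
}

func (ns *namespace) addTask(t *task, tid threadID) {
	ns.tasks[tid] = t
	ns.tids[t] = tid
}

func (ns *namespace) deleteTask(t *task) {
	delete(ns.tasks, ns.tids[t])
	delete(ns.tids, t)
}

// assignTIDs registers t in leaf and every ancestor namespace; on any
// failure it rolls back the namespaces already updated so the task set
// is left unchanged.
func assignTIDs(leaf *namespace, t *task) error {
	var done []*namespace
	for ns := leaf; ns != nil; ns = ns.parent {
		tid, err := ns.allocateTID()
		if err != nil {
			for _, d := range done {
				d.deleteTask(t)
			}
			return err
		}
		ns.addTask(t, tid)
		done = append(done, ns)
	}
	return nil
}
```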
259,853 | 03.02.2022 10:39:16 | 28,800 | e31c3f18da12b54917a22a13a87830b94a1d7922 | kvm: sentry executable mappings have to be read-only
The sentry guest page tables must not allow to execute writable memory regions. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine.go",
"new_path": "pkg/sentry/platform/kvm/machine.go",
"diff": "@@ -198,7 +198,7 @@ func (m *machine) createVCPU(id int) *vCPU {\n// readOnlyGuestRegions contains regions that have to be mapped read-only into\n// the guest physical address space. Right now, it is used on arm64 only.\n-var readOnlyGuestRegions []region\n+var readOnlyGuestRegions []virtualRegion\n// newMachine returns a new VM context.\nfunc newMachine(vm int) (*machine, error) {\n@@ -247,7 +247,7 @@ func newMachine(vm int) (*machine, error) {\nm.kernel.PageTables.Map(\nhostarch.Addr(pr.virtual),\npr.length,\n- pagetables.MapOpts{AccessType: hostarch.AnyAccess},\n+ pagetables.MapOpts{AccessType: hostarch.ReadWrite},\npr.physical)\nreturn true // Keep iterating.\n@@ -257,7 +257,7 @@ func newMachine(vm int) (*machine, error) {\n// available in the VM. Note that this doesn't guarantee no future\n// faults, however it should guarantee that everything is available to\n// ensure successful vCPU entry.\n- mapRegion := func(vr region, flags uint32) {\n+ mapRegion := func(vr virtualRegion, flags uint32) {\nfor virtual := vr.virtual; virtual < vr.virtual+vr.length; {\nphysical, length, ok := translateToPhysical(virtual)\nif !ok {\n@@ -269,6 +269,17 @@ func newMachine(vm int) (*machine, error) {\n// Cap the length to the end of the area.\nlength = vr.virtual + vr.length - virtual\n}\n+ // Update page tables for executable mappings.\n+ if vr.accessType.Execute {\n+ if vr.accessType.Write {\n+ panic(fmt.Sprintf(\"executable mapping can't be writable: %#v\", vr))\n+ }\n+ m.kernel.PageTables.Map(\n+ hostarch.Addr(virtual),\n+ length,\n+ pagetables.MapOpts{AccessType: vr.accessType},\n+ physical)\n+ }\n// Ensure the physical range is mapped.\nm.mapPhysical(physical, length, physicalRegions, flags)\n@@ -295,7 +306,7 @@ func newMachine(vm int) (*machine, error) {\nvr.length += 1 << 20\n}\n- mapRegion(vr.region, 0)\n+ mapRegion(vr, 0)\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine_arm64.go",
"new_path": "pkg/sentry/platform/kvm/machine_arm64.go",
"diff": "@@ -107,16 +107,18 @@ func archPhysicalRegions(physicalRegions []physicalRegion) []physicalRegion {\nreturn // skip region.\n}\nif !vr.accessType.Write {\n- readOnlyGuestRegions = append(readOnlyGuestRegions, vr.region)\n+ readOnlyGuestRegions = append(readOnlyGuestRegions, vr)\n}\n})\nrdRegions := readOnlyGuestRegions[:]\n// Add an unreachable region.\n- rdRegions = append(rdRegions, region{\n+ rdRegions = append(rdRegions, virtualRegion{\n+ region: region{\nvirtual: 0xffffffffffffffff,\nlength: 0,\n+ },\n})\nvar regions []physicalRegion\n@@ -137,7 +139,7 @@ func archPhysicalRegions(physicalRegions []physicalRegion) []physicalRegion {\nstart := pr.virtual\nend := pr.virtual + pr.length\nfor start < end {\n- rdRegion := rdRegions[i]\n+ rdRegion := rdRegions[i].region\nrdStart := rdRegion.virtual\nrdEnd := rdRegion.virtual + rdRegion.length\nif rdEnd <= start {\n"
}
] | Go | Apache License 2.0 | google/gvisor | kvm: sentry executable mappings have to be read-only
The sentry guest page tables must not allow to execute writable memory regions.
PiperOrigin-RevId: 426183789 |
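The invariant the commit enforces — no region in the sentry's guest page tables may be both writable and executable — reduces to a permission decision like the following. A hedged sketch with simplified types (accessType mirrors the shape of gVisor's hostarch.AccessType; the surrounding page-table machinery is elided):

```go
package example

import "fmt"

// accessType mirrors the shape of hostarch.AccessType.
type accessType struct {
	Read, Write, Execute bool
}

// guestMapOpts decides the page-table permissions for a host virtual
// region, enforcing W^X: executable regions are mapped read-only, and
// a writable+executable region is treated as a bug.
func guestMapOpts(at accessType) (accessType, error) {
	if at.Execute {
		if at.Write {
			return accessType{}, fmt.Errorf("executable mapping can't be writable: %+v", at)
		}
		return accessType{Read: true, Execute: true}, nil
	}
	// Non-executable regions are mapped read-write.
	return accessType{Read: true, Write: true}, nil
}
```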
259,951 | 03.02.2022 13:27:19 | 28,800 | 56de63a4cb1ce9f01fa69211e7b3285fa6a34ee3 | Remove unused parameter in UDP tests instantiation
The variant kDualStack is never used by any test. It just duplicates
the kIpv6 case. Because the parameters are now just kIpv4 and kIpv6, use
an int to encode the address family, instead of the enum. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/udp_socket.cc",
"new_path": "test/syscalls/linux/udp_socket.cc",
"diff": "@@ -55,8 +55,7 @@ namespace {\n// Fixture for tests parameterized by the address family to use (AF_INET and\n// AF_INET6) when creating sockets.\n-class UdpSocketTest\n- : public ::testing::TestWithParam<gvisor::testing::AddressFamily> {\n+class UdpSocketTest : public ::testing::TestWithParam<int> {\nprotected:\n// Creates two sockets that will be used by test cases.\nvoid SetUp() override;\n@@ -79,9 +78,6 @@ class UdpSocketTest\n// Disconnects socket sockfd.\nvoid Disconnect(int sockfd);\n- // Get family for the test.\n- int GetFamily();\n-\n// Socket used by Bind methods\nFileDescriptor bind_;\n@@ -91,7 +87,7 @@ class UdpSocketTest\n// Address for bind_ socket.\nstruct sockaddr* bind_addr_;\n- // Initialized to the length based on GetFamily().\n+ // Initialized to the length based on GetParam().\nsocklen_t addrlen_;\n// Storage for bind_addr_.\n@@ -138,19 +134,12 @@ void UdpSocketTest::SetUp() {\naddrlen_ = GetAddrLength();\nbind_ =\n- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetFamily(), SOCK_DGRAM, IPPROTO_UDP));\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_DGRAM, IPPROTO_UDP));\nmemset(&bind_addr_storage_, 0, sizeof(bind_addr_storage_));\nbind_addr_ = AsSockAddr(&bind_addr_storage_);\nsock_ =\n- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetFamily(), SOCK_DGRAM, IPPROTO_UDP));\n-}\n-\n-int UdpSocketTest::GetFamily() {\n- if (GetParam() == AddressFamily::kIpv4) {\n- return AF_INET;\n- }\n- return AF_INET6;\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_DGRAM, IPPROTO_UDP));\n}\nPosixError UdpSocketTest::BindLoopback() {\n@@ -183,7 +172,7 @@ PosixError UdpSocketTest::BindSocket(int socket, struct sockaddr* addr) {\nsocklen_t UdpSocketTest::GetAddrLength() {\nstruct sockaddr_storage addr;\n- if (GetFamily() == AF_INET) {\n+ if (GetParam() == AF_INET) {\nauto sin = reinterpret_cast<struct sockaddr_in*>(&addr);\nreturn sizeof(*sin);\n}\n@@ -195,9 +184,9 @@ socklen_t UdpSocketTest::GetAddrLength() {\nsockaddr_storage UdpSocketTest::InetAnyAddr() {\nstruct sockaddr_storage addr;\nmemset(&addr, 0, sizeof(addr));\n- AsSockAddr(&addr)->sa_family = GetFamily();\n+ AsSockAddr(&addr)->sa_family = GetParam();\n- if (GetFamily() == AF_INET) {\n+ if (GetParam() == AF_INET) {\nauto sin = reinterpret_cast<struct sockaddr_in*>(&addr);\nsin->sin_addr.s_addr = htonl(INADDR_ANY);\nsin->sin_port = htons(0);\n@@ -213,9 +202,9 @@ sockaddr_storage UdpSocketTest::InetAnyAddr() {\nsockaddr_storage UdpSocketTest::InetLoopbackAddr() {\nstruct sockaddr_storage addr;\nmemset(&addr, 0, sizeof(addr));\n- AsSockAddr(&addr)->sa_family = GetFamily();\n+ AsSockAddr(&addr)->sa_family = GetParam();\n- if (GetFamily() == AF_INET) {\n+ if (GetParam() == AF_INET) {\nauto sin = reinterpret_cast<struct sockaddr_in*>(&addr);\nsin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);\nsin->sin_port = htons(0);\n@@ -237,7 +226,7 @@ void UdpSocketTest::Disconnect(int sockfd) {\n// Check that after disconnect the socket is bound to the ANY address.\nEXPECT_THAT(getsockname(sockfd, addr, &addrlen), SyscallSucceeds());\n- if (GetParam() == AddressFamily::kIpv4) {\n+ if (GetParam() == AF_INET) {\nauto addr_out = reinterpret_cast<struct sockaddr_in*>(addr);\nEXPECT_EQ(addrlen, sizeof(*addr_out));\nEXPECT_EQ(addr_out->sin_addr.s_addr, htonl(INADDR_ANY));\n@@ -252,13 +241,13 @@ void UdpSocketTest::Disconnect(int sockfd) {\nTEST_P(UdpSocketTest, Creation) {\nFileDescriptor sock =\n- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetFamily(), SOCK_DGRAM, IPPROTO_UDP));\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_DGRAM, 
IPPROTO_UDP));\nEXPECT_THAT(close(sock.release()), SyscallSucceeds());\n- sock = ASSERT_NO_ERRNO_AND_VALUE(Socket(GetFamily(), SOCK_DGRAM, 0));\n+ sock = ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_DGRAM, 0));\nEXPECT_THAT(close(sock.release()), SyscallSucceeds());\n- ASSERT_THAT(socket(GetFamily(), SOCK_STREAM, IPPROTO_UDP), SyscallFails());\n+ ASSERT_THAT(socket(GetParam(), SOCK_STREAM, IPPROTO_UDP), SyscallFails());\n}\nTEST_P(UdpSocketTest, Getsockname) {\n@@ -377,7 +366,7 @@ TEST_P(UdpSocketTest, ConnectWriteToInvalidPort) {\nsocklen_t addrlen = sizeof(addr_storage);\nstruct sockaddr* addr = AsSockAddr(&addr_storage);\nFileDescriptor s =\n- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetFamily(), SOCK_DGRAM, IPPROTO_UDP));\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_DGRAM, IPPROTO_UDP));\nASSERT_THAT(bind(s.get(), addr, addrlen), SyscallSucceeds());\nASSERT_THAT(getsockname(s.get(), addr, &addrlen), SyscallSucceeds());\nEXPECT_EQ(addrlen, addrlen_);\n@@ -411,7 +400,7 @@ TEST_P(UdpSocketTest, ConnectSimultaneousWriteToInvalidPort) {\nsocklen_t addrlen = sizeof(addr_storage);\nstruct sockaddr* addr = AsSockAddr(&addr_storage);\nFileDescriptor s =\n- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetFamily(), SOCK_DGRAM, IPPROTO_UDP));\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_DGRAM, IPPROTO_UDP));\nASSERT_THAT(bind(s.get(), addr, addrlen), SyscallSucceeds());\nASSERT_THAT(getsockname(s.get(), addr, &addrlen), SyscallSucceeds());\nEXPECT_EQ(addrlen, addrlen_);\n@@ -503,7 +492,7 @@ TEST_P(UdpSocketTest, Connect) {\nstruct sockaddr_storage bind2_storage = InetLoopbackAddr();\nstruct sockaddr* bind2_addr = AsSockAddr(&bind2_storage);\nFileDescriptor bind2 =\n- ASSERT_NO_ERRNO_AND_VALUE(Socket(GetFamily(), SOCK_DGRAM, IPPROTO_UDP));\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_DGRAM, IPPROTO_UDP));\nASSERT_NO_ERRNO(BindSocket(bind2.get(), bind2_addr));\n// Try to connect again.\n@@ -644,7 +633,7 @@ TEST_P(UdpSocketTest, DisconnectAfterBindToUnspecAndConnect) {\nsockaddr_storage unspec = {.ss_family = AF_UNSPEC};\nint bind_res = bind(sock_.get(), AsSockAddr(&unspec), sizeof(unspec));\nif ((!IsRunningOnGvisor() || IsRunningWithHostinet()) &&\n- GetFamily() == AF_INET) {\n+ GetParam() == AF_INET) {\n// Linux allows this for undocumented compatibility reasons:\n// https://github.com/torvalds/linux/commit/29c486df6a208432b370bd4be99ae1369ede28d8.\n//\n@@ -678,7 +667,7 @@ TEST_P(UdpSocketTest, BindToAnyConnnectToLocalhost) {\n// If the socket is bound to ANY and connected to a loopback address,\n// getsockname() has to return the loopback address.\n- if (GetParam() == AddressFamily::kIpv4) {\n+ if (GetParam() == AF_INET) {\nauto addr_out = reinterpret_cast<struct sockaddr_in*>(addr);\nEXPECT_EQ(addrlen, sizeof(*addr_out));\nEXPECT_EQ(addr_out->sin_addr.s_addr, htonl(INADDR_LOOPBACK));\n@@ -759,7 +748,7 @@ TEST_P(UdpSocketTest, Disconnect) {\nTEST_P(UdpSocketTest, ConnectBadAddress) {\nstruct sockaddr addr = {};\n- addr.sa_family = GetFamily();\n+ addr.sa_family = GetParam();\nASSERT_THAT(connect(sock_.get(), &addr, sizeof(addr.sa_family)),\nSyscallFailsWithErrno(EINVAL));\n}\n@@ -829,7 +818,7 @@ TEST_P(UdpSocketTest, RecvErrorConnRefused) {\nsocklen_t optlen = sizeof(v);\nint opt_level = SOL_IP;\nint opt_type = IP_RECVERR;\n- if (GetParam() != AddressFamily::kIpv4) {\n+ if (GetParam() == AF_INET6) {\nopt_level = SOL_IPV6;\nopt_type = IPV6_RECVERR;\n}\n@@ -889,7 +878,7 @@ TEST_P(UdpSocketTest, RecvErrorConnRefused) {\nstruct sock_extended_err* sock_err =\n(struct 
sock_extended_err*)CMSG_DATA(cmsg);\nEXPECT_EQ(sock_err->ee_errno, ECONNREFUSED);\n- if (GetParam() == AddressFamily::kIpv4) {\n+ if (GetParam() == AF_INET) {\nEXPECT_EQ(sock_err->ee_origin, SO_EE_ORIGIN_ICMP);\nEXPECT_EQ(sock_err->ee_type, ICMP_DEST_UNREACH);\nEXPECT_EQ(sock_err->ee_code, ICMP_PORT_UNREACH);\n@@ -1751,7 +1740,7 @@ TEST_P(UdpSocketTest, TimestampIoctlPersistence) {\n// TOS and TCLASS values may be different but IPv6 sockets with IPv4-mapped-IPv6\n// addresses use TOS (IPv4), not TCLASS (IPv6).\nTEST_P(UdpSocketTest, DifferentTOSAndTClass) {\n- const int kFamily = GetFamily();\n+ const int kFamily = GetParam();\nconstexpr int kToS = IPTOS_LOWDELAY;\nconstexpr int kTClass = IPTOS_THROUGHPUT;\nASSERT_NE(kToS, kTClass);\n@@ -1900,7 +1889,7 @@ TEST_P(UdpSocketTest, SetAndReceiveTOS) {\n// Allow socket to receive control message.\nint recv_level = SOL_IP;\nint recv_type = IP_RECVTOS;\n- if (GetParam() != AddressFamily::kIpv4) {\n+ if (GetParam() == AF_INET6) {\nrecv_level = SOL_IPV6;\nrecv_type = IPV6_RECVTCLASS;\n}\n@@ -1973,7 +1962,7 @@ TEST_P(UdpSocketTest, SendAndReceiveTOS) {\n// Allow socket to receive control message.\nint recv_level = SOL_IP;\nint recv_type = IP_RECVTOS;\n- if (GetParam() != AddressFamily::kIpv4) {\n+ if (GetParam() == AF_INET6) {\nrecv_level = SOL_IPV6;\nrecv_type = IPV6_RECVTCLASS;\n}\n@@ -2269,9 +2258,7 @@ TEST_P(UdpSocketTest, ConnectToZeroPortConnected) {\n}\nINSTANTIATE_TEST_SUITE_P(AllInetTests, UdpSocketTest,\n- ::testing::Values(AddressFamily::kIpv4,\n- AddressFamily::kIpv6,\n- AddressFamily::kDualStack));\n+ ::testing::Values(AF_INET, AF_INET6));\nTEST(UdpInet6SocketTest, ConnectInet4Sockaddr) {\n// glibc getaddrinfo expects the invariant expressed by this test to be held.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove unused parameter in UDP tests instantiation
The variant kDualStack is never used by any test. It just duplicates
the kIpv6 case. Because the parameters are now just kIpv4 and kIpv6, use
an int to encode the address family, instead of the enum.
PiperOrigin-RevId: 426224701 |
259,853 | 03.02.2022 14:51:32 | 28,800 | 237e45d23aabd8e3f7679cbd87abbea757702ae9 | filters: don't allow to create new executable mappings | [
{
"change_type": "MODIFY",
"old_path": "pkg/seccomp/seccomp.go",
"new_path": "pkg/seccomp/seccomp.go",
"diff": "@@ -41,11 +41,14 @@ const (\n// used because it only kills the offending thread and often keeps the sentry\n// hanging.\n//\n+// denyRules describes forbidden syscalls. rules describes allowed syscalls.\n+// denyRules is executed before rules.\n+//\n// Be aware that RET_TRAP sends SIGSYS to the process and it may be ignored,\n// making it possible for the process to continue running after a violation.\n// However, it will leave a SECCOMP audit event trail behind. In any case, the\n// syscall is still blocked from executing.\n-func Install(rules SyscallRules) error {\n+func Install(rules SyscallRules, denyRules SyscallRules) error {\ndefaultAction, err := defaultAction()\nif err != nil {\nreturn err\n@@ -57,6 +60,10 @@ func Install(rules SyscallRules) error {\nlog.Infof(\"Installing seccomp filters for %d syscalls (action=%v)\", len(rules), defaultAction)\ninstrs, err := BuildProgram([]RuleSet{\n+ {\n+ Rules: denyRules,\n+ Action: defaultAction,\n+ },\n{\nRules: rules,\nAction: linux.SECCOMP_RET_ALLOW,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/seccomp/seccomp_rules.go",
"new_path": "pkg/seccomp/seccomp_rules.go",
"diff": "package seccomp\n-import \"fmt\"\n+import (\n+ \"fmt\"\n+\n+ \"golang.org/x/sys/unix\"\n+)\n// The offsets are based on the following struct in include/linux/seccomp.h.\n// struct seccomp_data {\n@@ -188,3 +192,22 @@ func (sr SyscallRules) Merge(rules SyscallRules) {\n}\n}\n}\n+\n+// DenyNewExecMappings is a set of rules that denies creating new executable\n+// mappings and converting existing ones.\n+var DenyNewExecMappings = SyscallRules{\n+ unix.SYS_MMAP: []Rule{\n+ {\n+ MatchAny{},\n+ MatchAny{},\n+ MaskedEqual(unix.PROT_EXEC, unix.PROT_EXEC),\n+ },\n+ },\n+ unix.SYS_MPROTECT: []Rule{\n+ {\n+ MatchAny{},\n+ MatchAny{},\n+ MaskedEqual(unix.PROT_EXEC, unix.PROT_EXEC),\n+ },\n+ },\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/seccomp/seccomp_test_victim.go",
"new_path": "pkg/seccomp/seccomp_test_victim.go",
"diff": "@@ -105,7 +105,7 @@ func main() {\n}\n}\n- if err := seccomp.Install(syscalls); err != nil {\n+ if err := seccomp.Install(syscalls, nil); err != nil {\nfmt.Printf(\"Failed to install seccomp: %v\", err)\nos.Exit(1)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine.go",
"new_path": "pkg/sentry/platform/kvm/machine.go",
"diff": "@@ -744,7 +744,7 @@ func seccompMmapRules(m *machine) {\n{\nseccomp.MatchAny{},\nseccomp.MatchAny{},\n- seccomp.MatchAny{},\n+ seccomp.MaskedEqual(unix.PROT_EXEC, 0),\n/* MAP_DENYWRITE is ignored and used only for filtering. */\nseccomp.MaskedEqual(unix.MAP_DENYWRITE, 0),\n},\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/filter.go",
"new_path": "runsc/boot/filter/filter.go",
"diff": "@@ -51,7 +51,7 @@ func Install(opt Options) error {\ns.Merge(opt.Platform.SyscallFilters())\n- return seccomp.Install(s)\n+ return seccomp.Install(s, seccomp.DenyNewExecMappings)\n}\n// Report writes a warning message to the log.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/filter/filter.go",
"new_path": "runsc/fsgofer/filter/filter.go",
"diff": "@@ -27,7 +27,7 @@ func Install() error {\n// when not enabled.\nallowedSyscalls.Merge(instrumentationFilters())\n- return seccomp.Install(allowedSyscalls)\n+ return seccomp.Install(allowedSyscalls, seccomp.DenyNewExecMappings)\n}\n// InstallUDSFilters extends the allowed syscalls to include those necessary for\n"
}
] | Go | Apache License 2.0 | google/gvisor | filters: don't allow to create new executable mappings
PiperOrigin-RevId: 426244201 |
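From a caller's point of view, the new API shape is: pass an allow list plus a deny list, with the deny rules evaluated first. A hedged usage sketch built only from what the diff shows:

```go
package example

import "gvisor.dev/gvisor/pkg/seccomp"

// installFilters installs an allow list of syscalls, but first rejects
// any mmap/mprotect whose prot argument has PROT_EXEC set, so the
// process can never create or enable an executable mapping.
func installFilters(allowed seccomp.SyscallRules) error {
	// DenyNewExecMappings (from the diff) matches mmap and mprotect
	// with MaskedEqual(unix.PROT_EXEC, unix.PROT_EXEC) on the prot
	// argument; Install evaluates deny rules before allow rules, so a
	// matching call falls through to the default (kill/trap) action.
	return seccomp.Install(allowed, seccomp.DenyNewExecMappings)
}
```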
259,985 | 03.02.2022 15:11:53 | 28,800 | 55ef37166897eae03ea8e572487db4eefc3f9d1d | Return well-defined error on short payload in FUSE.
Errors from syscalls without a well-defined translation to errnos
result in sentry panics.
Reported-by: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fsimpl/fuse/request_response.go",
"new_path": "pkg/sentry/fsimpl/fuse/request_response.go",
"diff": "package fuse\nimport (\n- \"fmt\"\n-\n\"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/errors/linuxerr\"\n\"gvisor.dev/gvisor/pkg/hostarch\"\n+ \"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/marshal\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n@@ -212,7 +212,9 @@ func (r *Response) UnmarshalPayload(m marshal.Marshallable) error {\nwantDataLen := uint32(m.SizeBytes())\nif haveDataLen < wantDataLen {\n- return fmt.Errorf(\"payload too small. Minimum data lenth required: %d, but got data length %d\", wantDataLen, haveDataLen)\n+ log.Warningf(\"fusefs: Payload too small. Minimum data length required: %d, but got data length %d\", wantDataLen, haveDataLen)\n+ return linuxerr.EINVAL\n+\n}\n// The response data is empty unless there is some payload. And so, doesn't\n"
}
] | Go | Apache License 2.0 | google/gvisor | Return well-defined error on short payload in FUSE.
Errors from syscalls without a well-defined translation to errnos
result in sentry panics.
Reported-by: [email protected]
PiperOrigin-RevId: 426248731 |
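The pattern of this fix, in isolation: validate the payload length and return a real errno that the syscall layer can translate, rather than a fmt.Errorf value with no errno mapping. A hedged sketch (unix.EINVAL stands in for gVisor's linuxerr.EINVAL, and the stdlib logger for gVisor's log package):

```go
package example

import (
	"log"

	"golang.org/x/sys/unix"
)

// unmarshalPayload copies a response payload into buf, returning EINVAL
// — a well-defined errno — on a short payload instead of a free-form
// error that would panic the sentry on the way out of the syscall.
func unmarshalPayload(data, buf []byte) error {
	if len(data) < len(buf) {
		log.Printf("fusefs: payload too small: want %d bytes, got %d", len(buf), len(data))
		return unix.EINVAL
	}
	copy(buf, data)
	return nil
}
```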
259,962 | 03.02.2022 16:44:43 | 28,800 | 7345bd99ea5a5cb378ac73ed7a10c17744c82800 | Make shared memory file location configurable. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/queuepair.go",
"new_path": "pkg/tcpip/link/sharedmem/queuepair.go",
"diff": "@@ -74,16 +74,16 @@ type QueuePair struct {\n// QueueOptions allows queue specific configuration to be specified when\n// creating a QueuePair.\ntype QueueOptions struct {\n- // sharedMemPath is the path to use to create the shared memory backing\n+ // SharedMemPath is the path to use to create the shared memory backing\n// files for the queue.\n//\n// If unspecified it defaults to \"/dev/shm\".\n- sharedMemPath string\n+ SharedMemPath string\n}\n// NewQueuePair creates a shared memory QueuePair.\nfunc NewQueuePair(opts QueueOptions) (*QueuePair, error) {\n- txCfg, err := createQueueFDs(opts.sharedMemPath, queueSizes{\n+ txCfg, err := createQueueFDs(opts.SharedMemPath, queueSizes{\ndataSize: DefaultQueueDataSize,\ntxPipeSize: DefaultQueuePipeSize,\nrxPipeSize: DefaultQueuePipeSize,\n@@ -94,7 +94,7 @@ func NewQueuePair(opts QueueOptions) (*QueuePair, error) {\nreturn nil, fmt.Errorf(\"failed to create tx queue: %s\", err)\n}\n- rxCfg, err := createQueueFDs(opts.sharedMemPath, queueSizes{\n+ rxCfg, err := createQueueFDs(opts.SharedMemPath, queueSizes{\ndataSize: DefaultQueueDataSize,\ntxPipeSize: DefaultQueuePipeSize,\nrxPipeSize: DefaultQueuePipeSize,\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make shared memory file location configurable.
PiperOrigin-RevId: 426268960 |
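With SharedMemPath exported, callers outside the package can now choose where the queue's backing files live. A minimal hedged usage sketch (the path is hypothetical; per the doc comment, leaving it empty still defaults to /dev/shm):

```go
package example

import "gvisor.dev/gvisor/pkg/tcpip/link/sharedmem"

// newQueuePair places the shared-memory backing files on a tmpfs of the
// caller's choosing instead of the default /dev/shm.
func newQueuePair() (*sharedmem.QueuePair, error) {
	return sharedmem.NewQueuePair(sharedmem.QueueOptions{
		SharedMemPath: "/tmp/my-shm", // hypothetical mount point
	})
}
```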