author            int64          658 to 755k
date              stringlengths  19 to 19
timezone          int64          -46,800 to 43.2k
hash              stringlengths  40 to 40
message           stringlengths  5 to 490
mods              list
language          stringclasses  20 values
license           stringclasses  3 values
repo              stringlengths  5 to 68
original_message  stringlengths  12 to 491
259,853
01.03.2021 12:14:47
28,800
865ca64ee8c0af9eba88a4a04e0730630fae6d8b
tcp: endpoint.Write has to send all data that has been read from payload io.Reader.ReadFull returns the number of bytes copied and an error if fewer bytes were read.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -1552,10 +1552,11 @@ func (e *endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp\nreturn nil, nil\n}\nv := make([]byte, avail)\n- if _, err := io.ReadFull(p, v); err != nil {\n+ n, err := p.Read(v)\n+ if err != nil && err != io.EOF {\nreturn nil, &tcpip.ErrBadBuffer{}\n}\n- return v, nil\n+ return v[:n], nil\n}()\nif len(v) == 0 || err != nil {\nreturn nil, 0, err\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -2384,6 +2384,7 @@ cc_library(\n\"@com_google_absl//absl/memory\",\n\"@com_google_absl//absl/time\",\ngtest,\n+ \"//test/util:temp_path\",\n\"//test/util:test_util\",\n\"//test/util:thread_util\",\n],\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ip_tcp_generic.cc", "new_path": "test/syscalls/linux/socket_ip_tcp_generic.cc", "diff": "#include \"absl/time/clock.h\"\n#include \"absl/time/time.h\"\n#include \"test/syscalls/linux/socket_test_util.h\"\n+#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n#include \"test/util/thread_util.h\"\n@@ -1059,6 +1060,28 @@ TEST_P(TCPSocketPairTest, SpliceToPipe) {\nSyscallSucceedsWithValue(buf.size()));\nEXPECT_EQ(memcmp(rbuf.data(), buf.data(), buf.size()), 0);\n}\n+\n+#include <sys/sendfile.h>\n+\n+TEST_P(TCPSocketPairTest, SendfileFromRegularFileSucceeds) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+ const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+ const FileDescriptor in_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDWR));\n+ // Fill with some random data.\n+ std::vector<char> buf(kPageSize / 2);\n+ RandomizeBuffer(buf.data(), buf.size());\n+ ASSERT_THAT(pwrite(in_fd.get(), buf.data(), buf.size(), 0),\n+ SyscallSucceedsWithValue(buf.size()));\n+\n+ EXPECT_THAT(\n+ sendfile(sockets->first_fd(), in_fd.get(), nullptr, buf.size() + 1),\n+ SyscallSucceedsWithValue(buf.size()));\n+ std::vector<char> rbuf(buf.size() + 1);\n+ ASSERT_THAT(read(sockets->second_fd(), rbuf.data(), rbuf.size()),\n+ SyscallSucceedsWithValue(buf.size()));\n+ EXPECT_EQ(memcmp(rbuf.data(), buf.data(), buf.size()), 0);\n+}\n#endif // __linux__\nTEST_P(TCPSocketPairTest, SetTCPWindowClampBelowMinRcvBufConnectedSocket) {\n" } ]
Go
Apache License 2.0
google/gvisor
tcp: endpoint.Write has to send all data that has been read from payload io.Reader.ReadFull returns the number of bytes copied and an error if fewer bytes were read. PiperOrigin-RevId: 360247614
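Aside: the record above turns on the difference between io.ReadFull and a plain Read in the Go standard library. ReadFull reports a short read as an error (io.ErrUnexpectedEOF) even though bytes were copied, while Read simply returns the count it managed to read. The following minimal sketch shows that difference; it is illustrative only (the reader contents and buffer size are made up) and is not taken from the gvisor change itself.

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// A 5-byte payload read into a 10-byte buffer, so every call is a
	// "short" read relative to the buffer size.
	buf := make([]byte, 10)

	// io.ReadFull treats the short read as an error even though it
	// copied some bytes: it returns (5, io.ErrUnexpectedEOF).
	n, err := io.ReadFull(strings.NewReader("hello"), buf)
	fmt.Println(n, err)

	// A plain Read just reports how many bytes it managed to copy:
	// it returns (5, nil), and the caller keeps buf[:n].
	n, err = strings.NewReader("hello").Read(buf)
	fmt.Println(n, err)
}
```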
259,907
02.03.2021 12:41:06
28,800
a317174673562996a98f5a735771955d6651e233
[rack] Support running tcp_benchmarks with RACK.
[ { "change_type": "MODIFY", "old_path": "test/benchmarks/tcp/tcp_benchmark.sh", "new_path": "test/benchmarks/tcp/tcp_benchmark.sh", "diff": "@@ -91,6 +91,9 @@ while [ $# -gt 0 ]; do\n--sack)\nnetstack_opts=\"${netstack_opts} -sack\"\n;;\n+ --rack)\n+ netstack_opts=\"${netstack_opts} -rack\"\n+ ;;\n--cubic)\nnetstack_opts=\"${netstack_opts} -cubic\"\n;;\n@@ -152,6 +155,7 @@ while [ $# -gt 0 ]; do\necho \" --server use netstack as the server\"\necho \" --mtu set the mtu (bytes)\"\necho \" --sack enable SACK support\"\n+ echo \" --rack enable RACK support\"\necho \" --moderate-recv-buf enable TCP receive buffer auto-tuning\"\necho \" --cubic enable CUBIC congestion control for Netstack\"\necho \" --duration set the test duration (s)\"\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/tcp/tcp_proxy.go", "new_path": "test/benchmarks/tcp/tcp_proxy.go", "diff": "@@ -56,6 +56,7 @@ var (\nmask = flag.Int(\"mask\", 8, \"mask size for address\")\niface = flag.String(\"iface\", \"\", \"network interface name to bind for netstack\")\nsack = flag.Bool(\"sack\", false, \"enable SACK support for netstack\")\n+ rack = flag.Bool(\"rack\", false, \"enable RACK in TCP\")\nmoderateRecvBuf = flag.Bool(\"moderate_recv_buf\", false, \"enable TCP Receive Buffer Auto-tuning\")\ncubic = flag.Bool(\"cubic\", false, \"enable use of CUBIC congestion control for netstack\")\ngso = flag.Int(\"gso\", 0, \"GSO maximum size\")\n@@ -232,6 +233,13 @@ func newNetstackImpl(mode string) (impl, error) {\n}\n}\n+ if *rack {\n+ opt := tcpip.TCPRecovery(tcpip.TCPRACKLossDetection)\n+ if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {\n+ return nil, fmt.Errorf(\"enabling RACK failed: %v\", err)\n+ }\n+ }\n+\n// Enable Receive Buffer Auto-Tuning.\n{\nopt := tcpip.TCPModerateReceiveBufferOption(*moderateRecvBuf)\n" } ]
Go
Apache License 2.0
google/gvisor
[rack] Support running tcp_benchmarks with RACK. PiperOrigin-RevId: 360491700
259,975
02.03.2021 14:08:33
28,800
b8a5420f49a2afd622ec08b5019e1bf537f7da82
Add reverse flag to mitigate. Add reverse operation to mitigate that just enables all CPUs.
[ { "change_type": "MODIFY", "old_path": "runsc/cmd/mitigate.go", "new_path": "runsc/cmd/mitigate.go", "diff": "@@ -16,7 +16,6 @@ package cmd\nimport (\n\"context\"\n- \"io/ioutil\"\n\"github.com/google/subcommands\"\n\"gvisor.dev/gvisor/pkg/log\"\n@@ -56,14 +55,7 @@ func (m *Mitigate) Execute(_ context.Context, f *flag.FlagSet, args ...interface\nreturn subcommands.ExitUsageError\n}\n- const path = \"/proc/cpuinfo\"\n- data, err := ioutil.ReadFile(path)\n- if err != nil {\n- log.Warningf(\"Failed to read %s: %v\", path, err)\n- return subcommands.ExitFailure\n- }\n-\n- if err := m.mitigate.Execute(data); err != nil {\n+ if err := m.mitigate.Execute(); err != nil {\nlog.Warningf(\"Execute failed: %v\", err)\nreturn subcommands.ExitFailure\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/mitigate/cpu.go", "new_path": "runsc/mitigate/cpu.go", "diff": "@@ -45,7 +45,7 @@ const (\ntype cpuSet map[cpuID]*threadGroup\n// newCPUSet creates a CPUSet from data read from /proc/cpuinfo.\n-func newCPUSet(data []byte, vulnerable func(*thread) bool) (cpuSet, error) {\n+func newCPUSet(data []byte, vulnerable func(thread) bool) (cpuSet, error) {\nprocessors, err := getThreads(string(data))\nif err != nil {\nreturn nil, err\n@@ -68,6 +68,26 @@ func newCPUSet(data []byte, vulnerable func(*thread) bool) (cpuSet, error) {\nreturn set, nil\n}\n+// newCPUSetFromPossible makes a cpuSet data read from\n+// /sys/devices/system/cpu/possible. This is used in enable operations\n+// where the caller simply wants to enable all CPUS.\n+func newCPUSetFromPossible(data []byte) (cpuSet, error) {\n+ threads, err := getThreadsFromPossible(data)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ // We don't care if a CPU is vulnerable or not, we just\n+ // want to return a list of all CPUs on the host.\n+ set := cpuSet{\n+ threads[0].id: &threadGroup{\n+ threads: threads,\n+ isVulnerable: false,\n+ },\n+ }\n+ return set, nil\n+}\n+\n// String implements the String method for CPUSet.\nfunc (c cpuSet) String() string {\nret := \"\"\n@@ -79,8 +99,8 @@ func (c cpuSet) String() string {\n// getRemainingList returns the list of threads that will remain active\n// after mitigation.\n-func (c cpuSet) getRemainingList() []*thread {\n- threads := make([]*thread, 0, len(c))\n+func (c cpuSet) getRemainingList() []thread {\n+ threads := make([]thread, 0, len(c))\nfor _, core := range c {\n// If we're vulnerable, take only one thread from the pair.\nif core.isVulnerable {\n@@ -95,8 +115,8 @@ func (c cpuSet) getRemainingList() []*thread {\n// getShutdownList returns the list of threads that will be shutdown on\n// mitigation.\n-func (c cpuSet) getShutdownList() []*thread {\n- threads := make([]*thread, 0)\n+func (c cpuSet) getShutdownList() []thread {\n+ threads := make([]thread, 0)\nfor _, core := range c {\n// Only if we're vulnerable do shutdown anything. 
In this case,\n// shutdown all but the first entry.\n@@ -109,12 +129,12 @@ func (c cpuSet) getShutdownList() []*thread {\n// threadGroup represents Hyperthread pairs on the same physical/core ID.\ntype threadGroup struct {\n- threads []*thread\n+ threads []thread\nisVulnerable bool\n}\n// String implements the String method for threadGroup.\n-func (c *threadGroup) String() string {\n+func (c threadGroup) String() string {\nret := fmt.Sprintf(\"ThreadGroup:\\nIsVulnerable: %t\\n\", c.isVulnerable)\nfor _, processor := range c.threads {\nret += fmt.Sprintf(\"%s\\n\", processor)\n@@ -123,13 +143,13 @@ func (c *threadGroup) String() string {\n}\n// getThreads returns threads structs from reading /proc/cpuinfo.\n-func getThreads(data string) ([]*thread, error) {\n+func getThreads(data string) ([]thread, error) {\n// Each processor entry should start with the\n// processor key. Find the beginings of each.\nr := buildRegex(processorKey, `\\d+`)\nindices := r.FindAllStringIndex(data, -1)\nif len(indices) < 1 {\n- return nil, fmt.Errorf(\"no cpus found for: %s\", data)\n+ return nil, fmt.Errorf(\"no cpus found for: %q\", data)\n}\n// Add the ending index for last entry.\n@@ -139,7 +159,7 @@ func getThreads(data string) ([]*thread, error) {\n// indexes (e.g. data[index[i], index[i+1]]).\n// There should be len(indicies) - 1 CPUs\n// since the last index is the end of the string.\n- var cpus = make([]*thread, 0, len(indices)-1)\n+ cpus := make([]thread, 0, len(indices))\n// Find each string that represents a CPU. These begin \"processor\".\nfor i := 1; i < len(indices); i++ {\nstart := indices[i-1][0]\n@@ -154,6 +174,45 @@ func getThreads(data string) ([]*thread, error) {\nreturn cpus, nil\n}\n+// getThreadsFromPossible makes threads from data read from /sys/devices/system/cpu/possible.\n+func getThreadsFromPossible(data []byte) ([]thread, error) {\n+ possibleRegex := regexp.MustCompile(`(?m)^(\\d+)(-(\\d+))?$`)\n+ matches := possibleRegex.FindStringSubmatch(string(data))\n+ if len(matches) != 4 {\n+ return nil, fmt.Errorf(\"mismatch regex from %s: %q\", allPossibleCPUs, string(data))\n+ }\n+\n+ // If matches[3] is empty, we only have one cpu entry.\n+ if matches[3] == \"\" {\n+ matches[3] = matches[1]\n+ }\n+\n+ begin, err := strconv.ParseInt(matches[1], 10, 64)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"failed to parse begin: %v\", err)\n+ }\n+ end, err := strconv.ParseInt(matches[3], 10, 64)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"failed to parse end: %v\", err)\n+ }\n+ if begin > end || begin < 0 || end < 0 {\n+ return nil, fmt.Errorf(\"invalid cpu bounds from possible: begin: %d end: %d\", begin, end)\n+ }\n+\n+ ret := make([]thread, 0, end-begin)\n+ for i := begin; i <= end; i++ {\n+ ret = append(ret, thread{\n+ processorNumber: i,\n+ id: cpuID{\n+ physicalID: 0, // we don't care about id for enable ops.\n+ coreID: 0,\n+ },\n+ })\n+ }\n+\n+ return ret, nil\n+}\n+\n// cpuID for each thread is defined by the physical and\n// core IDs. 
If equal, two threads are Hyperthread pairs.\ntype cpuID struct {\n@@ -172,43 +231,44 @@ type thread struct {\n}\n// newThread parses a CPU from a single cpu entry from /proc/cpuinfo.\n-func newThread(data string) (*thread, error) {\n+func newThread(data string) (thread, error) {\n+ empty := thread{}\nprocessor, err := parseProcessor(data)\nif err != nil {\n- return nil, err\n+ return empty, err\n}\nvendorID, err := parseVendorID(data)\nif err != nil {\n- return nil, err\n+ return empty, err\n}\ncpuFamily, err := parseCPUFamily(data)\nif err != nil {\n- return nil, err\n+ return empty, err\n}\nmodel, err := parseModel(data)\nif err != nil {\n- return nil, err\n+ return empty, err\n}\nphysicalID, err := parsePhysicalID(data)\nif err != nil {\n- return nil, err\n+ return empty, err\n}\ncoreID, err := parseCoreID(data)\nif err != nil {\n- return nil, err\n+ return empty, err\n}\nbugs, err := parseBugs(data)\nif err != nil {\n- return nil, err\n+ return empty, err\n}\n- return &thread{\n+ return thread{\nprocessorNumber: processor,\nvendorID: vendorID,\ncpuFamily: cpuFamily,\n@@ -222,7 +282,7 @@ func newThread(data string) (*thread, error) {\n}\n// String implements the String method for thread.\n-func (t *thread) String() string {\n+func (t thread) String() string {\ntemplate := `CPU: %d\nCPU ID: %+v\nVendor: %s\n@@ -237,21 +297,27 @@ Bugs: %s\nreturn fmt.Sprintf(template, t.processorNumber, t.id, t.vendorID, t.cpuFamily, t.model, strings.Join(bugs, \",\"))\n}\n-// shutdown turns off the CPU by writing 0 to /sys/devices/cpu/cpu{N}/online.\n-func (t *thread) shutdown() error {\n+// enable turns on the CPU by writing 1 to /sys/devices/cpu/cpu{N}/online.\n+func (t thread) enable() error {\n+ cpuPath := fmt.Sprintf(cpuOnlineTemplate, t.processorNumber)\n+ return ioutil.WriteFile(cpuPath, []byte{'1'}, 0644)\n+}\n+\n+// disable turns off the CPU by writing 0 to /sys/devices/cpu/cpu{N}/online.\n+func (t thread) disable() error {\ncpuPath := fmt.Sprintf(cpuOnlineTemplate, t.processorNumber)\nreturn ioutil.WriteFile(cpuPath, []byte{'0'}, 0644)\n}\n// isVulnerable checks if a CPU is vulnerable to mds.\n-func (t *thread) isVulnerable() bool {\n+func (t thread) isVulnerable() bool {\n_, ok := t.bugs[mds]\nreturn ok\n}\n// isActive checks if a CPU is active from /sys/devices/system/cpu/cpu{N}/online\n// If the file does not exist (ioutil returns in error), we assume the CPU is on.\n-func (t *thread) isActive() bool {\n+func (t thread) isActive() bool {\ncpuPath := fmt.Sprintf(cpuOnlineTemplate, t.processorNumber)\ndata, err := ioutil.ReadFile(cpuPath)\nif err != nil {\n@@ -262,7 +328,7 @@ func (t *thread) isActive() bool {\n// similarTo checks family/model/bugs fields for equality of two\n// processors.\n-func (t *thread) similarTo(other *thread) bool {\n+func (t thread) similarTo(other thread) bool {\nif t.vendorID != other.vendorID {\nreturn false\n}\n@@ -351,7 +417,7 @@ func parseRegex(data, key, match string) (string, error) {\nr := buildRegex(key, match)\nmatches := r.FindStringSubmatch(data)\nif len(matches) < 2 {\n- return \"\", fmt.Errorf(\"failed to match key %s: %s\", key, data)\n+ return \"\", fmt.Errorf(\"failed to match key %q: %q\", key, data)\n}\nreturn matches[1], nil\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/mitigate/cpu_test.go", "new_path": "runsc/mitigate/cpu_test.go", "diff": "@@ -21,8 +21,8 @@ import (\n\"testing\"\n)\n-// cpuTestCase represents data from CPUs that will be mitigated.\n-type cpuTestCase struct {\n+// mockCPU represents data from CPUs that will be 
mitigated.\n+type mockCPU struct {\nname string\nvendorID string\nfamily int\n@@ -34,7 +34,7 @@ type cpuTestCase struct {\nthreadsPerCore int\n}\n-var cascadeLake4 = cpuTestCase{\n+var cascadeLake4 = mockCPU{\nname: \"CascadeLake\",\nvendorID: \"GenuineIntel\",\nfamily: 6,\n@@ -46,7 +46,7 @@ var cascadeLake4 = cpuTestCase{\nthreadsPerCore: 2,\n}\n-var haswell2 = cpuTestCase{\n+var haswell2 = mockCPU{\nname: \"Haswell\",\nvendorID: \"GenuineIntel\",\nfamily: 6,\n@@ -58,7 +58,7 @@ var haswell2 = cpuTestCase{\nthreadsPerCore: 2,\n}\n-var haswell2core = cpuTestCase{\n+var haswell2core = mockCPU{\nname: \"Haswell2Physical\",\nvendorID: \"GenuineIntel\",\nfamily: 6,\n@@ -70,7 +70,7 @@ var haswell2core = cpuTestCase{\nthreadsPerCore: 1,\n}\n-var amd8 = cpuTestCase{\n+var amd8 = mockCPU{\nname: \"AMD\",\nvendorID: \"AuthenticAMD\",\nfamily: 23,\n@@ -83,7 +83,7 @@ var amd8 = cpuTestCase{\n}\n// makeCPUString makes a string formated like /proc/cpuinfo for each cpuTestCase\n-func (tc cpuTestCase) makeCPUString() string {\n+func (tc mockCPU) makeCPUString() string {\ntemplate := `processor : %d\nvendor_id : %s\ncpu family : %d\n@@ -115,10 +115,18 @@ bugs : %s\nreturn ret\n}\n+func (tc mockCPU) makeSysPossibleString() string {\n+ max := tc.physicalCores * tc.cores * tc.threadsPerCore\n+ if max == 1 {\n+ return \"0\"\n+ }\n+ return fmt.Sprintf(\"0-%d\", max-1)\n+}\n+\n// TestMockCPUSet tests mock cpu test cases against the cpuSet functions.\nfunc TestMockCPUSet(t *testing.T) {\nfor _, tc := range []struct {\n- testCase cpuTestCase\n+ testCase mockCPU\nisVulnerable bool\n}{\n{\n@@ -141,7 +149,7 @@ func TestMockCPUSet(t *testing.T) {\n} {\nt.Run(tc.testCase.name, func(t *testing.T) {\ndata := tc.testCase.makeCPUString()\n- vulnerable := func(t *thread) bool {\n+ vulnerable := func(t thread) bool {\nreturn t.isVulnerable()\n}\nset, err := newCPUSet([]byte(data), vulnerable)\n@@ -170,6 +178,18 @@ func TestMockCPUSet(t *testing.T) {\n}\ndelete(set, r.id)\n}\n+\n+ possible := tc.testCase.makeSysPossibleString()\n+ set, err = newCPUSetFromPossible([]byte(possible))\n+ if err != nil {\n+ t.Fatalf(\"Failed to make cpuSet: %v\", err)\n+ }\n+\n+ want = tc.testCase.physicalCores * tc.testCase.cores * tc.testCase.threadsPerCore\n+ got := len(set.getRemainingList())\n+ if got != want {\n+ t.Fatalf(\"Returned the wrong number of CPUs want: %d got: %d\", want, got)\n+ }\n})\n}\n}\n@@ -328,7 +348,7 @@ func TestReadFile(t *testing.T) {\nt.Fatalf(\"Failed to read cpuinfo: %v\", err)\n}\n- vulnerable := func(t *thread) bool {\n+ vulnerable := func(t thread) bool {\nreturn t.isVulnerable()\n}\n@@ -502,3 +522,84 @@ power management:`\n})\n}\n}\n+\n+func TestReverse(t *testing.T) {\n+ const noParse = \"-1-\"\n+ for _, tc := range []struct {\n+ name string\n+ output string\n+ wantErr error\n+ wantCount int\n+ }{\n+ {\n+ name: \"base\",\n+ output: \"0-7\",\n+ wantErr: nil,\n+ wantCount: 8,\n+ },\n+ {\n+ name: \"huge\",\n+ output: \"0-111\",\n+ wantErr: nil,\n+ wantCount: 112,\n+ },\n+ {\n+ name: \"not zero\",\n+ output: \"50-53\",\n+ wantErr: nil,\n+ wantCount: 4,\n+ },\n+ {\n+ name: \"small\",\n+ output: \"0\",\n+ wantErr: nil,\n+ wantCount: 1,\n+ },\n+ {\n+ name: \"invalid order\",\n+ output: \"10-6\",\n+ wantErr: fmt.Errorf(\"invalid cpu bounds from possible: begin: %d end: %d\", 10, 6),\n+ },\n+ {\n+ name: \"no parse\",\n+ output: noParse,\n+ wantErr: fmt.Errorf(`mismatch regex from /sys/devices/system/cpu/possible: %q`, noParse),\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ threads, err := 
getThreadsFromPossible([]byte(tc.output))\n+\n+ switch {\n+ case tc.wantErr == nil:\n+ if err != nil {\n+ t.Fatalf(\"Wanted nil err, got: %v\", err)\n+ }\n+ case err == nil:\n+ t.Fatalf(\"Want error: %v got: %v\", tc.wantErr, err)\n+ default:\n+ if tc.wantErr.Error() != err.Error() {\n+ t.Fatalf(\"Want error: %v got error: %v\", tc.wantErr, err)\n+ }\n+ }\n+\n+ if len(threads) != tc.wantCount {\n+ t.Fatalf(\"Want count: %d got: %d\", tc.wantCount, len(threads))\n+ }\n+ })\n+ }\n+}\n+\n+func TestReverseSmoke(t *testing.T) {\n+ data, err := ioutil.ReadFile(allPossibleCPUs)\n+ if err != nil {\n+ t.Fatalf(\"Failed to read from possible: %v\", err)\n+ }\n+ threads, err := getThreadsFromPossible(data)\n+ if err != nil {\n+ t.Fatalf(\"Could not parse possible output: %v\", err)\n+ }\n+\n+ if len(threads) <= 0 {\n+ t.Fatalf(\"Didn't get any CPU cores: %d\", len(threads))\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/mitigate/mitigate.go", "new_path": "runsc/mitigate/mitigate.go", "diff": "@@ -21,15 +21,23 @@ package mitigate\nimport (\n\"fmt\"\n+ \"io/ioutil\"\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/runsc/flag\"\n)\n+const (\n+ cpuInfo = \"/proc/cpuinfo\"\n+ allPossibleCPUs = \"/sys/devices/system/cpu/possible\"\n+)\n+\n// Mitigate handles high level mitigate operations provided to runsc.\ntype Mitigate struct {\ndryRun bool // Run the command without changing the underlying system.\n+ reverse bool // Reverse mitigate by turning on all CPU cores.\nother mitigate // Struct holds extra mitigate logic.\n+ path string // path to read for each operation (e.g. /proc/cpuinfo).\n}\n// Usage implments Usage for cmd.Mitigate.\n@@ -37,6 +45,8 @@ func (m Mitigate) Usage() string {\nusageString := `mitigate [flags]\nMitigate mitigates a system to the \"MDS\" vulnerability by implementing a manual shutdown of SMT. The command checks /proc/cpuinfo for cpus having the MDS vulnerability, and if found, shutdown all but one CPU per hyperthread pair via /sys/devices/system/cpu/cpu{N}/online. 
CPUs can be restored by writing \"2\" to each file in /sys/devices/system/cpu/cpu{N}/online or performing a system reboot.\n+\n+The command can be reversed with --reverse, which reads the total CPUs from /sys/devices/system/cpu/possible and enables all with /sys/devices/system/cpu/cpu{N}/online.\n`\nreturn usageString + m.other.usage()\n}\n@@ -44,31 +54,81 @@ Mitigate mitigates a system to the \"MDS\" vulnerability by implementing a manual\n// SetFlags sets flags for the command Mitigate.\nfunc (m Mitigate) SetFlags(f *flag.FlagSet) {\nf.BoolVar(&m.dryRun, \"dryrun\", false, \"run the command without changing system\")\n+ f.BoolVar(&m.reverse, \"reverse\", false, \"reverse mitigate by enabling all CPUs\")\nm.other.setFlags(f)\n+ m.path = cpuInfo\n+ if m.reverse {\n+ m.path = allPossibleCPUs\n+ }\n}\n// Execute executes the Mitigate command.\n-func (m Mitigate) Execute(data []byte) error {\n+func (m Mitigate) Execute() error {\n+ data, err := ioutil.ReadFile(m.path)\n+ if err != nil {\n+ return fmt.Errorf(\"failed to read %s: %v\", m.path, err)\n+ }\n+\n+ if m.reverse {\n+ err := m.doReverse(data)\n+ if err != nil {\n+ return fmt.Errorf(\"reverse operation failed: %v\", err)\n+ }\n+ return nil\n+ }\n+\n+ set, err := m.doMitigate(data)\n+ if err != nil {\n+ return fmt.Errorf(\"mitigate operation failed: %v\", err)\n+ }\n+ return m.other.execute(set, m.dryRun)\n+}\n+\n+func (m Mitigate) doMitigate(data []byte) (cpuSet, error) {\nset, err := newCPUSet(data, m.other.vulnerable)\nif err != nil {\n- return err\n+ return nil, err\n}\nlog.Infof(\"Mitigate found the following CPUs...\")\nlog.Infof(\"%s\", set)\n- shutdownList := set.getShutdownList()\n- log.Infof(\"Shutting down threads on thread pairs.\")\n- for _, t := range shutdownList {\n- log.Infof(\"Shutting down thread: %s\", t)\n+ disableList := set.getShutdownList()\n+ log.Infof(\"Disabling threads on thread pairs.\")\n+ for _, t := range disableList {\n+ log.Infof(\"Disable thread: %s\", t)\nif m.dryRun {\ncontinue\n}\n- if err := t.shutdown(); err != nil {\n- return fmt.Errorf(\"error shutting down thread: %s err: %v\", t, err)\n+ if err := t.disable(); err != nil {\n+ return nil, fmt.Errorf(\"error disabling thread: %s err: %v\", t, err)\n}\n}\nlog.Infof(\"Shutdown successful.\")\n- m.other.execute(set, m.dryRun)\n+ return set, nil\n+}\n+\n+func (m Mitigate) doReverse(data []byte) error {\n+ set, err := newCPUSetFromPossible(data)\n+ if err != nil {\n+ return err\n+ }\n+\n+ log.Infof(\"Reverse mitigate found the following CPUs...\")\n+ log.Infof(\"%s\", set)\n+\n+ enableList := set.getRemainingList()\n+\n+ log.Infof(\"Enabling all CPUs...\")\n+ for _, t := range enableList {\n+ log.Infof(\"Enabling thread: %s\", t)\n+ if m.dryRun {\n+ continue\n+ }\n+ if err := t.enable(); err != nil {\n+ return fmt.Errorf(\"error enabling thread: %s err: %v\", t, err)\n+ }\n+ }\n+ log.Infof(\"Enable successful.\")\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/mitigate/mitigate_conf.go", "new_path": "runsc/mitigate/mitigate_conf.go", "diff": "@@ -32,6 +32,6 @@ func (m mitigate) execute(set cpuSet, dryrun bool) error {\nreturn nil\n}\n-func (m mitigate) vulnerable(other *thread) bool {\n+func (m mitigate) vulnerable(other thread) bool {\nreturn other.isVulnerable()\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/mitigate/mitigate_test.go", "new_path": "runsc/mitigate/mitigate_test.go", "diff": "// limitations under the License.\npackage mitigate\n+\n+import (\n+ \"fmt\"\n+ \"io/ioutil\"\n+ \"os\"\n+ \"strings\"\n+ 
\"testing\"\n+)\n+\n+type executeTestCase struct {\n+ name string\n+ mitigateData string\n+ mitigateError error\n+ reverseData string\n+ reverseError error\n+}\n+\n+func TestExecute(t *testing.T) {\n+\n+ partial := `processor : 1\n+vendor_id : AuthenticAMD\n+cpu family : 23\n+model : 49\n+model name : AMD EPYC 7B12\n+physical id : 0\n+bugs : sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass\n+power management:\n+`\n+\n+ for _, tc := range []executeTestCase{\n+ {\n+ name: \"CascadeLake4\",\n+ mitigateData: cascadeLake4.makeCPUString(),\n+ reverseData: cascadeLake4.makeSysPossibleString(),\n+ },\n+ {\n+ name: \"Empty\",\n+ mitigateData: \"\",\n+ mitigateError: fmt.Errorf(`mitigate operation failed: no cpus found for: \"\"`),\n+ reverseData: \"\",\n+ reverseError: fmt.Errorf(`reverse operation failed: mismatch regex from %s: \"\"`, allPossibleCPUs),\n+ },\n+ {\n+ name: \"Partial\",\n+ mitigateData: `processor : 0\n+vendor_id : AuthenticAMD\n+cpu family : 23\n+model : 49\n+model name : AMD EPYC 7B12\n+physical id : 0\n+core id : 0\n+cpu cores : 1\n+bugs : sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass\n+power management:\n+\n+` + partial,\n+ mitigateError: fmt.Errorf(`mitigate operation failed: failed to match key \"core id\": %q`, partial),\n+ reverseData: \"1-\",\n+ reverseError: fmt.Errorf(`reverse operation failed: mismatch regex from %s: %q`, allPossibleCPUs, \"1-\"),\n+ },\n+ } {\n+ doExecuteTest(t, Mitigate{}, tc)\n+ }\n+}\n+\n+func TestExecuteSmoke(t *testing.T) {\n+ smokeMitigate, err := ioutil.ReadFile(cpuInfo)\n+ if err != nil {\n+ t.Fatalf(\"Failed to read %s: %v\", cpuInfo, err)\n+ }\n+ smokeReverse, err := ioutil.ReadFile(allPossibleCPUs)\n+ if err != nil {\n+ t.Fatalf(\"Failed to read %s: %v\", allPossibleCPUs, err)\n+ }\n+ doExecuteTest(t, Mitigate{}, executeTestCase{\n+ name: \"SmokeTest\",\n+ mitigateData: string(smokeMitigate),\n+ reverseData: string(smokeReverse),\n+ })\n+\n+}\n+\n+// doExecuteTest runs Execute with the mitigate operation and reverse operation.\n+func doExecuteTest(t *testing.T, m Mitigate, tc executeTestCase) {\n+ t.Run(\"Mitigate\"+tc.name, func(t *testing.T) {\n+ m.dryRun = true\n+ file, err := ioutil.TempFile(\"\", \"outfile.txt\")\n+ if err != nil {\n+ t.Fatalf(\"Failed to create tmpfile: %v\", err)\n+ }\n+ defer os.Remove(file.Name())\n+\n+ if _, err := file.WriteString(tc.mitigateData); err != nil {\n+ t.Fatalf(\"Failed to write to file: %v\", err)\n+ }\n+\n+ m.path = file.Name()\n+\n+ got := m.Execute()\n+ if err = checkErr(tc.mitigateError, got); err != nil {\n+ t.Fatalf(\"Mitigate error mismatch: %v\", err)\n+ }\n+ })\n+ t.Run(\"Reverse\"+tc.name, func(t *testing.T) {\n+ m.dryRun = true\n+ m.reverse = true\n+\n+ file, err := ioutil.TempFile(\"\", \"outfile.txt\")\n+ if err != nil {\n+ t.Fatalf(\"Failed to create tmpfile: %v\", err)\n+ }\n+ defer os.Remove(file.Name())\n+\n+ if _, err := file.WriteString(tc.reverseData); err != nil {\n+ t.Fatalf(\"Failed to write to file: %v\", err)\n+ }\n+\n+ m.path = file.Name()\n+ got := m.Execute()\n+ if err = checkErr(tc.reverseError, got); err != nil {\n+ t.Fatalf(\"Mitigate error mismatch: %v\", err)\n+ }\n+ })\n+\n+}\n+\n+// checkErr checks error for equality.\n+func checkErr(want, got error) error {\n+ switch {\n+ case want == nil && got == nil:\n+ case want != nil && got == nil:\n+ fallthrough\n+ case want == nil && got != nil:\n+ fallthrough\n+ case want.Error() != strings.Trim(got.Error(), \" \"):\n+ return fmt.Errorf(\"got: %v want: %v\", got, want)\n+ }\n+ return nil\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Add reverse flag to mitigate. Add reverse operation to mitigate that just enables all CPUs. PiperOrigin-RevId: 360511215
259,898
03.03.2021 12:11:14
28,800
32578a591c01b1a64ce0bc5c9c62be4e8689232d
Use struct embedding to avoid casts and reuse methods Removed (*testbench.Connection)(&conn) like casts Removed redundant definition of Drain, Close and ExpectFrame
[ { "change_type": "MODIFY", "old_path": "test/packetimpact/testbench/connections.go", "new_path": "test/packetimpact/testbench/connections.go", "diff": "@@ -633,7 +633,9 @@ func (conn *Connection) Drain(t *testing.T) {\n}\n// TCPIPv4 maintains the state for all the layers in a TCP/IPv4 connection.\n-type TCPIPv4 Connection\n+type TCPIPv4 struct {\n+ Connection\n+}\n// NewTCPIPv4 creates a new TCPIPv4 connection with reasonable defaults.\nfunc (n *DUTTestNet) NewTCPIPv4(t *testing.T, outgoingTCP, incomingTCP TCP) TCPIPv4 {\n@@ -661,9 +663,11 @@ func (n *DUTTestNet) NewTCPIPv4(t *testing.T, outgoingTCP, incomingTCP TCP) TCPI\n}\nreturn TCPIPv4{\n+ Connection: Connection{\nlayerStates: []layerState{etherState, ipv4State, tcpState},\ninjector: injector,\nsniffer: sniffer,\n+ },\n}\n}\n@@ -715,7 +719,7 @@ func (conn *TCPIPv4) ExpectData(t *testing.T, tcp *TCP, payload *Payload, timeou\nif payload != nil {\nexpected = append(expected, payload)\n}\n- return (*Connection)(conn).ExpectFrame(t, expected, timeout)\n+ return conn.ExpectFrame(t, expected, timeout)\n}\n// ExpectNextData attempts to receive the next incoming segment for the\n@@ -739,7 +743,7 @@ func (conn *TCPIPv4) ExpectNextData(t *testing.T, tcp *TCP, payload *Payload, ti\nexpected = append(expected, payload)\ntcp.SeqNum = Uint32(uint32(*conn.RemoteSeqNum(t)) - uint32(payload.Length()))\n}\n- if !(*Connection)(conn).match(expected, got) {\n+ if !conn.match(expected, got) {\nreturn nil, fmt.Errorf(\"next frame is not matching %s during %s: got %s\", expected, timeout, got)\n}\nreturn got, nil\n@@ -750,14 +754,7 @@ func (conn *TCPIPv4) ExpectNextData(t *testing.T, tcp *TCP, payload *Payload, ti\nfunc (conn *TCPIPv4) Send(t *testing.T, tcp TCP, additionalLayers ...Layer) {\nt.Helper()\n- (*Connection)(conn).send(t, Layers{&tcp}, additionalLayers...)\n-}\n-\n-// Close frees associated resources held by the TCPIPv4 connection.\n-func (conn *TCPIPv4) Close(t *testing.T) {\n- t.Helper()\n-\n- (*Connection)(conn).Close(t)\n+ conn.send(t, Layers{&tcp}, additionalLayers...)\n}\n// Expect expects a frame with the TCP layer matching the provided TCP within\n@@ -765,7 +762,7 @@ func (conn *TCPIPv4) Close(t *testing.T) {\nfunc (conn *TCPIPv4) Expect(t *testing.T, tcp TCP, timeout time.Duration) (*TCP, error) {\nt.Helper()\n- layer, err := (*Connection)(conn).Expect(t, &tcp, timeout)\n+ layer, err := conn.Connection.Expect(t, &tcp, timeout)\nif layer == nil {\nreturn nil, err\n}\n@@ -826,16 +823,10 @@ func (conn *TCPIPv4) LocalAddr(t *testing.T) *unix.SockaddrInet4 {\nreturn sa\n}\n-// Drain drains the sniffer's receive buffer by receiving packets until there's\n-// nothing else to receive.\n-func (conn *TCPIPv4) Drain(t *testing.T) {\n- t.Helper()\n-\n- conn.sniffer.Drain(t)\n-}\n-\n// IPv4Conn maintains the state for all the layers in a IPv4 connection.\n-type IPv4Conn Connection\n+type IPv4Conn struct {\n+ Connection\n+}\n// NewIPv4Conn creates a new IPv4Conn connection with reasonable defaults.\nfunc (n *DUTTestNet) NewIPv4Conn(t *testing.T, outgoingIPv4, incomingIPv4 IPv4) IPv4Conn {\n@@ -860,9 +851,11 @@ func (n *DUTTestNet) NewIPv4Conn(t *testing.T, outgoingIPv4, incomingIPv4 IPv4)\n}\nreturn IPv4Conn{\n+ Connection: Connection{\nlayerStates: []layerState{etherState, ipv4State},\ninjector: injector,\nsniffer: sniffer,\n+ },\n}\n}\n@@ -871,26 +864,13 @@ func (n *DUTTestNet) NewIPv4Conn(t *testing.T, outgoingIPv4, incomingIPv4 IPv4)\nfunc (c *IPv4Conn) Send(t *testing.T, ipv4 IPv4, additionalLayers ...Layer) {\nt.Helper()\n- 
(*Connection)(c).send(t, Layers{&ipv4}, additionalLayers...)\n-}\n-\n-// Close cleans up any resources held.\n-func (c *IPv4Conn) Close(t *testing.T) {\n- t.Helper()\n-\n- (*Connection)(c).Close(t)\n-}\n-\n-// ExpectFrame expects a frame that matches the provided Layers within the\n-// timeout specified. If it doesn't arrive in time, an error is returned.\n-func (c *IPv4Conn) ExpectFrame(t *testing.T, frame Layers, timeout time.Duration) (Layers, error) {\n- t.Helper()\n-\n- return (*Connection)(c).ExpectFrame(t, frame, timeout)\n+ c.send(t, Layers{&ipv4}, additionalLayers...)\n}\n// IPv6Conn maintains the state for all the layers in a IPv6 connection.\n-type IPv6Conn Connection\n+type IPv6Conn struct {\n+ Connection\n+}\n// NewIPv6Conn creates a new IPv6Conn connection with reasonable defaults.\nfunc (n *DUTTestNet) NewIPv6Conn(t *testing.T, outgoingIPv6, incomingIPv6 IPv6) IPv6Conn {\n@@ -915,9 +895,11 @@ func (n *DUTTestNet) NewIPv6Conn(t *testing.T, outgoingIPv6, incomingIPv6 IPv6)\n}\nreturn IPv6Conn{\n+ Connection: Connection{\nlayerStates: []layerState{etherState, ipv6State},\ninjector: injector,\nsniffer: sniffer,\n+ },\n}\n}\n@@ -926,26 +908,13 @@ func (n *DUTTestNet) NewIPv6Conn(t *testing.T, outgoingIPv6, incomingIPv6 IPv6)\nfunc (conn *IPv6Conn) Send(t *testing.T, ipv6 IPv6, additionalLayers ...Layer) {\nt.Helper()\n- (*Connection)(conn).send(t, Layers{&ipv6}, additionalLayers...)\n-}\n-\n-// Close to clean up any resources held.\n-func (conn *IPv6Conn) Close(t *testing.T) {\n- t.Helper()\n-\n- (*Connection)(conn).Close(t)\n-}\n-\n-// ExpectFrame expects a frame that matches the provided Layers within the\n-// timeout specified. If it doesn't arrive in time, an error is returned.\n-func (conn *IPv6Conn) ExpectFrame(t *testing.T, frame Layers, timeout time.Duration) (Layers, error) {\n- t.Helper()\n-\n- return (*Connection)(conn).ExpectFrame(t, frame, timeout)\n+ conn.send(t, Layers{&ipv6}, additionalLayers...)\n}\n// UDPIPv4 maintains the state for all the layers in a UDP/IPv4 connection.\n-type UDPIPv4 Connection\n+type UDPIPv4 struct {\n+ Connection\n+}\n// NewUDPIPv4 creates a new UDPIPv4 connection with reasonable defaults.\nfunc (n *DUTTestNet) NewUDPIPv4(t *testing.T, outgoingUDP, incomingUDP UDP) UDPIPv4 {\n@@ -973,9 +942,11 @@ func (n *DUTTestNet) NewUDPIPv4(t *testing.T, outgoingUDP, incomingUDP UDP) UDPI\n}\nreturn UDPIPv4{\n+ Connection: Connection{\nlayerStates: []layerState{etherState, ipv4State, udpState},\ninjector: injector,\nsniffer: sniffer,\n+ },\n}\n}\n@@ -1020,7 +991,7 @@ func (conn *UDPIPv4) SrcPort(t *testing.T) uint16 {\nfunc (conn *UDPIPv4) Send(t *testing.T, udp UDP, additionalLayers ...Layer) {\nt.Helper()\n- (*Connection)(conn).send(t, Layers{&udp}, additionalLayers...)\n+ conn.send(t, Layers{&udp}, additionalLayers...)\n}\n// SendIP sends a packet with reasonable defaults, potentially overriding the\n@@ -1028,12 +999,12 @@ func (conn *UDPIPv4) Send(t *testing.T, udp UDP, additionalLayers ...Layer) {\nfunc (conn *UDPIPv4) SendIP(t *testing.T, ip IPv4, udp UDP, additionalLayers ...Layer) {\nt.Helper()\n- (*Connection)(conn).send(t, Layers{&ip, &udp}, additionalLayers...)\n+ conn.send(t, Layers{&ip, &udp}, additionalLayers...)\n}\n// SendFrame sends a frame on the wire and updates the state of all layers.\nfunc (conn *UDPIPv4) SendFrame(t *testing.T, overrideLayers Layers, additionalLayers ...Layer) {\n- (*Connection)(conn).send(t, overrideLayers, additionalLayers...)\n+ conn.send(t, overrideLayers, additionalLayers...)\n}\n// Expect expects a frame 
with the UDP layer matching the provided UDP within\n@@ -1041,7 +1012,7 @@ func (conn *UDPIPv4) SendFrame(t *testing.T, overrideLayers Layers, additionalLa\nfunc (conn *UDPIPv4) Expect(t *testing.T, udp UDP, timeout time.Duration) (*UDP, error) {\nt.Helper()\n- layer, err := (*Connection)(conn).Expect(t, &udp, timeout)\n+ layer, err := conn.Connection.Expect(t, &udp, timeout)\nif err != nil {\nreturn nil, err\n}\n@@ -1062,34 +1033,13 @@ func (conn *UDPIPv4) ExpectData(t *testing.T, udp UDP, payload Payload, timeout\nif payload.length() != 0 {\nexpected = append(expected, &payload)\n}\n- return (*Connection)(conn).ExpectFrame(t, expected, timeout)\n-}\n-\n-// ExpectFrame expects a frame that matches the provided Layers within the\n-// timeout specified. If it doesn't arrive in time, an error is returned.\n-func (conn *UDPIPv4) ExpectFrame(t *testing.T, frame Layers, timeout time.Duration) (Layers, error) {\n- t.Helper()\n-\n- return (*Connection)(conn).ExpectFrame(t, frame, timeout)\n-}\n-\n-// Close frees associated resources held by the UDPIPv4 connection.\n-func (conn *UDPIPv4) Close(t *testing.T) {\n- t.Helper()\n-\n- (*Connection)(conn).Close(t)\n-}\n-\n-// Drain drains the sniffer's receive buffer by receiving packets until there's\n-// nothing else to receive.\n-func (conn *UDPIPv4) Drain(t *testing.T) {\n- t.Helper()\n-\n- conn.sniffer.Drain(t)\n+ return conn.ExpectFrame(t, expected, timeout)\n}\n// UDPIPv6 maintains the state for all the layers in a UDP/IPv6 connection.\n-type UDPIPv6 Connection\n+type UDPIPv6 struct {\n+ Connection\n+}\n// NewUDPIPv6 creates a new UDPIPv6 connection with reasonable defaults.\nfunc (n *DUTTestNet) NewUDPIPv6(t *testing.T, outgoingUDP, incomingUDP UDP) UDPIPv6 {\n@@ -1116,9 +1066,11 @@ func (n *DUTTestNet) NewUDPIPv6(t *testing.T, outgoingUDP, incomingUDP UDP) UDPI\nt.Fatalf(\"can't make sniffer: %s\", err)\n}\nreturn UDPIPv6{\n+ Connection: Connection{\nlayerStates: []layerState{etherState, ipv6State, udpState},\ninjector: injector,\nsniffer: sniffer,\n+ },\n}\n}\n@@ -1168,7 +1120,7 @@ func (conn *UDPIPv6) SrcPort(t *testing.T) uint16 {\nfunc (conn *UDPIPv6) Send(t *testing.T, udp UDP, additionalLayers ...Layer) {\nt.Helper()\n- (*Connection)(conn).send(t, Layers{&udp}, additionalLayers...)\n+ conn.send(t, Layers{&udp}, additionalLayers...)\n}\n// SendIPv6 sends a packet with reasonable defaults, potentially overriding the\n@@ -1176,12 +1128,12 @@ func (conn *UDPIPv6) Send(t *testing.T, udp UDP, additionalLayers ...Layer) {\nfunc (conn *UDPIPv6) SendIPv6(t *testing.T, ip IPv6, udp UDP, additionalLayers ...Layer) {\nt.Helper()\n- (*Connection)(conn).send(t, Layers{&ip, &udp}, additionalLayers...)\n+ conn.send(t, Layers{&ip, &udp}, additionalLayers...)\n}\n// SendFrame sends a frame on the wire and updates the state of all layers.\nfunc (conn *UDPIPv6) SendFrame(t *testing.T, overrideLayers Layers, additionalLayers ...Layer) {\n- (*Connection)(conn).send(t, overrideLayers, additionalLayers...)\n+ conn.send(t, overrideLayers, additionalLayers...)\n}\n// Expect expects a frame with the UDP layer matching the provided UDP within\n@@ -1189,7 +1141,7 @@ func (conn *UDPIPv6) SendFrame(t *testing.T, overrideLayers Layers, additionalLa\nfunc (conn *UDPIPv6) Expect(t *testing.T, udp UDP, timeout time.Duration) (*UDP, error) {\nt.Helper()\n- layer, err := (*Connection)(conn).Expect(t, &udp, timeout)\n+ layer, err := conn.Connection.Expect(t, &udp, timeout)\nif err != nil {\nreturn nil, err\n}\n@@ -1210,34 +1162,13 @@ func (conn *UDPIPv6) ExpectData(t 
*testing.T, udp UDP, payload Payload, timeout\nif payload.length() != 0 {\nexpected = append(expected, &payload)\n}\n- return (*Connection)(conn).ExpectFrame(t, expected, timeout)\n-}\n-\n-// ExpectFrame expects a frame that matches the provided Layers within the\n-// timeout specified. If it doesn't arrive in time, an error is returned.\n-func (conn *UDPIPv6) ExpectFrame(t *testing.T, frame Layers, timeout time.Duration) (Layers, error) {\n- t.Helper()\n-\n- return (*Connection)(conn).ExpectFrame(t, frame, timeout)\n-}\n-\n-// Close frees associated resources held by the UDPIPv6 connection.\n-func (conn *UDPIPv6) Close(t *testing.T) {\n- t.Helper()\n-\n- (*Connection)(conn).Close(t)\n-}\n-\n-// Drain drains the sniffer's receive buffer by receiving packets until there's\n-// nothing else to receive.\n-func (conn *UDPIPv6) Drain(t *testing.T) {\n- t.Helper()\n-\n- conn.sniffer.Drain(t)\n+ return conn.ExpectFrame(t, expected, timeout)\n}\n// TCPIPv6 maintains the state for all the layers in a TCP/IPv6 connection.\n-type TCPIPv6 Connection\n+type TCPIPv6 struct {\n+ Connection\n+}\n// NewTCPIPv6 creates a new TCPIPv6 connection with reasonable defaults.\nfunc (n *DUTTestNet) NewTCPIPv6(t *testing.T, outgoingTCP, incomingTCP TCP) TCPIPv6 {\n@@ -1263,9 +1194,11 @@ func (n *DUTTestNet) NewTCPIPv6(t *testing.T, outgoingTCP, incomingTCP TCP) TCPI\n}\nreturn TCPIPv6{\n+ Connection: Connection{\nlayerStates: []layerState{etherState, ipv6State, tcpState},\ninjector: injector,\nsniffer: sniffer,\n+ },\n}\n}\n@@ -1285,12 +1218,5 @@ func (conn *TCPIPv6) ExpectData(t *testing.T, tcp *TCP, payload *Payload, timeou\nif payload != nil {\nexpected = append(expected, payload)\n}\n- return (*Connection)(conn).ExpectFrame(t, expected, timeout)\n-}\n-\n-// Close frees associated resources held by the TCPIPv6 connection.\n-func (conn *TCPIPv6) Close(t *testing.T) {\n- t.Helper()\n-\n- (*Connection)(conn).Close(t)\n+ return conn.ExpectFrame(t, expected, timeout)\n}\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/icmpv6_param_problem_test.go", "new_path": "test/packetimpact/tests/icmpv6_param_problem_test.go", "diff": "@@ -44,8 +44,8 @@ func TestICMPv6ParamProblemTest(t *testing.T) {\nPayload: []byte(\"hello world\"),\n}\n- toSend := (*testbench.Connection)(&conn).CreateFrame(t, testbench.Layers{&ipv6}, &icmpv6)\n- (*testbench.Connection)(&conn).SendFrame(t, toSend)\n+ toSend := conn.CreateFrame(t, testbench.Layers{&ipv6}, &icmpv6)\n+ conn.SendFrame(t, toSend)\n// Build the expected ICMPv6 payload, which includes an index to the\n// problematic byte and also the problematic packet as described in\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/ipv6_fragment_icmp_error_test.go", "new_path": "test/packetimpact/tests/ipv6_fragment_icmp_error_test.go", "diff": "@@ -37,7 +37,7 @@ func init() {\ntestbench.Initialize(flag.CommandLine)\n}\n-func fragmentedICMPEchoRequest(t *testing.T, n *testbench.DUTTestNet, conn *testbench.Connection, firstPayloadLength uint16, payload []byte, secondFragmentOffset uint16) ([]testbench.Layers, [][]byte) {\n+func fragmentedICMPEchoRequest(t *testing.T, n *testbench.DUTTestNet, conn *testbench.IPv6Conn, firstPayloadLength uint16, payload []byte, secondFragmentOffset uint16) ([]testbench.Layers, [][]byte) {\nt.Helper()\nicmpv6Header := header.ICMPv6(make([]byte, header.ICMPv6EchoMinimumSize))\n@@ -121,17 +121,16 @@ func TestIPv6ICMPEchoRequestFragmentReassembly(t *testing.T) {\nt.Run(test.name, func(t *testing.T) {\nt.Parallel()\ndut := 
testbench.NewDUT(t)\n- ipv6Conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})\n- conn := (*testbench.Connection)(&ipv6Conn)\n- defer ipv6Conn.Close(t)\n+ conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})\n+ defer conn.Close(t)\n- fragments, _ := fragmentedICMPEchoRequest(t, dut.Net, conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)\n+ fragments, _ := fragmentedICMPEchoRequest(t, dut.Net, &conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)\nfor _, i := range test.sendFrameOrder {\nconn.SendFrame(t, fragments[i-1])\n}\n- gotEchoReply, err := ipv6Conn.ExpectFrame(t, testbench.Layers{\n+ gotEchoReply, err := conn.ExpectFrame(t, testbench.Layers{\n&testbench.Ether{},\n&testbench.IPv6{},\n&testbench.ICMPv6{\n@@ -223,17 +222,16 @@ func TestIPv6FragmentReassemblyTimeout(t *testing.T) {\nt.Run(test.name, func(t *testing.T) {\nt.Parallel()\ndut := testbench.NewDUT(t)\n- ipv6Conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})\n- conn := (*testbench.Connection)(&ipv6Conn)\n- defer ipv6Conn.Close(t)\n+ conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})\n+ defer conn.Close(t)\n- fragments, ipv6Bytes := fragmentedICMPEchoRequest(t, dut.Net, conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)\n+ fragments, ipv6Bytes := fragmentedICMPEchoRequest(t, dut.Net, &conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)\nfor _, i := range test.sendFrameOrder {\nconn.SendFrame(t, fragments[i-1])\n}\n- gotErrorMessage, err := ipv6Conn.ExpectFrame(t, testbench.Layers{\n+ gotErrorMessage, err := conn.ExpectFrame(t, testbench.Layers{\n&testbench.Ether{},\n&testbench.IPv6{},\n&testbench.ICMPv6{\n@@ -319,17 +317,16 @@ func TestIPv6FragmentParamProblem(t *testing.T) {\nt.Run(test.name, func(t *testing.T) {\nt.Parallel()\ndut := testbench.NewDUT(t)\n- ipv6Conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})\n- conn := (*testbench.Connection)(&ipv6Conn)\n- defer ipv6Conn.Close(t)\n+ conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})\n+ defer conn.Close(t)\n- fragments, ipv6Bytes := fragmentedICMPEchoRequest(t, dut.Net, conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)\n+ fragments, ipv6Bytes := fragmentedICMPEchoRequest(t, dut.Net, &conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)\nfor _, i := range test.sendFrameOrder {\nconn.SendFrame(t, fragments[i-1])\n}\n- gotErrorMessage, err := ipv6Conn.ExpectFrame(t, testbench.Layers{\n+ gotErrorMessage, err := conn.ExpectFrame(t, testbench.Layers{\n&testbench.Ether{},\n&testbench.IPv6{},\n&testbench.ICMPv6{\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/ipv6_unknown_options_action_test.go", "new_path": "test/packetimpact/tests/ipv6_unknown_options_action_test.go", "diff": "@@ -141,9 +141,8 @@ func TestIPv6UnknownOptionAction(t *testing.T) {\n} {\nt.Run(tt.description, func(t *testing.T) {\ndut := testbench.NewDUT(t)\n- ipv6Conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})\n- conn := (*testbench.Connection)(&ipv6Conn)\n- defer ipv6Conn.Close(t)\n+ conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})\n+ defer conn.Close(t)\noutgoingOverride := testbench.Layers{}\nif tt.multicastDst {\n@@ -166,7 +165,7 @@ func TestIPv6UnknownOptionAction(t *testing.T) {\n// after the IPv6 header (after NextHeader and ExtHdrLen).\nbinary.BigEndian.PutUint32(icmpv6Payload, header.IPv6MinimumSize+2)\nicmpv6Payload = 
append(icmpv6Payload, invokingPacket...)\n- gotICMPv6, err := ipv6Conn.ExpectFrame(t, testbench.Layers{\n+ gotICMPv6, err := conn.ExpectFrame(t, testbench.Layers{\n&testbench.Ether{},\n&testbench.IPv6{},\n&testbench.ICMPv6{\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/tcp_network_unreachable_test.go", "new_path": "test/packetimpact/tests/tcp_network_unreachable_test.go", "diff": "@@ -57,8 +57,7 @@ func TestTCPSynSentUnreachable(t *testing.T) {\n}\n// Send a host unreachable message.\n- rawConn := (*testbench.Connection)(&conn)\n- layers := rawConn.CreateFrame(t, nil)\n+ layers := conn.CreateFrame(t, nil)\nlayers = layers[:len(layers)-1]\nconst ipLayer = 1\nconst tcpLayer = ipLayer + 1\n@@ -76,7 +75,7 @@ func TestTCPSynSentUnreachable(t *testing.T) {\n}\nlayers = append(layers, &icmpv4, ip, tcp)\n- rawConn.SendFrameStateless(t, layers)\n+ conn.SendFrameStateless(t, layers)\nif err := getConnectError(t, &dut, clientFD); err != unix.EHOSTUNREACH {\nt.Errorf(\"got connect() = %v, want EHOSTUNREACH\", err)\n@@ -112,8 +111,7 @@ func TestTCPSynSentUnreachable6(t *testing.T) {\n}\n// Send a host unreachable message.\n- rawConn := (*testbench.Connection)(&conn)\n- layers := rawConn.CreateFrame(t, nil)\n+ layers := conn.CreateFrame(t, nil)\nlayers = layers[:len(layers)-1]\nconst ipLayer = 1\nconst tcpLayer = ipLayer + 1\n@@ -132,7 +130,7 @@ func TestTCPSynSentUnreachable6(t *testing.T) {\nPayload: []byte{0, 0, 0, 0},\n}\nlayers = append(layers, &icmpv6, ip, tcp)\n- rawConn.SendFrameStateless(t, layers)\n+ conn.SendFrameStateless(t, layers)\nif err := getConnectError(t, &dut, clientFD); err != unix.ENETUNREACH {\nt.Errorf(\"got connect() = %v, want EHOSTUNREACH\", err)\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/udp_icmp_error_propagation_test.go", "new_path": "test/packetimpact/tests/udp_icmp_error_propagation_test.go", "diff": "@@ -102,7 +102,7 @@ func wantErrno(c connectionMode, icmpErr icmpError) syscall.Errno {\nfunc sendICMPError(t *testing.T, conn *testbench.UDPIPv4, icmpErr icmpError, udp *testbench.UDP) {\nt.Helper()\n- layers := (*testbench.Connection)(conn).CreateFrame(t, nil)\n+ layers := conn.CreateFrame(t, nil)\nlayers = layers[:len(layers)-1]\nip, ok := udp.Prev().(*testbench.IPv4)\nif !ok {\n@@ -120,7 +120,7 @@ func sendICMPError(t *testing.T, conn *testbench.UDPIPv4, icmpErr icmpError, udp\n// resulting in a mal-formed packet.\nlayers = append(layers, icmpErr.ToICMPv4(), ip, udp)\n- (*testbench.Connection)(conn).SendFrameStateless(t, layers)\n+ conn.SendFrameStateless(t, layers)\n}\n// testRecv tests observing the ICMP error through the recv syscall. A packet\n" } ]
Go
Apache License 2.0
google/gvisor
Use struct embedding to avoid casts and reuse methods - Removed (*testbench.Connection)(&conn) like casts - Removed redundant definition of Drain, Close and ExpectFrame PiperOrigin-RevId: 360727788
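Aside: the cast removal in the record above relies on how Go method sets work. A type defined as `type TCPIPv4 Connection` shares Connection's fields but none of its methods, which forces call sites into conversions like (*testbench.Connection)(&conn); embedding the struct instead promotes the methods. The sketch below shows the two styles with invented names; it is not the testbench API.

```go
package main

import "fmt"

// Base plays the role of the shared connection type.
type Base struct{ name string }

func (b *Base) Send(msg string) { fmt.Println(b.name, "sends", msg) }

// ByDefinition has Base's fields but none of its methods, so callers
// must convert before calling Send, mirroring (*testbench.Connection)(&conn).
type ByDefinition Base

// ByEmbedding embeds Base, so Send is promoted and callable directly.
type ByEmbedding struct {
	Base
}

func main() {
	d := ByDefinition{name: "defined"}
	(*Base)(&d).Send("hello") // explicit conversion required

	e := ByEmbedding{Base: Base{name: "embedded"}}
	e.Send("hello") // promoted method, no conversion
}
```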
260,004
03.03.2021 12:35:26
28,800
9c80bcf32d8db3749a57ef104c5c9bab9f57d9c1
Export faketime to go branch
[ { "change_type": "MODIFY", "old_path": "BUILD", "new_path": "BUILD", "diff": "@@ -122,6 +122,7 @@ go_path(\n# Packages that are not dependencies of the above.\n\"//pkg/sentry/kernel/memevent\",\n\"//pkg/tcpip/adapters/gonet\",\n+ \"//pkg/tcpip/faketime\",\n\"//pkg/tcpip/link/channel\",\n\"//pkg/tcpip/link/ethernet\",\n\"//pkg/tcpip/link/muxed\",\n" } ]
Go
Apache License 2.0
google/gvisor
Export faketime to go branch PiperOrigin-RevId: 360732928
260,003
03.03.2021 16:14:27
28,800
76f0d2c67b75f9916866c07663dae7c7da805dbc
Deflake //pkg/tcpip/tests/integration:forward_test clientEP.Connect may fail because serverEP was not listening.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/integration/forward_test.go", "new_path": "pkg/tcpip/tests/integration/forward_test.go", "diff": "@@ -135,14 +135,15 @@ func TestForwarding(t *testing.T) {\nname string\nproto tcpip.TransportProtocolNumber\nexpectedConnectErr tcpip.Error\n- setupServerSide func(t *testing.T, ep tcpip.Endpoint, ch <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{})\n+ setupServer func(t *testing.T, ep tcpip.Endpoint)\n+ setupServerConn func(t *testing.T, ep tcpip.Endpoint, ch <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{})\nneedRemoteAddr bool\n}{\n{\nname: \"UDP\",\nproto: udp.ProtocolNumber,\nexpectedConnectErr: nil,\n- setupServerSide: func(t *testing.T, ep tcpip.Endpoint, _ <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{}) {\n+ setupServerConn: func(t *testing.T, ep tcpip.Endpoint, _ <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{}) {\nt.Helper()\nif err := ep.Connect(clientAddr); err != nil {\n@@ -156,12 +157,16 @@ func TestForwarding(t *testing.T) {\nname: \"TCP\",\nproto: tcp.ProtocolNumber,\nexpectedConnectErr: &tcpip.ErrConnectStarted{},\n- setupServerSide: func(t *testing.T, ep tcpip.Endpoint, ch <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{}) {\n+ setupServer: func(t *testing.T, ep tcpip.Endpoint) {\nt.Helper()\nif err := ep.Listen(1); err != nil {\nt.Fatalf(\"ep.Listen(1): %s\", err)\n}\n+ },\n+ setupServerConn: func(t *testing.T, ep tcpip.Endpoint, ch <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{}) {\n+ t.Helper()\n+\nvar addr tcpip.FullAddress\nfor {\nnewEP, wq, err := ep.Accept(&addr)\n@@ -214,6 +219,9 @@ func TestForwarding(t *testing.T) {\nt.Fatalf(\"epsAndAddrs.clientEP.Bind(%#v): %s\", clientAddr, err)\n}\n+ if subTest.setupServer != nil {\n+ subTest.setupServer(t, epsAndAddrs.serverEP)\n+ }\n{\nerr := epsAndAddrs.clientEP.Connect(serverAddr)\nif diff := cmp.Diff(subTest.expectedConnectErr, err); diff != \"\" {\n@@ -229,7 +237,7 @@ func TestForwarding(t *testing.T) {\nserverEP := epsAndAddrs.serverEP\nserverCH := epsAndAddrs.serverReadableCH\n- if ep, ch := subTest.setupServerSide(t, serverEP, serverCH, clientAddr); ep != nil {\n+ if ep, ch := subTest.setupServerConn(t, serverEP, serverCH, clientAddr); ep != nil {\ndefer ep.Close()\nserverEP = ep\nserverCH = ch\n@@ -256,14 +264,21 @@ func TestForwarding(t *testing.T) {\nread := func(ch chan struct{}, ep tcpip.Endpoint, data []byte, expectedFrom tcpip.FullAddress) {\nt.Helper()\n- // Wait for the endpoint to be readable.\n- <-ch\nvar buf bytes.Buffer\n+ var res tcpip.ReadResult\n+ for {\n+ var err tcpip.Error\nopts := tcpip.ReadOptions{NeedRemoteAddr: subTest.needRemoteAddr}\n- res, err := ep.Read(&buf, opts)\n+ res, err = ep.Read(&buf, opts)\n+ if _, ok := err.(*tcpip.ErrWouldBlock); ok {\n+ <-ch\n+ continue\n+ }\nif err != nil {\nt.Fatalf(\"ep.Read(_, %d, %#v): %s\", len(data), opts, err)\n}\n+ break\n+ }\nreadResult := tcpip.ReadResult{\nCount: len(data),\n" } ]
Go
Apache License 2.0
google/gvisor
Deflake //pkg/tcpip/tests/integration:forward_test clientEP.Connect may fail because serverEP was not listening. PiperOrigin-RevId: 360780667
259,854
04.03.2021 02:34:51
28,800
9b1170123d323e1f1e49bf5cf792070629d7ae09
Fix race in unix socket transport. transport.baseEndpoint.receiver and transport.baseEndpoint.connected are protected by transport.baseEndpoint.Mutex. In order to access them without holding the mutex, we must make a copy. Notifications must be sent without holding the mutex, so we need the values without holding the mutex.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/transport/unix.go", "new_path": "pkg/sentry/socket/unix/transport/unix.go", "diff": "@@ -816,19 +816,20 @@ func (e *baseEndpoint) Connected() bool {\nfunc (e *baseEndpoint) RecvMsg(ctx context.Context, data [][]byte, creds bool, numRights int, peek bool, addr *tcpip.FullAddress) (int64, int64, ControlMessages, bool, *syserr.Error) {\ne.Lock()\n- if e.receiver == nil {\n+ receiver := e.receiver\n+ if receiver == nil {\ne.Unlock()\nreturn 0, 0, ControlMessages{}, false, syserr.ErrNotConnected\n}\n- recvLen, msgLen, cms, cmt, a, notify, err := e.receiver.Recv(ctx, data, creds, numRights, peek)\n+ recvLen, msgLen, cms, cmt, a, notify, err := receiver.Recv(ctx, data, creds, numRights, peek)\ne.Unlock()\nif err != nil {\nreturn 0, 0, ControlMessages{}, false, err\n}\nif notify {\n- e.receiver.RecvNotify()\n+ receiver.RecvNotify()\n}\nif addr != nil {\n@@ -850,11 +851,12 @@ func (e *baseEndpoint) SendMsg(ctx context.Context, data [][]byte, c ControlMess\nreturn 0, syserr.ErrAlreadyConnected\n}\n- n, notify, err := e.connected.Send(ctx, data, c, tcpip.FullAddress{Addr: tcpip.Address(e.path)})\n+ connected := e.connected\n+ n, notify, err := connected.Send(ctx, data, c, tcpip.FullAddress{Addr: tcpip.Address(e.path)})\ne.Unlock()\nif notify {\n- e.connected.SendNotify()\n+ connected.SendNotify()\n}\nreturn n, err\n" } ]
Go
Apache License 2.0
google/gvisor
Fix race in unix socket transport. transport.baseEndpoint.receiver and transport.baseEndpoint.connected are protected by transport.baseEndpoint.Mutex. In order to access them without holding the mutex, we must make a copy. Notifications must be sent without holding the mutex, so we need the values without holding the mutex.
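Aside: the race fix above uses a standard copy-under-lock pattern: take the mutex, copy the protected field into a local variable, release the mutex, and operate on the copy so the notification is sent without holding the lock. The sketch below illustrates that pattern with made-up types; it is not the transport package's code.

```go
package main

import (
	"fmt"
	"sync"
)

// queue is a stand-in for the transport's receiver/connected objects.
type queue struct{ msg string }

func (q *queue) Recv() string { return q.msg }
func (q *queue) RecvNotify()  { fmt.Println("notified") } // must run without the endpoint lock

type endpoint struct {
	mu       sync.Mutex
	receiver *queue // protected by mu
}

// RecvMsg copies the protected field while the lock is held, then uses
// the copy after unlocking, so the notification happens outside the
// critical section without racing a concurrent reassignment of e.receiver.
func (e *endpoint) RecvMsg() (string, error) {
	e.mu.Lock()
	r := e.receiver
	e.mu.Unlock()
	if r == nil {
		return "", fmt.Errorf("not connected")
	}
	msg := r.Recv()
	r.RecvNotify()
	return msg, nil
}

func main() {
	e := &endpoint{receiver: &queue{msg: "hello"}}
	fmt.Println(e.RecvMsg())
}
```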
260,003
04.03.2021 10:52:00
28,800
a9face757a2a0b7530999f112def3b633dbdecf4
Nit fix: Should use maxTimeout in backoffTimer The only user is in (*handshake).complete and it specifies MaxRTO, so there is no behavior changes.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/connect.go", "new_path": "pkg/tcpip/transport/tcp/connect.go", "diff": "@@ -606,7 +606,7 @@ func newBackoffTimer(timeout, maxTimeout time.Duration, f func()) (*backoffTimer\nfunc (bt *backoffTimer) reset() tcpip.Error {\nbt.timeout *= 2\n- if bt.timeout > MaxRTO {\n+ if bt.timeout > bt.maxTimeout {\nreturn &tcpip.ErrTimeout{}\n}\nbt.t.Reset(bt.timeout)\n" } ]
Go
Apache License 2.0
google/gvisor
Nit fix: Should use maxTimeout in backoffTimer The only user is in (*handshake).complete and it specifies MaxRTO, so there is no behavior changes. PiperOrigin-RevId: 360954447
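Aside: the one-line fix above is about honoring the timer's own configured maximum rather than a package constant when doubling the backoff. A tiny capped-exponential-backoff sketch with invented names (not the tcp package's implementation):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// backoff doubles its timeout on every reset and reports an error once
// its own configured maximum (not a global constant) is exceeded.
type backoff struct {
	timeout, maxTimeout time.Duration
}

var errTimedOut = errors.New("retransmit timed out")

func (b *backoff) reset() error {
	b.timeout *= 2
	if b.timeout > b.maxTimeout {
		return errTimedOut
	}
	return nil
}

func main() {
	b := &backoff{timeout: time.Second, maxTimeout: 8 * time.Second}
	for {
		if err := b.reset(); err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println("next retry in", b.timeout) // 2s, 4s, 8s, then give up
	}
}
```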
259,898
05.03.2021 11:53:18
28,800
2f0b82a8028019d4c996cf64341f84bb1d1c65b7
Gather uname information from DUT Some OSs behave slightly differently, but still within the RFC. It can be useful to have access to uname information from the testbench.
[ { "change_type": "MODIFY", "old_path": "test/packetimpact/runner/dut.go", "new_path": "test/packetimpact/runner/dut.go", "diff": "@@ -109,6 +109,7 @@ type dutInfo struct {\ndut DUT\nctrlNet, testNet *dockerutil.Network\nnetInfo *testbench.DUTTestNet\n+ uname *testbench.DUTUname\n}\n// setUpDUT will set up one DUT and return information for setting up the\n@@ -182,6 +183,10 @@ func setUpDUT(ctx context.Context, t *testing.T, id int, mkDevice func(*dockerut\nPOSIXServerIP: AddressInSubnet(DUTAddr, *ctrlNet.Subnet),\nPOSIXServerPort: CtrlPort,\n}\n+ info.uname, err = dut.Uname(ctx)\n+ if err != nil {\n+ return dutInfo{}, fmt.Errorf(\"failed to get uname information on DUT: %w\", err)\n+ }\nreturn info, nil\n}\n@@ -195,7 +200,7 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co\ndutInfoChan := make(chan dutInfo, numDUTs)\nerrChan := make(chan error, numDUTs)\nvar dockerNetworks []*dockerutil.Network\n- var dutTestNets []*testbench.DUTTestNet\n+ var dutInfos []*testbench.DUTInfo\nvar duts []DUT\nsetUpCtx, cancelSetup := context.WithCancel(ctx)\n@@ -214,7 +219,10 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co\nselect {\ncase info := <-dutInfoChan:\ndockerNetworks = append(dockerNetworks, info.ctrlNet, info.testNet)\n- dutTestNets = append(dutTestNets, info.netInfo)\n+ dutInfos = append(dutInfos, &testbench.DUTInfo{\n+ Net: info.netInfo,\n+ Uname: info.uname,\n+ })\nduts = append(duts, info.dut)\ncase err := <-errChan:\nt.Fatal(err)\n@@ -246,23 +254,23 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co\nt.Fatalf(\"cannot start testbench container: %s\", err)\n}\n- for i := range dutTestNets {\n- name, info, err := deviceByIP(ctx, testbenchContainer, dutTestNets[i].LocalIPv4)\n+ for i := range dutInfos {\n+ name, info, err := deviceByIP(ctx, testbenchContainer, dutInfos[i].Net.LocalIPv4)\nif err != nil {\n- t.Fatalf(\"failed to get the device name associated with %s: %s\", dutTestNets[i].LocalIPv4, err)\n+ t.Fatalf(\"failed to get the device name associated with %s: %s\", dutInfos[i].Net.LocalIPv4, err)\n}\n- dutTestNets[i].LocalDevName = name\n- dutTestNets[i].LocalDevID = info.ID\n- dutTestNets[i].LocalMAC = info.MAC\n+ dutInfos[i].Net.LocalDevName = name\n+ dutInfos[i].Net.LocalDevID = info.ID\n+ dutInfos[i].Net.LocalMAC = info.MAC\nlocalIPv6, err := getOrAssignIPv6Addr(ctx, testbenchContainer, name)\nif err != nil {\nt.Fatalf(\"failed to get IPV6 address on %s: %s\", testbenchContainer.Name, err)\n}\n- dutTestNets[i].LocalIPv6 = localIPv6\n+ dutInfos[i].Net.LocalIPv6 = localIPv6\n}\n- dutTestNetsBytes, err := json.Marshal(dutTestNets)\n+ dutInfosBytes, err := json.Marshal(dutInfos)\nif err != nil {\n- t.Fatalf(\"failed to marshal %v into json: %s\", dutTestNets, err)\n+ t.Fatalf(\"failed to marshal %v into json: %s\", dutInfos, err)\n}\nbaseSnifferArgs := []string{\n@@ -296,7 +304,8 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co\n\"-n\",\n}\n}\n- for _, n := range dutTestNets {\n+ for _, info := range dutInfos {\n+ n := info.Net\nsnifferArgs := append(baseSnifferArgs, \"-i\", n.LocalDevName)\nif !tshark {\nsnifferArgs = append(\n@@ -351,7 +360,7 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co\ntestArgs = append(testArgs, extraTestArgs...)\ntestArgs = append(testArgs,\nfmt.Sprintf(\"--native=%t\", native),\n- \"--dut_test_nets_json\", string(dutTestNetsBytes),\n+ \"--dut_infos_json\", 
string(dutInfosBytes),\n)\ntestbenchLogs, err := testbenchContainer.Exec(ctx, dockerutil.ExecOpts{}, testArgs...)\nif (err != nil) != expectFailure {\n@@ -388,6 +397,10 @@ type DUT interface {\n// The t parameter is supposed to be used for t.Cleanup. Don't use it for\n// t.Fatal/FailNow functions.\nPrepare(ctx context.Context, t *testing.T, runOpts dockerutil.RunOpts, ctrlNet, testNet *dockerutil.Network) (net.IP, net.HardwareAddr, uint32, string, error)\n+\n+ // Uname gathers information of DUT using command uname.\n+ Uname(ctx context.Context) (*testbench.DUTUname, error)\n+\n// Logs retrieves the logs from the dut.\nLogs(ctx context.Context) (string, error)\n}\n@@ -440,6 +453,38 @@ func (dut *DockerDUT) Prepare(ctx context.Context, _ *testing.T, runOpts dockeru\nreturn remoteIPv6, dutDeviceInfo.MAC, dutDeviceInfo.ID, testNetDev, nil\n}\n+// Uname implements DUT.Uname.\n+func (dut *DockerDUT) Uname(ctx context.Context) (*testbench.DUTUname, error) {\n+ machine, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, \"uname\", \"-m\")\n+ if err != nil {\n+ return nil, err\n+ }\n+ kernelRelease, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, \"uname\", \"-r\")\n+ if err != nil {\n+ return nil, err\n+ }\n+ kernelVersion, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, \"uname\", \"-v\")\n+ if err != nil {\n+ return nil, err\n+ }\n+ kernelName, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, \"uname\", \"-s\")\n+ if err != nil {\n+ return nil, err\n+ }\n+ // TODO(gvisor.dev/issues/5586): -o is not supported on macOS.\n+ operatingSystem, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, \"uname\", \"-o\")\n+ if err != nil {\n+ return nil, err\n+ }\n+ return &testbench.DUTUname{\n+ Machine: strings.TrimRight(machine, \"\\n\"),\n+ KernelName: strings.TrimRight(kernelName, \"\\n\"),\n+ KernelRelease: strings.TrimRight(kernelRelease, \"\\n\"),\n+ KernelVersion: strings.TrimRight(kernelVersion, \"\\n\"),\n+ OperatingSystem: strings.TrimRight(operatingSystem, \"\\n\"),\n+ }, nil\n+}\n+\n// Logs implements DUT.Logs.\nfunc (dut *DockerDUT) Logs(ctx context.Context) (string, error) {\nlogs, err := dut.c.Logs(ctx)\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/testbench/BUILD", "new_path": "test/packetimpact/testbench/BUILD", "diff": "load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\npackage(\n- default_visibility = [\"//test/packetimpact:__subpackages__\"],\nlicenses = [\"notice\"],\n)\n@@ -15,6 +14,7 @@ go_library(\n\"rawsockets.go\",\n\"testbench.go\",\n],\n+ visibility = [\"//test/packetimpact:__subpackages__\"],\ndeps = [\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/testbench/dut.go", "new_path": "test/packetimpact/testbench/dut.go", "diff": "@@ -35,24 +35,26 @@ type DUT struct {\nconn *grpc.ClientConn\nposixServer POSIXClient\nNet *DUTTestNet\n+ Uname *DUTUname\n}\n// NewDUT creates a new connection with the DUT over gRPC.\nfunc NewDUT(t *testing.T) DUT {\nt.Helper()\n- n := GetDUTTestNet()\n- dut := n.ConnectToDUT(t)\n+ info := getDUTInfo()\n+ dut := info.ConnectToDUT(t)\nt.Cleanup(func() {\ndut.TearDownConnection()\n- dut.Net.Release()\n+ info.release()\n})\nreturn dut\n}\n// ConnectToDUT connects to DUT through gRPC.\n-func (n *DUTTestNet) ConnectToDUT(t *testing.T) DUT {\n+func (info *DUTInfo) ConnectToDUT(t *testing.T) DUT {\nt.Helper()\n+ n := info.Net\nposixServerAddress := net.JoinHostPort(n.POSIXServerIP.String(), fmt.Sprintf(\"%d\", n.POSIXServerPort))\nconn, err := grpc.Dial(posixServerAddress, grpc.WithInsecure(), 
grpc.WithKeepaliveParams(keepalive.ClientParameters{Timeout: RPCKeepalive}))\nif err != nil {\n@@ -63,6 +65,7 @@ func (n *DUTTestNet) ConnectToDUT(t *testing.T) DUT {\nconn: conn,\nposixServer: posixServer,\nNet: n,\n+ Uname: info.Uname,\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/testbench/testbench.go", "new_path": "test/packetimpact/testbench/testbench.go", "diff": "@@ -34,14 +34,29 @@ var (\n// RPCTimeout is the gRPC timeout.\nRPCTimeout = 100 * time.Millisecond\n- // dutTestNetsJSON is the json string that describes all the test networks to\n+ // dutInfosJSON is the json string that describes information about all the\n// duts available to use.\n- dutTestNetsJSON string\n- // dutTestNets is the pool among which the testbench can choose a DUT to work\n+ dutInfosJSON string\n+ // dutInfo is the pool among which the testbench can choose a DUT to work\n// with.\n- dutTestNets chan *DUTTestNet\n+ dutInfo chan *DUTInfo\n)\n+// DUTInfo has both network and uname information about the DUT.\n+type DUTInfo struct {\n+ Uname *DUTUname\n+ Net *DUTTestNet\n+}\n+\n+// DUTUname contains information about the DUT from uname.\n+type DUTUname struct {\n+ Machine string\n+ KernelName string\n+ KernelRelease string\n+ KernelVersion string\n+ OperatingSystem string\n+}\n+\n// DUTTestNet describes the test network setup on dut and how the testbench\n// should connect with an existing DUT.\ntype DUTTestNet struct {\n@@ -86,7 +101,7 @@ func registerFlags(fs *flag.FlagSet) {\nfs.BoolVar(&Native, \"native\", Native, \"whether the test is running natively\")\nfs.DurationVar(&RPCTimeout, \"rpc_timeout\", RPCTimeout, \"gRPC timeout\")\nfs.DurationVar(&RPCKeepalive, \"rpc_keepalive\", RPCKeepalive, \"gRPC keepalive\")\n- fs.StringVar(&dutTestNetsJSON, \"dut_test_nets_json\", dutTestNetsJSON, \"path to the dut test nets json file\")\n+ fs.StringVar(&dutInfosJSON, \"dut_infos_json\", dutInfosJSON, \"json that describes the DUTs\")\n}\n// Initialize initializes the testbench, it parse the flags and sets up the\n@@ -94,27 +109,27 @@ func registerFlags(fs *flag.FlagSet) {\nfunc Initialize(fs *flag.FlagSet) {\nregisterFlags(fs)\nflag.Parse()\n- if err := loadDUTTestNets(); err != nil {\n+ if err := loadDUTInfos(); err != nil {\npanic(err)\n}\n}\n-// loadDUTTestNets loads available DUT test networks from the json file, it\n+// loadDUTInfos loads available DUT test infos from the json file, it\n// must be called after flag.Parse().\n-func loadDUTTestNets() error {\n- var parsedTestNets []DUTTestNet\n- if err := json.Unmarshal([]byte(dutTestNetsJSON), &parsedTestNets); err != nil {\n+func loadDUTInfos() error {\n+ var dutInfos []DUTInfo\n+ if err := json.Unmarshal([]byte(dutInfosJSON), &dutInfos); err != nil {\nreturn fmt.Errorf(\"failed to unmarshal JSON: %w\", err)\n}\n- if got, want := len(parsedTestNets), 1; got < want {\n+ if got, want := len(dutInfos), 1; got < want {\nreturn fmt.Errorf(\"got %d DUTs, the test requires at least %d DUTs\", got, want)\n}\n// Using a buffered channel as semaphore\n- dutTestNets = make(chan *DUTTestNet, len(parsedTestNets))\n- for i := range parsedTestNets {\n- parsedTestNets[i].LocalIPv4 = parsedTestNets[i].LocalIPv4.To4()\n- parsedTestNets[i].RemoteIPv4 = parsedTestNets[i].RemoteIPv4.To4()\n- dutTestNets <- &parsedTestNets[i]\n+ dutInfo = make(chan *DUTInfo, len(dutInfos))\n+ for i := range dutInfos {\n+ dutInfos[i].Net.LocalIPv4 = dutInfos[i].Net.LocalIPv4.To4()\n+ dutInfos[i].Net.RemoteIPv4 = dutInfos[i].Net.RemoteIPv4.To4()\n+ dutInfo <- 
&dutInfos[i]\n}\nreturn nil\n}\n@@ -130,14 +145,13 @@ func GenerateRandomPayload(t *testing.T, n int) []byte {\nreturn buf\n}\n-// GetDUTTestNet gets a usable DUTTestNet, the function will block until any\n-// becomes available.\n-func GetDUTTestNet() *DUTTestNet {\n- return <-dutTestNets\n+// getDUTInfo returns information about an available DUT from the pool. If no\n+// DUT is readily available, getDUTInfo blocks until one becomes available.\n+func getDUTInfo() *DUTInfo {\n+ return <-dutInfo\n}\n-// Release releases the DUTTestNet back to the pool so that some other test\n-// can use.\n-func (n *DUTTestNet) Release() {\n- dutTestNets <- n\n+// release returns the DUTInfo back to the pool.\n+func (info *DUTInfo) release() {\n+ dutInfo <- info\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Gather uname information from DUT Some OSs behave slightly differently, but still within the RFC. It can be useful to have access to uname information from the testbench. PiperOrigin-RevId: 361193766
260,001
05.03.2021 12:03:33
28,800
808332e9e2e503f9d48b4a64e3151f22cb84e9fb
Implement IterDirent in verity fs
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/verity.go", "new_path": "pkg/sentry/fsimpl/verity/verity.go", "diff": "@@ -38,6 +38,7 @@ import (\n\"fmt\"\n\"math\"\n\"strconv\"\n+ \"strings\"\n\"sync/atomic\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n@@ -750,6 +751,50 @@ func (fd *fileDescription) SetStat(ctx context.Context, opts vfs.SetStatOptions)\nreturn syserror.EPERM\n}\n+// IterDirents implements vfs.FileDescriptionImpl.IterDirents.\n+func (fd *fileDescription) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {\n+ if !fd.d.isDir() {\n+ return syserror.ENOTDIR\n+ }\n+ fd.mu.Lock()\n+ defer fd.mu.Unlock()\n+\n+ var ds []vfs.Dirent\n+ err := fd.lowerFD.IterDirents(ctx, vfs.IterDirentsCallbackFunc(func(dirent vfs.Dirent) error {\n+ // Do not include the Merkle tree files.\n+ if strings.Contains(dirent.Name, merklePrefix) || strings.Contains(dirent.Name, merkleRootPrefix) {\n+ return nil\n+ }\n+ if fd.d.verityEnabled() {\n+ // Verify that the child is expected.\n+ if dirent.Name != \".\" && dirent.Name != \"..\" {\n+ if _, ok := fd.d.childrenNames[dirent.Name]; !ok {\n+ return alertIntegrityViolation(fmt.Sprintf(\"Unexpected children %s\", dirent.Name))\n+ }\n+ }\n+ }\n+ ds = append(ds, dirent)\n+ return nil\n+ }))\n+\n+ if err != nil {\n+ return err\n+ }\n+\n+ // The result should contain all children plus \".\" and \"..\".\n+ if fd.d.verityEnabled() && len(ds) != len(fd.d.childrenNames)+2 {\n+ return alertIntegrityViolation(fmt.Sprintf(\"Unexpected children number %d\", len(ds)))\n+ }\n+\n+ for fd.off < int64(len(ds)) {\n+ if err := cb.Handle(ds[fd.off]); err != nil {\n+ return err\n+ }\n+ fd.off++\n+ }\n+ return nil\n+}\n+\n// Seek implements vfs.FileDescriptionImpl.Seek.\nfunc (fd *fileDescription) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {\nfd.mu.Lock()\n" } ]
Go
Apache License 2.0
google/gvisor
Implement IterDirent in verity fs PiperOrigin-RevId: 361196154
260,004
05.03.2021 13:29:51
28,800
498709250a134d4d09a22d11cffdfdc402d9f052
Include duplicate address holder info in DADResult

The integrator may be interested in who owns a duplicate address so pass this information (if available) along. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/arp/arp.go", "new_path": "pkg/tcpip/network/arp/arp.go", "diff": "@@ -232,7 +232,7 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {\nlinkAddr := tcpip.LinkAddress(h.HardwareAddressSender())\ne.mu.Lock()\n- e.mu.dad.StopLocked(addr, &stack.DADDupAddrDetected{})\n+ e.mu.dad.StopLocked(addr, &stack.DADDupAddrDetected{HolderLinkAddress: linkAddr})\ne.mu.Unlock()\n// The solicited, override, and isRouter flags are not available for ARP;\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/icmp.go", "new_path": "pkg/tcpip/network/ipv6/icmp.go", "diff": "@@ -382,6 +382,10 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r\n// stack know so it can handle such a scenario and do nothing further with\n// the NS.\nif srcAddr == header.IPv6Any {\n+ // Since this is a DAD message we know the sender does not actually hold\n+ // the target address so there is no \"holder\".\n+ var holderLinkAddress tcpip.LinkAddress\n+\n// We would get an error if the address no longer exists or the address\n// is no longer tentative (DAD resolved between the call to\n// hasTentativeAddr and this point). Both of these are valid scenarios:\n@@ -393,7 +397,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r\n//\n// TODO(gvisor.dev/issue/4046): Handle the scenario when a duplicate\n// address is detected for an assigned address.\n- switch err := e.dupTentativeAddrDetected(targetAddr); err.(type) {\n+ switch err := e.dupTentativeAddrDetected(targetAddr, holderLinkAddress); err.(type) {\ncase nil, *tcpip.ErrBadAddress, *tcpip.ErrInvalidEndpointState:\ndefault:\npanic(fmt.Sprintf(\"unexpected error handling duplicate tentative address: %s\", err))\n@@ -561,10 +565,24 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r\n// 5, NDP messages cannot be fragmented. Also note that in the common case\n// NDP datagrams are very small and AsView() will not incur allocations.\nna := header.NDPNeighborAdvert(payload.AsView())\n+\n+ it, err := na.Options().Iter(false /* check */)\n+ if err != nil {\n+ // If we have a malformed NDP NA option, drop the packet.\n+ received.invalid.Increment()\n+ return\n+ }\n+\n+ targetLinkAddr, ok := getTargetLinkAddr(it)\n+ if !ok {\n+ received.invalid.Increment()\n+ return\n+ }\n+\ntargetAddr := na.TargetAddress()\ne.dad.mu.Lock()\n- e.dad.mu.dad.StopLocked(targetAddr, &stack.DADDupAddrDetected{})\n+ e.dad.mu.dad.StopLocked(targetAddr, &stack.DADDupAddrDetected{HolderLinkAddress: targetLinkAddr})\ne.dad.mu.Unlock()\nif e.hasTentativeAddr(targetAddr) {\n@@ -584,7 +602,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r\n//\n// TODO(gvisor.dev/issue/4046): Handle the scenario when a duplicate\n// address is detected for an assigned address.\n- switch err := e.dupTentativeAddrDetected(targetAddr); err.(type) {\n+ switch err := e.dupTentativeAddrDetected(targetAddr, targetLinkAddr); err.(type) {\ncase nil, *tcpip.ErrBadAddress, *tcpip.ErrInvalidEndpointState:\nreturn\ndefault:\n@@ -592,13 +610,6 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r\n}\n}\n- it, err := na.Options().Iter(false /* check */)\n- if err != nil {\n- // If we have a malformed NDP NA option, drop the packet.\n- received.invalid.Increment()\n- return\n- }\n-\n// At this point we know that the target address is not tentative on the\n// NIC. 
However, the target address may still be assigned to the NIC but not\n// tentative (it could be permanent). Such a scenario is beyond the scope of\n@@ -608,11 +619,6 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r\n// TODO(b/143147598): Handle the scenario described above. Also inform the\n// netstack integration that a duplicate address was detected outside of\n// DAD.\n- targetLinkAddr, ok := getTargetLinkAddr(it)\n- if !ok {\n- received.invalid.Increment()\n- return\n- }\n// As per RFC 4861 section 7.1.2:\n// A node MUST silently discard any received Neighbor Advertisement\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ipv6.go", "new_path": "pkg/tcpip/network/ipv6/ipv6.go", "diff": "@@ -348,7 +348,7 @@ func (e *endpoint) hasTentativeAddr(addr tcpip.Address) bool {\n// dupTentativeAddrDetected removes the tentative address if it exists. If the\n// address was generated via SLAAC, an attempt is made to generate a new\n// address.\n-func (e *endpoint) dupTentativeAddrDetected(addr tcpip.Address) tcpip.Error {\n+func (e *endpoint) dupTentativeAddrDetected(addr tcpip.Address, holderLinkAddr tcpip.LinkAddress) tcpip.Error {\ne.mu.Lock()\ndefer e.mu.Unlock()\n@@ -363,7 +363,7 @@ func (e *endpoint) dupTentativeAddrDetected(addr tcpip.Address) tcpip.Error {\n// If the address is a SLAAC address, do not invalidate its SLAAC prefix as an\n// attempt will be made to generate a new address for it.\n- if err := e.removePermanentEndpointLocked(addressEndpoint, false /* allowSLAACInvalidation */, &stack.DADDupAddrDetected{}); err != nil {\n+ if err := e.removePermanentEndpointLocked(addressEndpoint, false /* allowSLAACInvalidation */, &stack.DADDupAddrDetected{HolderLinkAddress: holderLinkAddr}); err != nil {\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/ndp_test.go", "new_path": "pkg/tcpip/stack/ndp_test.go", "diff": "@@ -343,7 +343,7 @@ func TestDADDisabled(t *testing.T) {\nselect {\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID, addr1, &stack.DADSucceeded{}); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\nt.Fatal(\"expected DAD event\")\n@@ -490,7 +490,7 @@ func TestDADResolve(t *testing.T) {\nt.Fatal(\"timed out waiting for DAD resolution\")\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID, addr1, &stack.DADSucceeded{}); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\n}\nif err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, addrWithPrefix); err != nil {\n@@ -599,6 +599,7 @@ func TestDADFail(t *testing.T) {\nname string\nrxPkt func(e *channel.Endpoint, tgt tcpip.Address)\ngetStat func(s tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter\n+ expectedHolderLinkAddress tcpip.LinkAddress\n}{\n{\nname: \"RxSolicit\",\n@@ -606,6 +607,7 @@ func TestDADFail(t *testing.T) {\ngetStat: func(s tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\nreturn s.NeighborSolicit\n},\n+ expectedHolderLinkAddress: \"\",\n},\n{\nname: \"RxAdvert\",\n@@ -640,6 +642,7 @@ func TestDADFail(t *testing.T) {\ngetStat: func(s tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {\nreturn s.NeighborAdvert\n},\n+ expectedHolderLinkAddress: linkAddr1,\n},\n}\n@@ -689,8 +692,8 @@ func TestDADFail(t *testing.T) {\n// something is wrong.\nt.Fatal(\"timed out waiting for DAD failure\")\ncase e := 
<-ndpDisp.dadC:\n- if diff := checkDADEvent(e, nicID, addr1, &stack.DADDupAddrDetected{}); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ if diff := checkDADEvent(e, nicID, addr1, &stack.DADDupAddrDetected{HolderLinkAddress: test.expectedHolderLinkAddress}); diff != \"\" {\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\n}\nif err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {\n@@ -789,7 +792,7 @@ func TestDADStop(t *testing.T) {\nt.Fatal(\"timed out waiting for DAD failure\")\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID, addr1, &stack.DADAborted{}); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\n}\n@@ -851,7 +854,7 @@ func TestSetNDPConfigurations(t *testing.T) {\nselect {\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID, addr, &stack.DADSucceeded{}); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\nt.Fatalf(\"expected DAD event for %s\", addr)\n@@ -943,7 +946,7 @@ func TestSetNDPConfigurations(t *testing.T) {\nt.Fatal(\"timed out waiting for DAD resolution\")\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID1, addr1, &stack.DADSucceeded{}); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\n}\nif err := checkGetMainNICAddress(s, nicID1, header.IPv6ProtocolNumber, addrWithPrefix1); err != nil {\n@@ -1962,7 +1965,7 @@ func TestAutoGenTempAddr(t *testing.T) {\nselect {\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID, addr, &stack.DADSucceeded{}); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\ncase <-time.After(time.Duration(test.dupAddrTransmits)*test.retransmitTimer + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for DAD event\")\n@@ -2168,7 +2171,7 @@ func TestNoAutoGenTempAddrForLinkLocal(t *testing.T) {\nselect {\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID, llAddr1, &stack.DADSucceeded{}); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\ncase <-time.After(time.Duration(test.dupAddrTransmits)*test.retransmitTimer + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for DAD event\")\n@@ -2256,7 +2259,7 @@ func TestNoAutoGenTempAddrWithoutStableAddr(t *testing.T) {\nselect {\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID, addr.Address, &stack.DADSucceeded{}); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\ncase <-time.After(dadTransmits*retransmitTimer + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for DAD event\")\n@@ -2722,7 +2725,7 @@ func TestMixedSLAACAddrConflictRegen(t *testing.T) {\nclock.Advance(dupAddrTransmits * retransmitTimer)\nif diff := checkDADEvent(<-ndpDisp.dadC, nicID, addr, &stack.DADSucceeded{}); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\n}\n@@ -2753,7 +2756,7 @@ func TestMixedSLAACAddrConflictRegen(t *testing.T) {\nselect {\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, 
nicID, addr.Address, &stack.DADDupAddrDetected{}); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\nt.Fatal(\"expected DAD event\")\n@@ -3857,7 +3860,7 @@ func TestAutoGenAddrInResponseToDADConflicts(t *testing.T) {\nselect {\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID, addr, res); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\nt.Fatal(\"expected DAD event\")\n@@ -3870,7 +3873,7 @@ func TestAutoGenAddrInResponseToDADConflicts(t *testing.T) {\nselect {\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID, addr, res); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\ncase <-time.After(dadTransmits*retransmitTimer + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for DAD event\")\n@@ -4143,7 +4146,7 @@ func TestAutoGenAddrWithEUI64IIDNoDADRetries(t *testing.T) {\nselect {\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID, addr.Address, &stack.DADDupAddrDetected{}); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\nt.Fatal(\"expected DAD event\")\n@@ -4242,7 +4245,7 @@ func TestAutoGenAddrContinuesLifetimesAfterRetry(t *testing.T) {\nselect {\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID, addr.Address, &stack.DADDupAddrDetected{}); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\ndefault:\nt.Fatal(\"expected DAD event\")\n@@ -4254,7 +4257,7 @@ func TestAutoGenAddrContinuesLifetimesAfterRetry(t *testing.T) {\nselect {\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID, addr.Address, &stack.DADSucceeded{}); diff != \"\" {\n- t.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n+ t.Errorf(\"DAD event mismatch (-want +got):\\n%s\", diff)\n}\ncase <-time.After(dadTransmits*retransmitTimer + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for DAD event\")\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/registration.go", "new_path": "pkg/tcpip/stack/registration.go", "diff": "@@ -884,7 +884,11 @@ func (*DADAborted) isDADResult() {}\nvar _ DADResult = (*DADDupAddrDetected)(nil)\n// DADDupAddrDetected indicates DAD detected a duplicate address.\n-type DADDupAddrDetected struct{}\n+type DADDupAddrDetected struct {\n+ // HolderLinkAddress is the link address of the node that holds the duplicate\n+ // address.\n+ HolderLinkAddress tcpip.LinkAddress\n+}\nfunc (*DADDupAddrDetected) isDADResult() {}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/integration/link_resolution_test.go", "new_path": "pkg/tcpip/tests/integration/link_resolution_test.go", "diff": "@@ -1192,14 +1192,14 @@ func TestDAD(t *testing.T) {\nnetProto: ipv4.ProtocolNumber,\ndadNetProto: arp.ProtocolNumber,\nremoteAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,\n- expectedResult: &stack.DADDupAddrDetected{},\n+ expectedResult: &stack.DADDupAddrDetected{HolderLinkAddress: utils.LinkAddr2},\n},\n{\nname: \"IPv6 duplicate address\",\nnetProto: ipv6.ProtocolNumber,\ndadNetProto: ipv6.ProtocolNumber,\nremoteAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,\n- expectedResult: &stack.DADDupAddrDetected{},\n+ expectedResult: 
&stack.DADDupAddrDetected{HolderLinkAddress: utils.LinkAddr2},\n},\n{\nname: \"IPv4 no duplicate address\",\n" } ]
Go
Apache License 2.0
google/gvisor
Include duplicate address holder info in DADResult The integrator may be interested in who owns a duplicate address so pass this information (if available) along. Fixes #5605. PiperOrigin-RevId: 361213556
260,004
05.03.2021 15:04:24
28,800
2db8f748593c09a283f5ce229bb6bfedc92849d1
Fix network protocol/endpoint lock order violation

IPv4 would violate the lock ordering of protocol > endpoint when closing network endpoints by calling `ipv4.protocol.forgetEndpoint` while holding the network endpoint lock.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/BUILD", "new_path": "pkg/tcpip/network/ipv4/BUILD", "diff": "@@ -32,12 +32,14 @@ go_test(\n\"ipv4_test.go\",\n],\ndeps = [\n+ \"//pkg/sync\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n\"//pkg/tcpip/checker\",\n\"//pkg/tcpip/faketime\",\n\"//pkg/tcpip/header\",\n\"//pkg/tcpip/link/channel\",\n+ \"//pkg/tcpip/link/loopback\",\n\"//pkg/tcpip/link/sniffer\",\n\"//pkg/tcpip/network/arp\",\n\"//pkg/tcpip/network/internal/testutil\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/ipv4.go", "new_path": "pkg/tcpip/network/ipv4/ipv4.go", "diff": "@@ -899,10 +899,9 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {\n// Close cleans up resources associated with the endpoint.\nfunc (e *endpoint) Close() {\ne.mu.Lock()\n- defer e.mu.Unlock()\n-\ne.disableLocked()\ne.mu.addressableEndpointState.Cleanup()\n+ e.mu.Unlock()\ne.protocol.forgetEndpoint(e.nic.ID())\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/ipv4_test.go", "new_path": "pkg/tcpip/network/ipv4/ipv4_test.go", "diff": "@@ -26,12 +26,14 @@ import (\n\"time\"\n\"github.com/google/go-cmp/cmp\"\n+ \"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/checker\"\n\"gvisor.dev/gvisor/pkg/tcpip/faketime\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/channel\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/link/loopback\"\n\"gvisor.dev/gvisor/pkg/tcpip/link/sniffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/network/arp\"\n\"gvisor.dev/gvisor/pkg/tcpip/network/internal/testutil\"\n@@ -2985,3 +2987,120 @@ func TestPacketQueing(t *testing.T) {\n})\n}\n}\n+\n+// TestCloseLocking test that lock ordering is followed when closing an\n+// endpoint.\n+func TestCloseLocking(t *testing.T) {\n+ const (\n+ nicID1 = 1\n+ nicID2 = 2\n+\n+ src = tcpip.Address(\"\\x10\\x00\\x00\\x01\")\n+ dst = tcpip.Address(\"\\x10\\x00\\x00\\x02\")\n+\n+ iterations = 1000\n+ )\n+\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},\n+ TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},\n+ })\n+\n+ // Perform NAT so that the endoint tries to search for a sibling endpoint\n+ // which ends up taking the protocol and endpoint lock (in that order).\n+ table := stack.Table{\n+ Rules: []stack.Rule{\n+ {Target: &stack.AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},\n+ {Target: &stack.AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},\n+ {Target: &stack.RedirectTarget{Port: 5, NetworkProtocol: header.IPv4ProtocolNumber}},\n+ {Target: &stack.AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},\n+ {Target: &stack.ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}},\n+ },\n+ BuiltinChains: [stack.NumHooks]int{\n+ stack.Prerouting: 0,\n+ stack.Input: 1,\n+ stack.Forward: stack.HookUnset,\n+ stack.Output: 2,\n+ stack.Postrouting: 3,\n+ },\n+ Underflows: [stack.NumHooks]int{\n+ stack.Prerouting: 0,\n+ stack.Input: 1,\n+ stack.Forward: stack.HookUnset,\n+ stack.Output: 2,\n+ stack.Postrouting: 3,\n+ },\n+ }\n+ if err := s.IPTables().ReplaceTable(stack.NATID, table, false /* ipv6 */); err != nil {\n+ t.Fatalf(\"s.IPTables().ReplaceTable(...): %s\", err)\n+ }\n+\n+ e := channel.New(0, defaultMTU, \"\")\n+ if err := s.CreateNIC(nicID1, e); err != nil {\n+ t.Fatalf(\"CreateNIC(%d, _): %s\", nicID1, err)\n+ }\n+\n+ if err := s.AddAddress(nicID1, ipv4.ProtocolNumber, src); err != nil {\n+ 
t.Fatalf(\"AddAddress(%d, %d, %s) failed: %s\", nicID1, ipv4.ProtocolNumber, src, err)\n+ }\n+\n+ s.SetRouteTable([]tcpip.Route{{\n+ Destination: header.IPv4EmptySubnet,\n+ NIC: nicID1,\n+ }})\n+\n+ var wq waiter.Queue\n+ ep, err := s.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &wq)\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+ defer ep.Close()\n+\n+ addr := tcpip.FullAddress{NIC: nicID1, Addr: dst, Port: 53}\n+ if err := ep.Connect(addr); err != nil {\n+ t.Errorf(\"ep.Connect(%#v): %s\", addr, err)\n+ }\n+\n+ var wg sync.WaitGroup\n+ defer wg.Wait()\n+\n+ // Writing packets should trigger NAT which requires the stack to search the\n+ // protocol for network endpoints with the destination address.\n+ //\n+ // Creating and removing interfaces should modify the protocol and endpoint\n+ // which requires taking the locks of each.\n+ //\n+ // We expect the protocol > endpoint lock ordering to be followed here.\n+ wg.Add(2)\n+ go func() {\n+ defer wg.Done()\n+\n+ data := []byte{1, 2, 3, 4}\n+\n+ for i := 0; i < iterations; i++ {\n+ var r bytes.Reader\n+ r.Reset(data)\n+ if n, err := ep.Write(&r, tcpip.WriteOptions{}); err != nil {\n+ t.Errorf(\"ep.Write(_, _): %s\", err)\n+ return\n+ } else if want := int64(len(data)); n != want {\n+ t.Errorf(\"got ep.Write(_, _) = (%d, _), want = (%d, _)\", n, want)\n+ return\n+ }\n+ }\n+ }()\n+ go func() {\n+ defer wg.Done()\n+\n+ for i := 0; i < iterations; i++ {\n+ if err := s.CreateNIC(nicID2, loopback.New()); err != nil {\n+ t.Errorf(\"CreateNIC(%d, _): %s\", nicID2, err)\n+ return\n+ }\n+ if err := s.RemoveNIC(nicID2); err != nil {\n+ t.Errorf(\"RemoveNIC(%d): %s\", nicID2, err)\n+ return\n+ }\n+ }\n+ }()\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix network protocol/endpoint lock order violation IPv4 would violate the lock ordering of protocol > endpoint when closing network endpoints by calling `ipv4.protocol.forgetEndpoint` while holding the network endpoint lock. PiperOrigin-RevId: 361232817
259,951
05.03.2021 16:50:00
28,800
fb733cdb8f4050fbc8ad083ea05c3e98b99b9acc
Increment the counters when sending Echo requests Updates
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/icmp/endpoint.go", "new_path": "pkg/tcpip/transport/icmp/endpoint.go", "diff": "@@ -26,6 +26,8 @@ import (\n\"gvisor.dev/gvisor/pkg/waiter\"\n)\n+// TODO(https://gvisor.dev/issues/5623): Unit test this package.\n+\n// +stateify savable\ntype icmpPacket struct {\nicmpPacketEntry\n@@ -414,6 +416,11 @@ func send4(r *stack.Route, ident uint16, data buffer.View, ttl uint8, owner tcpi\nreturn &tcpip.ErrInvalidEndpointState{}\n}\n+ // Because this icmp endpoint is implemented in the transport layer, we can\n+ // only increment the 'stack-wide' stats but we can't increment the\n+ // 'per-NetworkEndpoint' stats.\n+ sentStat := r.Stats().ICMP.V4.PacketsSent.EchoRequest\n+\nicmpv4.SetChecksum(0)\nicmpv4.SetChecksum(^header.Checksum(icmpv4, header.Checksum(data, 0)))\n@@ -422,7 +429,14 @@ func send4(r *stack.Route, ident uint16, data buffer.View, ttl uint8, owner tcpi\nif ttl == 0 {\nttl = r.DefaultTTL()\n}\n- return r.WritePacket(nil /* gso */, stack.NetworkHeaderParams{Protocol: header.ICMPv4ProtocolNumber, TTL: ttl, TOS: stack.DefaultTOS}, pkt)\n+\n+ if err := r.WritePacket(nil /* gso */, stack.NetworkHeaderParams{Protocol: header.ICMPv4ProtocolNumber, TTL: ttl, TOS: stack.DefaultTOS}, pkt); err != nil {\n+ r.Stats().ICMP.V4.PacketsSent.Dropped.Increment()\n+ return err\n+ }\n+\n+ sentStat.Increment()\n+ return nil\n}\nfunc send6(r *stack.Route, ident uint16, data buffer.View, ttl uint8) tcpip.Error {\n@@ -444,6 +458,10 @@ func send6(r *stack.Route, ident uint16, data buffer.View, ttl uint8) tcpip.Erro\nif icmpv6.Type() != header.ICMPv6EchoRequest || icmpv6.Code() != 0 {\nreturn &tcpip.ErrInvalidEndpointState{}\n}\n+ // Because this icmp endpoint is implemented in the transport layer, we can\n+ // only increment the 'stack-wide' stats but we can't increment the\n+ // 'per-NetworkEndpoint' stats.\n+ sentStat := r.Stats().ICMP.V6.PacketsSent.EchoRequest\npkt.Data().AppendView(data)\ndataRange := pkt.Data().AsRange()\n@@ -458,7 +476,13 @@ func send6(r *stack.Route, ident uint16, data buffer.View, ttl uint8) tcpip.Erro\nif ttl == 0 {\nttl = r.DefaultTTL()\n}\n- return r.WritePacket(nil /* gso */, stack.NetworkHeaderParams{Protocol: header.ICMPv6ProtocolNumber, TTL: ttl, TOS: stack.DefaultTOS}, pkt)\n+\n+ if err := r.WritePacket(nil /* gso */, stack.NetworkHeaderParams{Protocol: header.ICMPv6ProtocolNumber, TTL: ttl, TOS: stack.DefaultTOS}, pkt); err != nil {\n+ r.Stats().ICMP.V6.PacketsSent.Dropped.Increment()\n+ }\n+\n+ sentStat.Increment()\n+ return nil\n}\n// checkV4MappedLocked determines the effective network protocol and converts\n" } ]
Go
Apache License 2.0
google/gvisor
Increment the counters when sending Echo requests Updates #5597 PiperOrigin-RevId: 361252003
259,992
08.03.2021 11:16:09
28,800
cabbbb373a62971684bf012a0c2164106395e051
Fix SocketInetLoopbackTest flakiness Remove part of test that was making it flaky. It runs for native only, so not really important since it's not testing gVisor. Before: After:
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_inet_loopback.cc", "new_path": "test/syscalls/linux/socket_inet_loopback.cc", "diff": "@@ -705,12 +705,6 @@ TEST_P(SocketInetLoopbackTest, TCPFinWait2Test_NoRandomSave) {\nds.reset();\n- if (!IsRunningOnGvisor()) {\n- ASSERT_THAT(\n- bind(conn_fd2.get(), reinterpret_cast<sockaddr*>(&conn_bound_addr),\n- conn_addrlen),\n- SyscallSucceeds());\n- }\nASSERT_THAT(RetryEINTR(connect)(conn_fd2.get(),\nreinterpret_cast<sockaddr*>(&conn_addr),\nconn_addrlen),\n" } ]
Go
Apache License 2.0
google/gvisor
Fix SocketInetLoopbackTest flakiness Remove part of test that was making it flaky. It runs for native only, so not really important since it's not testing gVisor. Before: http://sponge2/37557c41-298e-408d-9b54-50ba3d41e22f After: http://sponge2/7bca72be-cb9b-42f8-8c54-af4956c39455 PiperOrigin-RevId: 361611512
259,992
08.03.2021 14:46:03
28,800
1b9d45dbe8755914d07937ad348211f60ffcfc01
Run shards in a single sandbox

Run all tests (or a given test partition) in a single sandbox. Previously, each individual unit test executed in a new sandbox, which takes much longer to execute.

               Before       After
Syscall tests: 37m22.768s   14m5.272s
[ { "change_type": "MODIFY", "old_path": "test/perf/BUILD", "new_path": "test/perf/BUILD", "diff": "-load(\"//tools:defs.bzl\", \"more_shards\")\nload(\"//test/runner:defs.bzl\", \"syscall_test\")\npackage(licenses = [\"notice\"])\n@@ -38,7 +37,6 @@ syscall_test(\nsyscall_test(\nsize = \"enormous\",\ndebug = False,\n- shard_count = more_shards,\ntags = [\"nogotsan\"],\ntest = \"//test/perf/linux:getdents_benchmark\",\n)\n" }, { "change_type": "MODIFY", "old_path": "test/runner/gtest/gtest.go", "new_path": "test/runner/gtest/gtest.go", "diff": "@@ -35,6 +35,39 @@ var (\nfilterBenchmarkFlag = \"--benchmark_filter\"\n)\n+// BuildTestArgs builds arguments to be passed to the test binary to execute\n+// only the test cases in `indices`.\n+func BuildTestArgs(indices []int, testCases []TestCase) []string {\n+ var testFilter, benchFilter string\n+ for _, tci := range indices {\n+ tc := testCases[tci]\n+ if tc.all {\n+ // No argument will make all tests run.\n+ return nil\n+ }\n+ if tc.benchmark {\n+ if len(benchFilter) > 0 {\n+ benchFilter += \"|\"\n+ }\n+ benchFilter += \"^\" + tc.Name + \"$\"\n+ } else {\n+ if len(testFilter) > 0 {\n+ testFilter += \":\"\n+ }\n+ testFilter += tc.FullName()\n+ }\n+ }\n+\n+ var args []string\n+ if len(testFilter) > 0 {\n+ args = append(args, fmt.Sprintf(\"%s=%s\", filterTestFlag, testFilter))\n+ }\n+ if len(benchFilter) > 0 {\n+ args = append(args, fmt.Sprintf(\"%s=%s\", filterBenchmarkFlag, benchFilter))\n+ }\n+ return args\n+}\n+\n// TestCase is a single gtest test case.\ntype TestCase struct {\n// Suite is the suite for this test.\n@@ -59,22 +92,6 @@ func (tc TestCase) FullName() string {\nreturn fmt.Sprintf(\"%s.%s\", tc.Suite, tc.Name)\n}\n-// Args returns arguments to be passed when invoking the test.\n-func (tc TestCase) Args() []string {\n- if tc.all {\n- return []string{} // No arguments.\n- }\n- if tc.benchmark {\n- return []string{\n- fmt.Sprintf(\"%s=^%s$\", filterBenchmarkFlag, tc.Name),\n- fmt.Sprintf(\"%s=\", filterTestFlag),\n- }\n- }\n- return []string{\n- fmt.Sprintf(\"%s=%s\", filterTestFlag, tc.FullName()),\n- }\n-}\n-\n// ParseTestCases calls a gtest test binary to list its test and returns a\n// slice with the name and suite of each test.\n//\n@@ -90,6 +107,7 @@ func ParseTestCases(testBin string, benchmarks bool, extraArgs ...string) ([]Tes\n// We failed to list tests with the given flags. 
Just\n// return something that will run the binary with no\n// flags, which should execute all tests.\n+ fmt.Printf(\"failed to get test list: %v\\n\", err)\nreturn []TestCase{\n{\nSuite: \"Default\",\n" }, { "change_type": "MODIFY", "old_path": "test/runner/runner.go", "new_path": "test/runner/runner.go", "diff": "@@ -26,7 +26,6 @@ import (\n\"path/filepath\"\n\"strings\"\n\"syscall\"\n- \"testing\"\n\"time\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n@@ -57,13 +56,82 @@ var (\nleakCheck = flag.Bool(\"leak-check\", false, \"check for reference leaks\")\n)\n+func main() {\n+ flag.Parse()\n+ if flag.NArg() != 1 {\n+ fatalf(\"test must be provided\")\n+ }\n+\n+ log.SetLevel(log.Info)\n+ if *debug {\n+ log.SetLevel(log.Debug)\n+ }\n+\n+ if *platform != \"native\" && *runscPath == \"\" {\n+ if err := testutil.ConfigureExePath(); err != nil {\n+ panic(err.Error())\n+ }\n+ *runscPath = specutils.ExePath\n+ }\n+\n+ // Make sure stdout and stderr are opened with O_APPEND, otherwise logs\n+ // from outside the sandbox can (and will) stomp on logs from inside\n+ // the sandbox.\n+ for _, f := range []*os.File{os.Stdout, os.Stderr} {\n+ flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)\n+ if err != nil {\n+ fatalf(\"error getting file flags for %v: %v\", f, err)\n+ }\n+ if flags&unix.O_APPEND == 0 {\n+ flags |= unix.O_APPEND\n+ if _, err := unix.FcntlInt(f.Fd(), unix.F_SETFL, flags); err != nil {\n+ fatalf(\"error setting file flags for %v: %v\", f, err)\n+ }\n+ }\n+ }\n+\n+ // Resolve the absolute path for the binary.\n+ testBin, err := filepath.Abs(flag.Args()[0])\n+ if err != nil {\n+ fatalf(\"Abs(%q) failed: %v\", flag.Args()[0], err)\n+ }\n+\n+ // Get all test cases in each binary.\n+ testCases, err := gtest.ParseTestCases(testBin, true)\n+ if err != nil {\n+ fatalf(\"ParseTestCases(%q) failed: %v\", testBin, err)\n+ }\n+\n+ // Get subset of tests corresponding to shard.\n+ indices, err := testutil.TestIndicesForShard(len(testCases))\n+ if err != nil {\n+ fatalf(\"TestsForShard() failed: %v\", err)\n+ }\n+ if len(indices) == 0 {\n+ log.Warningf(\"No tests to run in this shard\")\n+ return\n+ }\n+ args := gtest.BuildTestArgs(indices, testCases)\n+\n+ switch *platform {\n+ case \"native\":\n+ if err := runTestCaseNative(testBin, args); err != nil {\n+ fatalf(err.Error())\n+ }\n+ default:\n+ if err := runTestCaseRunsc(testBin, args); err != nil {\n+ fatalf(err.Error())\n+ }\n+ }\n+}\n+\n// runTestCaseNative runs the test case directly on the host machine.\n-func runTestCaseNative(testBin string, tc gtest.TestCase, t *testing.T) {\n+func runTestCaseNative(testBin string, args []string) error {\n// These tests might be running in parallel, so make sure they have a\n// unique test temp dir.\ntmpDir, err := ioutil.TempDir(testutil.TmpDir(), \"\")\nif err != nil {\n- t.Fatalf(\"could not create temp dir: %v\", err)\n+ return fmt.Errorf(\"could not create temp dir: %v\", err)\n}\ndefer os.RemoveAll(tmpDir)\n@@ -84,12 +152,12 @@ func runTestCaseNative(testBin string, tc gtest.TestCase, t *testing.T) {\n}\n// Remove shard env variables so that the gunit binary does not try to\n// interpret them.\n- env = filterEnv(env, []string{\"TEST_SHARD_INDEX\", \"TEST_TOTAL_SHARDS\", \"GTEST_SHARD_INDEX\", \"GTEST_TOTAL_SHARDS\"})\n+ env = filterEnv(env, \"TEST_SHARD_INDEX\", \"TEST_TOTAL_SHARDS\", \"GTEST_SHARD_INDEX\", \"GTEST_TOTAL_SHARDS\")\nif *addUDSTree {\nsocketDir, cleanup, err := uds.CreateSocketTree(\"/tmp\")\nif err != nil {\n- t.Fatalf(\"failed to create socket tree: %v\", err)\n+ 
return fmt.Errorf(\"failed to create socket tree: %v\", err)\n}\ndefer cleanup()\n@@ -99,7 +167,7 @@ func runTestCaseNative(testBin string, tc gtest.TestCase, t *testing.T) {\nenv = append(env, \"TEST_UDS_ATTACH_TREE=\"+socketDir)\n}\n- cmd := exec.Command(testBin, tc.Args()...)\n+ cmd := exec.Command(testBin, args...)\ncmd.Env = env\ncmd.Stdout = os.Stdout\ncmd.Stderr = os.Stderr\n@@ -115,8 +183,9 @@ func runTestCaseNative(testBin string, tc gtest.TestCase, t *testing.T) {\nif err := cmd.Run(); err != nil {\nws := err.(*exec.ExitError).Sys().(unix.WaitStatus)\n- t.Errorf(\"test %q exited with status %d, want 0\", tc.FullName(), ws.ExitStatus())\n+ return fmt.Errorf(\"test exited with status %d, want 0\", ws.ExitStatus())\n}\n+ return nil\n}\n// runRunsc runs spec in runsc in a standard test configuration.\n@@ -124,7 +193,7 @@ func runTestCaseNative(testBin string, tc gtest.TestCase, t *testing.T) {\n// runsc logs will be saved to a path in TEST_UNDECLARED_OUTPUTS_DIR.\n//\n// Returns an error if the sandboxed application exits non-zero.\n-func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {\n+func runRunsc(spec *specs.Spec) error {\nbundleDir, cleanup, err := testutil.SetupBundleDir(spec)\nif err != nil {\nreturn fmt.Errorf(\"SetupBundleDir failed: %v\", err)\n@@ -137,9 +206,8 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {\n}\ndefer cleanup()\n- name := tc.FullName()\nid := testutil.RandomContainerID()\n- log.Infof(\"Running test %q in container %q\", name, id)\n+ log.Infof(\"Running test in container %q\", id)\nspecutils.LogSpec(spec)\nargs := []string{\n@@ -175,13 +243,8 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {\nargs = append(args, \"-ref-leak-mode=log-names\")\n}\n- testLogDir := \"\"\n- if undeclaredOutputsDir, ok := unix.Getenv(\"TEST_UNDECLARED_OUTPUTS_DIR\"); ok {\n- // Create log directory dedicated for this test.\n- testLogDir = filepath.Join(undeclaredOutputsDir, strings.Replace(name, \"/\", \"_\", -1))\n- if err := os.MkdirAll(testLogDir, 0755); err != nil {\n- return fmt.Errorf(\"could not create test dir: %v\", err)\n- }\n+ testLogDir := os.Getenv(\"TEST_UNDECLARED_OUTPUTS_DIR\")\n+ if len(testLogDir) > 0 {\ndebugLogDir, err := ioutil.TempDir(testLogDir, \"runsc\")\nif err != nil {\nreturn fmt.Errorf(\"could not create temp dir: %v\", err)\n@@ -226,7 +289,7 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {\nif !ok {\nreturn\n}\n- log.Warningf(\"%s: Got signal: %v\", name, s)\n+ log.Warningf(\"Got signal: %v\", s)\ndone := make(chan bool, 1)\ndArgs := append([]string{}, args...)\ndArgs = append(dArgs, \"-alsologtostderr=true\", \"debug\", \"--stacks\", id)\n@@ -259,7 +322,7 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {\nif err == nil && len(testLogDir) > 0 {\n// If the test passed, then we erase the log directory. 
This speeds up\n// uploading logs in continuous integration & saves on disk space.\n- os.RemoveAll(testLogDir)\n+ _ = os.RemoveAll(testLogDir)\n}\nreturn err\n@@ -314,10 +377,10 @@ func setupUDSTree(spec *specs.Spec) (cleanup func(), err error) {\n}\n// runsTestCaseRunsc runs the test case in runsc.\n-func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {\n+func runTestCaseRunsc(testBin string, args []string) error {\n// Run a new container with the test executable and filter for the\n// given test suite and name.\n- spec := testutil.NewSpecWithArgs(append([]string{testBin}, tc.Args()...)...)\n+ spec := testutil.NewSpecWithArgs(append([]string{testBin}, args...)...)\n// Mark the root as writeable, as some tests attempt to\n// write to the rootfs, and expect EACCES, not EROFS.\n@@ -343,12 +406,12 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {\n// users, so make sure it is world-accessible.\ntmpDir, err := ioutil.TempDir(testutil.TmpDir(), \"\")\nif err != nil {\n- t.Fatalf(\"could not create temp dir: %v\", err)\n+ return fmt.Errorf(\"could not create temp dir: %v\", err)\n}\ndefer os.RemoveAll(tmpDir)\nif err := os.Chmod(tmpDir, 0777); err != nil {\n- t.Fatalf(\"could not chmod temp dir: %v\", err)\n+ return fmt.Errorf(\"could not chmod temp dir: %v\", err)\n}\n// \"/tmp\" is not replaced with a tmpfs mount inside the sandbox\n@@ -368,13 +431,12 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {\n// Set environment variables that indicate we are running in gVisor with\n// the given platform, network, and filesystem stack.\n- platformVar := \"TEST_ON_GVISOR\"\n- networkVar := \"GVISOR_NETWORK\"\n- env := append(os.Environ(), platformVar+\"=\"+*platform, networkVar+\"=\"+*network)\n- vfsVar := \"GVISOR_VFS\"\n+ env := []string{\"TEST_ON_GVISOR=\" + *platform, \"GVISOR_NETWORK=\" + *network}\n+ env = append(env, os.Environ()...)\n+ const vfsVar = \"GVISOR_VFS\"\nif *vfs2 {\nenv = append(env, vfsVar+\"=VFS2\")\n- fuseVar := \"FUSE_ENABLED\"\n+ const fuseVar = \"FUSE_ENABLED\"\nif *fuse {\nenv = append(env, fuseVar+\"=TRUE\")\n} else {\n@@ -386,11 +448,11 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {\n// Remove shard env variables so that the gunit binary does not try to\n// interpret them.\n- env = filterEnv(env, []string{\"TEST_SHARD_INDEX\", \"TEST_TOTAL_SHARDS\", \"GTEST_SHARD_INDEX\", \"GTEST_TOTAL_SHARDS\"})\n+ env = filterEnv(env, \"TEST_SHARD_INDEX\", \"TEST_TOTAL_SHARDS\", \"GTEST_SHARD_INDEX\", \"GTEST_TOTAL_SHARDS\")\n// Set TEST_TMPDIR to /tmp, as some of the syscall tests require it to\n// be backed by tmpfs.\n- env = filterEnv(env, []string{\"TEST_TMPDIR\"})\n+ env = filterEnv(env, \"TEST_TMPDIR\")\nenv = append(env, fmt.Sprintf(\"TEST_TMPDIR=%s\", testTmpDir))\nspec.Process.Env = env\n@@ -398,18 +460,19 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {\nif *addUDSTree {\ncleanup, err := setupUDSTree(spec)\nif err != nil {\n- t.Fatalf(\"error creating UDS tree: %v\", err)\n+ return fmt.Errorf(\"error creating UDS tree: %v\", err)\n}\ndefer cleanup()\n}\n- if err := runRunsc(tc, spec); err != nil {\n- t.Errorf(\"test %q failed with error %v, want nil\", tc.FullName(), err)\n+ if err := runRunsc(spec); err != nil {\n+ return fmt.Errorf(\"test failed with error %v, want nil\", err)\n}\n+ return nil\n}\n// filterEnv returns an environment with the excluded variables removed.\n-func filterEnv(env, exclude []string) []string {\n+func filterEnv(env []string, 
exclude ...string) []string {\nvar out []string\nfor _, kv := range env {\nok := true\n@@ -430,82 +493,3 @@ func fatalf(s string, args ...interface{}) {\nfmt.Fprintf(os.Stderr, s+\"\\n\", args...)\nos.Exit(1)\n}\n-\n-func matchString(a, b string) (bool, error) {\n- return a == b, nil\n-}\n-\n-func main() {\n- flag.Parse()\n- if flag.NArg() != 1 {\n- fatalf(\"test must be provided\")\n- }\n- testBin := flag.Args()[0] // Only argument.\n-\n- log.SetLevel(log.Info)\n- if *debug {\n- log.SetLevel(log.Debug)\n- }\n-\n- if *platform != \"native\" && *runscPath == \"\" {\n- if err := testutil.ConfigureExePath(); err != nil {\n- panic(err.Error())\n- }\n- *runscPath = specutils.ExePath\n- }\n-\n- // Make sure stdout and stderr are opened with O_APPEND, otherwise logs\n- // from outside the sandbox can (and will) stomp on logs from inside\n- // the sandbox.\n- for _, f := range []*os.File{os.Stdout, os.Stderr} {\n- flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)\n- if err != nil {\n- fatalf(\"error getting file flags for %v: %v\", f, err)\n- }\n- if flags&unix.O_APPEND == 0 {\n- flags |= unix.O_APPEND\n- if _, err := unix.FcntlInt(f.Fd(), unix.F_SETFL, flags); err != nil {\n- fatalf(\"error setting file flags for %v: %v\", f, err)\n- }\n- }\n- }\n-\n- // Get all test cases in each binary.\n- testCases, err := gtest.ParseTestCases(testBin, true)\n- if err != nil {\n- fatalf(\"ParseTestCases(%q) failed: %v\", testBin, err)\n- }\n-\n- // Get subset of tests corresponding to shard.\n- indices, err := testutil.TestIndicesForShard(len(testCases))\n- if err != nil {\n- fatalf(\"TestsForShard() failed: %v\", err)\n- }\n-\n- // Resolve the absolute path for the binary.\n- testBin, err = filepath.Abs(testBin)\n- if err != nil {\n- fatalf(\"Abs() failed: %v\", err)\n- }\n-\n- // Run the tests.\n- var tests []testing.InternalTest\n- for _, tci := range indices {\n- // Capture tc.\n- tc := testCases[tci]\n- tests = append(tests, testing.InternalTest{\n- Name: fmt.Sprintf(\"%s_%s\", tc.Suite, tc.Name),\n- F: func(t *testing.T) {\n- if *platform == \"native\" {\n- // Run the test case on host.\n- runTestCaseNative(testBin, tc, t)\n- } else {\n- // Run the test case in runsc.\n- runTestCaseRunsc(testBin, tc, t)\n- }\n- },\n- })\n- }\n-\n- testing.Main(matchString, tests, nil, nil)\n-}\n" } ]
Go
Apache License 2.0
google/gvisor
Run shards in a single sandbox Run all tests (or a given test partition) in a single sandbox. Previously, each individual unit test executed in a new sandbox, which takes much longer to execute. Before After Syscall tests: 37m22.768s 14m5.272s PiperOrigin-RevId: 361661726
259,907
08.03.2021 15:49:27
28,800
333e489763cf741035cae7d0b425f22622fea3de
[lisa] Do not generate any tests for dynamic types. The dynamic type user defines the marshalling logic, so we don't need to test for things like alignment, absence of slices, etc. For dynamic types, the go_marshal generator just generates the missing methods required to implement marshal.Marshallable.
[ { "change_type": "MODIFY", "old_path": "tools/go_marshal/gomarshal/generator.go", "new_path": "tools/go_marshal/gomarshal/generator.go", "diff": "@@ -427,7 +427,7 @@ func (g *Generator) generateOne(t *marshallableType, fset *token.FileSet) *inter\n// implementations type t.\nfunc (g *Generator) generateOneTestSuite(t *marshallableType) *testGenerator {\ni := newTestGenerator(t.spec, t.recv)\n- i.emitTests(t.slice, t.dynamic)\n+ i.emitTests(t.slice)\nreturn i\n}\n@@ -488,9 +488,13 @@ func (g *Generator) Run() error {\npanic(fmt.Sprintf(\"Generated code for '%s' referenced a non-existent import with local name '%s'. Either go-marshal needs to add an import to the generated file, or a package in an input source file has a package name differ from the final component of its path, which go-marshal doesn't know how to detect; use an import alias to work around this limitation.\", impl.typeName(), name))\n}\n}\n+ // Do not generate tests for dynamic types because they inherently\n+ // violate some go_marshal requirements.\n+ if !t.dynamic {\nts = append(ts, g.generateOneTestSuite(t))\n}\n}\n+ }\n// Write output file header. These include things like package name and\n// import statements.\n" }, { "change_type": "MODIFY", "old_path": "tools/go_marshal/gomarshal/generator_tests.go", "new_path": "tools/go_marshal/gomarshal/generator_tests.go", "diff": "@@ -216,16 +216,12 @@ func (g *testGenerator) emitTestSizeBytesOnTypedNilPtr() {\n})\n}\n-func (g *testGenerator) emitTests(slice *sliceAPI, isDynamic bool) {\n+func (g *testGenerator) emitTests(slice *sliceAPI) {\ng.emitTestNonZeroSize()\ng.emitTestSuspectAlignment()\n- if !isDynamic {\n- // Do not test these for dynamic structs because they violate some\n- // assumptions that these tests make.\ng.emitTestMarshalUnmarshalPreservesData()\ng.emitTestWriteToUnmarshalPreservesData()\ng.emitTestSizeBytesOnTypedNilPtr()\n- }\nif slice != nil {\ng.emitTestMarshalUnmarshalSlicePreservesData(slice)\n" } ]
Go
Apache License 2.0
google/gvisor
[lisa] Do not generate any tests for dynamic types. The dynamic type user defines the marshalling logic, so we don't need to test for things like alignment, absence of slices, etc. For dynamic types, the go_marshal generator just generates the missing methods required to implement marshal.Marshallable. PiperOrigin-RevId: 361676311
259,992
08.03.2021 16:55:22
28,800
3c4485966c170850bb677efc88de4c0ecaac1358
Fix proc test flakiness

Thread from earlier test can show up in `/proc/self/tasks` while the thread tears down. Account for that when searching for procs for the first time in the test.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc.cc", "new_path": "test/syscalls/linux/proc.cc", "diff": "@@ -2162,7 +2162,13 @@ class BlockingChild {\nreturn tid_;\n}\n- void Join() { Stop(); }\n+ void Join() {\n+ {\n+ absl::MutexLock ml(&mu_);\n+ stop_ = true;\n+ }\n+ thread_.Join();\n+ }\nprivate:\nvoid Start() {\n@@ -2172,11 +2178,6 @@ class BlockingChild {\nmu_.Await(absl::Condition(&stop_));\n}\n- void Stop() {\n- absl::MutexLock ml(&mu_);\n- stop_ = true;\n- }\n-\nmutable absl::Mutex mu_;\nbool stop_ ABSL_GUARDED_BY(mu_) = false;\npid_t tid_;\n@@ -2190,16 +2191,18 @@ class BlockingChild {\nTEST(ProcTask, NewThreadAppears) {\nauto initial = ASSERT_NO_ERRNO_AND_VALUE(ListDir(\"/proc/self/task\", false));\nBlockingChild child1;\n- EXPECT_NO_ERRNO(DirContainsExactly(\"/proc/self/task\",\n- TaskFiles(initial, {child1.Tid()})));\n+ // Use Eventually* in case a proc from ealier test is still tearing down.\n+ EXPECT_NO_ERRNO(EventuallyDirContainsExactly(\n+ \"/proc/self/task\", TaskFiles(initial, {child1.Tid()})));\n}\nTEST(ProcTask, KilledThreadsDisappear) {\nauto initial = ASSERT_NO_ERRNO_AND_VALUE(ListDir(\"/proc/self/task/\", false));\nBlockingChild child1;\n- EXPECT_NO_ERRNO(DirContainsExactly(\"/proc/self/task\",\n- TaskFiles(initial, {child1.Tid()})));\n+ // Use Eventually* in case a proc from ealier test is still tearing down.\n+ EXPECT_NO_ERRNO(EventuallyDirContainsExactly(\n+ \"/proc/self/task\", TaskFiles(initial, {child1.Tid()})));\n// Stat child1's task file. Regression test for b/32097707.\nstruct stat statbuf;\n" } ]
Go
Apache License 2.0
google/gvisor
Fix proc test flakiness Thread from earlier test can show up in `/proc/self/tasks` while the thread tears down. Account for that when searching for procs for the first time in the test. PiperOrigin-RevId: 361689673
259,992
09.03.2021 20:10:16
28,800
e0e04814b40f1d4077d1cf6001395bd6a919b288
Fix invalid interface conversion in runner

panic: interface conversion: interface {} is syscall.WaitStatus, not unix.WaitStatus

goroutine 1 [running]:
main.runTestCaseNative(0xc0001fc000, 0xe3, 0xc000119b60, 0x1, 0x1, 0x0, 0x0)
	test/runner/runner.go:185 +0xa94
main.main()
	test/runner/runner.go:118 +0x745
[ { "change_type": "MODIFY", "old_path": "runsc/specutils/namespace.go", "new_path": "runsc/specutils/namespace.go", "diff": "@@ -275,7 +275,7 @@ func MaybeRunAsRoot() error {\n}()\nif err := cmd.Wait(); err != nil {\nif exit, ok := err.(*exec.ExitError); ok {\n- if ws, ok := exit.Sys().(unix.WaitStatus); ok {\n+ if ws, ok := exit.Sys().(syscall.WaitStatus); ok {\nos.Exit(ws.ExitStatus())\n}\nlog.Warningf(\"No wait status provided, exiting with -1: %v\", err)\n" }, { "change_type": "MODIFY", "old_path": "test/runner/runner.go", "new_path": "test/runner/runner.go", "diff": "@@ -182,7 +182,7 @@ func runTestCaseNative(testBin string, args []string) error {\n}\nif err := cmd.Run(); err != nil {\n- ws := err.(*exec.ExitError).Sys().(unix.WaitStatus)\n+ ws := err.(*exec.ExitError).Sys().(syscall.WaitStatus)\nreturn fmt.Errorf(\"test exited with status %d, want 0\", ws.ExitStatus())\n}\nreturn nil\n" } ]
Go
Apache License 2.0
google/gvisor
Fix invalid interface conversion in runner panic: interface conversion: interface {} is syscall.WaitStatus, not unix.WaitStatus goroutine 1 [running]: main.runTestCaseNative(0xc0001fc000, 0xe3, 0xc000119b60, 0x1, 0x1, 0x0, 0x0) test/runner/runner.go:185 +0xa94 main.main() test/runner/runner.go:118 +0x745 PiperOrigin-RevId: 361957796
259,992
09.03.2021 20:51:53
28,800
14fc2ddd6cb2f25482ef0d16ec5e3ffda3dd0f6e
Update flock to v0.8.0
[ { "change_type": "MODIFY", "old_path": "WORKSPACE", "new_path": "WORKSPACE", "diff": "@@ -303,8 +303,8 @@ go_repository(\ngo_repository(\nname = \"com_github_gofrs_flock\",\nimportpath = \"github.com/gofrs/flock\",\n- sum = \"h1:JFTFz3HZTGmgMz4E1TabNBNJljROSYgja1b4l50FNVs=\",\n- version = \"v0.6.1-0.20180915234121-886344bea079\",\n+ sum = \"h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY=\",\n+ version = \"v0.8.0\",\n)\ngo_repository(\n" }, { "change_type": "MODIFY", "old_path": "go.mod", "new_path": "go.mod", "diff": "@@ -20,7 +20,7 @@ require (\ngithub.com/docker/docker v1.4.2-0.20191028175130-9e7d5ac5ea55 // indirect\ngithub.com/docker/go-connections v0.3.0 // indirect\ngithub.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect\n- github.com/gofrs/flock v0.6.1-0.20180915234121-886344bea079 // indirect\n+ github.com/gofrs/flock v0.8.0 // indirect\ngithub.com/gogo/googleapis v1.4.0 // indirect\ngithub.com/gogo/protobuf v1.3.1 // indirect\ngithub.com/golang/mock v1.4.4 // indirect\n" }, { "change_type": "MODIFY", "old_path": "go.sum", "new_path": "go.sum", "diff": "@@ -135,6 +135,8 @@ github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME=\ngithub.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=\ngithub.com/gofrs/flock v0.6.1-0.20180915234121-886344bea079 h1:JFTFz3HZTGmgMz4E1TabNBNJljROSYgja1b4l50FNVs=\ngithub.com/gofrs/flock v0.6.1-0.20180915234121-886344bea079/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=\n+github.com/gofrs/flock v0.8.0 h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY=\n+github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=\ngithub.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI=\ngithub.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=\ngithub.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=\n" }, { "change_type": "MODIFY", "old_path": "nogo.yaml", "new_path": "nogo.yaml", "diff": "@@ -46,8 +46,6 @@ global:\n- \"(field|method|struct|type) .* should be .*\"\n# Generated proto code sometimes duplicates imports with aliases.\n- \"duplicate import\"\n- # TODO(b/179817829): Upgrade to flock to v0.8.0.\n- - \"flock.NewFlock is deprecated: Use New instead\"\ninternal:\nsuppress:\n# We use ALL_CAPS for system definitions,\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/state_file.go", "new_path": "runsc/container/state_file.go", "diff": "@@ -245,7 +245,7 @@ type StateFile struct {\n// lock globally locks all locking operations for the container.\nfunc (s *StateFile) lock() error {\ns.once.Do(func() {\n- s.flock = flock.NewFlock(s.lockPath())\n+ s.flock = flock.New(s.lockPath())\n})\nif err := s.flock.Lock(); err != nil {\n" } ]
Go
Apache License 2.0
google/gvisor
Update flock to v0.8.0 PiperOrigin-RevId: 361962416
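Aside, not part of the commit record: the nogo.yaml suppression could be dropped because v0.8.0 callers use flock.New rather than the deprecated flock.NewFlock. A minimal sketch with a made-up lock path:

    package main

    import "github.com/gofrs/flock"

    func main() {
        // flock.New replaces the deprecated flock.NewFlock constructor; the
        // rest of the API (Lock/TryLock/Unlock) is unchanged.
        l := flock.New("/tmp/example.lock") // hypothetical lock file
        if err := l.Lock(); err != nil {
            panic(err)
        }
        defer l.Unlock()
    }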
259,962
11.03.2021 08:23:55
28,800
1020ac83f47cd6b178e7655f413fcd4f3cd2aa4c
Move Arch specific code to arch specific files.
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/ptrace_amd64.go", "new_path": "pkg/abi/linux/ptrace_amd64.go", "diff": "@@ -50,3 +50,14 @@ type PtraceRegs struct {\nFs uint64\nGs uint64\n}\n+\n+// InstructionPointer returns the address of the next instruction to\n+// be executed.\n+func (p *PtraceRegs) InstructionPointer() uint64 {\n+ return p.Rip\n+}\n+\n+// StackPointer returns the address of the Stack pointer.\n+func (p *PtraceRegs) StackPointer() uint64 {\n+ return p.Rsp\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/abi/linux/ptrace_arm64.go", "new_path": "pkg/abi/linux/ptrace_arm64.go", "diff": "@@ -27,3 +27,14 @@ type PtraceRegs struct {\nPc uint64\nPstate uint64\n}\n+\n+// InstructionPointer returns the address of the next instruction to be\n+// executed.\n+func (p *PtraceRegs) InstructionPointer() uint64 {\n+ return p.Pc\n+}\n+\n+// StackPointer returns the address of the Stack pointer.\n+func (p *PtraceRegs) StackPointer() uint64 {\n+ return p.Sp\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Move Arch specific code to arch specific files. PiperOrigin-RevId: 362297474
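Aside, not part of the commit record: with InstructionPointer and StackPointer defined on each architecture's PtraceRegs, callers can stay architecture-neutral. The helper below is hypothetical, not code from the repository:

    package main

    import (
        "fmt"

        "gvisor.dev/gvisor/pkg/abi/linux"
    )

    // dumpRegs is a hypothetical helper; it builds on both amd64 and arm64
    // because it never touches Rip/Rsp or Pc/Sp directly.
    func dumpRegs(regs *linux.PtraceRegs) {
        fmt.Printf("ip=%#x sp=%#x\n", regs.InstructionPointer(), regs.StackPointer())
    }

    func main() {
        var regs linux.PtraceRegs
        dumpRegs(&regs)
    }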
260,001
11.03.2021 17:01:13
28,800
df64c3a60f439dec81fd7c96690318588520eb87
Clear Merkle tree files in RuntimeEnable mode The Merkle tree files need to be cleared before enabling to avoid redundant content.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/filesystem.go", "new_path": "pkg/sentry/fsimpl/verity/filesystem.go", "diff": "@@ -590,6 +590,23 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,\nreturn nil, err\n}\n+ // Clear the Merkle tree file if they are to be generated at runtime.\n+ // TODO(b/182315468): Optimize the Merkle tree generate process to\n+ // allow only updating certain files/directories.\n+ if fs.allowRuntimeEnable {\n+ childMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{\n+ Root: childMerkleVD,\n+ Start: childMerkleVD,\n+ }, &vfs.OpenOptions{\n+ Flags: linux.O_RDWR | linux.O_TRUNC,\n+ Mode: 0644,\n+ })\n+ if err != nil {\n+ return nil, err\n+ }\n+ childMerkleFD.DecRef(ctx)\n+ }\n+\n// The dentry needs to be cleaned up if any error occurs. IncRef will be\n// called if a verity child dentry is successfully created.\ndefer childMerkleVD.DecRef(ctx)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/verity.go", "new_path": "pkg/sentry/fsimpl/verity/verity.go", "diff": "@@ -311,6 +311,24 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nd.DecRef(ctx)\nreturn nil, nil, alertIntegrityViolation(\"Failed to find root Merkle file\")\n}\n+\n+ // Clear the Merkle tree file if they are to be generated at runtime.\n+ // TODO(b/182315468): Optimize the Merkle tree generate process to\n+ // allow only updating certain files/directories.\n+ if fs.allowRuntimeEnable {\n+ lowerMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{\n+ Root: lowerMerkleVD,\n+ Start: lowerMerkleVD,\n+ }, &vfs.OpenOptions{\n+ Flags: linux.O_RDWR | linux.O_TRUNC,\n+ Mode: 0644,\n+ })\n+ if err != nil {\n+ return nil, nil, err\n+ }\n+ lowerMerkleFD.DecRef(ctx)\n+ }\n+\nd.lowerMerkleVD = lowerMerkleVD\n// Get metadata from the underlying file system.\n" } ]
Go
Apache License 2.0
google/gvisor
Clear Merkle tree files in RuntimeEnable mode The Merkle tree files need to be cleared before enabling to avoid redundant content. PiperOrigin-RevId: 362409591
260,001
11.03.2021 17:52:43
28,800
a7197c9c688fdfc2d37005063d3f6dbf9cef2341
Implement Merkle tree generate tool binary This binary is used to recursively enable and generate Merkle tree files for all files and directories in a file system from inside a gVisor sandbox.
[ { "change_type": "ADD", "old_path": null, "new_path": "tools/verity/BUILD", "diff": "+load(\"//tools:defs.bzl\", \"go_binary\")\n+\n+licenses([\"notice\"])\n+\n+go_binary(\n+ name = \"measure_tool\",\n+ srcs = [\n+ \"measure_tool.go\",\n+ \"measure_tool_unsafe.go\",\n+ ],\n+ pure = True,\n+ deps = [\n+ \"//pkg/abi/linux\",\n+ ],\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "tools/verity/measure_tool.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// This binary can be used to run a measurement of the verity file system,\n+// generate the corresponding Merkle tree files, and return the root hash.\n+package main\n+\n+import (\n+ \"flag\"\n+ \"io/ioutil\"\n+ \"log\"\n+ \"os\"\n+ \"syscall\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+)\n+\n+var path = flag.String(\"path\", \"\", \"path to the verity file system.\")\n+\n+const maxDigestSize = 64\n+\n+type digest struct {\n+ metadata linux.DigestMetadata\n+ digest [maxDigestSize]byte\n+}\n+\n+func main() {\n+ flag.Parse()\n+ if *path == \"\" {\n+ log.Fatalf(\"no path provided\")\n+ }\n+ if err := enableDir(*path); err != nil {\n+ log.Fatalf(\"Failed to enable file system %s: %v\", *path, err)\n+ }\n+ // Print the root hash of the file system to stdout.\n+ if err := measure(*path); err != nil {\n+ log.Fatalf(\"Failed to measure file system %s: %v\", *path, err)\n+ }\n+}\n+\n+// enableDir enables verity features on all the files and sub-directories within\n+// path.\n+func enableDir(path string) error {\n+ files, err := ioutil.ReadDir(path)\n+ if err != nil {\n+ return err\n+ }\n+ for _, file := range files {\n+ if file.IsDir() {\n+ // For directories, first enable its children.\n+ if err := enableDir(path + \"/\" + file.Name()); err != nil {\n+ return err\n+ }\n+ } else if file.Mode().IsRegular() {\n+ // For regular files, open and enable verity feature.\n+ f, err := os.Open(path + \"/\" + file.Name())\n+ if err != nil {\n+ return err\n+ }\n+ var p uintptr\n+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(f.Fd()), uintptr(linux.FS_IOC_ENABLE_VERITY), p); err != 0 {\n+ return err\n+ }\n+ }\n+ }\n+ // Once all children are enabled, enable the parent directory.\n+ f, err := os.Open(path)\n+ if err != nil {\n+ return err\n+ }\n+ var p uintptr\n+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(f.Fd()), uintptr(linux.FS_IOC_ENABLE_VERITY), p); err != 0 {\n+ return err\n+ }\n+ return nil\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "tools/verity/measure_tool_unsafe.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+package main\n+\n+import (\n+ \"encoding/hex\"\n+ \"fmt\"\n+ \"os\"\n+ \"syscall\"\n+ \"unsafe\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+)\n+\n+// measure prints the hash of path to stdout.\n+func measure(path string) error {\n+ f, err := os.Open(path)\n+ if err != nil {\n+ return err\n+ }\n+ var digest digest\n+ digest.metadata.DigestSize = maxDigestSize\n+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(f.Fd()), uintptr(linux.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))); err != 0 {\n+ return err\n+ }\n+ fmt.Fprintf(os.Stdout, \"%s\\n\", hex.EncodeToString(digest.digest[:digest.metadata.DigestSize]))\n+ return err\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Implement Merkle tree generate tool binary This binary is used to recursively enable and generate Merkle tree files for all files and directories in a file system from inside a gVisor sandbox. PiperOrigin-RevId: 362418770
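Aside, not part of the commit record: the core of what the tool does for each regular file is an FS_IOC_ENABLE_VERITY ioctl. A hedged sketch with a hypothetical path; the ioctl constant is the one from gVisor's abi/linux package used in the diff:

    package main

    import (
        "os"
        "syscall"

        "gvisor.dev/gvisor/pkg/abi/linux"
    )

    func main() {
        // Hypothetical file inside a verity-backed mount.
        f, err := os.Open("/mnt/verity/app.bin")
        if err != nil {
            panic(err)
        }
        defer f.Close()
        // FS_IOC_ENABLE_VERITY asks the filesystem to build the Merkle tree
        // for this file, as measure_tool does for every file it visits.
        if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, f.Fd(), uintptr(linux.FS_IOC_ENABLE_VERITY), 0); errno != 0 {
            panic(errno)
        }
    }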
259,891
11.03.2021 21:12:25
28,800
002df130655ff5e10196d0a057659a4d7c4f6364
Remove special casing of socket stress test With /proc/sys/net/ipv4/ip_local_port_range implemented, the socket stress test runs in a more normal time and doesn't need to sacrifice coverage to prevent timeouts.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -65,14 +65,8 @@ syscall_test(\nsyscall_test(\nsize = \"large\",\n- # Produce too many logs in the debug mode.\n- debug = False,\nshard_count = most_shards,\n- # Takes too long for TSAN. Since this is kind of a stress test that doesn't\n- # involve much concurrency, TSAN's usefulness here is limited anyway.\n- tags = [\"nogotsan\"],\ntest = \"//test/syscalls/linux:socket_stress_test\",\n- vfs2 = False,\n)\nsyscall_test(\n" } ]
Go
Apache License 2.0
google/gvisor
Remove special casing of socket stress test With /proc/sys/net/ipv4/ip_local_port_range implemented, the socket stress test runs in a more normal time and doesn't need to sacrifice coverage to prevent timeouts. PiperOrigin-RevId: 362443366
259,898
11.03.2021 22:35:26
28,800
ab488702a68b139ee3fffb04931cfd749571ff9a
Support ICMP echo sockets on Linux DUT By default net.ipv4.ping_group_range is set to "1 0" and no one (even the root) can create an ICMP socket. Setting it to "0 0" allows root, which we are inside the container, to create ICMP sockets for packetimpact tests.
[ { "change_type": "MODIFY", "old_path": "test/packetimpact/runner/dut.go", "new_path": "test/packetimpact/runner/dut.go", "diff": "@@ -249,6 +249,7 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co\ntestbenchContainer,\ntestbenchAddr,\ndockerNetworks,\n+ nil, /* sysctls */\n\"tail\", \"-f\", \"/dev/null\",\n); err != nil {\nt.Fatalf(\"cannot start testbench container: %s\", err)\n@@ -428,6 +429,10 @@ func (dut *DockerDUT) Prepare(ctx context.Context, _ *testing.T, runOpts dockeru\ndut.c,\nDUTAddr,\n[]*dockerutil.Network{ctrlNet, testNet},\n+ map[string]string{\n+ // This enables creating ICMP sockets on Linux.\n+ \"net.ipv4.ping_group_range\": \"0 0\",\n+ },\ncontainerPosixServerBinary,\n\"--ip=0.0.0.0\",\nfmt.Sprintf(\"--port=%d\", CtrlPort),\n@@ -590,11 +595,14 @@ func createDockerNetwork(ctx context.Context, n *dockerutil.Network) error {\n// StartContainer will create a container instance from runOpts, connect it\n// with the specified docker networks and start executing the specified cmd.\n-func StartContainer(ctx context.Context, runOpts dockerutil.RunOpts, c *dockerutil.Container, containerAddr net.IP, ns []*dockerutil.Network, cmd ...string) error {\n+func StartContainer(ctx context.Context, runOpts dockerutil.RunOpts, c *dockerutil.Container, containerAddr net.IP, ns []*dockerutil.Network, sysctls map[string]string, cmd ...string) error {\nconf, hostconf, netconf := c.ConfigsFrom(runOpts, cmd...)\n_ = netconf\nhostconf.AutoRemove = true\nhostconf.Sysctls = map[string]string{\"net.ipv6.conf.all.disable_ipv6\": \"0\"}\n+ for k, v := range sysctls {\n+ hostconf.Sysctls[k] = v\n+ }\nif err := c.CreateFrom(ctx, runOpts.Image, conf, hostconf, nil); err != nil {\nreturn fmt.Errorf(\"unable to create container %s: %w\", c.Name, err)\n" } ]
Go
Apache License 2.0
google/gvisor
Support ICMP echo sockets on Linux DUT By default net.ipv4.ping_group_range is set to "1 0" and no one (even the root) can create an ICMP socket. Setting it to "0 0" allows root, which we are inside the container, to create ICMP sockets for packetimpact tests. PiperOrigin-RevId: 362454201
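Aside, not part of the commit record: ping_group_range gates unprivileged ICMP datagram ("ping") sockets, which are opened roughly as below. Minimal IPv4-only sketch:

    package main

    import "golang.org/x/sys/unix"

    func main() {
        // Succeeds only if the caller's group falls inside
        // net.ipv4.ping_group_range; "0 0" admits gid 0, i.e. root in the container.
        fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_ICMP)
        if err != nil {
            panic(err)
        }
        unix.Close(fd)
    }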
259,907
13.03.2021 20:27:55
28,800
ee6b22ca63361a61618b00723e8f557d33e72af4
[perf] Run benchmarks with VFS2. The run-benchmark target would run the benchmark with VFS1.
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -326,6 +326,7 @@ containerd-tests: containerd-test-1.4.3\n## BENCHMARKS_FILTER - filter to be applied to the test suite.\n## BENCHMARKS_OPTIONS - options to be passed to the test.\n## BENCHMARKS_PROFILE - profile options to be passed to the test.\n+## BENCH_RUNTIME_ARGS - args to configure the runtime which runs the benchmarks.\n##\nBENCHMARKS_PROJECT ?= gvisor-benchmarks\nBENCHMARKS_DATASET ?= kokoro\n@@ -339,6 +340,7 @@ BENCHMARKS_FILTER := .\nBENCHMARKS_OPTIONS := -test.benchtime=30s\nBENCHMARKS_ARGS := -test.v -test.bench=$(BENCHMARKS_FILTER) $(BENCHMARKS_OPTIONS)\nBENCHMARKS_PROFILE := -pprof-dir=/tmp/profile -pprof-cpu -pprof-heap -pprof-block -pprof-mutex\n+BENCH_RUNTIME_ARGS ?= --vfs2\ninit-benchmark-table: ## Initializes a BigQuery table with the benchmark schema.\n@$(call run,//tools/parsers:parser,init --project=$(BENCHMARKS_PROJECT) --dataset=$(BENCHMARKS_DATASET) --table=$(BENCHMARKS_TABLE))\n@@ -359,13 +361,13 @@ run_benchmark = \\\nbenchmark-platforms: load-benchmarks $(RUNTIME_BIN) ## Runs benchmarks for runc and all given platforms in BENCHMARK_PLATFORMS.\n@$(foreach PLATFORM,$(BENCHMARKS_PLATFORMS), \\\n- $(call run_benchmark,$(PLATFORM),--platform=$(PLATFORM) --vfs2) && \\\n+ $(call run_benchmark,$(PLATFORM),--platform=$(PLATFORM) $(BENCH_RUNTIME_ARGS)) && \\\n) true\n@$(call run_benchmark,runc)\n.PHONY: benchmark-platforms\nrun-benchmark: load-benchmarks $(RUNTIME_BIN) ## Runs single benchmark and optionally sends data to BigQuery.\n- @$(call run_benchmark,$(RUNTIME),)\n+ @$(call run_benchmark,$(RUNTIME),$(BENCH_RUNTIME_ARGS))\n.PHONY: run-benchmark\n##\n" } ]
Go
Apache License 2.0
google/gvisor
[perf] Run benchmarks with VFS2. The run-benchmark target would run the benchmark with VFS1. PiperOrigin-RevId: 362754188
259,907
15.03.2021 16:38:46
25,200
ec45d969236bb98a83e7da0466bd67e540c5e8b5
[op] Make gofer client handle return partial write length when err is nil. If there was a partial write (when not using the host FD) which did not generate an error, we were incorrectly returning the number of bytes attempted to write instead of the number of bytes actually written.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/handle.go", "new_path": "pkg/sentry/fsimpl/gofer/handle.go", "diff": "@@ -124,8 +124,9 @@ func (h *handle) writeFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, o\nreturn 0, cperr\n}\nn, err := h.file.writeAt(ctx, buf[:cp], offset)\n+ // err takes precedence over cperr.\nif err != nil {\nreturn uint64(n), err\n}\n- return cp, cperr\n+ return uint64(n), cperr\n}\n" } ]
Go
Apache License 2.0
google/gvisor
[op] Make gofer client handle return partial write length when err is nil. If there was a partial write (when not using the host FD) which did not generate an error, we were incorrectly returning the number of bytes attempted to write instead of the number of bytes actually written. PiperOrigin-RevId: 363058989
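Aside, not part of the commit record: a sketch of the rule the fix enforces, using made-up types rather than the gofer's real ones. When the lower layer can report a short write with a nil error, propagate its count instead of the attempted length:

    package main

    import "fmt"

    // shortWriter stands in for the gofer client's lower-level writeAt, which
    // may report fewer bytes than requested even with a nil error.
    type shortWriter interface {
        writeAt(buf []byte, off uint64) (int, error)
    }

    // writeFromBuf returns whatever count the lower layer reports; returning
    // uint64(len(buf)) here would overstate a partial write.
    func writeFromBuf(w shortWriter, buf []byte, off uint64) (uint64, error) {
        n, err := w.writeAt(buf, off)
        return uint64(n), err
    }

    type half struct{}

    func (half) writeAt(buf []byte, off uint64) (int, error) { return len(buf) / 2, nil }

    func main() {
        n, err := writeFromBuf(half{}, make([]byte, 8), 0)
        fmt.Println(n, err) // 4 <nil>
    }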
259,891
15.03.2021 18:47:41
25,200
b1d57877264c2b94e3024375efc9914881f0bbe8
Make netstack (//pkg/tcpip) buildable for 32 bit Doing so involved breaking dependencies between //pkg/tcpip and the rest of gVisor, which are discouraged anyways. Tested on the Go branch via: gvisor.dev/gvisor/pkg/tcpip/... Addresses
[ { "change_type": "MODIFY", "old_path": "pkg/iovec/BUILD", "new_path": "pkg/iovec/BUILD", "diff": "@@ -4,12 +4,12 @@ package(licenses = [\"notice\"])\ngo_library(\nname = \"iovec\",\n- srcs = [\"iovec.go\"],\n- visibility = [\"//:sandbox\"],\n- deps = [\n- \"//pkg/abi/linux\",\n- \"@org_golang_x_sys//unix:go_default_library\",\n+ srcs = [\n+ \"iovec.go\",\n+ \"iovec_max.go\",\n],\n+ visibility = [\"//:sandbox\"],\n+ deps = [\"@org_golang_x_sys//unix:go_default_library\"],\n)\ngo_test(\n" }, { "change_type": "MODIFY", "old_path": "pkg/iovec/iovec.go", "new_path": "pkg/iovec/iovec.go", "diff": "@@ -20,12 +20,8 @@ package iovec\nimport (\n\"golang.org/x/sys/unix\"\n- \"gvisor.dev/gvisor/pkg/abi/linux\"\n)\n-// MaxIovs is the maximum number of iovecs host platform can accept.\n-var MaxIovs = linux.UIO_MAXIOV\n-\n// Builder is a builder for slice of unix.Iovec.\ntype Builder struct {\niovec []unix.Iovec\n@@ -47,10 +43,10 @@ func (b *Builder) Add(buf []byte) {\nb.addByAppend(buf)\nreturn\n}\n- b.iovec = append(b.iovec, unix.Iovec{\n- Base: &buf[0],\n- Len: uint64(len(buf)),\n- })\n+\n+ b.iovec = append(b.iovec, unix.Iovec{Base: &buf[0]})\n+ b.iovec[len(b.iovec)-1].SetLen(len(buf))\n+\n// Keep the last buf if iovec is at max capacity. We will need to append to it\n// for later bufs.\nif len(b.iovec) == MaxIovs {\n@@ -61,10 +57,8 @@ func (b *Builder) Add(buf []byte) {\nfunc (b *Builder) addByAppend(buf []byte) {\nb.overflow = append(b.overflow, buf...)\n- b.iovec[len(b.iovec)-1] = unix.Iovec{\n- Base: &b.overflow[0],\n- Len: uint64(len(b.overflow)),\n- }\n+ b.iovec[len(b.iovec)-1] = unix.Iovec{Base: &b.overflow[0]}\n+ b.iovec[len(b.iovec)-1].SetLen(len(b.overflow))\n}\n// Build returns the final Iovec slice. The length of returned iovec will not\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/iovec/iovec_max.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package iovec\n+\n+// MaxIovs is the maximum number of iovecs host platform can accept. It\n+// corresponds to Linux's UIO_MAXIOV, which is not in the unix package.\n+const MaxIovs = 1024\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/devices/tundev/tundev.go", "new_path": "pkg/sentry/devices/tundev/tundev.go", "diff": "@@ -87,19 +87,18 @@ func (fd *tunFD) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArg\nif _, err := req.CopyIn(t, data); err != nil {\nreturn 0, err\n}\n- flags := usermem.ByteOrder.Uint16(req.Data[:])\n+\n+ // Validate flags.\n+ flags, err := netstack.LinuxToTUNFlags(usermem.ByteOrder.Uint16(req.Data[:]))\n+ if err != nil {\n+ return 0, err\n+ }\nreturn 0, fd.device.SetIff(stack.Stack, req.Name(), flags)\ncase linux.TUNGETIFF:\nvar req linux.IFReq\n-\ncopy(req.IFName[:], fd.device.Name())\n-\n- // Linux adds IFF_NOFILTER (the same value as IFF_NO_PI unfortunately) when\n- // there is no sk_filter. 
See __tun_chr_ioctl() in net/drivers/tun.c.\n- flags := fd.device.Flags() | linux.IFF_NOFILTER\n- usermem.ByteOrder.PutUint16(req.Data[:], flags)\n-\n+ usermem.ByteOrder.PutUint16(req.Data[:], netstack.TUNFlagsToLinux(fd.device.Flags()))\n_, err := req.CopyOut(t, data)\nreturn 0, err\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/dev/net_tun.go", "new_path": "pkg/sentry/fs/dev/net_tun.go", "diff": "@@ -108,19 +108,18 @@ func (n *netTunFileOperations) Ioctl(ctx context.Context, file *fs.File, io user\nif _, err := req.CopyIn(t, data); err != nil {\nreturn 0, err\n}\n- flags := usermem.ByteOrder.Uint16(req.Data[:])\n+\n+ // Validate flags.\n+ flags, err := netstack.LinuxToTUNFlags(usermem.ByteOrder.Uint16(req.Data[:]))\n+ if err != nil {\n+ return 0, err\n+ }\nreturn 0, n.device.SetIff(stack.Stack, req.Name(), flags)\ncase linux.TUNGETIFF:\nvar req linux.IFReq\n-\ncopy(req.IFName[:], n.device.Name())\n-\n- // Linux adds IFF_NOFILTER (the same value as IFF_NO_PI unfortunately) when\n- // there is no sk_filter. See __tun_chr_ioctl() in net/drivers/tun.c.\n- flags := n.device.Flags() | linux.IFF_NOFILTER\n- usermem.ByteOrder.PutUint16(req.Data[:], flags)\n-\n+ usermem.ByteOrder.PutUint16(req.Data[:], netstack.TUNFlagsToLinux(n.device.Flags()))\n_, err := req.CopyOut(t, data)\nreturn 0, err\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/BUILD", "new_path": "pkg/sentry/socket/netstack/BUILD", "diff": "@@ -12,6 +12,7 @@ go_library(\n\"provider_vfs2.go\",\n\"save_restore.go\",\n\"stack.go\",\n+ \"tun.go\",\n],\nvisibility = [\n\"//pkg/sentry:internal\",\n@@ -42,6 +43,7 @@ go_library(\n\"//pkg/syserror\",\n\"//pkg/tcpip\",\n\"//pkg/tcpip/header\",\n+ \"//pkg/tcpip/link/tun\",\n\"//pkg/tcpip/network/ipv4\",\n\"//pkg/tcpip/network/ipv6\",\n\"//pkg/tcpip/stack\",\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/sentry/socket/netstack/tun.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package netstack\n+\n+import (\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/syserror\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/link/tun\"\n+)\n+\n+// TUNFlagsToLinux converts a tun.Flags to Linux TUN flags.\n+func TUNFlagsToLinux(flags tun.Flags) uint16 {\n+ ret := uint16(linux.IFF_NOFILTER)\n+ if flags.TAP {\n+ ret |= linux.IFF_TAP\n+ }\n+ if flags.TUN {\n+ ret |= linux.IFF_TUN\n+ }\n+ if flags.NoPacketInfo {\n+ ret |= linux.IFF_NO_PI\n+ }\n+ return ret\n+}\n+\n+// LinuxToTUNFlags converts Linux TUN flags to a tun.Flags.\n+func LinuxToTUNFlags(flags uint16) (tun.Flags, error) {\n+ // Linux adds IFF_NOFILTER (the same value as IFF_NO_PI unfortunately)\n+ // when there is no sk_filter. 
See __tun_chr_ioctl() in\n+ // net/drivers/tun.c.\n+ if flags&^uint16(linux.IFF_TUN|linux.IFF_TAP|linux.IFF_NO_PI) != 0 {\n+ return tun.Flags{}, syserror.EINVAL\n+ }\n+ return tun.Flags{\n+ TUN: flags&linux.IFF_TUN != 0,\n+ TAP: flags&linux.IFF_TAP != 0,\n+ NoPacketInfo: flags&linux.IFF_NO_PI != 0,\n+ }, nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/fdbased/endpoint.go", "new_path": "pkg/tcpip/link/fdbased/endpoint.go", "diff": "@@ -492,7 +492,7 @@ func (e *endpoint) sendBatch(batchFD int, batch []*stack.PacketBuffer) (int, tcp\nvar mmsgHdr rawfile.MMsgHdr\nmmsgHdr.Msg.Iov = &iovecs[0]\n- mmsgHdr.Msg.Iovlen = uint64(len(iovecs))\n+ mmsgHdr.Msg.SetIovlen((len(iovecs)))\nmmsgHdrs = append(mmsgHdrs, mmsgHdr)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/fdbased/packet_dispatchers.go", "new_path": "pkg/tcpip/link/fdbased/packet_dispatchers.go", "diff": "@@ -68,10 +68,8 @@ func (b *iovecBuffer) nextIovecs() []unix.Iovec {\n// The kernel adds virtioNetHdr before each packet, but\n// we don't use it, so so we allocate a buffer for it,\n// add it in iovecs but don't add it in a view.\n- b.iovecs[0] = unix.Iovec{\n- Base: &vnetHdr[0],\n- Len: uint64(virtioNetHdrSize),\n- }\n+ b.iovecs[0] = unix.Iovec{Base: &vnetHdr[0]}\n+ b.iovecs[0].SetLen(virtioNetHdrSize)\nvnetHdrOff++\n}\nfor i := range b.views {\n@@ -80,10 +78,8 @@ func (b *iovecBuffer) nextIovecs() []unix.Iovec {\n}\nv := buffer.NewView(b.sizes[i])\nb.views[i] = v\n- b.iovecs[i+vnetHdrOff] = unix.Iovec{\n- Base: &v[0],\n- Len: uint64(len(v)),\n- }\n+ b.iovecs[i+vnetHdrOff] = unix.Iovec{Base: &v[0]}\n+ b.iovecs[i+vnetHdrOff].SetLen(len(v))\n}\nreturn b.iovecs\n}\n@@ -235,7 +231,7 @@ func (d *recvMMsgDispatcher) dispatch() (bool, tcpip.Error) {\niovLen := len(iovecs)\nd.msgHdrs[k].Len = 0\nd.msgHdrs[k].Msg.Iov = &iovecs[0]\n- d.msgHdrs[k].Msg.Iovlen = uint64(iovLen)\n+ d.msgHdrs[k].Msg.SetIovlen(iovLen)\n}\nnMsgs, err := rawfile.BlockingRecvMMsg(d.fd, d.msgHdrs)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/tun/device.go", "new_path": "pkg/tcpip/link/tun/device.go", "diff": "@@ -17,7 +17,6 @@ package tun\nimport (\n\"fmt\"\n- \"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sync\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n@@ -49,7 +48,14 @@ type Device struct {\nmu sync.RWMutex `state:\"nosave\"`\nendpoint *tunEndpoint\nnotifyHandle *channel.NotificationHandle\n- flags uint16\n+ flags Flags\n+}\n+\n+// Flags set properties of a Device\n+type Flags struct {\n+ TUN bool\n+ TAP bool\n+ NoPacketInfo bool\n}\n// beforeSave is invoked by stateify.\n@@ -77,7 +83,7 @@ func (d *Device) Release(ctx context.Context) {\n}\n// SetIff services TUNSETIFF ioctl(2) request.\n-func (d *Device) SetIff(s *stack.Stack, name string, flags uint16) error {\n+func (d *Device) SetIff(s *stack.Stack, name string, flags Flags) error {\nd.mu.Lock()\ndefer d.mu.Unlock()\n@@ -85,21 +91,18 @@ func (d *Device) SetIff(s *stack.Stack, name string, flags uint16) error {\nreturn syserror.EINVAL\n}\n- // Input validations.\n- isTun := flags&linux.IFF_TUN != 0\n- isTap := flags&linux.IFF_TAP != 0\n- supportedFlags := uint16(linux.IFF_TUN | linux.IFF_TAP | linux.IFF_NO_PI)\n- if isTap && isTun || !isTap && !isTun || flags&^supportedFlags != 0 {\n+ // Input validation.\n+ if flags.TAP && flags.TUN || !flags.TAP && !flags.TUN {\nreturn syserror.EINVAL\n}\nprefix := \"tun\"\n- if isTap {\n+ if flags.TAP {\nprefix = \"tap\"\n}\nlinkCaps := stack.CapabilityNone\n- if isTap {\n+ if flags.TAP 
{\nlinkCaps |= stack.CapabilityResolutionRequired\n}\n@@ -177,7 +180,7 @@ func (d *Device) Write(data []byte) (int64, error) {\n// Packet information.\nvar pktInfoHdr PacketInfoHeader\n- if !d.hasFlags(linux.IFF_NO_PI) {\n+ if !d.flags.NoPacketInfo {\nif len(data) < PacketInfoHeaderSize {\n// Ignore bad packet.\nreturn dataLen, nil\n@@ -188,7 +191,7 @@ func (d *Device) Write(data []byte) (int64, error) {\n// Ethernet header (TAP only).\nvar ethHdr header.Ethernet\n- if d.hasFlags(linux.IFF_TAP) {\n+ if d.flags.TAP {\nif len(data) < header.EthernetMinimumSize {\n// Ignore bad packet.\nreturn dataLen, nil\n@@ -253,7 +256,7 @@ func (d *Device) encodePkt(info *channel.PacketInfo) (buffer.View, bool) {\nvar vv buffer.VectorisedView\n// Packet information.\n- if !d.hasFlags(linux.IFF_NO_PI) {\n+ if !d.flags.NoPacketInfo {\nhdr := make(PacketInfoHeader, PacketInfoHeaderSize)\nhdr.Encode(&PacketInfoFields{\nProtocol: info.Proto,\n@@ -269,7 +272,7 @@ func (d *Device) encodePkt(info *channel.PacketInfo) (buffer.View, bool) {\n}\n// Ethernet header (TAP only).\n- if d.hasFlags(linux.IFF_TAP) {\n+ if d.flags.TAP {\n// Add ethernet header if not provided.\nif info.Pkt.LinkHeader().View().IsEmpty() {\nd.endpoint.AddHeader(info.Route.LocalLinkAddress, info.Route.RemoteLinkAddress, info.Proto, info.Pkt)\n@@ -298,16 +301,12 @@ func (d *Device) Name() string {\n}\n// Flags returns the flags set for d. Zero value if unset.\n-func (d *Device) Flags() uint16 {\n+func (d *Device) Flags() Flags {\nd.mu.RLock()\ndefer d.mu.RUnlock()\nreturn d.flags\n}\n-func (d *Device) hasFlags(flags uint16) bool {\n- return d.flags&flags == flags\n-}\n-\n// Readiness implements watier.Waitable.Readiness.\nfunc (d *Device) Readiness(mask waiter.EventMask) waiter.EventMask {\nif mask&waiter.EventIn != 0 {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/snd.go", "new_path": "pkg/tcpip/transport/tcp/snd.go", "diff": "@@ -323,7 +323,9 @@ func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint\n// their initial values.\nfunc (s *sender) initCongestionControl(congestionControlName tcpip.CongestionControlOption) congestionControl {\ns.sndCwnd = InitialCwnd\n- s.sndSsthresh = math.MaxInt64\n+ // Set sndSsthresh to the maximum int value, which depends on the\n+ // platform.\n+ s.sndSsthresh = int(^uint(0) >> 1)\nswitch congestionControlName {\ncase ccCubic:\n" } ]
Go
Apache License 2.0
google/gvisor
Make netstack (//pkg/tcpip) buildable for 32 bit Doing so involved breaking dependencies between //pkg/tcpip and the rest of gVisor, which are discouraged anyways. Tested on the Go branch via: gvisor.dev/gvisor/pkg/tcpip/... Addresses #1446. PiperOrigin-RevId: 363081778
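Aside, not part of the commit record: two portability idioms the change relies on, shown as a standalone sketch with arbitrary buffer contents:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Platform-dependent maximum int: math.MaxInt64 does not fit in an int
        // on 32-bit targets, so snd.go now derives it from the word size.
        maxInt := int(^uint(0) >> 1)
        fmt.Println(maxInt)

        // unix.Iovec.Len is uint64 on 64-bit Linux but uint32 on 32-bit, so the
        // portable way to set it is SetLen rather than a direct assignment.
        buf := []byte("payload")
        iov := unix.Iovec{Base: &buf[0]}
        iov.SetLen(len(buf))
        fmt.Println(iov.Len)
    }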
259,992
15.03.2021 19:04:16
25,200
34d0d720679778611fce51ed7f62fbdafa413d60
Deflake proc_test_native Terminating tasks from other tests can mess with the task list of the current test. Tests were changed to look for added/removed tasks, ignoring other tasks that may exist while the test is running.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -1705,6 +1705,7 @@ cc_binary(\n\"@com_google_absl//absl/time\",\ngtest,\n\"//test/util:memory_util\",\n+ \"//test/util:multiprocess_util\",\n\"//test/util:posix_error\",\n\"//test/util:proc_util\",\n\"//test/util:temp_path\",\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc.cc", "new_path": "test/syscalls/linux/proc.cc", "diff": "#include \"test/util/file_descriptor.h\"\n#include \"test/util/fs_util.h\"\n#include \"test/util/memory_util.h\"\n+#include \"test/util/multiprocess_util.h\"\n#include \"test/util/posix_error.h\"\n#include \"test/util/proc_util.h\"\n#include \"test/util/temp_path.h\"\n@@ -2073,54 +2074,37 @@ TEST(ProcPidFile, SubprocessExited) {\nSyscallFailsWithErrno(ESRCH));\n}\n-PosixError DirContainsImpl(absl::string_view path,\n- const std::vector<std::string>& targets,\n- bool strict) {\n+PosixError DirContains(absl::string_view path,\n+ const std::vector<std::string>& expect,\n+ const std::vector<std::string>& exclude) {\nASSIGN_OR_RETURN_ERRNO(auto listing, ListDir(path, false));\n- bool success = true;\n- for (auto& expected_entry : targets) {\n+ for (auto& expected_entry : expect) {\nauto cursor = std::find(listing.begin(), listing.end(), expected_entry);\nif (cursor == listing.end()) {\n- success = false;\n- }\n- }\n-\n- if (!success) {\nreturn PosixError(\nENOENT,\nabsl::StrCat(\"Failed to find one or more paths in '\", path, \"'\"));\n}\n-\n- if (strict) {\n- if (targets.size() != listing.size()) {\n- return PosixError(\n- EINVAL,\n- absl::StrCat(\"Expected to find \", targets.size(), \" elements in '\",\n- path, \"', but found \", listing.size()));\n- }\n}\n-\n- return NoError();\n+ for (auto& excluded_entry : exclude) {\n+ auto cursor = std::find(listing.begin(), listing.end(), excluded_entry);\n+ if (cursor != listing.end()) {\n+ return PosixError(ENOENT, absl::StrCat(\"File '\", excluded_entry,\n+ \"' found in path '\", path, \"'\"));\n}\n-\n-PosixError DirContains(absl::string_view path,\n- const std::vector<std::string>& targets) {\n- return DirContainsImpl(path, targets, false);\n}\n-\n-PosixError DirContainsExactly(absl::string_view path,\n- const std::vector<std::string>& targets) {\n- return DirContainsImpl(path, targets, true);\n+ return NoError();\n}\n-PosixError EventuallyDirContainsExactly(\n- absl::string_view path, const std::vector<std::string>& targets) {\n+PosixError EventuallyDirContains(absl::string_view path,\n+ const std::vector<std::string>& expect,\n+ const std::vector<std::string>& exclude) {\nconstexpr int kRetryCount = 100;\nconst absl::Duration kRetryDelay = absl::Milliseconds(100);\nfor (int i = 0; i < kRetryCount; ++i) {\n- auto res = DirContainsExactly(path, targets);\n+ auto res = DirContains(path, expect, exclude);\nif (res.ok()) {\nreturn res;\n} else if (i < kRetryCount - 1) {\n@@ -2132,22 +2116,14 @@ PosixError EventuallyDirContainsExactly(\n\"Timed out while waiting for directory to contain files \");\n}\n-TEST(ProcTask, Basic) {\n- EXPECT_NO_ERRNO(\n- DirContains(\"/proc/self/task\", {\".\", \"..\", absl::StrCat(getpid())}));\n-}\n-\n-std::vector<std::string> TaskFiles(\n- const std::vector<std::string>& initial_contents,\n- const std::vector<pid_t>& pids) {\n- return VecCat<std::string>(\n- initial_contents,\n- ApplyVec<std::string>([](const pid_t p) { return absl::StrCat(p); },\n- pids));\n+std::vector<std::string> TaskFiles(const std::vector<pid_t>& pids) {\n+ return 
ApplyVec<std::string>([](const pid_t p) { return absl::StrCat(p); },\n+ pids);\n}\n-std::vector<std::string> TaskFiles(const std::vector<pid_t>& pids) {\n- return TaskFiles({\".\", \"..\", absl::StrCat(getpid())}, pids);\n+TEST(ProcTask, Basic) {\n+ EXPECT_NO_ERRNO(\n+ DirContains(\"/proc/self/task\", {\".\", \"..\", absl::StrCat(getpid())}, {}));\n}\n// Helper class for creating a new task in the current thread group.\n@@ -2189,20 +2165,15 @@ class BlockingChild {\n};\nTEST(ProcTask, NewThreadAppears) {\n- auto initial = ASSERT_NO_ERRNO_AND_VALUE(ListDir(\"/proc/self/task\", false));\nBlockingChild child1;\n- // Use Eventually* in case a proc from ealier test is still tearing down.\n- EXPECT_NO_ERRNO(EventuallyDirContainsExactly(\n- \"/proc/self/task\", TaskFiles(initial, {child1.Tid()})));\n+ EXPECT_NO_ERRNO(\n+ DirContains(\"/proc/self/task\", TaskFiles({child1.Tid()}), {}));\n}\nTEST(ProcTask, KilledThreadsDisappear) {\n- auto initial = ASSERT_NO_ERRNO_AND_VALUE(ListDir(\"/proc/self/task/\", false));\n-\nBlockingChild child1;\n- // Use Eventually* in case a proc from ealier test is still tearing down.\n- EXPECT_NO_ERRNO(EventuallyDirContainsExactly(\n- \"/proc/self/task\", TaskFiles(initial, {child1.Tid()})));\n+ EXPECT_NO_ERRNO(\n+ DirContains(\"/proc/self/task\", TaskFiles({child1.Tid()}), {}));\n// Stat child1's task file. Regression test for b/32097707.\nstruct stat statbuf;\n@@ -2211,26 +2182,29 @@ TEST(ProcTask, KilledThreadsDisappear) {\nEXPECT_THAT(stat(child1_task_file.c_str(), &statbuf), SyscallSucceeds());\nBlockingChild child2;\n- EXPECT_NO_ERRNO(DirContainsExactly(\n- \"/proc/self/task\", TaskFiles(initial, {child1.Tid(), child2.Tid()})));\n+ EXPECT_NO_ERRNO(DirContains(\"/proc/self/task\",\n+ TaskFiles({child1.Tid(), child2.Tid()}), {}));\nBlockingChild child3;\nBlockingChild child4;\nBlockingChild child5;\n- EXPECT_NO_ERRNO(DirContainsExactly(\n- \"/proc/self/task\",\n- TaskFiles(initial, {child1.Tid(), child2.Tid(), child3.Tid(),\n- child4.Tid(), child5.Tid()})));\n+ EXPECT_NO_ERRNO(\n+ DirContains(\"/proc/self/task\",\n+ TaskFiles({child1.Tid(), child2.Tid(), child3.Tid(),\n+ child4.Tid(), child5.Tid()}),\n+ {}));\nchild2.Join();\n- EXPECT_NO_ERRNO(EventuallyDirContainsExactly(\n- \"/proc/self/task\", TaskFiles(initial, {child1.Tid(), child3.Tid(),\n- child4.Tid(), child5.Tid()})));\n+ EXPECT_NO_ERRNO(EventuallyDirContains(\n+ \"/proc/self/task\",\n+ TaskFiles({child1.Tid(), child3.Tid(), child4.Tid(), child5.Tid()}),\n+ TaskFiles({child2.Tid()})));\nchild1.Join();\nchild4.Join();\n- EXPECT_NO_ERRNO(EventuallyDirContainsExactly(\n- \"/proc/self/task\", TaskFiles(initial, {child3.Tid(), child5.Tid()})));\n+ EXPECT_NO_ERRNO(EventuallyDirContains(\n+ \"/proc/self/task\", TaskFiles({child3.Tid(), child5.Tid()}),\n+ TaskFiles({child2.Tid(), child1.Tid(), child4.Tid()})));\n// Stat child1's task file again. This time it should fail. 
See b/32097707.\nEXPECT_THAT(stat(child1_task_file.c_str(), &statbuf),\n@@ -2238,18 +2212,23 @@ TEST(ProcTask, KilledThreadsDisappear) {\nchild3.Join();\nchild5.Join();\n- EXPECT_NO_ERRNO(EventuallyDirContainsExactly(\"/proc/self/task\", initial));\n+ EXPECT_NO_ERRNO(\n+ EventuallyDirContains(\"/proc/self/task\", {},\n+ TaskFiles({child2.Tid(), child1.Tid(), child4.Tid(),\n+ child3.Tid(), child5.Tid()})));\n}\nTEST(ProcTask, ChildTaskDir) {\nBlockingChild child1;\n- EXPECT_NO_ERRNO(DirContains(\"/proc/self/task\", TaskFiles({child1.Tid()})));\n+ EXPECT_NO_ERRNO(\n+ DirContains(\"/proc/self/task\", TaskFiles({child1.Tid()}), {}));\nEXPECT_NO_ERRNO(DirContains(absl::StrCat(\"/proc/\", child1.Tid(), \"/task\"),\n- TaskFiles({child1.Tid()})));\n+ TaskFiles({child1.Tid()}), {}));\n}\nPosixError VerifyPidDir(std::string path) {\n- return DirContains(path, {\"exe\", \"fd\", \"io\", \"maps\", \"ns\", \"stat\", \"status\"});\n+ return DirContains(path, {\"exe\", \"fd\", \"io\", \"maps\", \"ns\", \"stat\", \"status\"},\n+ {});\n}\nTEST(ProcTask, VerifyTaskDir) {\n@@ -2266,9 +2245,8 @@ TEST(ProcTask, VerifyTaskDir) {\n// /proc/1234/task/1234/task <- should not exist\n// /proc/1234/task/1235/task <- should not exist (where 1235 is in the same\n// thread group as 1234).\n- EXPECT_FALSE(\n- DirContains(absl::StrCat(\"/proc/self/task/\", getpid()), {\"task\"}).ok())\n- << \"Found 'task' directory in an inner directory.\";\n+ EXPECT_NO_ERRNO(\n+ DirContains(absl::StrCat(\"/proc/self/task/\", getpid()), {}, {\"task\"}));\n}\nTEST(ProcTask, TaskDirCannotBeDeleted) {\n@@ -2299,9 +2277,11 @@ TEST(ProcTask, TaskDirCanSeekToEnd) {\n}\nTEST(ProcTask, VerifyTaskDirNlinks) {\n+ const auto fn = [] {\n// A task directory will have 3 links if the taskgroup has a single\n// thread. For example, the following shows where the links to\n- // '/proc/12345/task comes' from for a single threaded process with pid 12345:\n+ // '/proc/12345/task' comes from for a single threaded process with pid\n+ // 12345:\n//\n// /proc/12345/task <-- 1 link for the directory itself\n// . <-- link from \".\"\n@@ -2311,22 +2291,28 @@ TEST(ProcTask, VerifyTaskDirNlinks) {\n// .. <-- link from \"..\" to parent.\n// <other contents of a task dir>\n//\n- // We can't assert an absolute number of links since we don't control how many\n- // threads the test framework spawns. Instead, we'll ensure creating a new\n- // thread increases the number of links as expected.\n+ // We can't assert an absolute number of links since we don't control how\n+ // many threads the test framework spawns. 
Instead, we'll ensure creating a\n+ // new thread increases the number of links as expected.\n- // Once we reach the test body, we can count on the thread count being stable\n- // unless we spawn a new one.\n- uint64_t initial_links = ASSERT_NO_ERRNO_AND_VALUE(Links(\"/proc/self/task\"));\n- ASSERT_GE(initial_links, 3);\n+ // Once we reach the test body, we can count on the thread count being\n+ // stable unless we spawn a new one.\n+ const uint64_t initial_links =\n+ TEST_CHECK_NO_ERRNO_AND_VALUE(Links(\"/proc/self/task\"));\n+ TEST_CHECK(initial_links >= 3);\n// For each new subtask, we should gain a new link.\nBlockingChild child1;\n- EXPECT_THAT(Links(\"/proc/self/task\"),\n- IsPosixErrorOkAndHolds(initial_links + 1));\n+ uint64_t links = TEST_CHECK_NO_ERRNO_AND_VALUE(Links(\"/proc/self/task\"));\n+ TEST_CHECK(links == initial_links + 1);\n+\nBlockingChild child2;\n- EXPECT_THAT(Links(\"/proc/self/task\"),\n- IsPosixErrorOkAndHolds(initial_links + 2));\n+ links = TEST_CHECK_NO_ERRNO_AND_VALUE(Links(\"/proc/self/task\"));\n+ TEST_CHECK(links == initial_links + 2);\n+ };\n+ // Run as a forked process to prevent terminating tasks from other tests to\n+ // show up here and race with the count.\n+ EXPECT_THAT(InForkedProcess(fn), IsPosixErrorOkAndHolds(0));\n}\nTEST(ProcTask, CommContainsThreadNameAndTrailingNewline) {\n@@ -2340,7 +2326,7 @@ TEST(ProcTask, CommContainsThreadNameAndTrailingNewline) {\n}\nTEST(ProcTaskNs, NsDirExistsAndHasCorrectMetadata) {\n- EXPECT_NO_ERRNO(DirContains(\"/proc/self/ns\", {\"net\", \"pid\", \"user\"}));\n+ EXPECT_NO_ERRNO(DirContains(\"/proc/self/ns\", {\"net\", \"pid\", \"user\"}, {}));\n// Let's just test the 'pid' entry, all of them are very similar.\nstruct stat st;\n" } ]
Go
Apache License 2.0
google/gvisor
Deflake proc_test_native Terminating tasks from other tests can mess with the task list of the current test. Tests were changed to look for added/removed tasks, ignoring other tasks that may exist while the test is running. PiperOrigin-RevId: 363084261
259,868
15.03.2021 20:09:37
25,200
f4b74218202c8f318c603c57aa35895554194d98
Move `MaxIovs` back to a variable in `iovec.go`.
[ { "change_type": "MODIFY", "old_path": "pkg/iovec/BUILD", "new_path": "pkg/iovec/BUILD", "diff": "@@ -4,10 +4,7 @@ package(licenses = [\"notice\"])\ngo_library(\nname = \"iovec\",\n- srcs = [\n- \"iovec.go\",\n- \"iovec_max.go\",\n- ],\n+ srcs = [\"iovec.go\"],\nvisibility = [\"//:sandbox\"],\ndeps = [\"@org_golang_x_sys//unix:go_default_library\"],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/iovec/iovec.go", "new_path": "pkg/iovec/iovec.go", "diff": "@@ -22,6 +22,9 @@ import (\n\"golang.org/x/sys/unix\"\n)\n+// MaxIovs is the maximum number of iovecs host platform can accept.\n+var MaxIovs = 1024\n+\n// Builder is a builder for slice of unix.Iovec.\ntype Builder struct {\niovec []unix.Iovec\n" }, { "change_type": "DELETE", "old_path": "pkg/iovec/iovec_max.go", "new_path": null, "diff": "-// Copyright 2021 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-package iovec\n-\n-// MaxIovs is the maximum number of iovecs host platform can accept. It\n-// corresponds to Linux's UIO_MAXIOV, which is not in the unix package.\n-const MaxIovs = 1024\n" } ]
Go
Apache License 2.0
google/gvisor
Move `MaxIovs` back to a variable in `iovec.go`. PiperOrigin-RevId: 363091954
259,868
15.03.2021 20:12:40
25,200
f7e841c2cede357c4cbd6117605e3f3d54f1961c
Turn sys_thread constants into variables.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_thread.go", "new_path": "pkg/sentry/syscalls/linux/sys_thread.go", "diff": "@@ -31,6 +31,11 @@ import (\n)\nconst (\n+ // exitSignalMask is the signal mask to be sent at exit. Same as CSIGNAL in linux.\n+ exitSignalMask = 0xff\n+)\n+\n+var (\n// ExecMaxTotalSize is the maximum length of all argv and envv entries.\n//\n// N.B. The behavior here is different than Linux. Linux provides a limit on\n@@ -42,9 +47,6 @@ const (\n// ExecMaxElemSize is the maximum length of a single argv or envv entry.\nExecMaxElemSize = 32 * usermem.PageSize\n-\n- // exitSignalMask is the signal mask to be sent at exit. Same as CSIGNAL in linux.\n- exitSignalMask = 0xff\n)\n// Getppid implements linux syscall getppid(2).\n" } ]
Go
Apache License 2.0
google/gvisor
Turn sys_thread constants into variables. PiperOrigin-RevId: 363092268
260,004
16.03.2021 10:28:04
25,200
ebd7c1b889e5d212f4a694d3addbada241936e8e
Do not call into Stack from LinkAddressRequest Calling into the stack from LinkAddressRequest is not needed as we already have a reference to the network endpoint (IPv6) or network interface (IPv4/ARP).
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/arp/arp.go", "new_path": "pkg/tcpip/network/arp/arp.go", "diff": "@@ -305,8 +305,6 @@ func (*endpoint) LinkAddressProtocol() tcpip.NetworkProtocolNumber {\n// LinkAddressRequest implements stack.LinkAddressResolver.LinkAddressRequest.\nfunc (e *endpoint) LinkAddressRequest(targetAddr, localAddr tcpip.Address, remoteLinkAddr tcpip.LinkAddress) tcpip.Error {\n- nicID := e.nic.ID()\n-\nstats := e.stats.arp\nif len(remoteLinkAddr) == 0 {\n@@ -314,9 +312,9 @@ func (e *endpoint) LinkAddressRequest(targetAddr, localAddr tcpip.Address, remot\n}\nif len(localAddr) == 0 {\n- addr, ok := e.protocol.stack.GetMainNICAddress(nicID, header.IPv4ProtocolNumber)\n- if !ok {\n- return &tcpip.ErrUnknownNICID{}\n+ addr, err := e.nic.PrimaryAddress(header.IPv4ProtocolNumber)\n+ if err != nil {\n+ return err\n}\nif len(addr.Address) == 0 {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ip_test.go", "new_path": "pkg/tcpip/network/ip_test.go", "diff": "@@ -335,6 +335,10 @@ func (*testInterface) HandleNeighborConfirmation(tcpip.NetworkProtocolNumber, tc\nreturn nil\n}\n+func (*testInterface) PrimaryAddress(tcpip.NetworkProtocolNumber) (tcpip.AddressWithPrefix, tcpip.Error) {\n+ return tcpip.AddressWithPrefix{}, nil\n+}\n+\nfunc (*testInterface) CheckLocalAddress(tcpip.NetworkProtocolNumber, tcpip.Address) bool {\nreturn false\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/icmp.go", "new_path": "pkg/tcpip/network/ipv6/icmp.go", "diff": "@@ -899,13 +899,16 @@ func (e *endpoint) LinkAddressRequest(targetAddr, localAddr tcpip.Address, remot\n}\nif len(localAddr) == 0 {\n+ // Find an address that we can use as our source address.\naddressEndpoint := e.AcquireOutgoingPrimaryAddress(remoteAddr, false /* allowExpired */)\nif addressEndpoint == nil {\nreturn &tcpip.ErrNetworkUnreachable{}\n}\nlocalAddr = addressEndpoint.AddressWithPrefix().Address\n- } else if e.protocol.stack.CheckLocalAddress(e.nic.ID(), ProtocolNumber, localAddr) == 0 {\n+ addressEndpoint.DecRef()\n+ } else if !e.checkLocalAddress(localAddr) {\n+ // The provided local address is not assigned to us.\nreturn &tcpip.ErrBadLocalAddress{}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/icmp_test.go", "new_path": "pkg/tcpip/network/ipv6/icmp_test.go", "diff": "@@ -155,6 +155,10 @@ func (t *testInterface) HandleNeighborConfirmation(tcpip.NetworkProtocolNumber,\nreturn nil\n}\n+func (*testInterface) PrimaryAddress(tcpip.NetworkProtocolNumber) (tcpip.AddressWithPrefix, tcpip.Error) {\n+ return tcpip.AddressWithPrefix{}, nil\n+}\n+\nfunc (*testInterface) CheckLocalAddress(tcpip.NetworkProtocolNumber, tcpip.Address) bool {\nreturn false\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/nic.go", "new_path": "pkg/tcpip/stack/nic.go", "diff": "@@ -568,23 +568,19 @@ func (n *nic) primaryAddresses() []tcpip.ProtocolAddress {\nreturn addrs\n}\n-// primaryAddress returns the primary address associated with this NIC.\n-//\n-// primaryAddress will return the first non-deprecated address if such an\n-// address exists. 
If no non-deprecated address exists, the first deprecated\n-// address will be returned.\n-func (n *nic) primaryAddress(proto tcpip.NetworkProtocolNumber) tcpip.AddressWithPrefix {\n+// PrimaryAddress implements NetworkInterface.\n+func (n *nic) PrimaryAddress(proto tcpip.NetworkProtocolNumber) (tcpip.AddressWithPrefix, tcpip.Error) {\nep, ok := n.networkEndpoints[proto]\nif !ok {\n- return tcpip.AddressWithPrefix{}\n+ return tcpip.AddressWithPrefix{}, &tcpip.ErrUnknownProtocol{}\n}\naddressableEndpoint, ok := ep.(AddressableEndpoint)\nif !ok {\n- return tcpip.AddressWithPrefix{}\n+ return tcpip.AddressWithPrefix{}, &tcpip.ErrNotSupported{}\n}\n- return addressableEndpoint.MainAddress()\n+ return addressableEndpoint.MainAddress(), nil\n}\n// removeAddress removes an address from n.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/registration.go", "new_path": "pkg/tcpip/stack/registration.go", "diff": "@@ -525,6 +525,14 @@ type NetworkInterface interface {\n// assigned to it.\nSpoofing() bool\n+ // PrimaryAddress returns the primary address associated with the interface.\n+ //\n+ // PrimaryAddress will return the first non-deprecated address if such an\n+ // address exists. If no non-deprecated addresses exist, the first deprecated\n+ // address will be returned. If no deprecated addresses exist, the zero value\n+ // will be returned.\n+ PrimaryAddress(tcpip.NetworkProtocolNumber) (tcpip.AddressWithPrefix, tcpip.Error)\n+\n// CheckLocalAddress returns true if the address exists on the interface.\nCheckLocalAddress(tcpip.NetworkProtocolNumber, tcpip.Address) bool\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack.go", "new_path": "pkg/tcpip/stack/stack.go", "diff": "@@ -1224,7 +1224,8 @@ func (s *Stack) GetMainNICAddress(id tcpip.NICID, protocol tcpip.NetworkProtocol\nreturn tcpip.AddressWithPrefix{}, false\n}\n- return nic.primaryAddress(protocol), true\n+ addr, err := nic.PrimaryAddress(protocol)\n+ return addr, err == nil\n}\nfunc (s *Stack) getAddressEP(nic *nic, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber) AssignableAddressEndpoint {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack_test.go", "new_path": "pkg/tcpip/stack/stack_test.go", "diff": "@@ -1926,6 +1926,39 @@ func TestGetMainNICAddressAddPrimaryNonPrimary(t *testing.T) {\n}\n}\n+func TestGetMainNICAddressErrors(t *testing.T) {\n+ const nicID = 1\n+\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, arp.NewProtocol},\n+ })\n+ if err := s.CreateNIC(nicID, loopback.New()); err != nil {\n+ t.Fatalf(\"CreateNIC(%d, _): %s\", nicID, err)\n+ }\n+\n+ // Sanity check with a successful call.\n+ if addr, ok := s.GetMainNICAddress(nicID, ipv4.ProtocolNumber); !ok {\n+ t.Errorf(\"got s.GetMainNICAddress(%d, %d) = (%s, false), want = (_, true)\", nicID, ipv4.ProtocolNumber, addr)\n+ } else if want := (tcpip.AddressWithPrefix{}); addr != want {\n+ t.Errorf(\"got s.GetMainNICAddress(%d, %d) = (%s, _), want = (%s, _)\", nicID, ipv4.ProtocolNumber, addr, want)\n+ }\n+\n+ const unknownNICID = nicID + 1\n+ if addr, ok := s.GetMainNICAddress(unknownNICID, ipv4.ProtocolNumber); ok {\n+ t.Errorf(\"got s.GetMainNICAddress(%d, %d) = (%s, true), want = (_, false)\", unknownNICID, ipv4.ProtocolNumber, addr)\n+ }\n+\n+ // ARP is not an addressable network endpoint.\n+ if addr, ok := s.GetMainNICAddress(nicID, arp.ProtocolNumber); ok {\n+ t.Errorf(\"got s.GetMainNICAddress(%d, %d) = (%s, true), want = (_, false)\", nicID, 
arp.ProtocolNumber, addr)\n+ }\n+\n+ const unknownProtocolNumber = 1234\n+ if addr, ok := s.GetMainNICAddress(nicID, unknownProtocolNumber); ok {\n+ t.Errorf(\"got s.GetMainNICAddress(%d, %d) = (%s, true), want = (_, false)\", nicID, unknownProtocolNumber, addr)\n+ }\n+}\n+\nfunc TestGetMainNICAddressAddRemove(t *testing.T) {\ns := stack.New(stack.Options{\nNetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},\n@@ -2507,11 +2540,15 @@ func TestNICAutoGenLinkLocalAddr(t *testing.T) {\n}\n}\n- // Check that we get no address after removal.\n- if err := checkGetMainNICAddress(s, 1, fakeNetNumber, tcpip.AddressWithPrefix{}); err != nil {\n+ if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, expectedMainAddr); err != nil {\nt.Fatal(err)\n}\n- if err := checkGetMainNICAddress(s, 1, header.IPv6ProtocolNumber, expectedMainAddr); err != nil {\n+\n+ // Disabling the NIC should remove the auto-generated address.\n+ if err := s.DisableNIC(nicID); err != nil {\n+ t.Fatalf(\"s.DisableNIC(%d): %s\", nicID, err)\n+ }\n+ if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {\nt.Fatal(err)\n}\n})\n" } ]
Go
Apache License 2.0
google/gvisor
Do not call into Stack from LinkAddressRequest Calling into the stack from LinkAddressRequest is not needed as we already have a reference to the network endpoint (IPv6) or network interface (IPv4/ARP). PiperOrigin-RevId: 363213973
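Aside, not part of the commit record: a sketch of how the two-value GetMainNICAddress result reads after this change, mirroring the new test; the NIC ID and protocol set are chosen arbitrarily:

    package main

    import (
        "fmt"

        "gvisor.dev/gvisor/pkg/tcpip"
        "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
        "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
        "gvisor.dev/gvisor/pkg/tcpip/stack"
    )

    func main() {
        s := stack.New(stack.Options{
            NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
        })
        const nicID = tcpip.NICID(1)
        if err := s.CreateNIC(nicID, loopback.New()); err != nil {
            panic(err)
        }
        // The boolean result now reflects nic.PrimaryAddress: false for an
        // unknown NIC or for a protocol with no addressable endpoint (e.g. ARP).
        addr, ok := s.GetMainNICAddress(nicID, ipv4.ProtocolNumber)
        fmt.Println(addr, ok)
    }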
260,004
16.03.2021 12:02:52
25,200
05193de1ccaf487a175dead4121c62b99e02d0f5
Unexport methods on NDPOption They are not used outside of the header package.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ndp_options.go", "new_path": "pkg/tcpip/header/ndp_options.go", "diff": "@@ -26,33 +26,33 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip\"\n)\n-// NDPOptionIdentifier is an NDP option type identifier.\n-type NDPOptionIdentifier uint8\n+// ndpOptionIdentifier is an NDP option type identifier.\n+type ndpOptionIdentifier uint8\nconst (\n- // NDPSourceLinkLayerAddressOptionType is the type of the Source Link Layer\n+ // ndpSourceLinkLayerAddressOptionType is the type of the Source Link Layer\n// Address option, as per RFC 4861 section 4.6.1.\n- NDPSourceLinkLayerAddressOptionType NDPOptionIdentifier = 1\n+ ndpSourceLinkLayerAddressOptionType ndpOptionIdentifier = 1\n- // NDPTargetLinkLayerAddressOptionType is the type of the Target Link Layer\n+ // ndpTargetLinkLayerAddressOptionType is the type of the Target Link Layer\n// Address option, as per RFC 4861 section 4.6.1.\n- NDPTargetLinkLayerAddressOptionType NDPOptionIdentifier = 2\n+ ndpTargetLinkLayerAddressOptionType ndpOptionIdentifier = 2\n- // NDPPrefixInformationType is the type of the Prefix Information\n+ // ndpPrefixInformationType is the type of the Prefix Information\n// option, as per RFC 4861 section 4.6.2.\n- NDPPrefixInformationType NDPOptionIdentifier = 3\n+ ndpPrefixInformationType ndpOptionIdentifier = 3\n- // NDPNonceOptionType is the type of the Nonce option, as per\n+ // ndpNonceOptionType is the type of the Nonce option, as per\n// RFC 3971 section 5.3.2.\n- NDPNonceOptionType NDPOptionIdentifier = 14\n+ ndpNonceOptionType ndpOptionIdentifier = 14\n- // NDPRecursiveDNSServerOptionType is the type of the Recursive DNS\n+ // ndpRecursiveDNSServerOptionType is the type of the Recursive DNS\n// Server option, as per RFC 8106 section 5.1.\n- NDPRecursiveDNSServerOptionType NDPOptionIdentifier = 25\n+ ndpRecursiveDNSServerOptionType ndpOptionIdentifier = 25\n- // NDPDNSSearchListOptionType is the type of the DNS Search List option,\n+ // ndpDNSSearchListOptionType is the type of the DNS Search List option,\n// as per RFC 8106 section 5.2.\n- NDPDNSSearchListOptionType NDPOptionIdentifier = 31\n+ ndpDNSSearchListOptionType ndpOptionIdentifier = 31\n)\nconst (\n@@ -202,7 +202,7 @@ func (i *NDPOptionIterator) Next() (NDPOption, bool, error) {\n// bytes for the whole option.\nreturn nil, true, fmt.Errorf(\"unexpectedly exhausted buffer when reading the option's Type field: %w\", io.ErrUnexpectedEOF)\n}\n- kind := NDPOptionIdentifier(temp)\n+ kind := ndpOptionIdentifier(temp)\n// Get the Length field.\nlength, err := i.opts.ReadByte()\n@@ -229,16 +229,16 @@ func (i *NDPOptionIterator) Next() (NDPOption, bool, error) {\n}\nswitch kind {\n- case NDPSourceLinkLayerAddressOptionType:\n+ case ndpSourceLinkLayerAddressOptionType:\nreturn NDPSourceLinkLayerAddressOption(body), false, nil\n- case NDPTargetLinkLayerAddressOptionType:\n+ case ndpTargetLinkLayerAddressOptionType:\nreturn NDPTargetLinkLayerAddressOption(body), false, nil\n- case NDPNonceOptionType:\n+ case ndpNonceOptionType:\nreturn NDPNonceOption(body), false, nil\n- case NDPPrefixInformationType:\n+ case ndpPrefixInformationType:\n// Make sure the length of a Prefix Information option\n// body is ndpPrefixInformationLength, as per RFC 4861\n// section 4.6.2.\n@@ -248,7 +248,7 @@ func (i *NDPOptionIterator) Next() (NDPOption, bool, error) {\nreturn NDPPrefixInformation(body), false, nil\n- case NDPRecursiveDNSServerOptionType:\n+ case ndpRecursiveDNSServerOptionType:\nopt := NDPRecursiveDNSServer(body)\nif err := 
opt.checkAddresses(); err != nil {\nreturn nil, true, err\n@@ -256,7 +256,7 @@ func (i *NDPOptionIterator) Next() (NDPOption, bool, error) {\nreturn opt, false, nil\n- case NDPDNSSearchListOptionType:\n+ case ndpDNSSearchListOptionType:\nopt := NDPDNSSearchList(body)\nif err := opt.checkDomainNames(); err != nil {\nreturn nil, true, err\n@@ -323,7 +323,7 @@ func (b NDPOptions) Serialize(s NDPOptionsSerializer) int {\ncontinue\n}\n- b[0] = byte(o.Type())\n+ b[0] = byte(o.kind())\n// We know this safe because paddedLength would have returned\n// 0 if o had an invalid length (> 255 * lengthByteUnits).\n@@ -348,11 +348,11 @@ func (b NDPOptions) Serialize(s NDPOptionsSerializer) int {\ntype NDPOption interface {\nfmt.Stringer\n- // Type returns the type of the receiver.\n- Type() NDPOptionIdentifier\n+ // kind returns the type of the receiver.\n+ kind() ndpOptionIdentifier\n- // Length returns the length of the body of the receiver, in bytes.\n- Length() int\n+ // length returns the length of the body of the receiver, in bytes.\n+ length() int\n// serializeInto serializes the receiver into the provided byte\n// buffer.\n@@ -372,7 +372,7 @@ type NDPOption interface {\n// paddedLength returns the length of o, in bytes, with any padding bytes, if\n// required.\nfunc paddedLength(o NDPOption) int {\n- l := o.Length()\n+ l := o.length()\nif l == 0 {\nreturn 0\n@@ -429,13 +429,13 @@ func (b NDPOptionsSerializer) Length() int {\n// where X is the value in Length multiplied by lengthByteUnits - 2 bytes.\ntype NDPNonceOption []byte\n-// Type implements NDPOption.\n-func (o NDPNonceOption) Type() NDPOptionIdentifier {\n- return NDPNonceOptionType\n+// kind implements NDPOption.\n+func (o NDPNonceOption) kind() ndpOptionIdentifier {\n+ return ndpNonceOptionType\n}\n-// Length implements NDPOption.\n-func (o NDPNonceOption) Length() int {\n+// length implements NDPOption.\n+func (o NDPNonceOption) length() int {\nreturn len(o)\n}\n@@ -461,22 +461,22 @@ func (o NDPNonceOption) Nonce() []byte {\n// where X is the value in Length multiplied by lengthByteUnits - 2 bytes.\ntype NDPSourceLinkLayerAddressOption tcpip.LinkAddress\n-// Type implements NDPOption.Type.\n-func (o NDPSourceLinkLayerAddressOption) Type() NDPOptionIdentifier {\n- return NDPSourceLinkLayerAddressOptionType\n+// kind implements NDPOption.\n+func (o NDPSourceLinkLayerAddressOption) kind() ndpOptionIdentifier {\n+ return ndpSourceLinkLayerAddressOptionType\n}\n-// Length implements NDPOption.Length.\n-func (o NDPSourceLinkLayerAddressOption) Length() int {\n+// length implements NDPOption.\n+func (o NDPSourceLinkLayerAddressOption) length() int {\nreturn len(o)\n}\n-// serializeInto implements NDPOption.serializeInto.\n+// serializeInto implements NDPOption.\nfunc (o NDPSourceLinkLayerAddressOption) serializeInto(b []byte) int {\nreturn copy(b, o)\n}\n-// String implements fmt.Stringer.String.\n+// String implements fmt.Stringer.\nfunc (o NDPSourceLinkLayerAddressOption) String() string {\nreturn fmt.Sprintf(\"%T(%s)\", o, tcpip.LinkAddress(o))\n}\n@@ -501,22 +501,22 @@ func (o NDPSourceLinkLayerAddressOption) EthernetAddress() tcpip.LinkAddress {\n// where X is the value in Length multiplied by lengthByteUnits - 2 bytes.\ntype NDPTargetLinkLayerAddressOption tcpip.LinkAddress\n-// Type implements NDPOption.Type.\n-func (o NDPTargetLinkLayerAddressOption) Type() NDPOptionIdentifier {\n- return NDPTargetLinkLayerAddressOptionType\n+// kind implements NDPOption.\n+func (o NDPTargetLinkLayerAddressOption) kind() ndpOptionIdentifier {\n+ return 
ndpTargetLinkLayerAddressOptionType\n}\n-// Length implements NDPOption.Length.\n-func (o NDPTargetLinkLayerAddressOption) Length() int {\n+// length implements NDPOption.\n+func (o NDPTargetLinkLayerAddressOption) length() int {\nreturn len(o)\n}\n-// serializeInto implements NDPOption.serializeInto.\n+// serializeInto implements NDPOption.\nfunc (o NDPTargetLinkLayerAddressOption) serializeInto(b []byte) int {\nreturn copy(b, o)\n}\n-// String implements fmt.Stringer.String.\n+// String implements fmt.Stringer.\nfunc (o NDPTargetLinkLayerAddressOption) String() string {\nreturn fmt.Sprintf(\"%T(%s)\", o, tcpip.LinkAddress(o))\n}\n@@ -541,17 +541,17 @@ func (o NDPTargetLinkLayerAddressOption) EthernetAddress() tcpip.LinkAddress {\n// ndpPrefixInformationLength bytes.\ntype NDPPrefixInformation []byte\n-// Type implements NDPOption.Type.\n-func (o NDPPrefixInformation) Type() NDPOptionIdentifier {\n- return NDPPrefixInformationType\n+// kind implements NDPOption.\n+func (o NDPPrefixInformation) kind() ndpOptionIdentifier {\n+ return ndpPrefixInformationType\n}\n-// Length implements NDPOption.Length.\n-func (o NDPPrefixInformation) Length() int {\n+// length implements NDPOption.\n+func (o NDPPrefixInformation) length() int {\nreturn ndpPrefixInformationLength\n}\n-// serializeInto implements NDPOption.serializeInto.\n+// serializeInto implements NDPOption.\nfunc (o NDPPrefixInformation) serializeInto(b []byte) int {\nused := copy(b, o)\n@@ -567,7 +567,7 @@ func (o NDPPrefixInformation) serializeInto(b []byte) int {\nreturn used\n}\n-// String implements fmt.Stringer.String.\n+// String implements fmt.Stringer.\nfunc (o NDPPrefixInformation) String() string {\nreturn fmt.Sprintf(\"%T(O=%t, A=%t, PL=%s, VL=%s, Prefix=%s)\",\no,\n@@ -665,17 +665,17 @@ type NDPRecursiveDNSServer []byte\n// Type returns the type of an NDP Recursive DNS Server option.\n//\n-// Type implements NDPOption.Type.\n-func (NDPRecursiveDNSServer) Type() NDPOptionIdentifier {\n- return NDPRecursiveDNSServerOptionType\n+// kind implements NDPOption.\n+func (NDPRecursiveDNSServer) kind() ndpOptionIdentifier {\n+ return ndpRecursiveDNSServerOptionType\n}\n-// Length implements NDPOption.Length.\n-func (o NDPRecursiveDNSServer) Length() int {\n+// length implements NDPOption.\n+func (o NDPRecursiveDNSServer) length() int {\nreturn len(o)\n}\n-// serializeInto implements NDPOption.serializeInto.\n+// serializeInto implements NDPOption.\nfunc (o NDPRecursiveDNSServer) serializeInto(b []byte) int {\nused := copy(b, o)\n@@ -687,7 +687,7 @@ func (o NDPRecursiveDNSServer) serializeInto(b []byte) int {\nreturn used\n}\n-// String implements fmt.Stringer.String.\n+// String implements fmt.Stringer.\nfunc (o NDPRecursiveDNSServer) String() string {\nlt := o.Lifetime()\naddrs, err := o.Addresses()\n@@ -760,17 +760,17 @@ func (o NDPRecursiveDNSServer) iterAddresses(fn func(tcpip.Address)) error {\n// RFC 8106 section 5.2.\ntype NDPDNSSearchList []byte\n-// Type implements NDPOption.Type.\n-func (o NDPDNSSearchList) Type() NDPOptionIdentifier {\n- return NDPDNSSearchListOptionType\n+// kind implements NDPOption.\n+func (o NDPDNSSearchList) kind() ndpOptionIdentifier {\n+ return ndpDNSSearchListOptionType\n}\n-// Length implements NDPOption.Length.\n-func (o NDPDNSSearchList) Length() int {\n+// length implements NDPOption.\n+func (o NDPDNSSearchList) length() int {\nreturn len(o)\n}\n-// serializeInto implements NDPOption.serializeInto.\n+// serializeInto implements NDPOption.\nfunc (o NDPDNSSearchList) serializeInto(b []byte) int 
{\nused := copy(b, o)\n@@ -782,7 +782,7 @@ func (o NDPDNSSearchList) serializeInto(b []byte) int {\nreturn used\n}\n-// String implements fmt.Stringer.String.\n+// String implements fmt.Stringer.\nfunc (o NDPDNSSearchList) String() string {\nlt := o.Lifetime()\ndomainNames, err := o.DomainNames()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ndp_test.go", "new_path": "pkg/tcpip/header/ndp_test.go", "diff": "@@ -233,8 +233,8 @@ func TestOpts(t *testing.T) {\ncheckNonce := func(expectedNonce []byte) func(*testing.T, NDPOption) {\nreturn func(t *testing.T, opt NDPOption) {\n- if got := opt.Type(); got != NDPNonceOptionType {\n- t.Errorf(\"got Type() = %d, want = %d\", got, NDPNonceOptionType)\n+ if got := opt.kind(); got != ndpNonceOptionType {\n+ t.Errorf(\"got kind() = %d, want = %d\", got, ndpNonceOptionType)\n}\nnonce, ok := opt.(NDPNonceOption)\nif !ok {\n@@ -248,8 +248,8 @@ func TestOpts(t *testing.T) {\ncheckTLL := func(expectedAddr tcpip.LinkAddress) func(*testing.T, NDPOption) {\nreturn func(t *testing.T, opt NDPOption) {\n- if got := opt.Type(); got != NDPTargetLinkLayerAddressOptionType {\n- t.Errorf(\"got Type() = %d, want = %d\", got, NDPTargetLinkLayerAddressOptionType)\n+ if got := opt.kind(); got != ndpTargetLinkLayerAddressOptionType {\n+ t.Errorf(\"got kind() = %d, want = %d\", got, ndpTargetLinkLayerAddressOptionType)\n}\ntll, ok := opt.(NDPTargetLinkLayerAddressOption)\nif !ok {\n@@ -263,8 +263,8 @@ func TestOpts(t *testing.T) {\ncheckSLL := func(expectedAddr tcpip.LinkAddress) func(*testing.T, NDPOption) {\nreturn func(t *testing.T, opt NDPOption) {\n- if got := opt.Type(); got != NDPSourceLinkLayerAddressOptionType {\n- t.Errorf(\"got Type() = %d, want = %d\", got, NDPSourceLinkLayerAddressOptionType)\n+ if got := opt.kind(); got != ndpSourceLinkLayerAddressOptionType {\n+ t.Errorf(\"got kind() = %d, want = %d\", got, ndpSourceLinkLayerAddressOptionType)\n}\nsll, ok := opt.(NDPSourceLinkLayerAddressOption)\nif !ok {\n@@ -443,15 +443,15 @@ func TestOpts(t *testing.T) {\nopt: NDPRecursiveDNSServer(rdnssBytes[optionHeaderLen:]),\nexpectedBuf: expectedRDNSSBytes[:],\ncheck: func(t *testing.T, opt NDPOption) {\n- if got := opt.Type(); got != NDPRecursiveDNSServerOptionType {\n- t.Errorf(\"got Type() = %d, want = %d\", got, NDPRecursiveDNSServerOptionType)\n+ if got := opt.kind(); got != ndpRecursiveDNSServerOptionType {\n+ t.Errorf(\"got kind() = %d, want = %d\", got, ndpRecursiveDNSServerOptionType)\n}\nrdnss, ok := opt.(NDPRecursiveDNSServer)\nif !ok {\nt.Fatalf(\"got opt = %T, want = NDPRecursiveDNSServer\", opt)\n}\n- if got, want := rdnss.Length(), len(expectedRDNSSBytes[optionHeaderLen:]); got != want {\n- t.Errorf(\"got Length() = %d, want = %d\", got, want)\n+ if got, want := rdnss.length(), len(expectedRDNSSBytes[optionHeaderLen:]); got != want {\n+ t.Errorf(\"got length() = %d, want = %d\", got, want)\n}\nif got, want := rdnss.Lifetime(), validLifetimeSeconds*time.Second; got != want {\nt.Errorf(\"got Lifetime() = %s, want = %s\", got, want)\n@@ -470,16 +470,16 @@ func TestOpts(t *testing.T) {\nopt: NDPDNSSearchList(searchListBytes[optionHeaderLen:]),\nexpectedBuf: expectedSearchListBytes[:],\ncheck: func(t *testing.T, opt NDPOption) {\n- if got := opt.Type(); got != NDPDNSSearchListOptionType {\n- t.Errorf(\"got Type() = %d, want = %d\", got, NDPDNSSearchListOptionType)\n+ if got := opt.kind(); got != ndpDNSSearchListOptionType {\n+ t.Errorf(\"got kind() = %d, want = %d\", got, ndpDNSSearchListOptionType)\n}\ndnssl, ok := opt.(NDPDNSSearchList)\nif 
!ok {\nt.Fatalf(\"got opt = %T, want = NDPDNSSearchList\", opt)\n}\n- if got, want := dnssl.Length(), len(expectedRDNSSBytes[optionHeaderLen:]); got != want {\n- t.Errorf(\"got Length() = %d, want = %d\", got, want)\n+ if got, want := dnssl.length(), len(expectedRDNSSBytes[optionHeaderLen:]); got != want {\n+ t.Errorf(\"got length() = %d, want = %d\", got, want)\n}\nif got, want := dnssl.Lifetime(), validLifetimeSeconds*time.Second; got != want {\nt.Errorf(\"got Lifetime() = %s, want = %s\", got, want)\n@@ -500,8 +500,8 @@ func TestOpts(t *testing.T) {\nopt: NDPPrefixInformation(prefixInformationBytes[optionHeaderLen:]),\nexpectedBuf: expectedPrefixInformationBytes[:],\ncheck: func(t *testing.T, opt NDPOption) {\n- if got := opt.Type(); got != NDPPrefixInformationType {\n- t.Errorf(\"got Type() = %d, want = %d\", got, NDPPrefixInformationType)\n+ if got := opt.kind(); got != ndpPrefixInformationType {\n+ t.Errorf(\"got kind() = %d, want = %d\", got, ndpPrefixInformationType)\n}\npi, ok := opt.(NDPPrefixInformation)\n@@ -509,8 +509,8 @@ func TestOpts(t *testing.T) {\nt.Fatalf(\"got opt = %T, want = NDPPrefixInformation\", opt)\n}\n- if got, want := pi.Length(), len(expectedPrefixInformationBytes[optionHeaderLen:]); got != want {\n- t.Errorf(\"got Length() = %d, want = %d\", got, want)\n+ if got, want := pi.length(), len(expectedPrefixInformationBytes[optionHeaderLen:]); got != want {\n+ t.Errorf(\"got length() = %d, want = %d\", got, want)\n}\nif got := pi.PrefixLength(); got != prefixLength {\nt.Errorf(\"got PrefixLength() = %d, want = %d\", got, prefixLength)\n@@ -646,8 +646,8 @@ func TestNDPRecursiveDNSServerOption(t *testing.T) {\nif done {\nt.Fatal(\"got Next = (_, true, _), want = (_, false, _)\")\n}\n- if got := next.Type(); got != NDPRecursiveDNSServerOptionType {\n- t.Fatalf(\"got Type = %d, want = %d\", got, NDPRecursiveDNSServerOptionType)\n+ if got := next.kind(); got != ndpRecursiveDNSServerOptionType {\n+ t.Fatalf(\"got Type = %d, want = %d\", got, ndpRecursiveDNSServerOptionType)\n}\nopt, ok := next.(NDPRecursiveDNSServer)\n@@ -1403,8 +1403,8 @@ func TestNDPOptionsIter(t *testing.T) {\nif got, want := []byte(next.(NDPSourceLinkLayerAddressOption)), buf[2:][:6]; !bytes.Equal(got, want) {\nt.Errorf(\"got Next = (%x, _, _), want = (%x, _, _)\", got, want)\n}\n- if got := next.Type(); got != NDPSourceLinkLayerAddressOptionType {\n- t.Errorf(\"got Type = %d, want = %d\", got, NDPSourceLinkLayerAddressOptionType)\n+ if got := next.kind(); got != ndpSourceLinkLayerAddressOptionType {\n+ t.Errorf(\"got Type = %d, want = %d\", got, ndpSourceLinkLayerAddressOptionType)\n}\n// Test the next (Target Link-Layer) option.\n@@ -1418,8 +1418,8 @@ func TestNDPOptionsIter(t *testing.T) {\nif got, want := []byte(next.(NDPTargetLinkLayerAddressOption)), buf[10:][:6]; !bytes.Equal(got, want) {\nt.Errorf(\"got Next = (%x, _, _), want = (%x, _, _)\", got, want)\n}\n- if got := next.Type(); got != NDPTargetLinkLayerAddressOptionType {\n- t.Errorf(\"got Type = %d, want = %d\", got, NDPTargetLinkLayerAddressOptionType)\n+ if got := next.kind(); got != ndpTargetLinkLayerAddressOptionType {\n+ t.Errorf(\"got Type = %d, want = %d\", got, ndpTargetLinkLayerAddressOptionType)\n}\n// Test the next (Prefix Information) option.\n@@ -1434,8 +1434,8 @@ func TestNDPOptionsIter(t *testing.T) {\nif got, want := next.(NDPPrefixInformation), buf[34:][:30]; !bytes.Equal(got, want) {\nt.Errorf(\"got Next = (%x, _, _), want = (%x, _, _)\", got, want)\n}\n- if got := next.Type(); got != NDPPrefixInformationType {\n- 
t.Errorf(\"got Type = %d, want = %d\", got, NDPPrefixInformationType)\n+ if got := next.kind(); got != ndpPrefixInformationType {\n+ t.Errorf(\"got Type = %d, want = %d\", got, ndpPrefixInformationType)\n}\n// Iterator should not return anything else.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ndpoptionidentifier_string.go", "new_path": "pkg/tcpip/header/ndpoptionidentifier_string.go", "diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-// Code generated by \"stringer -type NDPOptionIdentifier\"; DO NOT EDIT.\n+// Code generated by \"stringer -type ndpOptionIdentifier\"; DO NOT EDIT.\npackage header\n@@ -22,37 +22,37 @@ func _() {\n// An \"invalid array index\" compiler error signifies that the constant values have changed.\n// Re-run the stringer command to generate them again.\nvar x [1]struct{}\n- _ = x[NDPSourceLinkLayerAddressOptionType-1]\n- _ = x[NDPTargetLinkLayerAddressOptionType-2]\n- _ = x[NDPPrefixInformationType-3]\n- _ = x[NDPNonceOptionType-14]\n- _ = x[NDPRecursiveDNSServerOptionType-25]\n- _ = x[NDPDNSSearchListOptionType-31]\n+ _ = x[ndpSourceLinkLayerAddressOptionType-1]\n+ _ = x[ndpTargetLinkLayerAddressOptionType-2]\n+ _ = x[ndpPrefixInformationType-3]\n+ _ = x[ndpNonceOptionType-14]\n+ _ = x[ndpRecursiveDNSServerOptionType-25]\n+ _ = x[ndpDNSSearchListOptionType-31]\n}\nconst (\n- _NDPOptionIdentifier_name_0 = \"NDPSourceLinkLayerAddressOptionTypeNDPTargetLinkLayerAddressOptionTypeNDPPrefixInformationType\"\n- _NDPOptionIdentifier_name_1 = \"NDPNonceOptionType\"\n- _NDPOptionIdentifier_name_2 = \"NDPRecursiveDNSServerOptionType\"\n- _NDPOptionIdentifier_name_3 = \"NDPDNSSearchListOptionType\"\n+ _ndpOptionIdentifier_name_0 = \"ndpSourceLinkLayerAddressOptionTypendpTargetLinkLayerAddressOptionTypendpPrefixInformationType\"\n+ _ndpOptionIdentifier_name_1 = \"ndpNonceOptionType\"\n+ _ndpOptionIdentifier_name_2 = \"ndpRecursiveDNSServerOptionType\"\n+ _ndpOptionIdentifier_name_3 = \"ndpDNSSearchListOptionType\"\n)\nvar (\n- _NDPOptionIdentifier_index_0 = [...]uint8{0, 35, 70, 94}\n+ _ndpOptionIdentifier_index_0 = [...]uint8{0, 35, 70, 94}\n)\n-func (i NDPOptionIdentifier) String() string {\n+func (i ndpOptionIdentifier) String() string {\nswitch {\ncase 1 <= i && i <= 3:\ni -= 1\n- return _NDPOptionIdentifier_name_0[_NDPOptionIdentifier_index_0[i]:_NDPOptionIdentifier_index_0[i+1]]\n+ return _ndpOptionIdentifier_name_0[_ndpOptionIdentifier_index_0[i]:_ndpOptionIdentifier_index_0[i+1]]\ncase i == 14:\n- return _NDPOptionIdentifier_name_1\n+ return _ndpOptionIdentifier_name_1\ncase i == 25:\n- return _NDPOptionIdentifier_name_2\n+ return _ndpOptionIdentifier_name_2\ncase i == 31:\n- return _NDPOptionIdentifier_name_3\n+ return _ndpOptionIdentifier_name_3\ndefault:\n- return \"NDPOptionIdentifier(\" + strconv.FormatInt(int64(i), 10) + \")\"\n+ return \"ndpOptionIdentifier(\" + strconv.FormatInt(int64(i), 10) + \")\"\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Unexport methods on NDPOption. They are not used outside of the header package. PiperOrigin-RevId: 363237708
259,891
16.03.2021 14:53:42
25,200
607a1e481c276c8ab0c3e194ed04b38bc07b71b6
setgid directory support in overlayfs
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/overlay/filesystem.go", "new_path": "pkg/sentry/fsimpl/overlay/filesystem.go", "diff": "@@ -689,13 +689,9 @@ func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\n}\nreturn err\n}\n- creds := rp.Credentials()\n+\nif err := vfsObj.SetStatAt(ctx, fs.creds, &pop, &vfs.SetStatOptions{\n- Stat: linux.Statx{\n- Mask: linux.STATX_UID | linux.STATX_GID,\n- UID: uint32(creds.EffectiveKUID),\n- GID: uint32(creds.EffectiveKGID),\n- },\n+ Stat: parent.newChildOwnerStat(opts.Mode, rp.Credentials()),\n}); err != nil {\nif cleanupErr := vfsObj.RmdirAt(ctx, fs.creds, &pop); cleanupErr != nil {\npanic(fmt.Sprintf(\"unrecoverable overlayfs inconsistency: failed to delete upper layer directory after MkdirAt metadata update failure: %v\", cleanupErr))\n@@ -750,11 +746,7 @@ func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\n}\ncreds := rp.Credentials()\nif err := vfsObj.SetStatAt(ctx, fs.creds, &pop, &vfs.SetStatOptions{\n- Stat: linux.Statx{\n- Mask: linux.STATX_UID | linux.STATX_GID,\n- UID: uint32(creds.EffectiveKUID),\n- GID: uint32(creds.EffectiveKGID),\n- },\n+ Stat: parent.newChildOwnerStat(opts.Mode, creds),\n}); err != nil {\nif cleanupErr := vfsObj.UnlinkAt(ctx, fs.creds, &pop); cleanupErr != nil {\npanic(fmt.Sprintf(\"unrecoverable overlayfs inconsistency: failed to delete upper layer file after MknodAt metadata update failure: %v\", cleanupErr))\n@@ -963,14 +955,11 @@ func (fs *filesystem) createAndOpenLocked(ctx context.Context, rp *vfs.Resolving\n}\nreturn nil, err\n}\n+\n// Change the file's owner to the caller. We can't use upperFD.SetStat()\n// because it will pick up creds from ctx.\nif err := vfsObj.SetStatAt(ctx, fs.creds, &pop, &vfs.SetStatOptions{\n- Stat: linux.Statx{\n- Mask: linux.STATX_UID | linux.STATX_GID,\n- UID: uint32(creds.EffectiveKUID),\n- GID: uint32(creds.EffectiveKGID),\n- },\n+ Stat: parent.newChildOwnerStat(opts.Mode, creds),\n}); err != nil {\nif cleanupErr := vfsObj.UnlinkAt(ctx, fs.creds, &pop); cleanupErr != nil {\npanic(fmt.Sprintf(\"unrecoverable overlayfs inconsistency: failed to delete upper layer file after OpenAt(O_CREAT) metadata update failure: %v\", cleanupErr))\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/overlay/overlay.go", "new_path": "pkg/sentry/fsimpl/overlay/overlay.go", "diff": "@@ -749,6 +749,27 @@ func (d *dentry) mayDelete(creds *auth.Credentials, child *dentry) error {\n)\n}\n+// newChildOwnerStat returns a Statx for configuring the UID, GID, and mode of\n+// children.\n+func (d *dentry) newChildOwnerStat(mode linux.FileMode, creds *auth.Credentials) linux.Statx {\n+ stat := linux.Statx{\n+ Mask: uint32(linux.STATX_UID | linux.STATX_GID),\n+ UID: uint32(creds.EffectiveKUID),\n+ GID: uint32(creds.EffectiveKGID),\n+ }\n+ // Set GID and possibly the SGID bit if the parent is an SGID directory.\n+ d.copyMu.RLock()\n+ defer d.copyMu.RUnlock()\n+ if atomic.LoadUint32(&d.mode)&linux.ModeSetGID == linux.ModeSetGID {\n+ stat.GID = atomic.LoadUint32(&d.gid)\n+ if stat.Mode&linux.ModeDirectory == linux.ModeDirectory {\n+ stat.Mode = uint16(mode) | linux.ModeSetGID\n+ stat.Mask |= linux.STATX_MODE\n+ }\n+ }\n+ return stat\n+}\n+\n// fileDescription is embedded by overlay implementations of\n// vfs.FileDescriptionImpl.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/overlay/regular_file.go", "new_path": "pkg/sentry/fsimpl/overlay/regular_file.go", "diff": "@@ -205,6 +205,20 @@ func (fd *regularFileFD) 
SetStat(ctx context.Context, opts vfs.SetStatOptions) e\nif err := wrappedFD.SetStat(ctx, opts); err != nil {\nreturn err\n}\n+\n+ // Changing owners may clear one or both of the setuid and setgid bits,\n+ // so we may have to update opts before setting d.mode.\n+ if opts.Stat.Mask&(linux.STATX_UID|linux.STATX_GID) != 0 {\n+ stat, err := wrappedFD.Stat(ctx, vfs.StatOptions{\n+ Mask: linux.STATX_MODE,\n+ })\n+ if err != nil {\n+ return err\n+ }\n+ opts.Stat.Mode = stat.Mode\n+ opts.Stat.Mask |= linux.STATX_MODE\n+ }\n+\nd.updateAfterSetStatLocked(&opts)\nif ev := vfs.InotifyEventFromStatMask(opts.Stat.Mask); ev != 0 {\nd.InotifyWithParent(ctx, ev, 0, vfs.InodeEvent)\n@@ -295,7 +309,11 @@ func (fd *regularFileFD) PWrite(ctx context.Context, src usermem.IOSequence, off\nreturn 0, err\n}\ndefer wrappedFD.DecRef(ctx)\n- return wrappedFD.PWrite(ctx, src, offset, opts)\n+ n, err := wrappedFD.PWrite(ctx, src, offset, opts)\n+ if err != nil {\n+ return n, err\n+ }\n+ return fd.updateSetUserGroupIDs(ctx, wrappedFD, n)\n}\n// Write implements vfs.FileDescriptionImpl.Write.\n@@ -307,7 +325,28 @@ func (fd *regularFileFD) Write(ctx context.Context, src usermem.IOSequence, opts\nif err != nil {\nreturn 0, err\n}\n- return wrappedFD.Write(ctx, src, opts)\n+ n, err := wrappedFD.Write(ctx, src, opts)\n+ if err != nil {\n+ return n, err\n+ }\n+ return fd.updateSetUserGroupIDs(ctx, wrappedFD, n)\n+}\n+\n+func (fd *regularFileFD) updateSetUserGroupIDs(ctx context.Context, wrappedFD *vfs.FileDescription, written int64) (int64, error) {\n+ // Writing can clear the setuid and/or setgid bits. We only have to\n+ // check this if something was written and one of those bits was set.\n+ dentry := fd.dentry()\n+ if written == 0 || atomic.LoadUint32(&dentry.mode)&(linux.S_ISUID|linux.S_ISGID) == 0 {\n+ return written, nil\n+ }\n+ stat, err := wrappedFD.Stat(ctx, vfs.StatOptions{Mask: linux.STATX_MODE})\n+ if err != nil {\n+ return written, err\n+ }\n+ dentry.copyMu.Lock()\n+ defer dentry.copyMu.Unlock()\n+ atomic.StoreUint32(&dentry.mode, uint32(stat.Mode))\n+ return written, nil\n}\n// Seek implements vfs.FileDescriptionImpl.Seek.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -2162,6 +2162,7 @@ cc_binary(\n\"//test/util:temp_path\",\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n+ \"@com_google_absl//absl/flags:flag\",\n\"@com_google_absl//absl/strings\",\ngtest,\n],\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/setgid.cc", "new_path": "test/syscalls/linux/setgid.cc", "diff": "#include <unistd.h>\n#include \"gtest/gtest.h\"\n+#include \"absl/flags/flag.h\"\n#include \"test/util/capability_util.h\"\n#include \"test/util/cleanup.h\"\n#include \"test/util/fs_util.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n+ABSL_FLAG(std::vector<std::string>, groups, std::vector<std::string>({}),\n+ \"groups the test can use\");\n+\n+constexpr gid_t kNobody = 65534;\n+\nnamespace gvisor {\nnamespace testing {\n@@ -46,6 +52,18 @@ PosixErrorOr<Cleanup> Setegid(gid_t egid) {\n// Returns a pair of groups that the user is a member of.\nPosixErrorOr<std::pair<gid_t, gid_t>> Groups() {\n+ // Were we explicitly passed GIDs?\n+ std::vector<std::string> flagged_groups = absl::GetFlag(FLAGS_groups);\n+ if (flagged_groups.size() >= 2) {\n+ int group1;\n+ int group2;\n+ if (!absl::SimpleAtoi(flagged_groups[0], &group1) ||\n+ !absl::SimpleAtoi(flagged_groups[1], &group2)) {\n+ return PosixError(EINVAL, \"failed 
converting group flags to ints\");\n+ }\n+ return std::pair<gid_t, gid_t>(group1, group2);\n+ }\n+\n// See whether the user is a member of at least 2 groups.\nstd::vector<gid_t> groups(64);\nfor (; groups.size() <= NGROUPS_MAX; groups.resize(groups.size() * 2)) {\n@@ -58,26 +76,47 @@ PosixErrorOr<std::pair<gid_t, gid_t>> Groups() {\nreturn PosixError(errno, absl::StrFormat(\"getgroups(%d, %p)\",\ngroups.size(), groups.data()));\n}\n- if (ngroups >= 2) {\n- return std::pair<gid_t, gid_t>(groups[0], groups[1]);\n- }\n+\n+ if (ngroups < 2) {\n// There aren't enough groups.\nbreak;\n}\n- // If we're root in the root user namespace, we can set our GID to whatever we\n- // want. Try that before giving up.\n- constexpr gid_t kGID1 = 1111;\n- constexpr gid_t kGID2 = 2222;\n- auto cleanup1 = Setegid(kGID1);\n+ // TODO(b/181878080): Read /proc/sys/fs/overflowgid once it is supported in\n+ // gVisor.\n+ if (groups[0] == kNobody || groups[1] == kNobody) {\n+ // These groups aren't mapped into our user namespace, so we can't use\n+ // them.\n+ break;\n+ }\n+ return std::pair<gid_t, gid_t>(groups[0], groups[1]);\n+ }\n+\n+ // If we're running in gVisor and are root in the root user namespace, we can\n+ // set our GID to whatever we want. Try that before giving up.\n+ //\n+ // This won't work in native tests, as despite having CAP_SETGID, the gofer\n+ // process will be sandboxed and unable to change file GIDs.\n+ if (!IsRunningOnGvisor()) {\n+ return PosixError(EPERM, \"no valid groups for native testing\");\n+ }\n+ PosixErrorOr<bool> capable = HaveCapability(CAP_SETGID);\n+ if (!capable.ok()) {\n+ return capable.error();\n+ }\n+ if (!capable.ValueOrDie()) {\n+ return PosixError(EPERM, \"missing CAP_SETGID\");\n+ }\n+ gid_t gid = getegid();\n+ auto cleanup1 = Setegid(gid);\nif (!cleanup1.ok()) {\nreturn cleanup1.error();\n}\n- auto cleanup2 = Setegid(kGID2);\n+ auto cleanup2 = Setegid(kNobody);\nif (!cleanup2.ok()) {\nreturn cleanup2.error();\n}\n- return std::pair<gid_t, gid_t>(kGID1, kGID2);\n+ return std::pair<gid_t, gid_t>(gid, kNobody);\n}\nclass SetgidDirTest : public ::testing::Test {\n@@ -85,17 +124,20 @@ class SetgidDirTest : public ::testing::Test {\nvoid SetUp() override {\noriginal_gid_ = getegid();\n- // TODO(b/175325250): Enable when setgid directories are supported.\nSKIP_IF(IsRunningWithVFS1());\n- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETGID)));\ntemp_dir_ = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0777 /* mode */));\n- groups_ = ASSERT_NO_ERRNO_AND_VALUE(Groups());\n+\n+ // If we can't find two usable groups, we're in an unsupporting environment.\n+ // Skip the test.\n+ PosixErrorOr<std::pair<gid_t, gid_t>> groups = Groups();\n+ SKIP_IF(!groups.ok());\n+ groups_ = groups.ValueOrDie();\n}\nvoid TearDown() override {\n- ASSERT_THAT(setegid(original_gid_), SyscallSucceeds());\n+ EXPECT_THAT(setegid(original_gid_), SyscallSucceeds());\n}\nvoid MkdirAsGid(gid_t gid, const std::string& path, mode_t mode) {\n@@ -131,7 +173,7 @@ TEST_F(SetgidDirTest, Control) {\nASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.first, g1owned, 0777));\n// Set group to G2, create a file in g1owned, and confirm that G2 owns it.\n- ASSERT_THAT(setegid(groups_.second), SyscallSucceeds());\n+ auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(Setegid(groups_.second));\nFileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(\nOpen(JoinPath(g1owned, \"g2owned\").c_str(), O_CREAT | O_RDWR, 0777));\nstruct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(fd));\n@@ -146,7 +188,7 @@ 
TEST_F(SetgidDirTest, CreateFile) {\nASSERT_THAT(chmod(g1owned.c_str(), kDirmodeSgid), SyscallSucceeds());\n// Set group to G2, create a file, and confirm that G1 owns it.\n- ASSERT_THAT(setegid(groups_.second), SyscallSucceeds());\n+ auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(Setegid(groups_.second));\nFileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(\nOpen(JoinPath(g1owned, \"g2created\").c_str(), O_CREAT | O_RDWR, 0666));\nstruct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(fd));\n@@ -194,7 +236,7 @@ TEST_F(SetgidDirTest, OldFile) {\nASSERT_THAT(chmod(g1owned.c_str(), kDirmodeNoSgid), SyscallSucceeds());\n// Set group to G2, create a file, confirm that G2 owns it.\n- ASSERT_THAT(setegid(groups_.second), SyscallSucceeds());\n+ auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(Setegid(groups_.second));\nFileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(\nOpen(JoinPath(g1owned, \"g2created\").c_str(), O_CREAT | O_RDWR, 0666));\nstruct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(fd));\n@@ -217,7 +259,7 @@ TEST_F(SetgidDirTest, OldDir) {\nASSERT_THAT(chmod(g1owned.c_str(), kDirmodeNoSgid), SyscallSucceeds());\n// Set group to G2, create a directory, confirm that G2 owns it.\n- ASSERT_THAT(setegid(groups_.second), SyscallSucceeds());\n+ auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(Setegid(groups_.second));\nauto g2created = JoinPath(g1owned, \"g2created\");\nASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.second, g2created, 0666));\nstruct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(g2created));\n" } ]
Go
Apache License 2.0
google/gvisor
setgid directory support in overlayfs. PiperOrigin-RevId: 363276495
259,898
16.03.2021 16:57:31
25,200
3dd7ad13b4586791cdc1c1efdef90760a7deff8e
Fix tcp_fin_retransmission_netstack_test. Netstack does not check ACK number for FIN-ACK packets and goes into TIMEWAIT unconditionally. Fixing the state machine will give us back the retransmission of FIN.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/rcv.go", "new_path": "pkg/tcpip/transport/tcp/rcv.go", "diff": "@@ -260,7 +260,7 @@ func (r *receiver) consumeSegment(s *segment, segSeq seqnum.Value, segLen seqnum\ncase StateEstablished:\nr.ep.setEndpointState(StateCloseWait)\ncase StateFinWait1:\n- if s.flagIsSet(header.TCPFlagAck) {\n+ if s.flagIsSet(header.TCPFlagAck) && s.ackNumber == r.ep.snd.sndNxt {\n// FIN-ACK, transition to TIME-WAIT.\nr.ep.setEndpointState(StateTimeWait)\n} else {\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/runner/defs.bzl", "new_path": "test/packetimpact/runner/defs.bzl", "diff": "@@ -289,8 +289,6 @@ ALL_TESTS = [\n),\nPacketimpactTestInfo(\nname = \"tcp_fin_retransmission\",\n- # TODO(b/181625316): Fix netstack then remove the line below.\n- expect_netstack_failure = True,\n),\n]\n" } ]
Go
Apache License 2.0
google/gvisor
Fix tcp_fin_retransmission_netstack_test. Netstack does not check ACK number for FIN-ACK packets and goes into TIMEWAIT unconditionally. Fixing the state machine will give us back the retransmission of FIN. PiperOrigin-RevId: 363301883
260,004
17.03.2021 12:25:59
25,200
d3a433caae7111973915197a9ecc61660df3b810
Do not use martian loopback packets in tests. Transport demuxer and UDP tests should not use a loopback address as the source address for packets injected into the stack, as martian loopback packets will be dropped in a later change.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/transport_demuxer_test.go", "new_path": "pkg/tcpip/stack/transport_demuxer_test.go", "diff": "@@ -33,8 +33,8 @@ import (\n)\nconst (\n- testSrcAddrV6 = \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\"\n- testDstAddrV6 = \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\"\n+ testSrcAddrV6 = \"\\x0a\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\"\n+ testDstAddrV6 = \"\\x0a\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\"\ntestSrcAddrV4 = \"\\x0a\\x00\\x00\\x01\"\ntestDstAddrV4 = \"\\x0a\\x00\\x00\\x02\"\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/udp_test.go", "new_path": "pkg/tcpip/transport/udp/udp_test.go", "diff": "@@ -45,8 +45,8 @@ import (\n// represents the remote endpoint.\nconst (\nv4MappedAddrPrefix = \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xff\"\n- stackV6Addr = \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\"\n- testV6Addr = \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\"\n+ stackV6Addr = \"\\x0a\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\"\n+ testV6Addr = \"\\x0a\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\"\nstackV4MappedAddr = v4MappedAddrPrefix + stackAddr\ntestV4MappedAddr = v4MappedAddrPrefix + testAddr\nmulticastV4MappedAddr = v4MappedAddrPrefix + multicastAddr\n" } ]
Go
Apache License 2.0
google/gvisor
Do not use martian loopback packets in tests. Transport demuxer and UDP tests should not use a loopback address as the source address for packets injected into the stack, as martian loopback packets will be dropped in a later change. PiperOrigin-RevId: 363479681
259,975
18.03.2021 12:11:59
25,200
29be908ab69d2d333572f6990d331e494b1e51fd
Address post submit comments for fs benchmarks. Also, drop fio total reads/writes to 1GB as 10GB is prohibitively slow.
[ { "change_type": "MODIFY", "old_path": ".buildkite/pipeline.yaml", "new_path": ".buildkite/pipeline.yaml", "diff": "@@ -186,10 +186,10 @@ steps:\n# For fio, running with --test.benchtime=Xs scales the written/read\n# bytes to several GB. This is not a problem for root/bind/volume mounts,\n# but for tmpfs mounts, the size can grow to more memory than the machine\n- # has availabe. Fix the runs to 10GB written/read for the benchmark.\n+ # has availabe. Fix the runs to 1GB written/read for the benchmark.\n- <<: *benchmarks\nlabel: \":floppy_disk: FIO benchmarks\"\n- command: make benchmark-platforms BENCHMARKS_SUITE=fio BENCHMARKS_TARGETS=test/benchmarks/fs:fio_test BENCHMARKS_OPTIONS=--test.benchtime=10000x\n+ command: make benchmark-platforms BENCHMARKS_SUITE=fio BENCHMARKS_TARGETS=test/benchmarks/fs:fio_test BENCHMARKS_OPTIONS=--test.benchtime=1000x\n- <<: *benchmarks\nlabel: \":globe_with_meridians: HTTPD benchmarks\"\ncommand: make benchmark-platforms BENCHMARKS_FILTER=\"Continuous\" BENCHMARKS_SUITE=httpd BENCHMARKS_TARGETS=test/benchmarks/network:httpd_test\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/fs/BUILD", "new_path": "test/benchmarks/fs/BUILD", "diff": "@@ -8,6 +8,7 @@ benchmark_test(\nsrcs = [\"bazel_test.go\"],\nvisibility = [\"//:sandbox\"],\ndeps = [\n+ \"//pkg/cleanup\",\n\"//pkg/test/dockerutil\",\n\"//test/benchmarks/harness\",\n\"//test/benchmarks/tools\",\n@@ -21,6 +22,7 @@ benchmark_test(\nsrcs = [\"fio_test.go\"],\nvisibility = [\"//:sandbox\"],\ndeps = [\n+ \"//pkg/cleanup\",\n\"//pkg/test/dockerutil\",\n\"//test/benchmarks/harness\",\n\"//test/benchmarks/tools\",\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/fs/bazel_test.go", "new_path": "test/benchmarks/fs/bazel_test.go", "diff": "@@ -20,6 +20,7 @@ import (\n\"strings\"\n\"testing\"\n+ \"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/test/dockerutil\"\n\"gvisor.dev/gvisor/test/benchmarks/harness\"\n\"gvisor.dev/gvisor/test/benchmarks/tools\"\n@@ -29,7 +30,7 @@ import (\n// and if the mount on which we are compiling is a tmpfs/bind mount.\ntype benchmark struct {\nclearCache bool // clearCache drops caches before running.\n- fstype string // type of filesystem to use.\n+ fstype harness.FileSystemType // type of filesystem to use.\n}\n// Note: CleanCache versions of this test require running with root permissions.\n@@ -48,12 +49,12 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {\n// Get a machine from the Harness on which to run.\nmachine, err := harness.GetMachine()\nif err != nil {\n- b.Fatalf(\"failed to get machine: %v\", err)\n+ b.Fatalf(\"Failed to get machine: %v\", err)\n}\ndefer machine.CleanUp()\nbenchmarks := make([]benchmark, 0, 6)\n- for _, filesys := range []string{harness.BindFS, harness.TmpFS, harness.RootFS} {\n+ for _, filesys := range []harness.FileSystemType{harness.BindFS, harness.TmpFS, harness.RootFS} {\nbenchmarks = append(benchmarks, benchmark{\nclearCache: true,\nfstype: filesys,\n@@ -75,7 +76,7 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {\nfilesystem := tools.Parameter{\nName: \"filesystem\",\n- Value: bm.fstype,\n+ Value: string(bm.fstype),\n}\nname, err := tools.ParametersToName(pageCache, filesystem)\nif err != nil {\n@@ -86,13 +87,14 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {\n// Grab a container.\nctx := context.Background()\ncontainer := machine.GetContainer(ctx, b)\n- defer container.CleanUp(ctx)\n-\n- mts, prefix, cleanup, err := harness.MakeMount(machine, 
bm.fstype)\n+ cu := cleanup.Make(func() {\n+ container.CleanUp(ctx)\n+ })\n+ defer cu.Clean()\n+ mts, prefix, err := harness.MakeMount(machine, bm.fstype, &cu)\nif err != nil {\nb.Fatalf(\"Failed to make mount: %v\", err)\n}\n- defer cleanup()\nrunOpts := dockerutil.RunOpts{\nImage: image,\n@@ -104,8 +106,9 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {\nb.Fatalf(\"run failed with: %v\", err)\n}\n+ cpCmd := fmt.Sprintf(\"mkdir -p %s && cp -r %s %s/.\", prefix, workdir, prefix)\nif out, err := container.Exec(ctx, dockerutil.ExecOpts{},\n- \"cp\", \"-rf\", workdir, prefix+\"/.\"); err != nil {\n+ \"/bin/sh\", \"-c\", cpCmd); err != nil {\nb.Fatalf(\"failed to copy directory: %v (%s)\", err, out)\n}\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/fs/fio_test.go", "new_path": "test/benchmarks/fs/fio_test.go", "diff": "@@ -21,6 +21,7 @@ import (\n\"strings\"\n\"testing\"\n+ \"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/test/dockerutil\"\n\"gvisor.dev/gvisor/test/benchmarks/harness\"\n\"gvisor.dev/gvisor/test/benchmarks/tools\"\n@@ -69,7 +70,7 @@ func BenchmarkFio(b *testing.B) {\n}\ndefer machine.CleanUp()\n- for _, fsType := range []string{harness.BindFS, harness.TmpFS, harness.RootFS} {\n+ for _, fsType := range []harness.FileSystemType{harness.BindFS, harness.TmpFS, harness.RootFS} {\nfor _, tc := range testCases {\noperation := tools.Parameter{\nName: \"operation\",\n@@ -81,7 +82,7 @@ func BenchmarkFio(b *testing.B) {\n}\nfilesystem := tools.Parameter{\nName: \"filesystem\",\n- Value: fsType,\n+ Value: string(fsType),\n}\nname, err := tools.ParametersToName(operation, blockSize, filesystem)\nif err != nil {\n@@ -90,15 +91,18 @@ func BenchmarkFio(b *testing.B) {\nb.Run(name, func(b *testing.B) {\nb.StopTimer()\ntc.Size = b.N\n+\nctx := context.Background()\ncontainer := machine.GetContainer(ctx, b)\n- defer container.CleanUp(ctx)\n+ cu := cleanup.Make(func() {\n+ container.CleanUp(ctx)\n+ })\n+ defer cu.Clean()\n- mnts, outdir, mountCleanup, err := harness.MakeMount(machine, fsType)\n+ mnts, outdir, err := harness.MakeMount(machine, fsType, &cu)\nif err != nil {\nb.Fatalf(\"failed to make mount: %v\", err)\n}\n- defer mountCleanup()\n// Start the container with the mount.\nif err := container.Spawn(\n@@ -112,6 +116,11 @@ func BenchmarkFio(b *testing.B) {\nb.Fatalf(\"failed to start fio container with: %v\", err)\n}\n+ if out, err := container.Exec(ctx, dockerutil.ExecOpts{},\n+ \"mkdir\", \"-p\", outdir); err != nil {\n+ b.Fatalf(\"failed to copy directory: %v (%s)\", err, out)\n+ }\n+\n// Directory and filename inside container where fio will read/write.\noutfile := filepath.Join(outdir, \"test.txt\")\n@@ -130,7 +139,6 @@ func BenchmarkFio(b *testing.B) {\n}\ncmd := tc.MakeCmd(outfile)\n-\nif err := harness.DropCaches(machine); err != nil {\nb.Fatalf(\"failed to drop caches: %v\", err)\n}\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/harness/BUILD", "new_path": "test/benchmarks/harness/BUILD", "diff": "@@ -12,6 +12,7 @@ go_library(\n],\nvisibility = [\"//:sandbox\"],\ndeps = [\n+ \"//pkg/cleanup\",\n\"//pkg/test/dockerutil\",\n\"//pkg/test/testutil\",\n\"@com_github_docker_docker//api/types/mount:go_default_library\",\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/harness/util.go", "new_path": "test/benchmarks/harness/util.go", "diff": "@@ -22,6 +22,7 @@ import (\n\"testing\"\n\"github.com/docker/docker/api/types/mount\"\n+ 
\"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/test/dockerutil\"\n\"gvisor.dev/gvisor/pkg/test/testutil\"\n)\n@@ -58,52 +59,55 @@ func DebugLog(b *testing.B, msg string, args ...interface{}) {\n}\n}\n+// FileSystemType represents a type container mount.\n+type FileSystemType string\n+\nconst (\n// BindFS indicates a bind mount should be created.\n- BindFS = \"bindfs\"\n+ BindFS FileSystemType = \"bindfs\"\n// TmpFS indicates a tmpfs mount should be created.\n- TmpFS = \"tmpfs\"\n+ TmpFS FileSystemType = \"tmpfs\"\n// RootFS indicates no mount should be created and the root mount should be used.\n- RootFS = \"rootfs\"\n+ RootFS FileSystemType = \"rootfs\"\n)\n// MakeMount makes a mount and cleanup based on the requested type. Bind\n// and volume mounts are backed by a temp directory made with mktemp.\n// tmpfs mounts require no such backing and are just made.\n// rootfs mounts do not make a mount, but instead return a target direectory at root.\n-// It is up to the caller to call the returned cleanup.\n-func MakeMount(machine Machine, fsType string) ([]mount.Mount, string, func(), error) {\n+// It is up to the caller to call Clean on the passed *cleanup.Cleanup\n+func MakeMount(machine Machine, fsType FileSystemType, cu *cleanup.Cleanup) ([]mount.Mount, string, error) {\nmounts := make([]mount.Mount, 0, 1)\n+ target := \"/data\"\nswitch fsType {\ncase BindFS:\ndir, err := machine.RunCommand(\"mktemp\", \"-d\")\nif err != nil {\n- return mounts, \"\", func() {}, fmt.Errorf(\"failed to create tempdir: %v\", err)\n+ return mounts, \"\", fmt.Errorf(\"failed to create tempdir: %v\", err)\n}\ndir = strings.TrimSuffix(dir, \"\\n\")\n-\n+ cu.Add(func() {\n+ machine.RunCommand(\"rm\", \"-rf\", dir)\n+ })\nout, err := machine.RunCommand(\"chmod\", \"777\", dir)\nif err != nil {\n- machine.RunCommand(\"rm\", \"-rf\", dir)\n- return mounts, \"\", func() {}, fmt.Errorf(\"failed modify directory: %v %s\", err, out)\n+ return mounts, \"\", fmt.Errorf(\"failed modify directory: %v %s\", err, out)\n}\n- target := \"/data\"\nmounts = append(mounts, mount.Mount{\nTarget: target,\nSource: dir,\nType: mount.TypeBind,\n})\n- return mounts, target, func() { machine.RunCommand(\"rm\", \"-rf\", dir) }, nil\n+ return mounts, target, nil\ncase RootFS:\n- return mounts, \"/\", func() {}, nil\n+ return mounts, target, nil\ncase TmpFS:\n- target := \"/data\"\nmounts = append(mounts, mount.Mount{\nTarget: target,\nType: mount.TypeTmpfs,\n})\n- return mounts, target, func() {}, nil\n+ return mounts, target, nil\ndefault:\n- return mounts, \"\", func() {}, fmt.Errorf(\"illegal mount type not supported: %v\", fsType)\n+ return mounts, \"\", fmt.Errorf(\"illegal mount type not supported: %v\", fsType)\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Address post submit comments for fs benchmarks. Also, drop fio total reads/writes to 1GB as 10GB is prohibitively slow. PiperOrigin-RevId: 363714060
259,898
22.03.2021 00:04:17
25,200
cbac2d9f97031bdc18cb301e95db7c052dccc1ee
Fix and merge tcp_{outside_the_window,tcp_unacc_seq_ack}_closing. The tests were not using the correct windowSize so the testing segments were actually within the window for seqNumOffset=0 tests. The issue is already fixed by #5674.
[ { "change_type": "MODIFY", "old_path": "test/packetimpact/runner/defs.bzl", "new_path": "test/packetimpact/runner/defs.bzl", "diff": "@@ -202,11 +202,6 @@ ALL_TESTS = [\nPacketimpactTestInfo(\nname = \"tcp_outside_the_window\",\n),\n- PacketimpactTestInfo(\n- name = \"tcp_outside_the_window_closing\",\n- # TODO(b/181625316): Fix netstack then merge into tcp_outside_the_window.\n- expect_netstack_failure = True,\n- ),\nPacketimpactTestInfo(\nname = \"tcp_noaccept_close_rst\",\n),\n@@ -216,11 +211,6 @@ ALL_TESTS = [\nPacketimpactTestInfo(\nname = \"tcp_unacc_seq_ack\",\n),\n- PacketimpactTestInfo(\n- name = \"tcp_unacc_seq_ack_closing\",\n- # TODO(b/181625316): Fix netstack then merge into tcp_unacc_seq_ack.\n- expect_netstack_failure = True,\n- ),\nPacketimpactTestInfo(\nname = \"tcp_paws_mechanism\",\n# TODO(b/156682000): Fix netstack then remove the line below.\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/BUILD", "new_path": "test/packetimpact/tests/BUILD", "diff": "@@ -123,17 +123,6 @@ packetimpact_testbench(\n],\n)\n-packetimpact_testbench(\n- name = \"tcp_outside_the_window_closing\",\n- srcs = [\"tcp_outside_the_window_closing_test.go\"],\n- deps = [\n- \"//pkg/tcpip/header\",\n- \"//pkg/tcpip/seqnum\",\n- \"//test/packetimpact/testbench\",\n- \"@org_golang_x_sys//unix:go_default_library\",\n- ],\n-)\n-\npacketimpact_testbench(\nname = \"tcp_noaccept_close_rst\",\nsrcs = [\"tcp_noaccept_close_rst_test.go\"],\n@@ -165,17 +154,6 @@ packetimpact_testbench(\n],\n)\n-packetimpact_testbench(\n- name = \"tcp_unacc_seq_ack_closing\",\n- srcs = [\"tcp_unacc_seq_ack_closing_test.go\"],\n- deps = [\n- \"//pkg/tcpip/header\",\n- \"//pkg/tcpip/seqnum\",\n- \"//test/packetimpact/testbench\",\n- \"@org_golang_x_sys//unix:go_default_library\",\n- ],\n-)\n-\npacketimpact_testbench(\nname = \"tcp_paws_mechanism\",\nsrcs = [\"tcp_paws_mechanism_test.go\"],\n" }, { "change_type": "DELETE", "old_path": "test/packetimpact/tests/tcp_outside_the_window_closing_test.go", "new_path": null, "diff": "-// Copyright 2021 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-package tcp_outside_the_window_closing_test\n-\n-import (\n- \"flag\"\n- \"fmt\"\n- \"testing\"\n- \"time\"\n-\n- \"golang.org/x/sys/unix\"\n- \"gvisor.dev/gvisor/pkg/tcpip/header\"\n- \"gvisor.dev/gvisor/pkg/tcpip/seqnum\"\n- \"gvisor.dev/gvisor/test/packetimpact/testbench\"\n-)\n-\n-func init() {\n- testbench.Initialize(flag.CommandLine)\n-}\n-\n-// TestAckOTWSeqInClosing tests that the DUT should send an ACK with\n-// the right ACK number when receiving a packet with OTW Seq number\n-// in CLOSING state. 
https://tools.ietf.org/html/rfc793#page-69\n-func TestAckOTWSeqInClosing(t *testing.T) {\n- for seqNumOffset := seqnum.Size(0); seqNumOffset < 3; seqNumOffset++ {\n- for _, tt := range []struct {\n- description string\n- flags header.TCPFlags\n- payloads testbench.Layers\n- }{\n- {\"SYN\", header.TCPFlagSyn, nil},\n- {\"SYNACK\", header.TCPFlagSyn | header.TCPFlagAck, nil},\n- {\"ACK\", header.TCPFlagAck, nil},\n- {\"FINACK\", header.TCPFlagFin | header.TCPFlagAck, nil},\n- {\"Data\", header.TCPFlagAck, []testbench.Layer{&testbench.Payload{Bytes: []byte(\"abc123\")}}},\n- } {\n- t.Run(fmt.Sprintf(\"%s%d\", tt.description, seqNumOffset), func(t *testing.T) {\n- dut := testbench.NewDUT(t)\n- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)\n- defer dut.Close(t, listenFD)\n- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})\n- defer conn.Close(t)\n- conn.Connect(t)\n- acceptFD, _ := dut.Accept(t, listenFD)\n- defer dut.Close(t, acceptFD)\n-\n- dut.Shutdown(t, acceptFD, unix.SHUT_WR)\n-\n- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {\n- t.Fatalf(\"expected FINACK from DUT, but got none: %s\", err)\n- }\n-\n- // Do not ack the FIN from DUT so that the TCP state on DUT is CLOSING instead of CLOSED.\n- seqNumForTheirFIN := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)) - 1)\n- conn.Send(t, testbench.TCP{AckNum: seqNumForTheirFIN, Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)})\n-\n- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {\n- t.Errorf(\"expected an ACK to our FIN, but got none: %s\", err)\n- }\n-\n- windowSize := seqnum.Size(*conn.SynAck(t).WindowSize) + seqNumOffset\n- conn.SendFrameStateless(t, conn.CreateFrame(t, testbench.Layers{&testbench.TCP{\n- SeqNum: testbench.Uint32(uint32(conn.LocalSeqNum(t).Add(windowSize))),\n- AckNum: seqNumForTheirFIN,\n- Flags: testbench.TCPFlags(tt.flags),\n- }}, tt.payloads...))\n-\n- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {\n- t.Errorf(\"expected an ACK but got none: %s\", err)\n- }\n- })\n- }\n- }\n-}\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/tcp_outside_the_window_test.go", "new_path": "test/packetimpact/tests/tcp_outside_the_window_test.go", "diff": "@@ -108,3 +108,75 @@ func TestTCPOutsideTheWindow(t *testing.T) {\n})\n}\n}\n+\n+// TestAckOTWSeqInClosing tests that the DUT should send an ACK with\n+// the right ACK number when receiving a packet with OTW Seq number\n+// in CLOSING state. 
https://tools.ietf.org/html/rfc793#page-69\n+func TestAckOTWSeqInClosing(t *testing.T) {\n+ for _, tt := range []struct {\n+ description string\n+ flags header.TCPFlags\n+ payloads testbench.Layers\n+ seqNumOffset seqnum.Size\n+ expectACK bool\n+ }{\n+ {\"SYN\", header.TCPFlagSyn, nil, 0, true},\n+ {\"SYNACK\", header.TCPFlagSyn | header.TCPFlagAck, nil, 0, true},\n+ {\"ACK\", header.TCPFlagAck, nil, 0, false},\n+ {\"FINACK\", header.TCPFlagFin | header.TCPFlagAck, nil, 0, false},\n+ {\"Data\", header.TCPFlagAck, []testbench.Layer{&testbench.Payload{Bytes: []byte(\"Sample Data\")}}, 0, false},\n+\n+ {\"SYN\", header.TCPFlagSyn, nil, 1, true},\n+ {\"SYNACK\", header.TCPFlagSyn | header.TCPFlagAck, nil, 1, true},\n+ {\"ACK\", header.TCPFlagAck, nil, 1, true},\n+ {\"FINACK\", header.TCPFlagFin | header.TCPFlagAck, nil, 1, true},\n+ {\"Data\", header.TCPFlagAck, []testbench.Layer{&testbench.Payload{Bytes: []byte(\"Sample Data\")}}, 1, true},\n+\n+ {\"SYN\", header.TCPFlagSyn, nil, 2, true},\n+ {\"SYNACK\", header.TCPFlagSyn | header.TCPFlagAck, nil, 2, true},\n+ {\"ACK\", header.TCPFlagAck, nil, 2, true},\n+ {\"FINACK\", header.TCPFlagFin | header.TCPFlagAck, nil, 2, true},\n+ {\"Data\", header.TCPFlagAck, []testbench.Layer{&testbench.Payload{Bytes: []byte(\"Sample Data\")}}, 2, true},\n+ } {\n+ t.Run(fmt.Sprintf(\"%s%d\", tt.description, tt.seqNumOffset), func(t *testing.T) {\n+ dut := testbench.NewDUT(t)\n+ listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)\n+ defer dut.Close(t, listenFD)\n+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})\n+ defer conn.Close(t)\n+ conn.Connect(t)\n+ acceptFD, _ := dut.Accept(t, listenFD)\n+ defer dut.Close(t, acceptFD)\n+\n+ dut.Shutdown(t, acceptFD, unix.SHUT_WR)\n+\n+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {\n+ t.Fatalf(\"expected FINACK from DUT, but got none: %s\", err)\n+ }\n+\n+ // Do not ack the FIN from DUT so that the TCP state on DUT is CLOSING instead of CLOSED.\n+ seqNumForTheirFIN := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)) - 1)\n+ conn.Send(t, testbench.TCP{AckNum: seqNumForTheirFIN, Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)})\n+\n+ gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)\n+ if err != nil {\n+ t.Fatalf(\"expected an ACK to our FIN, but got none: %s\", err)\n+ }\n+\n+ windowSize := seqnum.Size(*gotTCP.WindowSize) + tt.seqNumOffset\n+ conn.SendFrameStateless(t, conn.CreateFrame(t, testbench.Layers{&testbench.TCP{\n+ SeqNum: testbench.Uint32(uint32(conn.LocalSeqNum(t).Add(windowSize))),\n+ AckNum: seqNumForTheirFIN,\n+ Flags: testbench.TCPFlags(tt.flags),\n+ }}, tt.payloads...))\n+\n+ gotACK, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)\n+ if tt.expectACK && err != nil {\n+ t.Errorf(\"expected an ACK but got none: %s\", err)\n+ }\n+ if !tt.expectACK && gotACK != nil {\n+ t.Errorf(\"expected no ACK but got one: %s\", gotACK)\n+ }\n+ })\n+ }\n+}\n" }, { "change_type": "DELETE", "old_path": "test/packetimpact/tests/tcp_unacc_seq_ack_closing_test.go", "new_path": null, "diff": "-// Copyright 2021 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// 
http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-package tcp_unacc_seq_ack_closing_test\n-\n-import (\n- \"flag\"\n- \"fmt\"\n- \"testing\"\n- \"time\"\n-\n- \"golang.org/x/sys/unix\"\n- \"gvisor.dev/gvisor/pkg/tcpip/header\"\n- \"gvisor.dev/gvisor/pkg/tcpip/seqnum\"\n- \"gvisor.dev/gvisor/test/packetimpact/testbench\"\n-)\n-\n-func init() {\n- testbench.Initialize(flag.CommandLine)\n-}\n-\n-func TestSimultaneousCloseUnaccSeqAck(t *testing.T) {\n- for _, tt := range []struct {\n- description string\n- makeTestingTCP func(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset, windowSize seqnum.Size) testbench.TCP\n- seqNumOffset seqnum.Size\n- expectAck bool\n- }{\n- {description: \"OTWSeq\", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 0, expectAck: true},\n- {description: \"OTWSeq\", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 1, expectAck: true},\n- {description: \"OTWSeq\", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 2, expectAck: true},\n- {description: \"UnaccAck\", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 0, expectAck: false},\n- {description: \"UnaccAck\", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 1, expectAck: true},\n- {description: \"UnaccAck\", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 2, expectAck: true},\n- } {\n- t.Run(fmt.Sprintf(\"%s:offset=%d\", tt.description, tt.seqNumOffset), func(t *testing.T) {\n- dut := testbench.NewDUT(t)\n- listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/)\n- defer dut.Close(t, listenFD)\n- conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})\n- defer conn.Close(t)\n-\n- conn.Connect(t)\n- acceptFD, _ := dut.Accept(t, listenFD)\n-\n- // Trigger active close.\n- dut.Shutdown(t, acceptFD, unix.SHUT_WR)\n-\n- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second)\n- if err != nil {\n- t.Fatalf(\"expected a FIN: %s\", err)\n- }\n- // Do not ack the FIN from DUT so that we get to CLOSING.\n- seqNumForTheirFIN := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)) - 1)\n- conn.Send(t, testbench.TCP{AckNum: seqNumForTheirFIN, Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)})\n-\n- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {\n- t.Errorf(\"expected an ACK to our FIN, but got none: %s\", err)\n- }\n-\n- sampleData := []byte(\"Sample Data\")\n- samplePayload := &testbench.Payload{Bytes: sampleData}\n-\n- origSeq := uint32(*conn.LocalSeqNum(t))\n- // Send a segment with OTW Seq / unacc ACK.\n- tcp := tt.makeTestingTCP(t, &conn, tt.seqNumOffset, seqnum.Size(*gotTCP.WindowSize))\n- if tt.description == \"OTWSeq\" {\n- // If we generate an OTW Seq segment, make sure we don't acknowledge their FIN so that\n- // we stay in CLOSING.\n- tcp.AckNum = seqNumForTheirFIN\n- }\n- conn.Send(t, tcp, samplePayload)\n-\n- got, err := conn.Expect(t, testbench.TCP{AckNum: testbench.Uint32(origSeq), Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)\n- if tt.expectAck && 
err != nil {\n- t.Errorf(\"expected an ack in CLOSING state, but got none: %s\", err)\n- }\n- if !tt.expectAck && got != nil {\n- t.Errorf(\"expected no ack in CLOSING state, but got one: %s\", got)\n- }\n- })\n- }\n-}\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/tcp_unacc_seq_ack_test.go", "new_path": "test/packetimpact/tests/tcp_unacc_seq_ack_test.go", "diff": "@@ -209,3 +209,66 @@ func TestActiveCloseUnaccpSeqAck(t *testing.T) {\n})\n}\n}\n+\n+func TestSimultaneousCloseUnaccSeqAck(t *testing.T) {\n+ for _, tt := range []struct {\n+ description string\n+ makeTestingTCP func(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset, windowSize seqnum.Size) testbench.TCP\n+ seqNumOffset seqnum.Size\n+ expectAck bool\n+ }{\n+ {description: \"OTWSeq\", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 0, expectAck: false},\n+ {description: \"OTWSeq\", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 1, expectAck: true},\n+ {description: \"OTWSeq\", makeTestingTCP: testbench.GenerateOTWSeqSegment, seqNumOffset: 2, expectAck: true},\n+ {description: \"UnaccAck\", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 0, expectAck: false},\n+ {description: \"UnaccAck\", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 1, expectAck: true},\n+ {description: \"UnaccAck\", makeTestingTCP: testbench.GenerateUnaccACKSegment, seqNumOffset: 2, expectAck: true},\n+ } {\n+ t.Run(fmt.Sprintf(\"%s:offset=%d\", tt.description, tt.seqNumOffset), func(t *testing.T) {\n+ dut := testbench.NewDUT(t)\n+ listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/)\n+ defer dut.Close(t, listenFD)\n+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})\n+ defer conn.Close(t)\n+\n+ conn.Connect(t)\n+ acceptFD, _ := dut.Accept(t, listenFD)\n+\n+ // Trigger active close.\n+ dut.Shutdown(t, acceptFD, unix.SHUT_WR)\n+\n+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {\n+ t.Fatalf(\"expected a FIN: %s\", err)\n+ }\n+ // Do not ack the FIN from DUT so that we get to CLOSING.\n+ seqNumForTheirFIN := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)) - 1)\n+ conn.Send(t, testbench.TCP{AckNum: seqNumForTheirFIN, Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)})\n+\n+ gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)\n+ if err != nil {\n+ t.Errorf(\"expected an ACK to our FIN, but got none: %s\", err)\n+ }\n+\n+ sampleData := []byte(\"Sample Data\")\n+ samplePayload := &testbench.Payload{Bytes: sampleData}\n+\n+ origSeq := uint32(*conn.LocalSeqNum(t))\n+ // Send a segment with OTW Seq / unacc ACK.\n+ tcp := tt.makeTestingTCP(t, &conn, tt.seqNumOffset, seqnum.Size(*gotTCP.WindowSize))\n+ if tt.description == \"OTWSeq\" {\n+ // If we generate an OTW Seq segment, make sure we don't acknowledge their FIN so that\n+ // we stay in CLOSING.\n+ tcp.AckNum = seqNumForTheirFIN\n+ }\n+ conn.Send(t, tcp, samplePayload)\n+\n+ got, err := conn.Expect(t, testbench.TCP{AckNum: testbench.Uint32(origSeq), Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)\n+ if tt.expectAck && err != nil {\n+ t.Errorf(\"expected an ack in CLOSING state, but got none: %s\", err)\n+ }\n+ if !tt.expectAck && got != nil {\n+ t.Errorf(\"expected no ack in CLOSING state, but got one: %s\", got)\n+ }\n+ })\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix and merge tcp_{outside_the_window,tcp_unacc_seq_ack}_closing. The tests were not using the correct windowSize so the testing segments were actually within the window for seqNumOffset=0 tests. The issue is already fixed by #5674. PiperOrigin-RevId: 364252630
259,985
22.03.2021 12:00:05
25,200
6bd2c6ce7307a6422e9aad26b8b3dec006f75e2d
Emit comment about build tags in gomarshal generated files. This may be useful for tracking down where build tags come from and understanding tag import issues in generated files.
[ { "change_type": "MODIFY", "old_path": "tools/go_marshal/gomarshal/generator.go", "new_path": "tools/go_marshal/gomarshal/generator.go", "diff": "@@ -126,6 +126,12 @@ func (g *Generator) writeHeader() error {\nb.emit(\"// Automatically generated marshal implementation. See tools/go_marshal.\\n\\n\")\n// Emit build tags.\n+ b.emit(\"// If there are issues with build tag aggregation, see\\n\")\n+ b.emit(\"// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here\\n\")\n+ b.emit(\"// come from the input set of files used to generate this file. This input set\\n\")\n+ b.emit(\"// is filtered based on pre-defined file suffixes related to build tags, see \\n\")\n+ b.emit(\"// tools/defs.bzl:calculate_sets().\\n\\n\")\n+\nif t := tags.Aggregate(g.inputs); len(t) > 0 {\nb.emit(strings.Join(t.Lines(), \"\\n\"))\nb.emit(\"\\n\\n\")\n" } ]
Go
Apache License 2.0
google/gvisor
Emit comment about build tags in gomarshal generated files. This may be useful for tracking down where build tags come from and understanding tag import issues in generated files. PiperOrigin-RevId: 364374931
260,004
22.03.2021 12:30:03
25,200
a073d76979d1950a52462823c10b495f4f8c3728
Return tcpip.Error from (*Stack).GetMainNICAddress
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/ipv4.go", "new_path": "pkg/tcpip/network/ipv4/ipv4.go", "diff": "@@ -1619,9 +1619,8 @@ func (e *endpoint) processIPOptions(pkt *stack.PacketBuffer, orig header.IPv4Opt\n// TODO(https://gvisor.dev/issue/4586): This will need tweaking when we start\n// really forwarding packets as we may need to get two addresses, for rx and\n// tx interfaces. We will also have to take usage into account.\n- prefixedAddress, ok := e.protocol.stack.GetMainNICAddress(e.nic.ID(), ProtocolNumber)\n- localAddress := prefixedAddress.Address\n- if !ok {\n+ localAddress := e.MainAddress().Address\n+ if len(localAddress) == 0 {\nh := header.IPv4(pkt.NetworkHeader().View())\ndstAddr := h.DestinationAddress()\nif pkt.NetworkPacketInfo.LocalAddressBroadcast || header.IsV4MulticastAddress(dstAddr) {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ipv6_test.go", "new_path": "pkg/tcpip/network/ipv6/ipv6_test.go", "diff": "@@ -343,6 +343,8 @@ func TestReceiveOnSolicitedNodeAddr(t *testing.T) {\n// TestAddIpv6Address tests adding IPv6 addresses.\nfunc TestAddIpv6Address(t *testing.T) {\n+ const nicID = 1\n+\ntests := []struct {\nname string\naddr tcpip.Address\n@@ -367,18 +369,18 @@ func TestAddIpv6Address(t *testing.T) {\ns := stack.New(stack.Options{\nNetworkProtocols: []stack.NetworkProtocolFactory{NewProtocol},\n})\n- if err := s.CreateNIC(1, &stubLinkEndpoint{}); err != nil {\n- t.Fatalf(\"CreateNIC(_) = %s\", err)\n+ if err := s.CreateNIC(nicID, &stubLinkEndpoint{}); err != nil {\n+ t.Fatalf(\"CreateNIC(%d, _) = %s\", nicID, err)\n}\n- if err := s.AddAddress(1, ProtocolNumber, test.addr); err != nil {\n- t.Fatalf(\"AddAddress(_, %d, nil) = %s\", ProtocolNumber, err)\n+ if err := s.AddAddress(nicID, ProtocolNumber, test.addr); err != nil {\n+ t.Fatalf(\"AddAddress(%d, %d, nil) = %s\", nicID, ProtocolNumber, err)\n}\n- if addr, ok := s.GetMainNICAddress(1, header.IPv6ProtocolNumber); !ok {\n- t.Fatalf(\"got stack.GetMainNICAddress(1, %d) = (_, false), want = (_, true)\", header.IPv6ProtocolNumber)\n+ if addr, err := s.GetMainNICAddress(nicID, ProtocolNumber); err != nil {\n+ t.Fatalf(\"stack.GetMainNICAddress(%d, %d): %s\", nicID, ProtocolNumber, err)\n} else if addr.Address != test.addr {\n- t.Fatalf(\"got stack.GetMainNICAddress(1_, %d) = (%s, true), want = (%s, true)\", header.IPv6ProtocolNumber, addr.Address, test.addr)\n+ t.Fatalf(\"got stack.GetMainNICAddress(%d, %d) = %s, want = %s\", nicID, ProtocolNumber, addr.Address, test.addr)\n}\n})\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack.go", "new_path": "pkg/tcpip/stack/stack.go", "diff": "@@ -1223,21 +1223,19 @@ func (s *Stack) AllAddresses() map[tcpip.NICID][]tcpip.ProtocolAddress {\n}\n// GetMainNICAddress returns the first non-deprecated primary address and prefix\n-// for the given NIC and protocol. If no non-deprecated primary address exists,\n-// a deprecated primary address and prefix will be returned. Returns false if\n-// the NIC doesn't exist and an empty value if the NIC doesn't have a primary\n-// address for the given protocol.\n-func (s *Stack) GetMainNICAddress(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber) (tcpip.AddressWithPrefix, bool) {\n+// for the given NIC and protocol. If no non-deprecated primary addresses exist,\n+// a deprecated address will be returned. 
If no deprecated addresses exist, the\n+// zero value will be returned.\n+func (s *Stack) GetMainNICAddress(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber) (tcpip.AddressWithPrefix, tcpip.Error) {\ns.mu.RLock()\ndefer s.mu.RUnlock()\nnic, ok := s.nics[id]\nif !ok {\n- return tcpip.AddressWithPrefix{}, false\n+ return tcpip.AddressWithPrefix{}, &tcpip.ErrUnknownNICID{}\n}\n- addr, err := nic.PrimaryAddress(protocol)\n- return addr, err == nil\n+ return nic.PrimaryAddress(protocol)\n}\nfunc (s *Stack) getAddressEP(nic *nic, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber) AssignableAddressEndpoint {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack_test.go", "new_path": "pkg/tcpip/stack/stack_test.go", "diff": "@@ -62,10 +62,10 @@ const (\n)\nfunc checkGetMainNICAddress(s *stack.Stack, nicID tcpip.NICID, proto tcpip.NetworkProtocolNumber, want tcpip.AddressWithPrefix) error {\n- if addr, ok := s.GetMainNICAddress(nicID, proto); !ok {\n- return fmt.Errorf(\"got stack.GetMainNICAddress(%d, %d) = (_, false), want = (_, true)\", nicID, proto)\n+ if addr, err := s.GetMainNICAddress(nicID, proto); err != nil {\n+ return fmt.Errorf(\"stack.GetMainNICAddress(%d, %d): %s\", nicID, proto, err)\n} else if addr != want {\n- return fmt.Errorf(\"got stack.GetMainNICAddress(%d, %d) = (%s, true), want = (%s, true)\", nicID, proto, addr, want)\n+ return fmt.Errorf(\"got stack.GetMainNICAddress(%d, %d) = %s, want = %s\", nicID, proto, addr, want)\n}\nreturn nil\n}\n@@ -1854,6 +1854,8 @@ func TestNetworkOption(t *testing.T) {\n}\nfunc TestGetMainNICAddressAddPrimaryNonPrimary(t *testing.T) {\n+ const nicID = 1\n+\nfor _, addrLen := range []int{4, 16} {\nt.Run(fmt.Sprintf(\"addrLen=%d\", addrLen), func(t *testing.T) {\nfor canBe := 0; canBe < 3; canBe++ {\n@@ -1864,8 +1866,8 @@ func TestGetMainNICAddressAddPrimaryNonPrimary(t *testing.T) {\nNetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},\n})\nep := channel.New(10, defaultMTU, \"\")\n- if err := s.CreateNIC(1, ep); err != nil {\n- t.Fatal(\"CreateNIC failed:\", err)\n+ if err := s.CreateNIC(nicID, ep); err != nil {\n+ t.Fatalf(\"CreateNIC(%d, _): %s\", nicID, err)\n}\n// Insert <canBe> primary and <never> never-primary addresses.\n// Each one will add a network endpoint to the NIC.\n@@ -1888,34 +1890,34 @@ func TestGetMainNICAddressAddPrimaryNonPrimary(t *testing.T) {\nPrefixLen: addrLen * 8,\n},\n}\n- if err := s.AddProtocolAddressWithOptions(1, protocolAddress, behavior); err != nil {\n- t.Fatal(\"AddProtocolAddressWithOptions failed:\", err)\n+ if err := s.AddProtocolAddressWithOptions(nicID, protocolAddress, behavior); err != nil {\n+ t.Fatalf(\"AddProtocolAddressWithOptions(%d, %#v, %d): %s\", nicID, protocolAddress, behavior, err)\n}\n// Remember the address/prefix.\nprimaryAddrAdded[protocolAddress.AddressWithPrefix] = struct{}{}\n} else {\n- if err := s.AddAddressWithOptions(1, fakeNetNumber, address, behavior); err != nil {\n- t.Fatal(\"AddAddressWithOptions failed:\", err)\n+ if err := s.AddAddressWithOptions(nicID, fakeNetNumber, address, behavior); err != nil {\n+ t.Fatalf(\"AddAddressWithOptions(%d, %d, %s, %d): %s:\", nicID, fakeNetNumber, address, behavior, err)\n}\n}\n}\n// Check that GetMainNICAddress returns an address if at least\n// one primary address was added. 
In that case make sure the\n// address/prefixLen matches what we added.\n- gotAddr, ok := s.GetMainNICAddress(1, fakeNetNumber)\n- if !ok {\n- t.Fatalf(\"got GetMainNICAddress(1, %d) = (_, false), want = (_, true)\", fakeNetNumber)\n+ gotAddr, err := s.GetMainNICAddress(nicID, fakeNetNumber)\n+ if err != nil {\n+ t.Fatalf(\"GetMainNICAddress(%d, %d): %s\", nicID, fakeNetNumber, err)\n}\nif len(primaryAddrAdded) == 0 {\n// No primary addresses present.\nif wantAddr := (tcpip.AddressWithPrefix{}); gotAddr != wantAddr {\n- t.Fatalf(\"got GetMainNICAddress(1, %d) = (%s, true), want = (%s, true)\", fakeNetNumber, gotAddr, wantAddr)\n+ t.Fatalf(\"got GetMainNICAddress(%d, %d) = %s, want = %s\", nicID, fakeNetNumber, gotAddr, wantAddr)\n}\n} else {\n// At least one primary address was added, verify the returned\n// address is in the list of primary addresses we added.\nif _, ok := primaryAddrAdded[gotAddr]; !ok {\n- t.Fatalf(\"got GetMainNICAddress(1, %d) = (%s, true), want = (%s, true)\", fakeNetNumber, gotAddr, primaryAddrAdded)\n+ t.Fatalf(\"got GetMainNICAddress(%d, %d) = %s, want = %s\", nicID, fakeNetNumber, gotAddr, primaryAddrAdded)\n}\n}\n})\n@@ -1937,25 +1939,31 @@ func TestGetMainNICAddressErrors(t *testing.T) {\n}\n// Sanity check with a successful call.\n- if addr, ok := s.GetMainNICAddress(nicID, ipv4.ProtocolNumber); !ok {\n- t.Errorf(\"got s.GetMainNICAddress(%d, %d) = (%s, false), want = (_, true)\", nicID, ipv4.ProtocolNumber, addr)\n+ if addr, err := s.GetMainNICAddress(nicID, ipv4.ProtocolNumber); err != nil {\n+ t.Errorf(\"s.GetMainNICAddress(%d, %d): %s\", nicID, ipv4.ProtocolNumber, err)\n} else if want := (tcpip.AddressWithPrefix{}); addr != want {\n- t.Errorf(\"got s.GetMainNICAddress(%d, %d) = (%s, _), want = (%s, _)\", nicID, ipv4.ProtocolNumber, addr, want)\n+ t.Errorf(\"got s.GetMainNICAddress(%d, %d) = %s, want = %s\", nicID, ipv4.ProtocolNumber, addr, want)\n}\nconst unknownNICID = nicID + 1\n- if addr, ok := s.GetMainNICAddress(unknownNICID, ipv4.ProtocolNumber); ok {\n- t.Errorf(\"got s.GetMainNICAddress(%d, %d) = (%s, true), want = (_, false)\", unknownNICID, ipv4.ProtocolNumber, addr)\n+ switch addr, err := s.GetMainNICAddress(unknownNICID, ipv4.ProtocolNumber); err.(type) {\n+ case *tcpip.ErrUnknownNICID:\n+ default:\n+ t.Errorf(\"got s.GetMainNICAddress(%d, %d) = (%s, %T), want = (_, tcpip.ErrUnknownNICID)\", unknownNICID, ipv4.ProtocolNumber, addr, err)\n}\n// ARP is not an addressable network endpoint.\n- if addr, ok := s.GetMainNICAddress(nicID, arp.ProtocolNumber); ok {\n- t.Errorf(\"got s.GetMainNICAddress(%d, %d) = (%s, true), want = (_, false)\", nicID, arp.ProtocolNumber, addr)\n+ switch addr, err := s.GetMainNICAddress(nicID, arp.ProtocolNumber); err.(type) {\n+ case *tcpip.ErrNotSupported:\n+ default:\n+ t.Errorf(\"got s.GetMainNICAddress(%d, %d) = (%s, %T), want = (_, tcpip.ErrNotSupported)\", nicID, arp.ProtocolNumber, addr, err)\n}\nconst unknownProtocolNumber = 1234\n- if addr, ok := s.GetMainNICAddress(nicID, unknownProtocolNumber); ok {\n- t.Errorf(\"got s.GetMainNICAddress(%d, %d) = (%s, true), want = (_, false)\", nicID, unknownProtocolNumber, addr)\n+ switch addr, err := s.GetMainNICAddress(nicID, unknownProtocolNumber); err.(type) {\n+ case *tcpip.ErrUnknownProtocol:\n+ default:\n+ t.Errorf(\"got s.GetMainNICAddress(%d, %d) = (%s, %T), want = (_, tcpip.ErrUnknownProtocol)\", nicID, unknownProtocolNumber, addr, err)\n}\n}\n@@ -2654,6 +2662,8 @@ func TestNICAutoGenAddrDoesDAD(t *testing.T) {\n// TestNewPEB tests that a new 
PrimaryEndpointBehavior value (peb) is respected\n// when an address's kind gets \"promoted\" to permanent from permanentExpired.\nfunc TestNewPEBOnPromotionToPermanent(t *testing.T) {\n+ const nicID = 1\n+\npebs := []stack.PrimaryEndpointBehavior{\nstack.NeverPrimaryEndpoint,\nstack.CanBePrimaryEndpoint,\n@@ -2667,8 +2677,8 @@ func TestNewPEBOnPromotionToPermanent(t *testing.T) {\nNetworkProtocols: []stack.NetworkProtocolFactory{fakeNetFactory},\n})\nep1 := channel.New(10, defaultMTU, \"\")\n- if err := s.CreateNIC(1, ep1); err != nil {\n- t.Fatal(\"CreateNIC failed:\", err)\n+ if err := s.CreateNIC(nicID, ep1); err != nil {\n+ t.Fatalf(\"CreateNIC(%d, _): %s\", nicID, err)\n}\n// Add a permanent address with initial\n@@ -2676,20 +2686,21 @@ func TestNewPEBOnPromotionToPermanent(t *testing.T) {\n// NeverPrimaryEndpoint, the address should not\n// be returned by a call to GetMainNICAddress;\n// else, it should.\n- if err := s.AddAddressWithOptions(1, fakeNetNumber, \"\\x01\", pi); err != nil {\n- t.Fatal(\"AddAddressWithOptions failed:\", err)\n+ const address1 = tcpip.Address(\"\\x01\")\n+ if err := s.AddAddressWithOptions(nicID, fakeNetNumber, address1, pi); err != nil {\n+ t.Fatalf(\"AddAddressWithOptions(%d, %d, %s, %d): %s\", nicID, fakeNetNumber, address1, pi, err)\n}\n- addr, ok := s.GetMainNICAddress(1, fakeNetNumber)\n- if !ok {\n- t.Fatalf(\"GetMainNICAddress(1, %d) = (_, false), want = (_, true)\", fakeNetNumber)\n+ addr, err := s.GetMainNICAddress(nicID, fakeNetNumber)\n+ if err != nil {\n+ t.Fatalf(\"GetMainNICAddress(%d, %d): %s\", nicID, fakeNetNumber, err)\n}\nif pi == stack.NeverPrimaryEndpoint {\nif want := (tcpip.AddressWithPrefix{}); addr != want {\n- t.Fatalf(\"got GetMainNICAddress(1, %d) = (%s, true), want = (%s, true)\", fakeNetNumber, addr, want)\n+ t.Fatalf(\"got GetMainNICAddress(%d, %d) = %s, want = %s\", nicID, fakeNetNumber, addr, want)\n}\n- } else if addr.Address != \"\\x01\" {\n- t.Fatalf(\"got GetMainNICAddress(1, %d) = (%s, true), want = (1, true)\", fakeNetNumber, addr.Address)\n+ } else if addr.Address != address1 {\n+ t.Fatalf(\"got GetMainNICAddress(%d, %d) = %s, want = %s\", nicID, fakeNetNumber, addr.Address, address1)\n}\n{\n@@ -2707,13 +2718,14 @@ func TestNewPEBOnPromotionToPermanent(t *testing.T) {\n// new peb is respected when an address gets\n// \"promoted\" to permanent from a\n// permanentExpired kind.\n- r, err := s.FindRoute(1, \"\\x01\", \"\\x02\", fakeNetNumber, false)\n+ const address2 = tcpip.Address(\"\\x02\")\n+ r, err := s.FindRoute(nicID, address1, address2, fakeNetNumber, false)\nif err != nil {\n- t.Fatalf(\"FindRoute failed: %v\", err)\n+ t.Fatalf(\"FindRoute(%d, %s, %s, %d, false): %s\", nicID, address1, address2, fakeNetNumber, err)\n}\ndefer r.Release()\n- if err := s.RemoveAddress(1, \"\\x01\"); err != nil {\n- t.Fatalf(\"RemoveAddress failed: %v\", err)\n+ if err := s.RemoveAddress(nicID, address1); err != nil {\n+ t.Fatalf(\"RemoveAddress(%d, %s): %s\", nicID, address1, err)\n}\n//\n@@ -2724,19 +2736,20 @@ func TestNewPEBOnPromotionToPermanent(t *testing.T) {\n// Add some other address with peb set to\n// FirstPrimaryEndpoint.\n- if err := s.AddAddressWithOptions(1, fakeNetNumber, \"\\x03\", stack.FirstPrimaryEndpoint); err != nil {\n- t.Fatalf(\"AddAddressWithOptions failed: %v\", err)\n+ const address3 = tcpip.Address(\"\\x03\")\n+ if err := s.AddAddressWithOptions(nicID, fakeNetNumber, address3, stack.FirstPrimaryEndpoint); err != nil {\n+ t.Fatalf(\"AddAddressWithOptions(%d, %d, %s, %d): %s\", nicID, fakeNetNumber, 
address3, stack.FirstPrimaryEndpoint, err)\n}\n// Add back the address we removed earlier and\n// make sure the new peb was respected.\n// (The address should just be promoted now).\n- if err := s.AddAddressWithOptions(1, fakeNetNumber, \"\\x01\", ps); err != nil {\n- t.Fatalf(\"AddAddressWithOptions failed: %v\", err)\n+ if err := s.AddAddressWithOptions(nicID, fakeNetNumber, address1, ps); err != nil {\n+ t.Fatalf(\"AddAddressWithOptions(%d, %d, %s, %d): %s\", nicID, fakeNetNumber, address1, pi, err)\n}\nvar primaryAddrs []tcpip.Address\n- for _, pa := range s.NICInfo()[1].ProtocolAddresses {\n+ for _, pa := range s.NICInfo()[nicID].ProtocolAddresses {\nprimaryAddrs = append(primaryAddrs, pa.AddressWithPrefix.Address)\n}\nvar expectedList []tcpip.Address\n@@ -2765,20 +2778,20 @@ func TestNewPEBOnPromotionToPermanent(t *testing.T) {\n// should be returned by a call to\n// GetMainNICAddress; else, our original address\n// should be returned.\n- if err := s.RemoveAddress(1, \"\\x03\"); err != nil {\n- t.Fatalf(\"RemoveAddress failed: %v\", err)\n+ if err := s.RemoveAddress(nicID, address3); err != nil {\n+ t.Fatalf(\"RemoveAddress(%d, %s): %s\", nicID, address3, err)\n}\n- addr, ok = s.GetMainNICAddress(1, fakeNetNumber)\n- if !ok {\n- t.Fatalf(\"got GetMainNICAddress(1, %d) = (_, false), want = (_, true)\", fakeNetNumber)\n+ addr, err = s.GetMainNICAddress(nicID, fakeNetNumber)\n+ if err != nil {\n+ t.Fatalf(\"GetMainNICAddress(%d, %d): %s\", nicID, fakeNetNumber, err)\n}\nif ps == stack.NeverPrimaryEndpoint {\nif want := (tcpip.AddressWithPrefix{}); addr != want {\n- t.Fatalf(\"got GetMainNICAddress(1, %d) = (%s, true), want = (%s, true)\", fakeNetNumber, addr, want)\n+ t.Fatalf(\"got GetMainNICAddress(%d, %d) = %s, want = %s\", nicID, fakeNetNumber, addr, want)\n}\n} else {\n- if addr.Address != \"\\x01\" {\n- t.Fatalf(\"got GetMainNICAddress(1, %d) = (%s, true), want = (1, true)\", fakeNetNumber, addr.Address)\n+ if addr.Address != address1 {\n+ t.Fatalf(\"got GetMainNICAddress(%d, %d) = %s, want = %s\", nicID, fakeNetNumber, addr.Address, address1)\n}\n}\n})\n" } ]
Go
Apache License 2.0
google/gvisor
Return tcpip.Error from (*Stack).GetMainNICAddress PiperOrigin-RevId: 364381970
259,898
22.03.2021 14:07:55
25,200
9e86dfc9c5b56eaa91485826bcf3f1f7617d2eb0
Fix logs for packetimpact tests cleanup Don't cleanup containers in Network.Cleanup, otherwise containers will be killed and removed several times. Don't set AutoRemove for containers. This will prevent the confusing 'removal already in progress' messages. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/test/dockerutil/network.go", "new_path": "pkg/test/dockerutil/network.go", "diff": "@@ -102,11 +102,8 @@ func (n *Network) Inspect(ctx context.Context) (types.NetworkResource, error) {\nreturn n.client.NetworkInspect(ctx, n.id, types.NetworkInspectOptions{Verbose: true})\n}\n-// Cleanup cleans up the docker network and all the containers attached to it.\n+// Cleanup cleans up the docker network.\nfunc (n *Network) Cleanup(ctx context.Context) error {\n- for _, c := range n.containers {\n- c.CleanUp(ctx)\n- }\nn.containers = nil\nreturn n.client.NetworkRemove(ctx, n.id)\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/runner/dut.go", "new_path": "test/packetimpact/runner/dut.go", "diff": "@@ -137,7 +137,7 @@ func setUpDUT(ctx context.Context, t *testing.T, id int, mkDevice func(*dockerut\ndn := dn\nt.Cleanup(func() {\nif err := dn.Cleanup(ctx); err != nil {\n- t.Errorf(\"unable to cleanup container %s: %s\", dn.Name, err)\n+ t.Errorf(\"failed to cleanup network %s: %s\", dn.Name, err)\n}\n})\n// Sanity check.\n@@ -151,13 +151,15 @@ func setUpDUT(ctx context.Context, t *testing.T, id int, mkDevice func(*dockerut\ninfo.testNet = testNet\n// Create the Docker container for the DUT.\n- var dut DUT\n+ makeContainer := dockerutil.MakeContainer\nif native {\n- dut = mkDevice(dockerutil.MakeNativeContainer(ctx, logger(fmt.Sprintf(\"dut-%d\", id))))\n- } else {\n- dut = mkDevice(dockerutil.MakeContainer(ctx, logger(fmt.Sprintf(\"dut-%d\", id))))\n+ makeContainer = dockerutil.MakeNativeContainer\n}\n- info.dut = dut\n+ dutContainer := makeContainer(ctx, logger(fmt.Sprintf(\"dut-%d\", id)))\n+ t.Cleanup(func() {\n+ dutContainer.CleanUp(ctx)\n+ })\n+ info.dut = mkDevice(dutContainer)\nrunOpts := dockerutil.RunOpts{\nImage: \"packetimpact\",\n@@ -168,7 +170,7 @@ func setUpDUT(ctx context.Context, t *testing.T, id int, mkDevice func(*dockerut\n}\nipv4PrefixLength, _ := testNet.Subnet.Mask.Size()\n- remoteIPv6, remoteMAC, dutDeviceID, dutTestNetDev, err := dut.Prepare(ctx, t, runOpts, ctrlNet, testNet)\n+ remoteIPv6, remoteMAC, dutDeviceID, dutTestNetDev, err := info.dut.Prepare(ctx, t, runOpts, ctrlNet, testNet)\nif err != nil {\nreturn dutInfo{}, err\n}\n@@ -183,7 +185,7 @@ func setUpDUT(ctx context.Context, t *testing.T, id int, mkDevice func(*dockerut\nPOSIXServerIP: AddressInSubnet(DUTAddr, *ctrlNet.Subnet),\nPOSIXServerPort: CtrlPort,\n}\n- info.uname, err = dut.Uname(ctx)\n+ info.uname, err = info.dut.Uname(ctx)\nif err != nil {\nreturn dutInfo{}, fmt.Errorf(\"failed to get uname information on DUT: %w\", err)\n}\n@@ -231,6 +233,9 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co\n// Create the Docker container for the testbench.\ntestbenchContainer := dockerutil.MakeNativeContainer(ctx, logger(\"testbench\"))\n+ t.Cleanup(func() {\n+ testbenchContainer.CleanUp(ctx)\n+ })\nrunOpts := dockerutil.RunOpts{\nImage: \"packetimpact\",\n@@ -598,7 +603,6 @@ func createDockerNetwork(ctx context.Context, n *dockerutil.Network) error {\nfunc StartContainer(ctx context.Context, runOpts dockerutil.RunOpts, c *dockerutil.Container, containerAddr net.IP, ns []*dockerutil.Network, sysctls map[string]string, cmd ...string) error {\nconf, hostconf, netconf := c.ConfigsFrom(runOpts, cmd...)\n_ = netconf\n- hostconf.AutoRemove = true\nhostconf.Sysctls = map[string]string{\"net.ipv6.conf.all.disable_ipv6\": \"0\"}\nfor k, v := range sysctls {\nhostconf.Sysctls[k] = v\n" } ]
Go
Apache License 2.0
google/gvisor
Fix logs for packetimpact tests cleanup - Don't cleanup containers in Network.Cleanup, otherwise containers will be killed and removed several times. - Don't set AutoRemove for containers. This will prevent the confusing 'removal already in progress' messages. Fixes #3795 PiperOrigin-RevId: 364404414
259,858
22.03.2021 23:14:49
25,200
7dbd6924a3f428d9b8698a5a7bf2707539722b6f
Update apt repository to limit to supported architectures. Fixes
[ { "change_type": "MODIFY", "old_path": "g3doc/user_guide/install.md", "new_path": "g3doc/user_guide/install.md", "diff": "@@ -59,7 +59,7 @@ Next, the configure the key used to sign archives and the repository:\n```bash\ncurl -fsSL https://gvisor.dev/archive.key | sudo apt-key add -\n-sudo add-apt-repository \"deb https://storage.googleapis.com/gvisor/releases release main\"\n+sudo add-apt-repository \"deb [arch=amd64,arm64] https://storage.googleapis.com/gvisor/releases release main\"\n```\nNow the runsc package can be installed:\n@@ -96,7 +96,7 @@ You can use this link with the steps described in\nFor `apt` installation, use the `master` to configure the repository:\n```bash\n-sudo add-apt-repository \"deb https://storage.googleapis.com/gvisor/releases master main\"\n+sudo add-apt-repository \"deb [arch=amd64,arm64] https://storage.googleapis.com/gvisor/releases master main\"\n```\n### Nightly\n@@ -118,7 +118,7 @@ Note that a release may not be available for every day.\nFor `apt` installation, use the `nightly` to configure the repository:\n```bash\n-sudo add-apt-repository \"deb https://storage.googleapis.com/gvisor/releases nightly main\"\n+sudo add-apt-repository \"deb [arch=amd64,arm64] https://storage.googleapis.com/gvisor/releases nightly main\"\n```\n### Latest release\n@@ -133,7 +133,7 @@ You can use this link with the steps described in\nFor `apt` installation, use the `release` to configure the repository:\n```bash\n-sudo add-apt-repository \"deb https://storage.googleapis.com/gvisor/releases release main\"\n+sudo add-apt-repository \"deb [arch=amd64,arm64] https://storage.googleapis.com/gvisor/releases release main\"\n```\n### Specific release\n@@ -152,7 +152,7 @@ For `apt` installation of a specific release, which may include point updates,\nuse the date of the release for repository, e.g. `${yyyymmdd}`.\n```bash\n-sudo add-apt-repository \"deb https://storage.googleapis.com/gvisor/releases yyyymmdd main\"\n+sudo add-apt-repository \"deb [arch=amd64,arm64] https://storage.googleapis.com/gvisor/releases yyyymmdd main\"\n```\n> Note: only newer releases may be available as `apt` repositories.\n" } ]
Go
Apache License 2.0
google/gvisor
Update apt repository to limit to supported architectures. Fixes #5703 PiperOrigin-RevId: 364492235
260,001
23.03.2021 11:04:08
25,200
beb11cec7669d029172751e5b4dfe21c0672a25a
Allow FSETXATTR/FGETXATTR host calls for Verity These host calls are needed for Verity fs to generate/verify hashes.
[ { "change_type": "MODIFY", "old_path": "runsc/cmd/gofer.go", "new_path": "runsc/cmd/gofer.go", "diff": "@@ -166,7 +166,7 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nats := make([]p9.Attacher, 0, len(spec.Mounts)+1)\nap, err := fsgofer.NewAttachPoint(\"/\", fsgofer.Config{\nROMount: spec.Root.Readonly || conf.Overlay,\n- EnableXattr: conf.Verity,\n+ EnableVerityXattr: conf.Verity,\n})\nif err != nil {\nFatalf(\"creating attach point: %v\", err)\n@@ -180,7 +180,7 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\ncfg := fsgofer.Config{\nROMount: isReadonlyMount(m.Options) || conf.Overlay,\nHostUDS: conf.FSGoferHostUDS,\n- EnableXattr: conf.Verity,\n+ EnableVerityXattr: conf.Verity,\n}\nap, err := fsgofer.NewAttachPoint(m.Destination, cfg)\nif err != nil {\n@@ -203,6 +203,10 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nfilter.InstallUDSFilters()\n}\n+ if conf.Verity {\n+ filter.InstallXattrFilters()\n+ }\n+\nif err := filter.Install(); err != nil {\nFatalf(\"installing seccomp filters: %v\", err)\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/filter/config.go", "new_path": "runsc/fsgofer/filter/config.go", "diff": "@@ -247,3 +247,8 @@ var udsSyscalls = seccomp.SyscallRules{\n},\n},\n}\n+\n+var xattrSyscalls = seccomp.SyscallRules{\n+ unix.SYS_FGETXATTR: {},\n+ unix.SYS_FSETXATTR: {},\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/filter/filter.go", "new_path": "runsc/fsgofer/filter/filter.go", "diff": "@@ -36,3 +36,9 @@ func InstallUDSFilters() {\n// Add additional filters required for connecting to the host's sockets.\nallowedSyscalls.Merge(udsSyscalls)\n}\n+\n+// InstallXattrFilters extends the allowed syscalls to include xattr calls that\n+// are necessary for Verity enabled file systems.\n+func InstallXattrFilters() {\n+ allowedSyscalls.Merge(xattrSyscalls)\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer.go", "new_path": "runsc/fsgofer/fsgofer.go", "diff": "@@ -48,6 +48,14 @@ const (\nallowedOpenFlags = unix.O_TRUNC\n)\n+// verityXattrs are the extended attributes used by verity file system.\n+var verityXattrs = map[string]struct{}{\n+ \"user.merkle.offset\": struct{}{},\n+ \"user.merkle.size\": struct{}{},\n+ \"user.merkle.childrenOffset\": struct{}{},\n+ \"user.merkle.childrenSize\": struct{}{},\n+}\n+\n// join is equivalent to path.Join() but skips path.Clean() which is expensive.\nfunc join(parent, child string) string {\nif child == \".\" || child == \"..\" {\n@@ -67,8 +75,9 @@ type Config struct {\n// HostUDS signals whether the gofer can mount a host's UDS.\nHostUDS bool\n- // enableXattr allows Get/SetXattr for the mounted file systems.\n- EnableXattr bool\n+ // EnableVerityXattr allows access to extended attributes used by the\n+ // verity file system.\n+ EnableVerityXattr bool\n}\ntype attachPoint struct {\n@@ -799,7 +808,10 @@ func (l *localFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error {\n}\nfunc (l *localFile) GetXattr(name string, size uint64) (string, error) {\n- if !l.attachPoint.conf.EnableXattr {\n+ if !l.attachPoint.conf.EnableVerityXattr {\n+ return \"\", unix.EOPNOTSUPP\n+ }\n+ if _, ok := verityXattrs[name]; !ok {\nreturn \"\", unix.EOPNOTSUPP\n}\nbuffer := make([]byte, size)\n@@ -810,7 +822,10 @@ func (l *localFile) GetXattr(name string, size uint64) (string, error) {\n}\nfunc (l *localFile) SetXattr(name string, value string, flags uint32) error {\n- if 
!l.attachPoint.conf.EnableXattr {\n+ if !l.attachPoint.conf.EnableVerityXattr {\n+ return unix.EOPNOTSUPP\n+ }\n+ if _, ok := verityXattrs[name]; !ok {\nreturn unix.EOPNOTSUPP\n}\nreturn unix.Fsetxattr(l.file.FD(), name, []byte(value), int(flags))\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer_test.go", "new_path": "runsc/fsgofer/fsgofer_test.go", "diff": "@@ -579,20 +579,24 @@ func SetGetXattr(l *localFile, name string, value string) error {\nreturn nil\n}\n-func TestSetGetXattr(t *testing.T) {\n- xattrConfs := []Config{{ROMount: false, EnableXattr: false}, {ROMount: false, EnableXattr: true}}\n- runCustom(t, []uint32{unix.S_IFREG}, xattrConfs, func(t *testing.T, s state) {\n- name := \"user.test\"\n+func TestSetGetDisabledXattr(t *testing.T) {\n+ runCustom(t, []uint32{unix.S_IFREG}, rwConfs, func(t *testing.T, s state) {\n+ name := \"user.merkle.offset\"\nvalue := \"tmp\"\nerr := SetGetXattr(s.file, name, value)\n- if s.conf.EnableXattr {\n- if err != nil {\n- t.Fatalf(\"%v: SetGetXattr failed, err: %v\", s, err)\n- }\n- } else {\nif err == nil {\nt.Fatalf(\"%v: SetGetXattr should have failed\", s)\n}\n+ })\n+}\n+\n+func TestSetGetXattr(t *testing.T) {\n+ runCustom(t, []uint32{unix.S_IFREG}, []Config{{ROMount: false, EnableVerityXattr: true}}, func(t *testing.T, s state) {\n+ name := \"user.merkle.offset\"\n+ value := \"tmp\"\n+ err := SetGetXattr(s.file, name, value)\n+ if err != nil {\n+ t.Fatalf(\"%v: SetGetXattr failed, err: %v\", s, err)\n}\n})\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Allow FSETXATTR/FGETXATTR host calls for Verity These host calls are needed for Verity fs to generate/verify hashes. PiperOrigin-RevId: 364598180
259,891
23.03.2021 15:40:17
25,200
92374e51976c8a47e4705943f73cecbc6a27073b
setgid directory support in goferfs Also adds support for clearing the setuid bit when appropriate (writing, truncating, changing size, changing UID, or changing GID). VFS2 only.
[ { "change_type": "MODIFY", "old_path": "pkg/p9/p9.go", "new_path": "pkg/p9/p9.go", "diff": "@@ -151,9 +151,16 @@ const (\n// Sticky is a mode bit indicating sticky directories.\nSticky FileMode = 01000\n+ // SetGID is the set group ID bit.\n+ SetGID FileMode = 02000\n+\n+ // SetUID is the set user ID bit.\n+ SetUID FileMode = 04000\n+\n// permissionsMask is the mask to apply to FileModes for permissions. It\n- // includes rwx bits for user, group and others, and sticky bit.\n- permissionsMask FileMode = 01777\n+ // includes rwx bits for user, group, and others, as well as the sticky\n+ // bit, setuid bit, and setgid bit.\n+ permissionsMask FileMode = 07777\n)\n// QIDType is the most significant byte of the FileMode word, to be used as the\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "new_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "diff": "@@ -783,7 +783,15 @@ func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.\nfunc (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MkdirOptions) error {\ncreds := rp.Credentials()\nreturn fs.doCreateAt(ctx, rp, true /* dir */, func(parent *dentry, name string, _ **[]*dentry) error {\n- if _, err := parent.file.mkdir(ctx, name, (p9.FileMode)(opts.Mode), (p9.UID)(creds.EffectiveKUID), (p9.GID)(creds.EffectiveKGID)); err != nil {\n+ // If the parent is a setgid directory, use the parent's GID\n+ // rather than the caller's and enable setgid.\n+ kgid := creds.EffectiveKGID\n+ mode := opts.Mode\n+ if atomic.LoadUint32(&parent.mode)&linux.S_ISGID != 0 {\n+ kgid = auth.KGID(atomic.LoadUint32(&parent.gid))\n+ mode |= linux.S_ISGID\n+ }\n+ if _, err := parent.file.mkdir(ctx, name, p9.FileMode(mode), (p9.UID)(creds.EffectiveKUID), p9.GID(kgid)); err != nil {\nif !opts.ForSyntheticMountpoint || err == syserror.EEXIST {\nreturn err\n}\n@@ -1145,7 +1153,15 @@ func (d *dentry) createAndOpenChildLocked(ctx context.Context, rp *vfs.Resolving\nname := rp.Component()\n// We only want the access mode for creating the file.\ncreateFlags := p9.OpenFlags(opts.Flags) & p9.OpenFlagsModeMask\n- fdobj, openFile, createQID, _, err := dirfile.create(ctx, name, createFlags, (p9.FileMode)(opts.Mode), (p9.UID)(creds.EffectiveKUID), (p9.GID)(creds.EffectiveKGID))\n+\n+ // If the parent is a setgid directory, use the parent's GID rather\n+ // than the caller's.\n+ kgid := creds.EffectiveKGID\n+ if atomic.LoadUint32(&d.mode)&linux.S_ISGID != 0 {\n+ kgid = auth.KGID(atomic.LoadUint32(&d.gid))\n+ }\n+\n+ fdobj, openFile, createQID, _, err := dirfile.create(ctx, name, createFlags, p9.FileMode(opts.Mode), (p9.UID)(creds.EffectiveKUID), p9.GID(kgid))\nif err != nil {\ndirfile.close(ctx)\nreturn nil, err\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/gofer.go", "new_path": "pkg/sentry/fsimpl/gofer/gofer.go", "diff": "@@ -1102,10 +1102,26 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, opts *vfs\nd.metadataMu.Lock()\ndefer d.metadataMu.Unlock()\n+\n+ // As with Linux, if the UID, GID, or file size is changing, we have to\n+ // clear permission bits. 
Note that when set, clearSGID causes\n+ // permissions to be updated, but does not modify stat.Mask, as\n+ // modification would cause an extra inotify flag to be set.\n+ clearSGID := stat.Mask&linux.STATX_UID != 0 && stat.UID != atomic.LoadUint32(&d.uid) ||\n+ stat.Mask&linux.STATX_GID != 0 && stat.GID != atomic.LoadUint32(&d.gid) ||\n+ stat.Mask&linux.STATX_SIZE != 0\n+ if clearSGID {\n+ if stat.Mask&linux.STATX_MODE != 0 {\n+ stat.Mode = uint16(vfs.ClearSUIDAndSGID(uint32(stat.Mode)))\n+ } else {\n+ stat.Mode = uint16(vfs.ClearSUIDAndSGID(atomic.LoadUint32(&d.mode)))\n+ }\n+ }\n+\nif !d.isSynthetic() {\nif stat.Mask != 0 {\nif err := d.file.setAttr(ctx, p9.SetAttrMask{\n- Permissions: stat.Mask&linux.STATX_MODE != 0,\n+ Permissions: stat.Mask&linux.STATX_MODE != 0 || clearSGID,\nUID: stat.Mask&linux.STATX_UID != 0,\nGID: stat.Mask&linux.STATX_GID != 0,\nSize: stat.Mask&linux.STATX_SIZE != 0,\n@@ -1140,7 +1156,7 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, opts *vfs\nreturn nil\n}\n}\n- if stat.Mask&linux.STATX_MODE != 0 {\n+ if stat.Mask&linux.STATX_MODE != 0 || clearSGID {\natomic.StoreUint32(&d.mode, d.fileType()|uint32(stat.Mode))\n}\nif stat.Mask&linux.STATX_UID != 0 {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/regular_file.go", "new_path": "pkg/sentry/fsimpl/gofer/regular_file.go", "diff": "@@ -266,6 +266,20 @@ func (fd *regularFileFD) pwrite(ctx context.Context, src usermem.IOSequence, off\nreturn 0, offset, err\n}\n}\n+\n+ // As with Linux, writing clears the setuid and setgid bits.\n+ if n > 0 {\n+ oldMode := atomic.LoadUint32(&d.mode)\n+ // If setuid or setgid were set, update d.mode and propagate\n+ // changes to the host.\n+ if newMode := vfs.ClearSUIDAndSGID(oldMode); newMode != oldMode {\n+ atomic.StoreUint32(&d.mode, newMode)\n+ if err := d.file.setAttr(ctx, p9.SetAttrMask{Permissions: true}, p9.SetAttr{Permissions: p9.FileMode(newMode)}); err != nil {\n+ return 0, offset, err\n+ }\n+ }\n+ }\n+\nreturn n, offset + n, nil\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/do.go", "new_path": "runsc/cmd/do.go", "diff": "@@ -46,6 +46,7 @@ type Do struct {\ncwd string\nip string\nquiet bool\n+ overlay bool\n}\n// Name implements subcommands.Command.Name.\n@@ -76,6 +77,7 @@ func (c *Do) SetFlags(f *flag.FlagSet) {\nf.StringVar(&c.cwd, \"cwd\", \".\", \"path to the current directory, defaults to the current directory\")\nf.StringVar(&c.ip, \"ip\", \"192.168.10.2\", \"IPv4 address for the sandbox\")\nf.BoolVar(&c.quiet, \"quiet\", false, \"suppress runsc messages to stdout. Application output is still sent to stdout and stderr\")\n+ f.BoolVar(&c.overlay, \"force-overlay\", true, \"use an overlay. 
WARNING: disabling gives the command write access to the host\")\n}\n// Execute implements subcommands.Command.Execute.\n@@ -100,9 +102,8 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su\nreturn Errorf(\"Error to retrieve hostname: %v\", err)\n}\n- // Map the entire host file system, but make it readonly with a writable\n- // overlay on top (ignore --overlay option).\n- conf.Overlay = true\n+ // Map the entire host file system, optionally using an overlay.\n+ conf.Overlay = c.overlay\nabsRoot, err := resolvePath(c.root)\nif err != nil {\nreturn Errorf(\"Error resolving root: %v\", err)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/setgid.cc", "new_path": "test/syscalls/linux/setgid.cc", "diff": "@@ -126,14 +126,15 @@ class SetgidDirTest : public ::testing::Test {\nSKIP_IF(IsRunningWithVFS1());\n- temp_dir_ = ASSERT_NO_ERRNO_AND_VALUE(\n- TempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0777 /* mode */));\n-\n// If we can't find two usable groups, we're in an unsupporting environment.\n// Skip the test.\nPosixErrorOr<std::pair<gid_t, gid_t>> groups = Groups();\nSKIP_IF(!groups.ok());\ngroups_ = groups.ValueOrDie();\n+\n+ auto cleanup = Setegid(groups_.first);\n+ temp_dir_ = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0777 /* mode */));\n}\nvoid TearDown() override {\n@@ -348,6 +349,10 @@ class FileModeTest : public ::testing::TestWithParam<FileModeTestcase> {};\nTEST_P(FileModeTest, WriteToFile) {\nSKIP_IF(IsRunningWithVFS1());\n+ PosixErrorOr<std::pair<gid_t, gid_t>> groups = Groups();\n+ SKIP_IF(!groups.ok());\n+\n+ auto cleanup = Setegid(groups.ValueOrDie().first);\nauto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0777 /* mode */));\nauto path = JoinPath(temp_dir.path(), GetParam().name);\n@@ -371,26 +376,28 @@ TEST_P(FileModeTest, WriteToFile) {\nTEST_P(FileModeTest, TruncateFile) {\nSKIP_IF(IsRunningWithVFS1());\n+ PosixErrorOr<std::pair<gid_t, gid_t>> groups = Groups();\n+ SKIP_IF(!groups.ok());\n+\n+ auto cleanup = Setegid(groups.ValueOrDie().first);\nauto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0777 /* mode */));\nauto path = JoinPath(temp_dir.path(), GetParam().name);\nFileDescriptor fd =\nASSERT_NO_ERRNO_AND_VALUE(Open(path.c_str(), O_CREAT | O_RDWR, 0666));\n- ASSERT_THAT(fchmod(fd.get(), GetParam().mode), SyscallSucceeds());\n- struct stat stats;\n- ASSERT_THAT(fstat(fd.get(), &stats), SyscallSucceeds());\n- EXPECT_EQ(stats.st_mode & kDirmodeMask, GetParam().mode);\n// Write something to the file, as truncating an empty file is a no-op.\nconstexpr char c = 'M';\nASSERT_THAT(write(fd.get(), &c, sizeof(c)),\nSyscallSucceedsWithValue(sizeof(c)));\n+ ASSERT_THAT(fchmod(fd.get(), GetParam().mode), SyscallSucceeds());\n// For security reasons, truncating the file clears the SUID bit, and clears\n// the SGID bit when the group executable bit is unset (which is not a true\n// SGID binary).\nASSERT_THAT(ftruncate(fd.get(), 0), SyscallSucceeds());\n+ struct stat stats;\nASSERT_THAT(fstat(fd.get(), &stats), SyscallSucceeds());\nEXPECT_EQ(stats.st_mode & kDirmodeMask, GetParam().result_mode);\n}\n" } ]
Go
Apache License 2.0
google/gvisor
setgid directory support in goferfs Also adds support for clearing the setuid bit when appropriate (writing, truncating, changing size, changing UID, or changing GID). VFS2 only. PiperOrigin-RevId: 364661835
259,992
23.03.2021 16:19:02
25,200
960155cdaad49ccea07e45152f124beeb7e7fdcc
Add --file-access-mounts flag file-access-mounts flag is similar to --file-access, but controls non-root mounts that were previously mounted in shared mode only. This gives more flexibility to control how mounts are shared within a container.
[ { "change_type": "MODIFY", "old_path": "runsc/boot/fs.go", "new_path": "runsc/boot/fs.go", "diff": "@@ -792,7 +792,7 @@ func (c *containerMounter) getMountNameAndOptions(conf *config.Config, m specs.M\ncase bind:\nfd := c.fds.remove()\nfsName = gofervfs2.Name\n- opts = p9MountData(fd, c.getMountAccessType(m), conf.VFS2)\n+ opts = p9MountData(fd, c.getMountAccessType(conf, m), conf.VFS2)\n// If configured, add overlay to all writable mounts.\nuseOverlay = conf.Overlay && !mountFlags(m.Options).ReadOnly\n@@ -802,12 +802,11 @@ func (c *containerMounter) getMountNameAndOptions(conf *config.Config, m specs.M\nreturn fsName, opts, useOverlay, nil\n}\n-func (c *containerMounter) getMountAccessType(mount specs.Mount) config.FileAccessType {\n+func (c *containerMounter) getMountAccessType(conf *config.Config, mount specs.Mount) config.FileAccessType {\nif hint := c.hints.findMount(mount); hint != nil {\nreturn hint.fileAccessType()\n}\n- // Non-root bind mounts are always shared if no hints were provided.\n- return config.FileAccessShared\n+ return conf.FileAccessMounts\n}\n// mountSubmount mounts volumes inside the container's root. Because mounts may\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/fs_test.go", "new_path": "runsc/boot/fs_test.go", "diff": "@@ -243,7 +243,8 @@ func TestGetMountAccessType(t *testing.T) {\nt.Fatalf(\"newPodMountHints failed: %v\", err)\n}\nmounter := containerMounter{hints: podHints}\n- if got := mounter.getMountAccessType(specs.Mount{Source: source}); got != tst.want {\n+ conf := &config.Config{FileAccessMounts: config.FileAccessShared}\n+ if got := mounter.getMountAccessType(conf, specs.Mount{Source: source}); got != tst.want {\nt.Errorf(\"getMountAccessType(), want: %v, got: %v\", tst.want, got)\n}\n})\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/vfs.go", "new_path": "runsc/boot/vfs.go", "diff": "@@ -494,7 +494,7 @@ func (c *containerMounter) getMountNameAndOptionsVFS2(conf *config.Config, m *mo\n// but unlikely to be correct in this context.\nreturn \"\", nil, false, fmt.Errorf(\"9P mount requires a connection FD\")\n}\n- data = p9MountData(m.fd, c.getMountAccessType(m.Mount), true /* vfs2 */)\n+ data = p9MountData(m.fd, c.getMountAccessType(conf, m.Mount), true /* vfs2 */)\niopts = gofer.InternalFilesystemOptions{\nUniqueID: m.Destination,\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/config/config.go", "new_path": "runsc/config/config.go", "diff": "@@ -58,9 +58,12 @@ type Config struct {\n// DebugLogFormat is the log format for debug.\nDebugLogFormat string `flag:\"debug-log-format\"`\n- // FileAccess indicates how the filesystem is accessed.\n+ // FileAccess indicates how the root filesystem is accessed.\nFileAccess FileAccessType `flag:\"file-access\"`\n+ // FileAccessMounts indicates how non-root volumes are accessed.\n+ FileAccessMounts FileAccessType `flag:\"file-access-mounts\"`\n+\n// Overlay is whether to wrap the root filesystem in an overlay.\nOverlay bool `flag:\"overlay\"`\n@@ -197,13 +200,19 @@ func (c *Config) validate() error {\ntype FileAccessType int\nconst (\n- // FileAccessExclusive is the same as FileAccessShared, but enables\n- // extra caching for improved performance. It should only be used if\n- // the sandbox has exclusive access to the filesystem.\n+ // FileAccessExclusive gives the sandbox exclusive access over files and\n+ // directories in the filesystem. 
No external modifications are permitted and\n+ // can lead to undefined behavior.\n+ //\n+ // Exclusive filesystem access enables more aggressive caching and offers\n+ // significantly better performance. This is the default mode for the root\n+ // volume.\nFileAccessExclusive FileAccessType = iota\n- // FileAccessShared sends IO requests to a Gofer process that validates the\n- // requests and forwards them to the host.\n+ // FileAccessShared is used for volumes that can have external changes. It\n+ // requires revalidation on every filesystem access to detect external\n+ // changes, and reduces the amount of caching that can be done. This is the\n+ // default mode for non-root volumes.\nFileAccessShared\n)\n" }, { "change_type": "MODIFY", "old_path": "runsc/config/flags.go", "new_path": "runsc/config/flags.go", "diff": "@@ -67,7 +67,8 @@ func RegisterFlags() {\nflag.Bool(\"oci-seccomp\", false, \"Enables loading OCI seccomp filters inside the sandbox.\")\n// Flags that control sandbox runtime behavior: FS related.\n- flag.Var(fileAccessTypePtr(FileAccessExclusive), \"file-access\", \"specifies which filesystem to use for the root mount: exclusive (default), shared. Volume mounts are always shared.\")\n+ flag.Var(fileAccessTypePtr(FileAccessExclusive), \"file-access\", \"specifies which filesystem validation to use for the root mount: exclusive (default), shared.\")\n+ flag.Var(fileAccessTypePtr(FileAccessShared), \"file-access-mounts\", \"specifies which filesystem validation to use for volumes other than the root mount: shared (default), exclusive.\")\nflag.Bool(\"overlay\", false, \"wrap filesystem mounts with writable overlay. All modifications are stored in memory inside the sandbox.\")\nflag.Bool(\"verity\", false, \"specifies whether a verity file system will be mounted.\")\nflag.Bool(\"overlayfs-stale-read\", true, \"assume root mount is an overlay filesystem\")\n" } ]
Go
Apache License 2.0
google/gvisor
Add --file-access-mounts flag --file-access-mounts flag is similar to --file-access, but controls non-root mounts that were previously mounted in shared mode only. This gives more flexibility to control how mounts are shared within a container. PiperOrigin-RevId: 364669882
259,962
24.03.2021 12:05:06
25,200
72ff6a1cac6ab35132b4f79b1149590e103e5291
Fix data race in fdbased when accessing fanoutID.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/fdbased/endpoint.go", "new_path": "pkg/tcpip/link/fdbased/endpoint.go", "diff": "@@ -41,6 +41,8 @@ package fdbased\nimport (\n\"fmt\"\n+ \"math\"\n+ \"sync/atomic\"\n\"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/binary\"\n@@ -188,7 +190,9 @@ type Options struct {\n// set of FD's that point to the same NIC. Trying to set the PACKET_FANOUT\n// option for an FD with a fanoutID already in use by another FD for a different\n// NIC will return an EINVAL.\n-var fanoutID = 1\n+//\n+// Must be accessed using atomic operations.\n+var fanoutID int32 = 0\n// New creates a new fd-based endpoint.\n//\n@@ -233,6 +237,10 @@ func New(opts *Options) (stack.LinkEndpoint, error) {\npacketDispatchMode: opts.PacketDispatchMode,\n}\n+ // Increment fanoutID to ensure that we don't re-use the same fanoutID for\n+ // the next endpoint.\n+ fid := atomic.AddInt32(&fanoutID, 1)\n+\n// Create per channel dispatchers.\nfor i := 0; i < len(e.fds); i++ {\nfd := e.fds[i]\n@@ -254,21 +262,17 @@ func New(opts *Options) (stack.LinkEndpoint, error) {\ne.gsoMaxSize = opts.GSOMaxSize\n}\n}\n- inboundDispatcher, err := createInboundDispatcher(e, fd, isSocket)\n+ inboundDispatcher, err := createInboundDispatcher(e, fd, isSocket, fid)\nif err != nil {\nreturn nil, fmt.Errorf(\"createInboundDispatcher(...) = %v\", err)\n}\ne.inboundDispatchers = append(e.inboundDispatchers, inboundDispatcher)\n}\n- // Increment fanoutID to ensure that we don't re-use the same fanoutID for\n- // the next endpoint.\n- fanoutID++\n-\nreturn e, nil\n}\n-func createInboundDispatcher(e *endpoint, fd int, isSocket bool) (linkDispatcher, error) {\n+func createInboundDispatcher(e *endpoint, fd int, isSocket bool, fID int32) (linkDispatcher, error) {\n// By default use the readv() dispatcher as it works with all kinds of\n// FDs (tap/tun/unix domain sockets and af_packet).\ninboundDispatcher, err := newReadVDispatcher(fd, e)\n@@ -283,13 +287,32 @@ func createInboundDispatcher(e *endpoint, fd int, isSocket bool) (linkDispatcher\n}\nswitch sa.(type) {\ncase *unix.SockaddrLinklayer:\n+ // See: PACKET_FANOUT_MAX in net/packet/internal.h\n+ const packetFanoutMax = 1 << 16\n+ if fID > packetFanoutMax {\n+ return nil, fmt.Errorf(\"host fanoutID limit exceeded, fanoutID must be <= %d\", math.MaxUint16)\n+ }\n// Enable PACKET_FANOUT mode if the underlying socket is of type\n// AF_PACKET. We do not enable PACKET_FANOUT_FLAG_DEFRAG as that will\n// prevent gvisor from receiving fragmented packets and the host does the\n// reassembly on our behalf before delivering the fragments. This makes it\n// hard to test fragmentation reassembly code in Netstack.\n+ //\n+ // See: include/uapi/linux/if_packet.h (struct fanout_args).\n+ //\n+ // NOTE: We are using SetSockOptInt here even though the underlying\n+ // option is actually a struct. 
The code follows the example in the\n+ // kernel documentation as described at the link below:\n+ //\n+ // See: https://www.kernel.org/doc/Documentation/networking/packet_mmap.txt\n+ //\n+ // This works out because the actual implementation for the option zero\n+ // initializes the structure and will initialize the max_members field\n+ // to a proper value if zero.\n+ //\n+ // See: https://github.com/torvalds/linux/blob/7acac4b3196caee5e21fb5ea53f8bc124e6a16fc/net/packet/af_packet.c#L3881\nconst fanoutType = unix.PACKET_FANOUT_HASH\n- fanoutArg := fanoutID | fanoutType<<16\n+ fanoutArg := int(fID) | fanoutType<<16\nif err := unix.SetsockoptInt(fd, unix.SOL_PACKET, unix.PACKET_FANOUT, fanoutArg); err != nil {\nreturn nil, fmt.Errorf(\"failed to enable PACKET_FANOUT option: %v\", err)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix data race in fdbased when accessing fanoutID. PiperOrigin-RevId: 364859173
259,884
24.03.2021 17:54:45
25,200
e4772bd84512c03ca431701dc139e126a885abd4
Fix highlighting sidebar menu on the website Highlighting previously highlighted multiple items in the sidebar if they had the same page name (not full url). This change simplifies this by adding the highlight class in the jekyll template rather than javascript, and highlights only the correct page.
[ { "change_type": "MODIFY", "old_path": "website/_includes/footer.html", "new_path": "website/_includes/footer.html", "diff": "@@ -27,25 +27,6 @@ if (!doNotTrack) {\n};\nwindow.addEventListener(\"hashchange\", shiftWindow);\n- var highlightCurrentSidebarNav = function() {\n- var href = location.pathname;\n- var item = $('#sidebar-nav [href$=\"' + href + '\"]');\n- if (item) {\n- var li = item.parent();\n- li.addClass(\"active\");\n-\n- if (li.parent() && li.parent().is(\"ul\")) {\n- do {\n- var ul = li.parent();\n- if (ul.hasClass(\"collapse\")) {\n- ul.collapse(\"show\");\n- }\n- li = ul.parent();\n- } while (li && li.is(\"li\"));\n- }\n- }\n- };\n-\n$(document).ready(function() {\n// Scroll to anchor of location hash, adjusted for fixed navbar.\nwindow.setTimeout(function() {\n@@ -65,8 +46,5 @@ if (!doNotTrack) {\ntoggle.removeClass(\"dropup\");\n}\n});\n-\n- // Highlight the current page on the sidebar nav.\n- highlightCurrentSidebarNav();\n});\n</script>\n" }, { "change_type": "MODIFY", "old_path": "website/_layouts/docs.html", "new_path": "website/_layouts/docs.html", "diff": "@@ -20,15 +20,20 @@ categories:\n{% comment %}If all pages in the subcategory are excluded don't show it.{% endcomment %}\n{% if sorted_pages.size > 0 %}\n{% if subcategory.name != \"\" %}\n- {% assign ac = \"aria-controls\" %}\n{% assign cid = category | remove: \" \" | downcase %}\n{% assign sid = subcategory.name | remove: \" \" | downcase %}\n<li>\n- <a class=\"sidebar-nav-heading\" data-toggle=\"collapse\" href=\"#{{ cid }}-{{ sid }}\" aria-expanded=\"false\" {{ ac }}=\"{{ cid }}-{{ sid }}\">{{ subcategory.name }}<span class=\"caret\"></span></a>\n- <ul class=\"collapse sidebar-nav sidebar-submenu\" id=\"{{ cid }}-{{ sid }}\">\n+ {% comment %}\n+ If the current page is in the sub-category then set the collapsible to expanded.\n+ See: https://getbootstrap.com/docs/3.3/javascript/#collapse\n+ {% endcomment %}\n+ {% assign expanded = false %}\n+ {% for p in sorted_pages %}{% if page.url == p.url %}{% assign expanded = true %}{% endif %}{% endfor %}\n+ <a class=\"sidebar-nav-heading\" data-toggle=\"collapse\" href=\"#{{ cid }}-{{ sid }}\" {% if expanded %}aria-expanded=\"true\"{% else %}aria-expanded=\"false\"{% endif %} aria-controls=\"{{ cid }}-{{ sid }}\">{{ subcategory.name }}<span class=\"caret\"></span></a>\n+ <ul class=\"collapse{% if expanded %} in{% endif %} sidebar-nav sidebar-submenu\" id=\"{{ cid }}-{{ sid }}\">\n{% endif %}\n{% for p in sorted_pages %}\n- <li><a href=\"{{ p.url }}\">{{ p.title }}</a></li>\n+ <li{% if page.url == p.url %} class=\"active\"{% endif %}><a href=\"{{ p.url }}\">{{ p.title }}</a></li>\n{% endfor %}\n{% if subcategory.name != \"\" %}\n</li>\n" } ]
Go
Apache License 2.0
google/gvisor
Fix highlighting sidebar menu on the website Highlighting previously highlighted multiple items in the sidebar if they had the same page name (not full url). This change simplifies this by adding the highlight class in the jekyll template rather than javascript, and highlights only the correct page. PiperOrigin-RevId: 364931350
259,884
24.03.2021 17:55:06
25,200
c27fac421b760e62a8becf5d2c53ddbdf4ae5a4c
Fix path to runsc in CNI tutorial.
[ { "change_type": "MODIFY", "old_path": "g3doc/user_guide/tutorials/cni.md", "new_path": "g3doc/user_guide/tutorials/cni.md", "diff": "@@ -131,7 +131,7 @@ sudo sh -c 'echo \"Hello World!\" > rootfs/var/www/html/index.html'\nNext create the `config.json` specifying the network namespace.\n```\n-sudo /usr/local/bin/runsc spec \\\n+sudo runsc spec \\\n--cwd /var/www/html \\\n--netns /var/run/netns/${CNI_CONTAINERID} \\\n-- python -m http.server\n" } ]
Go
Apache License 2.0
google/gvisor
Fix path to runsc in CNI tutorial. PiperOrigin-RevId: 364931406
259,891
25.03.2021 11:58:35
25,200
6b085ba47715a3e7283ec383c1ddf0f8b14dc60c
setgid: skip tests when we can't find usable GIDs
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/setgid.cc", "new_path": "test/syscalls/linux/setgid.cc", "diff": "@@ -107,16 +107,7 @@ PosixErrorOr<std::pair<gid_t, gid_t>> Groups() {\nif (!capable.ValueOrDie()) {\nreturn PosixError(EPERM, \"missing CAP_SETGID\");\n}\n- gid_t gid = getegid();\n- auto cleanup1 = Setegid(gid);\n- if (!cleanup1.ok()) {\n- return cleanup1.error();\n- }\n- auto cleanup2 = Setegid(kNobody);\n- if (!cleanup2.ok()) {\n- return cleanup2.error();\n- }\n- return std::pair<gid_t, gid_t>(gid, kNobody);\n+ return std::pair<gid_t, gid_t>(getegid(), kNobody);\n}\nclass SetgidDirTest : public ::testing::Test {\n@@ -132,6 +123,12 @@ class SetgidDirTest : public ::testing::Test {\nSKIP_IF(!groups.ok());\ngroups_ = groups.ValueOrDie();\n+ // Ensure we can actually use both groups.\n+ auto cleanup1 = Setegid(groups_.first);\n+ SKIP_IF(!cleanup1.ok());\n+ auto cleanup2 = Setegid(groups_.second);\n+ SKIP_IF(!cleanup2.ok());\n+\nauto cleanup = Setegid(groups_.first);\ntemp_dir_ = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0777 /* mode */));\n" } ]
Go
Apache License 2.0
google/gvisor
setgid: skip tests when we can't find usable GIDs PiperOrigin-RevId: 365092320
259,885
25.03.2021 16:47:57
25,200
79bc446facf3d6920853c4cf35e72d25dd4c1011
Lock TaskSet mutex for writing in ptraceClone(). This is necessary since ptraceClone() mutates tracer.ptraceTracees.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/ptrace.go", "new_path": "pkg/sentry/kernel/ptrace.go", "diff": "@@ -770,8 +770,8 @@ func (t *Task) ptraceClone(kind ptraceCloneKind, child *Task, opts *CloneOptions\nif !t.hasTracer() {\nreturn false\n}\n- t.tg.pidns.owner.mu.RLock()\n- defer t.tg.pidns.owner.mu.RUnlock()\n+ t.tg.pidns.owner.mu.Lock()\n+ defer t.tg.pidns.owner.mu.Unlock()\nevent := false\nif !opts.Untraced {\nswitch kind {\n" } ]
Go
Apache License 2.0
google/gvisor
Lock TaskSet mutex for writing in ptraceClone(). This is necessary since ptraceClone() mutates tracer.ptraceTracees. PiperOrigin-RevId: 365152396
259,858
01.04.2021 15:33:04
25,200
513de4039c9ba8fea3fb81c796312f20242c1c5d
Remove invalid dependency.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -154,7 +154,6 @@ cc_library(\ndefines = select_system(),\ndeps = default_net_util() + [\ngtest,\n- \"//net/util:ports\",\n\"@com_google_absl//absl/memory\",\n\"@com_google_absl//absl/strings\",\n\"@com_google_absl//absl/strings:str_format\",\n" } ]
Go
Apache License 2.0
google/gvisor
Remove invalid dependency. PiperOrigin-RevId: 366344222
259,975
02.04.2021 10:39:33
25,200
1b53550e556edc2864eb1525aded5874e931c92b
Add vfs1 to go/runsc-benchmarks
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -340,7 +340,8 @@ BENCHMARKS_FILTER := .\nBENCHMARKS_OPTIONS := -test.benchtime=30s\nBENCHMARKS_ARGS := -test.v -test.bench=$(BENCHMARKS_FILTER) $(BENCHMARKS_OPTIONS)\nBENCHMARKS_PROFILE := -pprof-dir=/tmp/profile -pprof-cpu -pprof-heap -pprof-block -pprof-mutex\n-BENCH_RUNTIME_ARGS ?= --vfs2\n+BENCH_VFS := --vfs2\n+BENCH_RUNTIME_ARGS ?=\ninit-benchmark-table: ## Initializes a BigQuery table with the benchmark schema.\n@$(call run,//tools/parsers:parser,init --project=$(BENCHMARKS_PROJECT) --dataset=$(BENCHMARKS_DATASET) --table=$(BENCHMARKS_TABLE))\n@@ -361,13 +362,14 @@ run_benchmark = \\\nbenchmark-platforms: load-benchmarks $(RUNTIME_BIN) ## Runs benchmarks for runc and all given platforms in BENCHMARK_PLATFORMS.\n@$(foreach PLATFORM,$(BENCHMARKS_PLATFORMS), \\\n- $(call run_benchmark,$(PLATFORM),--platform=$(PLATFORM) $(BENCH_RUNTIME_ARGS)) && \\\n+ $(call run_benchmark,$(PLATFORM),--platform=$(PLATFORM) $(BENCH_RUNTIME_ARGS) --vfs2) && \\\n+ $(call run_benchmark,$(PLATFORM)_vfs1,--platform=$(PLATFORM) $(BENCH_RUNTIME_ARGS)) && \\\n) true\n@$(call run_benchmark,runc)\n.PHONY: benchmark-platforms\nrun-benchmark: load-benchmarks $(RUNTIME_BIN) ## Runs single benchmark and optionally sends data to BigQuery.\n- @$(call run_benchmark,$(RUNTIME),$(BENCH_RUNTIME_ARGS))\n+ @$(call run_benchmark,$(RUNTIME)$(BENCH_VFS),$(BENCH_RUNTIME_ARGS) $(BENCH_VFS))\n.PHONY: run-benchmark\n##\n" } ]
Go
Apache License 2.0
google/gvisor
Add vfs1 to go/runsc-benchmarks PiperOrigin-RevId: 366470480
259,985
02.04.2021 19:33:07
25,200
491b106d62ba97cafb63252bf7d5bdd4749d417a
Implement the runsc verity-prepare command. Implement a new runsc command to set up a sandbox with verityfs and run the measure tool. This is loosely forked from the do command, and currently requires the caller to provide the measure tool binary.
[ { "change_type": "MODIFY", "old_path": "runsc/boot/BUILD", "new_path": "runsc/boot/BUILD", "diff": "@@ -66,6 +66,7 @@ go_library(\n\"//pkg/sentry/fsimpl/proc\",\n\"//pkg/sentry/fsimpl/sys\",\n\"//pkg/sentry/fsimpl/tmpfs\",\n+ \"//pkg/sentry/fsimpl/verity\",\n\"//pkg/sentry/inet\",\n\"//pkg/sentry/kernel\",\n\"//pkg/sentry/kernel:uncaught_signal_go_proto\",\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/vfs.go", "new_path": "runsc/boot/vfs.go", "diff": "@@ -16,6 +16,7 @@ package boot\nimport (\n\"fmt\"\n+ \"path\"\n\"sort\"\n\"strings\"\n@@ -37,6 +38,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/proc\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/sys\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs\"\n+ \"gvisor.dev/gvisor/pkg/sentry/fsimpl/verity\"\n\"gvisor.dev/gvisor/pkg/sentry/inet\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n@@ -83,6 +85,10 @@ func registerFilesystems(k *kernel.Kernel) error {\nAllowUserMount: true,\nAllowUserList: true,\n})\n+ vfsObj.MustRegisterFilesystemType(verity.Name, &verity.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{\n+ AllowUserList: true,\n+ AllowUserMount: false,\n+ })\n// Setup files in devtmpfs.\nif err := memdev.Register(vfsObj); err != nil {\n@@ -472,6 +478,12 @@ func (c *containerMounter) getMountNameAndOptionsVFS2(conf *config.Config, m *mo\nvar data []string\nvar iopts interface{}\n+ verityOpts, verityRequested, remainingMOpts, err := parseVerityMountOptions(m.Options)\n+ if err != nil {\n+ return \"\", nil, false, err\n+ }\n+ m.Options = remainingMOpts\n+\n// Find filesystem name and FS specific data field.\nswitch m.Type {\ncase devpts.Name, devtmpfs.Name, proc.Name, sys.Name:\n@@ -530,9 +542,73 @@ func (c *containerMounter) getMountNameAndOptionsVFS2(conf *config.Config, m *mo\n}\n}\n+ if verityRequested {\n+ verityOpts.RootMerkleFileName = path.Base(m.Mount.Destination)\n+ verityOpts.LowerName = fsName\n+ verityOpts.LowerGetFSOptions = opts.GetFilesystemOptions\n+ fsName = verity.Name\n+ opts = &vfs.MountOptions{\n+ GetFilesystemOptions: vfs.GetFilesystemOptions{\n+ Data: strings.Join(data, \",\"),\n+ InternalData: verityOpts,\n+ },\n+ InternalMount: true,\n+ }\n+ }\n+\nreturn fsName, opts, useOverlay, nil\n}\n+func parseKeyValue(s string) (string, string, bool) {\n+ tokens := strings.SplitN(s, \"=\", 2)\n+ if len(tokens) < 2 {\n+ return \"\", \"\", false\n+ }\n+ return strings.TrimSpace(tokens[0]), strings.TrimSpace(tokens[1]), true\n+}\n+\n+// parseAndFilterOptions scans the provided mount options for verity-related\n+// mount options. 
It returns the parsed set of verity mount options, as well as\n+// the filtered set of mount options unrelated to verity.\n+func parseVerityMountOptions(mopts []string) (verity.InternalFilesystemOptions, bool, []string, error) {\n+ nonVerity := []string{}\n+ found := false\n+ verityOpts := verity.InternalFilesystemOptions{\n+ Action: verity.PanicOnViolation,\n+ }\n+ for _, o := range mopts {\n+ if !strings.HasPrefix(o, \"verity.\") {\n+ nonVerity = append(nonVerity, o)\n+ continue\n+ }\n+\n+ k, v, ok := parseKeyValue(o)\n+ if !ok {\n+ return verityOpts, found, nonVerity, fmt.Errorf(\"invalid verity mount option with no value: %q\", o)\n+ }\n+\n+ found = true\n+ switch k {\n+ case \"verity.roothash\":\n+ verityOpts.RootHash = []byte(v)\n+ case \"verity.action\":\n+ switch v {\n+ case \"error\":\n+ verityOpts.Action = verity.ErrorOnViolation\n+ case \"panic\":\n+ verityOpts.Action = verity.PanicOnViolation\n+ default:\n+ log.Warningf(\"Invalid verity action %q\", v)\n+ verityOpts.Action = verity.PanicOnViolation\n+ }\n+ default:\n+ return verityOpts, found, nonVerity, fmt.Errorf(\"unknown verity mount option: %q\", k)\n+ }\n+ }\n+ verityOpts.AllowRuntimeEnable = len(verityOpts.RootHash) == 0\n+ return verityOpts, found, nonVerity, nil\n+}\n+\n// mountTmpVFS2 mounts an internal tmpfs at '/tmp' if it's safe to do so.\n// Technically we don't have to mount tmpfs at /tmp, as we could just rely on\n// the host /tmp, but this is a nice optimization, and fixes some apps that call\n" }, { "change_type": "MODIFY", "old_path": "runsc/cli/main.go", "new_path": "runsc/cli/main.go", "diff": "@@ -86,6 +86,7 @@ func Main(version string) {\nsubcommands.Register(new(cmd.Symbolize), \"\")\nsubcommands.Register(new(cmd.Wait), \"\")\nsubcommands.Register(new(cmd.Mitigate), \"\")\n+ subcommands.Register(new(cmd.VerityPrepare), \"\")\n// Register internal commands with the internal group name. 
This causes\n// them to be sorted below the user-facing commands with empty group.\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/BUILD", "new_path": "runsc/cmd/BUILD", "diff": "@@ -35,6 +35,7 @@ go_library(\n\"statefile.go\",\n\"symbolize.go\",\n\"syscalls.go\",\n+ \"verity_prepare.go\",\n\"wait.go\",\n],\nvisibility = [\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/do.go", "new_path": "runsc/cmd/do.go", "diff": "@@ -126,9 +126,8 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su\nHostname: hostname,\n}\n- specutils.LogSpec(spec)\n-\ncid := fmt.Sprintf(\"runsc-%06d\", rand.Int31n(1000000))\n+\nif conf.Network == config.NetworkNone {\naddNamespace(spec, specs.LinuxNamespace{Type: specs.NetworkNamespace})\n@@ -154,55 +153,7 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su\n}\n}\n- out, err := json.Marshal(spec)\n- if err != nil {\n- return Errorf(\"Error to marshal spec: %v\", err)\n- }\n- tmpDir, err := ioutil.TempDir(\"\", \"runsc-do\")\n- if err != nil {\n- return Errorf(\"Error to create tmp dir: %v\", err)\n- }\n- defer os.RemoveAll(tmpDir)\n-\n- log.Infof(\"Changing configuration RootDir to %q\", tmpDir)\n- conf.RootDir = tmpDir\n-\n- cfgPath := filepath.Join(tmpDir, \"config.json\")\n- if err := ioutil.WriteFile(cfgPath, out, 0755); err != nil {\n- return Errorf(\"Error write spec: %v\", err)\n- }\n-\n- containerArgs := container.Args{\n- ID: cid,\n- Spec: spec,\n- BundleDir: tmpDir,\n- Attached: true,\n- }\n- ct, err := container.New(conf, containerArgs)\n- if err != nil {\n- return Errorf(\"creating container: %v\", err)\n- }\n- defer ct.Destroy()\n-\n- if err := ct.Start(conf); err != nil {\n- return Errorf(\"starting container: %v\", err)\n- }\n-\n- // Forward signals to init in the container. Thus if we get SIGINT from\n- // ^C, the container gracefully exit, and we can clean up.\n- //\n- // N.B. 
There is a still a window before this where a signal may kill\n- // this process, skipping cleanup.\n- stopForwarding := ct.ForwardSignals(0 /* pid */, false /* fgProcess */)\n- defer stopForwarding()\n-\n- ws, err := ct.Wait()\n- if err != nil {\n- return Errorf(\"waiting for container: %v\", err)\n- }\n-\n- *waitStatus = ws\n- return subcommands.ExitSuccess\n+ return startContainerAndWait(spec, conf, cid, waitStatus)\n}\nfunc addNamespace(spec *specs.Spec, ns specs.LinuxNamespace) {\n@@ -397,3 +348,58 @@ func calculatePeerIP(ip string) (string, error) {\n}\nreturn fmt.Sprintf(\"%s.%s.%s.%d\", parts[0], parts[1], parts[2], n), nil\n}\n+\n+func startContainerAndWait(spec *specs.Spec, conf *config.Config, cid string, waitStatus *unix.WaitStatus) subcommands.ExitStatus {\n+ specutils.LogSpec(spec)\n+\n+ out, err := json.Marshal(spec)\n+ if err != nil {\n+ return Errorf(\"Error to marshal spec: %v\", err)\n+ }\n+ tmpDir, err := ioutil.TempDir(\"\", \"runsc-do\")\n+ if err != nil {\n+ return Errorf(\"Error to create tmp dir: %v\", err)\n+ }\n+ defer os.RemoveAll(tmpDir)\n+\n+ log.Infof(\"Changing configuration RootDir to %q\", tmpDir)\n+ conf.RootDir = tmpDir\n+\n+ cfgPath := filepath.Join(tmpDir, \"config.json\")\n+ if err := ioutil.WriteFile(cfgPath, out, 0755); err != nil {\n+ return Errorf(\"Error write spec: %v\", err)\n+ }\n+\n+ containerArgs := container.Args{\n+ ID: cid,\n+ Spec: spec,\n+ BundleDir: tmpDir,\n+ Attached: true,\n+ }\n+\n+ ct, err := container.New(conf, containerArgs)\n+ if err != nil {\n+ return Errorf(\"creating container: %v\", err)\n+ }\n+ defer ct.Destroy()\n+\n+ if err := ct.Start(conf); err != nil {\n+ return Errorf(\"starting container: %v\", err)\n+ }\n+\n+ // Forward signals to init in the container. Thus if we get SIGINT from\n+ // ^C, the container gracefully exit, and we can clean up.\n+ //\n+ // N.B. There is a still a window before this where a signal may kill\n+ // this process, skipping cleanup.\n+ stopForwarding := ct.ForwardSignals(0 /* pid */, false /* fgProcess */)\n+ defer stopForwarding()\n+\n+ ws, err := ct.Wait()\n+ if err != nil {\n+ return Errorf(\"waiting for container: %v\", err)\n+ }\n+\n+ *waitStatus = ws\n+ return subcommands.ExitSuccess\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "runsc/cmd/verity_prepare.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package cmd\n+\n+import (\n+ \"context\"\n+ \"fmt\"\n+ \"math/rand\"\n+ \"os\"\n+\n+ \"github.com/google/subcommands\"\n+ specs \"github.com/opencontainers/runtime-spec/specs-go\"\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/runsc/config\"\n+ \"gvisor.dev/gvisor/runsc/flag\"\n+ \"gvisor.dev/gvisor/runsc/specutils\"\n+)\n+\n+// VerityPrepare implements subcommands.Commands for the \"verity-prepare\"\n+// command. It sets up a sandbox with a writable verity mount mapped to \"--dir\",\n+// and executes the verity measure tool specified by \"--tool\" in the sandbox. 
It\n+// is intended to prepare --dir to be mounted as a verity filesystem.\n+type VerityPrepare struct {\n+ root string\n+ tool string\n+ dir string\n+}\n+\n+// Name implements subcommands.Command.Name.\n+func (*VerityPrepare) Name() string {\n+ return \"verity-prepare\"\n+}\n+\n+// Synopsis implements subcommands.Command.Synopsis.\n+func (*VerityPrepare) Synopsis() string {\n+ return \"Generates the data structures necessary to enable verityfs on a filesystem.\"\n+}\n+\n+// Usage implements subcommands.Command.Usage.\n+func (*VerityPrepare) Usage() string {\n+ return \"verity-prepare --tool=<measure_tool> --dir=<path>\"\n+}\n+\n+// SetFlags implements subcommands.Command.SetFlags.\n+func (c *VerityPrepare) SetFlags(f *flag.FlagSet) {\n+ f.StringVar(&c.root, \"root\", \"/\", `path to the root directory, defaults to \"/\"`)\n+ f.StringVar(&c.tool, \"tool\", \"\", \"path to the verity measure_tool\")\n+ f.StringVar(&c.dir, \"dir\", \"\", \"path to the directory to be hashed\")\n+}\n+\n+// Execute implements subcommands.Command.Execute.\n+func (c *VerityPrepare) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {\n+ conf := args[0].(*config.Config)\n+ waitStatus := args[1].(*unix.WaitStatus)\n+\n+ hostname, err := os.Hostname()\n+ if err != nil {\n+ return Errorf(\"Error to retrieve hostname: %v\", err)\n+ }\n+\n+ // Map the entire host file system.\n+ absRoot, err := resolvePath(c.root)\n+ if err != nil {\n+ return Errorf(\"Error resolving root: %v\", err)\n+ }\n+\n+ spec := &specs.Spec{\n+ Root: &specs.Root{\n+ Path: absRoot,\n+ },\n+ Process: &specs.Process{\n+ Cwd: absRoot,\n+ Args: []string{c.tool, \"--path\", \"/verityroot\"},\n+ Env: os.Environ(),\n+ Capabilities: specutils.AllCapabilities(),\n+ },\n+ Hostname: hostname,\n+ Mounts: []specs.Mount{\n+ specs.Mount{\n+ Source: c.dir,\n+ Destination: \"/verityroot\",\n+ Type: \"bind\",\n+ Options: []string{\"verity.roothash=\"},\n+ },\n+ },\n+ }\n+\n+ cid := fmt.Sprintf(\"runsc-%06d\", rand.Int31n(1000000))\n+\n+ // Force no networking, it is not necessary to run the verity measure tool.\n+ conf.Network = config.NetworkNone\n+\n+ return startContainerAndWait(spec, conf, cid, waitStatus)\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/specutils/fs.go", "new_path": "runsc/specutils/fs.go", "diff": "@@ -18,6 +18,7 @@ import (\n\"fmt\"\n\"math/bits\"\n\"path\"\n+ \"strings\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"golang.org/x/sys/unix\"\n@@ -64,6 +65,12 @@ var optionsMap = map[string]mapping{\n\"sync\": {set: true, val: unix.MS_SYNCHRONOUS},\n}\n+// verityMountOptions is the set of valid verity mount option keys.\n+var verityMountOptions = map[string]struct{}{\n+ \"verity.roothash\": struct{}{},\n+ \"verity.action\": struct{}{},\n+}\n+\n// propOptionsMap is similar to optionsMap, but it lists propagation options\n// that cannot be used together with other flags.\nvar propOptionsMap = map[string]mapping{\n@@ -117,6 +124,14 @@ func validateMount(mnt *specs.Mount) error {\nreturn nil\n}\n+func moptKey(opt string) string {\n+ if len(opt) == 0 {\n+ return opt\n+ }\n+ // Guaranteed to have at least one token, since opt is not empty.\n+ return strings.SplitN(opt, \"=\", 2)[0]\n+}\n+\n// ValidateMountOptions validates that mount options are correct.\nfunc ValidateMountOptions(opts []string) error {\nfor _, o := range opts {\n@@ -125,7 +140,8 @@ func ValidateMountOptions(opts []string) error {\n}\n_, ok1 := optionsMap[o]\n_, ok2 := propOptionsMap[o]\n- if !ok1 && !ok2 {\n+ _, ok3 := 
verityMountOptions[moptKey(o)]\n+ if !ok1 && !ok2 && !ok3 {\nreturn fmt.Errorf(\"unknown mount option %q\", o)\n}\nif err := validatePropagation(o); err != nil {\n" } ]
Go
Apache License 2.0
google/gvisor
Implement the runsc verity-prepare command. Implement a new runsc command to set up a sandbox with verityfs and run the measure tool. This is loosely forked from the do command, and currently requires the caller to provide the measure tool binary. PiperOrigin-RevId: 366553769
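Aside (editor's illustration, not part of the commit record): per the command's Usage() string it is invoked as "verity-prepare --tool=<measure_tool> --dir=<path>", and the interesting part of the generated OCI spec is the bind mount that exposes --dir inside the sandbox with an empty verity.roothash option. Below is a hedged sketch of just that mount entry using the runtime-spec Go types the command itself imports; the directory path is a placeholder.

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Hypothetical value for the --dir flag; the real path is supplied by
	// the caller of "runsc verity-prepare".
	dir := "/path/to/lower"

	// The directory is bind-mounted at /verityroot with an empty
	// verity.roothash option, which tells runsc to set the mount up for
	// runtime enabling so the measure tool can hash it.
	m := specs.Mount{
		Source:      dir,
		Destination: "/verityroot",
		Type:        "bind",
		Options:     []string{"verity.roothash="},
	}
	fmt.Printf("%+v\n", m)
}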
259,858
03.04.2021 00:16:37
25,200
9a8692c82ac543969cc0c9cf3541f2ab925ba4ac
Remove eternal and enormous tests.
[ { "change_type": "MODIFY", "old_path": "test/benchmarks/base/BUILD", "new_path": "test/benchmarks/base/BUILD", "diff": "@@ -17,7 +17,6 @@ go_library(\nbenchmark_test(\nname = \"startup_test\",\n- size = \"enormous\",\nsrcs = [\"startup_test.go\"],\nvisibility = [\"//:sandbox\"],\ndeps = [\n@@ -29,7 +28,6 @@ benchmark_test(\nbenchmark_test(\nname = \"size_test\",\n- size = \"enormous\",\nsrcs = [\"size_test.go\"],\nvisibility = [\"//:sandbox\"],\ndeps = [\n@@ -42,7 +40,6 @@ benchmark_test(\nbenchmark_test(\nname = \"sysbench_test\",\n- size = \"enormous\",\nsrcs = [\"sysbench_test.go\"],\nvisibility = [\"//:sandbox\"],\ndeps = [\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/database/BUILD", "new_path": "test/benchmarks/database/BUILD", "diff": "@@ -11,7 +11,6 @@ go_library(\nbenchmark_test(\nname = \"redis_test\",\n- size = \"enormous\",\nsrcs = [\"redis_test.go\"],\nlibrary = \":database\",\nvisibility = [\"//:sandbox\"],\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/fs/BUILD", "new_path": "test/benchmarks/fs/BUILD", "diff": "@@ -4,7 +4,6 @@ package(licenses = [\"notice\"])\nbenchmark_test(\nname = \"bazel_test\",\n- size = \"enormous\",\nsrcs = [\"bazel_test.go\"],\nvisibility = [\"//:sandbox\"],\ndeps = [\n@@ -18,7 +17,6 @@ benchmark_test(\nbenchmark_test(\nname = \"fio_test\",\n- size = \"enormous\",\nsrcs = [\"fio_test.go\"],\nvisibility = [\"//:sandbox\"],\ndeps = [\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/media/BUILD", "new_path": "test/benchmarks/media/BUILD", "diff": "@@ -11,7 +11,6 @@ go_library(\nbenchmark_test(\nname = \"ffmpeg_test\",\n- size = \"enormous\",\nsrcs = [\"ffmpeg_test.go\"],\nlibrary = \":media\",\nvisibility = [\"//:sandbox\"],\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/ml/BUILD", "new_path": "test/benchmarks/ml/BUILD", "diff": "@@ -11,7 +11,6 @@ go_library(\nbenchmark_test(\nname = \"tensorflow_test\",\n- size = \"enormous\",\nsrcs = [\"tensorflow_test.go\"],\nlibrary = \":ml\",\nvisibility = [\"//:sandbox\"],\n" }, { "change_type": "MODIFY", "old_path": "test/benchmarks/network/BUILD", "new_path": "test/benchmarks/network/BUILD", "diff": "@@ -18,7 +18,6 @@ go_library(\nbenchmark_test(\nname = \"iperf_test\",\n- size = \"enormous\",\nsrcs = [\n\"iperf_test.go\",\n],\n@@ -34,7 +33,6 @@ benchmark_test(\nbenchmark_test(\nname = \"node_test\",\n- size = \"enormous\",\nsrcs = [\n\"node_test.go\",\n],\n@@ -49,7 +47,6 @@ benchmark_test(\nbenchmark_test(\nname = \"ruby_test\",\n- size = \"enormous\",\nsrcs = [\n\"ruby_test.go\",\n],\n@@ -64,7 +61,6 @@ benchmark_test(\nbenchmark_test(\nname = \"nginx_test\",\n- size = \"enormous\",\nsrcs = [\n\"nginx_test.go\",\n],\n@@ -79,7 +75,6 @@ benchmark_test(\nbenchmark_test(\nname = \"httpd_test\",\n- size = \"enormous\",\nsrcs = [\n\"httpd_test.go\",\n],\n" }, { "change_type": "MODIFY", "old_path": "test/perf/BUILD", "new_path": "test/perf/BUILD", "diff": "@@ -35,7 +35,7 @@ syscall_test(\n)\nsyscall_test(\n- size = \"enormous\",\n+ size = \"large\",\ndebug = False,\ntags = [\"nogotsan\"],\ntest = \"//test/perf/linux:getdents_benchmark\",\n@@ -48,7 +48,7 @@ syscall_test(\n)\nsyscall_test(\n- size = \"enormous\",\n+ size = \"large\",\ndebug = False,\ntags = [\"nogotsan\"],\ntest = \"//test/perf/linux:gettid_benchmark\",\n@@ -106,7 +106,7 @@ syscall_test(\n)\nsyscall_test(\n- size = \"enormous\",\n+ size = \"large\",\ndebug = False,\ntest = \"//test/perf/linux:signal_benchmark\",\n)\n@@ -124,7 +124,7 @@ syscall_test(\n)\nsyscall_test(\n- size = 
\"enormous\",\n+ size = \"large\",\nadd_overlay = True,\ndebug = False,\ntest = \"//test/perf/linux:unlink_benchmark\",\n" }, { "change_type": "MODIFY", "old_path": "test/runtimes/defs.bzl", "new_path": "test/runtimes/defs.bzl", "diff": "@@ -75,7 +75,6 @@ def runtime_test(name, **kwargs):\n\"local\",\n\"manual\",\n],\n- size = \"enormous\",\n**kwargs\n)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -772,8 +772,7 @@ syscall_test(\n)\nsyscall_test(\n- # NOTE(b/116636318): Large sendmsg may stall a long time.\n- size = \"enormous\",\n+ flaky = 1, # NOTE(b/116636318): Large sendmsg may stall a long time.\nshard_count = more_shards,\ntest = \"//test/syscalls/linux:socket_unix_dgram_local_test\",\n)\n@@ -791,8 +790,7 @@ syscall_test(\n)\nsyscall_test(\n- # NOTE(b/116636318): Large sendmsg may stall a long time.\n- size = \"enormous\",\n+ flaky = 1, # NOTE(b/116636318): Large sendmsg may stall a long time.\nshard_count = more_shards,\ntest = \"//test/syscalls/linux:socket_unix_seqpacket_local_test\",\n)\n" } ]
Go
Apache License 2.0
google/gvisor
Remove eternal and enormous tests. PiperOrigin-RevId: 366573366
259,992
05.04.2021 11:37:56
25,200
3007ae647d2e7a8800f3550f5ffc53c5e73415ce
Fail tests when container returns non-zero status
[ { "change_type": "MODIFY", "old_path": "pkg/test/dockerutil/container.go", "new_path": "pkg/test/dockerutil/container.go", "diff": "@@ -434,7 +434,14 @@ func (c *Container) Wait(ctx context.Context) error {\nselect {\ncase err := <-errChan:\nreturn err\n- case <-statusChan:\n+ case res := <-statusChan:\n+ if res.StatusCode != 0 {\n+ var msg string\n+ if res.Error != nil {\n+ msg = res.Error.Message\n+ }\n+ return fmt.Errorf(\"container returned non-zero status: %d, msg: %q\", res.StatusCode, msg)\n+ }\nreturn nil\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "test/image/image_test.go", "new_path": "test/image/image_test.go", "diff": "@@ -183,7 +183,10 @@ func TestMysql(t *testing.T) {\n// Start the container.\nif err := server.Spawn(ctx, dockerutil.RunOpts{\nImage: \"basic/mysql\",\n- Env: []string{\"MYSQL_ROOT_PASSWORD=foobar123\"},\n+ Env: []string{\n+ \"MYSQL_ROOT_PASSWORD=foobar123\",\n+ \"MYSQL_ROOT_HOST=%\", // Allow anyone to connect to the server.\n+ },\n}); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Fail tests when container returns non-zero status PiperOrigin-RevId: 366839955
260,001
05.04.2021 11:55:11
25,200
58afd120d35c385d98ad0dfef0be454532035180
Set Verity bit in verity_prepare cmd. This is needed to enable Xattrs features required by verity.
[ { "change_type": "MODIFY", "old_path": "runsc/cmd/verity_prepare.go", "new_path": "runsc/cmd/verity_prepare.go", "diff": "@@ -102,5 +102,7 @@ func (c *VerityPrepare) Execute(_ context.Context, f *flag.FlagSet, args ...inte\n// Force no networking, it is not necessary to run the verity measure tool.\nconf.Network = config.NetworkNone\n+ conf.Verity = true\n+\nreturn startContainerAndWait(spec, conf, cid, waitStatus)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Set Verity bit in verity_prepare cmd This is needed to enable Xattrs features required by verity. PiperOrigin-RevId: 366843640
260,001
05.04.2021 11:56:07
25,200
e21a71bff18ba9da30a0ef977c747376d51ce8cb
Allow user mount for verity fs. Allow user mounting a verity fs on an existing mount by specifying the mount flags root_hash and lower_path.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/verity.go", "new_path": "pkg/sentry/fsimpl/verity/verity.go", "diff": "@@ -35,6 +35,7 @@ package verity\nimport (\n\"bytes\"\n+ \"encoding/hex\"\n\"encoding/json\"\n\"fmt\"\n\"math\"\n@@ -105,6 +106,13 @@ var (\nverityMu sync.RWMutex\n)\n+// Mount option names for verityfs.\n+const (\n+ moptLowerPath = \"lower_path\"\n+ moptRootHash = \"root_hash\"\n+ moptRootName = \"root_name\"\n+)\n+\n// HashAlgorithm is a type specifying the algorithm used to hash the file\n// content.\ntype HashAlgorithm int\n@@ -171,6 +179,9 @@ type filesystem struct {\n// system.\nalg HashAlgorithm\n+ // opts is the string mount options passed to opts.Data.\n+ opts string\n+\n// renameMu synchronizes renaming with non-renaming operations in order\n// to ensure consistent lock ordering between dentry.dirMu in different\n// dentries.\n@@ -193,9 +204,6 @@ type filesystem struct {\n//\n// +stateify savable\ntype InternalFilesystemOptions struct {\n- // RootMerkleFileName is the name of the verity root Merkle tree file.\n- RootMerkleFileName string\n-\n// LowerName is the name of the filesystem wrapped by verity fs.\nLowerName string\n@@ -203,9 +211,6 @@ type InternalFilesystemOptions struct {\n// system.\nAlg HashAlgorithm\n- // RootHash is the root hash of the overall verity file system.\n- RootHash []byte\n-\n// AllowRuntimeEnable specifies whether the verity file system allows\n// enabling verification for files (i.e. building Merkle trees) during\n// runtime.\n@@ -239,28 +244,99 @@ func alertIntegrityViolation(msg string) error {\n// GetFilesystem implements vfs.FilesystemType.GetFilesystem.\nfunc (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, source string, opts vfs.GetFilesystemOptions) (*vfs.Filesystem, *vfs.Dentry, error) {\n+ mopts := vfs.GenericParseMountOptions(opts.Data)\n+ var rootHash []byte\n+ if encodedRootHash, ok := mopts[moptRootHash]; ok {\n+ delete(mopts, moptRootHash)\n+ hash, err := hex.DecodeString(encodedRootHash)\n+ if err != nil {\n+ ctx.Warningf(\"verity.FilesystemType.GetFilesystem: Failed to decode root hash: %v\", err)\n+ return nil, nil, syserror.EINVAL\n+ }\n+ rootHash = hash\n+ }\n+ var lowerPathname string\n+ if path, ok := mopts[moptLowerPath]; ok {\n+ delete(mopts, moptLowerPath)\n+ lowerPathname = path\n+ }\n+ rootName := \"root\"\n+ if root, ok := mopts[moptRootName]; ok {\n+ delete(mopts, moptRootName)\n+ rootName = root\n+ }\n+\n+ // Check for unparsed options.\n+ if len(mopts) != 0 {\n+ ctx.Warningf(\"verity.FilesystemType.GetFilesystem: unknown options: %v\", mopts)\n+ return nil, nil, syserror.EINVAL\n+ }\n+\n+ // Handle internal options.\niopts, ok := opts.InternalData.(InternalFilesystemOptions)\n- if !ok {\n+ if len(lowerPathname) == 0 && !ok {\nctx.Warningf(\"verity.FilesystemType.GetFilesystem: missing verity configs\")\nreturn nil, nil, syserror.EINVAL\n}\n+ if len(lowerPathname) != 0 {\n+ if ok {\n+ ctx.Warningf(\"verity.FilesystemType.GetFilesystem: unexpected verity configs with specified lower path\")\n+ return nil, nil, syserror.EINVAL\n+ }\n+ iopts = InternalFilesystemOptions{\n+ AllowRuntimeEnable: len(rootHash) == 0,\n+ Action: ErrorOnViolation,\n+ }\n+ }\naction = iopts.Action\n+ var lowerMount *vfs.Mount\n+ var mountedLowerVD vfs.VirtualDentry\n+ // Use an existing mount if lowerPath is provided.\n+ if len(lowerPathname) != 0 {\n+ vfsroot := vfs.RootFromContext(ctx)\n+ if vfsroot.Ok() {\n+ defer vfsroot.DecRef(ctx)\n+ }\n+ 
lowerPath := fspath.Parse(lowerPathname)\n+ if !lowerPath.Absolute {\n+ ctx.Infof(\"verity.FilesystemType.GetFilesystem: lower_path %q must be absolute\", lowerPathname)\n+ return nil, nil, syserror.EINVAL\n+ }\n+ var err error\n+ mountedLowerVD, err = vfsObj.GetDentryAt(ctx, creds, &vfs.PathOperation{\n+ Root: vfsroot,\n+ Start: vfsroot,\n+ Path: lowerPath,\n+ FollowFinalSymlink: true,\n+ }, &vfs.GetDentryOptions{\n+ CheckSearchable: true,\n+ })\n+ if err != nil {\n+ ctx.Infof(\"verity.FilesystemType.GetFilesystem: failed to resolve lower_path %q: %v\", lowerPathname, err)\n+ return nil, nil, err\n+ }\n+ lowerMount = mountedLowerVD.Mount()\n+ defer mountedLowerVD.DecRef(ctx)\n+ } else {\n// Mount the lower file system. The lower file system is wrapped inside\n// verity, and should not be exposed or connected.\n- mopts := &vfs.MountOptions{\n+ mountOpts := &vfs.MountOptions{\nGetFilesystemOptions: iopts.LowerGetFSOptions,\nInternalMount: true,\n}\n- mnt, err := vfsObj.MountDisconnected(ctx, creds, \"\", iopts.LowerName, mopts)\n+ mnt, err := vfsObj.MountDisconnected(ctx, creds, \"\", iopts.LowerName, mountOpts)\nif err != nil {\nreturn nil, nil, err\n}\n+ lowerMount = mnt\n+ }\nfs := &filesystem{\ncreds: creds.Fork(),\nalg: iopts.Alg,\n- lowerMount: mnt,\n+ lowerMount: lowerMount,\n+ opts: opts.Data,\nallowRuntimeEnable: iopts.AllowRuntimeEnable,\n}\nfs.vfsfs.Init(vfsObj, &fstype, fs)\n@@ -268,11 +344,11 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\n// Construct the root dentry.\nd := fs.newDentry()\nd.refs = 1\n- lowerVD := vfs.MakeVirtualDentry(mnt, mnt.Root())\n+ lowerVD := vfs.MakeVirtualDentry(lowerMount, lowerMount.Root())\nlowerVD.IncRef()\nd.lowerVD = lowerVD\n- rootMerkleName := merkleRootPrefix + iopts.RootMerkleFileName\n+ rootMerkleName := merkleRootPrefix + rootName\nlowerMerkleVD, err := vfsObj.GetDentryAt(ctx, fs.creds, &vfs.PathOperation{\nRoot: lowerVD,\n@@ -352,7 +428,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nd.mode = uint32(stat.Mode)\nd.uid = stat.UID\nd.gid = stat.GID\n- d.hash = make([]byte, len(iopts.RootHash))\n+ d.hash = make([]byte, len(rootHash))\nd.childrenNames = make(map[string]struct{})\nif !d.isDir() {\n@@ -427,7 +503,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\n}\nd.hashMu.Lock()\n- copy(d.hash, iopts.RootHash)\n+ copy(d.hash, rootHash)\nd.hashMu.Unlock()\nd.vfsd.Init(d)\n@@ -443,7 +519,7 @@ func (fs *filesystem) Release(ctx context.Context) {\n// MountOptions implements vfs.FilesystemImpl.MountOptions.\nfunc (fs *filesystem) MountOptions() string {\n- return \"\"\n+ return fs.opts\n}\n// dentry implements vfs.DentryImpl.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/verity_test.go", "new_path": "pkg/sentry/fsimpl/verity/verity_test.go", "diff": "@@ -89,10 +89,11 @@ func newVerityRoot(t *testing.T, hashAlg HashAlgorithm) (*vfs.VirtualFilesystem,\nAllowUserMount: true,\n})\n+ data := \"root_name=\" + rootMerkleFilename\nmntns, err := vfsObj.NewMountNamespace(ctx, auth.CredentialsFromContext(ctx), \"\", \"verity\", &vfs.MountOptions{\nGetFilesystemOptions: vfs.GetFilesystemOptions{\n+ Data: data,\nInternalData: InternalFilesystemOptions{\n- RootMerkleFileName: rootMerkleFilename,\nLowerName: \"tmpfs\",\nAlg: hashAlg,\nAllowRuntimeEnable: true,\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/vfs.go", "new_path": "runsc/boot/vfs.go", "diff": "@@ -92,7 +92,7 @@ func registerFilesystems(k *kernel.Kernel) 
error {\n})\nvfsObj.MustRegisterFilesystemType(verity.Name, &verity.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{\nAllowUserList: true,\n- AllowUserMount: false,\n+ AllowUserMount: true,\n})\n// Setup files in devtmpfs.\n@@ -483,7 +483,7 @@ func (c *containerMounter) getMountNameAndOptionsVFS2(conf *config.Config, m *mo\nvar data []string\nvar iopts interface{}\n- verityOpts, verityRequested, remainingMOpts, err := parseVerityMountOptions(m.Options)\n+ verityData, verityOpts, verityRequested, remainingMOpts, err := parseVerityMountOptions(m.Options)\nif err != nil {\nreturn \"\", nil, false, err\n}\n@@ -555,13 +555,13 @@ func (c *containerMounter) getMountNameAndOptionsVFS2(conf *config.Config, m *mo\n}\nif verityRequested {\n- verityOpts.RootMerkleFileName = path.Base(m.Mount.Destination)\n+ verityData = verityData + \"root_name=\" + path.Base(m.Mount.Destination)\nverityOpts.LowerName = fsName\nverityOpts.LowerGetFSOptions = opts.GetFilesystemOptions\nfsName = verity.Name\nopts = &vfs.MountOptions{\nGetFilesystemOptions: vfs.GetFilesystemOptions{\n- Data: strings.Join(data, \",\"),\n+ Data: verityData,\nInternalData: verityOpts,\n},\nInternalMount: true,\n@@ -582,9 +582,10 @@ func parseKeyValue(s string) (string, string, bool) {\n// parseAndFilterOptions scans the provided mount options for verity-related\n// mount options. It returns the parsed set of verity mount options, as well as\n// the filtered set of mount options unrelated to verity.\n-func parseVerityMountOptions(mopts []string) (verity.InternalFilesystemOptions, bool, []string, error) {\n+func parseVerityMountOptions(mopts []string) (string, verity.InternalFilesystemOptions, bool, []string, error) {\nnonVerity := []string{}\nfound := false\n+ var rootHash string\nverityOpts := verity.InternalFilesystemOptions{\nAction: verity.PanicOnViolation,\n}\n@@ -596,13 +597,13 @@ func parseVerityMountOptions(mopts []string) (verity.InternalFilesystemOptions,\nk, v, ok := parseKeyValue(o)\nif !ok {\n- return verityOpts, found, nonVerity, fmt.Errorf(\"invalid verity mount option with no value: %q\", o)\n+ return \"\", verityOpts, found, nonVerity, fmt.Errorf(\"invalid verity mount option with no value: %q\", o)\n}\nfound = true\nswitch k {\ncase \"verity.roothash\":\n- verityOpts.RootHash = []byte(v)\n+ rootHash = v\ncase \"verity.action\":\nswitch v {\ncase \"error\":\n@@ -614,11 +615,12 @@ func parseVerityMountOptions(mopts []string) (verity.InternalFilesystemOptions,\nverityOpts.Action = verity.PanicOnViolation\n}\ndefault:\n- return verityOpts, found, nonVerity, fmt.Errorf(\"unknown verity mount option: %q\", k)\n+ return \"\", verityOpts, found, nonVerity, fmt.Errorf(\"unknown verity mount option: %q\", k)\n}\n}\n- verityOpts.AllowRuntimeEnable = len(verityOpts.RootHash) == 0\n- return verityOpts, found, nonVerity, nil\n+ verityOpts.AllowRuntimeEnable = len(rootHash) == 0\n+ verityData := \"root_hash=\" + rootHash + \",\"\n+ return verityData, verityOpts, found, nonVerity, nil\n}\n// mountTmpVFS2 mounts an internal tmpfs at '/tmp' if it's safe to do so.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -317,6 +317,10 @@ syscall_test(\ntest = \"//test/syscalls/linux:mount_test\",\n)\n+syscall_test(\n+ test = \"//test/syscalls/linux:verity_mount_test\",\n+)\n+\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:mremap_test\",\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ 
-1303,6 +1303,20 @@ cc_binary(\n],\n)\n+cc_binary(\n+ name = \"verity_mount_test\",\n+ testonly = 1,\n+ srcs = [\"verity_mount.cc\"],\n+ linkstatic = 1,\n+ deps = [\n+ gtest,\n+ \"//test/util:capability_util\",\n+ \"//test/util:temp_path\",\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ ],\n+)\n+\ncc_binary(\nname = \"mremap_test\",\ntestonly = 1,\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/syscalls/linux/verity_mount.cc", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <sys/mount.h>\n+\n+#include <iomanip>\n+#include <sstream>\n+\n+#include \"gmock/gmock.h\"\n+#include \"gtest/gtest.h\"\n+#include \"test/util/capability_util.h\"\n+#include \"test/util/temp_path.h\"\n+#include \"test/util/test_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+namespace {\n+\n+// Mount verity file system on an existing gofer mount.\n+TEST(MountTest, MountExisting) {\n+ // Verity is implemented in VFS2.\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+\n+ // Mount a new tmpfs file system.\n+ auto const tmpfs_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ ASSERT_THAT(mount(\"\", tmpfs_dir.path().c_str(), \"tmpfs\", 0, \"\"),\n+ SyscallSucceeds());\n+\n+ // Mount a verity file system on the existing gofer mount.\n+ auto const verity_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ std::string opts = \"lower_path=\" + tmpfs_dir.path();\n+ EXPECT_THAT(mount(\"\", verity_dir.path().c_str(), \"verity\", 0, opts.c_str()),\n+ SyscallSucceeds());\n+}\n+\n+} // namespace\n+\n+} // namespace testing\n+} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Allow user mount for verity fs Allow user mounting a verity fs on an existing mount by specifying mount flags root_hash and lower_path. PiperOrigin-RevId: 366843846
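Aside (editor's illustration, not part of the commit record): with this change a verity filesystem can be layered onto an existing mount from inside the sandbox by passing lower_path (and optionally root_hash) in the mount data, as the new C++ test does. A rough Go equivalent is sketched below; it only succeeds inside a gVisor sandbox where the verity filesystem type is registered and the caller has CAP_SYS_ADMIN, and the paths are placeholders.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Placeholder paths: lower must already be a mounted filesystem
	// (e.g. a tmpfs), and target is where the verity view appears.
	lower := "/tmp/lower"
	target := "/tmp/verity"

	// Omitting root_hash leaves the mount in allow-runtime-enable mode;
	// passing "root_hash=<hex>" instead would enforce verification
	// against that hash.
	data := "lower_path=" + lower
	if err := unix.Mount("", target, "verity", 0, data); err != nil {
		fmt.Println("mount failed:", err)
		return
	}
	fmt.Println("verity mounted at", target)
}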
259,891
05.04.2021 12:34:12
25,200
e7b2023647df994736ccaaa99a66a8712db2c631
deflake semaphore test. There's no reason to actually increment the semaphore; it just introduces the chance of a race.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/semaphore.cc", "new_path": "test/syscalls/linux/semaphore.cc", "diff": "@@ -234,14 +234,6 @@ TEST(SemaphoreTest, SemTimedOpBlock) {\nAutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));\nASSERT_THAT(sem.get(), SyscallSucceeds());\n- ScopedThread th([&sem] {\n- absl::SleepFor(absl::Milliseconds(100));\n-\n- struct sembuf buf = {};\n- buf.sem_op = 1;\n- ASSERT_THAT(RetryEINTR(semop)(sem.get(), &buf, 1), SyscallSucceeds());\n- });\n-\nstruct sembuf buf = {};\nbuf.sem_op = -1;\nstruct timespec timeout = {};\n" } ]
Go
Apache License 2.0
google/gvisor
deflake semaphore test There's no reason to actually increment the semaphore, it just introduces the chance of a race. PiperOrigin-RevId: 366851795
259,907
05.04.2021 15:59:55
25,200
2d9095c7a669ad2632f12de8d0918f8bf48b499e
Actually don't run unlink_benchmark with TSAN. This benchmark currently takes > 15 minutes to run in that case.
[ { "change_type": "MODIFY", "old_path": "test/perf/BUILD", "new_path": "test/perf/BUILD", "diff": "@@ -127,6 +127,7 @@ syscall_test(\nsize = \"large\",\nadd_overlay = True,\ndebug = False,\n+ tags = [\"nogotsan\"],\ntest = \"//test/perf/linux:unlink_benchmark\",\n)\n@@ -134,6 +135,5 @@ syscall_test(\nsize = \"large\",\nadd_overlay = True,\ndebug = False,\n- tags = [\"nogotsan\"],\ntest = \"//test/perf/linux:write_benchmark\",\n)\n" } ]
Go
Apache License 2.0
google/gvisor
Actually don't run unlink_benchmark with TSAN. This benchmark currently takes > 15 minutes to run in that case. PiperOrigin-RevId: 366891726
259,985
05.04.2021 16:00:17
25,200
88f198c2a9da1bac9726db18af4e7615aaa65476
Allow default control values to be set for cgroupfs.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go", "new_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go", "diff": "@@ -94,6 +94,14 @@ var SupportedMountOptions = []string{\"all\", \"cpu\", \"cpuacct\", \"cpuset\", \"memory\"\n// +stateify savable\ntype FilesystemType struct{}\n+// InternalData contains internal data passed in to the cgroupfs mount via\n+// vfs.GetFilesystemOptions.InternalData.\n+//\n+// +stateify savable\n+type InternalData struct {\n+ DefaultControlValues map[string]int64\n+}\n+\n// filesystem implements vfs.FilesystemImpl.\n//\n// +stateify savable\n@@ -218,13 +226,19 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nfs.MaxCachedDentries = maxCachedDentries\nfs.VFSFilesystem().Init(vfsObj, &fsType, fs)\n+ var defaults map[string]int64\n+ if opts.InternalData != nil {\n+ ctx.Debugf(\"cgroupfs.FilesystemType.GetFilesystem: default control values: %v\", defaults)\n+ defaults = opts.InternalData.(*InternalData).DefaultControlValues\n+ }\n+\nfor _, ty := range wantControllers {\nvar c controller\nswitch ty {\ncase controllerMemory:\n- c = newMemoryController(fs)\n+ c = newMemoryController(fs, defaults)\ncase controllerCPU:\n- c = newCPUController(fs)\n+ c = newCPUController(fs, defaults)\ncase controllerCPUAcct:\nc = newCPUAcctController(fs)\ncase controllerCPUSet:\n@@ -235,6 +249,12 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nfs.controllers = append(fs.controllers, c)\n}\n+ if len(defaults) != 0 {\n+ // Internal data is always provided at sentry startup and unused values\n+ // indicate a problem with the sandbox config. Fail fast.\n+ panic(fmt.Sprintf(\"cgroupfs.FilesystemType.GetFilesystem: unknown internal mount data: %v\", defaults))\n+ }\n+\n// Controllers usually appear in alphabetical order when displayed. 
Sort it\n// here now, so it never needs to be sorted elsewhere.\nsort.Slice(fs.controllers, func(i, j int) bool { return fs.controllers[i].Type() < fs.controllers[j].Type() })\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/cgroupfs/cpu.go", "new_path": "pkg/sentry/fsimpl/cgroupfs/cpu.go", "diff": "@@ -28,22 +28,36 @@ type cpuController struct {\ncontrollerCommon\n// CFS bandwidth control parameters, values in microseconds.\n- cfsPeriod uint64\n+ cfsPeriod int64\ncfsQuota int64\n// CPU shares, values should be (num core * 1024).\n- shares uint64\n+ shares int64\n}\nvar _ controller = (*cpuController)(nil)\n-func newCPUController(fs *filesystem) *cpuController {\n+func newCPUController(fs *filesystem, defaults map[string]int64) *cpuController {\n// Default values for controller parameters from Linux.\nc := &cpuController{\ncfsPeriod: 100000,\ncfsQuota: -1,\nshares: 1024,\n}\n+\n+ if val, ok := defaults[\"cpu.cfs_period_us\"]; ok {\n+ c.cfsPeriod = val\n+ delete(defaults, \"cpu.cfs_period_us\")\n+ }\n+ if val, ok := defaults[\"cpu.cfs_quota_us\"]; ok {\n+ c.cfsQuota = val\n+ delete(defaults, \"cpu.cfs_quota_us\")\n+ }\n+ if val, ok := defaults[\"cpu.shares\"]; ok {\n+ c.shares = val\n+ delete(defaults, \"cpu.shares\")\n+ }\n+\nc.controllerCommon.init(controllerCPU, fs)\nreturn c\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/cgroupfs/memory.go", "new_path": "pkg/sentry/fsimpl/cgroupfs/memory.go", "diff": "@@ -17,7 +17,9 @@ package cgroupfs\nimport (\n\"bytes\"\n\"fmt\"\n+ \"math\"\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n@@ -28,12 +30,23 @@ import (\n// +stateify savable\ntype memoryController struct {\ncontrollerCommon\n+\n+ limitBytes int64\n}\nvar _ controller = (*memoryController)(nil)\n-func newMemoryController(fs *filesystem) *memoryController {\n- c := &memoryController{}\n+func newMemoryController(fs *filesystem, defaults map[string]int64) *memoryController {\n+ c := &memoryController{\n+ // Linux sets this to (PAGE_COUNTER_MAX * PAGE_SIZE) by default, which\n+ // is ~ 2**63 on a 64-bit system. So essentially, inifinity. The exact\n+ // value isn't very important.\n+ limitBytes: math.MaxInt64,\n+ }\n+ if val, ok := defaults[\"memory.limit_in_bytes\"]; ok {\n+ c.limitBytes = val\n+ delete(defaults, \"memory.limit_in_bytes\")\n+ }\nc.controllerCommon.init(controllerMemory, fs)\nreturn c\n}\n@@ -41,6 +54,7 @@ func newMemoryController(fs *filesystem) *memoryController {\n// AddControlFiles implements controller.AddControlFiles.\nfunc (c *memoryController) AddControlFiles(ctx context.Context, creds *auth.Credentials, _ *cgroupInode, contents map[string]kernfs.Inode) {\ncontents[\"memory.usage_in_bytes\"] = c.fs.newControllerFile(ctx, creds, &memoryUsageInBytesData{})\n+ contents[\"memory.limit_in_bytes\"] = c.fs.newStaticControllerFile(ctx, creds, linux.FileMode(0644), fmt.Sprintf(\"%d\\n\", c.limitBytes))\n}\n// +stateify savable\n" } ]
Go
Apache License 2.0
google/gvisor
Allow default control values to be set for cgroupfs. PiperOrigin-RevId: 366891806
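Aside (editor's illustration, not part of the commit record): the new InternalData hook lets sentry-internal callers seed control files with defaults when the cgroupfs mount is created. Below is a minimal sketch against gVisor's internal packages; the specific keys and numbers are made up, but every key must be one the controllers actually consume (cpu.*, memory.limit_in_bytes), since leftover keys make GetFilesystem panic.

package main

import (
	"gvisor.dev/gvisor/pkg/sentry/fsimpl/cgroupfs"
	"gvisor.dev/gvisor/pkg/sentry/vfs"
)

// cgroupfsOptions builds mount options that request the cpu and memory
// controllers and pre-populate two of their control values.
func cgroupfsOptions() vfs.GetFilesystemOptions {
	return vfs.GetFilesystemOptions{
		Data: "cpu,memory",
		InternalData: &cgroupfs.InternalData{
			DefaultControlValues: map[string]int64{
				"memory.limit_in_bytes": 2 << 30, // hypothetical 2 GiB limit
				"cpu.shares":            2048,    // hypothetical share weight
			},
		},
	}
}

func main() {
	_ = cgroupfsOptions()
}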
259,992
05.04.2021 16:58:07
25,200
198e0dcde2476d847de699dc3c0d20421a855e86
Add fsstress on tmpfs to presubmit Updates
[ { "change_type": "MODIFY", "old_path": ".buildkite/pipeline.yaml", "new_path": ".buildkite/pipeline.yaml", "diff": "@@ -89,6 +89,9 @@ steps:\n- <<: *common\nlabel: \":person_in_lotus_position: KVM tests\"\ncommand: make kvm-tests\n+ - <<: *common\n+ label: \":weight_lifter: Fsstress test\"\n+ command: make fsstress-test\n- <<: *common\nlabel: \":docker: Containerd 1.3.9 tests\"\ncommand: make containerd-test-1.3.9\n" }, { "change_type": "MODIFY", "old_path": "test/fsstress/BUILD", "new_path": "test/fsstress/BUILD", "diff": "@@ -14,9 +14,7 @@ go_test(\n\"manual\",\n\"local\",\n],\n- deps = [\n- \"//pkg/test/dockerutil\",\n- ],\n+ deps = [\"//pkg/test/dockerutil\"],\n)\ngo_library(\n" }, { "change_type": "MODIFY", "old_path": "test/fsstress/fsstress_test.go", "new_path": "test/fsstress/fsstress_test.go", "diff": "@@ -17,7 +17,9 @@ package fsstress\nimport (\n\"context\"\n+ \"flag\"\n\"math/rand\"\n+ \"os\"\n\"strconv\"\n\"strings\"\n\"testing\"\n@@ -30,33 +32,44 @@ func init() {\nrand.Seed(int64(time.Now().Nanosecond()))\n}\n-func fsstress(t *testing.T, dir string) {\n+func TestMain(m *testing.M) {\n+ dockerutil.EnsureSupportedDockerVersion()\n+ flag.Parse()\n+ os.Exit(m.Run())\n+}\n+\n+type config struct {\n+ operations string\n+ processes string\n+ target string\n+}\n+\n+func fsstress(t *testing.T, conf config) {\nctx := context.Background()\nd := dockerutil.MakeContainer(ctx, t)\ndefer d.CleanUp(ctx)\n- const (\n- operations = \"10000\"\n- processes = \"100\"\n- image = \"basic/fsstress\"\n- )\n+ const image = \"basic/fsstress\"\nseed := strconv.FormatUint(uint64(rand.Uint32()), 10)\n- args := []string{\"-d\", dir, \"-n\", operations, \"-p\", processes, \"-s\", seed, \"-X\"}\n- t.Logf(\"Repro: docker run --rm --runtime=runsc %s %s\", image, strings.Join(args, \"\"))\n+ args := []string{\"-d\", conf.target, \"-n\", conf.operations, \"-p\", conf.processes, \"-s\", seed, \"-X\"}\n+ t.Logf(\"Repro: docker run --rm --runtime=%s gvisor.dev/images/%s %s\", dockerutil.Runtime(), image, strings.Join(args, \" \"))\nout, err := d.Run(ctx, dockerutil.RunOpts{Image: image}, args...)\nif err != nil {\nt.Fatalf(\"docker run failed: %v\\noutput: %s\", err, out)\n}\n- lines := strings.SplitN(out, \"\\n\", 2)\n- if len(lines) > 1 || !strings.HasPrefix(out, \"seed =\") {\n+ // This is to catch cases where fsstress spews out error messages during clean\n+ // up but doesn't return error.\n+ if len(out) > 0 {\nt.Fatalf(\"unexpected output: %s\", out)\n}\n}\n-func TestFsstressGofer(t *testing.T) {\n- fsstress(t, \"/test\")\n-}\n-\nfunc TestFsstressTmpfs(t *testing.T) {\n- fsstress(t, \"/tmp\")\n+ // This takes between 10s to run on my machine. Adjust as needed.\n+ cfg := config{\n+ operations: \"5000\",\n+ processes: \"20\",\n+ target: \"/tmp\",\n+ }\n+ fsstress(t, cfg)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Add fsstress on tmpfs to presubmit Updates #5273 PiperOrigin-RevId: 366902314
259,992
05.04.2021 17:07:59
25,200
661e5ae7ae496332c0b5440980d0a9a00f9b9fbd
Enable Checkpoint/Restore test with VFS2 Closes
[ { "change_type": "MODIFY", "old_path": "test/e2e/BUILD", "new_path": "test/e2e/BUILD", "diff": "@@ -8,7 +8,6 @@ go_test(\nsrcs = [\n\"exec_test.go\",\n\"integration_test.go\",\n- \"regression_test.go\",\n],\nlibrary = \":integration\",\ntags = [\n" }, { "change_type": "MODIFY", "old_path": "test/e2e/integration_test.go", "new_path": "test/e2e/integration_test.go", "diff": "@@ -168,13 +168,6 @@ func TestCheckpointRestore(t *testing.T) {\nt.Skip(\"Pause/resume is not supported.\")\n}\n- // TODO(gvisor.dev/issue/3373): Remove after implementing.\n- if usingVFS2, err := dockerutil.UsingVFS2(); usingVFS2 {\n- t.Skip(\"CheckpointRestore not implemented in VFS2.\")\n- } else if err != nil {\n- t.Fatalf(\"failed to read config for runtime %s: %v\", dockerutil.Runtime(), err)\n- }\n-\nctx := context.Background()\nd := dockerutil.MakeContainer(ctx, t)\ndefer d.CleanUp(ctx)\n@@ -592,6 +585,30 @@ func runIntegrationTest(t *testing.T, capAdd []string, args ...string) {\n}\n}\n+// Test that UDS can be created using overlay when parent directory is in lower\n+// layer only (b/134090485).\n+//\n+// Prerequisite: the directory where the socket file is created must not have\n+// been open for write before bind(2) is called.\n+func TestBindOverlay(t *testing.T) {\n+ ctx := context.Background()\n+ d := dockerutil.MakeContainer(ctx, t)\n+ defer d.CleanUp(ctx)\n+\n+ // Run the container.\n+ got, err := d.Run(ctx, dockerutil.RunOpts{\n+ Image: \"basic/ubuntu\",\n+ }, \"bash\", \"-c\", \"nc -q -1 -l -U /var/run/sock & p=$! && sleep 1 && echo foobar-asdf | nc -q 0 -U /var/run/sock && wait $p\")\n+ if err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n+\n+ // Check the output contains what we want.\n+ if want := \"foobar-asdf\"; !strings.Contains(got, want) {\n+ t.Fatalf(\"docker run output is missing %q: %s\", want, got)\n+ }\n+}\n+\nfunc TestMain(m *testing.M) {\ndockerutil.EnsureSupportedDockerVersion()\nflag.Parse()\n" }, { "change_type": "DELETE", "old_path": "test/e2e/regression_test.go", "new_path": null, "diff": "-// Copyright 2019 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-package integration\n-\n-import (\n- \"context\"\n- \"strings\"\n- \"testing\"\n-\n- \"gvisor.dev/gvisor/pkg/test/dockerutil\"\n-)\n-\n-// Test that UDS can be created using overlay when parent directory is in lower\n-// layer only (b/134090485).\n-//\n-// Prerequisite: the directory where the socket file is created must not have\n-// been open for write before bind(2) is called.\n-func TestBindOverlay(t *testing.T) {\n- ctx := context.Background()\n- d := dockerutil.MakeContainer(ctx, t)\n- defer d.CleanUp(ctx)\n-\n- // Run the container.\n- got, err := d.Run(ctx, dockerutil.RunOpts{\n- Image: \"basic/ubuntu\",\n- }, \"bash\", \"-c\", \"nc -q -1 -l -U /var/run/sock & p=$! 
&& sleep 1 && echo foobar-asdf | nc -q 0 -U /var/run/sock && wait $p\")\n- if err != nil {\n- t.Fatalf(\"docker run failed: %v\", err)\n- }\n-\n- // Check the output contains what we want.\n- if want := \"foobar-asdf\"; !strings.Contains(got, want) {\n- t.Fatalf(\"docker run output is missing %q: %s\", want, got)\n- }\n-}\n" } ]
Go
Apache License 2.0
google/gvisor
Enable Checkpoint/Restore test with VFS2 Closes #3373 PiperOrigin-RevId: 366903991
260,001
05.04.2021 17:29:30
25,200
63340e61388621d41a5abb08d8902a1565d02a96
Add initial verity ioctl syscall tests
[ { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -243,6 +243,10 @@ syscall_test(\ntest = \"//test/syscalls/linux:ioctl_test\",\n)\n+syscall_test(\n+ test = \"//test/syscalls/linux:verity_ioctl_test\",\n+)\n+\nsyscall_test(\ntest = \"//test/syscalls/linux:iptables_test\",\n)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -1014,6 +1014,22 @@ cc_binary(\n],\n)\n+cc_binary(\n+ name = \"verity_ioctl_test\",\n+ testonly = 1,\n+ srcs = [\"verity_ioctl.cc\"],\n+ linkstatic = 1,\n+ deps = [\n+ \"//test/util:capability_util\",\n+ gtest,\n+ \"//test/util:fs_util\",\n+ \"//test/util:mount_util\",\n+ \"//test/util:temp_path\",\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ ],\n+)\n+\ncc_library(\nname = \"iptables_types\",\ntestonly = 1,\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/syscalls/linux/verity_ioctl.cc", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <sys/mount.h>\n+\n+#include \"gmock/gmock.h\"\n+#include \"gtest/gtest.h\"\n+#include \"test/util/capability_util.h\"\n+#include \"test/util/fs_util.h\"\n+#include \"test/util/mount_util.h\"\n+#include \"test/util/temp_path.h\"\n+#include \"test/util/test_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+namespace {\n+\n+#ifndef FS_IOC_ENABLE_VERITY\n+#define FS_IOC_ENABLE_VERITY 1082156677\n+#endif\n+\n+#ifndef FS_IOC_MEASURE_VERITY\n+#define FS_IOC_MEASURE_VERITY 3221513862\n+#endif\n+\n+#ifndef FS_VERITY_FL\n+#define FS_VERITY_FL 1048576\n+#endif\n+\n+#ifndef FS_IOC_GETFLAGS\n+#define FS_IOC_GETFLAGS 2148034049\n+#endif\n+\n+struct fsverity_digest {\n+ __u16 digest_algorithm;\n+ __u16 digest_size; /* input/output */\n+ __u8 digest[];\n+};\n+\n+const int fsverity_max_digest_size = 64;\n+const int fsverity_default_digest_size = 32;\n+\n+class IoctlTest : public ::testing::Test {\n+ protected:\n+ void SetUp() override {\n+ // Verity is implemented in VFS2.\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+ // Mount a tmpfs file system, to be wrapped by a verity fs.\n+ tmpfs_dir_ = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ ASSERT_THAT(mount(\"\", tmpfs_dir_.path().c_str(), \"tmpfs\", 0, \"\"),\n+ SyscallSucceeds());\n+\n+ // Create a new file in the tmpfs mount.\n+ constexpr char kContents[] = \"foobarbaz\";\n+ file_ = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateFileWith(tmpfs_dir_.path(), kContents, 0777));\n+ filename_ = Basename(file_.path());\n+ }\n+\n+ TempPath tmpfs_dir_;\n+ TempPath file_;\n+ std::string filename_;\n+};\n+\n+TEST_F(IoctlTest, Enable) {\n+ // mount a verity fs on the existing tmpfs mount.\n+ std::string mount_opts = \"lower_path=\" + tmpfs_dir_.path();\n+ auto const verity_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ ASSERT_THAT(\n+ mount(\"\", 
verity_dir.path().c_str(), \"verity\", 0, mount_opts.c_str()),\n+ SyscallSucceeds());\n+\n+ printf(\"verity path: %s, filename: %s\\n\", verity_dir.path().c_str(),\n+ filename_.c_str());\n+ fflush(nullptr);\n+ // Confirm that the verity flag is absent.\n+ int flag = 0;\n+ auto const fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Open(JoinPath(verity_dir.path(), filename_), O_RDONLY, 0777));\n+ ASSERT_THAT(ioctl(fd.get(), FS_IOC_GETFLAGS, &flag), SyscallSucceeds());\n+ EXPECT_EQ(flag & FS_VERITY_FL, 0);\n+\n+ // Enable the file and confirm that the verity flag is present.\n+ ASSERT_THAT(ioctl(fd.get(), FS_IOC_ENABLE_VERITY), SyscallSucceeds());\n+ ASSERT_THAT(ioctl(fd.get(), FS_IOC_GETFLAGS, &flag), SyscallSucceeds());\n+ EXPECT_EQ(flag & FS_VERITY_FL, FS_VERITY_FL);\n+}\n+\n+TEST_F(IoctlTest, Measure) {\n+ // mount a verity fs on the existing tmpfs mount.\n+ std::string mount_opts = \"lower_path=\" + tmpfs_dir_.path();\n+ auto const verity_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ ASSERT_THAT(\n+ mount(\"\", verity_dir.path().c_str(), \"verity\", 0, mount_opts.c_str()),\n+ SyscallSucceeds());\n+\n+ // Confirm that the file cannot be measured.\n+ auto const fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Open(JoinPath(verity_dir.path(), filename_), O_RDONLY, 0777));\n+ int digest_size = sizeof(struct fsverity_digest) + fsverity_max_digest_size;\n+ struct fsverity_digest *digest =\n+ reinterpret_cast<struct fsverity_digest *>(malloc(digest_size));\n+ memset(digest, 0, digest_size);\n+ digest->digest_size = fsverity_max_digest_size;\n+ ASSERT_THAT(ioctl(fd.get(), FS_IOC_MEASURE_VERITY, digest),\n+ SyscallFailsWithErrno(ENODATA));\n+\n+ // Enable the file and confirm that the file can be measured.\n+ ASSERT_THAT(ioctl(fd.get(), FS_IOC_ENABLE_VERITY), SyscallSucceeds());\n+ ASSERT_THAT(ioctl(fd.get(), FS_IOC_MEASURE_VERITY, digest),\n+ SyscallSucceeds());\n+ EXPECT_EQ(digest->digest_size, fsverity_default_digest_size);\n+ free(digest);\n+}\n+\n+} // namespace\n+\n+} // namespace testing\n+} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Add initial verity ioctl syscall tests PiperOrigin-RevId: 366907152
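Aside (editor's illustration, not part of the commit record): the test exercises FS_IOC_ENABLE_VERITY and FS_IOC_MEASURE_VERITY from C++. For readers more comfortable with Go, a rough equivalent of the enable step is sketched below; the request number is copied from the test's fallback #define, the file path is a placeholder, and the file must already live on a verity mount.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// fsIocEnableVerity is the FS_IOC_ENABLE_VERITY request number, taken
// from the test above.
const fsIocEnableVerity = 1082156677

func main() {
	f, err := os.Open("/verityroot/some-file") // placeholder path
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// Enabling verity builds the file's Merkle tree; afterwards
	// FS_IOC_GETFLAGS reports FS_VERITY_FL and the file can be measured.
	if _, _, errno := unix.Syscall(unix.SYS_IOCTL, f.Fd(), fsIocEnableVerity, 0); errno != 0 {
		fmt.Println("ioctl failed:", errno)
		return
	}
	fmt.Println("verity enabled")
}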
259,985
05.04.2021 19:44:12
25,200
7a7fcf2dbaa7bdcdb9b523358de91c71d5cb05d8
Report task CPU usage through the cpuacct cgroup controller.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/cgroupfs/base.go", "new_path": "pkg/sentry/fsimpl/cgroupfs/base.go", "diff": "@@ -167,8 +167,8 @@ func (d *cgroupProcsData) Generate(ctx context.Context, buf *bytes.Buffer) error\npgids := make(map[kernel.ThreadID]struct{})\n- d.fs.tasksMu.Lock()\n- defer d.fs.tasksMu.Unlock()\n+ d.fs.tasksMu.RLock()\n+ defer d.fs.tasksMu.RUnlock()\nfor task := range d.ts {\n// Map dedups pgid, since iterating over all tasks produces multiple\n@@ -209,8 +209,8 @@ func (d *tasksData) Generate(ctx context.Context, buf *bytes.Buffer) error {\nvar pids []kernel.ThreadID\n- d.fs.tasksMu.Lock()\n- defer d.fs.tasksMu.Unlock()\n+ d.fs.tasksMu.RLock()\n+ defer d.fs.tasksMu.RUnlock()\nfor task := range d.ts {\nif pid := currPidns.IDOfTask(task); pid != 0 {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go", "new_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go", "diff": "@@ -129,7 +129,7 @@ type filesystem struct {\n// tasksMu serializes task membership changes across all cgroups within a\n// filesystem.\n- tasksMu sync.Mutex `state:\"nosave\"`\n+ tasksMu sync.RWMutex `state:\"nosave\"`\n}\n// Name implements vfs.FilesystemType.Name.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/cgroupfs/cpuacct.go", "new_path": "pkg/sentry/fsimpl/cgroupfs/cpuacct.go", "diff": "package cgroupfs\nimport (\n+ \"bytes\"\n+ \"fmt\"\n+\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n+ \"gvisor.dev/gvisor/pkg/sentry/usage\"\n)\n// +stateify savable\n@@ -34,6 +39,76 @@ func newCPUAcctController(fs *filesystem) *cpuacctController {\n}\n// AddControlFiles implements controller.AddControlFiles.\n-func (c *cpuacctController) AddControlFiles(ctx context.Context, creds *auth.Credentials, _ *cgroupInode, contents map[string]kernfs.Inode) {\n- // This controller is currently intentionally empty.\n+func (c *cpuacctController) AddControlFiles(ctx context.Context, creds *auth.Credentials, cg *cgroupInode, contents map[string]kernfs.Inode) {\n+ cpuacctCG := &cpuacctCgroup{cg}\n+ contents[\"cpuacct.stat\"] = c.fs.newControllerFile(ctx, creds, &cpuacctStatData{cpuacctCG})\n+ contents[\"cpuacct.usage\"] = c.fs.newControllerFile(ctx, creds, &cpuacctUsageData{cpuacctCG})\n+ contents[\"cpuacct.usage_user\"] = c.fs.newControllerFile(ctx, creds, &cpuacctUsageUserData{cpuacctCG})\n+ contents[\"cpuacct.usage_sys\"] = c.fs.newControllerFile(ctx, creds, &cpuacctUsageSysData{cpuacctCG})\n+}\n+\n+// +stateify savable\n+type cpuacctCgroup struct {\n+ *cgroupInode\n+}\n+\n+func (c *cpuacctCgroup) collectCPUStats() usage.CPUStats {\n+ var cs usage.CPUStats\n+ c.fs.tasksMu.RLock()\n+ // Note: This isn't very accurate, since the tasks are potentially\n+ // still running as we accumulate their stats.\n+ for t := range c.ts {\n+ cs.Accumulate(t.CPUStats())\n+ }\n+ c.fs.tasksMu.RUnlock()\n+ return cs\n+}\n+\n+// +stateify savable\n+type cpuacctStatData struct {\n+ *cpuacctCgroup\n+}\n+\n+// Generate implements vfs.DynamicBytesSource.Generate.\n+func (d *cpuacctStatData) Generate(ctx context.Context, buf *bytes.Buffer) error {\n+ cs := d.collectCPUStats()\n+ fmt.Fprintf(buf, \"user %d\\n\", linux.ClockTFromDuration(cs.UserTime))\n+ fmt.Fprintf(buf, \"system %d\\n\", linux.ClockTFromDuration(cs.SysTime))\n+ return nil\n+}\n+\n+// +stateify savable\n+type cpuacctUsageData struct {\n+ *cpuacctCgroup\n+}\n+\n+// Generate implements 
vfs.DynamicBytesSource.Generate.\n+func (d *cpuacctUsageData) Generate(ctx context.Context, buf *bytes.Buffer) error {\n+ cs := d.collectCPUStats()\n+ fmt.Fprintf(buf, \"%d\\n\", cs.UserTime.Nanoseconds()+cs.SysTime.Nanoseconds())\n+ return nil\n+}\n+\n+// +stateify savable\n+type cpuacctUsageUserData struct {\n+ *cpuacctCgroup\n+}\n+\n+// Generate implements vfs.DynamicBytesSource.Generate.\n+func (d *cpuacctUsageUserData) Generate(ctx context.Context, buf *bytes.Buffer) error {\n+ cs := d.collectCPUStats()\n+ fmt.Fprintf(buf, \"%d\\n\", cs.UserTime.Nanoseconds())\n+ return nil\n+}\n+\n+// +stateify savable\n+type cpuacctUsageSysData struct {\n+ *cpuacctCgroup\n+}\n+\n+// Generate implements vfs.DynamicBytesSource.Generate.\n+func (d *cpuacctUsageSysData) Generate(ctx context.Context, buf *bytes.Buffer) error {\n+ cs := d.collectCPUStats()\n+ fmt.Fprintf(buf, \"%d\\n\", cs.SysTime.Nanoseconds())\n+ return nil\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/cgroup.cc", "new_path": "test/syscalls/linux/cgroup.cc", "diff": "#include \"gtest/gtest.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n+#include \"absl/strings/str_split.h\"\n#include \"test/util/capability_util.h\"\n#include \"test/util/cgroup_util.h\"\n#include \"test/util/temp_path.h\"\n@@ -31,6 +32,7 @@ namespace testing {\nnamespace {\nusing ::testing::_;\n+using ::testing::Ge;\nusing ::testing::Gt;\nstd::vector<std::string> known_controllers = {\"cpu\", \"cpuset\", \"cpuacct\",\n@@ -206,6 +208,55 @@ TEST(CPUCgroup, ControlFilesHaveDefaultValues) {\nIsPosixErrorOkAndHolds(1024));\n}\n+TEST(CPUAcctCgroup, CPUAcctUsage) {\n+ SKIP_IF(!CgroupsAvailable());\n+\n+ Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));\n+ Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"cpuacct\"));\n+\n+ const int64_t usage =\n+ ASSERT_NO_ERRNO_AND_VALUE(c.ReadIntegerControlFile(\"cpuacct.usage\"));\n+ const int64_t usage_user =\n+ ASSERT_NO_ERRNO_AND_VALUE(c.ReadIntegerControlFile(\"cpuacct.usage_user\"));\n+ const int64_t usage_sys =\n+ ASSERT_NO_ERRNO_AND_VALUE(c.ReadIntegerControlFile(\"cpuacct.usage_sys\"));\n+\n+ EXPECT_GE(usage, 0);\n+ EXPECT_GE(usage_user, 0);\n+ EXPECT_GE(usage_sys, 0);\n+\n+ EXPECT_GE(usage_user + usage_sys, usage);\n+}\n+\n+TEST(CPUAcctCgroup, CPUAcctStat) {\n+ SKIP_IF(!CgroupsAvailable());\n+\n+ Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));\n+ Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"cpuacct\"));\n+\n+ std::string stat =\n+ ASSERT_NO_ERRNO_AND_VALUE(c.ReadControlFile(\"cpuacct.stat\"));\n+\n+ // We're expecting the contents of \"cpuacct.stat\" to look similar to this:\n+ //\n+ // user 377986\n+ // system 220662\n+\n+ std::vector<absl::string_view> lines =\n+ absl::StrSplit(stat, '\\n', absl::SkipEmpty());\n+ ASSERT_EQ(lines.size(), 2);\n+\n+ std::vector<absl::string_view> user_tokens =\n+ StrSplit(lines[0], absl::ByChar(' '));\n+ EXPECT_EQ(user_tokens[0], \"user\");\n+ EXPECT_THAT(Atoi<int64_t>(user_tokens[1]), IsPosixErrorOkAndHolds(Ge(0)));\n+\n+ std::vector<absl::string_view> sys_tokens =\n+ StrSplit(lines[1], absl::ByChar(' '));\n+ EXPECT_EQ(sys_tokens[0], \"system\");\n+ EXPECT_THAT(Atoi<int64_t>(sys_tokens[1]), IsPosixErrorOkAndHolds(Ge(0)));\n+}\n+\nTEST(ProcCgroups, Empty) {\nSKIP_IF(!CgroupsAvailable());\n" } ]
Go
Apache License 2.0
google/gvisor
Report task CPU usage through the cpuacct cgroup controller. PiperOrigin-RevId: 366923274
260,004
06.04.2021 10:14:10
25,200
d7fd00bad1416fb44b3303331ee223c28c69fe9b
Do not perform MLD for certain multicast scopes, as per RFC 2710 section 5 page 10. Test: ipv6_test.TestMLDSkipProtocol
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ipv6.go", "new_path": "pkg/tcpip/header/ipv6.go", "diff": "@@ -142,11 +142,6 @@ const (\n// ipv6MulticastAddressScopeMask is the mask for the scope (scop) field,\n// within the byte holding the field, as per RFC 4291 section 2.7.\nipv6MulticastAddressScopeMask = 0xF\n-\n- // ipv6LinkLocalMulticastScope is the value of the scope (scop) field within\n- // a multicast IPv6 address that indicates the address has link-local scope,\n- // as per RFC 4291 section 2.7.\n- ipv6LinkLocalMulticastScope = 2\n)\n// IPv6EmptySubnet is the empty IPv6 subnet. It may also be known as the\n@@ -399,7 +394,7 @@ func IsV6LoopbackAddress(addr tcpip.Address) bool {\n// IsV6LinkLocalMulticastAddress determines if the provided address is an IPv6\n// link-local multicast address.\nfunc IsV6LinkLocalMulticastAddress(addr tcpip.Address) bool {\n- return IsV6MulticastAddress(addr) && addr[ipv6MulticastAddressScopeByteIdx]&ipv6MulticastAddressScopeMask == ipv6LinkLocalMulticastScope\n+ return IsV6MulticastAddress(addr) && V6MulticastScope(addr) == IPv6LinkLocalMulticastScope\n}\n// AppendOpaqueInterfaceIdentifier appends a 64 bit opaque interface identifier\n@@ -520,3 +515,45 @@ func GenerateTempIPv6SLAACAddr(tempIIDHistory []byte, stableAddr tcpip.Address)\nPrefixLen: IIDOffsetInIPv6Address * 8,\n}\n}\n+\n+// IPv6MulticastScope is the scope of a multicast IPv6 address.\n+type IPv6MulticastScope uint8\n+\n+// The various values for IPv6 multicast scopes, as per RFC 7346 section 2:\n+//\n+// +------+--------------------------+-------------------------+\n+// | scop | NAME | REFERENCE |\n+// +------+--------------------------+-------------------------+\n+// | 0 | Reserved | [RFC4291], RFC 7346 |\n+// | 1 | Interface-Local scope | [RFC4291], RFC 7346 |\n+// | 2 | Link-Local scope | [RFC4291], RFC 7346 |\n+// | 3 | Realm-Local scope | [RFC4291], RFC 7346 |\n+// | 4 | Admin-Local scope | [RFC4291], RFC 7346 |\n+// | 5 | Site-Local scope | [RFC4291], RFC 7346 |\n+// | 6 | Unassigned | |\n+// | 7 | Unassigned | |\n+// | 8 | Organization-Local scope | [RFC4291], RFC 7346 |\n+// | 9 | Unassigned | |\n+// | A | Unassigned | |\n+// | B | Unassigned | |\n+// | C | Unassigned | |\n+// | D | Unassigned | |\n+// | E | Global scope | [RFC4291], RFC 7346 |\n+// | F | Reserved | [RFC4291], RFC 7346 |\n+// +------+--------------------------+-------------------------+\n+const (\n+ IPv6Reserved0MulticastScope = IPv6MulticastScope(0x0)\n+ IPv6InterfaceLocalMulticastScope = IPv6MulticastScope(0x1)\n+ IPv6LinkLocalMulticastScope = IPv6MulticastScope(0x2)\n+ IPv6RealmLocalMulticastScope = IPv6MulticastScope(0x3)\n+ IPv6AdminLocalMulticastScope = IPv6MulticastScope(0x4)\n+ IPv6SiteLocalMulticastScope = IPv6MulticastScope(0x5)\n+ IPv6OrganizationLocalMulticastScope = IPv6MulticastScope(0x8)\n+ IPv6GlobalMulticastScope = IPv6MulticastScope(0xE)\n+ IPv6ReservedFMulticastScope = IPv6MulticastScope(0xF)\n+)\n+\n+// V6MulticastScope returns the scope of a multicast address.\n+func V6MulticastScope(addr tcpip.Address) IPv6MulticastScope {\n+ return IPv6MulticastScope(addr[ipv6MulticastAddressScopeByteIdx] & ipv6MulticastAddressScopeMask)\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ipv6_test.go", "new_path": "pkg/tcpip/header/ipv6_test.go", "diff": "@@ -373,3 +373,83 @@ func TestSolicitedNodeAddr(t *testing.T) {\n})\n}\n}\n+\n+func TestV6MulticastScope(t *testing.T) {\n+ tests := []struct {\n+ addr tcpip.Address\n+ want header.IPv6MulticastScope\n+ }{\n+ {\n+ addr: 
\"\\xff\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6Reserved0MulticastScope,\n+ },\n+ {\n+ addr: \"\\xff\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6InterfaceLocalMulticastScope,\n+ },\n+ {\n+ addr: \"\\xff\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6LinkLocalMulticastScope,\n+ },\n+ {\n+ addr: \"\\xff\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6RealmLocalMulticastScope,\n+ },\n+ {\n+ addr: \"\\xff\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6AdminLocalMulticastScope,\n+ },\n+ {\n+ addr: \"\\xff\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6SiteLocalMulticastScope,\n+ },\n+ {\n+ addr: \"\\xff\\x06\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6MulticastScope(6),\n+ },\n+ {\n+ addr: \"\\xff\\x07\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6MulticastScope(7),\n+ },\n+ {\n+ addr: \"\\xff\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6OrganizationLocalMulticastScope,\n+ },\n+ {\n+ addr: \"\\xff\\x09\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6MulticastScope(9),\n+ },\n+ {\n+ addr: \"\\xff\\x0a\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6MulticastScope(10),\n+ },\n+ {\n+ addr: \"\\xff\\x0b\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6MulticastScope(11),\n+ },\n+ {\n+ addr: \"\\xff\\x0c\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6MulticastScope(12),\n+ },\n+ {\n+ addr: \"\\xff\\x0d\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6MulticastScope(13),\n+ },\n+ {\n+ addr: \"\\xff\\x0e\\x06\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6GlobalMulticastScope,\n+ },\n+ {\n+ addr: \"\\xff\\x0f\\x06\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\",\n+ want: header.IPv6ReservedFMulticastScope,\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(fmt.Sprintf(\"%s\", test.addr), func(t *testing.T) {\n+ if got := header.V6MulticastScope(test.addr); got != test.want {\n+ t.Fatalf(\"got header.V6MulticastScope(%s) = %d, want = %d\", test.addr, got, test.want)\n+ }\n+ })\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/internal/ip/generic_multicast_protocol.go", "new_path": "pkg/tcpip/network/internal/ip/generic_multicast_protocol.go", "diff": "@@ -156,14 +156,6 @@ type GenericMulticastProtocolOptions struct {\n//\n// Unsolicited reports are transmitted when a group is newly joined.\nMaxUnsolicitedReportDelay time.Duration\n-\n- // AllNodesAddress is a multicast address that all nodes on a network should\n- // be a member of.\n- //\n- // This address will not have the generic multicast protocol performed on it;\n- // it will be left in the non member/listener state, and packets will never\n- // be sent for it.\n- AllNodesAddress tcpip.Address\n}\n// MulticastGroupProtocol is a multicast group protocol whose core state machine\n@@ -188,6 +180,10 @@ type MulticastGroupProtocol interface {\n// SendLeave sends a multicast leave for the 
specified group address.\nSendLeave(groupAddress tcpip.Address) tcpip.Error\n+\n+ // ShouldPerformProtocol returns true iff the protocol should be performed for\n+ // the specified group.\n+ ShouldPerformProtocol(tcpip.Address) bool\n}\n// GenericMulticastProtocolState is the per interface generic multicast protocol\n@@ -455,20 +451,7 @@ func (g *GenericMulticastProtocolState) initializeNewMemberLocked(groupAddress t\ninfo.lastToSendReport = false\n- if groupAddress == g.opts.AllNodesAddress {\n- // As per RFC 2236 section 6 page 10 (for IGMPv2),\n- //\n- // The all-systems group (address 224.0.0.1) is handled as a special\n- // case. The host starts in Idle Member state for that group on every\n- // interface, never transitions to another state, and never sends a\n- // report for that group.\n- //\n- // As per RFC 2710 section 5 page 10 (for MLDv1),\n- //\n- // The link-scope all-nodes address (FF02::1) is handled as a special\n- // case. The node starts in Idle Listener state for that address on\n- // every interface, never transitions to another state, and never sends\n- // a Report or Done for that address.\n+ if !g.opts.Protocol.ShouldPerformProtocol(groupAddress) {\ninfo.state = idleMember\nreturn\n}\n@@ -537,20 +520,7 @@ func (g *GenericMulticastProtocolState) maybeSendLeave(groupAddress tcpip.Addres\nreturn\n}\n- if groupAddress == g.opts.AllNodesAddress {\n- // As per RFC 2236 section 6 page 10 (for IGMPv2),\n- //\n- // The all-systems group (address 224.0.0.1) is handled as a special\n- // case. The host starts in Idle Member state for that group on every\n- // interface, never transitions to another state, and never sends a\n- // report for that group.\n- //\n- // As per RFC 2710 section 5 page 10 (for MLDv1),\n- //\n- // The link-scope all-nodes address (FF02::1) is handled as a special\n- // case. The node starts in Idle Listener state for that address on\n- // every interface, never transitions to another state, and never sends\n- // a Report or Done for that address.\n+ if !g.opts.Protocol.ShouldPerformProtocol(groupAddress) {\nreturn\n}\n@@ -627,20 +597,7 @@ func (g *GenericMulticastProtocolState) setDelayTimerForAddressRLocked(groupAddr\nreturn\n}\n- if groupAddress == g.opts.AllNodesAddress {\n- // As per RFC 2236 section 6 page 10 (for IGMPv2),\n- //\n- // The all-systems group (address 224.0.0.1) is handled as a special\n- // case. The host starts in Idle Member state for that group on every\n- // interface, never transitions to another state, and never sends a\n- // report for that group.\n- //\n- // As per RFC 2710 section 5 page 10 (for MLDv1),\n- //\n- // The link-scope all-nodes address (FF02::1) is handled as a special\n- // case. 
The node starts in Idle Listener state for that address on\n- // every interface, never transitions to another state, and never sends\n- // a Report or Done for that address.\n+ if !g.opts.Protocol.ShouldPerformProtocol(groupAddress) {\nreturn\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/internal/ip/generic_multicast_protocol_test.go", "new_path": "pkg/tcpip/network/internal/ip/generic_multicast_protocol_test.go", "diff": "@@ -43,6 +43,8 @@ type mockMulticastGroupProtocolProtectedFields struct {\ntype mockMulticastGroupProtocol struct {\nt *testing.T\n+ skipProtocolAddress tcpip.Address\n+\nmu mockMulticastGroupProtocolProtectedFields\n}\n@@ -165,6 +167,11 @@ func (m *mockMulticastGroupProtocol) SendLeave(groupAddress tcpip.Address) tcpip\nreturn nil\n}\n+// ShouldPerformProtocol implements ip.MulticastGroupProtocol.\n+func (m *mockMulticastGroupProtocol) ShouldPerformProtocol(groupAddress tcpip.Address) bool {\n+ return groupAddress != m.skipProtocolAddress\n+}\n+\nfunc (m *mockMulticastGroupProtocol) check(sendReportGroupAddresses []tcpip.Address, sendLeaveGroupAddresses []tcpip.Address) string {\nm.mu.Lock()\ndefer m.mu.Unlock()\n@@ -193,10 +200,11 @@ func (m *mockMulticastGroupProtocol) check(sendReportGroupAddresses []tcpip.Addr\ncmp.FilterPath(\nfunc(p cmp.Path) bool {\nswitch p.Last().String() {\n- case \".RWMutex\", \".t\", \".makeQueuePackets\", \".disabled\", \".genericMulticastGroup\":\n+ case \".RWMutex\", \".t\", \".makeQueuePackets\", \".disabled\", \".genericMulticastGroup\", \".skipProtocolAddress\":\nreturn true\n- }\n+ default:\nreturn false\n+ }\n},\ncmp.Ignore(),\n),\n@@ -225,14 +233,13 @@ func TestJoinGroup(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- mgp := mockMulticastGroupProtocol{t: t}\n+ mgp := mockMulticastGroupProtocol{t: t, skipProtocolAddress: addr2}\nclock := faketime.NewManualClock()\nmgp.init(ip.GenericMulticastProtocolOptions{\nRand: rand.New(rand.NewSource(0)),\nClock: clock,\nMaxUnsolicitedReportDelay: maxUnsolicitedReportDelay,\n- AllNodesAddress: addr2,\n})\n// Joining a group should send a report immediately and another after\n@@ -279,14 +286,13 @@ func TestLeaveGroup(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- mgp := mockMulticastGroupProtocol{t: t}\n+ mgp := mockMulticastGroupProtocol{t: t, skipProtocolAddress: addr2}\nclock := faketime.NewManualClock()\nmgp.init(ip.GenericMulticastProtocolOptions{\nRand: rand.New(rand.NewSource(1)),\nClock: clock,\nMaxUnsolicitedReportDelay: maxUnsolicitedReportDelay,\n- AllNodesAddress: addr2,\n})\nmgp.joinGroup(test.addr)\n@@ -356,14 +362,13 @@ func TestHandleReport(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- mgp := mockMulticastGroupProtocol{t: t}\n+ mgp := mockMulticastGroupProtocol{t: t, skipProtocolAddress: addr3}\nclock := faketime.NewManualClock()\nmgp.init(ip.GenericMulticastProtocolOptions{\nRand: rand.New(rand.NewSource(2)),\nClock: clock,\nMaxUnsolicitedReportDelay: maxUnsolicitedReportDelay,\n- AllNodesAddress: addr3,\n})\nmgp.joinGroup(addr1)\n@@ -446,14 +451,13 @@ func TestHandleQuery(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- mgp := mockMulticastGroupProtocol{t: t}\n+ mgp := mockMulticastGroupProtocol{t: t, skipProtocolAddress: addr3}\nclock := faketime.NewManualClock()\nmgp.init(ip.GenericMulticastProtocolOptions{\nRand: rand.New(rand.NewSource(3)),\nClock: clock,\nMaxUnsolicitedReportDelay: 
maxUnsolicitedReportDelay,\n- AllNodesAddress: addr3,\n})\nmgp.joinGroup(addr1)\n@@ -574,14 +578,13 @@ func TestJoinCount(t *testing.T) {\n}\nfunc TestMakeAllNonMemberAndInitialize(t *testing.T) {\n- mgp := mockMulticastGroupProtocol{t: t}\n+ mgp := mockMulticastGroupProtocol{t: t, skipProtocolAddress: addr3}\nclock := faketime.NewManualClock()\nmgp.init(ip.GenericMulticastProtocolOptions{\nRand: rand.New(rand.NewSource(3)),\nClock: clock,\nMaxUnsolicitedReportDelay: maxUnsolicitedReportDelay,\n- AllNodesAddress: addr3,\n})\nmgp.joinGroup(addr1)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/igmp.go", "new_path": "pkg/tcpip/network/ipv4/igmp.go", "diff": "@@ -126,6 +126,17 @@ func (igmp *igmpState) SendLeave(groupAddress tcpip.Address) tcpip.Error {\nreturn err\n}\n+// ShouldPerformProtocol implements ip.MulticastGroupProtocol.\n+func (igmp *igmpState) ShouldPerformProtocol(groupAddress tcpip.Address) bool {\n+ // As per RFC 2236 section 6 page 10,\n+ //\n+ // The all-systems group (address 224.0.0.1) is handled as a special\n+ // case. The host starts in Idle Member state for that group on every\n+ // interface, never transitions to another state, and never sends a\n+ // report for that group.\n+ return groupAddress != header.IPv4AllSystems\n+}\n+\n// init sets up an igmpState struct, and is required to be called before using\n// a new igmpState.\n//\n@@ -137,7 +148,6 @@ func (igmp *igmpState) init(ep *endpoint) {\nClock: ep.protocol.stack.Clock(),\nProtocol: igmp,\nMaxUnsolicitedReportDelay: UnsolicitedReportIntervalMax,\n- AllNodesAddress: header.IPv4AllSystems,\n})\nigmp.igmpV1Present = igmpV1PresentDefault\nigmp.igmpV1Job = ep.protocol.stack.NewJob(&ep.mu, func() {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/mld.go", "new_path": "pkg/tcpip/network/ipv6/mld.go", "diff": "@@ -80,6 +80,25 @@ func (mld *mldState) SendLeave(groupAddress tcpip.Address) tcpip.Error {\nreturn err\n}\n+// ShouldPerformProtocol implements ip.MulticastGroupProtocol.\n+func (mld *mldState) ShouldPerformProtocol(groupAddress tcpip.Address) bool {\n+ // As per RFC 2710 section 5 page 10,\n+ //\n+ // The link-scope all-nodes address (FF02::1) is handled as a special\n+ // case. 
The node starts in Idle Listener state for that address on\n+ // every interface, never transitions to another state, and never sends\n+ // a Report or Done for that address.\n+ //\n+ // MLD messages are never sent for multicast addresses whose scope is 0\n+ // (reserved) or 1 (node-local).\n+ if groupAddress == header.IPv6AllNodesMulticastAddress {\n+ return false\n+ }\n+\n+ scope := header.V6MulticastScope(groupAddress)\n+ return scope != header.IPv6Reserved0MulticastScope && scope != header.IPv6InterfaceLocalMulticastScope\n+}\n+\n// init sets up an mldState struct, and is required to be called before using\n// a new mldState.\n//\n@@ -91,7 +110,6 @@ func (mld *mldState) init(ep *endpoint) {\nClock: ep.protocol.stack.Clock(),\nProtocol: mld,\nMaxUnsolicitedReportDelay: UnsolicitedReportIntervalMax,\n- AllNodesAddress: header.IPv6AllNodesMulticastAddress,\n})\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/mld_test.go", "new_path": "pkg/tcpip/network/ipv6/mld_test.go", "diff": "@@ -464,3 +464,141 @@ func TestMLDPacketValidation(t *testing.T) {\n})\n}\n}\n+\n+func TestMLDSkipProtocol(t *testing.T) {\n+ const nicID = 1\n+\n+ tests := []struct {\n+ name string\n+ group tcpip.Address\n+ expectReport bool\n+ }{\n+ {\n+ name: \"Reserverd0\",\n+ group: \"\\xff\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: false,\n+ },\n+ {\n+ name: \"Interface Local\",\n+ group: \"\\xff\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: false,\n+ },\n+ {\n+ name: \"Link Local\",\n+ group: \"\\xff\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: true,\n+ },\n+ {\n+ name: \"Realm Local\",\n+ group: \"\\xff\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: true,\n+ },\n+ {\n+ name: \"Admin Local\",\n+ group: \"\\xff\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: true,\n+ },\n+ {\n+ name: \"Site Local\",\n+ group: \"\\xff\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: true,\n+ },\n+ {\n+ name: \"Unassigned(6)\",\n+ group: \"\\xff\\x06\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: true,\n+ },\n+ {\n+ name: \"Unassigned(7)\",\n+ group: \"\\xff\\x07\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: true,\n+ },\n+ {\n+ name: \"Organization Local\",\n+ group: \"\\xff\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: true,\n+ },\n+ {\n+ name: \"Unassigned(9)\",\n+ group: \"\\xff\\x09\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: true,\n+ },\n+ {\n+ name: \"Unassigned(A)\",\n+ group: \"\\xff\\x0a\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: true,\n+ },\n+ {\n+ name: \"Unassigned(B)\",\n+ group: \"\\xff\\x0b\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: true,\n+ },\n+ {\n+ name: \"Unassigned(C)\",\n+ group: \"\\xff\\x0c\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: true,\n+ },\n+ {\n+ name: \"Unassigned(D)\",\n+ group: \"\\xff\\x0d\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: true,\n+ },\n+ {\n+ name: \"Global\",\n+ group: 
\"\\xff\\x0e\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: true,\n+ },\n+ {\n+ name: \"ReservedF\",\n+ group: \"\\xff\\x0f\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x11\",\n+ expectReport: true,\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocolWithOptions(ipv6.Options{\n+ MLD: ipv6.MLDOptions{\n+ Enabled: true,\n+ },\n+ })},\n+ })\n+ e := channel.New(1, header.IPv6MinimumMTU, \"\")\n+ if err := s.CreateNIC(nicID, e); err != nil {\n+ t.Fatalf(\"CreateNIC(%d, _): %s\", nicID, err)\n+ }\n+ if err := s.AddAddress(nicID, ipv6.ProtocolNumber, linkLocalAddr); err != nil {\n+ t.Fatalf(\"AddAddress(%d, %d, %s) = %s\", nicID, ipv6.ProtocolNumber, linkLocalAddr, err)\n+ }\n+ if p, ok := e.Read(); !ok {\n+ t.Fatal(\"expected a report message to be sent\")\n+ } else {\n+ validateMLDPacket(t, stack.PayloadSince(p.Pkt.NetworkHeader()), linkLocalAddr, linkLocalAddrSNMC, header.ICMPv6MulticastListenerReport, linkLocalAddrSNMC)\n+ }\n+\n+ if err := s.JoinGroup(ipv6.ProtocolNumber, nicID, test.group); err != nil {\n+ t.Fatalf(\"s.JoinGroup(%d, %d, %s): %s\", ipv6.ProtocolNumber, nicID, test.group, err)\n+ }\n+ if isInGroup, err := s.IsInGroup(nicID, test.group); err != nil {\n+ t.Fatalf(\"IsInGroup(%d, %s): %s\", nicID, test.group, err)\n+ } else if !isInGroup {\n+ t.Fatalf(\"got IsInGroup(%d, %s) = false, want = true\", nicID, test.group)\n+ }\n+\n+ if !test.expectReport {\n+ if p, ok := e.Read(); ok {\n+ t.Fatalf(\"got e.Read() = (%#v, true), want = (_, false)\", p)\n+ }\n+\n+ return\n+ }\n+\n+ if p, ok := e.Read(); !ok {\n+ t.Fatal(\"expected a report message to be sent\")\n+ } else {\n+ validateMLDPacket(t, stack.PayloadSince(p.Pkt.NetworkHeader()), linkLocalAddr, test.group, header.ICMPv6MulticastListenerReport, test.group)\n+ }\n+ })\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Do not perform MLD for certain multicast scopes ...as per RFC 2710 section 5 page 10. Test: ipv6_test.TestMLDSkipProtocol PiperOrigin-RevId: 367031126
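A standalone sketch of the scope check this change introduces, rewritten over net.IP instead of tcpip.Address for illustration; the logic mirrors header.V6MulticastScope and mldState.ShouldPerformProtocol from the diff above:

```go
// Sketch of the MLD skip rule: the multicast scope is the low nibble of the
// second address byte (RFC 4291 section 2.7). MLD is skipped for scope 0
// (reserved) and scope 1 (interface-local), and for the all-nodes address
// ff02::1 (RFC 2710 section 5).
package main

import (
	"fmt"
	"net"
)

func shouldPerformMLD(group net.IP) bool {
	g := group.To16()
	if g == nil || g[0] != 0xff {
		return false // not an IPv6 multicast address
	}
	if g.Equal(net.ParseIP("ff02::1")) {
		return false // link-scope all-nodes
	}
	scope := g[1] & 0xf
	return scope != 0x0 /* reserved */ && scope != 0x1 /* interface-local */
}

func main() {
	for _, s := range []string{"ff01::2", "ff02::1", "ff02::1:ff00:1", "ff0e::101"} {
		fmt.Println(s, shouldPerformMLD(net.ParseIP(s)))
	}
}
```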
259,853
07.04.2021 16:22:43
25,200
0e55b57452d3437f494690dca67bcc55f11d3c60
perf/getpid: add a case where syscalls are executed via mov $XXX, %eax; syscall. This is the most common pattern for invoking system calls in real applications.
[ { "change_type": "MODIFY", "old_path": "test/perf/linux/getpid_benchmark.cc", "new_path": "test/perf/linux/getpid_benchmark.cc", "diff": "@@ -31,6 +31,24 @@ void BM_Getpid(benchmark::State& state) {\nBENCHMARK(BM_Getpid);\n+#ifdef __x86_64__\n+\n+#define SYSNO_STR1(x) #x\n+#define SYSNO_STR(x) SYSNO_STR1(x)\n+\n+// BM_GetpidOpt uses the most often pattern of calling system calls:\n+// mov $SYS_XXX, %eax; syscall.\n+void BM_GetpidOpt(benchmark::State& state) {\n+ for (auto s : state) {\n+ __asm__(\"movl $\" SYSNO_STR(SYS_getpid) \", %%eax\\n\"\n+ \"syscall\\n\"\n+ : : : \"rax\", \"rcx\", \"r11\");\n+ }\n+}\n+\n+BENCHMARK(BM_GetpidOpt);\n+#endif // __x86_64__\n+\n} // namespace\n} // namespace testing\n" } ]
Go
Apache License 2.0
google/gvisor
perf/getpid: add a case when syscalls are executed via mov $XXX, %eax; syscall This is the most often pattern of calling system calls in real applications. PiperOrigin-RevId: 367320048
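The benchmark added by this commit is C++ with inline assembly; the following is a rough Go analogue (an illustrative assumption, not part of the change) contrasting the wrapped getpid call with a raw syscall invocation on linux/amd64:

```go
// Sketch: compare the wrapped getpid call against issuing the system call
// directly via RawSyscall, analogous in spirit to the C++ benchmark above.
package getpid_test

import (
	"syscall"
	"testing"
)

func BenchmarkGetpidWrapped(b *testing.B) {
	for i := 0; i < b.N; i++ {
		syscall.Getpid()
	}
}

func BenchmarkGetpidRaw(b *testing.B) {
	for i := 0; i < b.N; i++ {
		syscall.RawSyscall(syscall.SYS_GETPID, 0, 0, 0)
	}
}
```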
259,858
07.04.2021 17:10:58
25,200
192f20788b338900291b0692e2518a8e8bc0f56d
Add internal staging tags to //runsc and //shim binaries.
[ { "change_type": "MODIFY", "old_path": "runsc/BUILD", "new_path": "runsc/BUILD", "diff": "@@ -9,6 +9,7 @@ go_binary(\n\"version.go\",\n],\npure = True,\n+ tags = [\"staging\"],\nvisibility = [\n\"//visibility:public\",\n],\n" }, { "change_type": "MODIFY", "old_path": "shim/BUILD", "new_path": "shim/BUILD", "diff": "@@ -6,6 +6,7 @@ go_binary(\nname = \"containerd-shim-runsc-v1\",\nsrcs = [\"main.go\"],\nstatic = True,\n+ tags = [\"staging\"],\nvisibility = [\n\"//visibility:public\",\n],\n" } ]
Go
Apache License 2.0
google/gvisor
Add internal staging tags to //runsc and //shim binaries. PiperOrigin-RevId: 367328273
259,858
08.04.2021 09:31:51
25,200
cbf00d633db744febdac262ecfbc4908aa4e8a2f
Clarify platform errors.
[ { "change_type": "MODIFY", "old_path": "runsc/sandbox/sandbox.go", "new_path": "runsc/sandbox/sandbox.go", "diff": "@@ -486,7 +486,7 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn\n}\nif deviceFile, err := gPlatform.OpenDevice(); err != nil {\n- return fmt.Errorf(\"opening device file for platform %q: %v\", gPlatform, err)\n+ return fmt.Errorf(\"opening device file for platform %q: %v\", conf.Platform, err)\n} else if deviceFile != nil {\ndefer deviceFile.Close()\ncmd.ExtraFiles = append(cmd.ExtraFiles, deviceFile)\n@@ -1174,7 +1174,7 @@ func deviceFileForPlatform(name string) (*os.File, error) {\nf, err := p.OpenDevice()\nif err != nil {\n- return nil, fmt.Errorf(\"opening device file for platform %q: %v\", p, err)\n+ return nil, fmt.Errorf(\"opening device file for platform %q: %w\", name, err)\n}\nreturn f, nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Clarify platform errors. PiperOrigin-RevId: 367446222
260,004
08.04.2021 09:48:17
25,200
9e4a1e31d4fbf7d4439d503bf318517c92c8e885
Join all routers group when forwarding is enabled See comments inline code for rationale. Test: ip_test.TestJoinLeaveAllRoutersGroup
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ipv6.go", "new_path": "pkg/tcpip/header/ipv6.go", "diff": "@@ -98,12 +98,27 @@ const (\n// The address is ff02::1.\nIPv6AllNodesMulticastAddress tcpip.Address = \"\\xff\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\"\n- // IPv6AllRoutersMulticastAddress is a link-local multicast group that\n- // all IPv6 routers MUST join, as per RFC 4291, section 2.8. Packets\n+ // IPv6AllRoutersInterfaceLocalMulticastAddress is an interface-local\n+ // multicast group that all IPv6 routers MUST join, as per RFC 4291, section\n+ // 2.8. Packets destined to this address will reach the router on an\n+ // interface.\n+ //\n+ // The address is ff01::2.\n+ IPv6AllRoutersInterfaceLocalMulticastAddress tcpip.Address = \"\\xff\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\"\n+\n+ // IPv6AllRoutersLinkLocalMulticastAddress is a link-local multicast group\n+ // that all IPv6 routers MUST join, as per RFC 4291, section 2.8. Packets\n// destined to this address will reach all routers on a link.\n//\n// The address is ff02::2.\n- IPv6AllRoutersMulticastAddress tcpip.Address = \"\\xff\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\"\n+ IPv6AllRoutersLinkLocalMulticastAddress tcpip.Address = \"\\xff\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\"\n+\n+ // IPv6AllRoutersSiteLocalMulticastAddress is a site-local multicast group\n+ // that all IPv6 routers MUST join, as per RFC 4291, section 2.8. Packets\n+ // destined to this address will reach all routers in a site.\n+ //\n+ // The address is ff05::2.\n+ IPv6AllRoutersSiteLocalMulticastAddress tcpip.Address = \"\\xff\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\"\n// IPv6MinimumMTU is the minimum MTU required by IPv6, per RFC 8200,\n// section 5:\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ip_test.go", "new_path": "pkg/tcpip/network/ip_test.go", "diff": "package ip_test\nimport (\n+ \"fmt\"\n\"strings\"\n\"testing\"\n@@ -1938,3 +1939,80 @@ func TestICMPInclusionSize(t *testing.T) {\n})\n}\n}\n+\n+func TestJoinLeaveAllRoutersGroup(t *testing.T) {\n+ const nicID = 1\n+\n+ tests := []struct {\n+ name string\n+ netProto tcpip.NetworkProtocolNumber\n+ protoFactory stack.NetworkProtocolFactory\n+ allRoutersAddr tcpip.Address\n+ }{\n+ {\n+ name: \"IPv4\",\n+ netProto: ipv4.ProtocolNumber,\n+ protoFactory: ipv4.NewProtocol,\n+ allRoutersAddr: header.IPv4AllRoutersGroup,\n+ },\n+ {\n+ name: \"IPv6 Interface Local\",\n+ netProto: ipv6.ProtocolNumber,\n+ protoFactory: ipv6.NewProtocol,\n+ allRoutersAddr: header.IPv6AllRoutersInterfaceLocalMulticastAddress,\n+ },\n+ {\n+ name: \"IPv6 Link Local\",\n+ netProto: ipv6.ProtocolNumber,\n+ protoFactory: ipv6.NewProtocol,\n+ allRoutersAddr: header.IPv6AllRoutersLinkLocalMulticastAddress,\n+ },\n+ {\n+ name: \"IPv6 Site Local\",\n+ netProto: ipv6.ProtocolNumber,\n+ protoFactory: ipv6.NewProtocol,\n+ allRoutersAddr: header.IPv6AllRoutersSiteLocalMulticastAddress,\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ for _, nicDisabled := range [...]bool{true, false} {\n+ t.Run(fmt.Sprintf(\"NIC Disabled = %t\", nicDisabled), func(t *testing.T) {\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},\n+ TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol, tcp.NewProtocol},\n+ })\n+ opts := 
stack.NICOptions{Disabled: nicDisabled}\n+ if err := s.CreateNICWithOptions(nicID, channel.New(0, 0, \"\"), opts); err != nil {\n+ t.Fatalf(\"CreateNICWithOptions(%d, _, %#v) = %s\", nicID, opts, err)\n+ }\n+\n+ if got, err := s.IsInGroup(nicID, test.allRoutersAddr); err != nil {\n+ t.Fatalf(\"s.IsInGroup(%d, %s): %s\", nicID, test.allRoutersAddr, err)\n+ } else if got {\n+ t.Fatalf(\"got s.IsInGroup(%d, %s) = true, want = false\", nicID, test.allRoutersAddr)\n+ }\n+\n+ if err := s.SetForwarding(test.netProto, true); err != nil {\n+ t.Fatalf(\"s.SetForwarding(%d, true): %s\", test.netProto, err)\n+ }\n+ if got, err := s.IsInGroup(nicID, test.allRoutersAddr); err != nil {\n+ t.Fatalf(\"s.IsInGroup(%d, %s): %s\", nicID, test.allRoutersAddr, err)\n+ } else if !got {\n+ t.Fatalf(\"got s.IsInGroup(%d, %s) = false, want = true\", nicID, test.allRoutersAddr)\n+ }\n+\n+ if err := s.SetForwarding(test.netProto, false); err != nil {\n+ t.Fatalf(\"s.SetForwarding(%d, false): %s\", test.netProto, err)\n+ }\n+ if got, err := s.IsInGroup(nicID, test.allRoutersAddr); err != nil {\n+ t.Fatalf(\"s.IsInGroup(%d, %s): %s\", nicID, test.allRoutersAddr, err)\n+ } else if got {\n+ t.Fatalf(\"got s.IsInGroup(%d, %s) = true, want = false\", nicID, test.allRoutersAddr)\n+ }\n+ })\n+ }\n+ })\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/ipv4.go", "new_path": "pkg/tcpip/network/ipv4/ipv4.go", "diff": "@@ -150,6 +150,38 @@ func (p *protocol) forgetEndpoint(nicID tcpip.NICID) {\ndelete(p.mu.eps, nicID)\n}\n+// transitionForwarding transitions the endpoint's forwarding status to\n+// forwarding.\n+//\n+// Must only be called when the forwarding status changes.\n+func (e *endpoint) transitionForwarding(forwarding bool) {\n+ e.mu.Lock()\n+ defer e.mu.Unlock()\n+\n+ if forwarding {\n+ // There does not seem to be an RFC requirement for a node to join the all\n+ // routers multicast address but\n+ // https://www.iana.org/assignments/multicast-addresses/multicast-addresses.xhtml\n+ // specifies the address as a group for all routers on a subnet so we join\n+ // the group here.\n+ if err := e.joinGroupLocked(header.IPv4AllRoutersGroup); err != nil {\n+ // joinGroupLocked only returns an error if the group address is not a\n+ // valid IPv4 multicast address.\n+ panic(fmt.Sprintf(\"e.joinGroupLocked(%s): %s\", header.IPv4AllRoutersGroup, err))\n+ }\n+\n+ return\n+ }\n+\n+ switch err := e.leaveGroupLocked(header.IPv4AllRoutersGroup).(type) {\n+ case nil:\n+ case *tcpip.ErrBadLocalAddress:\n+ // The endpoint may have already left the multicast group.\n+ default:\n+ panic(fmt.Sprintf(\"e.leaveGroupLocked(%s): %s\", header.IPv4AllRoutersGroup, err))\n+ }\n+}\n+\n// Enable implements stack.NetworkEndpoint.\nfunc (e *endpoint) Enable() tcpip.Error {\ne.mu.Lock()\n@@ -226,7 +258,7 @@ func (e *endpoint) disableLocked() {\n}\n// The endpoint may have already left the multicast group.\n- switch err := e.leaveGroupLocked(header.IPv4AllSystems); err.(type) {\n+ switch err := e.leaveGroupLocked(header.IPv4AllSystems).(type) {\ncase nil, *tcpip.ErrBadLocalAddress:\ndefault:\npanic(fmt.Sprintf(\"unexpected error when leaving group = %s: %s\", header.IPv4AllSystems, err))\n@@ -1168,12 +1200,27 @@ func (p *protocol) Forwarding() bool {\nreturn uint8(atomic.LoadUint32(&p.forwarding)) == 1\n}\n+// setForwarding sets the forwarding status for the protocol.\n+//\n+// Returns true if the forwarding status was updated.\n+func (p *protocol) setForwarding(v bool) bool {\n+ if v {\n+ return 
atomic.CompareAndSwapUint32(&p.forwarding, 0 /* old */, 1 /* new */)\n+ }\n+ return atomic.CompareAndSwapUint32(&p.forwarding, 1 /* old */, 0 /* new */)\n+}\n+\n// SetForwarding implements stack.ForwardingNetworkProtocol.\nfunc (p *protocol) SetForwarding(v bool) {\n- if v {\n- atomic.StoreUint32(&p.forwarding, 1)\n- } else {\n- atomic.StoreUint32(&p.forwarding, 0)\n+ p.mu.Lock()\n+ defer p.mu.Unlock()\n+\n+ if !p.setForwarding(v) {\n+ return\n+ }\n+\n+ for _, ep := range p.mu.eps {\n+ ep.transitionForwarding(v)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ipv6.go", "new_path": "pkg/tcpip/network/ipv6/ipv6.go", "diff": "@@ -410,22 +410,65 @@ func (e *endpoint) dupTentativeAddrDetected(addr tcpip.Address, holderLinkAddr t\n//\n// Must only be called when the forwarding status changes.\nfunc (e *endpoint) transitionForwarding(forwarding bool) {\n+ allRoutersGroups := [...]tcpip.Address{\n+ header.IPv6AllRoutersInterfaceLocalMulticastAddress,\n+ header.IPv6AllRoutersLinkLocalMulticastAddress,\n+ header.IPv6AllRoutersSiteLocalMulticastAddress,\n+ }\n+\ne.mu.Lock()\ndefer e.mu.Unlock()\n- if !e.Enabled() {\n- return\n- }\n-\nif forwarding {\n// When transitioning into an IPv6 router, host-only state (NDP discovered\n// routers, discovered on-link prefixes, and auto-generated addresses) is\n// cleaned up/invalidated and NDP router solicitations are stopped.\ne.mu.ndp.stopSolicitingRouters()\ne.mu.ndp.cleanupState(true /* hostOnly */)\n- } else {\n+\n+ // As per RFC 4291 section 2.8:\n+ //\n+ // A router is required to recognize all addresses that a host is\n+ // required to recognize, plus the following addresses as identifying\n+ // itself:\n+ //\n+ // o The All-Routers multicast addresses defined in Section 2.7.1.\n+ //\n+ // As per RFC 4291 section 2.7.1,\n+ //\n+ // All Routers Addresses: FF01:0:0:0:0:0:0:2\n+ // FF02:0:0:0:0:0:0:2\n+ // FF05:0:0:0:0:0:0:2\n+ //\n+ // The above multicast addresses identify the group of all IPv6 routers,\n+ // within scope 1 (interface-local), 2 (link-local), or 5 (site-local).\n+ for _, g := range allRoutersGroups {\n+ if err := e.joinGroupLocked(g); err != nil {\n+ // joinGroupLocked only returns an error if the group address is not a\n+ // valid IPv6 multicast address.\n+ panic(fmt.Sprintf(\"e.joinGroupLocked(%s): %s\", g, err))\n+ }\n+ }\n+\n+ return\n+ }\n+\n+ for _, g := range allRoutersGroups {\n+ switch err := e.leaveGroupLocked(g).(type) {\n+ case nil:\n+ case *tcpip.ErrBadLocalAddress:\n+ // The endpoint may have already left the multicast group.\n+ default:\n+ panic(fmt.Sprintf(\"e.leaveGroupLocked(%s): %s\", g, err))\n+ }\n+ }\n+\n// When transitioning into an IPv6 host, NDP router solicitations are\n- // started.\n+ // started if the endpoint is enabled.\n+ //\n+ // If the endpoint is not currently enabled, routers will be solicited when\n+ // the endpoint becomes enabled (if it is still a host).\n+ if e.Enabled() {\ne.mu.ndp.startSolicitingRouters()\n}\n}\n@@ -573,7 +616,7 @@ func (e *endpoint) disableLocked() {\ne.mu.ndp.cleanupState(false /* hostOnly */)\n// The endpoint may have already left the multicast group.\n- switch err := e.leaveGroupLocked(header.IPv6AllNodesMulticastAddress); err.(type) {\n+ switch err := e.leaveGroupLocked(header.IPv6AllNodesMulticastAddress).(type) {\ncase nil, *tcpip.ErrBadLocalAddress:\ndefault:\npanic(fmt.Sprintf(\"unexpected error when leaving group = %s: %s\", header.IPv6AllNodesMulticastAddress, err))\n@@ -1979,9 +2022,9 @@ func (p *protocol) Forwarding() bool {\n// Returns true 
if the forwarding status was updated.\nfunc (p *protocol) setForwarding(v bool) bool {\nif v {\n- return atomic.SwapUint32(&p.forwarding, 1) == 0\n+ return atomic.CompareAndSwapUint32(&p.forwarding, 0 /* old */, 1 /* new */)\n}\n- return atomic.SwapUint32(&p.forwarding, 0) == 1\n+ return atomic.CompareAndSwapUint32(&p.forwarding, 1 /* old */, 0 /* new */)\n}\n// SetForwarding implements stack.ForwardingNetworkProtocol.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/mld.go", "new_path": "pkg/tcpip/network/ipv6/mld.go", "diff": "@@ -76,7 +76,7 @@ func (mld *mldState) SendReport(groupAddress tcpip.Address) (bool, tcpip.Error)\n//\n// Precondition: mld.ep.mu must be read locked.\nfunc (mld *mldState) SendLeave(groupAddress tcpip.Address) tcpip.Error {\n- _, err := mld.writePacket(header.IPv6AllRoutersMulticastAddress, groupAddress, header.ICMPv6MulticastListenerDone)\n+ _, err := mld.writePacket(header.IPv6AllRoutersLinkLocalMulticastAddress, groupAddress, header.ICMPv6MulticastListenerDone)\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/mld_test.go", "new_path": "pkg/tcpip/network/ipv6/mld_test.go", "diff": "@@ -93,7 +93,7 @@ func TestIPv6JoinLeaveSolicitedNodeAddressPerformsMLD(t *testing.T) {\nif p, ok := e.Read(); !ok {\nt.Fatal(\"expected a done message to be sent\")\n} else {\n- validateMLDPacket(t, stack.PayloadSince(p.Pkt.NetworkHeader()), header.IPv6Any, header.IPv6AllRoutersMulticastAddress, header.ICMPv6MulticastListenerDone, linkLocalAddrSNMC)\n+ validateMLDPacket(t, stack.PayloadSince(p.Pkt.NetworkHeader()), header.IPv6Any, header.IPv6AllRoutersLinkLocalMulticastAddress, header.ICMPv6MulticastListenerDone, linkLocalAddrSNMC)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ndp.go", "new_path": "pkg/tcpip/network/ipv6/ndp.go", "diff": "@@ -1703,7 +1703,7 @@ func (ndp *ndpState) startSolicitingRouters() {\n// the unspecified address if no address is assigned\n// to the sending interface.\nlocalAddr := header.IPv6Any\n- if addressEndpoint := ndp.ep.AcquireOutgoingPrimaryAddress(header.IPv6AllRoutersMulticastAddress, false); addressEndpoint != nil {\n+ if addressEndpoint := ndp.ep.AcquireOutgoingPrimaryAddress(header.IPv6AllRoutersLinkLocalMulticastAddress, false); addressEndpoint != nil {\nlocalAddr = addressEndpoint.AddressWithPrefix().Address\naddressEndpoint.DecRef()\n}\n@@ -1730,7 +1730,7 @@ func (ndp *ndpState) startSolicitingRouters() {\nicmpData.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{\nHeader: icmpData,\nSrc: localAddr,\n- Dst: header.IPv6AllRoutersMulticastAddress,\n+ Dst: header.IPv6AllRoutersLinkLocalMulticastAddress,\n}))\npkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n@@ -1739,14 +1739,14 @@ func (ndp *ndpState) startSolicitingRouters() {\n})\nsent := ndp.ep.stats.icmp.packetsSent\n- if err := addIPHeader(localAddr, header.IPv6AllRoutersMulticastAddress, pkt, stack.NetworkHeaderParams{\n+ if err := addIPHeader(localAddr, header.IPv6AllRoutersLinkLocalMulticastAddress, pkt, stack.NetworkHeaderParams{\nProtocol: header.ICMPv6ProtocolNumber,\nTTL: header.NDPHopLimit,\n}, nil /* extensionHeaders */); err != nil {\npanic(fmt.Sprintf(\"failed to add IP header: %s\", err))\n}\n- if err := ndp.ep.nic.WritePacketToRemote(header.EthernetAddressFromMulticastIPv6Address(header.IPv6AllRoutersMulticastAddress), nil /* gso */, ProtocolNumber, pkt); err != nil {\n+ if err := 
ndp.ep.nic.WritePacketToRemote(header.EthernetAddressFromMulticastIPv6Address(header.IPv6AllRoutersLinkLocalMulticastAddress), nil /* gso */, ProtocolNumber, pkt); err != nil {\nsent.dropped.Increment()\n// Don't send any more messages if we had an error.\nremaining = 0\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/multicast_group_test.go", "new_path": "pkg/tcpip/network/multicast_group_test.go", "diff": "@@ -194,7 +194,7 @@ func checkInitialIPv6Groups(t *testing.T, e *channel.Endpoint, s *stack.Stack, c\nif p, ok := e.Read(); !ok {\nt.Fatal(\"expected a report message to be sent\")\n} else {\n- validateMLDPacket(t, p, header.IPv6AllRoutersMulticastAddress, mldDone, 0, ipv6AddrSNMC)\n+ validateMLDPacket(t, p, header.IPv6AllRoutersLinkLocalMulticastAddress, mldDone, 0, ipv6AddrSNMC)\n}\n// Should not send any more packets.\n@@ -606,7 +606,7 @@ func TestMGPLeaveGroup(t *testing.T) {\nvalidateLeave: func(t *testing.T, p channel.PacketInfo) {\nt.Helper()\n- validateMLDPacket(t, p, header.IPv6AllRoutersMulticastAddress, mldDone, 0, ipv6MulticastAddr1)\n+ validateMLDPacket(t, p, header.IPv6AllRoutersLinkLocalMulticastAddress, mldDone, 0, ipv6MulticastAddr1)\n},\ncheckInitialGroups: checkInitialIPv6Groups,\n},\n@@ -1014,7 +1014,7 @@ func TestMGPWithNICLifecycle(t *testing.T) {\nvalidateLeave: func(t *testing.T, p channel.PacketInfo, addr tcpip.Address) {\nt.Helper()\n- validateMLDPacket(t, p, header.IPv6AllRoutersMulticastAddress, mldDone, 0, addr)\n+ validateMLDPacket(t, p, header.IPv6AllRoutersLinkLocalMulticastAddress, mldDone, 0, addr)\n},\ngetAndCheckGroupAddress: func(t *testing.T, seen map[tcpip.Address]bool, p channel.PacketInfo) tcpip.Address {\nt.Helper()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/ndp_test.go", "new_path": "pkg/tcpip/stack/ndp_test.go", "diff": "@@ -5204,13 +5204,13 @@ func TestRouterSolicitation(t *testing.T) {\n}\n// Make sure the right remote link address is used.\n- if want := header.EthernetAddressFromMulticastIPv6Address(header.IPv6AllRoutersMulticastAddress); p.Route.RemoteLinkAddress != want {\n+ if want := header.EthernetAddressFromMulticastIPv6Address(header.IPv6AllRoutersLinkLocalMulticastAddress); p.Route.RemoteLinkAddress != want {\nt.Errorf(\"got remote link address = %s, want = %s\", p.Route.RemoteLinkAddress, want)\n}\nchecker.IPv6(t, stack.PayloadSince(p.Pkt.NetworkHeader()),\nchecker.SrcAddr(test.expectedSrcAddr),\n- checker.DstAddr(header.IPv6AllRoutersMulticastAddress),\n+ checker.DstAddr(header.IPv6AllRoutersLinkLocalMulticastAddress),\nchecker.TTL(header.NDPHopLimit),\nchecker.NDPRS(checker.NDPRSOptions(test.expectedNDPOpts)),\n)\n@@ -5362,7 +5362,7 @@ func TestStopStartSolicitingRouters(t *testing.T) {\n}\nchecker.IPv6(t, stack.PayloadSince(p.Pkt.NetworkHeader()),\nchecker.SrcAddr(header.IPv6Any),\n- checker.DstAddr(header.IPv6AllRoutersMulticastAddress),\n+ checker.DstAddr(header.IPv6AllRoutersLinkLocalMulticastAddress),\nchecker.TTL(header.NDPHopLimit),\nchecker.NDPRS())\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Join all routers group when forwarding is enabled See comments inline code for rationale. Test: ip_test.TestJoinLeaveAllRoutersGroup PiperOrigin-RevId: 367449434
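A condensed sketch of how the new behavior can be observed through the netstack API, lifted from the test added in this change; error handling is reduced to panics for brevity:

```go
// Sketch: after enabling IPv6 forwarding, the NIC becomes a member of the
// all-routers multicast groups; disabling forwarding leaves them again.
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/header"
	"gvisor.dev/gvisor/pkg/tcpip/link/channel"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

func main() {
	const nicID = 1
	s := stack.New(stack.Options{
		NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
	})
	if err := s.CreateNIC(nicID, channel.New(0, 0, "")); err != nil {
		panic(fmt.Sprintf("CreateNIC: %s", err))
	}

	if err := s.SetForwarding(ipv6.ProtocolNumber, true); err != nil {
		panic(fmt.Sprintf("SetForwarding: %s", err))
	}
	joined, _ := s.IsInGroup(nicID, header.IPv6AllRoutersLinkLocalMulticastAddress)
	fmt.Println("all-routers (link-local) membership with forwarding enabled:", joined) // true
}
```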
260,001
08.04.2021 11:43:46
25,200
ffeb2a2f54c7363136c29cfe179afedf0889ea3f
Add Children in merkletree generate. This field was missing and should be provided.
[ { "change_type": "MODIFY", "old_path": "pkg/merkletree/merkletree.go", "new_path": "pkg/merkletree/merkletree.go", "diff": "@@ -238,6 +238,7 @@ func Generate(params *GenerateParams) ([]byte, error) {\nMode: params.Mode,\nUID: params.UID,\nGID: params.GID,\n+ Children: params.Children,\nSymlinkTarget: params.SymlinkTarget,\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Add Children in merkletree generate This field was missing and should be provided. PiperOrigin-RevId: 367474481
259,858
08.04.2021 15:16:40
25,200
5ac79e154532fc594eb6bcc87f3ba5d85aaa5013
Drop unused escapes information.
[ { "change_type": "MODIFY", "old_path": "tools/nogo/analyzers.go", "new_path": "tools/nogo/analyzers.go", "diff": "@@ -83,11 +83,6 @@ var AllAnalyzers = []*analysis.Analyzer{\nchecklocks.Analyzer,\n}\n-// EscapeAnalyzers is a list of escape-related analyzers.\n-var EscapeAnalyzers = []*analysis.Analyzer{\n- checkescape.EscapeAnalyzer,\n-}\n-\nfunc register(all []*analysis.Analyzer) {\n// Register all fact types.\n//\n@@ -129,5 +124,4 @@ func init() {\n// Register lists.\nregister(AllAnalyzers)\n- register(EscapeAnalyzers)\n}\n" }, { "change_type": "MODIFY", "old_path": "tools/nogo/check/main.go", "new_path": "tools/nogo/check/main.go", "diff": "@@ -31,7 +31,6 @@ var (\nstdlibFile = flag.String(\"stdlib\", \"\", \"stdlib configuration file (in JSON format)\")\nfindingsOutput = flag.String(\"findings\", \"\", \"output file (or stdout, if not specified)\")\nfactsOutput = flag.String(\"facts\", \"\", \"output file for facts (optional)\")\n- escapesOutput = flag.String(\"escapes\", \"\", \"output file for escapes (optional)\")\n)\nfunc loadConfig(file string, config interface{}) interface{} {\n@@ -66,25 +65,13 @@ func main() {\n// Run the configuration.\nif *stdlibFile != \"\" {\n- // Perform basic analysis.\n+ // Perform stdlib analysis.\nc := loadConfig(*stdlibFile, new(nogo.StdlibConfig)).(*nogo.StdlibConfig)\nfindings, factData, err = nogo.CheckStdlib(c, nogo.AllAnalyzers)\n-\n} else if *packageFile != \"\" {\n- // Perform basic analysis.\n+ // Perform standard analysis.\nc := loadConfig(*packageFile, new(nogo.PackageConfig)).(*nogo.PackageConfig)\nfindings, factData, err = nogo.CheckPackage(c, nogo.AllAnalyzers, nil)\n-\n- // Do we need to do escape analysis?\n- if *escapesOutput != \"\" {\n- escapes, _, err := nogo.CheckPackage(c, nogo.EscapeAnalyzers, nil)\n- if err != nil {\n- log.Fatalf(\"error performing escape analysis: %v\", err)\n- }\n- if err := nogo.WriteFindingsToFile(escapes, *escapesOutput); err != nil {\n- log.Fatalf(\"error writing escapes to %q: %v\", *escapesOutput, err)\n- }\n- }\n} else {\nlog.Fatalf(\"please provide at least one of package or stdlib!\")\n}\n" }, { "change_type": "MODIFY", "old_path": "tools/nogo/defs.bzl", "new_path": "tools/nogo/defs.bzl", "diff": "@@ -280,7 +280,6 @@ def _nogo_aspect_impl(target, ctx):\ngo_ctx = go_context(ctx, goos = nogo_target_info.goos, goarch = nogo_target_info.goarch)\nfacts = ctx.actions.declare_file(target.label.name + \".facts\")\nraw_findings = ctx.actions.declare_file(target.label.name + \".raw_findings\")\n- escapes = ctx.actions.declare_file(target.label.name + \".escapes\")\nconfig = struct(\nImportPath = importpath,\nGoFiles = [src.path for src in srcs if src.path.endswith(\".go\")],\n@@ -297,7 +296,7 @@ def _nogo_aspect_impl(target, ctx):\ninputs.append(config_file)\nctx.actions.run(\ninputs = inputs,\n- outputs = [facts, raw_findings, escapes],\n+ outputs = [facts, raw_findings],\ntools = depset(go_ctx.runfiles.to_list() + ctx.files._nogo_objdump_tool),\nexecutable = ctx.files._nogo_check[0],\nmnemonic = \"NogoAnalysis\",\n@@ -308,7 +307,6 @@ def _nogo_aspect_impl(target, ctx):\n\"-package=%s\" % config_file.path,\n\"-findings=%s\" % raw_findings.path,\n\"-facts=%s\" % facts.path,\n- \"-escapes=%s\" % escapes.path,\n],\n)\n@@ -330,10 +328,6 @@ def _nogo_aspect_impl(target, ctx):\nsrcs = srcs,\ndeps = deps,\n),\n- # Make the escapes data visible to go/tricorder. 
This is returned here\n- # and not in the nogo_test rule so that this output can be obtained for\n- # ordinary go_* rules by using this as a command-line aspect.\n- OutputGroupInfo(tricorder = [escapes]),\n]\nnogo_aspect = go_rule(\n" } ]
Go
Apache License 2.0
google/gvisor
Drop unused escapes information. PiperOrigin-RevId: 367517305
260,004
08.04.2021 15:28:58
25,200
091badcb9c19bc2a2ff01f1ca7f4f20f99aef87c
Do not forward link-local packets, as per RFC 3927 section 7 and RFC 4291 section 2.5.6. Test: forward_test.TestMulticastForwarding
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ipv4.go", "new_path": "pkg/tcpip/header/ipv4.go", "diff": "@@ -178,6 +178,26 @@ const (\nIPv4FlagDontFragment\n)\n+// ipv4LinkLocalUnicastSubnet is the IPv4 link local unicast subnet as defined\n+// by RFC 3927 section 1.\n+var ipv4LinkLocalUnicastSubnet = func() tcpip.Subnet {\n+ subnet, err := tcpip.NewSubnet(\"\\xa9\\xfe\\x00\\x00\", tcpip.AddressMask(\"\\xff\\xff\\x00\\x00\"))\n+ if err != nil {\n+ panic(err)\n+ }\n+ return subnet\n+}()\n+\n+// ipv4LinkLocalMulticastSubnet is the IPv4 link local multicast subnet as\n+// defined by RFC 5771 section 4.\n+var ipv4LinkLocalMulticastSubnet = func() tcpip.Subnet {\n+ subnet, err := tcpip.NewSubnet(\"\\xe0\\x00\\x00\\x00\", tcpip.AddressMask(\"\\xff\\xff\\xff\\x00\"))\n+ if err != nil {\n+ panic(err)\n+ }\n+ return subnet\n+}()\n+\n// IPv4EmptySubnet is the empty IPv4 subnet.\nvar IPv4EmptySubnet = func() tcpip.Subnet {\nsubnet, err := tcpip.NewSubnet(IPv4Any, tcpip.AddressMask(IPv4Any))\n@@ -423,6 +443,18 @@ func (b IPv4) IsValid(pktSize int) bool {\nreturn true\n}\n+// IsV4LinkLocalUnicastAddress determines if the provided address is an IPv4\n+// link-local unicast address.\n+func IsV4LinkLocalUnicastAddress(addr tcpip.Address) bool {\n+ return ipv4LinkLocalUnicastSubnet.Contains(addr)\n+}\n+\n+// IsV4LinkLocalMulticastAddress determines if the provided address is an IPv4\n+// link-local multicast address.\n+func IsV4LinkLocalMulticastAddress(addr tcpip.Address) bool {\n+ return ipv4LinkLocalMulticastSubnet.Contains(addr)\n+}\n+\n// IsV4MulticastAddress determines if the provided address is an IPv4 multicast\n// address (range 224.0.0.0 to 239.255.255.255). The four most significant bits\n// will be 1110 = 0xe0.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ipv4_test.go", "new_path": "pkg/tcpip/header/ipv4_test.go", "diff": "@@ -18,6 +18,7 @@ import (\n\"testing\"\n\"github.com/google/go-cmp/cmp\"\n+ \"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n)\n@@ -177,3 +178,77 @@ func TestIPv4EncodeOptions(t *testing.T) {\n})\n}\n}\n+\n+func TestIsV4LinkLocalUnicastAddress(t *testing.T) {\n+ tests := []struct {\n+ name string\n+ addr tcpip.Address\n+ expected bool\n+ }{\n+ {\n+ name: \"Valid (lowest)\",\n+ addr: \"\\xa9\\xfe\\x00\\x00\",\n+ expected: true,\n+ },\n+ {\n+ name: \"Valid (highest)\",\n+ addr: \"\\xa9\\xfe\\xff\\xff\",\n+ expected: true,\n+ },\n+ {\n+ name: \"Invalid (before subnet)\",\n+ addr: \"\\xa9\\xfd\\xff\\xff\",\n+ expected: false,\n+ },\n+ {\n+ name: \"Invalid (after subnet)\",\n+ addr: \"\\xa9\\xff\\x00\\x00\",\n+ expected: false,\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ if got := header.IsV4LinkLocalUnicastAddress(test.addr); got != test.expected {\n+ t.Errorf(\"got header.IsV4LinkLocalUnicastAddress(%s) = %t, want = %t\", test.addr, got, test.expected)\n+ }\n+ })\n+ }\n+}\n+\n+func TestIsV4LinkLocalMulticastAddress(t *testing.T) {\n+ tests := []struct {\n+ name string\n+ addr tcpip.Address\n+ expected bool\n+ }{\n+ {\n+ name: \"Valid (lowest)\",\n+ addr: \"\\xe0\\x00\\x00\\x00\",\n+ expected: true,\n+ },\n+ {\n+ name: \"Valid (highest)\",\n+ addr: \"\\xe0\\x00\\x00\\xff\",\n+ expected: true,\n+ },\n+ {\n+ name: \"Invalid (before subnet)\",\n+ addr: \"\\xdf\\xff\\xff\\xff\",\n+ expected: false,\n+ },\n+ {\n+ name: \"Invalid (after subnet)\",\n+ addr: \"\\xe0\\x00\\x01\\x00\",\n+ expected: false,\n+ },\n+ }\n+\n+ for _, test := range tests 
{\n+ t.Run(test.name, func(t *testing.T) {\n+ if got := header.IsV4LinkLocalMulticastAddress(test.addr); got != test.expected {\n+ t.Errorf(\"got header.IsV4LinkLocalMulticastAddress(%s) = %t, want = %t\", test.addr, got, test.expected)\n+ }\n+ })\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/ipv4.go", "new_path": "pkg/tcpip/network/ipv4/ipv4.go", "diff": "@@ -583,6 +583,22 @@ func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt *stack.PacketBu\n// forwardPacket attempts to forward a packet to its final destination.\nfunc (e *endpoint) forwardPacket(pkt *stack.PacketBuffer) tcpip.Error {\nh := header.IPv4(pkt.NetworkHeader().View())\n+\n+ dstAddr := h.DestinationAddress()\n+ if header.IsV4LinkLocalUnicastAddress(h.SourceAddress()) || header.IsV4LinkLocalUnicastAddress(dstAddr) || header.IsV4LinkLocalMulticastAddress(dstAddr) {\n+ // As per RFC 3927 section 7,\n+ //\n+ // A router MUST NOT forward a packet with an IPv4 Link-Local source or\n+ // destination address, irrespective of the router's default route\n+ // configuration or routes obtained from dynamic routing protocols.\n+ //\n+ // A router which receives a packet with an IPv4 Link-Local source or\n+ // destination address MUST NOT forward the packet. This prevents\n+ // forwarding of packets back onto the network segment from which they\n+ // originated, or to any other segment.\n+ return nil\n+ }\n+\nttl := h.TTL()\nif ttl == 0 {\n// As per RFC 792 page 6, Time Exceeded Message,\n@@ -621,8 +637,6 @@ func (e *endpoint) forwardPacket(pkt *stack.PacketBuffer) tcpip.Error {\n}\n}\n- dstAddr := h.DestinationAddress()\n-\n// Check if the destination is owned by the stack.\nif ep := e.protocol.findEndpointWithAddress(dstAddr); ep != nil {\nep.handleValidatedPacket(h, pkt)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ipv6.go", "new_path": "pkg/tcpip/network/ipv6/ipv6.go", "diff": "@@ -912,6 +912,16 @@ func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt *stack.PacketBu\n// forwardPacket attempts to forward a packet to its final destination.\nfunc (e *endpoint) forwardPacket(pkt *stack.PacketBuffer) tcpip.Error {\nh := header.IPv6(pkt.NetworkHeader().View())\n+\n+ dstAddr := h.DestinationAddress()\n+ if header.IsV6LinkLocalAddress(h.SourceAddress()) || header.IsV6LinkLocalAddress(dstAddr) || header.IsV6LinkLocalMulticastAddress(dstAddr) {\n+ // As per RFC 4291 section 2.5.6,\n+ //\n+ // Routers must not forward any packets with Link-Local source or\n+ // destination addresses to other links.\n+ return nil\n+ }\n+\nhopLimit := h.HopLimit()\nif hopLimit <= 1 {\n// As per RFC 4443 section 3.3,\n@@ -924,8 +934,6 @@ func (e *endpoint) forwardPacket(pkt *stack.PacketBuffer) tcpip.Error {\nreturn e.protocol.returnError(&icmpReasonHopLimitExceeded{}, pkt)\n}\n- dstAddr := h.DestinationAddress()\n-\n// Check if the destination is owned by the stack.\nif ep := e.protocol.findEndpointWithAddress(dstAddr); ep != nil {\nep.handleValidatedPacket(h, pkt)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/integration/BUILD", "new_path": "pkg/tcpip/tests/integration/BUILD", "diff": "@@ -9,6 +9,8 @@ go_test(\ndeps = [\n\"//pkg/tcpip\",\n\"//pkg/tcpip/checker\",\n+ \"//pkg/tcpip/header\",\n+ \"//pkg/tcpip/link/channel\",\n\"//pkg/tcpip/network/arp\",\n\"//pkg/tcpip/network/ipv4\",\n\"//pkg/tcpip/network/ipv6\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/integration/forward_test.go", "new_path": "pkg/tcpip/tests/integration/forward_test.go", "diff": 
"@@ -21,6 +21,8 @@ import (\n\"github.com/google/go-cmp/cmp\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/checker\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/link/channel\"\n\"gvisor.dev/gvisor/pkg/tcpip/network/arp\"\n\"gvisor.dev/gvisor/pkg/tcpip/network/ipv4\"\n\"gvisor.dev/gvisor/pkg/tcpip/network/ipv6\"\n@@ -312,3 +314,193 @@ func TestForwarding(t *testing.T) {\n})\n}\n}\n+\n+func TestMulticastForwarding(t *testing.T) {\n+ const (\n+ nicID1 = 1\n+ nicID2 = 2\n+\n+ ipv4LinkLocalUnicastAddr = tcpip.Address(\"\\xa9\\xfe\\x00\\x0a\")\n+ ipv4LinkLocalMulticastAddr = tcpip.Address(\"\\xe0\\x00\\x00\\x0a\")\n+ ipv4GlobalMulticastAddr = tcpip.Address(\"\\xe0\\x00\\x01\\x0a\")\n+\n+ ipv6LinkLocalUnicastAddr = tcpip.Address(\"\\xfe\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0a\")\n+ ipv6LinkLocalMulticastAddr = tcpip.Address(\"\\xff\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0a\")\n+ ipv6GlobalMulticastAddr = tcpip.Address(\"\\xff\\x0e\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0a\")\n+\n+ ttl = 64\n+ )\n+\n+ rxICMPv4EchoRequest := func(e *channel.Endpoint, src, dst tcpip.Address) {\n+ utils.RxICMPv4EchoRequest(e, src, dst, ttl)\n+ }\n+\n+ rxICMPv6EchoRequest := func(e *channel.Endpoint, src, dst tcpip.Address) {\n+ utils.RxICMPv6EchoRequest(e, src, dst, ttl)\n+ }\n+\n+ v4Checker := func(t *testing.T, b []byte, src, dst tcpip.Address) {\n+ checker.IPv4(t, b,\n+ checker.SrcAddr(src),\n+ checker.DstAddr(dst),\n+ checker.TTL(ttl-1),\n+ checker.ICMPv4(\n+ checker.ICMPv4Type(header.ICMPv4Echo)))\n+ }\n+\n+ v6Checker := func(t *testing.T, b []byte, src, dst tcpip.Address) {\n+ checker.IPv6(t, b,\n+ checker.SrcAddr(src),\n+ checker.DstAddr(dst),\n+ checker.TTL(ttl-1),\n+ checker.ICMPv6(\n+ checker.ICMPv6Type(header.ICMPv6EchoRequest)))\n+ }\n+\n+ tests := []struct {\n+ name string\n+ srcAddr, dstAddr tcpip.Address\n+ rx func(*channel.Endpoint, tcpip.Address, tcpip.Address)\n+ expectForward bool\n+ checker func(*testing.T, []byte)\n+ }{\n+ {\n+ name: \"IPv4 link-local multicast destination\",\n+ srcAddr: utils.RemoteIPv4Addr,\n+ dstAddr: ipv4LinkLocalMulticastAddr,\n+ rx: rxICMPv4EchoRequest,\n+ expectForward: false,\n+ },\n+ {\n+ name: \"IPv4 link-local source\",\n+ srcAddr: ipv4LinkLocalUnicastAddr,\n+ dstAddr: utils.RemoteIPv4Addr,\n+ rx: rxICMPv4EchoRequest,\n+ expectForward: false,\n+ },\n+ {\n+ name: \"IPv4 link-local destination\",\n+ srcAddr: utils.RemoteIPv4Addr,\n+ dstAddr: ipv4LinkLocalUnicastAddr,\n+ rx: rxICMPv4EchoRequest,\n+ expectForward: false,\n+ },\n+ {\n+ name: \"IPv4 non-link-local unicast\",\n+ srcAddr: utils.RemoteIPv4Addr,\n+ dstAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,\n+ rx: rxICMPv4EchoRequest,\n+ expectForward: true,\n+ checker: func(t *testing.T, b []byte) {\n+ v4Checker(t, b, utils.RemoteIPv4Addr, utils.Ipv4Addr2.AddressWithPrefix.Address)\n+ },\n+ },\n+ {\n+ name: \"IPv4 non-link-local multicast\",\n+ srcAddr: utils.RemoteIPv4Addr,\n+ dstAddr: ipv4GlobalMulticastAddr,\n+ rx: rxICMPv4EchoRequest,\n+ expectForward: true,\n+ checker: func(t *testing.T, b []byte) {\n+ v4Checker(t, b, utils.RemoteIPv4Addr, ipv4GlobalMulticastAddr)\n+ },\n+ },\n+\n+ {\n+ name: \"IPv6 link-local multicast destination\",\n+ srcAddr: utils.RemoteIPv6Addr,\n+ dstAddr: ipv6LinkLocalMulticastAddr,\n+ rx: rxICMPv6EchoRequest,\n+ expectForward: false,\n+ },\n+ {\n+ name: \"IPv6 link-local source\",\n+ srcAddr: ipv6LinkLocalUnicastAddr,\n+ dstAddr: 
utils.RemoteIPv6Addr,\n+ rx: rxICMPv6EchoRequest,\n+ expectForward: false,\n+ },\n+ {\n+ name: \"IPv6 link-local destination\",\n+ srcAddr: utils.RemoteIPv6Addr,\n+ dstAddr: ipv6LinkLocalUnicastAddr,\n+ rx: rxICMPv6EchoRequest,\n+ expectForward: false,\n+ },\n+ {\n+ name: \"IPv6 non-link-local unicast\",\n+ srcAddr: utils.RemoteIPv6Addr,\n+ dstAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,\n+ rx: rxICMPv6EchoRequest,\n+ expectForward: true,\n+ checker: func(t *testing.T, b []byte) {\n+ v6Checker(t, b, utils.RemoteIPv6Addr, utils.Ipv6Addr2.AddressWithPrefix.Address)\n+ },\n+ },\n+ {\n+ name: \"IPv6 non-link-local multicast\",\n+ srcAddr: utils.RemoteIPv6Addr,\n+ dstAddr: ipv6GlobalMulticastAddr,\n+ rx: rxICMPv6EchoRequest,\n+ expectForward: true,\n+ checker: func(t *testing.T, b []byte) {\n+ v6Checker(t, b, utils.RemoteIPv6Addr, ipv6GlobalMulticastAddr)\n+ },\n+ },\n+ }\n+\n+ for _, test := range tests {\n+ t.Run(test.name, func(t *testing.T) {\n+ s := stack.New(stack.Options{\n+ NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},\n+ TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},\n+ })\n+\n+ e1 := channel.New(1, header.IPv6MinimumMTU, \"\")\n+ if err := s.CreateNIC(nicID1, e1); err != nil {\n+ t.Fatalf(\"s.CreateNIC(%d, _): %s\", nicID1, err)\n+ }\n+\n+ e2 := channel.New(1, header.IPv6MinimumMTU, \"\")\n+ if err := s.CreateNIC(nicID2, e2); err != nil {\n+ t.Fatalf(\"s.CreateNIC(%d, _): %s\", nicID2, err)\n+ }\n+\n+ if err := s.AddAddress(nicID2, ipv4.ProtocolNumber, utils.Ipv4Addr.Address); err != nil {\n+ t.Fatalf(\"s.AddAddress(%d, %d, %s): %s\", nicID2, ipv4.ProtocolNumber, utils.Ipv4Addr.Address, err)\n+ }\n+ if err := s.AddAddress(nicID2, ipv6.ProtocolNumber, utils.Ipv6Addr.Address); err != nil {\n+ t.Fatalf(\"s.AddAddress(%d, %d, %s): %s\", nicID2, ipv6.ProtocolNumber, utils.Ipv6Addr.Address, err)\n+ }\n+\n+ if err := s.SetForwarding(ipv4.ProtocolNumber, true); err != nil {\n+ t.Fatalf(\"s.SetForwarding(%d, true): %s\", ipv4.ProtocolNumber, err)\n+ }\n+ if err := s.SetForwarding(ipv6.ProtocolNumber, true); err != nil {\n+ t.Fatalf(\"s.SetForwarding(%d, true): %s\", ipv6.ProtocolNumber, err)\n+ }\n+\n+ s.SetRouteTable([]tcpip.Route{\n+ {\n+ Destination: header.IPv4EmptySubnet,\n+ NIC: nicID2,\n+ },\n+ {\n+ Destination: header.IPv6EmptySubnet,\n+ NIC: nicID2,\n+ },\n+ })\n+\n+ test.rx(e1, test.srcAddr, test.dstAddr)\n+\n+ p, ok := e2.Read()\n+ if ok != test.expectForward {\n+ t.Fatalf(\"got e2.Read() = (%#v, %t), want = (_, %t)\", p, ok, test.expectForward)\n+ }\n+\n+ if test.expectForward {\n+ test.checker(t, stack.PayloadSince(p.Pkt.NetworkHeader()))\n+ }\n+ })\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/integration/loopback_test.go", "new_path": "pkg/tcpip/tests/integration/loopback_test.go", "diff": "@@ -513,22 +513,23 @@ func TestExternalLoopbackTraffic(t *testing.T) {\nipv4Loopback = tcpip.Address(\"\\x7f\\x00\\x00\\x01\")\nnumPackets = 1\n+ ttl = 64\n)\nloopbackSourcedICMPv4 := func(e *channel.Endpoint) {\n- utils.RxICMPv4EchoRequest(e, ipv4Loopback, utils.Ipv4Addr.Address)\n+ utils.RxICMPv4EchoRequest(e, ipv4Loopback, utils.Ipv4Addr.Address, ttl)\n}\nloopbackSourcedICMPv6 := func(e *channel.Endpoint) {\n- utils.RxICMPv6EchoRequest(e, header.IPv6Loopback, utils.Ipv6Addr.Address)\n+ utils.RxICMPv6EchoRequest(e, header.IPv6Loopback, utils.Ipv6Addr.Address, ttl)\n}\nloopbackDestinedICMPv4 := func(e *channel.Endpoint) {\n- utils.RxICMPv4EchoRequest(e, utils.RemoteIPv4Addr, ipv4Loopback)\n+ 
utils.RxICMPv4EchoRequest(e, utils.RemoteIPv4Addr, ipv4Loopback, ttl)\n}\nloopbackDestinedICMPv6 := func(e *channel.Endpoint) {\n- utils.RxICMPv6EchoRequest(e, utils.RemoteIPv6Addr, header.IPv6Loopback)\n+ utils.RxICMPv6EchoRequest(e, utils.RemoteIPv6Addr, header.IPv6Loopback, ttl)\n}\ninvalidSrcAddrStat := func(s tcpip.IPStats) *tcpip.StatCounter {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/integration/multicast_broadcast_test.go", "new_path": "pkg/tcpip/tests/integration/multicast_broadcast_test.go", "diff": "@@ -43,12 +43,15 @@ const (\n// to a multicast or broadcast address uses a unicast source address for the\n// reply.\nfunc TestPingMulticastBroadcast(t *testing.T) {\n- const nicID = 1\n+ const (\n+ nicID = 1\n+ ttl = 64\n+ )\ntests := []struct {\nname string\nprotoNum tcpip.NetworkProtocolNumber\n- rxICMP func(*channel.Endpoint, tcpip.Address, tcpip.Address)\n+ rxICMP func(*channel.Endpoint, tcpip.Address, tcpip.Address, uint8)\nsrcAddr tcpip.Address\ndstAddr tcpip.Address\nexpectedSrc tcpip.Address\n@@ -136,7 +139,7 @@ func TestPingMulticastBroadcast(t *testing.T) {\n},\n})\n- test.rxICMP(e, test.srcAddr, test.dstAddr)\n+ test.rxICMP(e, test.srcAddr, test.dstAddr, ttl)\npkt, ok := e.Read()\nif !ok {\nt.Fatal(\"expected ICMP response\")\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tests/utils/utils.go", "new_path": "pkg/tcpip/tests/utils/utils.go", "diff": "@@ -48,10 +48,6 @@ const (\nLinkAddr4 = tcpip.LinkAddress(\"\\x02\\x03\\x03\\x04\\x05\\x09\")\n)\n-const (\n- ttl = 255\n-)\n-\n// Common IP addresses used by tests.\nvar (\nIpv4Addr = tcpip.AddressWithPrefix{\n@@ -322,7 +318,7 @@ func SetupRoutedStacks(t *testing.T, host1Stack, routerStack, host2Stack *stack.\n// RxICMPv4EchoRequest constructs and injects an ICMPv4 echo request packet on\n// the provided endpoint.\n-func RxICMPv4EchoRequest(e *channel.Endpoint, src, dst tcpip.Address) {\n+func RxICMPv4EchoRequest(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8) {\ntotalLen := header.IPv4MinimumSize + header.ICMPv4MinimumSize\nhdr := buffer.NewPrependable(totalLen)\npkt := header.ICMPv4(hdr.Prepend(header.ICMPv4MinimumSize))\n@@ -347,7 +343,7 @@ func RxICMPv4EchoRequest(e *channel.Endpoint, src, dst tcpip.Address) {\n// RxICMPv6EchoRequest constructs and injects an ICMPv6 echo request packet on\n// the provided endpoint.\n-func RxICMPv6EchoRequest(e *channel.Endpoint, src, dst tcpip.Address) {\n+func RxICMPv6EchoRequest(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8) {\ntotalLen := header.IPv6MinimumSize + header.ICMPv6MinimumSize\nhdr := buffer.NewPrependable(totalLen)\npkt := header.ICMPv6(hdr.Prepend(header.ICMPv6MinimumSize))\n" } ]
Go
Apache License 2.0
google/gvisor
Do not forward link-local packets As per RFC 3927 section 7 and RFC 4291 section 2.5.6. Test: forward_test.TestMulticastForwarding PiperOrigin-RevId: 367519336
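For illustration, a minimal standalone Go sketch (not gVisor code) of the two IPv4 ranges the change above refuses to forward: link-local unicast 169.254.0.0/16 (RFC 3927) and link-local multicast 224.0.0.0/24 (RFC 5771). Only the first /24 of the multicast space is link-local, which is why the test above still expects 224.0.1.10 to be forwarded.

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // The same subnets the diff encodes as "\xa9\xfe\x00\x00"/16 and
        // "\xe0\x00\x00\x00"/24, written with the standard library for clarity.
        llUnicast := &net.IPNet{IP: net.IPv4(169, 254, 0, 0), Mask: net.CIDRMask(16, 32)}
        llMulticast := &net.IPNet{IP: net.IPv4(224, 0, 0, 0), Mask: net.CIDRMask(24, 32)}

        fmt.Println(llUnicast.Contains(net.IPv4(169, 254, 0, 10))) // true: never forwarded
        fmt.Println(llMulticast.Contains(net.IPv4(224, 0, 0, 10))) // true: never forwarded
        fmt.Println(llMulticast.Contains(net.IPv4(224, 0, 1, 10))) // false: global multicast, may be forwarded
    }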
260,001
08.04.2021 17:59:46
25,200
496a3654e7af2ee905c1183c622687c6af29069b
Set parent after child is verified We should only set parent after child is verified. Also, if the parent is set before verified, destroyLocked() will try to grab parent.dirMu, which may cause deadlock.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/filesystem.go", "new_path": "pkg/sentry/fsimpl/verity/filesystem.go", "diff": "@@ -632,8 +632,6 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,\nchildVD.IncRef()\nchildMerkleVD.IncRef()\n- parent.IncRef()\n- child.parent = parent\nchild.name = name\nchild.mode = uint32(stat.Mode)\n@@ -657,6 +655,9 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,\n}\n}\n+ parent.IncRef()\n+ child.parent = parent\n+\nreturn child, nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Set parent after child is verified We should only set parent after child is verified. Also, if the parent is set before verified, destroyLocked() will try to grab parent.dirMu, which may cause deadlock. PiperOrigin-RevId: 367543655
260,001
08.04.2021 18:31:11
25,200
edf30a9bc5a645a7e03fca81f3e5852214588021
Set root dentry and hash for verity before verify Set root dentry and root hash in verity fs before we verify the root directory if a root hash is provided. These are used during verification.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/verity.go", "new_path": "pkg/sentry/fsimpl/verity/verity.go", "diff": "@@ -428,9 +428,15 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nd.mode = uint32(stat.Mode)\nd.uid = stat.UID\nd.gid = stat.GID\n- d.hash = make([]byte, len(rootHash))\nd.childrenNames = make(map[string]struct{})\n+ d.hashMu.Lock()\n+ d.hash = make([]byte, len(rootHash))\n+ copy(d.hash, rootHash)\n+ d.hashMu.Unlock()\n+\n+ fs.rootDentry = d\n+\nif !d.isDir() {\nctx.Warningf(\"verity root must be a directory\")\nreturn nil, nil, syserror.EINVAL\n@@ -502,13 +508,8 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\n}\n}\n- d.hashMu.Lock()\n- copy(d.hash, rootHash)\n- d.hashMu.Unlock()\nd.vfsd.Init(d)\n- fs.rootDentry = d\n-\nreturn &fs.vfsfs, &d.vfsd, nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Set root dentry and hash for verity before verify Set root dentry and root hash in verity fs before we verify the root directory if a root hash is provided. These are used during verification. PiperOrigin-RevId: 367547346
260,004
09.04.2021 13:20:05
25,200
973ace6bd9c4a17fe6858d6a0b2977ddfaca7885
Rename IsV6LinkLocalAddress to IsV6LinkLocalUnicastAddress To match the V4 variant.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ipv6.go", "new_path": "pkg/tcpip/header/ipv6.go", "diff": "@@ -391,23 +391,23 @@ func LinkLocalAddr(linkAddr tcpip.LinkAddress) tcpip.Address {\nreturn tcpip.Address(lladdrb[:])\n}\n-// IsV6LinkLocalAddress determines if the provided address is an IPv6\n-// link-local address (fe80::/10).\n-func IsV6LinkLocalAddress(addr tcpip.Address) bool {\n+// IsV6LinkLocalUnicastAddress returns true iff the provided address is an IPv6\n+// link-local unicast address, as defined by RFC 4291 section 2.5.6.\n+func IsV6LinkLocalUnicastAddress(addr tcpip.Address) bool {\nif len(addr) != IPv6AddressSize {\nreturn false\n}\nreturn addr[0] == 0xfe && (addr[1]&0xc0) == 0x80\n}\n-// IsV6LoopbackAddress determines if the provided address is an IPv6 loopback\n-// address.\n+// IsV6LoopbackAddress returns true iff the provided address is an IPv6 loopback\n+// address, as defined by RFC 4291 section 2.5.3.\nfunc IsV6LoopbackAddress(addr tcpip.Address) bool {\nreturn addr == IPv6Loopback\n}\n-// IsV6LinkLocalMulticastAddress determines if the provided address is an IPv6\n-// link-local multicast address.\n+// IsV6LinkLocalMulticastAddress returns true iff the provided address is an\n+// IPv6 link-local multicast address, as defined by RFC 4291 section 2.7.\nfunc IsV6LinkLocalMulticastAddress(addr tcpip.Address) bool {\nreturn IsV6MulticastAddress(addr) && V6MulticastScope(addr) == IPv6LinkLocalMulticastScope\n}\n@@ -472,7 +472,7 @@ func ScopeForIPv6Address(addr tcpip.Address) (IPv6AddressScope, tcpip.Error) {\ncase IsV6LinkLocalMulticastAddress(addr):\nreturn LinkLocalScope, nil\n- case IsV6LinkLocalAddress(addr):\n+ case IsV6LinkLocalUnicastAddress(addr):\nreturn LinkLocalScope, nil\ndefault:\n@@ -531,7 +531,8 @@ func GenerateTempIPv6SLAACAddr(tempIIDHistory []byte, stableAddr tcpip.Address)\n}\n}\n-// IPv6MulticastScope is the scope of a multicast IPv6 address.\n+// IPv6MulticastScope is the scope of a multicast IPv6 address, as defined by\n+// RFC 7346 section 2.\ntype IPv6MulticastScope uint8\n// The various values for IPv6 multicast scopes, as per RFC 7346 section 2:\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ipv6_test.go", "new_path": "pkg/tcpip/header/ipv6_test.go", "diff": "@@ -252,7 +252,7 @@ func TestIsV6LinkLocalMulticastAddress(t *testing.T) {\n}\n}\n-func TestIsV6LinkLocalAddress(t *testing.T) {\n+func TestIsV6LinkLocalUnicastAddress(t *testing.T) {\ntests := []struct {\nname string\naddr tcpip.Address\n@@ -287,8 +287,8 @@ func TestIsV6LinkLocalAddress(t *testing.T) {\nfor _, test := range tests {\nt.Run(test.name, func(t *testing.T) {\n- if got := header.IsV6LinkLocalAddress(test.addr); got != test.expected {\n- t.Errorf(\"got header.IsV6LinkLocalAddress(%s) = %t, want = %t\", test.addr, got, test.expected)\n+ if got := header.IsV6LinkLocalUnicastAddress(test.addr); got != test.expected {\n+ t.Errorf(\"got header.IsV6LinkLocalUnicastAddress(%s) = %t, want = %t\", test.addr, got, test.expected)\n}\n})\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/icmp.go", "new_path": "pkg/tcpip/network/ipv6/icmp.go", "diff": "@@ -273,7 +273,7 @@ func isMLDValid(pkt *stack.PacketBuffer, iph header.IPv6, routerAlert *header.IP\nif iph.HopLimit() != header.MLDHopLimit {\nreturn false\n}\n- if !header.IsV6LinkLocalAddress(iph.SourceAddress()) {\n+ if !header.IsV6LinkLocalUnicastAddress(iph.SourceAddress()) {\nreturn false\n}\nreturn true\n@@ -804,7 +804,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, 
hasFragmentHeader bool, r\nrouterAddr := srcAddr\n// Is the IP Source Address a link-local address?\n- if !header.IsV6LinkLocalAddress(routerAddr) {\n+ if !header.IsV6LinkLocalUnicastAddress(routerAddr) {\n// ...No, silently drop the packet.\nreceived.invalid.Increment()\nreturn\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ipv6.go", "new_path": "pkg/tcpip/network/ipv6/ipv6.go", "diff": "@@ -314,7 +314,7 @@ func (e *endpoint) onAddressAssignedLocked(addr tcpip.Address) {\n// Snooping switches MUST manage multicast forwarding state based on MLD\n// Report and Done messages sent with the unspecified address as the\n// IPv6 source address.\n- if header.IsV6LinkLocalAddress(addr) {\n+ if header.IsV6LinkLocalUnicastAddress(addr) {\ne.mu.mld.sendQueuedReports()\n}\n}\n@@ -914,7 +914,7 @@ func (e *endpoint) forwardPacket(pkt *stack.PacketBuffer) tcpip.Error {\nh := header.IPv6(pkt.NetworkHeader().View())\ndstAddr := h.DestinationAddress()\n- if header.IsV6LinkLocalAddress(h.SourceAddress()) || header.IsV6LinkLocalAddress(dstAddr) || header.IsV6LinkLocalMulticastAddress(dstAddr) {\n+ if header.IsV6LinkLocalUnicastAddress(h.SourceAddress()) || header.IsV6LinkLocalUnicastAddress(dstAddr) || header.IsV6LinkLocalMulticastAddress(dstAddr) {\n// As per RFC 4291 section 2.5.6,\n//\n// Routers must not forward any packets with Link-Local source or\n@@ -1622,7 +1622,7 @@ func (e *endpoint) getLinkLocalAddressRLocked() tcpip.Address {\nvar linkLocalAddr tcpip.Address\ne.mu.addressableEndpointState.ForEachPrimaryEndpoint(func(addressEndpoint stack.AddressEndpoint) bool {\nif addressEndpoint.IsAssigned(false /* allowExpired */) {\n- if addr := addressEndpoint.AddressWithPrefix().Address; header.IsV6LinkLocalAddress(addr) {\n+ if addr := addressEndpoint.AddressWithPrefix().Address; header.IsV6LinkLocalUnicastAddress(addr) {\nlinkLocalAddr = addr\nreturn false\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv6/ndp.go", "new_path": "pkg/tcpip/network/ipv6/ndp.go", "diff": "@@ -737,7 +737,7 @@ func (ndp *ndpState) handleRA(ip tcpip.Address, ra header.NDPRouterAdvert) {\nprefix := opt.Subnet()\n// Is the prefix a link-local?\n- if header.IsV6LinkLocalAddress(prefix.ID()) {\n+ if header.IsV6LinkLocalUnicastAddress(prefix.ID()) {\n// ...Yes, skip as per RFC 4861 section 6.3.4,\n// and RFC 4862 section 5.5.3.b (for SLAAC).\ncontinue\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/route.go", "new_path": "pkg/tcpip/stack/route.go", "diff": "@@ -132,7 +132,7 @@ func constructAndValidateRoute(netProto tcpip.NetworkProtocolNumber, addressEndp\nlocalAddr = addressEndpoint.AddressWithPrefix().Address\n}\n- if localAddressNIC != outgoingNIC && header.IsV6LinkLocalAddress(localAddr) {\n+ if localAddressNIC != outgoingNIC && header.IsV6LinkLocalUnicastAddress(localAddr) {\naddressEndpoint.DecRef()\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack.go", "new_path": "pkg/tcpip/stack/stack.go", "diff": "@@ -1344,7 +1344,7 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n\ns.mu.RLock()\ndefer s.mu.RUnlock()\n- isLinkLocal := header.IsV6LinkLocalAddress(remoteAddr) || header.IsV6LinkLocalMulticastAddress(remoteAddr)\n+ isLinkLocal := header.IsV6LinkLocalUnicastAddress(remoteAddr) || header.IsV6LinkLocalMulticastAddress(remoteAddr)\nisLocalBroadcast := remoteAddr == header.IPv4Broadcast\nisMulticast := header.IsV4MulticastAddress(remoteAddr) || header.IsV6MulticastAddress(remoteAddr)\nisLoopback := 
header.IsV4LoopbackAddress(remoteAddr) || header.IsV6LoopbackAddress(remoteAddr)\n@@ -1381,7 +1381,7 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n\nreturn nil, &tcpip.ErrNetworkUnreachable{}\n}\n- canForward := s.Forwarding(netProto) && !header.IsV6LinkLocalAddress(localAddr) && !isLinkLocal\n+ canForward := s.Forwarding(netProto) && !header.IsV6LinkLocalUnicastAddress(localAddr) && !isLinkLocal\n// Find a route to the remote with the route table.\nvar chosenRoute tcpip.Route\n" } ]
Go
Apache License 2.0
google/gvisor
Rename IsV6LinkLocalAddress to IsV6LinkLocalUnicastAddress To match the V4 variant. PiperOrigin-RevId: 367691981
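A minimal sketch (not the gVisor implementation) of what the renamed predicate tests: the link-local unicast prefix fe80::/10 is ten bits long, so the first byte must be 0xfe and only the top two bits of the second byte are compared (mask 0xc0), meaning fe80:: through febf:: all qualify.

    package main

    import "fmt"

    // isV6LinkLocalUnicast mirrors the fe80::/10 check in the diff above.
    func isV6LinkLocalUnicast(addr []byte) bool {
        return len(addr) == 16 && addr[0] == 0xfe && addr[1]&0xc0 == 0x80
    }

    func main() {
        addr := func(b0, b1 byte) []byte { return append([]byte{b0, b1}, make([]byte, 14)...) }
        fmt.Println(isV6LinkLocalUnicast(addr(0xfe, 0x80))) // true
        fmt.Println(isV6LinkLocalUnicast(addr(0xfe, 0xbf))) // true: still inside fe80::/10
        fmt.Println(isV6LinkLocalUnicast(addr(0xfe, 0xc0))) // false: outside the /10
    }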
260,023
09.04.2021 16:51:23
25,200
dc8f6c6914747c700a629b7717e45759cf1f7650
Move maxListenBacklog check to sentry Move maxListenBacklog check to the caller of endpoint Listen so that it is applicable to Unix domain sockets as well. This was changed in cl/366935921. Reported-by:
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_socket.go", "new_path": "pkg/sentry/syscalls/linux/sys_socket.go", "diff": "@@ -46,6 +46,9 @@ const maxOptLen = 1024 * 8\n// buffers upto INT_MAX.\nconst maxControlLen = 10 * 1024 * 1024\n+// maxListenBacklog is the maximum limit of listen backlog supported.\n+const maxListenBacklog = 1024\n+\n// nameLenOffset is the offset from the start of the MessageHeader64 struct to\n// the NameLen field.\nconst nameLenOffset = 8\n@@ -361,7 +364,7 @@ func Bind(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC\n// Listen implements the linux syscall listen(2).\nfunc Listen(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\nfd := args[0].Int()\n- backlog := args[1].Int()\n+ backlog := args[1].Uint()\n// Get socket from the file descriptor.\nfile := t.GetFile(fd)\n@@ -376,6 +379,16 @@ func Listen(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\nreturn 0, nil, syserror.ENOTSOCK\n}\n+ if backlog > maxListenBacklog {\n+ // Linux treats incoming backlog as uint with a limit defined by\n+ // sysctl_somaxconn.\n+ // https://github.com/torvalds/linux/blob/7acac4b3196/net/socket.c#L1666\n+ //\n+ // We use the backlog to allocate a channel of that size, hence enforce\n+ // a hard limit for the backlog.\n+ backlog = maxListenBacklog\n+ }\n+\nreturn 0, nil, s.Listen(t, int(backlog)).ToError()\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/socket.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/socket.go", "diff": "@@ -46,6 +46,9 @@ const maxOptLen = 1024 * 8\n// buffers upto INT_MAX.\nconst maxControlLen = 10 * 1024 * 1024\n+// maxListenBacklog is the maximum limit of listen backlog supported.\n+const maxListenBacklog = 1024\n+\n// nameLenOffset is the offset from the start of the MessageHeader64 struct to\n// the NameLen field.\nconst nameLenOffset = 8\n@@ -365,7 +368,7 @@ func Bind(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC\n// Listen implements the linux syscall listen(2).\nfunc Listen(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\nfd := args[0].Int()\n- backlog := args[1].Int()\n+ backlog := args[1].Uint()\n// Get socket from the file descriptor.\nfile := t.GetFileVFS2(fd)\n@@ -380,6 +383,16 @@ func Listen(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\nreturn 0, nil, syserror.ENOTSOCK\n}\n+ if backlog > maxListenBacklog {\n+ // Linux treats incoming backlog as uint with a limit defined by\n+ // sysctl_somaxconn.\n+ // https://github.com/torvalds/linux/blob/7acac4b3196/net/socket.c#L1666\n+ //\n+ // We use the backlog to allocate a channel of that size, hence enforce\n+ // a hard limit for the backlog.\n+ backlog = maxListenBacklog\n+ }\n+\nreturn 0, nil, s.Listen(t, int(backlog)).ToError()\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -2474,20 +2474,10 @@ func (e *endpoint) shutdownLocked(flags tcpip.ShutdownFlags) tcpip.Error {\n// Listen puts the endpoint in \"listen\" mode, which allows it to accept\n// new connections.\nfunc (e *endpoint) Listen(backlog int) tcpip.Error {\n- if uint32(backlog) > MaxListenBacklog {\n- // Linux treats incoming backlog as uint with a limit defined by\n- // sysctl_somaxconn.\n- // https://github.com/torvalds/linux/blob/7acac4b3196/net/socket.c#L1666\n- //\n- // We use the backlog to allocate a channel of 
that size, hence enforce\n- // a hard limit for the backlog.\n- backlog = MaxListenBacklog\n- } else {\n// Accept one more than the configured listen backlog to keep in parity with\n// Linux. Ref, because of missing equality check here:\n// https://github.com/torvalds/linux/blob/7acac4b3196/include/net/sock.h#L937\nbacklog++\n- }\nerr := e.listen(backlog)\nif err != nil {\nif !err.IgnoreStats() {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/protocol.go", "new_path": "pkg/tcpip/transport/tcp/protocol.go", "diff": "@@ -68,9 +68,6 @@ const (\n// DefaultSynRetries is the default value for the number of SYN retransmits\n// before a connect is aborted.\nDefaultSynRetries = 6\n-\n- // MaxListenBacklog is the maximum limit of listen backlog supported.\n- MaxListenBacklog = 1024\n)\nconst (\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/accept_bind.cc", "new_path": "test/syscalls/linux/accept_bind.cc", "diff": "@@ -67,6 +67,42 @@ TEST_P(AllSocketPairTest, ListenDecreaseBacklog) {\nSyscallSucceeds());\n}\n+TEST_P(AllSocketPairTest, ListenBacklogSizes_NoRandomSave) {\n+ DisableSave ds;\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),\n+ sockets->first_addr_size()),\n+ SyscallSucceeds());\n+\n+ int type;\n+ socklen_t typelen = sizeof(type);\n+ EXPECT_THAT(\n+ getsockopt(sockets->first_fd(), SOL_SOCKET, SO_TYPE, &type, &typelen),\n+ SyscallSucceeds());\n+\n+ std::array<int, 3> backlogs = {-1, 0, 1};\n+ for (auto& backlog : backlogs) {\n+ ASSERT_THAT(listen(sockets->first_fd(), backlog), SyscallSucceeds());\n+\n+ int expected_accepts = backlog;\n+ if (backlog < 0) {\n+ expected_accepts = 1024;\n+ }\n+ for (int i = 0; i < expected_accepts; i++) {\n+ SCOPED_TRACE(absl::StrCat(\"i=\", i));\n+ // Connect to the listening socket.\n+ const FileDescriptor client =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, type, 0));\n+ ASSERT_THAT(connect(client.get(), sockets->first_addr(),\n+ sockets->first_addr_size()),\n+ SyscallSucceeds());\n+ const FileDescriptor accepted = ASSERT_NO_ERRNO_AND_VALUE(\n+ Accept(sockets->first_fd(), nullptr, nullptr));\n+ }\n+ }\n+}\n+\nTEST_P(AllSocketPairTest, ListenWithoutBind) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nASSERT_THAT(listen(sockets->first_fd(), 0), SyscallFailsWithErrno(EINVAL));\n" } ]
Go
Apache License 2.0
google/gvisor
Move maxListenBacklog check to sentry Move maxListenBacklog check to the caller of endpoint Listen so that it is applicable to Unix domain sockets as well. This was changed in cl/366935921. Reported-by: [email protected] PiperOrigin-RevId: 367728052
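A minimal standalone sketch (not the sentry code) of the unsigned-clamp behaviour described above: because listen(2)'s backlog is read as an unsigned value, a negative argument such as -1 wraps around to a huge number and is clamped to the limit, which is what the new ListenBacklogSizes test expects.

    package main

    import "fmt"

    const maxListenBacklog = 1024 // mirrors the constant introduced in the diff

    // clampBacklog treats the listen(2) argument as unsigned, as Linux does,
    // and caps it at the maximum.
    func clampBacklog(arg int32) uint32 {
        backlog := uint32(arg)
        if backlog > maxListenBacklog {
            backlog = maxListenBacklog
        }
        return backlog
    }

    func main() {
        fmt.Println(clampBacklog(-1))   // 1024: negative wraps around and is clamped
        fmt.Println(clampBacklog(5))    // 5: small values pass through
        fmt.Println(clampBacklog(4096)) // 1024: values above the limit are clamped
    }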
260,001
09.04.2021 17:35:32
25,200
ea7faa50579d3d76c6cbb1f7ffba4e16eebf1885
Return integrity failure only if enabled If the parent is not enabled in verity stepLocked(), failure to find the child dentry could just mean an incorrect path.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/filesystem.go", "new_path": "pkg/sentry/fsimpl/verity/filesystem.go", "diff": "@@ -553,7 +553,7 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,\n}\nchildVD, err := parent.getLowerAt(ctx, vfsObj, name)\n- if err == syserror.ENOENT {\n+ if parent.verityEnabled() && err == syserror.ENOENT {\nreturn nil, alertIntegrityViolation(fmt.Sprintf(\"file %s expected but not found\", parentPath+\"/\"+name))\n}\nif err != nil {\n@@ -565,8 +565,9 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,\ndefer childVD.DecRef(ctx)\nchildMerkleVD, err := parent.getLowerAt(ctx, vfsObj, merklePrefix+name)\n+ if err != nil {\nif err == syserror.ENOENT {\n- if !fs.allowRuntimeEnable {\n+ if parent.verityEnabled() {\nreturn nil, alertIntegrityViolation(fmt.Sprintf(\"Merkle file for %s expected but not found\", parentPath+\"/\"+name))\n}\nchildMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{\n@@ -585,10 +586,10 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,\nif err != nil {\nreturn nil, err\n}\n- }\n- if err != nil && err != syserror.ENOENT {\n+ } else {\nreturn nil, err\n}\n+ }\n// Clear the Merkle tree file if they are to be generated at runtime.\n// TODO(b/182315468): Optimize the Merkle tree generate process to\n" } ]
Go
Apache License 2.0
google/gvisor
Return integrity failure only if enabled If the parent is not enabled in verity stepLocked(), failure to find the child dentry could just mean an incorrect path. PiperOrigin-RevId: 367733412
259,858
12.04.2021 12:33:15
25,200
9c87ef53fde887aa4f7151249029b534ca8c0998
Add /etc/containerd/runsc.toml to conffiles attribute. Fixes
[ { "change_type": "MODIFY", "old_path": "debian/BUILD", "new_path": "debian/BUILD", "diff": "@@ -29,6 +29,9 @@ pkg_deb(\narm64 = \"arm64\",\n),\nchanges = \"runsc.changes\",\n+ conffiles = [\n+ \"/etc/containerd/runsc.toml\",\n+ ],\ndata = \":debian-data\",\ndeb = \"runsc.deb\",\n# Note that the description_file will be flatten (all newlines removed),\n" } ]
Go
Apache License 2.0
google/gvisor
Add /etc/containerd/runsc.toml to conffiles attribute. Fixes #5817 PiperOrigin-RevId: 368060056
259,907
12.04.2021 13:51:11
25,200
982fc8b5765795493c25f07d5f86c20a4fc940c1
[op] Use faster go_marshal methods in netfilter. Use MarshalUnsafe for packed types as it is faster than MarshalBytes.
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/netfilter.go", "new_path": "pkg/abi/linux/netfilter.go", "diff": "@@ -145,13 +145,13 @@ func (ke *KernelIPTEntry) SizeBytes() int {\n// MarshalBytes implements marshal.Marshallable.MarshalBytes.\nfunc (ke *KernelIPTEntry) MarshalBytes(dst []byte) {\n- ke.Entry.MarshalBytes(dst)\n+ ke.Entry.MarshalUnsafe(dst)\nke.Elems.MarshalBytes(dst[ke.Entry.SizeBytes():])\n}\n// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.\nfunc (ke *KernelIPTEntry) UnmarshalBytes(src []byte) {\n- ke.Entry.UnmarshalBytes(src)\n+ ke.Entry.UnmarshalUnsafe(src)\nke.Elems.UnmarshalBytes(src[ke.Entry.SizeBytes():])\n}\n@@ -440,7 +440,7 @@ func (ke *KernelIPTGetEntries) SizeBytes() int {\n// MarshalBytes implements marshal.Marshallable.MarshalBytes.\nfunc (ke *KernelIPTGetEntries) MarshalBytes(dst []byte) {\n- ke.IPTGetEntries.MarshalBytes(dst)\n+ ke.IPTGetEntries.MarshalUnsafe(dst)\nmarshalledUntil := ke.IPTGetEntries.SizeBytes()\nfor i := range ke.Entrytable {\nke.Entrytable[i].MarshalBytes(dst[marshalledUntil:])\n@@ -450,7 +450,7 @@ func (ke *KernelIPTGetEntries) MarshalBytes(dst []byte) {\n// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.\nfunc (ke *KernelIPTGetEntries) UnmarshalBytes(src []byte) {\n- ke.IPTGetEntries.UnmarshalBytes(src)\n+ ke.IPTGetEntries.UnmarshalUnsafe(src)\nunmarshalledUntil := ke.IPTGetEntries.SizeBytes()\nfor i := range ke.Entrytable {\nke.Entrytable[i].UnmarshalBytes(src[unmarshalledUntil:])\n" }, { "change_type": "MODIFY", "old_path": "pkg/abi/linux/netfilter_ipv6.go", "new_path": "pkg/abi/linux/netfilter_ipv6.go", "diff": "@@ -86,7 +86,7 @@ func (ke *KernelIP6TGetEntries) SizeBytes() int {\n// MarshalBytes implements marshal.Marshallable.MarshalBytes.\nfunc (ke *KernelIP6TGetEntries) MarshalBytes(dst []byte) {\n- ke.IPTGetEntries.MarshalBytes(dst)\n+ ke.IPTGetEntries.MarshalUnsafe(dst)\nmarshalledUntil := ke.IPTGetEntries.SizeBytes()\nfor i := range ke.Entrytable {\nke.Entrytable[i].MarshalBytes(dst[marshalledUntil:])\n@@ -96,7 +96,7 @@ func (ke *KernelIP6TGetEntries) MarshalBytes(dst []byte) {\n// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.\nfunc (ke *KernelIP6TGetEntries) UnmarshalBytes(src []byte) {\n- ke.IPTGetEntries.UnmarshalBytes(src)\n+ ke.IPTGetEntries.UnmarshalUnsafe(src)\nunmarshalledUntil := ke.IPTGetEntries.SizeBytes()\nfor i := range ke.Entrytable {\nke.Entrytable[i].UnmarshalBytes(src[unmarshalledUntil:])\n@@ -149,8 +149,8 @@ type IP6TEntry struct {\nconst SizeOfIP6TEntry = 168\n// KernelIP6TEntry is identical to IP6TEntry, but includes the Elems field.\n-// KernelIP6TEntry itself is not Marshallable but it implements some methods of\n-// marshal.Marshallable that help in other implementations of Marshallable.\n+//\n+// +marshal dynamic\ntype KernelIP6TEntry struct {\nEntry IP6TEntry\n@@ -168,13 +168,13 @@ func (ke *KernelIP6TEntry) SizeBytes() int {\n// MarshalBytes implements marshal.Marshallable.MarshalBytes.\nfunc (ke *KernelIP6TEntry) MarshalBytes(dst []byte) {\n- ke.Entry.MarshalBytes(dst)\n+ ke.Entry.MarshalUnsafe(dst)\nke.Elems.MarshalBytes(dst[ke.Entry.SizeBytes():])\n}\n// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.\nfunc (ke *KernelIP6TEntry) UnmarshalBytes(src []byte) {\n- ke.Entry.UnmarshalBytes(src)\n+ ke.Entry.UnmarshalUnsafe(src)\nke.Elems.UnmarshalBytes(src[ke.Entry.SizeBytes():])\n}\n" } ]
Go
Apache License 2.0
google/gvisor
[op] Use faster go_marshal methods in netfilter. Use MarshalUnsafe for packed types as it is faster than MarshalBytes. PiperOrigin-RevId: 368076368
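A rough standalone illustration (not the generated go_marshal code) of why MarshalUnsafe wins for packed types: a struct with no padding has the same in-memory layout as its wire format, so one raw copy replaces per-field encoding. The comparison assumes a little-endian host, and unsafe.Slice requires Go 1.17+.

    package main

    import (
        "encoding/binary"
        "unsafe"
    )

    // packedHeader has no padding, so its in-memory bytes are its wire bytes
    // on a little-endian machine.
    type packedHeader struct {
        A uint32
        B uint32
    }

    // marshalBytes encodes field by field, the slower generic path.
    func (h *packedHeader) marshalBytes(dst []byte) {
        binary.LittleEndian.PutUint32(dst[0:4], h.A)
        binary.LittleEndian.PutUint32(dst[4:8], h.B)
    }

    // marshalUnsafe copies the raw representation in one shot; valid only
    // because the type is packed.
    func (h *packedHeader) marshalUnsafe(dst []byte) {
        copy(dst, unsafe.Slice((*byte)(unsafe.Pointer(h)), unsafe.Sizeof(*h)))
    }

    func main() {
        h := packedHeader{A: 1, B: 2}
        a, b := make([]byte, 8), make([]byte, 8)
        h.marshalBytes(a)
        h.marshalUnsafe(b)
        println(string(a) == string(b)) // true on little-endian hosts
    }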
259,985
12.04.2021 15:10:19
25,200
f4f6ce337aa8ceb46a5a8f783efb770c9a8a2858
Don't grab TaskSet mu recursively when reading task state. Reported-by: Reported-by:
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/kernel.go", "new_path": "pkg/sentry/kernel/kernel.go", "diff": "@@ -1854,7 +1854,7 @@ func (k *Kernel) Release() {\nfunc (k *Kernel) PopulateNewCgroupHierarchy(root Cgroup) {\nk.tasks.mu.RLock()\nk.tasks.forEachTaskLocked(func(t *Task) {\n- if t.ExitState() != TaskExitNone {\n+ if t.exitState != TaskExitNone {\nreturn\n}\nt.mu.Lock()\n@@ -1870,7 +1870,7 @@ func (k *Kernel) PopulateNewCgroupHierarchy(root Cgroup) {\nfunc (k *Kernel) ReleaseCgroupHierarchy(hid uint32) {\nk.tasks.mu.RLock()\nk.tasks.forEachTaskLocked(func(t *Task) {\n- if t.ExitState() != TaskExitNone {\n+ if t.exitState != TaskExitNone {\nreturn\n}\nt.mu.Lock()\n" } ]
Go
Apache License 2.0
google/gvisor
Don't grab TaskSet mu recursively when reading task state. Reported-by: [email protected] Reported-by: [email protected] PiperOrigin-RevId: 368093861
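A small self-contained sketch (toy types, not the sentry ones) of the hazard behind this fix: sync.RWMutex read locks are not reentrant, so an accessor that takes the same read lock its caller already holds can wedge behind a queued writer. Reading the field directly while the lock is held avoids the nested acquisition. The nested reader runs on a separate goroutine here only so the demo times out instead of hanging.

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    var mu sync.RWMutex

    // exitState stands in for an accessor that re-acquires the read lock its
    // caller already holds.
    func exitState() { mu.RLock(); defer mu.RUnlock() }

    func main() {
        mu.RLock()                             // outer read lock, like tasks.mu above
        go func() { mu.Lock(); mu.Unlock() }() // a writer queues up behind the reader
        time.Sleep(100 * time.Millisecond)

        done := make(chan struct{})
        go func() { exitState(); close(done) }()
        select {
        case <-done:
            fmt.Println("nested RLock got through (timing dependent)")
        case <-time.After(time.Second):
            fmt.Println("nested RLock stuck behind the queued writer: a single goroutine would deadlock")
        }
        mu.RUnlock()
    }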
260,001
12.04.2021 15:24:49
25,200
c4c6a71fb9d5ed51d0e9e2d6a78eaabf96174849
Add DecRef for verity FDs that were missing Some FileDescriptions in verity fs were opened but DecRef() were missing after used. This could result in a ref leak.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/filesystem.go", "new_path": "pkg/sentry/fsimpl/verity/filesystem.go", "diff": "@@ -229,6 +229,8 @@ func (fs *filesystem) verifyChildLocked(ctx context.Context, parent *dentry, chi\nreturn nil, err\n}\n+ defer parentMerkleFD.DecRef(ctx)\n+\n// dataSize is the size of raw data for the Merkle tree. For a file,\n// dataSize is the size of the whole file. For a directory, dataSize is\n// the size of all its children's hashes.\n@@ -337,6 +339,8 @@ func (fs *filesystem) verifyStatAndChildrenLocked(ctx context.Context, d *dentry\nreturn err\n}\n+ defer fd.DecRef(ctx)\n+\nmerkleSize, err := fd.GetXattr(ctx, &vfs.GetXattrOptions{\nName: merkleSizeXattr,\nSize: sizeOfStringInt32,\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/verity.go", "new_path": "pkg/sentry/fsimpl/verity/verity.go", "diff": "@@ -494,6 +494,8 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nreturn nil, nil, err\n}\n+ defer lowerMerkleFD.DecRef(ctx)\n+\nchildrenNames := make([]byte, size)\nif _, err := lowerMerkleFD.PRead(ctx, usermem.BytesIOSequence(childrenNames), int64(off), vfs.ReadOptions{}); err != nil {\nreturn nil, nil, alertIntegrityViolation(fmt.Sprintf(\"Failed to read root children map: %v\", err))\n" } ]
Go
Apache License 2.0
google/gvisor
Add DecRef for verity FDs that were missing Some FileDescriptions in verity fs were opened but DecRef() were missing after used. This could result in a ref leak. PiperOrigin-RevId: 368096759
259,884
12.04.2021 17:54:25
25,200
90900e4f8f6760f1de34e660030a0155cfd6b40a
Don't mark exported PRs as stale.
[ { "change_type": "MODIFY", "old_path": ".github/workflows/stale.yml", "new_path": ".github/workflows/stale.yml", "diff": "@@ -15,7 +15,7 @@ jobs:\nstale-issue-label: 'stale'\nstale-pr-label: 'stale'\nexempt-issue-labels: 'exported, type: bug, type: cleanup, type: enhancement, type: process, type: proposal, type: question'\n- exempt-pr-labels: 'ready to pull'\n+ exempt-pr-labels: 'ready to pull, exported'\nstale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove the stale label or comment or this will be closed in 30 days.'\nstale-pr-message: 'This pull request is stale because it has been open 90 days with no activity. Remove the stale label or comment or this will be closed in 30 days.'\ndays-before-stale: 90\n" } ]
Go
Apache License 2.0
google/gvisor
Don't mark exported PRs as stale. PiperOrigin-RevId: 368121539
259,975
14.04.2021 11:27:05
25,200
5c1052b6bb2658208f1afaf423aeac98f30235c1
[syserror] Remove syserror from go_marshal
[ { "change_type": "MODIFY", "old_path": "pkg/gohacks/BUILD", "new_path": "pkg/gohacks/BUILD", "diff": "-load(\"//tools:defs.bzl\", \"go_library\")\n+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\npackage(licenses = [\"notice\"])\n@@ -10,3 +10,11 @@ go_library(\nstateify = False,\nvisibility = [\"//:sandbox\"],\n)\n+\n+go_test(\n+ name = \"gohacks_test\",\n+ size = \"small\",\n+ srcs = [\"gohacks_test.go\"],\n+ library = \":gohacks\",\n+ deps = [\"@org_golang_x_sys//unix:go_default_library\"],\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/gohacks/gohacks_test.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package gohacks\n+\n+import (\n+ \"io/ioutil\"\n+ \"math/rand\"\n+ \"os\"\n+ \"runtime/debug\"\n+ \"testing\"\n+\n+ \"golang.org/x/sys/unix\"\n+)\n+\n+func randBuf(size int) []byte {\n+ b := make([]byte, size)\n+ for i := range b {\n+ b[i] = byte(rand.Intn(256))\n+ }\n+ return b\n+}\n+\n+// Size of a page in bytes. Cloned from hostarch.PageSize to avoid a circular\n+// dependency.\n+const pageSize = 4096\n+\n+func testCopy(dst, src []byte) (panicked bool) {\n+ defer func() {\n+ if r := recover(); r != nil {\n+ panicked = true\n+ }\n+ }()\n+ debug.SetPanicOnFault(true)\n+ copy(dst, src)\n+ return panicked\n+}\n+\n+func TestSegVOnMemmove(t *testing.T) {\n+ // Test that SIGSEGVs received by runtime.memmove when *not* doing\n+ // CopyIn or CopyOut work gets propagated to the runtime.\n+ const bufLen = pageSize\n+ a, err := unix.Mmap(-1, 0, bufLen, unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE)\n+ if err != nil {\n+ t.Fatalf(\"Mmap failed: %v\", err)\n+\n+ }\n+ defer unix.Munmap(a)\n+ b := randBuf(bufLen)\n+\n+ if !testCopy(b, a) {\n+ t.Fatalf(\"testCopy didn't panic when it should have\")\n+ }\n+\n+ if !testCopy(a, b) {\n+ t.Fatalf(\"testCopy didn't panic when it should have\")\n+ }\n+}\n+\n+func TestSigbusOnMemmove(t *testing.T) {\n+ // Test that SIGBUS received by runtime.memmove when *not* doing\n+ // CopyIn or CopyOut work gets propagated to the runtime.\n+ const bufLen = pageSize\n+ f, err := ioutil.TempFile(\"\", \"sigbus_test\")\n+ if err != nil {\n+ t.Fatalf(\"TempFile failed: %v\", err)\n+ }\n+ os.Remove(f.Name())\n+ defer f.Close()\n+\n+ a, err := unix.Mmap(int(f.Fd()), 0, bufLen, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)\n+ if err != nil {\n+ t.Fatalf(\"Mmap failed: %v\", err)\n+\n+ }\n+ defer unix.Munmap(a)\n+ b := randBuf(bufLen)\n+\n+ if !testCopy(b, a) {\n+ t.Fatalf(\"testCopy didn't panic when it should have\")\n+ }\n+\n+ if !testCopy(a, b) {\n+ t.Fatalf(\"testCopy didn't panic when it should have\")\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/gohacks/gohacks_unsafe.go", "new_path": "pkg/gohacks/gohacks_unsafe.go", "diff": "@@ -75,3 +75,17 @@ func StringFromImmutableBytes(bs []byte) string {\n// strings.Builder.String().\nreturn *(*string)(unsafe.Pointer(&bs))\n}\n+\n+// Note that go:linkname silently doesn't work if the local name is 
exported,\n+// necessitating an indirection for exported functions.\n+\n+// Memmove is runtime.memmove, exported for SeqAtomicLoad/SeqAtomicTryLoad<T>.\n+//\n+//go:nosplit\n+func Memmove(to, from unsafe.Pointer, n uintptr) {\n+ memmove(to, from, n)\n+}\n+\n+//go:linkname memmove runtime.memmove\n+//go:noescape\n+func memmove(to, from unsafe.Pointer, n uintptr)\n" }, { "change_type": "MODIFY", "old_path": "pkg/marshal/marshal.go", "new_path": "pkg/marshal/marshal.go", "diff": "@@ -166,6 +166,9 @@ type Marshallable interface {\n// %s is the first argument to the slice clause. This directive is not supported\n// for newtypes on arrays.\n//\n+// Note: Partial copies are not supported for Slice API UnmarshalUnsafe and\n+// MarshalUnsafe.\n+//\n// The slice clause also takes an optional second argument, which must be the\n// value \"inner\":\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/safecopy/safecopy_test.go", "new_path": "pkg/safecopy/safecopy_test.go", "diff": "@@ -19,8 +19,6 @@ import (\n\"fmt\"\n\"io/ioutil\"\n\"math/rand\"\n- \"os\"\n- \"runtime/debug\"\n\"testing\"\n\"unsafe\"\n@@ -568,63 +566,3 @@ func TestCompareAndSwapUint32BusError(t *testing.T) {\n}\n})\n}\n-\n-func testCopy(dst, src []byte) (panicked bool) {\n- defer func() {\n- if r := recover(); r != nil {\n- panicked = true\n- }\n- }()\n- debug.SetPanicOnFault(true)\n- copy(dst, src)\n- return\n-}\n-\n-func TestSegVOnMemmove(t *testing.T) {\n- // Test that SIGSEGVs received by runtime.memmove when *not* doing\n- // CopyIn or CopyOut work gets propagated to the runtime.\n- const bufLen = pageSize\n- a, err := unix.Mmap(-1, 0, bufLen, unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE)\n- if err != nil {\n- t.Fatalf(\"Mmap failed: %v\", err)\n-\n- }\n- defer unix.Munmap(a)\n- b := randBuf(bufLen)\n-\n- if !testCopy(b, a) {\n- t.Fatalf(\"testCopy didn't panic when it should have\")\n- }\n-\n- if !testCopy(a, b) {\n- t.Fatalf(\"testCopy didn't panic when it should have\")\n- }\n-}\n-\n-func TestSigbusOnMemmove(t *testing.T) {\n- // Test that SIGBUS received by runtime.memmove when *not* doing\n- // CopyIn or CopyOut work gets propagated to the runtime.\n- const bufLen = pageSize\n- f, err := ioutil.TempFile(\"\", \"sigbus_test\")\n- if err != nil {\n- t.Fatalf(\"TempFile failed: %v\", err)\n- }\n- os.Remove(f.Name())\n- defer f.Close()\n-\n- a, err := unix.Mmap(int(f.Fd()), 0, bufLen, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)\n- if err != nil {\n- t.Fatalf(\"Mmap failed: %v\", err)\n-\n- }\n- defer unix.Munmap(a)\n- b := randBuf(bufLen)\n-\n- if !testCopy(b, a) {\n- t.Fatalf(\"testCopy didn't panic when it should have\")\n- }\n-\n- if !testCopy(a, b) {\n- t.Fatalf(\"testCopy didn't panic when it should have\")\n- }\n-}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/time/BUILD", "new_path": "pkg/sentry/time/BUILD", "diff": "@@ -32,6 +32,7 @@ go_library(\n],\nvisibility = [\"//:sandbox\"],\ndeps = [\n+ \"//pkg/gohacks\",\n\"//pkg/log\",\n\"//pkg/metric\",\n\"//pkg/sync\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sync/BUILD", "new_path": "pkg/sync/BUILD", "diff": "@@ -43,6 +43,7 @@ go_template(\n],\ndeps = [\n\":sync\",\n+ \"//pkg/gohacks\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sync/generic_seqatomic_unsafe.go", "new_path": "pkg/sync/generic_seqatomic_unsafe.go", "diff": "@@ -10,6 +10,7 @@ package seqatomic\nimport (\n\"unsafe\"\n+ \"gvisor.dev/gvisor/pkg/gohacks\"\n\"gvisor.dev/gvisor/pkg/sync\"\n)\n@@ -39,7 +40,7 @@ func SeqAtomicTryLoad(seq *sync.SeqCount, epoch sync.SeqCountEpoch, 
ptr *Value)\n// runtime.RaceDisable() doesn't actually stop the race detector, so it\n// can't help us here. Instead, call runtime.memmove directly, which is\n// not instrumented by the race detector.\n- sync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))\n+ gohacks.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))\n} else {\n// This is ~40% faster for short reads than going through memmove.\nval = *ptr\n" }, { "change_type": "MODIFY", "old_path": "pkg/sync/runtime_unsafe.go", "new_path": "pkg/sync/runtime_unsafe.go", "diff": "@@ -17,20 +17,6 @@ import (\n\"unsafe\"\n)\n-// Note that go:linkname silently doesn't work if the local name is exported,\n-// necessitating an indirection for exported functions.\n-\n-// Memmove is runtime.memmove, exported for SeqAtomicLoad/SeqAtomicTryLoad<T>.\n-//\n-//go:nosplit\n-func Memmove(to, from unsafe.Pointer, n uintptr) {\n- memmove(to, from, n)\n-}\n-\n-//go:linkname memmove runtime.memmove\n-//go:noescape\n-func memmove(to, from unsafe.Pointer, n uintptr)\n-\n// Gopark is runtime.gopark. Gopark calls unlockf(pointer to runtime.g, lock);\n// if unlockf returns true, Gopark blocks until Goready(pointer to runtime.g)\n// is called. unlockf and its callees must be nosplit and norace, since stack\n" }, { "change_type": "MODIFY", "old_path": "pkg/sync/seqatomictest/BUILD", "new_path": "pkg/sync/seqatomictest/BUILD", "diff": "@@ -18,6 +18,7 @@ go_library(\nname = \"seqatomic\",\nsrcs = [\"seqatomic_int_unsafe.go\"],\ndeps = [\n+ \"//pkg/gohacks\",\n\"//pkg/sync\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "tools/go_marshal/defs.bzl", "new_path": "tools/go_marshal/defs.bzl", "diff": "@@ -57,7 +57,6 @@ go_marshal = rule(\n# marshal_deps are the dependencies requied by generated code.\nmarshal_deps = [\n\"//pkg/gohacks\",\n- \"//pkg/safecopy\",\n\"//pkg/hostarch\",\n\"//pkg/marshal\",\n]\n" }, { "change_type": "MODIFY", "old_path": "tools/go_marshal/gomarshal/generator.go", "new_path": "tools/go_marshal/gomarshal/generator.go", "diff": "@@ -112,10 +112,8 @@ func NewGenerator(srcs []string, out, outTest, outTestUnconditional, pkg string,\ng.imports.add(\"runtime\")\ng.imports.add(\"unsafe\")\ng.imports.add(\"gvisor.dev/gvisor/pkg/gohacks\")\n- g.imports.add(\"gvisor.dev/gvisor/pkg/safecopy\")\ng.imports.add(\"gvisor.dev/gvisor/pkg/hostarch\")\ng.imports.add(\"gvisor.dev/gvisor/pkg/marshal\")\n-\nreturn &g, nil\n}\n" }, { "change_type": "MODIFY", "old_path": "tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go", "new_path": "tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go", "diff": "@@ -33,13 +33,13 @@ func (g *interfaceGenerator) validateArrayNewtype(n *ast.Ident, a *ast.ArrayType\n}\nfunc (g *interfaceGenerator) emitMarshallableForArrayNewtype(n *ast.Ident, a *ast.ArrayType, elt *ast.Ident) {\n+ g.recordUsedImport(\"gohacks\")\n+ g.recordUsedImport(\"hostarch\")\ng.recordUsedImport(\"io\")\ng.recordUsedImport(\"marshal\")\ng.recordUsedImport(\"reflect\")\ng.recordUsedImport(\"runtime\")\n- g.recordUsedImport(\"safecopy\")\ng.recordUsedImport(\"unsafe\")\n- g.recordUsedImport(\"hostarch\")\nlenExpr := g.arrayLenExpr(a)\n@@ -89,14 +89,14 @@ func (g *interfaceGenerator) emitMarshallableForArrayNewtype(n *ast.Ident, a *as\ng.emit(\"// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.\\n\")\ng.emit(\"func (%s *%s) MarshalUnsafe(dst []byte) {\\n\", g.r, g.typeName())\ng.inIndent(func() {\n- g.emit(\"safecopy.CopyIn(dst, unsafe.Pointer(%s))\\n\", g.r)\n+ 
g.emit(\"gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&%s[0]), uintptr(len(dst)))\\n\", g.r)\n})\ng.emit(\"}\\n\\n\")\ng.emit(\"// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.\\n\")\ng.emit(\"func (%s *%s) UnmarshalUnsafe(src []byte) {\\n\", g.r, g.typeName())\ng.inIndent(func() {\n- g.emit(\"safecopy.CopyOut(unsafe.Pointer(%s), src)\\n\", g.r)\n+ g.emit(\"gohacks.Memmove(unsafe.Pointer(%s), unsafe.Pointer(&src[0]), uintptr(len(src)))\\n\", g.r)\n})\ng.emit(\"}\\n\\n\")\n" }, { "change_type": "MODIFY", "old_path": "tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go", "new_path": "tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go", "diff": "@@ -95,13 +95,13 @@ func (g *interfaceGenerator) validatePrimitiveNewtype(t *ast.Ident) {\n// newtypes are always packed, so we can omit the various fallbacks required for\n// non-packed structs.\nfunc (g *interfaceGenerator) emitMarshallableForPrimitiveNewtype(nt *ast.Ident) {\n+ g.recordUsedImport(\"gohacks\")\n+ g.recordUsedImport(\"hostarch\")\ng.recordUsedImport(\"io\")\ng.recordUsedImport(\"marshal\")\ng.recordUsedImport(\"reflect\")\ng.recordUsedImport(\"runtime\")\n- g.recordUsedImport(\"safecopy\")\ng.recordUsedImport(\"unsafe\")\n- g.recordUsedImport(\"hostarch\")\ng.emit(\"// SizeBytes implements marshal.Marshallable.SizeBytes.\\n\")\ng.emit(\"//go:nosplit\\n\")\n@@ -141,14 +141,14 @@ func (g *interfaceGenerator) emitMarshallableForPrimitiveNewtype(nt *ast.Ident)\ng.emit(\"// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.\\n\")\ng.emit(\"func (%s *%s) MarshalUnsafe(dst []byte) {\\n\", g.r, g.typeName())\ng.inIndent(func() {\n- g.emit(\"safecopy.CopyIn(dst, unsafe.Pointer(%s))\\n\", g.r)\n+ g.emit(\"gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(%s), uintptr(len(dst)))\\n\", g.r)\n})\ng.emit(\"}\\n\\n\")\ng.emit(\"// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.\\n\")\ng.emit(\"func (%s *%s) UnmarshalUnsafe(src []byte) {\\n\", g.r, g.typeName())\ng.inIndent(func() {\n- g.emit(\"safecopy.CopyOut(unsafe.Pointer(%s), src)\\n\", g.r)\n+ g.emit(\"gohacks.Memmove(unsafe.Pointer(%s), unsafe.Pointer(&src[0]), uintptr(len(src)))\\n\", g.r)\n})\ng.emit(\"}\\n\\n\")\n@@ -260,11 +260,9 @@ func (g *interfaceGenerator) emitMarshallableSliceForPrimitiveNewtype(nt *ast.Id\ng.emit(\"}\\n\")\ng.emit(\"size := (*%s)(nil).SizeBytes()\\n\\n\", g.typeName())\n- g.emitNoEscapeSliceDataPointer(\"&src\", \"val\")\n-\n- g.emit(\"length, err := safecopy.CopyIn(dst[:(size*count)], val)\\n\")\n- g.emitKeepAlive(\"src\")\n- g.emit(\"return length, err\\n\")\n+ g.emit(\"dst = dst[:size*count]\\n\")\n+ g.emit(\"gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))\\n\")\n+ g.emit(\"return size*count, nil\\n\")\n})\ng.emit(\"}\\n\\n\")\n@@ -279,11 +277,9 @@ func (g *interfaceGenerator) emitMarshallableSliceForPrimitiveNewtype(nt *ast.Id\ng.emit(\"}\\n\")\ng.emit(\"size := (*%s)(nil).SizeBytes()\\n\\n\", g.typeName())\n- g.emitNoEscapeSliceDataPointer(\"&dst\", \"val\")\n-\n- g.emit(\"length, err := safecopy.CopyOut(val, src[:(size*count)])\\n\")\n- g.emitKeepAlive(\"dst\")\n- g.emit(\"return length, err\\n\")\n+ g.emit(\"src = src[:(size*count)]\\n\")\n+ g.emit(\"gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))\\n\")\n+ g.emit(\"return size*count, nil\\n\")\n})\ng.emit(\"}\\n\\n\")\n}\n" }, { "change_type": "MODIFY", "old_path": "tools/go_marshal/gomarshal/generator_interfaces_struct.go", "new_path": 
"tools/go_marshal/gomarshal/generator_interfaces_struct.go", "diff": "@@ -270,18 +270,18 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {\ng.emit(\"%s.MarshalBytes(dst)\\n\", g.r)\n}\nif thisPacked {\n- g.recordUsedImport(\"safecopy\")\n+ g.recordUsedImport(\"gohacks\")\ng.recordUsedImport(\"unsafe\")\nif cond, ok := g.areFieldsPackedExpression(); ok {\ng.emit(\"if %s {\\n\", cond)\ng.inIndent(func() {\n- g.emit(\"safecopy.CopyIn(dst, unsafe.Pointer(%s))\\n\", g.r)\n+ g.emit(\"gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(%s), uintptr(len(dst)))\\n\", g.r)\n})\ng.emit(\"} else {\\n\")\ng.inIndent(fallback)\ng.emit(\"}\\n\")\n} else {\n- g.emit(\"safecopy.CopyIn(dst, unsafe.Pointer(%s))\\n\", g.r)\n+ g.emit(\"gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(%s), uintptr(len(dst)))\\n\", g.r)\n}\n} else {\nfallback()\n@@ -297,25 +297,23 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {\ng.emit(\"%s.UnmarshalBytes(src)\\n\", g.r)\n}\nif thisPacked {\n- g.recordUsedImport(\"safecopy\")\n- g.recordUsedImport(\"unsafe\")\n+ g.recordUsedImport(\"gohacks\")\nif cond, ok := g.areFieldsPackedExpression(); ok {\ng.emit(\"if %s {\\n\", cond)\ng.inIndent(func() {\n- g.emit(\"safecopy.CopyOut(unsafe.Pointer(%s), src)\\n\", g.r)\n+ g.emit(\"gohacks.Memmove(unsafe.Pointer(%s), unsafe.Pointer(&src[0]), uintptr(len(src)))\\n\", g.r)\n})\ng.emit(\"} else {\\n\")\ng.inIndent(fallback)\ng.emit(\"}\\n\")\n} else {\n- g.emit(\"safecopy.CopyOut(unsafe.Pointer(%s), src)\\n\", g.r)\n+ g.emit(\"gohacks.Memmove(unsafe.Pointer(%s), unsafe.Pointer(&src[0]), uintptr(len(src)))\\n\", g.r)\n}\n} else {\nfallback()\n}\n})\ng.emit(\"}\\n\\n\")\n-\ng.emit(\"// CopyOutN implements marshal.Marshallable.CopyOutN.\\n\")\ng.emit(\"//go:nosplit\\n\")\ng.recordUsedImport(\"marshal\")\n@@ -561,16 +559,15 @@ func (g *interfaceGenerator) emitMarshallableSliceForStruct(st *ast.StructType,\ng.recordUsedImport(\"reflect\")\ng.recordUsedImport(\"runtime\")\ng.recordUsedImport(\"unsafe\")\n+ g.recordUsedImport(\"gohacks\")\nif _, ok := g.areFieldsPackedExpression(); ok {\ng.emit(\"if !src[0].Packed() {\\n\")\ng.inIndent(fallback)\ng.emit(\"}\\n\\n\")\n}\n- g.emitNoEscapeSliceDataPointer(\"&src\", \"val\")\n-\n- g.emit(\"length, err := safecopy.CopyIn(dst[:(size*count)], val)\\n\")\n- g.emitKeepAlive(\"src\")\n- g.emit(\"return length, err\\n\")\n+ g.emit(\"dst = dst[:size*count]\\n\")\n+ g.emit(\"gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))\\n\")\n+ g.emit(\"return size * count, nil\\n\")\n} else {\nfallback()\n}\n@@ -598,19 +595,19 @@ func (g *interfaceGenerator) emitMarshallableSliceForStruct(st *ast.StructType,\ng.emit(\"return size * count, nil\\n\")\n}\nif thisPacked {\n+ g.recordUsedImport(\"gohacks\")\ng.recordUsedImport(\"reflect\")\ng.recordUsedImport(\"runtime\")\n- g.recordUsedImport(\"unsafe\")\nif _, ok := g.areFieldsPackedExpression(); ok {\ng.emit(\"if !dst[0].Packed() {\\n\")\ng.inIndent(fallback)\ng.emit(\"}\\n\\n\")\n}\n- g.emitNoEscapeSliceDataPointer(\"&dst\", \"val\")\n- g.emit(\"length, err := safecopy.CopyOut(val, src[:(size*count)])\\n\")\n- g.emitKeepAlive(\"dst\")\n- g.emit(\"return length, err\\n\")\n+ g.emit(\"src = src[:(size*count)]\\n\")\n+ g.emit(\"gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))\\n\")\n+\n+ g.emit(\"return count*size, nil\\n\")\n} else {\nfallback()\n}\n" } ]
Go
Apache License 2.0
google/gvisor
[syserror] Remove syserror from go_marshal PiperOrigin-RevId: 368470656
260,003
14.04.2021 13:23:31
25,200
272d2e1168733fa7707ad21fca6f7a847f34bf1b
Make the generated test binary name match the target name
[ { "change_type": "MODIFY", "old_path": "test/runner/defs.bzl", "new_path": "test/runner/defs.bzl", "diff": "@@ -4,7 +4,7 @@ load(\"//tools:defs.bzl\", \"default_platform\", \"platforms\")\ndef _runner_test_impl(ctx):\n# Generate a runner binary.\n- runner = ctx.actions.declare_file(\"%s-runner\" % ctx.label.name)\n+ runner = ctx.actions.declare_file(ctx.label.name)\nrunner_content = \"\\n\".join([\n\"#!/bin/bash\",\n\"set -euf -x -o pipefail\",\n" } ]
Go
Apache License 2.0
google/gvisor
Make the generated test binary name match the target name PiperOrigin-RevId: 368495641
259,860
15.04.2021 16:29:15
25,200
b5919d3065a28c1fd6f27612bafd28f06bf3f0d0
Generate notification when closing host fd. Thanks ianlewis@ for discovering the bug/fix!
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/host.go", "new_path": "pkg/sentry/fsimpl/host/host.go", "diff": "@@ -460,6 +460,9 @@ func (i *inode) DecRef(ctx context.Context) {\nif err := unix.Close(i.hostFD); err != nil {\nlog.Warningf(\"failed to close host fd %d: %v\", i.hostFD, err)\n}\n+ // We can't rely on fdnotifier when closing the fd, because the event may race\n+ // with fdnotifier.RemoveFD. Instead, notify the queue explicitly.\n+ i.queue.Notify(waiter.EventHUp | waiter.ReadableEvents | waiter.WritableEvents)\n})\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Generate notification when closing host fd. Thanks ianlewis@ for discovering the bug/fix! PiperOrigin-RevId: 368740744
259,992
15.04.2021 17:02:51
25,200
2e5022974908669d55cf7f47ff8fb7ff5c70c34c
Add S/R logic for host.ConnectedEndpoint Otherwise ConnectedEndpoint.sndbuf will be restored as 0 and writes to the socket will fail with EAGAIN.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/save_restore.go", "new_path": "pkg/sentry/fsimpl/host/save_restore.go", "diff": "@@ -68,3 +68,10 @@ func (i *inode) afterLoad() {\n}\n}\n}\n+\n+// afterLoad is invoked by stateify.\n+func (c *ConnectedEndpoint) afterLoad() {\n+ if err := c.initFromOptions(); err != nil {\n+ panic(fmt.Sprintf(\"initFromOptions failed: %v\", err))\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/socket.go", "new_path": "pkg/sentry/fsimpl/host/socket.go", "diff": "@@ -39,7 +39,7 @@ import (\nfunc newEndpoint(ctx context.Context, hostFD int, queue *waiter.Queue) (transport.Endpoint, error) {\n// Set up an external transport.Endpoint using the host fd.\naddr := fmt.Sprintf(\"hostfd:[%d]\", hostFD)\n- e, err := NewConnectedEndpoint(ctx, hostFD, addr, true /* saveable */)\n+ e, err := NewConnectedEndpoint(hostFD, addr)\nif err != nil {\nreturn nil, err.ToError()\n}\n@@ -86,7 +86,10 @@ type ConnectedEndpoint struct {\n// for restoring them.\nfunc (c *ConnectedEndpoint) init() *syserr.Error {\nc.InitRefs()\n+ return c.initFromOptions()\n+}\n+func (c *ConnectedEndpoint) initFromOptions() *syserr.Error {\nfamily, err := unix.GetsockoptInt(c.fd, unix.SOL_SOCKET, unix.SO_DOMAIN)\nif err != nil {\nreturn syserr.FromError(err)\n@@ -123,7 +126,7 @@ func (c *ConnectedEndpoint) init() *syserr.Error {\n// The caller is responsible for calling Init(). Additionaly, Release needs to\n// be called twice because ConnectedEndpoint is both a transport.Receiver and\n// transport.ConnectedEndpoint.\n-func NewConnectedEndpoint(ctx context.Context, hostFD int, addr string, saveable bool) (*ConnectedEndpoint, *syserr.Error) {\n+func NewConnectedEndpoint(hostFD int, addr string) (*ConnectedEndpoint, *syserr.Error) {\ne := ConnectedEndpoint{\nfd: hostFD,\naddr: addr,\n" } ]
Go
Apache License 2.0
google/gvisor
Add S/R logic for host.ConnectedEndpoint Otherwise ConnectedEndpoint.sndbuf will be restored as 0 and writes to the socket will fail with EAGAIN. PiperOrigin-RevId: 368746660
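The reasoning in the commit above is that the send buffer size is derived from the host socket rather than saved, so it has to be re-derived after restore via an afterLoad hook. A simplified sketch of that shape (type and field names are illustrative only, and it assumes unix.GetsockoptInt with SO_SNDBUF to read the buffer size):

package endpointsketch

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// connectedEndpoint is an illustrative stand-in: fd refers to a host socket,
// and sndbuf mirrors kernel-owned state that is not saved across save/restore.
type connectedEndpoint struct {
	fd     int
	sndbuf int
}

// initFromOptions re-reads state that lives in the host kernel.
func (c *connectedEndpoint) initFromOptions() error {
	sndbuf, err := unix.GetsockoptInt(c.fd, unix.SOL_SOCKET, unix.SO_SNDBUF)
	if err != nil {
		return err
	}
	c.sndbuf = sndbuf
	return nil
}

// afterLoad is the restore-time hook: without it sndbuf restores as zero and
// writes on the endpoint spuriously fail with EAGAIN.
func (c *connectedEndpoint) afterLoad() {
	if err := c.initFromOptions(); err != nil {
		panic(fmt.Sprintf("initFromOptions failed: %v", err))
	}
}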
259,891
15.04.2021 17:13:34
25,200
19dfc4f7af2bd1f204d1973d3e3ced1f1adf615e
Reduce tcp_x_test runtime and memory usage Reduce the ephemeral port range, which decreases the calls to makeEP.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/tcp_test.go", "new_path": "pkg/tcpip/transport/tcp/tcp_test.go", "diff": "@@ -4899,7 +4899,13 @@ func TestConnectAvoidsBoundPorts(t *testing.T) {\nt.Fatalf(\"unknown address type: '%s'\", candidateAddressType)\n}\n- start, end := s.PortRange()\n+ const (\n+ start = 16000\n+ end = 16050\n+ )\n+ if err := s.SetPortRange(start, end); err != nil {\n+ t.Fatalf(\"got s.SetPortRange(%d, %d) = %s, want = nil\", start, end, err)\n+ }\nfor i := start; i <= end; i++ {\nif makeEP(exhaustedNetwork).Bind(tcpip.FullAddress{Addr: address(t, exhaustedAddressType, isAny), Port: uint16(i)}); err != nil {\nt.Fatalf(\"Bind(%d) failed: %s\", i, err)\n" } ]
Go
Apache License 2.0
google/gvisor
Reduce tcp_x_test runtime and memory usage Reduce the ephemeral port range, which decreases the calls to makeEP. PiperOrigin-RevId: 368748379
259,860
15.04.2021 17:23:47
25,200
82dc881dba7399afa4268c5e3a70624db0b1e7ee
Disable failing socket_ipv4_udp_unbound_loopback_test_linux tests.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ipv4_udp_unbound.cc", "new_path": "test/syscalls/linux/socket_ipv4_udp_unbound.cc", "diff": "@@ -80,6 +80,8 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackNoGroup) {\n// Check that not setting a default send interface prevents multicast packets\n// from being sent. Group membership interface configured by address.\nTEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackAddrNoDefaultSendIf) {\n+ // TODO(b/185517803): Fix for native test.\n+ SKIP_IF(!IsRunningOnGvisor());\nauto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\nauto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n@@ -118,6 +120,8 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackAddrNoDefaultSendIf) {\n// Check that not setting a default send interface prevents multicast packets\n// from being sent. Group membership interface configured by NIC ID.\nTEST_P(IPv4UDPUnboundSocketTest, IpMulticastLoopbackNicNoDefaultSendIf) {\n+ // TODO(b/185517803): Fix for native test.\n+ SKIP_IF(!IsRunningOnGvisor());\nauto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\nauto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n@@ -1151,6 +1155,8 @@ TEST_P(IPv4UDPUnboundSocketTest, IpMulticastIfSetNic) {\n}\nTEST_P(IPv4UDPUnboundSocketTest, TestJoinGroupNoIf) {\n+ // TODO(b/185517803): Fix for native test.\n+ SKIP_IF(!IsRunningOnGvisor());\nauto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\nauto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());\n" } ]
Go
Apache License 2.0
google/gvisor
Disable failing socket_ipv4_udp_unbound_loopback_test_linux tests. PiperOrigin-RevId: 368749894
259,896
15.04.2021 20:01:04
25,200
14b7d775c950070378ea799a0b6b7907f67a1f1e
Add field support to the sentry metrics. Fields allow counter metrics to have multiple tabular values. At most one field is supported at the moment.
[ { "change_type": "MODIFY", "old_path": "pkg/metric/metric.go", "new_path": "pkg/metric/metric.go", "diff": "@@ -38,7 +38,7 @@ var (\n)\n// Uint64Metric encapsulates a uint64 that represents some kind of metric to be\n-// monitored.\n+// monitored. We currently support metrics with at most one field.\n//\n// Metrics are not saved across save/restore and thus reset to zero on restore.\n//\n@@ -46,6 +46,16 @@ var (\ntype Uint64Metric struct {\n// value is the actual value of the metric. It must be accessed atomically.\nvalue uint64\n+\n+ // numFields is the number of metric fields. It is immutable once\n+ // initialized.\n+ numFields int\n+\n+ // mu protects the below fields.\n+ mu sync.RWMutex `state:\"nosave\"`\n+\n+ // fields is the map of fields in the metric.\n+ fields map[string]uint64\n}\nvar (\n@@ -97,8 +107,19 @@ type customUint64Metric struct {\n// metadata describes the metric. It is immutable.\nmetadata *pb.MetricMetadata\n- // value returns the current value of the metric.\n- value func() uint64\n+ // value returns the current value of the metric for the given set of\n+ // fields. It takes a variadic number of field values as argument.\n+ value func(fieldValues ...string) uint64\n+}\n+\n+// Field contains the field name and allowed values for the metric which is\n+// used in registration of the metric.\n+type Field struct {\n+ // name is the metric field name.\n+ name string\n+\n+ // allowedValues is the list of allowed values for the field.\n+ allowedValues []string\n}\n// RegisterCustomUint64Metric registers a metric with the given name.\n@@ -109,7 +130,8 @@ type customUint64Metric struct {\n// Preconditions:\n// * name must be globally unique.\n// * Initialize/Disable have not been called.\n-func RegisterCustomUint64Metric(name string, cumulative, sync bool, units pb.MetricMetadata_Units, description string, value func() uint64) error {\n+// * value is expected to accept exactly len(fields) arguments.\n+func RegisterCustomUint64Metric(name string, cumulative, sync bool, units pb.MetricMetadata_Units, description string, value func(...string) uint64, fields ...Field) error {\nif initialized {\nreturn ErrInitializationDone\n}\n@@ -129,13 +151,25 @@ func RegisterCustomUint64Metric(name string, cumulative, sync bool, units pb.Met\n},\nvalue: value,\n}\n+\n+ // Metrics can exist without fields.\n+ if len(fields) > 1 {\n+ panic(\"Sentry metrics support at most one field\")\n+ }\n+\n+ for _, field := range fields {\n+ allMetrics.m[name].metadata.Fields = append(allMetrics.m[name].metadata.Fields, &pb.MetricMetadata_Field{\n+ FieldName: field.name,\n+ AllowedValues: field.allowedValues,\n+ })\n+ }\nreturn nil\n}\n-// MustRegisterCustomUint64Metric calls RegisterCustomUint64Metric and panics\n-// if it returns an error.\n-func MustRegisterCustomUint64Metric(name string, cumulative, sync bool, description string, value func() uint64) {\n- if err := RegisterCustomUint64Metric(name, cumulative, sync, pb.MetricMetadata_UNITS_NONE, description, value); err != nil {\n+// MustRegisterCustomUint64Metric calls RegisterCustomUint64Metric for metrics\n+// without fields and panics if it returns an error.\n+func MustRegisterCustomUint64Metric(name string, cumulative, sync bool, description string, value func(...string) uint64, fields ...Field) {\n+ if err := RegisterCustomUint64Metric(name, cumulative, sync, pb.MetricMetadata_UNITS_NONE, description, value, fields...); err != nil {\npanic(fmt.Sprintf(\"Unable to register metric %q: %v\", name, err))\n}\n}\n@@ -144,15 +178,24 @@ func 
MustRegisterCustomUint64Metric(name string, cumulative, sync bool, descript\n// name.\n//\n// Metrics must be statically defined (i.e., at init).\n-func NewUint64Metric(name string, sync bool, units pb.MetricMetadata_Units, description string) (*Uint64Metric, error) {\n- var m Uint64Metric\n- return &m, RegisterCustomUint64Metric(name, true /* cumulative */, sync, units, description, m.Value)\n+func NewUint64Metric(name string, sync bool, units pb.MetricMetadata_Units, description string, fields ...Field) (*Uint64Metric, error) {\n+ m := Uint64Metric{\n+ numFields: len(fields),\n+ }\n+\n+ if m.numFields == 1 {\n+ m.fields = make(map[string]uint64)\n+ for _, fieldValue := range fields[0].allowedValues {\n+ m.fields[fieldValue] = 0\n+ }\n+ }\n+ return &m, RegisterCustomUint64Metric(name, true /* cumulative */, sync, units, description, m.Value, fields...)\n}\n// MustCreateNewUint64Metric calls NewUint64Metric and panics if it returns an\n// error.\n-func MustCreateNewUint64Metric(name string, sync bool, description string) *Uint64Metric {\n- m, err := NewUint64Metric(name, sync, pb.MetricMetadata_UNITS_NONE, description)\n+func MustCreateNewUint64Metric(name string, sync bool, description string, fields ...Field) *Uint64Metric {\n+ m, err := NewUint64Metric(name, sync, pb.MetricMetadata_UNITS_NONE, description, fields...)\nif err != nil {\npanic(fmt.Sprintf(\"Unable to create metric %q: %v\", name, err))\n}\n@@ -169,19 +212,56 @@ func MustCreateNewUint64NanosecondsMetric(name string, sync bool, description st\nreturn m\n}\n-// Value returns the current value of the metric.\n-func (m *Uint64Metric) Value() uint64 {\n+// Value returns the current value of the metric for the given set of fields.\n+func (m *Uint64Metric) Value(fieldValues ...string) uint64 {\n+ if m.numFields != len(fieldValues) {\n+ panic(fmt.Sprintf(\"Number of fieldValues %d is not equal to the number of metric fields %d\", len(fieldValues), m.numFields))\n+ }\n+\n+ switch m.numFields {\n+ case 0:\nreturn atomic.LoadUint64(&m.value)\n+ case 1:\n+ m.mu.RLock()\n+ defer m.mu.RUnlock()\n+\n+ fieldValue := fieldValues[0]\n+ if _, ok := m.fields[fieldValue]; !ok {\n+ panic(fmt.Sprintf(\"Metric does not allow to have field value %s\", fieldValue))\n+ }\n+ return m.fields[fieldValue]\n+ default:\n+ panic(\"Sentry metrics do not support more than one field\")\n+ }\n}\n-// Increment increments the metric by 1.\n-func (m *Uint64Metric) Increment() {\n- atomic.AddUint64(&m.value, 1)\n+// Increment increments the metric field by 1.\n+func (m *Uint64Metric) Increment(fieldValues ...string) {\n+ m.IncrementBy(1, fieldValues...)\n}\n// IncrementBy increments the metric by v.\n-func (m *Uint64Metric) IncrementBy(v uint64) {\n+func (m *Uint64Metric) IncrementBy(v uint64, fieldValues ...string) {\n+ if m.numFields != len(fieldValues) {\n+ panic(fmt.Sprintf(\"Number of fieldValues %d is not equal to the number of metric fields %d\", len(fieldValues), m.numFields))\n+ }\n+\n+ switch m.numFields {\n+ case 0:\natomic.AddUint64(&m.value, v)\n+ return\n+ case 1:\n+ fieldValue := fieldValues[0]\n+ m.mu.Lock()\n+ defer m.mu.Unlock()\n+\n+ if _, ok := m.fields[fieldValue]; !ok {\n+ panic(fmt.Sprintf(\"Metric does not allow to have field value %s\", fieldValue))\n+ }\n+ m.fields[fieldValue] += v\n+ default:\n+ panic(\"Sentry metrics do not support more than one field\")\n+ }\n}\n// metricSet holds named metrics.\n@@ -199,14 +279,30 @@ func makeMetricSet() metricSet {\n// Values returns a snapshot of all values in m.\nfunc (m *metricSet) Values() 
metricValues {\nvals := make(metricValues)\n+\nfor k, v := range m.m {\n+ fields := v.metadata.GetFields()\n+ switch len(fields) {\n+ case 0:\nvals[k] = v.value()\n+ case 1:\n+ values := fields[0].GetAllowedValues()\n+ fieldsMap := make(map[string]uint64)\n+ for _, fieldValue := range values {\n+ fieldsMap[fieldValue] = v.value(fieldValue)\n+ }\n+ vals[k] = fieldsMap\n+ default:\n+ panic(fmt.Sprintf(\"Unsupported number of metric fields: %d\", len(fields)))\n+ }\n}\nreturn vals\n}\n-// metricValues contains a copy of the values of all metrics.\n-type metricValues map[string]uint64\n+// metricValues contains a copy of the values of all metrics. It is a map\n+// with key as metric name and value can be either uint64 or map[string]uint64\n+// to support metrics with one field.\n+type metricValues map[string]interface{}\nvar (\n// emitMu protects metricsAtLastEmit and ensures that all emitted\n@@ -233,14 +329,37 @@ func EmitMetricUpdate() {\nsnapshot := allMetrics.Values()\nm := pb.MetricUpdate{}\n+ // On the first call metricsAtLastEmit will be empty. Include all\n+ // metrics then.\nfor k, v := range snapshot {\n- // On the first call metricsAtLastEmit will be empty. Include\n- // all metrics then.\n- if prev, ok := metricsAtLastEmit[k]; !ok || prev != v {\n+ prev, ok := metricsAtLastEmit[k]\n+ switch t := v.(type) {\n+ case uint64:\n+ // Metric exists and value did not change.\n+ if ok && prev.(uint64) == t {\n+ continue\n+ }\n+\nm.Metrics = append(m.Metrics, &pb.MetricValue{\nName: k,\n- Value: &pb.MetricValue_Uint64Value{v},\n+ Value: &pb.MetricValue_Uint64Value{t},\n})\n+ case map[string]uint64:\n+ for fieldValue, metricValue := range t {\n+ // Emit data on the first call only if the field\n+ // value has been incremented. For all other\n+ // calls, emit data if the field value has been\n+ // changed from the previous emit.\n+ if (!ok && metricValue == 0) || (ok && prev.(map[string]uint64)[fieldValue] == metricValue) {\n+ continue\n+ }\n+\n+ m.Metrics = append(m.Metrics, &pb.MetricValue{\n+ Name: k,\n+ FieldValues: []string{fieldValue},\n+ Value: &pb.MetricValue_Uint64Value{metricValue},\n+ })\n+ }\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/metric/metric.proto", "new_path": "pkg/metric/metric.proto", "diff": "@@ -48,6 +48,15 @@ message MetricMetadata {\n// units is the units of the metric value.\nUnits units = 6;\n+\n+ message Field {\n+ string field_name = 1;\n+ repeated string allowed_values = 2;\n+ }\n+\n+ // fields contains the metric fields. 
Currently a metric can have at most\n+ // one field.\n+ repeated Field fields = 7;\n}\n// MetricRegistration contains the metadata for all metrics that will be in\n@@ -66,6 +75,8 @@ message MetricValue {\noneof value {\nuint64 uint64_value = 2;\n}\n+\n+ repeated string field_values = 4;\n}\n// MetricUpdate contains new values for multiple distinct metrics.\n" }, { "change_type": "MODIFY", "old_path": "pkg/metric/metric_test.go", "new_path": "pkg/metric/metric_test.go", "diff": "@@ -61,6 +61,7 @@ func reset() {\nconst (\nfooDescription = \"Foo!\"\nbarDescription = \"Bar Baz\"\n+ counterDescription = \"Counter\"\n)\nfunc TestInitialize(t *testing.T) {\n@@ -95,7 +96,7 @@ func TestInitialize(t *testing.T) {\nfoundBar := false\nfor _, m := range mr.Metrics {\nif m.Type != pb.MetricMetadata_TYPE_UINT64 {\n- t.Errorf(\"Metadata %+v Type got %v want %v\", m, m.Type, pb.MetricMetadata_TYPE_UINT64)\n+ t.Errorf(\"Metadata %+v Type got %v want pb.MetricMetadata_TYPE_UINT64\", m, m.Type)\n}\nif !m.Cumulative {\nt.Errorf(\"Metadata %+v Cumulative got false want true\", m)\n@@ -256,3 +257,88 @@ func TestEmitMetricUpdate(t *testing.T) {\nt.Errorf(\"%v: Value got %v want 1\", m, uv.Uint64Value)\n}\n}\n+\n+func TestEmitMetricUpdateWithFields(t *testing.T) {\n+ defer reset()\n+\n+ field := Field{\n+ name: \"weirdness_type\",\n+ allowedValues: []string{\"weird1\", \"weird2\"}}\n+\n+ counter, err := NewUint64Metric(\"/weirdness\", false, pb.MetricMetadata_UNITS_NONE, counterDescription, field)\n+ if err != nil {\n+ t.Fatalf(\"NewUint64Metric got err %v want nil\", err)\n+ }\n+\n+ Initialize()\n+\n+ // Don't care about the registration metrics.\n+ emitter.Reset()\n+ EmitMetricUpdate()\n+\n+ // For metrics with fields, we do not emit data unless the value is\n+ // incremented.\n+ if len(emitter) != 0 {\n+ t.Fatalf(\"EmitMetricUpdate emitted %d events want 0\", len(emitter))\n+ }\n+\n+ counter.IncrementBy(4, \"weird1\")\n+ counter.Increment(\"weird2\")\n+\n+ emitter.Reset()\n+ EmitMetricUpdate()\n+\n+ if len(emitter) != 1 {\n+ t.Fatalf(\"EmitMetricUpdate emitted %d events want 1\", len(emitter))\n+ }\n+\n+ update, ok := emitter[0].(*pb.MetricUpdate)\n+ if !ok {\n+ t.Fatalf(\"emitter %v got %T want pb.MetricUpdate\", emitter[0], emitter[0])\n+ }\n+\n+ if len(update.Metrics) != 2 {\n+ t.Errorf(\"MetricUpdate got %d metrics want 2\", len(update.Metrics))\n+ }\n+\n+ foundWeird1 := false\n+ foundWeird2 := false\n+ for i := 0; i < len(update.Metrics); i++ {\n+ m := update.Metrics[i]\n+\n+ if m.Name != \"/weirdness\" {\n+ t.Errorf(\"Metric %+v name got %q want '/weirdness'\", m, m.Name)\n+ }\n+ if len(m.FieldValues) != 1 {\n+ t.Errorf(\"MetricUpdate got %d fields want 1\", len(m.FieldValues))\n+ }\n+\n+ switch m.FieldValues[0] {\n+ case \"weird1\":\n+ uv, ok := m.Value.(*pb.MetricValue_Uint64Value)\n+ if !ok {\n+ t.Errorf(\"%+v: value %v got %T want pb.MetricValue_Uint64Value\", m, m.Value, m.Value)\n+ }\n+ if uv.Uint64Value != 4 {\n+ t.Errorf(\"%v: Value got %v want 4\", m, uv.Uint64Value)\n+ }\n+ foundWeird1 = true\n+ case \"weird2\":\n+ uv, ok := m.Value.(*pb.MetricValue_Uint64Value)\n+ if !ok {\n+ t.Errorf(\"%+v: value %v got %T want pb.MetricValue_Uint64Value\", m, m.Value, m.Value)\n+ }\n+ if uv.Uint64Value != 1 {\n+ t.Errorf(\"%v: Value got %v want 1\", m, uv.Uint64Value)\n+ }\n+ foundWeird2 = true\n+ }\n+ }\n+\n+ if !foundWeird1 {\n+ t.Errorf(\"Field value weird1 not found: %+v\", emitter)\n+ }\n+ if !foundWeird2 {\n+ t.Errorf(\"Field value weird2 not found: %+v\", emitter)\n+ }\n+}\n" }, { "change_type": 
"MODIFY", "old_path": "pkg/sentry/syscalls/linux/error.go", "new_path": "pkg/sentry/syscalls/linux/error.go", "diff": "@@ -33,6 +33,14 @@ var (\npartialResultOnce sync.Once\n)\n+// incrementPartialResultMetric increments PartialResultMetric by calling\n+// Increment(). This is added as the func Do() which is called below requires\n+// us to pass a function which does not take any arguments, whereas Increment()\n+// takes a variadic number of arguments.\n+func incrementPartialResultMetric() {\n+ partialResultMetric.Increment()\n+}\n+\n// HandleIOErrorVFS2 handles special error cases for partial results. For some\n// errors, we may consume the error and return only the partial read/write.\n//\n@@ -48,7 +56,7 @@ func HandleIOErrorVFS2(ctx context.Context, partialResult bool, ioerr, intr erro\nroot := vfs.RootFromContext(ctx)\nname, _ := fs.PathnameWithDeleted(ctx, root, f.VirtualDentry())\nlog.Traceback(\"Invalid request partialResult %v and err (type %T) %v for %s operation on %q\", partialResult, ioerr, ioerr, op, name)\n- partialResultOnce.Do(partialResultMetric.Increment)\n+ partialResultOnce.Do(incrementPartialResultMetric)\n}\nreturn nil\n}\n@@ -66,7 +74,7 @@ func handleIOError(ctx context.Context, partialResult bool, ioerr, intr error, o\n// An unknown error is encountered with a partial read/write.\nname, _ := f.Dirent.FullName(nil /* ignore chroot */)\nlog.Traceback(\"Invalid request partialResult %v and err (type %T) %v for %s operation on %q, %T\", partialResult, ioerr, ioerr, op, name, f.FileOperations)\n- partialResultOnce.Do(partialResultMetric.Increment)\n+ partialResultOnce.Do(incrementPartialResultMetric)\n}\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tcpip.go", "new_path": "pkg/tcpip/tcpip.go", "diff": "@@ -1212,7 +1212,7 @@ func (s *StatCounter) Decrement() {\n}\n// Value returns the current value of the counter.\n-func (s *StatCounter) Value() uint64 {\n+func (s *StatCounter) Value(name ...string) uint64 {\nreturn atomic.LoadUint64(&s.count)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Add field support to the sentry metrics. Fields allow counter metrics to have multiple tabular values. At most one field is supported at the moment. PiperOrigin-RevId: 368767040
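A counter with one field, as described above, is essentially a small table: one value per allowed field value, with increments rejected for values outside the allowed set. A toy illustration of that idea (this is not the gvisor metric package API, just the underlying concept):

package main

import (
	"fmt"
	"sync"
)

type fieldedCounter struct {
	mu     sync.Mutex
	values map[string]uint64 // keyed by the single field's value
}

func newFieldedCounter(allowed ...string) *fieldedCounter {
	c := &fieldedCounter{values: make(map[string]uint64)}
	for _, v := range allowed {
		c.values[v] = 0
	}
	return c
}

func (c *fieldedCounter) Increment(fieldValue string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if _, ok := c.values[fieldValue]; !ok {
		panic("field value not allowed: " + fieldValue)
	}
	c.values[fieldValue]++
}

func main() {
	weirdness := newFieldedCounter("weird1", "weird2")
	weirdness.Increment("weird1")
	weirdness.Increment("weird1")
	weirdness.Increment("weird2")
	fmt.Println(weirdness.values) // map[weird1:2 weird2:1]
}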
260,001
15.04.2021 22:10:17
25,200
c980fe573d7a3488dc27c58f84aecf9ae1814f49
Add verity ioctl test for mount with root hash
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/verity_ioctl.cc", "new_path": "test/syscalls/linux/verity_ioctl.cc", "diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+#include <stdint.h>\n#include <sys/mount.h>\n+#include <iomanip>\n+#include <sstream>\n+\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"test/util/capability_util.h\"\n@@ -49,8 +53,9 @@ struct fsverity_digest {\n__u8 digest[];\n};\n-const int fsverity_max_digest_size = 64;\n-const int fsverity_default_digest_size = 32;\n+constexpr int kMaxDigestSize = 64;\n+constexpr int kDefaultDigestSize = 32;\n+constexpr char kContents[] = \"foobarbaz\";\nclass IoctlTest : public ::testing::Test {\nprotected:\n@@ -65,7 +70,6 @@ class IoctlTest : public ::testing::Test {\nSyscallSucceeds());\n// Create a new file in the tmpfs mount.\n- constexpr char kContents[] = \"foobarbaz\";\nfile_ = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateFileWith(tmpfs_dir_.path(), kContents, 0777));\nfilename_ = Basename(file_.path());\n@@ -76,17 +80,26 @@ class IoctlTest : public ::testing::Test {\nstd::string filename_;\n};\n+// Provide a function to convert bytes to hex string, since\n+// absl::BytesToHexString does not seem to be compatible with golang\n+// hex.DecodeString used in verity due to zero-padding.\n+std::string BytesToHexString(uint8_t bytes[], int size) {\n+ std::stringstream ss;\n+ ss << std::hex;\n+ for (int i = 0; i < size; ++i) {\n+ ss << std::setw(2) << std::setfill('0') << static_cast<int>(bytes[i]);\n+ }\n+ return ss.str();\n+}\n+\nTEST_F(IoctlTest, Enable) {\n- // mount a verity fs on the existing tmpfs mount.\n+ // Mount a verity fs on the existing tmpfs mount.\nstd::string mount_opts = \"lower_path=\" + tmpfs_dir_.path();\nauto const verity_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\nASSERT_THAT(\nmount(\"\", verity_dir.path().c_str(), \"verity\", 0, mount_opts.c_str()),\nSyscallSucceeds());\n- printf(\"verity path: %s, filename: %s\\n\", verity_dir.path().c_str(),\n- filename_.c_str());\n- fflush(nullptr);\n// Confirm that the verity flag is absent.\nint flag = 0;\nauto const fd = ASSERT_NO_ERRNO_AND_VALUE(\n@@ -101,7 +114,7 @@ TEST_F(IoctlTest, Enable) {\n}\nTEST_F(IoctlTest, Measure) {\n- // mount a verity fs on the existing tmpfs mount.\n+ // Mount a verity fs on the existing tmpfs mount.\nstd::string mount_opts = \"lower_path=\" + tmpfs_dir_.path();\nauto const verity_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\nASSERT_THAT(\n@@ -111,11 +124,10 @@ TEST_F(IoctlTest, Measure) {\n// Confirm that the file cannot be measured.\nauto const fd = ASSERT_NO_ERRNO_AND_VALUE(\nOpen(JoinPath(verity_dir.path(), filename_), O_RDONLY, 0777));\n- int digest_size = sizeof(struct fsverity_digest) + fsverity_max_digest_size;\n+ uint8_t digest_array[sizeof(struct fsverity_digest) + kMaxDigestSize] = {0};\nstruct fsverity_digest* digest =\n- reinterpret_cast<struct fsverity_digest *>(malloc(digest_size));\n- memset(digest, 0, digest_size);\n- digest->digest_size = fsverity_max_digest_size;\n+ reinterpret_cast<struct fsverity_digest*>(digest_array);\n+ digest->digest_size = kMaxDigestSize;\nASSERT_THAT(ioctl(fd.get(), FS_IOC_MEASURE_VERITY, digest),\nSyscallFailsWithErrno(ENODATA));\n@@ -123,8 +135,51 @@ TEST_F(IoctlTest, Measure) {\nASSERT_THAT(ioctl(fd.get(), FS_IOC_ENABLE_VERITY), SyscallSucceeds());\nASSERT_THAT(ioctl(fd.get(), FS_IOC_MEASURE_VERITY, digest),\nSyscallSucceeds());\n- EXPECT_EQ(digest->digest_size, 
fsverity_default_digest_size);\n- free(digest);\n+ EXPECT_EQ(digest->digest_size, kDefaultDigestSize);\n+}\n+\n+TEST_F(IoctlTest, Mount) {\n+ // Mount a verity fs on the existing tmpfs mount.\n+ std::string mount_opts = \"lower_path=\" + tmpfs_dir_.path();\n+ auto verity_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ ASSERT_THAT(\n+ mount(\"\", verity_dir.path().c_str(), \"verity\", 0, mount_opts.c_str()),\n+ SyscallSucceeds());\n+\n+ // Enable both the file and the directory.\n+ auto const fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Open(JoinPath(verity_dir.path(), filename_), O_RDONLY, 0777));\n+ ASSERT_THAT(ioctl(fd.get(), FS_IOC_ENABLE_VERITY), SyscallSucceeds());\n+ auto const dir_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(verity_dir.path(), O_RDONLY, 0777));\n+ ASSERT_THAT(ioctl(dir_fd.get(), FS_IOC_ENABLE_VERITY), SyscallSucceeds());\n+\n+ // Measure the root hash.\n+ uint8_t digest_array[sizeof(struct fsverity_digest) + kMaxDigestSize] = {0};\n+ struct fsverity_digest* digest =\n+ reinterpret_cast<struct fsverity_digest*>(digest_array);\n+ digest->digest_size = kMaxDigestSize;\n+ ASSERT_THAT(ioctl(dir_fd.get(), FS_IOC_MEASURE_VERITY, digest),\n+ SyscallSucceeds());\n+\n+ // Mount a verity fs with specified root hash.\n+ mount_opts +=\n+ \",root_hash=\" + BytesToHexString(digest->digest, digest->digest_size);\n+ auto verity_with_hash_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ ASSERT_THAT(mount(\"\", verity_with_hash_dir.path().c_str(), \"verity\", 0,\n+ mount_opts.c_str()),\n+ SyscallSucceeds());\n+\n+ // Make sure the file can be open and read in the mounted verity fs.\n+ auto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Open(JoinPath(verity_with_hash_dir.path(), filename_), O_RDONLY, 0777));\n+ char buf[16];\n+ EXPECT_THAT(ReadFd(fd.get(), buf, sizeof(kContents)), SyscallSucceeds());\n+\n+ // Verity directories should not be deleted. Release the TempPath objects to\n+ // prevent those directories from being deleted by the destructor.\n+ verity_dir.release();\n+ verity_with_hash_dir.release();\n}\n} // namespace\n" } ]
Go
Apache License 2.0
google/gvisor
Add verity ioctl test for mount with root hash PiperOrigin-RevId: 368779532
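The flow the test above exercises is: enable verity, measure the root digest with FS_IOC_MEASURE_VERITY, hex-encode it, and pass it back as the root_hash mount option. A rough Go sketch of just the encoding/decoding side (the digest bytes and lower path are made up; the option names follow the test):

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	// Stand-in digest; in the test it comes from FS_IOC_MEASURE_VERITY.
	digest := []byte{0x0a, 0x1b, 0x2c}

	// Build the mount data string with the hex-encoded root hash.
	opts := fmt.Sprintf("lower_path=/tmp/lower,root_hash=%s", hex.EncodeToString(digest))
	fmt.Println(opts) // lower_path=/tmp/lower,root_hash=0a1b2c

	// hex.EncodeToString always emits two lowercase digits per byte, which is
	// what hex.DecodeString on the filesystem side expects (hence the
	// zero-padded helper added in the C++ test).
	raw, err := hex.DecodeString("0a1b2c")
	fmt.Println(raw, err) // [10 27 44] <nil>
}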
259,860
16.04.2021 14:26:49
25,200
81ff6bd9213ebb6752fae97fcbc6037323fa6811
Use size_t instead of C integer types.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc_net.cc", "new_path": "test/syscalls/linux/proc_net.cc", "diff": "@@ -419,14 +419,14 @@ TEST(ProcNetSnmp, CheckNetStat) {\nint name_count = 0;\nint value_count = 0;\nstd::vector<absl::string_view> lines = absl::StrSplit(contents, '\\n');\n- for (long unsigned int i = 0; i + 1 < lines.size(); i += 2) {\n+ for (size_t i = 0; i + 1 < lines.size(); i += 2) {\nstd::vector<absl::string_view> names =\nabsl::StrSplit(lines[i], absl::ByAnyChar(\"\\t \"));\nstd::vector<absl::string_view> values =\nabsl::StrSplit(lines[i + 1], absl::ByAnyChar(\"\\t \"));\nEXPECT_EQ(names.size(), values.size()) << \" mismatch in lines '\" << lines[i]\n<< \"' and '\" << lines[i + 1] << \"'\";\n- for (long unsigned int j = 0; j < names.size() && j < values.size(); ++j) {\n+ for (size_t j = 0; j < names.size() && j < values.size(); ++j) {\nif (names[j] == \"TCPOrigDataSent\" || names[j] == \"TCPSynRetrans\" ||\nnames[j] == \"TCPDSACKRecv\" || names[j] == \"TCPDSACKOfoRecv\") {\n++name_count;\n@@ -456,14 +456,14 @@ TEST(ProcNetSnmp, CheckSnmp) {\nint name_count = 0;\nint value_count = 0;\nstd::vector<absl::string_view> lines = absl::StrSplit(contents, '\\n');\n- for (long unsigned int i = 0; i + 1 < lines.size(); i += 2) {\n+ for (size_t i = 0; i + 1 < lines.size(); i += 2) {\nstd::vector<absl::string_view> names =\nabsl::StrSplit(lines[i], absl::ByAnyChar(\"\\t \"));\nstd::vector<absl::string_view> values =\nabsl::StrSplit(lines[i + 1], absl::ByAnyChar(\"\\t \"));\nEXPECT_EQ(names.size(), values.size()) << \" mismatch in lines '\" << lines[i]\n<< \"' and '\" << lines[i + 1] << \"'\";\n- for (long unsigned int j = 0; j < names.size() && j < values.size(); ++j) {\n+ for (size_t j = 0; j < names.size() && j < values.size(); ++j) {\nif (names[j] == \"RetransSegs\") {\n++name_count;\nint64_t val;\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc_net_unix.cc", "new_path": "test/syscalls/linux/proc_net_unix.cc", "diff": "@@ -182,7 +182,7 @@ PosixErrorOr<std::vector<UnixEntry>> ProcNetUnixEntries() {\n// Returns true on match, and sets 'match' to point to the matching entry.\nbool FindBy(std::vector<UnixEntry> entries, UnixEntry* match,\nstd::function<bool(const UnixEntry&)> predicate) {\n- for (long unsigned int i = 0; i < entries.size(); ++i) {\n+ for (size_t i = 0; i < entries.size(); ++i) {\nif (predicate(entries[i])) {\n*match = entries[i];\nreturn true;\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc_pid_uid_gid_map.cc", "new_path": "test/syscalls/linux/proc_pid_uid_gid_map.cc", "diff": "@@ -203,8 +203,9 @@ TEST_P(ProcSelfUidGidMapTest, IdentityMapOwnID) {\nEXPECT_THAT(\nInNewUserNamespaceWithMapFD([&](int fd) {\nDenySelfSetgroups();\n- TEST_PCHECK(static_cast<long unsigned int>(\n- write(fd, line.c_str(), line.size())) == line.size());\n+ size_t n;\n+ TEST_PCHECK((n = write(fd, line.c_str(), line.size())) != -1);\n+ TEST_CHECK(n == line.size());\n}),\nIsPosixErrorOkAndHolds(0));\n}\n@@ -221,8 +222,9 @@ TEST_P(ProcSelfUidGidMapTest, TrailingNewlineAndNULIgnored) {\nDenySelfSetgroups();\n// The write should return the full size of the write, even though\n// characters after the NUL were ignored.\n- TEST_PCHECK(static_cast<long unsigned int>(\n- write(fd, line.c_str(), line.size())) == line.size());\n+ size_t n;\n+ TEST_PCHECK((n = write(fd, line.c_str(), line.size())) != -1);\n+ TEST_CHECK(n == line.size());\n}),\nIsPosixErrorOkAndHolds(0));\n}\n" }, { "change_type": "MODIFY", "old_path": 
"test/syscalls/linux/socket.cc", "new_path": "test/syscalls/linux/socket.cc", "diff": "@@ -47,7 +47,7 @@ TEST(SocketTest, ProtocolUnix) {\n{AF_UNIX, SOCK_SEQPACKET, PF_UNIX},\n{AF_UNIX, SOCK_DGRAM, PF_UNIX},\n};\n- for (long unsigned int i = 0; i < ABSL_ARRAYSIZE(tests); i++) {\n+ for (size_t i = 0; i < ABSL_ARRAYSIZE(tests); i++) {\nASSERT_NO_ERRNO_AND_VALUE(\nSocket(tests[i].domain, tests[i].type, tests[i].protocol));\n}\n@@ -60,7 +60,7 @@ TEST(SocketTest, ProtocolInet) {\n{AF_INET, SOCK_DGRAM, IPPROTO_UDP},\n{AF_INET, SOCK_STREAM, IPPROTO_TCP},\n};\n- for (long unsigned int i = 0; i < ABSL_ARRAYSIZE(tests); i++) {\n+ for (size_t i = 0; i < ABSL_ARRAYSIZE(tests); i++) {\nASSERT_NO_ERRNO_AND_VALUE(\nSocket(tests[i].domain, tests[i].type, tests[i].protocol));\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_bind_to_device_distribution.cc", "new_path": "test/syscalls/linux/socket_bind_to_device_distribution.cc", "diff": "@@ -166,7 +166,7 @@ TEST_P(BindToDeviceDistributionTest, Tcp) {\nstd::vector<std::unique_ptr<ScopedThread>> listen_threads(\nlistener_fds.size());\n- for (long unsigned int i = 0; i < listener_fds.size(); i++) {\n+ for (size_t i = 0; i < listener_fds.size(); i++) {\nlisten_threads[i] = absl::make_unique<ScopedThread>(\n[&listener_fds, &accept_counts, &connects_received, i,\nkConnectAttempts]() {\n@@ -218,7 +218,7 @@ TEST_P(BindToDeviceDistributionTest, Tcp) {\nlisten_thread->Join();\n}\n// Check that connections are distributed correctly among listening sockets.\n- for (long unsigned int i = 0; i < accept_counts.size(); i++) {\n+ for (size_t i = 0; i < accept_counts.size(); i++) {\nEXPECT_THAT(\naccept_counts[i],\nEquivalentWithin(static_cast<int>(kConnectAttempts *\n@@ -289,7 +289,7 @@ TEST_P(BindToDeviceDistributionTest, Udp) {\nstd::vector<std::unique_ptr<ScopedThread>> receiver_threads(\nlistener_fds.size());\n- for (long unsigned int i = 0; i < listener_fds.size(); i++) {\n+ for (size_t i = 0; i < listener_fds.size(); i++) {\nreceiver_threads[i] = absl::make_unique<ScopedThread>(\n[&listener_fds, &packets_per_socket, &packets_received, i]() {\ndo {\n@@ -346,7 +346,7 @@ TEST_P(BindToDeviceDistributionTest, Udp) {\nreceiver_thread->Join();\n}\n// Check that packets are distributed correctly among listening sockets.\n- for (long unsigned int i = 0; i < packets_per_socket.size(); i++) {\n+ for (size_t i = 0; i < packets_per_socket.size(); i++) {\nEXPECT_THAT(\npackets_per_socket[i],\nEquivalentWithin(static_cast<int>(kConnectAttempts *\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ipv4_udp_unbound_netlink.cc", "new_path": "test/syscalls/linux/socket_ipv4_udp_unbound_netlink.cc", "diff": "@@ -171,7 +171,7 @@ TEST_P(IPv4UDPUnboundSocketNetlinkTest, ReuseAddrSubnetDirectedBroadcast) {\n// Broadcasts from each socket should be received by every socket (including\n// the sending socket).\n- for (long unsigned int w = 0; w < socks.size(); w++) {\n+ for (size_t w = 0; w < socks.size(); w++) {\nauto& w_sock = socks[w];\nASSERT_THAT(RetryEINTR(sendto)(w_sock->get(), send_buf, kSendBufSize, 0,\nAsSockAddr(&broadcast_address.addr),\n@@ -180,7 +180,7 @@ TEST_P(IPv4UDPUnboundSocketNetlinkTest, ReuseAddrSubnetDirectedBroadcast) {\n<< \"write socks[\" << w << \"]\";\n// Check that we received the packet on all sockets.\n- for (long unsigned int r = 0; r < socks.size(); r++) {\n+ for (size_t r = 0; r < socks.size(); r++) {\nauto& r_sock = socks[r];\nstruct pollfd poll_fd = {r_sock->get(), POLLIN, 0};\n" }, { "change_type": "MODIFY", 
"old_path": "test/syscalls/linux/tuntap.cc", "new_path": "test/syscalls/linux/tuntap.cc", "diff": "@@ -349,9 +349,8 @@ TEST_F(TuntapTest, PingKernel) {\n};\nwhile (1) {\ninpkt r = {};\n- int nread = read(fd.get(), &r, sizeof(r));\n- EXPECT_THAT(nread, SyscallSucceeds());\n- long unsigned int n = static_cast<long unsigned int>(nread);\n+ size_t n;\n+ EXPECT_THAT(n = read(fd.get(), &r, sizeof(r)), SyscallSucceeds());\nif (n < sizeof(pihdr)) {\nstd::cerr << \"Ignored packet, protocol: \" << r.pi.pi_protocol\n@@ -408,9 +407,8 @@ TEST_F(TuntapTest, SendUdpTriggersArpResolution) {\n};\nwhile (1) {\ninpkt r = {};\n- int nread = read(fd.get(), &r, sizeof(r));\n- EXPECT_THAT(nread, SyscallSucceeds());\n- long unsigned int n = static_cast<long unsigned int>(nread);\n+ size_t n;\n+ EXPECT_THAT(n = read(fd.get(), &r, sizeof(r)), SyscallSucceeds());\nif (n < sizeof(pihdr)) {\nstd::cerr << \"Ignored packet, protocol: \" << r.pi.pi_protocol\n" } ]
Go
Apache License 2.0
google/gvisor
Use size_t instead of C integer types. PiperOrigin-RevId: 368919557
259,898
16.04.2021 16:22:09
25,200
6241f89f49820c193213b2d395bb09030409166d
Include logs for packetimpact tests that are expected to fail
[ { "change_type": "MODIFY", "old_path": "test/packetimpact/runner/dut.go", "new_path": "test/packetimpact/runner/dut.go", "diff": "@@ -369,7 +369,6 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co\n\"--dut_infos_json\", string(dutInfosBytes),\n)\ntestbenchLogs, err := testbenchContainer.Exec(ctx, dockerutil.ExecOpts{}, testArgs...)\n- if (err != nil) != expectFailure {\nvar dutLogs string\nfor i, dut := range duts {\nlogs, err := dut.Logs(ctx)\n@@ -384,15 +383,18 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co\n`, dutLogs, i, logs, i)\n}\n-\n- t.Errorf(`test error: %v, expect failure: %t\n-\n+ testLogs := fmt.Sprintf(`\n%s====== Begin of Testbench Logs ======\n%s\n-====== End of Testbench Logs ======`,\n- err, expectFailure, dutLogs, testbenchLogs)\n+====== End of Testbench Logs ======`, dutLogs, testbenchLogs)\n+ if (err != nil) != expectFailure {\n+ t.Errorf(`test error: %v, expect failure: %t\n+%s`, err, expectFailure, testLogs)\n+ } else if expectFailure {\n+ t.Logf(`test failed as expected: %v\n+%s`, err, testLogs)\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Include logs for packetimpact tests that are expected to fail PiperOrigin-RevId: 368938936
259,891
16.04.2021 16:26:31
25,200
32c18f443f567dac21465b3999d1a18b886891d1
Enlarge port range and fix integer overflow Also count failed TCP port allocations
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack.go", "new_path": "pkg/sentry/socket/netstack/netstack.go", "diff": "@@ -242,6 +242,7 @@ var Metrics = tcpip.Stats{\nFastRetransmit: mustCreateMetric(\"/netstack/tcp/fast_retransmit\", \"Number of TCP segments which were fast retransmitted.\"),\nTimeouts: mustCreateMetric(\"/netstack/tcp/timeouts\", \"Number of times RTO expired.\"),\nChecksumErrors: mustCreateMetric(\"/netstack/tcp/checksum_errors\", \"Number of segments dropped due to bad checksums.\"),\n+ FailedPortReservations: mustCreateMetric(\"/netstack/tcp/failed_port_reservations\", \"Number of time TCP failed to reserve a port.\"),\n},\nUDP: tcpip.UDPStats{\nPacketsReceived: mustCreateMetric(\"/netstack/udp/packets_received\", \"Number of UDP datagrams received via HandlePacket.\"),\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/ports/ports.go", "new_path": "pkg/tcpip/ports/ports.go", "diff": "package ports\nimport (\n+ \"math\"\n\"math/rand\"\n\"sync/atomic\"\n@@ -24,7 +25,10 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip\"\n)\n-const anyIPAddress tcpip.Address = \"\"\n+const (\n+ firstEphemeral = 16000\n+ anyIPAddress tcpip.Address = \"\"\n+)\n// Reservation describes a port reservation.\ntype Reservation struct {\n@@ -220,10 +224,8 @@ type PortManager struct {\nfunc NewPortManager() *PortManager {\nreturn &PortManager{\nallocatedPorts: make(map[portDescriptor]addrToDevice),\n- // Match Linux's default ephemeral range. See:\n- // https://github.com/torvalds/linux/blob/e54937963fa249595824439dc839c948188dea83/net/ipv4/af_inet.c#L1842\n- firstEphemeral: 32768,\n- numEphemeral: 28232,\n+ firstEphemeral: firstEphemeral,\n+ numEphemeral: math.MaxUint16 - firstEphemeral + 1,\n}\n}\n@@ -242,13 +244,13 @@ func (pm *PortManager) PickEphemeralPort(testPort PortTester) (port uint16, err\nnumEphemeral := pm.numEphemeral\npm.ephemeralMu.RUnlock()\n- offset := uint16(rand.Int31n(int32(numEphemeral)))\n+ offset := uint32(rand.Int31n(int32(numEphemeral)))\nreturn pickEphemeralPort(offset, firstEphemeral, numEphemeral, testPort)\n}\n// portHint atomically reads and returns the pm.hint value.\n-func (pm *PortManager) portHint() uint16 {\n- return uint16(atomic.LoadUint32(&pm.hint))\n+func (pm *PortManager) portHint() uint32 {\n+ return atomic.LoadUint32(&pm.hint)\n}\n// incPortHint atomically increments pm.hint by 1.\n@@ -260,7 +262,7 @@ func (pm *PortManager) incPortHint() {\n// iterates over all ephemeral ports, allowing the caller to decide whether a\n// given port is suitable for its needs and stopping when a port is found or an\n// error occurs.\n-func (pm *PortManager) PickEphemeralPortStable(offset uint16, testPort PortTester) (port uint16, err tcpip.Error) {\n+func (pm *PortManager) PickEphemeralPortStable(offset uint32, testPort PortTester) (port uint16, err tcpip.Error) {\npm.ephemeralMu.RLock()\nfirstEphemeral := pm.firstEphemeral\nnumEphemeral := pm.numEphemeral\n@@ -277,9 +279,9 @@ func (pm *PortManager) PickEphemeralPortStable(offset uint16, testPort PortTeste\n// and iterates over the number of ports specified by count and allows the\n// caller to decide whether a given port is suitable for its needs, and stopping\n// when a port is found or an error occurs.\n-func pickEphemeralPort(offset, first, count uint16, testPort PortTester) (port uint16, err tcpip.Error) {\n- for i := uint16(0); i < count; i++ {\n- port = first + (offset+i)%count\n+func pickEphemeralPort(offset uint32, first, count uint16, testPort PortTester) (port uint16, err tcpip.Error) 
{\n+ for i := uint32(0); i < uint32(count); i++ {\n+ port := uint16(uint32(first) + (offset+i)%uint32(count))\nok, err := testPort(port)\nif err != nil {\nreturn 0, err\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/ports/ports_test.go", "new_path": "pkg/tcpip/ports/ports_test.go", "diff": "package ports\nimport (\n+ \"math\"\n\"math/rand\"\n\"testing\"\n@@ -482,7 +483,7 @@ func TestPickEphemeralPortStable(t *testing.T) {\nif err := pm.SetPortRange(firstEphemeral, firstEphemeral+numEphemeralPorts); err != nil {\nt.Fatalf(\"failed to set ephemeral port range: %s\", err)\n}\n- portOffset := uint16(rand.Int31n(int32(numEphemeralPorts)))\n+ portOffset := uint32(rand.Int31n(int32(numEphemeralPorts)))\nport, err := pm.PickEphemeralPortStable(portOffset, test.f)\nif diff := cmp.Diff(test.wantErr, err); diff != \"\" {\nt.Fatalf(\"unexpected error from PickEphemeralPort(..), (-want, +got):\\n%s\", diff)\n@@ -493,3 +494,29 @@ func TestPickEphemeralPortStable(t *testing.T) {\n})\n}\n}\n+\n+// TestOverflow addresses b/183593432, wherein an overflowing uint16 causes a\n+// port allocation failure.\n+func TestOverflow(t *testing.T) {\n+ // Use a small range and start at offsets that will cause an overflow.\n+ count := uint16(50)\n+ for offset := uint32(math.MaxUint16 - count); offset < math.MaxUint16; offset++ {\n+ reservedPorts := make(map[uint16]struct{})\n+ // Ensure we can reserve everything in the allowed range.\n+ for i := uint16(0); i < count; i++ {\n+ port, err := pickEphemeralPort(offset, firstEphemeral, count, func(port uint16) (bool, tcpip.Error) {\n+ if _, ok := reservedPorts[port]; !ok {\n+ reservedPorts[port] = struct{}{}\n+ return true, nil\n+ }\n+ return false, nil\n+ })\n+ if err != nil {\n+ t.Fatalf(\"port picking failed at iteration %d, for offset %d, len(reserved): %+v\", i, offset, len(reservedPorts))\n+ }\n+ if port < firstEphemeral || port > firstEphemeral+count {\n+ t.Fatalf(\"reserved port %d, which is not in range [%d, %d]\", port, firstEphemeral, firstEphemeral+count-1)\n+ }\n+ }\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tcpip.go", "new_path": "pkg/tcpip/tcpip.go", "diff": "@@ -1732,6 +1732,10 @@ type TCPStats struct {\n// ChecksumErrors is the number of segments dropped due to bad checksums.\nChecksumErrors *StatCounter\n+\n+ // FailedPortReservations is the number of times TCP failed to reserve\n+ // a port.\n+ FailedPortReservations *StatCounter\n}\n// UDPStats collects UDP-specific stats.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/accept.go", "new_path": "pkg/tcpip/transport/tcp/accept.go", "diff": "@@ -455,6 +455,7 @@ func (e *endpoint) reserveTupleLocked() bool {\nDest: dest,\n}\nif !e.stack.ReserveTuple(portRes) {\n+ e.stack.Stats().TCP.FailedPortReservations.Increment()\nreturn false\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -2251,7 +2251,7 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) tcp\npanic(err)\n}\n}\n- portOffset := uint16(h.Sum32())\n+ portOffset := h.Sum32()\nvar twReuse tcpip.TCPTimeWaitReuseOption\nif err := e.stack.TransportProtocolOption(ProtocolNumber, &twReuse); err != nil {\n@@ -2362,6 +2362,7 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) tcp\ne.boundDest = addr\nreturn true, nil\n}); err != nil {\n+ e.stack.Stats().TCP.FailedPortReservations.Increment()\nreturn err\n}\n}\n@@ -2685,6 +2686,7 @@ func (e *endpoint) 
bindLocked(addr tcpip.FullAddress) (err tcpip.Error) {\nreturn true, nil\n})\nif err != nil {\n+ e.stack.Stats().TCP.FailedPortReservations.Increment()\nreturn err\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Enlarge port range and fix integer overflow Also count failed TCP port allocations PiperOrigin-RevId: 368939619
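The overflow fixed above is in the ephemeral port arithmetic: with everything in uint16, offset+i can wrap at 65536, so a full sweep of the candidate indices no longer visits every port in the range and allocation can fail even when free ports remain. A minimal self-contained demonstration (the constants mirror the regression test; everything else is illustrative):

package main

import "fmt"

const (
	first = uint16(16000)
	count = uint16(50)
)

// coverage counts how many distinct ports a pick function visits over a full
// sweep of the candidate indices.
func coverage(pick func(i uint16) uint16) int {
	seen := map[uint16]bool{}
	for i := uint16(0); i < count; i++ {
		seen[pick(i)] = true
	}
	return len(seen)
}

func main() {
	offset := uint16(65530) // near the top of the uint16 range

	// Old behavior: all uint16, so offset+i silently wraps and some ports in
	// [first, first+count) are never tried.
	fmt.Println(coverage(func(i uint16) uint16 {
		return first + (offset+i)%count
	})) // prints 44: fewer than the 50 ports in the range

	// Fixed behavior: widen the arithmetic to uint32 before reducing mod count.
	fmt.Println(coverage(func(i uint16) uint16 {
		return uint16(uint32(first) + (uint32(offset)+uint32(i))%uint32(count))
	})) // prints 50: every port in the range is visited
}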
259,853
19.04.2021 12:59:32
25,200
b0333d33a206b3a3cf3bcc90e44793708ed5cb7a
Optimize safemem.Zero There is a loop that fills a byte array with zeros. Let's use copy() instead of setting elements one by one. The new implementation is two times faster than the previous one and it is more than 10x faster with the race detector. Reported-by:
[ { "change_type": "MODIFY", "old_path": "pkg/safemem/BUILD", "new_path": "pkg/safemem/BUILD", "diff": "@@ -14,6 +14,7 @@ go_library(\ndeps = [\n\"//pkg/gohacks\",\n\"//pkg/safecopy\",\n+ \"//pkg/sync\",\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/safemem/block_unsafe.go", "new_path": "pkg/safemem/block_unsafe.go", "diff": "@@ -20,6 +20,7 @@ import (\n\"gvisor.dev/gvisor/pkg/gohacks\"\n\"gvisor.dev/gvisor/pkg/safecopy\"\n+ \"gvisor.dev/gvisor/pkg/sync\"\n)\n// A Block is a range of contiguous bytes, similar to []byte but with the\n@@ -223,9 +224,23 @@ func Copy(dst, src Block) (int, error) {\nfunc Zero(dst Block) (int, error) {\nif !dst.needSafecopy {\nbs := dst.ToSlice()\n+ if !sync.RaceEnabled {\n+ // If the race detector isn't enabled, the golang\n+ // compiler replaces the next loop with memclr\n+ // (https://github.com/golang/go/issues/5373).\nfor i := range bs {\nbs[i] = 0\n}\n+ } else {\n+ bsLen := len(bs)\n+ if bsLen == 0 {\n+ return 0, nil\n+ }\n+ bs[0] = 0\n+ for i := 1; i < bsLen; i *= 2 {\n+ copy(bs[i:], bs[:i])\n+ }\n+ }\nreturn len(bs), nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Optimize safemem.Zero There is a loop that fills a byte array with zeros. Let's use copy() instead of setting elements one by one. The new implementation is two times faster than the previous one and it is more than 10x faster with the race detector. Reported-by: [email protected] PiperOrigin-RevId: 369283919
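The optimization above replaces a byte-at-a-time zeroing loop with copy-doubling: zero the first byte, then repeatedly copy the already-zeroed prefix over the following bytes, doubling the cleared region each step. A standalone sketch of just that technique (not the safemem code itself, which also keeps the plain loop for non-race builds where the compiler emits memclr):

package main

import "fmt"

// zeroByDoubling clears bs by copying the already-zeroed prefix forward,
// doubling its length on every iteration.
func zeroByDoubling(bs []byte) {
	if len(bs) == 0 {
		return
	}
	bs[0] = 0
	for i := 1; i < len(bs); i *= 2 {
		copy(bs[i:], bs[:i])
	}
}

func main() {
	buf := []byte{1, 2, 3, 4, 5, 6, 7}
	zeroByDoubling(buf)
	fmt.Println(buf) // [0 0 0 0 0 0 0]
}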
260,001
19.04.2021 18:48:47
25,200
82eecd2e93a4d2c42ca2b0b88f721f186c8712d9
Change verity action to be a fs member Currently the verity action is a global variable, which causes the same action for all verity mounts, and is overwritten for each new verity mount. Changed it to a member of verity fs.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/filesystem.go", "new_path": "pkg/sentry/fsimpl/verity/filesystem.go", "diff": "@@ -200,7 +200,7 @@ func (fs *filesystem) verifyChildLocked(ctx context.Context, parent *dentry, chi\n// contains the expected xattrs. If the file or the xattr does not\n// exist, it indicates unexpected modifications to the file system.\nif err == syserror.ENOENT || err == syserror.ENODATA {\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s for %s: %v\", merkleOffsetInParentXattr, childPath, err))\n+ return nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s for %s: %v\", merkleOffsetInParentXattr, childPath, err))\n}\nif err != nil {\nreturn nil, err\n@@ -209,7 +209,7 @@ func (fs *filesystem) verifyChildLocked(ctx context.Context, parent *dentry, chi\n// unexpected modifications to the file system.\noffset, err := strconv.Atoi(off)\nif err != nil {\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s for %s to int: %v\", merkleOffsetInParentXattr, childPath, err))\n+ return nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s for %s to int: %v\", merkleOffsetInParentXattr, childPath, err))\n}\n// Open parent Merkle tree file to read and verify child's hash.\n@@ -223,7 +223,7 @@ func (fs *filesystem) verifyChildLocked(ctx context.Context, parent *dentry, chi\n// The parent Merkle tree file should have been created. If it's\n// missing, it indicates an unexpected modification to the file system.\nif err == syserror.ENOENT {\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"Failed to open parent Merkle file for %s: %v\", childPath, err))\n+ return nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to open parent Merkle file for %s: %v\", childPath, err))\n}\nif err != nil {\nreturn nil, err\n@@ -243,7 +243,7 @@ func (fs *filesystem) verifyChildLocked(ctx context.Context, parent *dentry, chi\n// contains the expected xattrs. 
If the file or the xattr does not\n// exist, it indicates unexpected modifications to the file system.\nif err == syserror.ENOENT || err == syserror.ENODATA {\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s for %s: %v\", merkleSizeXattr, childPath, err))\n+ return nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s for %s: %v\", merkleSizeXattr, childPath, err))\n}\nif err != nil {\nreturn nil, err\n@@ -253,7 +253,7 @@ func (fs *filesystem) verifyChildLocked(ctx context.Context, parent *dentry, chi\n// unexpected modifications to the file system.\nparentSize, err := strconv.Atoi(dataSize)\nif err != nil {\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s for %s to int: %v\", merkleSizeXattr, childPath, err))\n+ return nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s for %s to int: %v\", merkleSizeXattr, childPath, err))\n}\nfdReader := FileReadWriteSeeker{\n@@ -266,7 +266,7 @@ func (fs *filesystem) verifyChildLocked(ctx context.Context, parent *dentry, chi\nStart: parent.lowerVD,\n}, &vfs.StatOptions{})\nif err == syserror.ENOENT {\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"Failed to get parent stat for %s: %v\", childPath, err))\n+ return nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to get parent stat for %s: %v\", childPath, err))\n}\nif err != nil {\nreturn nil, err\n@@ -296,7 +296,7 @@ func (fs *filesystem) verifyChildLocked(ctx context.Context, parent *dentry, chi\n})\nparent.hashMu.RUnlock()\nif err != nil && err != io.EOF {\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"Verification for %s failed: %v\", childPath, err))\n+ return nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Verification for %s failed: %v\", childPath, err))\n}\n// Cache child hash when it's verified the first time.\n@@ -333,7 +333,7 @@ func (fs *filesystem) verifyStatAndChildrenLocked(ctx context.Context, d *dentry\nFlags: linux.O_RDONLY,\n})\nif err == syserror.ENOENT {\n- return alertIntegrityViolation(fmt.Sprintf(\"Failed to open merkle file for %s: %v\", childPath, err))\n+ return fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to open merkle file for %s: %v\", childPath, err))\n}\nif err != nil {\nreturn err\n@@ -347,7 +347,7 @@ func (fs *filesystem) verifyStatAndChildrenLocked(ctx context.Context, d *dentry\n})\nif err == syserror.ENODATA {\n- return alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s for merkle file of %s: %v\", merkleSizeXattr, childPath, err))\n+ return fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s for merkle file of %s: %v\", merkleSizeXattr, childPath, err))\n}\nif err != nil {\nreturn err\n@@ -355,7 +355,7 @@ func (fs *filesystem) verifyStatAndChildrenLocked(ctx context.Context, d *dentry\nsize, err := strconv.Atoi(merkleSize)\nif err != nil {\n- return alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s for %s to int: %v\", merkleSizeXattr, childPath, err))\n+ return fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s for %s to int: %v\", merkleSizeXattr, childPath, err))\n}\nif d.isDir() && len(d.childrenNames) == 0 {\n@@ -365,14 +365,14 @@ func (fs *filesystem) verifyStatAndChildrenLocked(ctx context.Context, d *dentry\n})\nif err == syserror.ENODATA {\n- return alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s for merkle file of %s: %v\", childrenOffsetXattr, childPath, err))\n+ return fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s for merkle file of %s: %v\", 
childrenOffsetXattr, childPath, err))\n}\nif err != nil {\nreturn err\n}\nchildrenOffset, err := strconv.Atoi(childrenOffString)\nif err != nil {\n- return alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s to int: %v\", childrenOffsetXattr, err))\n+ return fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s to int: %v\", childrenOffsetXattr, err))\n}\nchildrenSizeString, err := fd.GetXattr(ctx, &vfs.GetXattrOptions{\n@@ -381,23 +381,23 @@ func (fs *filesystem) verifyStatAndChildrenLocked(ctx context.Context, d *dentry\n})\nif err == syserror.ENODATA {\n- return alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s for merkle file of %s: %v\", childrenSizeXattr, childPath, err))\n+ return fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s for merkle file of %s: %v\", childrenSizeXattr, childPath, err))\n}\nif err != nil {\nreturn err\n}\nchildrenSize, err := strconv.Atoi(childrenSizeString)\nif err != nil {\n- return alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s to int: %v\", childrenSizeXattr, err))\n+ return fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s to int: %v\", childrenSizeXattr, err))\n}\nchildrenNames := make([]byte, childrenSize)\nif _, err := fd.PRead(ctx, usermem.BytesIOSequence(childrenNames), int64(childrenOffset), vfs.ReadOptions{}); err != nil {\n- return alertIntegrityViolation(fmt.Sprintf(\"Failed to read children map for %s: %v\", childPath, err))\n+ return fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to read children map for %s: %v\", childPath, err))\n}\nif err := json.Unmarshal(childrenNames, &d.childrenNames); err != nil {\n- return alertIntegrityViolation(fmt.Sprintf(\"Failed to deserialize childrenNames of %s: %v\", childPath, err))\n+ return fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to deserialize childrenNames of %s: %v\", childPath, err))\n}\n}\n@@ -442,7 +442,7 @@ func (fs *filesystem) verifyStatAndChildrenLocked(ctx context.Context, d *dentry\n}\nif _, err := merkletree.Verify(params); err != nil && err != io.EOF {\n- return alertIntegrityViolation(fmt.Sprintf(\"Verification stat for %s failed: %v\", childPath, err))\n+ return fs.alertIntegrityViolation(fmt.Sprintf(\"Verification stat for %s failed: %v\", childPath, err))\n}\nd.mode = uint32(stat.Mode)\nd.uid = stat.UID\n@@ -475,7 +475,7 @@ func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name s\n// The file was previously accessed. 
If the\n// file does not exist now, it indicates an\n// unexpected modification to the file system.\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"Target file %s is expected but missing\", path))\n+ return nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Target file %s is expected but missing\", path))\n}\nif err != nil {\nreturn nil, err\n@@ -487,7 +487,7 @@ func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name s\n// does not exist now, it indicates an unexpected\n// modification to the file system.\nif err == syserror.ENOENT {\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"Expected Merkle file for target %s but none found\", path))\n+ return nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Expected Merkle file for target %s but none found\", path))\n}\nif err != nil {\nreturn nil, err\n@@ -558,7 +558,7 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,\nchildVD, err := parent.getLowerAt(ctx, vfsObj, name)\nif parent.verityEnabled() && err == syserror.ENOENT {\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"file %s expected but not found\", parentPath+\"/\"+name))\n+ return nil, fs.alertIntegrityViolation(fmt.Sprintf(\"file %s expected but not found\", parentPath+\"/\"+name))\n}\nif err != nil {\nreturn nil, err\n@@ -572,7 +572,7 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,\nif err != nil {\nif err == syserror.ENOENT {\nif parent.verityEnabled() {\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"Merkle file for %s expected but not found\", parentPath+\"/\"+name))\n+ return nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Merkle file for %s expected but not found\", parentPath+\"/\"+name))\n}\nchildMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{\nRoot: parent.lowerVD,\n@@ -861,7 +861,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf\n// missing, it indicates an unexpected modification to the file system.\nif err != nil {\nif err == syserror.ENOENT {\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"File %s expected but not found\", path))\n+ return nil, d.fs.alertIntegrityViolation(fmt.Sprintf(\"File %s expected but not found\", path))\n}\nreturn nil, err\n}\n@@ -884,7 +884,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf\n// the file system.\nif err != nil {\nif err == syserror.ENOENT {\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"Merkle file for %s expected but not found\", path))\n+ return nil, d.fs.alertIntegrityViolation(fmt.Sprintf(\"Merkle file for %s expected but not found\", path))\n}\nreturn nil, err\n}\n@@ -909,7 +909,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf\n})\nif err != nil {\nif err == syserror.ENOENT {\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"Merkle file for %s expected but not found\", path))\n+ return nil, d.fs.alertIntegrityViolation(fmt.Sprintf(\"Merkle file for %s expected but not found\", path))\n}\nreturn nil, err\n}\n@@ -927,7 +927,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf\nif err != nil {\nif err == syserror.ENOENT {\nparentPath, _ := d.fs.vfsfs.VirtualFilesystem().PathnameWithDeleted(ctx, d.fs.rootDentry.lowerVD, d.parent.lowerVD)\n- return nil, alertIntegrityViolation(fmt.Sprintf(\"Merkle file for %s expected but not found\", parentPath))\n+ return nil, d.fs.alertIntegrityViolation(fmt.Sprintf(\"Merkle file for %s expected but not found\", 
parentPath))\n}\nreturn nil, err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/verity/verity.go", "new_path": "pkg/sentry/fsimpl/verity/verity.go", "diff": "@@ -98,9 +98,6 @@ const (\n)\nvar (\n- // action specifies the action towards detected violation.\n- action ViolationAction\n-\n// verityMu synchronizes concurrent operations that enable verity and perform\n// verification checks.\nverityMu sync.RWMutex\n@@ -179,6 +176,9 @@ type filesystem struct {\n// system.\nalg HashAlgorithm\n+ // action specifies the action towards detected violation.\n+ action ViolationAction\n+\n// opts is the string mount options passed to opts.Data.\nopts string\n@@ -235,8 +235,8 @@ func (FilesystemType) Release(ctx context.Context) {}\n// alertIntegrityViolation alerts a violation of integrity, which usually means\n// unexpected modification to the file system is detected. In ErrorOnViolation\n// mode, it returns EIO, otherwise it panic.\n-func alertIntegrityViolation(msg string) error {\n- if action == ErrorOnViolation {\n+func (fs *filesystem) alertIntegrityViolation(msg string) error {\n+ if fs.action == ErrorOnViolation {\nreturn syserror.EIO\n}\npanic(msg)\n@@ -288,7 +288,6 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nAction: ErrorOnViolation,\n}\n}\n- action = iopts.Action\nvar lowerMount *vfs.Mount\nvar mountedLowerVD vfs.VirtualDentry\n@@ -336,6 +335,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\ncreds: creds.Fork(),\nalg: iopts.Alg,\nlowerMount: lowerMount,\n+ action: iopts.Action,\nopts: opts.Data,\nallowRuntimeEnable: iopts.AllowRuntimeEnable,\n}\n@@ -389,7 +389,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\n// the root Merkle file, or it's never generated.\nfs.vfsfs.DecRef(ctx)\nd.DecRef(ctx)\n- return nil, nil, alertIntegrityViolation(\"Failed to find root Merkle file\")\n+ return nil, nil, fs.alertIntegrityViolation(\"Failed to find root Merkle file\")\n}\n// Clear the Merkle tree file if they are to be generated at runtime.\n@@ -452,7 +452,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nSize: sizeOfStringInt32,\n})\nif err == syserror.ENOENT || err == syserror.ENODATA {\n- return nil, nil, alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s: %v\", childrenOffsetXattr, err))\n+ return nil, nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s: %v\", childrenOffsetXattr, err))\n}\nif err != nil {\nreturn nil, nil, err\n@@ -460,7 +460,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\noff, err := strconv.Atoi(offString)\nif err != nil {\n- return nil, nil, alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s to int: %v\", childrenOffsetXattr, err))\n+ return nil, nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s to int: %v\", childrenOffsetXattr, err))\n}\nsizeString, err := vfsObj.GetXattrAt(ctx, creds, &vfs.PathOperation{\n@@ -471,14 +471,14 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nSize: sizeOfStringInt32,\n})\nif err == syserror.ENOENT || err == syserror.ENODATA {\n- return nil, nil, alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s: %v\", childrenSizeXattr, err))\n+ return nil, nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s: %v\", childrenSizeXattr, err))\n}\nif err != nil {\nreturn nil, nil, err\n}\nsize, err := strconv.Atoi(sizeString)\nif err != 
nil {\n- return nil, nil, alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s to int: %v\", childrenSizeXattr, err))\n+ return nil, nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s to int: %v\", childrenSizeXattr, err))\n}\nlowerMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{\n@@ -488,7 +488,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nFlags: linux.O_RDONLY,\n})\nif err == syserror.ENOENT {\n- return nil, nil, alertIntegrityViolation(fmt.Sprintf(\"Failed to open root Merkle file: %v\", err))\n+ return nil, nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to open root Merkle file: %v\", err))\n}\nif err != nil {\nreturn nil, nil, err\n@@ -498,11 +498,11 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nchildrenNames := make([]byte, size)\nif _, err := lowerMerkleFD.PRead(ctx, usermem.BytesIOSequence(childrenNames), int64(off), vfs.ReadOptions{}); err != nil {\n- return nil, nil, alertIntegrityViolation(fmt.Sprintf(\"Failed to read root children map: %v\", err))\n+ return nil, nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to read root children map: %v\", err))\n}\nif err := json.Unmarshal(childrenNames, &d.childrenNames); err != nil {\n- return nil, nil, alertIntegrityViolation(fmt.Sprintf(\"Failed to deserialize childrenNames: %v\", err))\n+ return nil, nil, fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to deserialize childrenNames: %v\", err))\n}\nif err := fs.verifyStatAndChildrenLocked(ctx, d, stat); err != nil {\n@@ -879,7 +879,7 @@ func (fd *fileDescription) IterDirents(ctx context.Context, cb vfs.IterDirentsCa\n// Verify that the child is expected.\nif dirent.Name != \".\" && dirent.Name != \"..\" {\nif _, ok := fd.d.childrenNames[dirent.Name]; !ok {\n- return alertIntegrityViolation(fmt.Sprintf(\"Unexpected children %s\", dirent.Name))\n+ return fd.d.fs.alertIntegrityViolation(fmt.Sprintf(\"Unexpected children %s\", dirent.Name))\n}\n}\n}\n@@ -893,7 +893,7 @@ func (fd *fileDescription) IterDirents(ctx context.Context, cb vfs.IterDirentsCa\n// The result should contain all children plus \".\" and \"..\".\nif fd.d.verityEnabled() && len(ds) != len(fd.d.childrenNames)+2 {\n- return alertIntegrityViolation(fmt.Sprintf(\"Unexpected children number %d\", len(ds)))\n+ return fd.d.fs.alertIntegrityViolation(fmt.Sprintf(\"Unexpected children number %d\", len(ds)))\n}\nfor fd.off < int64(len(ds)) {\n@@ -1065,7 +1065,7 @@ func (fd *fileDescription) enableVerity(ctx context.Context) (uintptr, error) {\n// or directory other than the root, the parent Merkle tree file should\n// have also been initialized.\nif fd.lowerFD == nil || fd.merkleReader == nil || fd.merkleWriter == nil || (fd.parentMerkleWriter == nil && fd.d != fd.d.fs.rootDentry) {\n- return 0, alertIntegrityViolation(\"Unexpected verity fd: missing expected underlying fds\")\n+ return 0, fd.d.fs.alertIntegrityViolation(\"Unexpected verity fd: missing expected underlying fds\")\n}\nhash, dataSize, err := fd.generateMerkleLocked(ctx)\n@@ -1138,7 +1138,7 @@ func (fd *fileDescription) measureVerity(ctx context.Context, verityDigest hosta\nif fd.d.fs.allowRuntimeEnable {\nreturn 0, syserror.ENODATA\n}\n- return 0, alertIntegrityViolation(\"Ioctl measureVerity: no hash found\")\n+ return 0, fd.d.fs.alertIntegrityViolation(\"Ioctl measureVerity: no hash found\")\n}\n// The first part of VerityDigest is the metadata.\n@@ -1228,7 +1228,7 @@ func (fd *fileDescription) PRead(ctx context.Context, dst 
usermem.IOSequence, of\n// contains the expected xattrs. If the xattr does not exist, it\n// indicates unexpected modifications to the file system.\nif err == syserror.ENODATA {\n- return 0, alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s: %v\", merkleSizeXattr, err))\n+ return 0, fd.d.fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s: %v\", merkleSizeXattr, err))\n}\nif err != nil {\nreturn 0, err\n@@ -1238,7 +1238,7 @@ func (fd *fileDescription) PRead(ctx context.Context, dst usermem.IOSequence, of\n// unexpected modifications to the file system.\nsize, err := strconv.Atoi(dataSize)\nif err != nil {\n- return 0, alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s to int: %v\", merkleSizeXattr, err))\n+ return 0, fd.d.fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s to int: %v\", merkleSizeXattr, err))\n}\ndataReader := FileReadWriteSeeker{\n@@ -1271,7 +1271,7 @@ func (fd *fileDescription) PRead(ctx context.Context, dst usermem.IOSequence, of\n})\nfd.d.hashMu.RUnlock()\nif err != nil {\n- return 0, alertIntegrityViolation(fmt.Sprintf(\"Verification failed: %v\", err))\n+ return 0, fd.d.fs.alertIntegrityViolation(fmt.Sprintf(\"Verification failed: %v\", err))\n}\nreturn n, err\n}\n@@ -1346,7 +1346,7 @@ func (fd *fileDescription) Translate(ctx context.Context, required, optional mem\n// contains the expected xattrs. If the xattr does not exist, it\n// indicates unexpected modifications to the file system.\nif err == syserror.ENODATA {\n- return ts, alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s: %v\", merkleSizeXattr, err))\n+ return ts, fd.d.fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to get xattr %s: %v\", merkleSizeXattr, err))\n}\nif err != nil {\nreturn ts, err\n@@ -1356,7 +1356,7 @@ func (fd *fileDescription) Translate(ctx context.Context, required, optional mem\n// unexpected modifications to the file system.\nsize, err := strconv.Atoi(dataSize)\nif err != nil {\n- return ts, alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s to int: %v\", merkleSizeXattr, err))\n+ return ts, fd.d.fs.alertIntegrityViolation(fmt.Sprintf(\"Failed to convert xattr %s to int: %v\", merkleSizeXattr, err))\n}\nmerkleReader := FileReadWriteSeeker{\n@@ -1389,7 +1389,7 @@ func (fd *fileDescription) Translate(ctx context.Context, required, optional mem\nDataAndTreeInSameFile: false,\n})\nif err != nil {\n- return ts, alertIntegrityViolation(fmt.Sprintf(\"Verification failed: %v\", err))\n+ return ts, fd.d.fs.alertIntegrityViolation(fmt.Sprintf(\"Verification failed: %v\", err))\n}\n}\nreturn ts, err\n" } ]
Go
Apache License 2.0
google/gvisor
Change verity action to be a fs member Currently the verity action is a global variable, which causes the same action for all verity mounts, and is overwritten for each new verity mount. Changed it to a member of verity fs. PiperOrigin-RevId: 369348522
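The record above moves the verity violation action from a package-level variable into a field on the verity filesystem, because the global was shared and overwritten by every new mount. A minimal, self-contained Go sketch (not gVisor code; all names below are illustrative) of why the global variant misbehaves with multiple mounts while the per-filesystem field does not:

package main

import "fmt"

type ViolationAction int

const (
	ErrorOnViolation ViolationAction = iota
	PanicOnViolation
)

// Old shape: one package-level action shared by every mount; the most
// recently created mount silently overwrites it for all earlier mounts.
var globalAction ViolationAction

// New shape: each filesystem instance carries its own action.
type filesystem struct {
	action ViolationAction
}

func main() {
	fsA := &filesystem{action: ErrorOnViolation}
	globalAction = ErrorOnViolation

	fsB := &filesystem{action: PanicOnViolation}
	globalAction = PanicOnViolation // clobbers the behavior fsA asked for

	fmt.Println(globalAction, fsA.action, fsB.action) // 1 0 1
}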
259,951
20.04.2021 00:26:54
25,200
2c8379d95738bb2bc10d2cc7cead6889379e244c
Expose header methods that validate checksums This is done for IPv4, UDP and TCP headers. This also changes the packet checkers used in tests to error on zero-checksum, not sure why it was allowed before. And while I'm here, make comments' case consistent. RELNOTES: n/a Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/checker/checker.go", "new_path": "pkg/tcpip/checker/checker.go", "diff": "@@ -53,9 +53,8 @@ func IPv4(t *testing.T, b []byte, checkers ...NetworkChecker) {\nt.Error(\"Not a valid IPv4 packet\")\n}\n- xsum := ipv4.CalculateChecksum()\n- if xsum != 0 && xsum != 0xffff {\n- t.Errorf(\"Bad checksum: 0x%x, checksum in packet: 0x%x\", xsum, ipv4.Checksum())\n+ if !ipv4.IsChecksumValid() {\n+ t.Errorf(\"Bad checksum, got = %d\", ipv4.Checksum())\n}\nfor _, f := range checkers {\n@@ -400,18 +399,11 @@ func TCP(checkers ...TransportChecker) NetworkChecker {\nt.Errorf(\"Bad protocol, got = %d, want = %d\", p, header.TCPProtocolNumber)\n}\n- // Verify the checksum.\ntcp := header.TCP(last.Payload())\n- l := uint16(len(tcp))\n-\n- xsum := header.Checksum([]byte(first.SourceAddress()), 0)\n- xsum = header.Checksum([]byte(first.DestinationAddress()), xsum)\n- xsum = header.Checksum([]byte{0, byte(last.TransportProtocol())}, xsum)\n- xsum = header.Checksum([]byte{byte(l >> 8), byte(l)}, xsum)\n- xsum = header.Checksum(tcp, xsum)\n-\n- if xsum != 0 && xsum != 0xffff {\n- t.Errorf(\"Bad checksum: 0x%x, checksum in segment: 0x%x\", xsum, tcp.Checksum())\n+ payload := tcp.Payload()\n+ payloadChecksum := header.Checksum(payload, 0)\n+ if !tcp.IsChecksumValid(first.SourceAddress(), first.DestinationAddress(), payloadChecksum, uint16(len(payload))) {\n+ t.Errorf(\"Bad checksum, got = %d\", tcp.Checksum())\n}\n// Run the transport checkers.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/ipv4.go", "new_path": "pkg/tcpip/header/ipv4.go", "diff": "@@ -455,6 +455,32 @@ func IsV4LinkLocalMulticastAddress(addr tcpip.Address) bool {\nreturn ipv4LinkLocalMulticastSubnet.Contains(addr)\n}\n+// IsChecksumValid returns true iff the IPv4 header's checksum is valid.\n+func (b IPv4) IsChecksumValid() bool {\n+ // There has been some confusion regarding verifying checksums. We need\n+ // just look for negative 0 (0xffff) as the checksum, as it's not possible to\n+ // get positive 0 (0) for the checksum. Some bad implementations could get it\n+ // when doing entry replacement in the early days of the Internet,\n+ // however the lore that one needs to check for both persists.\n+ //\n+ // RFC 1624 section 1 describes the source of this confusion as:\n+ // [the partial recalculation method described in RFC 1071] computes a\n+ // result for certain cases that differs from the one obtained from\n+ // scratch (one's complement of one's complement sum of the original\n+ // fields).\n+ //\n+ // However RFC 1624 section 5 clarifies that if using the verification method\n+ // \"recommended by RFC 1071, it does not matter if an intermediate system\n+ // generated a -0 instead of +0\".\n+ //\n+ // RFC1071 page 1 specifies the verification method as:\n+ // (3) To check a checksum, the 1's complement sum is computed over the\n+ // same set of octets, including the checksum field. If the result\n+ // is all 1 bits (-0 in 1's complement arithmetic), the check\n+ // succeeds.\n+ return b.CalculateChecksum() == 0xffff\n+}\n+\n// IsV4MulticastAddress determines if the provided address is an IPv4 multicast\n// address (range 224.0.0.0 to 239.255.255.255). 
The four most significant bits\n// will be 1110 = 0xe0.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/tcp.go", "new_path": "pkg/tcpip/header/tcp.go", "diff": "@@ -216,104 +216,104 @@ const (\nTCPDefaultMSS = 536\n)\n-// SourcePort returns the \"source port\" field of the tcp header.\n+// SourcePort returns the \"source port\" field of the TCP header.\nfunc (b TCP) SourcePort() uint16 {\nreturn binary.BigEndian.Uint16(b[TCPSrcPortOffset:])\n}\n-// DestinationPort returns the \"destination port\" field of the tcp header.\n+// DestinationPort returns the \"destination port\" field of the TCP header.\nfunc (b TCP) DestinationPort() uint16 {\nreturn binary.BigEndian.Uint16(b[TCPDstPortOffset:])\n}\n-// SequenceNumber returns the \"sequence number\" field of the tcp header.\n+// SequenceNumber returns the \"sequence number\" field of the TCP header.\nfunc (b TCP) SequenceNumber() uint32 {\nreturn binary.BigEndian.Uint32(b[TCPSeqNumOffset:])\n}\n-// AckNumber returns the \"ack number\" field of the tcp header.\n+// AckNumber returns the \"ack number\" field of the TCP header.\nfunc (b TCP) AckNumber() uint32 {\nreturn binary.BigEndian.Uint32(b[TCPAckNumOffset:])\n}\n-// DataOffset returns the \"data offset\" field of the tcp header. The return\n+// DataOffset returns the \"data offset\" field of the TCP header. The return\n// value is the length of the TCP header in bytes.\nfunc (b TCP) DataOffset() uint8 {\nreturn (b[TCPDataOffset] >> 4) * 4\n}\n-// Payload returns the data in the tcp packet.\n+// Payload returns the data in the TCP packet.\nfunc (b TCP) Payload() []byte {\nreturn b[b.DataOffset():]\n}\n-// Flags returns the flags field of the tcp header.\n+// Flags returns the flags field of the TCP header.\nfunc (b TCP) Flags() TCPFlags {\nreturn TCPFlags(b[TCPFlagsOffset])\n}\n-// WindowSize returns the \"window size\" field of the tcp header.\n+// WindowSize returns the \"window size\" field of the TCP header.\nfunc (b TCP) WindowSize() uint16 {\nreturn binary.BigEndian.Uint16(b[TCPWinSizeOffset:])\n}\n-// Checksum returns the \"checksum\" field of the tcp header.\n+// Checksum returns the \"checksum\" field of the TCP header.\nfunc (b TCP) Checksum() uint16 {\nreturn binary.BigEndian.Uint16(b[TCPChecksumOffset:])\n}\n-// UrgentPointer returns the \"urgent pointer\" field of the tcp header.\n+// UrgentPointer returns the \"urgent pointer\" field of the TCP header.\nfunc (b TCP) UrgentPointer() uint16 {\nreturn binary.BigEndian.Uint16(b[TCPUrgentPtrOffset:])\n}\n-// SetSourcePort sets the \"source port\" field of the tcp header.\n+// SetSourcePort sets the \"source port\" field of the TCP header.\nfunc (b TCP) SetSourcePort(port uint16) {\nbinary.BigEndian.PutUint16(b[TCPSrcPortOffset:], port)\n}\n-// SetDestinationPort sets the \"destination port\" field of the tcp header.\n+// SetDestinationPort sets the \"destination port\" field of the TCP header.\nfunc (b TCP) SetDestinationPort(port uint16) {\nbinary.BigEndian.PutUint16(b[TCPDstPortOffset:], port)\n}\n-// SetChecksum sets the checksum field of the tcp header.\n+// SetChecksum sets the checksum field of the TCP header.\nfunc (b TCP) SetChecksum(checksum uint16) {\nbinary.BigEndian.PutUint16(b[TCPChecksumOffset:], checksum)\n}\n-// SetDataOffset sets the data offset field of the tcp header. headerLen should\n+// SetDataOffset sets the data offset field of the TCP header. 
headerLen should\n// be the length of the TCP header in bytes.\nfunc (b TCP) SetDataOffset(headerLen uint8) {\nb[TCPDataOffset] = (headerLen / 4) << 4\n}\n-// SetSequenceNumber sets the sequence number field of the tcp header.\n+// SetSequenceNumber sets the sequence number field of the TCP header.\nfunc (b TCP) SetSequenceNumber(seqNum uint32) {\nbinary.BigEndian.PutUint32(b[TCPSeqNumOffset:], seqNum)\n}\n-// SetAckNumber sets the ack number field of the tcp header.\n+// SetAckNumber sets the ack number field of the TCP header.\nfunc (b TCP) SetAckNumber(ackNum uint32) {\nbinary.BigEndian.PutUint32(b[TCPAckNumOffset:], ackNum)\n}\n-// SetFlags sets the flags field of the tcp header.\n+// SetFlags sets the flags field of the TCP header.\nfunc (b TCP) SetFlags(flags uint8) {\nb[TCPFlagsOffset] = flags\n}\n-// SetWindowSize sets the window size field of the tcp header.\n+// SetWindowSize sets the window size field of the TCP header.\nfunc (b TCP) SetWindowSize(rcvwnd uint16) {\nbinary.BigEndian.PutUint16(b[TCPWinSizeOffset:], rcvwnd)\n}\n-// SetUrgentPoiner sets the window size field of the tcp header.\n+// SetUrgentPoiner sets the window size field of the TCP header.\nfunc (b TCP) SetUrgentPoiner(urgentPointer uint16) {\nbinary.BigEndian.PutUint16(b[TCPUrgentPtrOffset:], urgentPointer)\n}\n-// CalculateChecksum calculates the checksum of the tcp segment.\n+// CalculateChecksum calculates the checksum of the TCP segment.\n// partialChecksum is the checksum of the network-layer pseudo-header\n// and the checksum of the segment data.\nfunc (b TCP) CalculateChecksum(partialChecksum uint16) uint16 {\n@@ -321,6 +321,13 @@ func (b TCP) CalculateChecksum(partialChecksum uint16) uint16 {\nreturn Checksum(b[:b.DataOffset()], partialChecksum)\n}\n+// IsChecksumValid returns true iff the TCP header's checksum is valid.\n+func (b TCP) IsChecksumValid(src, dst tcpip.Address, payloadChecksum, payloadLength uint16) bool {\n+ xsum := PseudoHeaderChecksum(TCPProtocolNumber, src, dst, uint16(b.DataOffset())+payloadLength)\n+ xsum = ChecksumCombine(xsum, payloadChecksum)\n+ return b.CalculateChecksum(xsum) == 0xffff\n+}\n+\n// Options returns a slice that holds the unparsed TCP options in the segment.\nfunc (b TCP) Options() []byte {\nreturn b[TCPMinimumSize:b.DataOffset()]\n@@ -340,7 +347,7 @@ func (b TCP) encodeSubset(seq, ack uint32, flags TCPFlags, rcvwnd uint16) {\nbinary.BigEndian.PutUint16(b[TCPWinSizeOffset:], rcvwnd)\n}\n-// Encode encodes all the fields of the tcp header.\n+// Encode encodes all the fields of the TCP header.\nfunc (b TCP) Encode(t *TCPFields) {\nb.encodeSubset(t.SeqNum, t.AckNum, t.Flags, t.WindowSize)\nbinary.BigEndian.PutUint16(b[TCPSrcPortOffset:], t.SrcPort)\n@@ -350,7 +357,7 @@ func (b TCP) Encode(t *TCPFields) {\nbinary.BigEndian.PutUint16(b[TCPUrgentPtrOffset:], t.UrgentPointer)\n}\n-// EncodePartial updates a subset of the fields of the tcp header. It is useful\n+// EncodePartial updates a subset of the fields of the TCP header. It is useful\n// in cases when similar segments are produced.\nfunc (b TCP) EncodePartial(partialChecksum, length uint16, seqnum, acknum uint32, flags TCPFlags, rcvwnd uint16) {\n// Add the total length and \"flags\" field contributions to the checksum.\n@@ -374,7 +381,7 @@ func (b TCP) EncodePartial(partialChecksum, length uint16, seqnum, acknum uint32\n}\n// ParseSynOptions parses the options received in a SYN segment and returns the\n-// relevant ones. opts should point to the option part of the TCP Header.\n+// relevant ones. 
opts should point to the option part of the TCP header.\nfunc ParseSynOptions(opts []byte, isAck bool) TCPSynOptions {\nlimit := len(opts)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/udp.go", "new_path": "pkg/tcpip/header/udp.go", "diff": "@@ -64,17 +64,17 @@ const (\nUDPProtocolNumber tcpip.TransportProtocolNumber = 17\n)\n-// SourcePort returns the \"source port\" field of the udp header.\n+// SourcePort returns the \"source port\" field of the UDP header.\nfunc (b UDP) SourcePort() uint16 {\nreturn binary.BigEndian.Uint16(b[udpSrcPort:])\n}\n-// DestinationPort returns the \"destination port\" field of the udp header.\n+// DestinationPort returns the \"destination port\" field of the UDP header.\nfunc (b UDP) DestinationPort() uint16 {\nreturn binary.BigEndian.Uint16(b[udpDstPort:])\n}\n-// Length returns the \"length\" field of the udp header.\n+// Length returns the \"length\" field of the UDP header.\nfunc (b UDP) Length() uint16 {\nreturn binary.BigEndian.Uint16(b[udpLength:])\n}\n@@ -84,39 +84,46 @@ func (b UDP) Payload() []byte {\nreturn b[UDPMinimumSize:]\n}\n-// Checksum returns the \"checksum\" field of the udp header.\n+// Checksum returns the \"checksum\" field of the UDP header.\nfunc (b UDP) Checksum() uint16 {\nreturn binary.BigEndian.Uint16(b[udpChecksum:])\n}\n-// SetSourcePort sets the \"source port\" field of the udp header.\n+// SetSourcePort sets the \"source port\" field of the UDP header.\nfunc (b UDP) SetSourcePort(port uint16) {\nbinary.BigEndian.PutUint16(b[udpSrcPort:], port)\n}\n-// SetDestinationPort sets the \"destination port\" field of the udp header.\n+// SetDestinationPort sets the \"destination port\" field of the UDP header.\nfunc (b UDP) SetDestinationPort(port uint16) {\nbinary.BigEndian.PutUint16(b[udpDstPort:], port)\n}\n-// SetChecksum sets the \"checksum\" field of the udp header.\n+// SetChecksum sets the \"checksum\" field of the UDP header.\nfunc (b UDP) SetChecksum(checksum uint16) {\nbinary.BigEndian.PutUint16(b[udpChecksum:], checksum)\n}\n-// SetLength sets the \"length\" field of the udp header.\n+// SetLength sets the \"length\" field of the UDP header.\nfunc (b UDP) SetLength(length uint16) {\nbinary.BigEndian.PutUint16(b[udpLength:], length)\n}\n-// CalculateChecksum calculates the checksum of the udp packet, given the\n+// CalculateChecksum calculates the checksum of the UDP packet, given the\n// checksum of the network-layer pseudo-header and the checksum of the payload.\nfunc (b UDP) CalculateChecksum(partialChecksum uint16) uint16 {\n// Calculate the rest of the checksum.\nreturn Checksum(b[:UDPMinimumSize], partialChecksum)\n}\n-// Encode encodes all the fields of the udp header.\n+// IsChecksumValid returns true iff the UDP header's checksum is valid.\n+func (b UDP) IsChecksumValid(src, dst tcpip.Address, payloadChecksum uint16) bool {\n+ xsum := PseudoHeaderChecksum(UDPProtocolNumber, dst, src, b.Length())\n+ xsum = ChecksumCombine(xsum, payloadChecksum)\n+ return b.CalculateChecksum(xsum) == 0xffff\n+}\n+\n+// Encode encodes all the fields of the UDP header.\nfunc (b UDP) Encode(u *UDPFields) {\nbinary.BigEndian.PutUint16(b[udpSrcPort:], u.SrcPort)\nbinary.BigEndian.PutUint16(b[udpDstPort:], u.DstPort)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/ipv4.go", "new_path": "pkg/tcpip/network/ipv4/ipv4.go", "diff": "@@ -1178,28 +1178,7 @@ func (p *protocol) parseAndValidate(pkt *stack.PacketBuffer) (header.IPv4, bool)\nreturn nil, false\n}\n- // There has been some confusion regarding 
verifying checksums. We need\n- // just look for negative 0 (0xffff) as the checksum, as it's not possible to\n- // get positive 0 (0) for the checksum. Some bad implementations could get it\n- // when doing entry replacement in the early days of the Internet,\n- // however the lore that one needs to check for both persists.\n- //\n- // RFC 1624 section 1 describes the source of this confusion as:\n- // [the partial recalculation method described in RFC 1071] computes a\n- // result for certain cases that differs from the one obtained from\n- // scratch (one's complement of one's complement sum of the original\n- // fields).\n- //\n- // However RFC 1624 section 5 clarifies that if using the verification method\n- // \"recommended by RFC 1071, it does not matter if an intermediate system\n- // generated a -0 instead of +0\".\n- //\n- // RFC1071 page 1 specifies the verification method as:\n- // (3) To check a checksum, the 1's complement sum is computed over the\n- // same set of octets, including the checksum field. If the result\n- // is all 1 bits (-0 in 1's complement arithmetic), the check\n- // succeeds.\n- if h.CalculateChecksum() != 0xffff {\n+ if !h.IsChecksumValid() {\nreturn nil, false\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/segment.go", "new_path": "pkg/tcpip/transport/tcp/segment.go", "diff": "@@ -236,20 +236,14 @@ func (s *segment) parse(skipChecksumValidation bool) bool {\ns.options = []byte(s.hdr[header.TCPMinimumSize:])\ns.parsedOptions = header.ParseTCPOptions(s.options)\n-\n- verifyChecksum := true\nif skipChecksumValidation {\ns.csumValid = true\n- verifyChecksum = false\n- }\n- if verifyChecksum {\n+ } else {\ns.csum = s.hdr.Checksum()\n- xsum := header.PseudoHeaderChecksum(ProtocolNumber, s.srcAddr, s.dstAddr, uint16(s.data.Size()+len(s.hdr)))\n- xsum = s.hdr.CalculateChecksum(xsum)\n- xsum = header.ChecksumVV(s.data, xsum)\n- s.csumValid = xsum == 0xffff\n+ payloadChecksum := header.ChecksumVV(s.data, 0)\n+ payloadLength := uint16(s.data.Size())\n+ s.csumValid = s.hdr.IsChecksumValid(s.srcAddr, s.dstAddr, payloadChecksum, payloadLength)\n}\n-\ns.sequenceNumber = seqnum.Value(s.hdr.SequenceNumber())\ns.ackNumber = seqnum.Value(s.hdr.AckNumber())\ns.flags = s.hdr.Flags()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/endpoint.go", "new_path": "pkg/tcpip/transport/udp/endpoint.go", "diff": "@@ -1255,22 +1255,31 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {\n}\n// verifyChecksum verifies the checksum unless RX checksum offload is enabled.\n-// On IPv4, UDP checksum is optional, and a zero value means the transmitter\n-// omitted the checksum generation (RFC768).\n-// On IPv6, UDP checksum is not optional (RFC2460 Section 8.1).\nfunc verifyChecksum(hdr header.UDP, pkt *stack.PacketBuffer) bool {\n- if !pkt.RXTransportChecksumValidated &&\n- (hdr.Checksum() != 0 || pkt.NetworkProtocolNumber == header.IPv6ProtocolNumber) {\n- netHdr := pkt.Network()\n- xsum := header.PseudoHeaderChecksum(ProtocolNumber, netHdr.DestinationAddress(), netHdr.SourceAddress(), hdr.Length())\n- for _, v := range pkt.Data().Views() {\n- xsum = header.Checksum(v, xsum)\n- }\n- return hdr.CalculateChecksum(xsum) == 0xffff\n+ if pkt.RXTransportChecksumValidated {\n+ return true\n}\n+\n+ // On IPv4, UDP checksum is optional, and a zero value means the transmitter\n+ // omitted the checksum generation, as per RFC 768:\n+ //\n+ // An all zero transmitted checksum value means that the transmitter\n+ // generated no 
checksum (for debugging or for higher level protocols that\n+ // don't care).\n+ //\n+ // On IPv6, UDP checksum is not optional, as per RFC 2460 Section 8.1:\n+ //\n+ // Unlike IPv4, when UDP packets are originated by an IPv6 node, the UDP\n+ // checksum is not optional.\n+ if pkt.NetworkProtocolNumber == header.IPv4ProtocolNumber && hdr.Checksum() == 0 {\nreturn true\n}\n+ netHdr := pkt.Network()\n+ payloadChecksum := pkt.Data().AsRange().Checksum()\n+ return hdr.IsChecksumValid(netHdr.SourceAddress(), netHdr.DestinationAddress(), payloadChecksum)\n+}\n+\n// HandlePacket is called by the stack when new packets arrive to this transport\n// endpoint.\nfunc (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) {\n@@ -1284,7 +1293,6 @@ func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketB\n}\nif !verifyChecksum(hdr, pkt) {\n- // Checksum Error.\ne.stack.Stats().UDP.ChecksumErrors.Increment()\ne.stats.ReceiveErrors.ChecksumErrors.Increment()\nreturn\n" } ]
Go
Apache License 2.0
google/gvisor
Expose header methods that validate checksums This is done for IPv4, UDP and TCP headers. This also changes the packet checkers used in tests to error on zero-checksum, not sure why it was allowed before. And while I'm here, make comments' case consistent. RELNOTES: n/a Fixes #5049 PiperOrigin-RevId: 369383862
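The record above adds IsChecksumValid helpers to the IPv4, UDP and TCP headers so callers no longer hand-roll the pseudo-header arithmetic. A hedged sketch of how a caller might use the UDP variant, restricted to the APIs shown in the diff (the wrapper function and packet contents are hypothetical, and building against the real gvisor.dev/gvisor module is assumed):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

// validUDP reports whether the UDP header in pkt carries a valid checksum for
// the given source and destination addresses.
func validUDP(pkt []byte, src, dst tcpip.Address) bool {
	hdr := header.UDP(pkt)
	// The helper folds in the pseudo-header and the header bytes; the caller
	// only supplies the checksum of the payload.
	payloadChecksum := header.Checksum(hdr.Payload(), 0)
	return hdr.IsChecksumValid(src, dst, payloadChecksum)
}

func main() {
	pkt := make([]byte, header.UDPMinimumSize) // all-zero header: not a valid checksum
	fmt.Println(validUDP(pkt, tcpip.Address("\x0a\x00\x00\x01"), tcpip.Address("\x0a\x00\x00\x02")))
}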
259,985
21.04.2021 13:34:51
25,200
e3a5da8ce62826f56c0b531590bb472ea717eeac
Stub the custom "job" controller required by some workloads.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/cgroupfs/BUILD", "new_path": "pkg/sentry/fsimpl/cgroupfs/BUILD", "diff": "@@ -23,6 +23,7 @@ go_library(\n\"cpuacct.go\",\n\"cpuset.go\",\n\"dir_refs.go\",\n+ \"job.go\",\n\"memory.go\",\n],\nvisibility = [\"//pkg/sentry:internal\"],\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/cgroupfs/base.go", "new_path": "pkg/sentry/fsimpl/cgroupfs/base.go", "diff": "@@ -18,6 +18,7 @@ import (\n\"bytes\"\n\"fmt\"\n\"sort\"\n+ \"strconv\"\n\"sync/atomic\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n@@ -26,6 +27,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n+ \"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n)\n@@ -231,3 +233,29 @@ func (d *tasksData) Write(ctx context.Context, src usermem.IOSequence, offset in\n// TODO(b/183137098): Payload is the pid for a process to add to this cgroup.\nreturn src.NumBytes(), nil\n}\n+\n+// parseInt64FromString interprets src as string encoding a int64 value, and\n+// returns the parsed value.\n+func parseInt64FromString(ctx context.Context, src usermem.IOSequence, offset int64) (val, len int64, err error) {\n+ const maxInt64StrLen = 20 // i.e. len(fmt.Sprintf(\"%d\", math.MinInt64)) == 20\n+\n+ t := kernel.TaskFromContext(ctx)\n+ src = src.DropFirst64(offset)\n+\n+ buf := t.CopyScratchBuffer(maxInt64StrLen)\n+ n, err := src.CopyIn(ctx, buf)\n+ if err != nil {\n+ return 0, int64(n), err\n+ }\n+ buf = buf[:n]\n+\n+ val, err = strconv.ParseInt(string(buf), 10, 64)\n+ if err != nil {\n+ // Note: This also handles zero-len writes if offset is beyond the end\n+ // of src, or src is empty.\n+ ctx.Warningf(\"cgroupfs.parseInt64FromString: failed to parse %q: %v\", string(buf), err)\n+ return 0, int64(n), syserror.EINVAL\n+ }\n+\n+ return val, int64(n), nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go", "new_path": "pkg/sentry/fsimpl/cgroupfs/cgroupfs.go", "diff": "@@ -81,13 +81,20 @@ const (\ncontrollerCPU = kernel.CgroupControllerType(\"cpu\")\ncontrollerCPUAcct = kernel.CgroupControllerType(\"cpuacct\")\ncontrollerCPUSet = kernel.CgroupControllerType(\"cpuset\")\n+ controllerJob = kernel.CgroupControllerType(\"job\")\ncontrollerMemory = kernel.CgroupControllerType(\"memory\")\n)\n-var allControllers = []kernel.CgroupControllerType{controllerCPU, controllerCPUAcct, controllerCPUSet, controllerMemory}\n+var allControllers = []kernel.CgroupControllerType{\n+ controllerCPU,\n+ controllerCPUAcct,\n+ controllerCPUSet,\n+ controllerJob,\n+ controllerMemory,\n+}\n// SupportedMountOptions is the set of supported mount options for cgroupfs.\n-var SupportedMountOptions = []string{\"all\", \"cpu\", \"cpuacct\", \"cpuset\", \"memory\"}\n+var SupportedMountOptions = []string{\"all\", \"cpu\", \"cpuacct\", \"cpuset\", \"job\", \"memory\"}\n// FilesystemType implements vfs.FilesystemType.\n//\n@@ -171,6 +178,10 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\ndelete(mopts, \"cpuset\")\nwantControllers = append(wantControllers, controllerCPUSet)\n}\n+ if _, ok := mopts[\"job\"]; ok {\n+ delete(mopts, \"job\")\n+ wantControllers = append(wantControllers, controllerJob)\n+ }\nif _, ok := mopts[\"memory\"]; ok {\ndelete(mopts, \"memory\")\nwantControllers = append(wantControllers, controllerMemory)\n@@ -235,14 +246,16 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\nfor _, ty := range 
wantControllers {\nvar c controller\nswitch ty {\n- case controllerMemory:\n- c = newMemoryController(fs, defaults)\ncase controllerCPU:\nc = newCPUController(fs, defaults)\ncase controllerCPUAcct:\nc = newCPUAcctController(fs)\ncase controllerCPUSet:\nc = newCPUSetController(fs)\n+ case controllerJob:\n+ c = newJobController(fs)\n+ case controllerMemory:\n+ c = newMemoryController(fs, defaults)\ndefault:\npanic(fmt.Sprintf(\"Unreachable: unknown cgroup controller %q\", ty))\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/sentry/fsimpl/cgroupfs/job.go", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package cgroupfs\n+\n+import (\n+ \"bytes\"\n+ \"fmt\"\n+\n+ \"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n+ \"gvisor.dev/gvisor/pkg/usermem\"\n+)\n+\n+// +stateify savable\n+type jobController struct {\n+ controllerCommon\n+ id int64\n+}\n+\n+var _ controller = (*jobController)(nil)\n+\n+func newJobController(fs *filesystem) *jobController {\n+ c := &jobController{}\n+ c.controllerCommon.init(controllerJob, fs)\n+ return c\n+}\n+\n+func (c *jobController) AddControlFiles(ctx context.Context, creds *auth.Credentials, _ *cgroupInode, contents map[string]kernfs.Inode) {\n+ contents[\"job.id\"] = c.fs.newControllerWritableFile(ctx, creds, &jobIDData{c: c})\n+}\n+\n+// +stateify savable\n+type jobIDData struct {\n+ c *jobController\n+}\n+\n+// Generate implements vfs.DynamicBytesSource.Generate.\n+func (d *jobIDData) Generate(ctx context.Context, buf *bytes.Buffer) error {\n+ fmt.Fprintf(buf, \"%d\\n\", d.c.id)\n+ return nil\n+}\n+\n+// Write implements vfs.WritableDynamicBytesSource.Write.\n+func (d *jobIDData) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {\n+ val, n, err := parseInt64FromString(ctx, src, offset)\n+ if err != nil {\n+ return n, err\n+ }\n+ d.c.id = val\n+ return n, nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/cgroup.cc", "new_path": "test/syscalls/linux/cgroup.cc", "diff": "// All tests in this file rely on being about to mount and unmount cgroupfs,\n// which isn't expected to work, or be safe on a general linux system.\n+#include <limits.h>\n#include <sys/mount.h>\n#include <unistd.h>\n@@ -35,8 +36,9 @@ using ::testing::_;\nusing ::testing::Ge;\nusing ::testing::Gt;\n-std::vector<std::string> known_controllers = {\"cpu\", \"cpuset\", \"cpuacct\",\n- \"memory\"};\n+std::vector<std::string> known_controllers = {\n+ \"cpu\", \"cpuset\", \"cpuacct\", \"job\", \"memory\",\n+};\nbool CgroupsAvailable() {\nreturn IsRunningOnGvisor() && !IsRunningWithVFS1() &&\n@@ -257,6 +259,35 @@ TEST(CPUAcctCgroup, CPUAcctStat) {\nEXPECT_THAT(Atoi<int64_t>(sys_tokens[1]), IsPosixErrorOkAndHolds(Ge(0)));\n}\n+// WriteAndVerifyControlValue attempts to write val to a cgroup file at path,\n+// and verify the value by reading it afterwards.\n+PosixError 
WriteAndVerifyControlValue(const Cgroup& c, std::string_view path,\n+ int64_t val) {\n+ RETURN_IF_ERRNO(c.WriteIntegerControlFile(path, val));\n+ ASSIGN_OR_RETURN_ERRNO(int64_t newval, c.ReadIntegerControlFile(path));\n+ if (newval != val) {\n+ return PosixError(\n+ EINVAL,\n+ absl::StrFormat(\n+ \"Unexpected value for control file '%s': expected %d, got %d\", path,\n+ val, newval));\n+ }\n+ return NoError();\n+}\n+\n+TEST(JobCgroup, ReadWriteRead) {\n+ SKIP_IF(!CgroupsAvailable());\n+\n+ Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));\n+ Cgroup c = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"job\"));\n+\n+ EXPECT_THAT(c.ReadIntegerControlFile(\"job.id\"), IsPosixErrorOkAndHolds(0));\n+ EXPECT_NO_ERRNO(WriteAndVerifyControlValue(c, \"job.id\", 1234));\n+ EXPECT_NO_ERRNO(WriteAndVerifyControlValue(c, \"job.id\", -1));\n+ EXPECT_NO_ERRNO(WriteAndVerifyControlValue(c, \"job.id\", LLONG_MIN));\n+ EXPECT_NO_ERRNO(WriteAndVerifyControlValue(c, \"job.id\", LLONG_MAX));\n+}\n+\nTEST(ProcCgroups, Empty) {\nSKIP_IF(!CgroupsAvailable());\n" }, { "change_type": "MODIFY", "old_path": "test/util/cgroup_util.cc", "new_path": "test/util/cgroup_util.cc", "diff": "#include <sys/syscall.h>\n#include <unistd.h>\n+#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_split.h\"\n#include \"test/util/fs_util.h\"\n#include \"test/util/mount_util.h\"\n@@ -50,6 +51,18 @@ PosixErrorOr<int64_t> Cgroup::ReadIntegerControlFile(\nreturn val;\n}\n+PosixError Cgroup::WriteControlFile(absl::string_view name,\n+ const std::string& value) const {\n+ ASSIGN_OR_RETURN_ERRNO(FileDescriptor fd, Open(Relpath(name), O_WRONLY));\n+ RETURN_ERROR_IF_SYSCALL_FAIL(WriteFd(fd.get(), value.c_str(), value.size()));\n+ return NoError();\n+}\n+\n+PosixError Cgroup::WriteIntegerControlFile(absl::string_view name,\n+ int64_t value) const {\n+ return WriteControlFile(name, absl::StrCat(value));\n+}\n+\nPosixErrorOr<absl::flat_hash_set<pid_t>> Cgroup::Procs() const {\nASSIGN_OR_RETURN_ERRNO(std::string buf, ReadControlFile(\"cgroup.procs\"));\nreturn ParsePIDList(buf);\n" }, { "change_type": "MODIFY", "old_path": "test/util/cgroup_util.h", "new_path": "test/util/cgroup_util.h", "diff": "@@ -45,6 +45,14 @@ class Cgroup {\n// to parse it as an integer.\nPosixErrorOr<int64_t> ReadIntegerControlFile(absl::string_view name) const;\n+ // Writes a string to a cgroup control file.\n+ PosixError WriteControlFile(absl::string_view name,\n+ const std::string& value) const;\n+\n+ // Writes an integer value to a cgroup control file.\n+ PosixError WriteIntegerControlFile(absl::string_view name,\n+ int64_t value) const;\n+\n// Returns the thread ids of the leaders of thread groups managed by this\n// cgroup.\nPosixErrorOr<absl::flat_hash_set<pid_t>> Procs() const;\n" } ]
Go
Apache License 2.0
google/gvisor
Stub the custom "job" controller required by some workloads. PiperOrigin-RevId: 369724358
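The new job.id control file above parses writes with parseInt64FromString, so it accepts any decimal int64 (the C++ test exercises 0, 1234, -1, LLONG_MIN and LLONG_MAX) and rejects anything else with EINVAL. A standalone Go illustration of that contract (not sentry code):

package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	inputs := []string{
		"0", "1234", "-1",
		strconv.FormatInt(math.MinInt64, 10),
		strconv.FormatInt(math.MaxInt64, 10),
		"not-a-number", // fails to parse, mirroring the EINVAL path
	}
	for _, s := range inputs {
		v, err := strconv.ParseInt(s, 10, 64)
		fmt.Println(s, v, err)
	}
}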
260,001
22.04.2021 11:10:35
25,200
dbfdb31e8a014e5e11092de121e825b21c2804c3
Add verity tests for modified file/Merkle file
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/verity_ioctl.cc", "new_path": "test/syscalls/linux/verity_ioctl.cc", "diff": "// limitations under the License.\n#include <stdint.h>\n+#include <stdlib.h>\n#include <sys/mount.h>\n+#include <time.h>\n#include <iomanip>\n#include <sstream>\n@@ -56,6 +58,8 @@ struct fsverity_digest {\nconstexpr int kMaxDigestSize = 64;\nconstexpr int kDefaultDigestSize = 32;\nconstexpr char kContents[] = \"foobarbaz\";\n+constexpr char kMerklePrefix[] = \".merkle.verity.\";\n+constexpr char kMerkleRootPrefix[] = \".merkleroot.verity.\";\nclass IoctlTest : public ::testing::Test {\nprotected:\n@@ -92,6 +96,68 @@ std::string BytesToHexString(uint8_t bytes[], int size) {\nreturn ss.str();\n}\n+std::string MerklePath(absl::string_view path) {\n+ return JoinPath(Dirname(path),\n+ std::string(kMerklePrefix) + std::string(Basename(path)));\n+}\n+\n+std::string MerkleRootPath(absl::string_view path) {\n+ return JoinPath(Dirname(path),\n+ std::string(kMerkleRootPrefix) + std::string(Basename(path)));\n+}\n+\n+// Flip a random bit in the file represented by fd.\n+PosixError FlipRandomBit(int fd, int size) {\n+ // Generate a random offset in the file.\n+ srand(time(nullptr));\n+ unsigned int seed = 0;\n+ int random_offset = rand_r(&seed) % size;\n+\n+ // Read a random byte and flip a bit in it.\n+ char buf[1];\n+ RETURN_ERROR_IF_SYSCALL_FAIL(PreadFd(fd, buf, 1, random_offset));\n+ buf[0] ^= 1;\n+ RETURN_ERROR_IF_SYSCALL_FAIL(PwriteFd(fd, buf, 1, random_offset));\n+ return NoError();\n+}\n+\n+// Mount a verity on the tmpfs and enable both the file and the direcotry. Then\n+// mount a new verity with measured root hash.\n+PosixErrorOr<std::string> MountVerity(std::string tmpfs_dir,\n+ std::string filename) {\n+ // Mount a verity fs on the existing tmpfs mount.\n+ std::string mount_opts = \"lower_path=\" + tmpfs_dir;\n+ ASSIGN_OR_RETURN_ERRNO(TempPath verity_dir, TempPath::CreateDir());\n+ RETURN_ERROR_IF_SYSCALL_FAIL(\n+ mount(\"\", verity_dir.path().c_str(), \"verity\", 0, mount_opts.c_str()));\n+\n+ // Enable both the file and the directory.\n+ ASSIGN_OR_RETURN_ERRNO(\n+ auto fd, Open(JoinPath(verity_dir.path(), filename), O_RDONLY, 0777));\n+ RETURN_ERROR_IF_SYSCALL_FAIL(ioctl(fd.get(), FS_IOC_ENABLE_VERITY));\n+ ASSIGN_OR_RETURN_ERRNO(auto dir_fd, Open(verity_dir.path(), O_RDONLY, 0777));\n+ RETURN_ERROR_IF_SYSCALL_FAIL(ioctl(dir_fd.get(), FS_IOC_ENABLE_VERITY));\n+\n+ // Measure the root hash.\n+ uint8_t digest_array[sizeof(struct fsverity_digest) + kMaxDigestSize] = {0};\n+ struct fsverity_digest* digest =\n+ reinterpret_cast<struct fsverity_digest*>(digest_array);\n+ digest->digest_size = kMaxDigestSize;\n+ RETURN_ERROR_IF_SYSCALL_FAIL(\n+ ioctl(dir_fd.get(), FS_IOC_MEASURE_VERITY, digest));\n+\n+ // Mount a verity fs with specified root hash.\n+ mount_opts +=\n+ \",root_hash=\" + BytesToHexString(digest->digest, digest->digest_size);\n+ ASSIGN_OR_RETURN_ERRNO(TempPath verity_with_hash_dir, TempPath::CreateDir());\n+ RETURN_ERROR_IF_SYSCALL_FAIL(mount(\"\", verity_with_hash_dir.path().c_str(),\n+ \"verity\", 0, mount_opts.c_str()));\n+ // Verity directories should not be deleted. 
Release the TempPath objects to\n+ // prevent those directories from being deleted by the destructor.\n+ verity_dir.release();\n+ return verity_with_hash_dir.release();\n+}\n+\nTEST_F(IoctlTest, Enable) {\n// Mount a verity fs on the existing tmpfs mount.\nstd::string mount_opts = \"lower_path=\" + tmpfs_dir_.path();\n@@ -139,47 +205,71 @@ TEST_F(IoctlTest, Measure) {\n}\nTEST_F(IoctlTest, Mount) {\n- // Mount a verity fs on the existing tmpfs mount.\n- std::string mount_opts = \"lower_path=\" + tmpfs_dir_.path();\n- auto verity_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n- ASSERT_THAT(\n- mount(\"\", verity_dir.path().c_str(), \"verity\", 0, mount_opts.c_str()),\n+ std::string verity_dir =\n+ ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+\n+ // Make sure the file can be open and read in the mounted verity fs.\n+ auto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Open(JoinPath(verity_dir, filename_), O_RDONLY, 0777));\n+ char buf[sizeof(kContents)];\n+ EXPECT_THAT(ReadFd(verity_fd.get(), buf, sizeof(kContents)),\nSyscallSucceeds());\n+}\n- // Enable both the file and the directory.\n- auto const fd = ASSERT_NO_ERRNO_AND_VALUE(\n- Open(JoinPath(verity_dir.path(), filename_), O_RDONLY, 0777));\n- ASSERT_THAT(ioctl(fd.get(), FS_IOC_ENABLE_VERITY), SyscallSucceeds());\n- auto const dir_fd =\n- ASSERT_NO_ERRNO_AND_VALUE(Open(verity_dir.path(), O_RDONLY, 0777));\n- ASSERT_THAT(ioctl(dir_fd.get(), FS_IOC_ENABLE_VERITY), SyscallSucceeds());\n+TEST_F(IoctlTest, NonExistingFile) {\n+ std::string verity_dir =\n+ ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n- // Measure the root hash.\n- uint8_t digest_array[sizeof(struct fsverity_digest) + kMaxDigestSize] = {0};\n- struct fsverity_digest* digest =\n- reinterpret_cast<struct fsverity_digest*>(digest_array);\n- digest->digest_size = kMaxDigestSize;\n- ASSERT_THAT(ioctl(dir_fd.get(), FS_IOC_MEASURE_VERITY, digest),\n- SyscallSucceeds());\n+ // Confirm that opening a non-existing file in the verity-enabled directory\n+ // triggers the expected error instead of verification failure.\n+ EXPECT_THAT(\n+ open(JoinPath(verity_dir, filename_ + \"abc\").c_str(), O_RDONLY, 0777),\n+ SyscallFailsWithErrno(ENOENT));\n+}\n- // Mount a verity fs with specified root hash.\n- mount_opts +=\n- \",root_hash=\" + BytesToHexString(digest->digest, digest->digest_size);\n- auto verity_with_hash_dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n- ASSERT_THAT(mount(\"\", verity_with_hash_dir.path().c_str(), \"verity\", 0,\n- mount_opts.c_str()),\n- SyscallSucceeds());\n+TEST_F(IoctlTest, ModifiedFile) {\n+ std::string verity_dir =\n+ ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+\n+ // Modify the file and check verification failure upon reading from it.\n+ auto const fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Open(JoinPath(tmpfs_dir_.path(), filename_), O_RDWR, 0777));\n+ ASSERT_NO_ERRNO(FlipRandomBit(fd.get(), sizeof(kContents) - 1));\n- // Make sure the file can be open and read in the mounted verity fs.\nauto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(\n- Open(JoinPath(verity_with_hash_dir.path(), filename_), O_RDONLY, 0777));\n- char buf[16];\n- EXPECT_THAT(ReadFd(fd.get(), buf, sizeof(kContents)), SyscallSucceeds());\n+ Open(JoinPath(verity_dir, filename_), O_RDONLY, 0777));\n+ char buf[sizeof(kContents)];\n+ EXPECT_THAT(pread(verity_fd.get(), buf, 16, 0), SyscallFailsWithErrno(EIO));\n+}\n- // Verity directories should not be deleted. 
Release the TempPath objects to\n- // prevent those directories from being deleted by the destructor.\n- verity_dir.release();\n- verity_with_hash_dir.release();\n+TEST_F(IoctlTest, ModifiedMerkle) {\n+ std::string verity_dir =\n+ ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+\n+ // Modify the Merkle file and check verification failure upon opening the\n+ // corresponding file.\n+ auto const fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Open(MerklePath(JoinPath(tmpfs_dir_.path(), filename_)), O_RDWR, 0777));\n+ auto stat = ASSERT_NO_ERRNO_AND_VALUE(Fstat(fd.get()));\n+ ASSERT_NO_ERRNO(FlipRandomBit(fd.get(), stat.st_size));\n+\n+ EXPECT_THAT(open(JoinPath(verity_dir, filename_).c_str(), O_RDONLY, 0777),\n+ SyscallFailsWithErrno(EIO));\n+}\n+\n+TEST_F(IoctlTest, ModifiedDirMerkle) {\n+ std::string verity_dir =\n+ ASSERT_NO_ERRNO_AND_VALUE(MountVerity(tmpfs_dir_.path(), filename_));\n+\n+ // Modify the Merkle file for the parent directory and check verification\n+ // failure upon opening the corresponding file.\n+ auto const fd = ASSERT_NO_ERRNO_AND_VALUE(\n+ Open(MerkleRootPath(JoinPath(tmpfs_dir_.path(), \"root\")), O_RDWR, 0777));\n+ auto stat = ASSERT_NO_ERRNO_AND_VALUE(Fstat(fd.get()));\n+ ASSERT_NO_ERRNO(FlipRandomBit(fd.get(), stat.st_size));\n+\n+ EXPECT_THAT(open(JoinPath(verity_dir, filename_).c_str(), O_RDONLY, 0777),\n+ SyscallFailsWithErrno(EIO));\n}\n} // namespace\n" } ]
Go
Apache License 2.0
google/gvisor
Add verity tests for modified file/Merkle file PiperOrigin-RevId: 369909691
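The tests above corrupt a single random bit of the lower file or its Merkle file and expect the verity mount to fail the next open or read with EIO. An illustrative Go equivalent of the C++ FlipRandomBit helper (the path used in main is hypothetical):

package main

import (
	"math/rand"
	"os"
)

// flipRandomBit flips the lowest bit of one randomly chosen byte within the
// first size bytes of the file at path.
func flipRandomBit(path string, size int64) error {
	f, err := os.OpenFile(path, os.O_RDWR, 0)
	if err != nil {
		return err
	}
	defer f.Close()

	off := rand.Int63n(size)
	buf := make([]byte, 1)
	if _, err := f.ReadAt(buf, off); err != nil {
		return err
	}
	buf[0] ^= 1
	_, err = f.WriteAt(buf, off)
	return err
}

func main() {
	// Hypothetical target; a verity-protected lower file would now fail verification.
	_ = flipRandomBit("/tmp/lower/file", 9)
}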
259,985
22.04.2021 15:50:01
25,200
d93907110eebdfb1e51dacd9ccffd0f0c2633a81
Also report mount options through /proc/<pid>/mounts.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/mount.go", "new_path": "pkg/sentry/vfs/mount.go", "diff": "@@ -826,6 +826,9 @@ func (vfs *VirtualFilesystem) GenerateProcMounts(ctx context.Context, taskRootDi\nif mnt.Flags.NoExec {\nopts += \",noexec\"\n}\n+ if mopts := mnt.fs.Impl().MountOptions(); mopts != \"\" {\n+ opts += \",\" + mopts\n+ }\n// Format:\n// <special device or remote filesystem> <mount point> <filesystem type> <mount options> <needs dump> <fsck order>\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -1726,6 +1726,7 @@ cc_binary(\n\"//test/util:cleanup\",\n\"//test/util:file_descriptor\",\n\"//test/util:fs_util\",\n+ \"//test/util:mount_util\",\n\"@com_google_absl//absl/container:node_hash_set\",\n\"@com_google_absl//absl/strings\",\n\"@com_google_absl//absl/synchronization\",\n@@ -4243,6 +4244,7 @@ cc_binary(\n\"//test/util:cgroup_util\",\n\"//test/util:file_descriptor\",\n\"//test/util:fs_util\",\n+ \"//test/util:mount_util\",\n\"@com_google_absl//absl/strings\",\ngtest,\n\"//test/util:posix_error\",\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/cgroup.cc", "new_path": "test/syscalls/linux/cgroup.cc", "diff": "#include \"absl/strings/str_split.h\"\n#include \"test/util/capability_util.h\"\n#include \"test/util/cgroup_util.h\"\n+#include \"test/util/mount_util.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n@@ -33,8 +34,11 @@ namespace testing {\nnamespace {\nusing ::testing::_;\n+using ::testing::Contains;\nusing ::testing::Ge;\nusing ::testing::Gt;\n+using ::testing::Key;\n+using ::testing::Not;\nstd::vector<std::string> known_controllers = {\n\"cpu\", \"cpuset\", \"cpuacct\", \"job\", \"memory\",\n@@ -447,6 +451,54 @@ TEST(ProcCgroup, MultiControllerHierarchy) {\nEXPECT_EQ(pid_e.hierarchy, mem_e.hierarchy);\n}\n+TEST(ProcCgroup, ProcfsReportsCgroupfsMountOptions) {\n+ SKIP_IF(!CgroupsAvailable());\n+\n+ Mounter m(ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir()));\n+ // Hierarchy with multiple controllers.\n+ Cgroup c1 = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"memory,cpu\"));\n+ // Hierarchy with a single controller.\n+ Cgroup c2 = ASSERT_NO_ERRNO_AND_VALUE(m.MountCgroupfs(\"cpuacct\"));\n+\n+ const std::vector<ProcMountsEntry> mounts =\n+ ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountsEntries());\n+\n+ for (auto const& e : mounts) {\n+ if (e.mount_point == c1.Path()) {\n+ auto mopts = ParseMountOptions(e.mount_opts);\n+ EXPECT_THAT(mopts, Contains(Key(\"memory\")));\n+ EXPECT_THAT(mopts, Contains(Key(\"cpu\")));\n+ EXPECT_THAT(mopts, Not(Contains(Key(\"cpuacct\"))));\n+ }\n+\n+ if (e.mount_point == c2.Path()) {\n+ auto mopts = ParseMountOptions(e.mount_opts);\n+ EXPECT_THAT(mopts, Contains(Key(\"cpuacct\")));\n+ EXPECT_THAT(mopts, Not(Contains(Key(\"cpu\"))));\n+ EXPECT_THAT(mopts, Not(Contains(Key(\"memory\"))));\n+ }\n+ }\n+\n+ const std::vector<ProcMountInfoEntry> mountinfo =\n+ ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountInfoEntries());\n+\n+ for (auto const& e : mountinfo) {\n+ if (e.mount_point == c1.Path()) {\n+ auto mopts = ParseMountOptions(e.super_opts);\n+ EXPECT_THAT(mopts, Contains(Key(\"memory\")));\n+ EXPECT_THAT(mopts, Contains(Key(\"cpu\")));\n+ EXPECT_THAT(mopts, Not(Contains(Key(\"cpuacct\"))));\n+ }\n+\n+ if (e.mount_point == c2.Path()) {\n+ auto mopts = ParseMountOptions(e.super_opts);\n+ EXPECT_THAT(mopts, Contains(Key(\"cpuacct\")));\n+ EXPECT_THAT(mopts, Not(Contains(Key(\"cpu\"))));\n+ EXPECT_THAT(mopts, 
Not(Contains(Key(\"memory\"))));\n+ }\n+ }\n+}\n+\n} // namespace\n} // namespace testing\n} // namespace gvisor\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/mount.cc", "new_path": "test/syscalls/linux/mount.cc", "diff": "#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n+#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/time/time.h\"\n#include \"test/util/capability_util.h\"\n@@ -44,6 +45,9 @@ namespace testing {\nnamespace {\n+using ::testing::Contains;\n+using ::testing::Pair;\n+\nTEST(MountTest, MountBadFilesystem) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n@@ -345,6 +349,35 @@ TEST(MountTest, RenameRemoveMountPoint) {\nASSERT_THAT(rmdir(dir.path().c_str()), SyscallFailsWithErrno(EBUSY));\n}\n+TEST(MountTest, MountInfo) {\n+ SKIP_IF(IsRunningWithVFS1());\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));\n+\n+ auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ auto const mount = ASSERT_NO_ERRNO_AND_VALUE(\n+ Mount(\"\", dir.path(), \"tmpfs\", MS_NOEXEC, \"mode=0123\", 0));\n+ const std::vector<ProcMountsEntry> mounts =\n+ ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountsEntries());\n+ for (const auto& e : mounts) {\n+ if (e.mount_point == dir.path()) {\n+ EXPECT_EQ(e.fstype, \"tmpfs\");\n+ auto mopts = ParseMountOptions(e.mount_opts);\n+ EXPECT_THAT(mopts, Contains(Pair(\"mode\", \"0123\")));\n+ }\n+ }\n+\n+ const std::vector<ProcMountInfoEntry> mountinfo =\n+ ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountInfoEntries());\n+\n+ for (auto const& e : mountinfo) {\n+ if (e.mount_point == dir.path()) {\n+ EXPECT_EQ(e.fstype, \"tmpfs\");\n+ auto mopts = ParseMountOptions(e.super_opts);\n+ EXPECT_THAT(mopts, Contains(Pair(\"mode\", \"0123\")));\n+ }\n+ }\n+}\n+\n} // namespace\n} // namespace testing\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc.cc", "new_path": "test/syscalls/linux/proc.cc", "diff": "#include \"test/util/file_descriptor.h\"\n#include \"test/util/fs_util.h\"\n#include \"test/util/memory_util.h\"\n+#include \"test/util/mount_util.h\"\n#include \"test/util/multiprocess_util.h\"\n#include \"test/util/posix_error.h\"\n#include \"test/util/proc_util.h\"\n@@ -2468,6 +2469,19 @@ TEST(ProcSelfMountinfo, RequiredFieldsArePresent) {\nR\"([0-9]+ [0-9]+ [0-9]+:[0-9]+ / /proc rw.*- \\S+ \\S+ rw\\S*)\")));\n}\n+TEST(ProcSelfMountinfo, ContainsProcfsEntry) {\n+ const std::vector<ProcMountInfoEntry> entries =\n+ ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountInfoEntries());\n+ bool found = false;\n+ for (const auto& e : entries) {\n+ if (e.fstype == \"proc\") {\n+ found = true;\n+ break;\n+ }\n+ }\n+ EXPECT_TRUE(found);\n+}\n+\n// Check that /proc/self/mounts looks something like a real mounts file.\nTEST(ProcSelfMounts, RequiredFieldsArePresent) {\nauto mounts = ASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/self/mounts\"));\n@@ -2479,6 +2493,19 @@ TEST(ProcSelfMounts, RequiredFieldsArePresent) {\nContainsRegex(R\"(\\S+ /proc \\S+ rw\\S* [0-9]+ [0-9]+\\s)\")));\n}\n+TEST(ProcSelfMounts, ContainsProcfsEntry) {\n+ const std::vector<ProcMountsEntry> entries =\n+ ASSERT_NO_ERRNO_AND_VALUE(ProcSelfMountsEntries());\n+ bool found = false;\n+ for (const auto& e : entries) {\n+ if (e.fstype == \"proc\") {\n+ found = true;\n+ break;\n+ }\n+ }\n+ EXPECT_TRUE(found);\n+}\n+\nvoid CheckDuplicatesRecursively(std::string path) {\nstd::vector<std::string> child_dirs;\n" }, { "change_type": "MODIFY", "old_path": "test/util/BUILD", "new_path": "test/util/BUILD", 
"diff": "@@ -137,11 +137,14 @@ cc_library(\ncc_library(\nname = \"mount_util\",\ntestonly = 1,\n+ srcs = [\"mount_util.cc\"],\nhdrs = [\"mount_util.h\"],\ndeps = [\n\":cleanup\",\n\":posix_error\",\n\":test_util\",\n+ \"@com_google_absl//absl/container:flat_hash_map\",\n+ \"@com_google_absl//absl/strings\",\ngtest,\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "test/util/cgroup_util.cc", "new_path": "test/util/cgroup_util.cc", "diff": "namespace gvisor {\nnamespace testing {\n-Cgroup::Cgroup(std::string path) : cgroup_path_(path) {\n+Cgroup::Cgroup(std::string_view path) : cgroup_path_(path) {\nid_ = ++Cgroup::next_id_;\nstd::cerr << absl::StreamFormat(\"[cg#%d] <= %s\", id_, cgroup_path_)\n<< std::endl;\n" }, { "change_type": "MODIFY", "old_path": "test/util/cgroup_util.h", "new_path": "test/util/cgroup_util.h", "diff": "@@ -30,10 +30,12 @@ namespace testing {\n// Cgroup represents a cgroup directory on a mounted cgroupfs.\nclass Cgroup {\npublic:\n- Cgroup(std::string path);\n+ Cgroup(std::string_view path);\nuint64_t id() const { return id_; }\n+ const std::string& Path() const { return cgroup_path_; }\n+\nstd::string Relpath(absl::string_view leaf) const {\nreturn JoinPath(cgroup_path_, leaf);\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/util/mount_util.cc", "diff": "+// Copyright 2021 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include \"test/util/mount_util.h\"\n+\n+#include <sys/syscall.h>\n+#include <unistd.h>\n+\n+#include \"absl/strings/numbers.h\"\n+#include \"absl/strings/str_split.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+PosixErrorOr<std::vector<ProcMountsEntry>> ProcSelfMountsEntries() {\n+ std::string content;\n+ RETURN_IF_ERRNO(GetContents(\"/proc/self/mounts\", &content));\n+\n+ std::vector<ProcMountsEntry> entries;\n+ std::vector<std::string> lines = absl::StrSplit(content, '\\n');\n+ std::cerr << \"<contents of /proc/self/mounts>\" << std::endl;\n+ for (const std::string& line : lines) {\n+ std::cerr << line << std::endl;\n+ if (line.empty()) {\n+ continue;\n+ }\n+\n+ // Parse a single entry from /proc/self/mounts.\n+ //\n+ // Example entries:\n+ //\n+ // sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0\n+ // proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0\n+ // ^ ^ ^ ^ ^ ^\n+ // 0 1 2 3 4 5\n+\n+ ProcMountsEntry entry;\n+ std::vector<std::string> fields =\n+ absl::StrSplit(line, absl::ByChar(' '), absl::SkipEmpty());\n+ if (fields.size() != 6) {\n+ return PosixError(EINVAL,\n+ absl::StrFormat(\"Not enough tokens, got %d, line: %s\",\n+ fields.size(), line));\n+ }\n+\n+ entry.spec = fields[0];\n+ entry.mount_point = fields[1];\n+ entry.fstype = fields[2];\n+ entry.mount_opts = fields[3];\n+ ASSIGN_OR_RETURN_ERRNO(entry.dump, Atoi<uint32_t>(fields[4]));\n+ ASSIGN_OR_RETURN_ERRNO(entry.fsck, Atoi<uint32_t>(fields[5]));\n+\n+ entries.push_back(entry);\n+ }\n+ std::cerr << \"<end of /proc/self/mounts>\" << std::endl;\n+\n+ return 
entries;\n+}\n+\n+PosixErrorOr<std::vector<ProcMountInfoEntry>> ProcSelfMountInfoEntries() {\n+ std::string content;\n+ RETURN_IF_ERRNO(GetContents(\"/proc/self/mountinfo\", &content));\n+\n+ std::vector<ProcMountInfoEntry> entries;\n+ std::vector<std::string> lines = absl::StrSplit(content, '\\n');\n+ std::cerr << \"<contents of /proc/self/mountinfo>\" << std::endl;\n+ for (const std::string& line : lines) {\n+ std::cerr << line << std::endl;\n+ if (line.empty()) {\n+ continue;\n+ }\n+\n+ // Parse a single entry from /proc/self/mountinfo.\n+ //\n+ // Example entries:\n+ //\n+ // 22 28 0:20 / /sys rw,relatime shared:7 - sysfs sysfs rw\n+ // 23 28 0:21 / /proc rw,relatime shared:14 - proc proc rw\n+ // ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^\n+ // 0 1 2 3 4 5 6 7 8 9 10\n+\n+ ProcMountInfoEntry entry;\n+ std::vector<std::string> fields =\n+ absl::StrSplit(line, absl::ByChar(' '), absl::SkipEmpty());\n+ if (fields.size() < 10 || fields.size() > 11) {\n+ return PosixError(\n+ EINVAL,\n+ absl::StrFormat(\"Unexpected number of tokens, got %d, line: %s\",\n+ fields.size(), line));\n+ }\n+\n+ ASSIGN_OR_RETURN_ERRNO(entry.id, Atoi<uint64_t>(fields[0]));\n+ ASSIGN_OR_RETURN_ERRNO(entry.parent_id, Atoi<uint64_t>(fields[1]));\n+\n+ std::vector<std::string> devs =\n+ absl::StrSplit(fields[2], absl::ByChar(':'));\n+ if (devs.size() != 2) {\n+ return PosixError(\n+ EINVAL,\n+ absl::StrFormat(\n+ \"Failed to parse dev number field %s: too many tokens, got %d\",\n+ fields[2], devs.size()));\n+ }\n+ ASSIGN_OR_RETURN_ERRNO(entry.major, Atoi<dev_t>(devs[0]));\n+ ASSIGN_OR_RETURN_ERRNO(entry.minor, Atoi<dev_t>(devs[1]));\n+\n+ entry.root = fields[3];\n+ entry.mount_point = fields[4];\n+ entry.mount_opts = fields[5];\n+\n+ // The optional field (fields[6]) may or may not be present. We know based\n+ // on the total number of tokens.\n+ int off = -1;\n+ if (fields.size() == 11) {\n+ entry.optional = fields[6];\n+ off = 0;\n+ }\n+ // Field 7 is the optional field terminator char '-'.\n+ entry.fstype = fields[8 + off];\n+ entry.mount_source = fields[9 + off];\n+ entry.super_opts = fields[10 + off];\n+\n+ entries.push_back(entry);\n+ }\n+ std::cerr << \"<end of /proc/self/mountinfo>\" << std::endl;\n+\n+ return entries;\n+}\n+\n+absl::flat_hash_map<std::string, std::string> ParseMountOptions(\n+ std::string mopts) {\n+ absl::flat_hash_map<std::string, std::string> entries;\n+ const std::vector<std::string> tokens =\n+ absl::StrSplit(mopts, absl::ByChar(','), absl::SkipEmpty());\n+ for (const auto& token : tokens) {\n+ std::vector<std::string> kv =\n+ absl::StrSplit(token, absl::MaxSplits('=', 1));\n+ if (kv.size() == 2) {\n+ entries[kv[0]] = kv[1];\n+ } else if (kv.size() == 1) {\n+ entries[kv[0]] = \"\";\n+ } else {\n+ TEST_CHECK_MSG(\n+ false,\n+ absl::StrFormat(\n+ \"Invalid mount option token '%s', was split into %d subtokens\",\n+ token, kv.size())\n+ .c_str());\n+ }\n+ }\n+ return entries;\n+}\n+\n+} // namespace testing\n+} // namespace gvisor\n" }, { "change_type": "MODIFY", "old_path": "test/util/mount_util.h", "new_path": "test/util/mount_util.h", "diff": "#include <string>\n#include \"gmock/gmock.h\"\n+#include \"absl/container/flat_hash_map.h\"\n#include \"test/util/cleanup.h\"\n#include \"test/util/posix_error.h\"\n#include \"test/util/test_util.h\"\n@@ -45,6 +46,43 @@ inline PosixErrorOr<Cleanup> Mount(const std::string& source,\n});\n}\n+struct ProcMountsEntry {\n+ std::string spec;\n+ std::string mount_point;\n+ std::string fstype;\n+ std::string mount_opts;\n+ uint32_t dump;\n+ uint32_t fsck;\n+};\n+\n+// 
ProcSelfMountsEntries returns a parsed representation of /proc/self/mounts.\n+PosixErrorOr<std::vector<ProcMountsEntry>> ProcSelfMountsEntries();\n+\n+struct ProcMountInfoEntry {\n+ uint64_t id;\n+ uint64_t parent_id;\n+ dev_t major;\n+ dev_t minor;\n+ std::string root;\n+ std::string mount_point;\n+ std::string mount_opts;\n+ std::string optional;\n+ std::string fstype;\n+ std::string mount_source;\n+ std::string super_opts;\n+};\n+\n+// ProcSelfMountInfoEntries returns a parsed representation of\n+// /proc/self/mountinfo.\n+PosixErrorOr<std::vector<ProcMountInfoEntry>> ProcSelfMountInfoEntries();\n+\n+// Interprets the input string mopts as a comma separated list of mount\n+// options. A mount option can either be just a value, or a key=value pair. For\n+// example, the string \"rw,relatime,fd=7\" will be parsed into a map like { \"rw\":\n+// \"\", \"relatime\": \"\", \"fd\": \"7\" }.\n+absl::flat_hash_map<std::string, std::string> ParseMountOptions(\n+ std::string mopts);\n+\n} // namespace testing\n} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Also report mount options through /proc/<pid>/mounts. PiperOrigin-RevId: 369967629
259,896
22.04.2021 16:04:40
25,200
0a6eaed50b83a35a687699aa5e871b80605c9f46
Add weirdness sentry metric. The weirdness metric contains fields to track the number of clock fallbacks, partial results, and vsyscalls. This single metric avoids the overhead of maintaining three separate metrics (fallbackMetric, partialResultMetric, vsyscallCount).
[ { "change_type": "MODIFY", "old_path": "pkg/metric/metric.go", "new_path": "pkg/metric/metric.go", "diff": "@@ -35,6 +35,11 @@ var (\n// ErrInitializationDone indicates that the caller tried to create a\n// new metric after initialization.\nErrInitializationDone = errors.New(\"metric cannot be created after initialization is complete\")\n+\n+ // WeirdnessMetric is a metric with fields created to track the number\n+ // of weird occurrences such as clock fallback, partial_result and\n+ // vsyscall count.\n+ WeirdnessMetric *Uint64Metric\n)\n// Uint64Metric encapsulates a uint64 that represents some kind of metric to be\n@@ -380,3 +385,16 @@ func EmitMetricUpdate() {\neventchannel.Emit(&m)\n}\n+\n+// CreateSentryMetrics creates the sentry metrics during kernel initialization.\n+func CreateSentryMetrics() {\n+ if WeirdnessMetric != nil {\n+ return\n+ }\n+\n+ WeirdnessMetric = MustCreateNewUint64Metric(\"/weirdness\", true /* sync */, \"Increment for weird occurrences of problems such as clock fallback, partial result and vsyscalls invoked in the sandbox\",\n+ Field{\n+ name: \"weirdness_type\",\n+ allowedValues: []string{\"fallback\", \"partial_result\", \"vsyscall_count\"},\n+ })\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/testutil/BUILD", "new_path": "pkg/sentry/fsimpl/testutil/BUILD", "diff": "@@ -17,6 +17,7 @@ go_library(\n\"//pkg/fspath\",\n\"//pkg/hostarch\",\n\"//pkg/memutil\",\n+ \"//pkg/metric\",\n\"//pkg/sentry/fsbridge\",\n\"//pkg/sentry/fsimpl/tmpfs\",\n\"//pkg/sentry/kernel\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/testutil/kernel.go", "new_path": "pkg/sentry/fsimpl/testutil/kernel.go", "diff": "@@ -25,6 +25,7 @@ import (\n\"gvisor.dev/gvisor/pkg/cpuid\"\n\"gvisor.dev/gvisor/pkg/fspath\"\n\"gvisor.dev/gvisor/pkg/memutil\"\n+ \"gvisor.dev/gvisor/pkg/metric\"\n\"gvisor.dev/gvisor/pkg/sentry/fsbridge\"\n\"gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n@@ -62,6 +63,8 @@ func Boot() (*kernel.Kernel, error) {\nreturn nil, fmt.Errorf(\"creating platform: %v\", err)\n}\n+ metric.CreateSentryMetrics()\n+\nkernel.VFS2Enabled = true\nk := &kernel.Kernel{\nPlatform: plat,\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_syscall.go", "new_path": "pkg/sentry/kernel/task_syscall.go", "diff": "@@ -285,6 +285,7 @@ func (*runSyscallExit) execute(t *Task) taskRunState {\n// task's next run state.\nfunc (t *Task) doVsyscall(addr hostarch.Addr, sysno uintptr) taskRunState {\nvsyscallCount.Increment()\n+ metric.WeirdnessMetric.Increment(\"vsyscall_count\")\n// Grab the caller up front, to make sure there's a sensible stack.\ncaller := t.Arch().Native(uintptr(0))\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/error.go", "new_path": "pkg/sentry/syscalls/linux/error.go", "diff": "@@ -39,6 +39,7 @@ var (\n// takes a variadic number of arguments.\nfunc incrementPartialResultMetric() {\npartialResultMetric.Increment()\n+ metric.WeirdnessMetric.Increment(\"partial_result\")\n}\n// HandleIOErrorVFS2 handles special error cases for partial results. 
For some\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/time/calibrated_clock.go", "new_path": "pkg/sentry/time/calibrated_clock.go", "diff": "@@ -103,6 +103,7 @@ func (c *CalibratedClock) resetLocked(str string, v ...interface{}) {\nc.ready = false\nc.ref.Reset()\nfallbackMetric.Increment()\n+ metric.WeirdnessMetric.Increment(\"fallback\")\n}\n// updateParams updates the timekeeping parameters based on the passed\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/BUILD", "new_path": "runsc/boot/BUILD", "diff": "@@ -38,6 +38,7 @@ go_library(\n\"//pkg/fspath\",\n\"//pkg/log\",\n\"//pkg/memutil\",\n+ \"//pkg/metric\",\n\"//pkg/rand\",\n\"//pkg/refs\",\n\"//pkg/refsvfs2\",\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -34,6 +34,7 @@ import (\n\"gvisor.dev/gvisor/pkg/fd\"\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/memutil\"\n+ \"gvisor.dev/gvisor/pkg/metric\"\n\"gvisor.dev/gvisor/pkg/rand\"\n\"gvisor.dev/gvisor/pkg/refs\"\n\"gvisor.dev/gvisor/pkg/refsvfs2\"\n@@ -217,6 +218,8 @@ func New(args Args) (*Loader, error) {\nreturn nil, fmt.Errorf(\"setting up memory usage: %v\", err)\n}\n+ metric.CreateSentryMetrics()\n+\n// Is this a VFSv2 kernel?\nif args.Conf.VFS2 {\nkernel.VFS2Enabled = true\n" } ]
Go
Apache License 2.0
google/gvisor
Add weirdness sentry metric. The weirdness metric contains fields to track the number of clock fallbacks, partial results, and vsyscalls. This single metric avoids the overhead of maintaining three separate metrics (fallbackMetric, partialResultMetric, vsyscallCount). PiperOrigin-RevId: 369970218
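The design note in the commit message above (one metric with an allow-listed field instead of three separate metrics) can be illustrated outside of gVisor. The sketch below is a hypothetical, standalone Go illustration of that idea — a single counter keyed by a restricted field value — and is not the actual gvisor.dev/gvisor/pkg/metric API; the real calls made by this commit are the MustCreateNewUint64Metric and WeirdnessMetric.Increment("...") calls visible in the diff. The names FieldedCounter, NewFieldedCounter, and Increment are invented for this sketch only. Restricting the field to a fixed allow-list keeps the metric's cardinality bounded, which is the property that lets one fielded counter stand in for several plain counters.

```go
// fieldedcounter_sketch.go — hypothetical illustration only, not gVisor code.
package main

import (
	"fmt"
	"sync/atomic"
)

// FieldedCounter is a minimal stand-in for a metric with one field whose
// values are restricted to a fixed allow-list, mirroring the "weirdness_type"
// field added by the commit above.
type FieldedCounter struct {
	counts map[string]*uint64
}

// NewFieldedCounter pre-creates one counter per allowed field value.
func NewFieldedCounter(allowed ...string) *FieldedCounter {
	c := &FieldedCounter{counts: make(map[string]*uint64, len(allowed))}
	for _, v := range allowed {
		c.counts[v] = new(uint64)
	}
	return c
}

// Increment bumps the counter for one field value; unknown values are rejected.
func (c *FieldedCounter) Increment(fieldValue string) error {
	p, ok := c.counts[fieldValue]
	if !ok {
		return fmt.Errorf("field value %q not in allow-list", fieldValue)
	}
	atomic.AddUint64(p, 1)
	return nil
}

func main() {
	// One metric with three field values replaces three separate metrics.
	weirdness := NewFieldedCounter("fallback", "partial_result", "vsyscall_count")
	_ = weirdness.Increment("vsyscall_count")
	_ = weirdness.Increment("fallback")
	for v, p := range weirdness.counts {
		fmt.Printf("weirdness[%s] = %d\n", v, atomic.LoadUint64(p))
	}
}
```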
259,992
22.04.2021 18:36:15
25,200
d1859fe179bae1614ba91a45497ad63400210863
Add mlock syscall test
[ { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -3,6 +3,8 @@ load(\"//test/runner:defs.bzl\", \"syscall_test\")\npackage(licenses = [\"notice\"])\n+# Please keep syscall tests ordered alphabetically by name.\n+\nsyscall_test(\ntest = \"//test/syscalls/linux:32bit_test\",\n)\n@@ -56,17 +58,7 @@ syscall_test(\n)\nsyscall_test(\n- test = \"//test/syscalls/linux:socket_test\",\n-)\n-\n-syscall_test(\n- test = \"//test/syscalls/linux:socket_capability_test\",\n-)\n-\n-syscall_test(\n- size = \"large\",\n- shard_count = most_shards,\n- test = \"//test/syscalls/linux:socket_stress_test\",\n+ test = \"//test/syscalls/linux:cgroup_test\",\n)\nsyscall_test(\n@@ -310,6 +302,10 @@ syscall_test(\ntest = \"//test/syscalls/linux:mknod_test\",\n)\n+syscall_test(\n+ test = \"//test/syscalls/linux:mlock_test\",\n+)\n+\nsyscall_test(\nsize = \"medium\",\nshard_count = more_shards,\n@@ -321,10 +317,6 @@ syscall_test(\ntest = \"//test/syscalls/linux:mount_test\",\n)\n-syscall_test(\n- test = \"//test/syscalls/linux:verity_mount_test\",\n-)\n-\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:mremap_test\",\n@@ -611,6 +603,10 @@ syscall_test(\ntest = \"//test/syscalls/linux:socket_abstract_test\",\n)\n+syscall_test(\n+ test = \"//test/syscalls/linux:socket_capability_test\",\n+)\n+\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:socket_domain_non_blocking_test\",\n@@ -779,6 +775,16 @@ syscall_test(\ntest = \"//test/syscalls/linux:socket_stream_nonblock_local_test\",\n)\n+syscall_test(\n+ size = \"large\",\n+ shard_count = most_shards,\n+ test = \"//test/syscalls/linux:socket_stress_test\",\n+)\n+\n+syscall_test(\n+ test = \"//test/syscalls/linux:socket_test\",\n+)\n+\nsyscall_test(\nflaky = 1, # NOTE(b/116636318): Large sendmsg may stall a long time.\nshard_count = more_shards,\n@@ -1003,5 +1009,5 @@ syscall_test(\n)\nsyscall_test(\n- test = \"//test/syscalls/linux:cgroup_test\",\n+ test = \"//test/syscalls/linux:verity_mount_test\",\n)\n" } ]
Go
Apache License 2.0
google/gvisor
Add mlock syscall test PiperOrigin-RevId: 369993733