Columns (one record per row below):
  hunk: dict
  file: string (length 0 to 11.8M)
  file_path: string (length 2 to 234)
  label: int64 (0 to 1)
  commit_url: string (length 74 to 103)
  dependency_score: sequence (length 5)
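Each record pairs a code-window hunk with a candidate file, a binary label, the source commit URL, and a five-element dependency_score vector. Below is a minimal sketch of iterating over such records, assuming they are stored as JSON Lines with the columns above; the file name and the field semantics noted in the comments are assumptions, not taken from the data itself.

import json

# "dependency_scores.jsonl" is a hypothetical file name; assumes one JSON
# object per line with the columns listed in the schema above.
with open("dependency_scores.jsonl") as f:
    for line in f:
        row = json.loads(line)
        hunk = row["hunk"]                # dict: code window, per-line labels, edit metadata
        candidate = row["file_path"]      # path of the candidate file for this hunk
        label = row["label"]              # 0 or 1 (presumably whether the file is a dependency of the hunk)
        scores = row["dependency_score"]  # list of 5 scores
        print(label, candidate, hunk.get("file_path"), scores[0])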
{ "id": 4, "code_window": [ "\t// release. This only happens as part of preparing the master branch for the\n", "\t// next release. The release team runbooks, at time of writing, reflect\n", "\t// this.\n", "\t//\n", "\t// Example invocation:\n", "\t// roachtest --local run generate-fixtures --debug --cockroach ./cockroach \\\n", "\t// --build-tag v22.1.0-beta.3 tag:fixtures\n", "\trunFixtures := func(\n", "\t\tctx context.Context,\n", "\t\tt test.Test,\n", "\t\tc cluster.Cluster,\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// FIXTURE_VERSION=v20.2.0-beta.1 roachtest --local run generate-fixtures --debug --cockroach ./cockroach tag:fixtures\n" ], "file_path": "pkg/cmd/roachtest/tests/fixtures.go", "type": "replace", "edit_start_line_idx": 51 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package tests import ( "context" gosql "database/sql" "fmt" "math/rand" "path/filepath" "runtime" "time" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/clusterupgrade" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/mixedversion" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/roachprod/install" "github.com/cockroachdb/cockroach/pkg/roachprod/logger" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/version" "github.com/stretchr/testify/require" ) type versionFeatureTest struct { name string statement string } // Feature tests that are invoked in mixed-version state during the // upgrade test. A gotcha is that these feature tests are also // invoked when the cluster is in the middle of upgrading -- i.e. a // state where the cluster version has already been bumped, but not // all nodes are aware). This should be considered a feature of this // test, and feature tests that flake because of it need to be fixed. var versionUpgradeTestFeatures = []versionFeatureTest{ // NB: the next four tests are ancient and supported since v2.0. { name: "ObjectAccess", statement: ` -- We should be able to successfully select from objects created in ancient -- versions of CRDB using their FQNs. Prevents bugs such as #43141, where -- databases created before a migration were inaccessible after the -- migration. -- -- NB: the data has been baked into the fixtures. Originally created via: -- create database persistent_db -- create table persistent_db.persistent_table(a int)")) -- on CRDB v1.0 select * from persistent_db.persistent_table; show tables from persistent_db; `, }, { name: "JSONB", statement: ` CREATE DATABASE IF NOT EXISTS test; CREATE TABLE test.t (j JSONB); DROP TABLE test.t; `, }, { name: "Sequences", statement: ` CREATE DATABASE IF NOT EXISTS test; CREATE SEQUENCE test.test_sequence; DROP SEQUENCE test.test_sequence; `, }, { name: "Computed Columns", statement: ` CREATE DATABASE IF NOT EXISTS test; CREATE TABLE test.t (x INT AS (3) STORED); DROP TABLE test.t; `, }, { name: "Split and Merge Ranges", statement: ` CREATE DATABASE IF NOT EXISTS splitmerge; CREATE TABLE splitmerge.t (k INT PRIMARY KEY); ALTER TABLE splitmerge.t SPLIT AT VALUES (1), (2), (3); ALTER TABLE splitmerge.t UNSPLIT AT VALUES (1), (2), (3); DROP TABLE splitmerge.t; `, }, } func runVersionUpgrade(ctx context.Context, t test.Test, c cluster.Cluster) { if c.IsLocal() && runtime.GOARCH == "arm64" { t.Skip("Skip under ARM64. 
See https://github.com/cockroachdb/cockroach/issues/89268") } c.Put(ctx, t.DeprecatedWorkload(), "./workload", c.All()) mvt := mixedversion.NewTest(ctx, t, t.L(), c, c.All()) mvt.OnStartup("setup schema changer workload", func(ctx context.Context, l *logger.Logger, r *rand.Rand, helper *mixedversion.Helper) error { // Execute the workload init. return c.RunE(ctx, c.All(), "./workload init schemachange") }) mvt.InMixedVersion("run backup", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { // Verify that backups can be created in various configurations. This is // important to test because changes in system tables might cause backups to // fail in mixed-version clusters. dest := fmt.Sprintf("nodelocal://1/%d", timeutil.Now().UnixNano()) return h.Exec(rng, `BACKUP TO $1`, dest) }) mvt.InMixedVersion( "test features", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { for _, featureTest := range versionUpgradeTestFeatures { l.Printf("running feature test %q", featureTest.name) if err := h.Exec(rng, featureTest.statement); err != nil { l.Printf("%q: ERROR (%s)", featureTest.name, err) return err } l.Printf("%q: OK", featureTest.name) } return nil }, ) mvt.InMixedVersion( "test schema change step", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { l.Printf("running schema workload step") runCmd := roachtestutil.NewCommand("./workload run schemachange").Flag("verbose", 1).Flag("max-ops", 10).Flag("concurrency", 2).Arg("{pgurl:1-%d}", len(c.All())) randomNode := h.RandomNode(rng, c.All()) return c.RunE(ctx, option.NodeListOption{randomNode}, runCmd.String()) }, ) mvt.AfterUpgradeFinalized( "check if GC TTL is pinned", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { // TODO(irfansharif): This can be removed when the predecessor version // in this test is v23.1, where the default is 4h. This test was only to // make sure that existing clusters that upgrade to 23.1 retained their // existing GC TTL. l.Printf("checking if GC TTL is pinned to 24h") var ttlSeconds int query := ` SELECT (crdb_internal.pb_to_json('cockroach.config.zonepb.ZoneConfig', raw_config_protobuf)->'gc'->'ttlSeconds')::INT FROM crdb_internal.zones WHERE target = 'RANGE default' LIMIT 1 ` if err := h.QueryRow(rng, query).Scan(&ttlSeconds); err != nil { return fmt.Errorf("error querying GC TTL: %w", err) } expectedTTL := 24 * 60 * 60 // NB: 24h is what's used in the fixture if ttlSeconds != expectedTTL { return fmt.Errorf("unexpected GC TTL: actual (%d) != expected (%d)", ttlSeconds, expectedTTL) } return nil }, ) mvt.Run() } func (u *versionUpgradeTest) run(ctx context.Context, t test.Test) { defer func() { for _, db := range u.conns { _ = db.Close() } }() for i, step := range u.steps { if step != nil { t.Status(fmt.Sprintf("versionUpgradeTest: starting step %d", i+1)) step(ctx, t, u) } } } type versionUpgradeTest struct { goOS string c cluster.Cluster steps []versionStep // Cache conns because opening one takes hundreds of ms, and we do it quite // a lot. conns []*gosql.DB } func newVersionUpgradeTest(c cluster.Cluster, steps ...versionStep) *versionUpgradeTest { return &versionUpgradeTest{ goOS: ifLocal(c, runtime.GOOS, "linux"), c: c, steps: steps, } } // Return a cached conn to the given node. Don't call .Close(), the test harness // will do it. 
func (u *versionUpgradeTest) conn(ctx context.Context, t test.Test, i int) *gosql.DB { if u.conns == nil { for _, i := range u.c.All() { u.conns = append(u.conns, u.c.Conn(ctx, t.L(), i)) } } db := u.conns[i-1] // Run a trivial query to shake out errors that can occur when the server has // restarted in the meantime. _ = db.PingContext(ctx) return db } // uploadVersion is a thin wrapper around // `clusterupgrade.UploadVersion` that calls t.Fatal if that call // returns an error func uploadVersion( ctx context.Context, t test.Test, c cluster.Cluster, nodes option.NodeListOption, newVersion string, ) string { path, err := clusterupgrade.UploadVersion(ctx, t, t.L(), c, nodes, newVersion) if err != nil { t.Fatal(err) } return path } // upgradeNodes is a thin wrapper around // `clusterupgrade.RestartNodesWithNewBinary` that calls t.Fatal if // that call returns an errror. func upgradeNodes( ctx context.Context, t test.Test, c cluster.Cluster, nodes option.NodeListOption, startOpts option.StartOpts, newVersion string, ) { if err := clusterupgrade.RestartNodesWithNewBinary( ctx, t, t.L(), c, nodes, startOpts, newVersion, ); err != nil { t.Fatal(err) } } func (u *versionUpgradeTest) binaryVersion( ctx context.Context, t test.Test, i int, ) roachpb.Version { db := u.conn(ctx, t, i) v, err := clusterupgrade.BinaryVersion(db) if err != nil { t.Fatal(err) } return v } // versionStep is an isolated version migration on a running cluster. type versionStep func(ctx context.Context, t test.Test, u *versionUpgradeTest) func uploadAndStartFromCheckpointFixture(nodes option.NodeListOption, v string) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { if err := clusterupgrade.InstallFixtures(ctx, t.L(), u.c, nodes, v); err != nil { t.Fatal(err) } binary := uploadVersion(ctx, t, u.c, nodes, v) startOpts := option.DefaultStartOpts() // NB: can't start sequentially since cluster already bootstrapped. startOpts.RoachprodOpts.Sequential = false clusterupgrade.StartWithBinary(ctx, t.L(), u.c, nodes, binary, startOpts) } } func uploadAndStart(nodes option.NodeListOption, v string) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { binary := uploadVersion(ctx, t, u.c, nodes, v) startOpts := option.DefaultStartOpts() // NB: can't start sequentially since cluster already bootstrapped. startOpts.RoachprodOpts.Sequential = false clusterupgrade.StartWithBinary(ctx, t.L(), u.c, nodes, binary, startOpts) } } // binaryUpgradeStep rolling-restarts the given nodes into the new binary // version. Note that this does *not* wait for the cluster version to upgrade. // Use a waitForUpgradeStep() for that. 
func binaryUpgradeStep(nodes option.NodeListOption, newVersion string) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { if err := clusterupgrade.RestartNodesWithNewBinary( ctx, t, t.L(), u.c, nodes, option.DefaultStartOpts(), newVersion, ); err != nil { t.Fatal(err) } } } func preventAutoUpgradeStep(node int) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { db := u.conn(ctx, t, node) _, err := db.ExecContext(ctx, `SET CLUSTER SETTING cluster.preserve_downgrade_option = $1`, u.binaryVersion(ctx, t, node).String()) if err != nil { t.Fatal(err) } } } func allowAutoUpgradeStep(node int) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { db := u.conn(ctx, t, node) _, err := db.ExecContext(ctx, `RESET CLUSTER SETTING cluster.preserve_downgrade_option`) if err != nil { t.Fatal(err) } } } // NB: this is intentionally kept separate from binaryUpgradeStep because we run // feature tests between the steps, and we want to expose them (at least // heuristically) to the real-world situation in which some nodes have already // learned of a cluster version bump (from Gossip) where others haven't. This // situation tends to exhibit unexpected behavior. func waitForUpgradeStep(nodes option.NodeListOption) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { dbFunc := func(node int) *gosql.DB { return u.conn(ctx, t, node) } if err := clusterupgrade.WaitForClusterUpgrade(ctx, t.L(), nodes, dbFunc); err != nil { t.Fatal(err) } } } // makeVersionFixtureAndFatal creates fixtures from which we can test // mixed-version clusters (i.e. version X mixing with X-1). The fixtures date // back all the way to v1.0; when development begins on version X, we make a // fixture for version X-1 by running a starting the version X-2 cluster from // the X-2 fixtures, upgrading it to version X-1, and copy the resulting store // directories to the log directories (which are part of the artifacts). The // test will then fail on purpose when it's done with instructions on where to // move the files. func makeVersionFixtureAndFatal( ctx context.Context, t test.Test, c cluster.Cluster, makeFixtureVersion string, ) { var useLocalBinary bool if makeFixtureVersion == "" { c.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings(), c.Node(1)) require.NoError(t, c.Conn(ctx, t.L(), 1).QueryRowContext( ctx, `select regexp_extract(value, '^v([0-9]+\.[0-9]+\.[0-9]+)') from crdb_internal.node_build_info where field = 'Version';`, ).Scan(&makeFixtureVersion)) c.Wipe(ctx, c.Node(1)) useLocalBinary = true } predecessorVersion, err := version.PredecessorVersion(*version.MustParse("v" + makeFixtureVersion)) if err != nil { t.Fatal(err) } t.L().Printf("making fixture for %s (starting at %s)", makeFixtureVersion, predecessorVersion) if useLocalBinary { // Make steps below use the main cockroach binary (in particular, don't try // to download the released version for makeFixtureVersion which may not yet // exist) makeFixtureVersion = "" } newVersionUpgradeTest(c, // Start the cluster from a fixture. That fixture's cluster version may // be at the predecessor version (though in practice it's fully up to // date, if it was created via the checkpointer above), so add a // waitForUpgradeStep to make sure we're upgraded all the way before // moving on. // // See the comment on createCheckpoints for details on fixtures. 
uploadAndStartFromCheckpointFixture(c.All(), predecessorVersion), waitForUpgradeStep(c.All()), // NB: at this point, cluster and binary version equal predecessorVersion, // and auto-upgrades are on. binaryUpgradeStep(c.All(), makeFixtureVersion), waitForUpgradeStep(c.All()), func(ctx context.Context, t test.Test, u *versionUpgradeTest) { // If we're taking checkpoints, momentarily stop the cluster (we // need to do that to get the checkpoints to reflect a // consistent cluster state). The binary at this point will be // the new one, but the cluster version was not explicitly // bumped, though auto-update may have taken place already. // For example, if newVersion is 2.1, the cluster version in // the store directories may be 2.0 on some stores and 2.1 on // the others (though if any are on 2.1, then that's what's // stored in system.settings). // This means that when we restart from that version, we're // going to want to use the binary mentioned in the checkpoint, // or at least one compatible with the *predecessor* of the // checkpoint version. For example, for checkpoint-2.1, the // cluster version might be 2.0, so we can only use the 2.0 or // 2.1 binary, but not the 19.1 binary (as 19.1 and 2.0 are not // compatible). name := clusterupgrade.CheckpointName(u.binaryVersion(ctx, t, 1).String()) u.c.Stop(ctx, t.L(), option.DefaultStopOpts(), c.All()) binaryPath := clusterupgrade.BinaryPathFromVersion(makeFixtureVersion) c.Run(ctx, c.All(), binaryPath, "debug", "pebble", "db", "checkpoint", "{store-dir}", "{store-dir}/"+name) // The `cluster-bootstrapped` marker can already be found within // store-dir, but the rocksdb checkpoint step above does not pick it // up as it isn't recognized by RocksDB. We copy the marker // manually, it's necessary for roachprod created clusters. See // #54761. c.Run(ctx, c.Node(1), "cp", "{store-dir}/cluster-bootstrapped", "{store-dir}/"+name) // Similar to the above - newer versions require the min version file to open a store. c.Run(ctx, c.Node(1), "cp", fmt.Sprintf("{store-dir}/%s", storage.MinVersionFilename), "{store-dir}/"+name) c.Run(ctx, c.All(), "tar", "-C", "{store-dir}/"+name, "-czf", "{log-dir}/"+name+".tgz", ".") t.Fatalf(`successfully created checkpoints; failing test on purpose. Invoke the following to move the archives to the right place and commit the result: for i in 1 2 3 4; do mkdir -p pkg/cmd/roachtest/fixtures/${i} && \ mv artifacts/generate-fixtures/run_1/logs/${i}.unredacted/checkpoint-*.tgz \ pkg/cmd/roachtest/fixtures/${i}/ done `) }).run(ctx, t) } // importTPCCStep runs a TPCC import import on the first crdbNode (monitoring them all for // crashes during the import). If oldV is nil, this runs the import using the specified // version (for example "19.2.1", as provided by PredecessorVersion()) using the location // used by c.Stage(). An empty oldV uses the main cockroach binary. func importTPCCStep( oldV string, headroomWarehouses int, crdbNodes option.NodeListOption, ) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { // We need to use the predecessor binary to load into the // predecessor cluster to avoid random breakage. For example, you // can't use 21.1 to import into 20.2 due to some flag changes. // // TODO(tbg): also import a large dataset (for example 2TB bank) // that will provide cold data that may need to be migrated. 
var cmd string if oldV == "" { cmd = tpccImportCmd(headroomWarehouses) } else { cmd = tpccImportCmdWithCockroachBinary(filepath.Join("v"+oldV, "cockroach"), headroomWarehouses, "--checks=false") } // Use a monitor so that we fail cleanly if the cluster crashes // during import. m := u.c.NewMonitor(ctx, crdbNodes) m.Go(func(ctx context.Context) error { return u.c.RunE(ctx, u.c.Node(crdbNodes[0]), cmd) }) m.Wait() } } func importLargeBankStep(oldV string, rows int, crdbNodes option.NodeListOption) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { // Use the predecessor binary to load into the predecessor // cluster to avoid random breakage due to flag changes, etc. binary := "./cockroach" if oldV != "" { binary = filepath.Join("v"+oldV, "cockroach") } // Use a monitor so that we fail cleanly if the cluster crashes // during import. m := u.c.NewMonitor(ctx, crdbNodes) m.Go(func(ctx context.Context) error { return u.c.RunE(ctx, u.c.Node(crdbNodes[0]), binary, "workload", "fixtures", "import", "bank", "--payload-bytes=10240", "--rows="+fmt.Sprint(rows), "--seed=4", "--db=bigbank") }) m.Wait() } } func sleepStep(d time.Duration) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { time.Sleep(d) } }
pkg/cmd/roachtest/tests/versionupgrade.go
1
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.0028766817413270473, 0.0003846268227789551, 0.00016291152860503644, 0.00022610736778005958, 0.0004306182381696999 ]
{ "id": 4, "code_window": [ "\t// release. This only happens as part of preparing the master branch for the\n", "\t// next release. The release team runbooks, at time of writing, reflect\n", "\t// this.\n", "\t//\n", "\t// Example invocation:\n", "\t// roachtest --local run generate-fixtures --debug --cockroach ./cockroach \\\n", "\t// --build-tag v22.1.0-beta.3 tag:fixtures\n", "\trunFixtures := func(\n", "\t\tctx context.Context,\n", "\t\tt test.Test,\n", "\t\tc cluster.Cluster,\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// FIXTURE_VERSION=v20.2.0-beta.1 roachtest --local run generate-fixtures --debug --cockroach ./cockroach tag:fixtures\n" ], "file_path": "pkg/cmd/roachtest/tests/fixtures.go", "type": "replace", "edit_start_line_idx": 51 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colexec import ( "context" "fmt" "math" "testing" "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldatatestutils" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecagg" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/util/duration" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/randutil" "github.com/cockroachdb/cockroach/pkg/util/timeofday" "github.com/stretchr/testify/require" ) type aggregatorTestCase struct { name string typs []*types.T input colexectestutils.Tuples groupCols []uint32 aggCols [][]uint32 aggFns []execinfrapb.AggregatorSpec_Func expected colexectestutils.Tuples constArguments [][]execinfrapb.Expression // spec will be populated during init(). spec *execinfrapb.AggregatorSpec aggDistinct []bool aggFilter []int unorderedInput bool orderedCols []uint32 // convToDecimal will convert any float64s to apd.Decimals. If a string is // encountered, a best effort is made to convert that string to an // apd.Decimal. convToDecimal bool } type ordering int64 const ( ordered ordering = iota partial unordered ) // aggType is a helper struct that allows tests to test both the ordered and // hash aggregators at the same time. type aggType struct { new func(context.Context, *colexecagg.NewAggregatorArgs) colexecop.ResettableOperator name string order ordering } var aggTypesWithPartial = []aggType{ { // This is a wrapper around NewHashAggregator so its signature is // compatible with NewOrderedAggregator. new: func(ctx context.Context, args *colexecagg.NewAggregatorArgs) colexecop.ResettableOperator { return NewHashAggregator( ctx, &colexecagg.NewHashAggregatorArgs{ NewAggregatorArgs: args, HashTableAllocator: testAllocator, OutputUnlimitedAllocator: testAllocator, MaxOutputBatchMemSize: math.MaxInt64, }, nil, /* newSpillingQueueArgs */ ) }, name: "hash", order: unordered, }, { new: NewOrderedAggregator, name: "ordered", order: ordered, }, { // This is a wrapper around NewHashAggregator so its signature is // compatible with NewOrderedAggregator. 
new: func(ctx context.Context, args *colexecagg.NewAggregatorArgs) colexecop.ResettableOperator { return NewHashAggregator( ctx, &colexecagg.NewHashAggregatorArgs{ NewAggregatorArgs: args, HashTableAllocator: testAllocator, OutputUnlimitedAllocator: testAllocator, MaxOutputBatchMemSize: math.MaxInt64, }, nil, /* newSpillingQueueArgs */ ) }, name: "hash-partial-order", order: partial, }, } var aggTypes = aggTypesWithPartial[:2] func (tc *aggregatorTestCase) init() error { if tc.convToDecimal { for _, tuples := range []colexectestutils.Tuples{tc.input, tc.expected} { for _, tuple := range tuples { for i, e := range tuple { switch v := e.(type) { case float64: d := &apd.Decimal{} d, err := d.SetFloat64(v) if err != nil { return err } tuple[i] = *d case string: d := &apd.Decimal{} d, _, err := d.SetString(v) if err != nil { // If there was an error converting the string to decimal, just // leave the datum as is. continue } tuple[i] = *d } } } } } aggregations := make([]execinfrapb.AggregatorSpec_Aggregation, len(tc.aggFns)) for i, aggFn := range tc.aggFns { aggregations[i].Func = aggFn aggregations[i].ColIdx = tc.aggCols[i] if tc.constArguments != nil { aggregations[i].Arguments = tc.constArguments[i] } if tc.aggDistinct != nil { aggregations[i].Distinct = tc.aggDistinct[i] } if tc.aggFilter != nil && tc.aggFilter[i] != tree.NoColumnIdx { filterColIdx := uint32(tc.aggFilter[i]) aggregations[i].FilterColIdx = &filterColIdx } } tc.spec = &execinfrapb.AggregatorSpec{ GroupCols: tc.groupCols, Aggregations: aggregations, } if !tc.unorderedInput { var outputOrderCols []uint32 if len(tc.orderedCols) == 0 { outputOrderCols = tc.spec.GroupCols } else { outputOrderCols = tc.orderedCols tc.spec.OrderedGroupCols = tc.orderedCols } // If input grouping columns have an ordering, then we'll require the // output to also have the same ordering. 
outputOrdering := execinfrapb.Ordering{Columns: make([]execinfrapb.Ordering_Column, len(outputOrderCols))} for i, col := range outputOrderCols { outputOrdering.Columns[i].ColIdx = col } tc.spec.OutputOrdering = outputOrdering } return nil } var aggregatorsTestCases = []aggregatorTestCase{ { name: "OneTuple", typs: types.TwoIntCols, input: colexectestutils.Tuples{ {0, 1}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {0, 1}, }, }, { name: "OneGroup", typs: types.TwoIntCols, input: colexectestutils.Tuples{ {0, 1}, {0, 1}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {0, 2}, }, }, { name: "MultiGroup", typs: types.TwoIntCols, input: colexectestutils.Tuples{ {0, 1}, {0, 0}, {0, 1}, {1, 4}, {2, 5}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {0, 2}, {1, 4}, {2, 5}, }, }, { name: "CarryBetweenInputBatches", typs: types.TwoIntCols, input: colexectestutils.Tuples{ {0, 1}, {0, 2}, {0, 3}, {1, 4}, {1, 5}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {0, 6}, {1, 9}, }, }, { name: "CarryBetweenOutputBatches", typs: types.TwoIntCols, input: colexectestutils.Tuples{ {0, 1}, {0, 2}, {0, 3}, {0, 4}, {1, 5}, {2, 6}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {0, 10}, {1, 5}, {2, 6}, }, }, { name: "CarryBetweenInputAndOutputBatches", typs: types.TwoIntCols, input: colexectestutils.Tuples{ {0, 1}, {0, 1}, {1, 2}, {2, 3}, {2, 3}, {3, 4}, {3, 4}, {4, 5}, {5, 6}, {6, 7}, {7, 8}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {0, 2}, {1, 2}, {2, 6}, {3, 8}, {4, 5}, {5, 6}, {6, 7}, {7, 8}, }, }, { name: "NoGroupingCols", typs: types.TwoIntCols, input: colexectestutils.Tuples{ {0, 1}, {0, 2}, {0, 3}, {0, 4}, }, groupCols: []uint32{}, aggCols: [][]uint32{{0}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {0, 10}, }, }, { name: "UnorderedWithNullsInGroupingCol", typs: types.TwoIntCols, input: colexectestutils.Tuples{ {nil, 1}, {4, 42}, {nil, 2}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {nil, 3}, {4, 42}, }, unorderedInput: true, }, { name: "CountRows", typs: types.OneIntCol, input: colexectestutils.Tuples{ {1}, {2}, {1}, {nil}, {3}, {1}, {3}, {4}, {1}, {nil}, {2}, {4}, {2}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.CountRows, }, expected: colexectestutils.Tuples{ {nil, 2}, {1, 4}, {2, 3}, {3, 2}, {4, 2}, }, unorderedInput: true, }, { name: "OutputOrder", typs: types.ThreeIntCols, input: colexectestutils.Tuples{ {0, 1, 2}, {0, 1, 2}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {2}, 
{1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.SumInt, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {0, 4, 2}, }, }, { name: "SumMultiType", typs: []*types.T{types.Int, types.Int, types.Decimal}, input: colexectestutils.Tuples{ {0, 1, 1.3}, {0, 1, 1.6}, {0, 1, 0.5}, {1, 1, 1.2}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {2}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.Sum, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {0, 3.4, 3}, {1, 1.2, 1}, }, convToDecimal: true, }, { name: "AvgSumSingleInputBatch", typs: []*types.T{types.Int, types.Decimal}, input: colexectestutils.Tuples{ {0, 1.1}, {0, 1.2}, {0, 2.3}, {1, 6.21}, {1, 2.43}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {1}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.Avg, execinfrapb.Sum, }, expected: colexectestutils.Tuples{ {0, "1.5333333333333333333", 4.6}, {1, "4.3200000000000000000", 8.64}, }, convToDecimal: true, }, { name: "BoolAndOrBatch", typs: []*types.T{types.Int, types.Bool}, input: colexectestutils.Tuples{ {0, true}, {1, false}, {2, true}, {2, false}, {3, true}, {3, true}, {4, false}, {4, false}, {5, false}, {5, nil}, {6, nil}, {6, true}, {7, nil}, {7, false}, {7, true}, {8, nil}, {8, nil}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {1}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.BoolAnd, execinfrapb.BoolOr, }, expected: colexectestutils.Tuples{ {0, true, true}, {1, false, false}, {2, false, true}, {3, true, true}, {4, false, false}, {5, false, false}, {6, true, true}, {7, false, true}, {8, nil, nil}, }, }, { name: "MultiGroupColsWithPointerTypes", typs: []*types.T{types.Int, types.Decimal, types.Bytes, types.Decimal}, input: colexectestutils.Tuples{ {2, 1.0, "1.0", 2.0}, {2, 1.0, "1.0", 4.0}, {2, 2.0, "2.0", 6.0}, }, groupCols: []uint32{0, 1, 2}, aggCols: [][]uint32{{0}, {1}, {2}, {3}, {3}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.AnyNotNull, execinfrapb.AnyNotNull, execinfrapb.Min, execinfrapb.Sum, }, expected: colexectestutils.Tuples{ {2, 1.0, "1.0", 2.0, 6.0}, {2, 2.0, "2.0", 6.0, 6.0}, }, }, { name: "GroupOnTimeTZColumns", typs: []*types.T{types.TimeTZ, types.Int}, input: colexectestutils.Tuples{ {tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 0), -1}, {tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 1), 1}, {tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 1), 2}, {tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 2), 10}, {tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 2), 11}, {tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 3), 100}, {tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 3), 101}, {tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 4), 102}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 0), -1}, {tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 1), 3}, {tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 2), 21}, {tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 3), 201}, {tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 4), 102}, }, }, { name: "AVG on all types", typs: []*types.T{types.Int, types.Int2, types.Int4, types.Int, types.Decimal, types.Float, types.Interval}, input: colexectestutils.Tuples{ {0, nil, 1, 1, 1.0, 1.0, duration.MakeDuration(1, 1, 1)}, {0, 1, nil, 2, 2.0, 2.0, 
duration.MakeDuration(2, 2, 2)}, {0, 2, 2, nil, 3.0, 3.0, duration.MakeDuration(3, 3, 3)}, {0, 3, 3, 3, nil, 4.0, duration.MakeDuration(4, 4, 4)}, {0, 4, 4, 4, 4.0, nil, duration.MakeDuration(5, 5, 5)}, {0, 5, 5, 5, 5.0, 5.0, nil}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {1}, {2}, {3}, {4}, {5}, {6}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.Avg, execinfrapb.Avg, execinfrapb.Avg, execinfrapb.Avg, execinfrapb.Avg, execinfrapb.Avg, }, expected: colexectestutils.Tuples{ {0, 3.0, 3.0, 3.0, 3.0, 3.0, duration.MakeDuration(3, 3, 3)}, }, }, { name: "ConcatAgg", typs: []*types.T{types.Int, types.Bytes}, input: colexectestutils.Tuples{ {1, "1"}, {1, "2"}, {1, "3"}, {2, nil}, {2, "1"}, {2, "2"}, {3, "1"}, {3, nil}, {3, "2"}, {4, nil}, {4, nil}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.ConcatAgg, }, expected: colexectestutils.Tuples{ {1, "123"}, {2, "12"}, {3, "12"}, {4, nil}, }, }, { name: "All", typs: []*types.T{types.Int, types.Decimal, types.Int, types.Bool, types.Bytes}, input: colexectestutils.Tuples{ {0, 3.1, 2, true, "zero"}, {0, 1.1, 3, false, "zero"}, {1, 1.1, 1, false, "one"}, {1, 4.1, 0, false, "one"}, {2, 1.1, 1, true, "two"}, {3, 4.1, 0, false, "three"}, {3, 5.1, 0, true, "three"}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {}, {1}, {1}, {1}, {2}, {2}, {2}, {3}, {3}, {4}, {4}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.CountRows, execinfrapb.Avg, execinfrapb.Count, execinfrapb.Sum, execinfrapb.SumInt, execinfrapb.Min, execinfrapb.Max, execinfrapb.BoolAnd, execinfrapb.BoolOr, execinfrapb.AnyNotNull, execinfrapb.ConcatAgg, }, expected: colexectestutils.Tuples{ {0, 2, "2.1000000000000000000", 2, 4.2, 5, 2, 3, false, true, "zero", "zerozero"}, {1, 2, "2.6000000000000000000", 2, 5.2, 1, 0, 1, false, false, "one", "oneone"}, {2, 1, "1.1000000000000000000", 1, 1.1, 1, 1, 1, true, true, "two", "two"}, {3, 2, "4.6000000000000000000", 2, 9.2, 0, 0, 0, false, true, "three", "threethree"}, }, convToDecimal: true, }, { name: "NullHandling", typs: []*types.T{types.Int, types.Decimal, types.Int, types.Bool, types.Bytes}, input: colexectestutils.Tuples{ {nil, 1.1, 4, true, "a"}, {0, nil, nil, nil, nil}, {0, 3.1, 5, nil, "b"}, {1, nil, nil, nil, nil}, {1, nil, nil, false, nil}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {}, {1}, {1}, {1}, {1}, {2}, {2}, {2}, {3}, {3}, {4}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.CountRows, execinfrapb.AnyNotNull, execinfrapb.Count, execinfrapb.Sum, execinfrapb.Avg, execinfrapb.SumInt, execinfrapb.Min, execinfrapb.Max, execinfrapb.BoolAnd, execinfrapb.BoolOr, execinfrapb.ConcatAgg, }, expected: colexectestutils.Tuples{ {nil, 1, 1.1, 1, 1.1, "1.1000000000000000000", 4, 4, 4, true, true, "a"}, {0, 2, 3.1, 1, 3.1, "3.1000000000000000000", 5, 5, 5, nil, nil, "b"}, {1, 2, nil, 0, nil, nil, nil, nil, nil, false, false, nil}, }, convToDecimal: true, }, { name: "DistinctAggregation", typs: types.TwoIntCols, input: colexectestutils.Tuples{ {0, 1}, {0, 2}, {0, 2}, {0, nil}, {0, 1}, {0, nil}, {1, 1}, {1, 2}, {1, 2}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {1}, {1}, {1}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.Count, execinfrapb.Count, execinfrapb.SumInt, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {0, 4, 2, 6, 3}, {1, 3, 2, 5, 3}, }, aggDistinct: []bool{false, false, true, 
false, true}, }, { name: "FilteringAggregation", typs: []*types.T{types.Int, types.Int, types.Bool}, input: colexectestutils.Tuples{ {0, 1, false}, {0, 2, true}, {0, 2, true}, {0, nil, nil}, {0, 1, nil}, {0, nil, true}, {1, 1, true}, {1, 2, nil}, {1, 2, true}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.CountRows, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {0, 3, 4}, {1, 2, 3}, }, aggFilter: []int{tree.NoColumnIdx, 2, 2}, }, { name: "AllGroupsFilteredOut", typs: []*types.T{types.Int, types.Int, types.Bool}, input: colexectestutils.Tuples{ {0, 1, false}, {0, nil, nil}, {0, 2, false}, {1, 1, true}, {1, 2, nil}, {1, 2, true}, {2, 1, false}, {2, nil, nil}, {2, 2, nil}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.CountRows, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {0, 0, nil}, {1, 2, 3}, {2, 0, nil}, }, aggFilter: []int{tree.NoColumnIdx, 2, 2}, }, { name: "DistinctFilteringAggregation", typs: []*types.T{types.Int, types.Int, types.Bool}, input: colexectestutils.Tuples{ {0, 1, false}, {0, 2, true}, {0, 2, true}, {0, nil, nil}, {0, 1, nil}, {0, nil, true}, {1, 1, true}, {1, 2, nil}, {1, 2, true}, }, groupCols: []uint32{0}, aggCols: [][]uint32{{0}, {1}, {1}, {1}, {1}, {1}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.AnyNotNull, execinfrapb.Count, execinfrapb.Count, execinfrapb.Count, execinfrapb.SumInt, execinfrapb.SumInt, execinfrapb.SumInt, }, expected: colexectestutils.Tuples{ {0, 2, 2, 1, 4, 3, 2}, {1, 2, 2, 2, 3, 3, 3}, }, aggDistinct: []bool{false, false, true, true, false, true, true}, aggFilter: []int{tree.NoColumnIdx, 2, tree.NoColumnIdx, 2, 2, tree.NoColumnIdx, 2}, }, } func init() { for i := range aggregatorsTestCases { if err := aggregatorsTestCases[i].init(); err != nil { colexecerror.InternalError(err) } } } func TestAggregators(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) evalCtx := eval.MakeTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(context.Background()) ctx := context.Background() for _, tc := range aggregatorsTestCases { constructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations( ctx, &evalCtx, nil /* semaCtx */, tc.spec.Aggregations, tc.typs, ) require.NoError(t, err) for _, agg := range aggTypes { if tc.unorderedInput && agg.order == ordered { // This test case has unordered input, so we skip ordered // aggregator. continue } if agg.order == ordered && tc.aggFilter != nil { // Filtering aggregation is only supported with hash aggregator. 
continue } log.Infof(ctx, "%s/%s", tc.name, agg.name) verifier := colexectestutils.OrderedVerifier if tc.unorderedInput { verifier = colexectestutils.UnorderedVerifier } colexectestutils.RunTestsWithTyps(t, testAllocator, []colexectestutils.Tuples{tc.input}, [][]*types.T{tc.typs}, tc.expected, verifier, func(input []colexecop.Operator) (colexecop.Operator, error) { return agg.new(ctx, &colexecagg.NewAggregatorArgs{ Allocator: testAllocator, MemAccount: testMemAcc, Input: input[0], InputTypes: tc.typs, Spec: tc.spec, EvalCtx: &evalCtx, Constructors: constructors, ConstArguments: constArguments, OutputTypes: outputTypes, }), nil }) } } } func TestAggregatorRandom(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) evalCtx := eval.MakeTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(context.Background()) // This test aggregates random inputs, keeping track of the expected results // to make sure the aggregations are correct. rng, _ := randutil.NewTestRand() for _, groupSize := range []int{1, 2, coldata.BatchSize() / 4, coldata.BatchSize() / 2} { if groupSize == 0 { // We might be varying coldata.BatchSize() so that when it is divided by // 4, groupSize is 0. We want to skip such configuration. continue } for _, numInputBatches := range []int{1, 2, 64} { for _, hasNulls := range []bool{true, false} { for _, agg := range aggTypesWithPartial { log.Infof(context.Background(), "%s/groupSize=%d/numInputBatches=%d/hasNulls=%t", agg.name, groupSize, numInputBatches, hasNulls) nTuples := coldata.BatchSize() * numInputBatches typs := []*types.T{types.Int, types.Float} cols := []coldata.Vec{ testAllocator.NewMemColumn(typs[0], nTuples), testAllocator.NewMemColumn(typs[1], nTuples), } if agg.order == partial { typs = append(typs, types.Int) cols = append(cols, testAllocator.NewMemColumn(typs[2], nTuples)) } groups, aggCol, aggColNulls := cols[0].Int64(), cols[1].Float64(), cols[1].Nulls() expectedTuples := colexectestutils.Tuples{} var expRowCounts, expCounts []int64 var expSums, expMins, expMaxs []float64 // SUM, MIN, MAX, and AVG aggregators can output null. var expNulls []bool curGroup := -1 for i := range groups { if i%groupSize == 0 { if curGroup != -1 { if expNulls[curGroup] { expectedTuples = append(expectedTuples, colexectestutils.Tuple{ expRowCounts[curGroup], expCounts[curGroup], nil, nil, nil, nil, }) } else { expectedTuples = append(expectedTuples, colexectestutils.Tuple{ expRowCounts[curGroup], expCounts[curGroup], expSums[curGroup], expMins[curGroup], expMaxs[curGroup], expSums[curGroup] / float64(expCounts[curGroup]), }) } } expRowCounts = append(expRowCounts, 0) expCounts = append(expCounts, 0) expSums = append(expSums, 0) expMins = append(expMins, 2048) expMaxs = append(expMaxs, -2048) expNulls = append(expNulls, true) curGroup++ } // Keep the inputs small so they are a realistic size. Using a // large range is not realistic and makes decimal operations // slower. aggCol[i] = 2048 * (rng.Float64() - 0.5) // NULL values contribute to the row count, so we're updating // the row counts outside of the if block. expRowCounts[curGroup]++ if hasNulls && rng.Float64() < 0.1 { aggColNulls.SetNull(i) } else { expNulls[curGroup] = false expCounts[curGroup]++ expSums[curGroup] += aggCol[i] expMins[curGroup] = min64(aggCol[i], expMins[curGroup]) expMaxs[curGroup] = max64(aggCol[i], expMaxs[curGroup]) } groups[i] = int64(curGroup) } // Add result for last group. 
if expNulls[curGroup] { expectedTuples = append(expectedTuples, colexectestutils.Tuple{ expRowCounts[curGroup], expCounts[curGroup], nil, nil, nil, nil, }) } else { expectedTuples = append(expectedTuples, colexectestutils.Tuple{ expRowCounts[curGroup], expCounts[curGroup], expSums[curGroup], expMins[curGroup], expMaxs[curGroup], expSums[curGroup] / float64(expCounts[curGroup]), }) } source := colexectestutils.NewChunkingBatchSource(testAllocator, typs, cols, nTuples) tc := aggregatorTestCase{ typs: typs, groupCols: []uint32{0}, aggCols: [][]uint32{{}, {1}, {1}, {1}, {1}, {1}}, aggFns: []execinfrapb.AggregatorSpec_Func{ execinfrapb.CountRows, execinfrapb.Count, execinfrapb.Sum, execinfrapb.Min, execinfrapb.Max, execinfrapb.Avg, }, } if agg.order == partial { tc.groupCols = []uint32{0, 2} tc.orderedCols = []uint32{0} } require.NoError(t, tc.init()) constructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations( context.Background(), &evalCtx, nil /* semaCtx */, tc.spec.Aggregations, tc.typs, ) require.NoError(t, err) a := agg.new(context.Background(), &colexecagg.NewAggregatorArgs{ Allocator: testAllocator, MemAccount: testMemAcc, Input: source, InputTypes: tc.typs, Spec: tc.spec, EvalCtx: &evalCtx, Constructors: constructors, ConstArguments: constArguments, OutputTypes: outputTypes, }) a.Init(context.Background()) testOutput := colexectestutils.NewOpTestOutput(a, expectedTuples) if agg.order == ordered { err = testOutput.Verify() } else if agg.order == partial { err = testOutput.VerifyPartialOrder() } else { err = testOutput.VerifyAnyOrder() } if err != nil { t.Fatal(err) } } } } } } // benchmarkAggregateFunction runs aggregator microbenchmarks. numGroupCol is // the number of grouping columns. groupSize is the number of tuples to target // in each distinct aggregation group. chunkSize is the number of tuples to // target in each distinct partially ordered group column, and is intended for // use with partial order. Limit is the number of rows to retrieve from the // aggregation function before ending the microbenchmark. func benchmarkAggregateFunction( b *testing.B, agg aggType, aggFn execinfrapb.AggregatorSpec_Func, aggInputTypes []*types.T, numGroupCol int, groupSize int, distinctProb float64, numInputRows int, chunkSize int, limit int, ) { defer log.Scope(b).Close(b) if groupSize > numInputRows { // In this case all tuples will be part of the same group, and we have // likely already benchmarked such scenario with this value of // numInputRows, so we short-circuit. return } if numGroupCol < 1 { // We should always have at least one group column. return } if agg.order == partial { if chunkSize > numInputRows || groupSize > chunkSize { return } } rng := randutil.NewTestRandWithSeed(17) ctx := context.Background() evalCtx := eval.MakeTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(ctx) aggMemAcc := evalCtx.TestingMon.MakeBoundAccount() defer aggMemAcc.Close(ctx) evalCtx.SingleDatumAggMemAccount = &aggMemAcc const bytesFixedLength = 8 typs := []*types.T{types.Int} groupCols := []uint32{0} for g := 1; g < numGroupCol; g++ { typs = append(typs, types.Int) groupCols = append(groupCols, uint32(g)) } typs = append(typs, aggInputTypes...) 
cols := make([]coldata.Vec, len(typs)) for i := range typs { cols[i] = testAllocator.NewMemColumn(typs[i], numInputRows) } groups := cols[0].Int64() if agg.order == ordered { curGroup := -1 for i := 0; i < numInputRows; i++ { if i%groupSize == 0 { curGroup++ } groups[i] = int64(curGroup) } } else if agg.order == unordered { numGroups := numInputRows / groupSize for i := 0; i < numInputRows; i++ { groups[i] = int64(rng.Intn(numGroups)) } } else { // partial order. chunks := cols[0].Int64() groups = cols[1].Int64() curChunk := -1 numGroups := chunkSize / groupSize for i := 0; i < numInputRows; i++ { if i%chunkSize == 0 { curChunk++ } chunks[i] = int64(curChunk) groups[i] = int64(rng.Intn(numGroups)) } } for _, col := range cols[numGroupCol:] { coldatatestutils.RandomVec(coldatatestutils.RandomVecArgs{ Rand: rng, Vec: col, N: numInputRows, NullProbability: 0, BytesFixedLength: bytesFixedLength, }) } if aggFn == execinfrapb.SumInt { // Integer summation of random Int64 values can lead // to overflow, and we will panic. To go around it, we // restrict the range of values. vals := cols[numGroupCol].Int64() for i := range vals { vals[i] = vals[i] % 1024 } } source := colexectestutils.NewChunkingBatchSource(testAllocator, typs, cols, numInputRows) aggCols := make([]uint32, len(aggInputTypes)) for i := range aggCols { aggCols[i] = uint32(numGroupCol + i) } tc := aggregatorTestCase{ typs: typs, groupCols: groupCols, aggCols: [][]uint32{aggCols}, aggFns: []execinfrapb.AggregatorSpec_Func{aggFn}, unorderedInput: agg.order == unordered, } if distinctProb > 0 { if !typs[0].Identical(types.Int) { skip.IgnoreLint(b, "benchmarking distinct aggregation is supported only on an INT argument") } tc.aggDistinct = []bool{true} distinctModulo := int64(1.0 / distinctProb) vals := cols[1].Int64() for i := range vals { vals[i] = vals[i] % distinctModulo } } if agg.order == partial { tc.orderedCols = []uint32{0} } require.NoError(b, tc.init()) constructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations( ctx, &evalCtx, nil /* semaCtx */, tc.spec.Aggregations, tc.typs, ) require.NoError(b, err) fName := execinfrapb.AggregatorSpec_Func_name[int32(aggFn)] // Only count the aggregation columns. var argumentsSize int if len(aggInputTypes) > 0 { for _, typ := range aggInputTypes { if typ.Identical(types.Bool) { argumentsSize++ } else { argumentsSize += 8 } } } else { // For COUNT_ROWS we'll just use 8 bytes. argumentsSize = 8 } var inputTypesString string switch len(aggInputTypes) { case 1: // Override the string so that the name of the benchmark was the same // as in pre-20.2 releases (which allows us to compare against old // numbers). inputTypesString = aggInputTypes[0].String() default: inputTypesString = fmt.Sprintf("%s", aggInputTypes) } distinctProbString := "" if distinctProb > 0 { distinctProbString = fmt.Sprintf("/distinctProb=%.2f", distinctProb) } b.Run(fmt.Sprintf( "%s/%s/%s/groupSize=%d%s/numInputRows=%d", fName, agg.name, inputTypesString, groupSize, distinctProbString, numInputRows), func(b *testing.B) { b.SetBytes(int64(argumentsSize * numInputRows)) b.ResetTimer() for i := 0; i < b.N; i++ { a := agg.new(ctx, &colexecagg.NewAggregatorArgs{ Allocator: testAllocator, MemAccount: testMemAcc, Input: source, InputTypes: tc.typs, Spec: tc.spec, EvalCtx: &evalCtx, Constructors: constructors, ConstArguments: constArguments, OutputTypes: outputTypes, }) a.Init(ctx) // Exhaust aggregator until all batches have been read or limit, if // non-zero, is reached. 
tupleCount := 0 for b := a.Next(); b.Length() != 0; b = a.Next() { tupleCount += b.Length() if limit > 0 && tupleCount >= limit { break } } if err = a.(colexecop.Closer).Close(ctx); err != nil { b.Fatal(err) } source.Reset(ctx) } }, ) } // BenchmarkAggregator runs the benchmark both aggregators with diverse data // source parameters but using a single aggregate function. The goal of this // benchmark is measuring the performance of the aggregators themselves // depending on the parameters of the input. func BenchmarkAggregator(b *testing.B) { numRows := []int{1, 32, coldata.BatchSize(), 32 * coldata.BatchSize(), 1024 * coldata.BatchSize()} groupSizes := []int{1, 2, 32, 128, coldata.BatchSize()} if testing.Short() { numRows = []int{32, 32 * coldata.BatchSize()} groupSizes = []int{1, coldata.BatchSize()} } // We choose any_not_null aggregate function because it is the simplest // possible and, thus, its Compute function call will have the least impact // when benchmarking the aggregator logic. aggFn := execinfrapb.AnyNotNull for _, agg := range aggTypes { for _, numInputRows := range numRows { for _, groupSize := range groupSizes { benchmarkAggregateFunction( b, agg, aggFn, []*types.T{types.Int}, 1, /* numGroupCol */ groupSize, 0 /* distinctProb */, numInputRows, 0 /* chunkSize */, 0 /* limit */) } } } } // BenchmarkAllOptimizedAggregateFunctions runs the benchmark of all optimized // aggregate functions in 4 configurations (hash vs ordered, and small groups // vs big groups). Such configurations were chosen since they provide good // enough signal on the speeds of aggregate functions. For more diverse // configurations look at BenchmarkAggregator. func BenchmarkAllOptimizedAggregateFunctions(b *testing.B) { var numInputRows = 32 * coldata.BatchSize() numFnsToRun := len(execinfrapb.AggregatorSpec_Func_name) if testing.Short() { numFnsToRun = 1 } for aggFnNumber := 0; aggFnNumber < numFnsToRun; aggFnNumber++ { aggFn := execinfrapb.AggregatorSpec_Func(aggFnNumber) if !colexecagg.IsAggOptimized(aggFn) { continue } for _, agg := range aggTypes { var aggInputTypes []*types.T switch aggFn { case execinfrapb.BoolAnd, execinfrapb.BoolOr: aggInputTypes = []*types.T{types.Bool} case execinfrapb.ConcatAgg: aggInputTypes = []*types.T{types.Bytes} case execinfrapb.CountRows: default: aggInputTypes = []*types.T{types.Int} } for _, groupSize := range []int{1, coldata.BatchSize()} { benchmarkAggregateFunction(b, agg, aggFn, aggInputTypes, 1 /* numGroupCol */, groupSize, 0 /* distinctProb */, numInputRows, 0 /* chunkSize */, 0 /* limit */) } } } } func BenchmarkDistinctAggregation(b *testing.B) { aggFn := execinfrapb.Count for _, agg := range aggTypes { for _, numInputRows := range []int{32, 32 * coldata.BatchSize()} { for _, groupSize := range []int{1, 2, 32, 128, coldata.BatchSize()} { for _, distinctProb := range []float64{0.01, 0.1, 1.0} { distinctModulo := int(1.0 / distinctProb) if (groupSize == 1 && distinctProb != 1.0) || float64(groupSize)/float64(distinctModulo) < 0.1 { // We have a such combination of groupSize and distinctProb // parameters that we will be very unlikely to satisfy them // (for example, with groupSize=1 and distinctProb=0.01, // every value will be distinct within the group), so we // skip such configuration. 
continue } benchmarkAggregateFunction(b, agg, aggFn, []*types.T{types.Int}, 1 /* numGroupCol */, groupSize, 0 /* distinctProb */, numInputRows, 0 /* chunkSize */, 0 /* limit */) } } } } } func min64(a, b float64) float64 { if a < b { return a } return b } func max64(a, b float64) float64 { if a > b { return a } return b }
pkg/sql/colexec/aggregators_test.go
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.0033904951997101307, 0.00021499453578144312, 0.0001598928647581488, 0.00017139707051683217, 0.00030070103821344674 ]
{ "id": 4, "code_window": [ "\t// release. This only happens as part of preparing the master branch for the\n", "\t// next release. The release team runbooks, at time of writing, reflect\n", "\t// this.\n", "\t//\n", "\t// Example invocation:\n", "\t// roachtest --local run generate-fixtures --debug --cockroach ./cockroach \\\n", "\t// --build-tag v22.1.0-beta.3 tag:fixtures\n", "\trunFixtures := func(\n", "\t\tctx context.Context,\n", "\t\tt test.Test,\n", "\t\tc cluster.Cluster,\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// FIXTURE_VERSION=v20.2.0-beta.1 roachtest --local run generate-fixtures --debug --cockroach ./cockroach tag:fixtures\n" ], "file_path": "pkg/cmd/roachtest/tests/fixtures.go", "type": "replace", "edit_start_line_idx": 51 }
// Copyright 2023 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package metrics_test import ( "context" "testing" "time" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/asim" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/asim/config" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/asim/metrics" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/asim/state" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/asim/workload" "github.com/stretchr/testify/require" ) type mockListener struct { history [][]metrics.StoreMetrics } func (ml *mockListener) Listen(ctx context.Context, sms []metrics.StoreMetrics) { ml.history = append(ml.history, sms) } // TestTracker asserts that the Tracker calls Listen on each registered // listener with identical arguments. func TestTracker(t *testing.T) { ctx := context.Background() settings := config.DefaultSimulationSettings() duration := 200 * time.Second rwg := []workload.Generator{ workload.TestCreateWorkloadGenerator(settings.Seed, settings.StartTime, 10, 10000), } s := state.LoadConfig(state.ComplexConfig, state.SingleRangeConfig, settings) l1 := &mockListener{history: [][]metrics.StoreMetrics{}} l2 := &mockListener{history: [][]metrics.StoreMetrics{}} tracker := metrics.NewTracker(testingMetricsInterval, l1, l2) sim := asim.NewSimulator(duration, rwg, s, settings, tracker) sim.RunSim(ctx) require.Equal(t, l1.history, l2.history) }
pkg/kv/kvserver/asim/metrics/tracker_test.go
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.003376137465238571, 0.0007408368401229382, 0.00016824397607706487, 0.0001708079653326422, 0.0011812943266704679 ]
{ "id": 4, "code_window": [ "\t// release. This only happens as part of preparing the master branch for the\n", "\t// next release. The release team runbooks, at time of writing, reflect\n", "\t// this.\n", "\t//\n", "\t// Example invocation:\n", "\t// roachtest --local run generate-fixtures --debug --cockroach ./cockroach \\\n", "\t// --build-tag v22.1.0-beta.3 tag:fixtures\n", "\trunFixtures := func(\n", "\t\tctx context.Context,\n", "\t\tt test.Test,\n", "\t\tc cluster.Cluster,\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// FIXTURE_VERSION=v20.2.0-beta.1 roachtest --local run generate-fixtures --debug --cockroach ./cockroach tag:fixtures\n" ], "file_path": "pkg/cmd/roachtest/tests/fixtures.go", "type": "replace", "edit_start_line_idx": 51 }
sql create database t; create table t.f (x int, y int); insert into t.f values (42, 69); create table t.g (x int, y int); insert into t.g values (3, 4) ---- INSERT 1 dump t f g ---- ---- CREATE TABLE public.f ( x INT8 NULL, y INT8 NULL, FAMILY "primary" (x, y, rowid) ); CREATE TABLE public.g ( x INT8 NULL, y INT8 NULL, FAMILY "primary" (x, y, rowid) ); INSERT INTO public.f (x, y) VALUES (42, 69); INSERT INTO public.g (x, y) VALUES (3, 4); ---- ---- dump t ---- ---- CREATE TABLE public.f ( x INT8 NULL, y INT8 NULL, FAMILY "primary" (x, y, rowid) ); CREATE TABLE public.g ( x INT8 NULL, y INT8 NULL, FAMILY "primary" (x, y, rowid) ); INSERT INTO public.f (x, y) VALUES (42, 69); INSERT INTO public.g (x, y) VALUES (3, 4); ---- ----
pkg/cli/testdata/dump/multiple
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.00017184742318931967, 0.00016714048979338259, 0.00016297602269332856, 0.00016693788347765803, 0.000003626651960075833 ]
{ "id": 5, "code_window": [ "\t) {\n", "\t\tif c.IsLocal() && runtime.GOARCH == \"arm64\" {\n", "\t\t\tt.Skip(\"Skip under ARM64. See https://github.com/cockroachdb/cockroach/issues/89268\")\n", "\t\t}\n", "\t\tfixtureVersion := strings.TrimPrefix(t.BuildVersion().String(), \"v\")\n", "\t\tmakeVersionFixtureAndFatal(ctx, t, c, fixtureVersion)\n", "\t}\n", "\tspec := registry.TestSpec{\n", "\t\tName: \"generate-fixtures\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tfixtureVersion := os.Getenv(\"FIXTURE_VERSION\")\n", "\t\tif fixtureVersion == \"\" {\n", "\t\t\tt.Fatal(\"FIXTURE_VERSION must be set\")\n", "\t\t}\n" ], "file_path": "pkg/cmd/roachtest/tests/fixtures.go", "type": "replace", "edit_start_line_idx": 61 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package tests import ( "context" gosql "database/sql" "fmt" "math/rand" "path/filepath" "runtime" "time" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/clusterupgrade" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/mixedversion" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/roachprod/install" "github.com/cockroachdb/cockroach/pkg/roachprod/logger" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/version" "github.com/stretchr/testify/require" ) type versionFeatureTest struct { name string statement string } // Feature tests that are invoked in mixed-version state during the // upgrade test. A gotcha is that these feature tests are also // invoked when the cluster is in the middle of upgrading -- i.e. a // state where the cluster version has already been bumped, but not // all nodes are aware). This should be considered a feature of this // test, and feature tests that flake because of it need to be fixed. var versionUpgradeTestFeatures = []versionFeatureTest{ // NB: the next four tests are ancient and supported since v2.0. { name: "ObjectAccess", statement: ` -- We should be able to successfully select from objects created in ancient -- versions of CRDB using their FQNs. Prevents bugs such as #43141, where -- databases created before a migration were inaccessible after the -- migration. -- -- NB: the data has been baked into the fixtures. Originally created via: -- create database persistent_db -- create table persistent_db.persistent_table(a int)")) -- on CRDB v1.0 select * from persistent_db.persistent_table; show tables from persistent_db; `, }, { name: "JSONB", statement: ` CREATE DATABASE IF NOT EXISTS test; CREATE TABLE test.t (j JSONB); DROP TABLE test.t; `, }, { name: "Sequences", statement: ` CREATE DATABASE IF NOT EXISTS test; CREATE SEQUENCE test.test_sequence; DROP SEQUENCE test.test_sequence; `, }, { name: "Computed Columns", statement: ` CREATE DATABASE IF NOT EXISTS test; CREATE TABLE test.t (x INT AS (3) STORED); DROP TABLE test.t; `, }, { name: "Split and Merge Ranges", statement: ` CREATE DATABASE IF NOT EXISTS splitmerge; CREATE TABLE splitmerge.t (k INT PRIMARY KEY); ALTER TABLE splitmerge.t SPLIT AT VALUES (1), (2), (3); ALTER TABLE splitmerge.t UNSPLIT AT VALUES (1), (2), (3); DROP TABLE splitmerge.t; `, }, } func runVersionUpgrade(ctx context.Context, t test.Test, c cluster.Cluster) { if c.IsLocal() && runtime.GOARCH == "arm64" { t.Skip("Skip under ARM64. 
See https://github.com/cockroachdb/cockroach/issues/89268") } c.Put(ctx, t.DeprecatedWorkload(), "./workload", c.All()) mvt := mixedversion.NewTest(ctx, t, t.L(), c, c.All()) mvt.OnStartup("setup schema changer workload", func(ctx context.Context, l *logger.Logger, r *rand.Rand, helper *mixedversion.Helper) error { // Execute the workload init. return c.RunE(ctx, c.All(), "./workload init schemachange") }) mvt.InMixedVersion("run backup", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { // Verify that backups can be created in various configurations. This is // important to test because changes in system tables might cause backups to // fail in mixed-version clusters. dest := fmt.Sprintf("nodelocal://1/%d", timeutil.Now().UnixNano()) return h.Exec(rng, `BACKUP TO $1`, dest) }) mvt.InMixedVersion( "test features", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { for _, featureTest := range versionUpgradeTestFeatures { l.Printf("running feature test %q", featureTest.name) if err := h.Exec(rng, featureTest.statement); err != nil { l.Printf("%q: ERROR (%s)", featureTest.name, err) return err } l.Printf("%q: OK", featureTest.name) } return nil }, ) mvt.InMixedVersion( "test schema change step", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { l.Printf("running schema workload step") runCmd := roachtestutil.NewCommand("./workload run schemachange").Flag("verbose", 1).Flag("max-ops", 10).Flag("concurrency", 2).Arg("{pgurl:1-%d}", len(c.All())) randomNode := h.RandomNode(rng, c.All()) return c.RunE(ctx, option.NodeListOption{randomNode}, runCmd.String()) }, ) mvt.AfterUpgradeFinalized( "check if GC TTL is pinned", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { // TODO(irfansharif): This can be removed when the predecessor version // in this test is v23.1, where the default is 4h. This test was only to // make sure that existing clusters that upgrade to 23.1 retained their // existing GC TTL. l.Printf("checking if GC TTL is pinned to 24h") var ttlSeconds int query := ` SELECT (crdb_internal.pb_to_json('cockroach.config.zonepb.ZoneConfig', raw_config_protobuf)->'gc'->'ttlSeconds')::INT FROM crdb_internal.zones WHERE target = 'RANGE default' LIMIT 1 ` if err := h.QueryRow(rng, query).Scan(&ttlSeconds); err != nil { return fmt.Errorf("error querying GC TTL: %w", err) } expectedTTL := 24 * 60 * 60 // NB: 24h is what's used in the fixture if ttlSeconds != expectedTTL { return fmt.Errorf("unexpected GC TTL: actual (%d) != expected (%d)", ttlSeconds, expectedTTL) } return nil }, ) mvt.Run() } func (u *versionUpgradeTest) run(ctx context.Context, t test.Test) { defer func() { for _, db := range u.conns { _ = db.Close() } }() for i, step := range u.steps { if step != nil { t.Status(fmt.Sprintf("versionUpgradeTest: starting step %d", i+1)) step(ctx, t, u) } } } type versionUpgradeTest struct { goOS string c cluster.Cluster steps []versionStep // Cache conns because opening one takes hundreds of ms, and we do it quite // a lot. conns []*gosql.DB } func newVersionUpgradeTest(c cluster.Cluster, steps ...versionStep) *versionUpgradeTest { return &versionUpgradeTest{ goOS: ifLocal(c, runtime.GOOS, "linux"), c: c, steps: steps, } } // Return a cached conn to the given node. Don't call .Close(), the test harness // will do it. 
func (u *versionUpgradeTest) conn(ctx context.Context, t test.Test, i int) *gosql.DB { if u.conns == nil { for _, i := range u.c.All() { u.conns = append(u.conns, u.c.Conn(ctx, t.L(), i)) } } db := u.conns[i-1] // Run a trivial query to shake out errors that can occur when the server has // restarted in the meantime. _ = db.PingContext(ctx) return db } // uploadVersion is a thin wrapper around // `clusterupgrade.UploadVersion` that calls t.Fatal if that call // returns an error func uploadVersion( ctx context.Context, t test.Test, c cluster.Cluster, nodes option.NodeListOption, newVersion string, ) string { path, err := clusterupgrade.UploadVersion(ctx, t, t.L(), c, nodes, newVersion) if err != nil { t.Fatal(err) } return path } // upgradeNodes is a thin wrapper around // `clusterupgrade.RestartNodesWithNewBinary` that calls t.Fatal if // that call returns an errror. func upgradeNodes( ctx context.Context, t test.Test, c cluster.Cluster, nodes option.NodeListOption, startOpts option.StartOpts, newVersion string, ) { if err := clusterupgrade.RestartNodesWithNewBinary( ctx, t, t.L(), c, nodes, startOpts, newVersion, ); err != nil { t.Fatal(err) } } func (u *versionUpgradeTest) binaryVersion( ctx context.Context, t test.Test, i int, ) roachpb.Version { db := u.conn(ctx, t, i) v, err := clusterupgrade.BinaryVersion(db) if err != nil { t.Fatal(err) } return v } // versionStep is an isolated version migration on a running cluster. type versionStep func(ctx context.Context, t test.Test, u *versionUpgradeTest) func uploadAndStartFromCheckpointFixture(nodes option.NodeListOption, v string) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { if err := clusterupgrade.InstallFixtures(ctx, t.L(), u.c, nodes, v); err != nil { t.Fatal(err) } binary := uploadVersion(ctx, t, u.c, nodes, v) startOpts := option.DefaultStartOpts() // NB: can't start sequentially since cluster already bootstrapped. startOpts.RoachprodOpts.Sequential = false clusterupgrade.StartWithBinary(ctx, t.L(), u.c, nodes, binary, startOpts) } } func uploadAndStart(nodes option.NodeListOption, v string) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { binary := uploadVersion(ctx, t, u.c, nodes, v) startOpts := option.DefaultStartOpts() // NB: can't start sequentially since cluster already bootstrapped. startOpts.RoachprodOpts.Sequential = false clusterupgrade.StartWithBinary(ctx, t.L(), u.c, nodes, binary, startOpts) } } // binaryUpgradeStep rolling-restarts the given nodes into the new binary // version. Note that this does *not* wait for the cluster version to upgrade. // Use a waitForUpgradeStep() for that. 
func binaryUpgradeStep(nodes option.NodeListOption, newVersion string) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { if err := clusterupgrade.RestartNodesWithNewBinary( ctx, t, t.L(), u.c, nodes, option.DefaultStartOpts(), newVersion, ); err != nil { t.Fatal(err) } } } func preventAutoUpgradeStep(node int) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { db := u.conn(ctx, t, node) _, err := db.ExecContext(ctx, `SET CLUSTER SETTING cluster.preserve_downgrade_option = $1`, u.binaryVersion(ctx, t, node).String()) if err != nil { t.Fatal(err) } } } func allowAutoUpgradeStep(node int) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { db := u.conn(ctx, t, node) _, err := db.ExecContext(ctx, `RESET CLUSTER SETTING cluster.preserve_downgrade_option`) if err != nil { t.Fatal(err) } } } // NB: this is intentionally kept separate from binaryUpgradeStep because we run // feature tests between the steps, and we want to expose them (at least // heuristically) to the real-world situation in which some nodes have already // learned of a cluster version bump (from Gossip) where others haven't. This // situation tends to exhibit unexpected behavior. func waitForUpgradeStep(nodes option.NodeListOption) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { dbFunc := func(node int) *gosql.DB { return u.conn(ctx, t, node) } if err := clusterupgrade.WaitForClusterUpgrade(ctx, t.L(), nodes, dbFunc); err != nil { t.Fatal(err) } } } // makeVersionFixtureAndFatal creates fixtures from which we can test // mixed-version clusters (i.e. version X mixing with X-1). The fixtures date // back all the way to v1.0; when development begins on version X, we make a // fixture for version X-1 by running a starting the version X-2 cluster from // the X-2 fixtures, upgrading it to version X-1, and copy the resulting store // directories to the log directories (which are part of the artifacts). The // test will then fail on purpose when it's done with instructions on where to // move the files. func makeVersionFixtureAndFatal( ctx context.Context, t test.Test, c cluster.Cluster, makeFixtureVersion string, ) { var useLocalBinary bool if makeFixtureVersion == "" { c.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings(), c.Node(1)) require.NoError(t, c.Conn(ctx, t.L(), 1).QueryRowContext( ctx, `select regexp_extract(value, '^v([0-9]+\.[0-9]+\.[0-9]+)') from crdb_internal.node_build_info where field = 'Version';`, ).Scan(&makeFixtureVersion)) c.Wipe(ctx, c.Node(1)) useLocalBinary = true } predecessorVersion, err := version.PredecessorVersion(*version.MustParse("v" + makeFixtureVersion)) if err != nil { t.Fatal(err) } t.L().Printf("making fixture for %s (starting at %s)", makeFixtureVersion, predecessorVersion) if useLocalBinary { // Make steps below use the main cockroach binary (in particular, don't try // to download the released version for makeFixtureVersion which may not yet // exist) makeFixtureVersion = "" } newVersionUpgradeTest(c, // Start the cluster from a fixture. That fixture's cluster version may // be at the predecessor version (though in practice it's fully up to // date, if it was created via the checkpointer above), so add a // waitForUpgradeStep to make sure we're upgraded all the way before // moving on. // // See the comment on createCheckpoints for details on fixtures. 
uploadAndStartFromCheckpointFixture(c.All(), predecessorVersion), waitForUpgradeStep(c.All()), // NB: at this point, cluster and binary version equal predecessorVersion, // and auto-upgrades are on. binaryUpgradeStep(c.All(), makeFixtureVersion), waitForUpgradeStep(c.All()), func(ctx context.Context, t test.Test, u *versionUpgradeTest) { // If we're taking checkpoints, momentarily stop the cluster (we // need to do that to get the checkpoints to reflect a // consistent cluster state). The binary at this point will be // the new one, but the cluster version was not explicitly // bumped, though auto-update may have taken place already. // For example, if newVersion is 2.1, the cluster version in // the store directories may be 2.0 on some stores and 2.1 on // the others (though if any are on 2.1, then that's what's // stored in system.settings). // This means that when we restart from that version, we're // going to want to use the binary mentioned in the checkpoint, // or at least one compatible with the *predecessor* of the // checkpoint version. For example, for checkpoint-2.1, the // cluster version might be 2.0, so we can only use the 2.0 or // 2.1 binary, but not the 19.1 binary (as 19.1 and 2.0 are not // compatible). name := clusterupgrade.CheckpointName(u.binaryVersion(ctx, t, 1).String()) u.c.Stop(ctx, t.L(), option.DefaultStopOpts(), c.All()) binaryPath := clusterupgrade.BinaryPathFromVersion(makeFixtureVersion) c.Run(ctx, c.All(), binaryPath, "debug", "pebble", "db", "checkpoint", "{store-dir}", "{store-dir}/"+name) // The `cluster-bootstrapped` marker can already be found within // store-dir, but the rocksdb checkpoint step above does not pick it // up as it isn't recognized by RocksDB. We copy the marker // manually, it's necessary for roachprod created clusters. See // #54761. c.Run(ctx, c.Node(1), "cp", "{store-dir}/cluster-bootstrapped", "{store-dir}/"+name) // Similar to the above - newer versions require the min version file to open a store. c.Run(ctx, c.Node(1), "cp", fmt.Sprintf("{store-dir}/%s", storage.MinVersionFilename), "{store-dir}/"+name) c.Run(ctx, c.All(), "tar", "-C", "{store-dir}/"+name, "-czf", "{log-dir}/"+name+".tgz", ".") t.Fatalf(`successfully created checkpoints; failing test on purpose. Invoke the following to move the archives to the right place and commit the result: for i in 1 2 3 4; do mkdir -p pkg/cmd/roachtest/fixtures/${i} && \ mv artifacts/generate-fixtures/run_1/logs/${i}.unredacted/checkpoint-*.tgz \ pkg/cmd/roachtest/fixtures/${i}/ done `) }).run(ctx, t) } // importTPCCStep runs a TPCC import import on the first crdbNode (monitoring them all for // crashes during the import). If oldV is nil, this runs the import using the specified // version (for example "19.2.1", as provided by PredecessorVersion()) using the location // used by c.Stage(). An empty oldV uses the main cockroach binary. func importTPCCStep( oldV string, headroomWarehouses int, crdbNodes option.NodeListOption, ) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { // We need to use the predecessor binary to load into the // predecessor cluster to avoid random breakage. For example, you // can't use 21.1 to import into 20.2 due to some flag changes. // // TODO(tbg): also import a large dataset (for example 2TB bank) // that will provide cold data that may need to be migrated. 
var cmd string if oldV == "" { cmd = tpccImportCmd(headroomWarehouses) } else { cmd = tpccImportCmdWithCockroachBinary(filepath.Join("v"+oldV, "cockroach"), headroomWarehouses, "--checks=false") } // Use a monitor so that we fail cleanly if the cluster crashes // during import. m := u.c.NewMonitor(ctx, crdbNodes) m.Go(func(ctx context.Context) error { return u.c.RunE(ctx, u.c.Node(crdbNodes[0]), cmd) }) m.Wait() } } func importLargeBankStep(oldV string, rows int, crdbNodes option.NodeListOption) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { // Use the predecessor binary to load into the predecessor // cluster to avoid random breakage due to flag changes, etc. binary := "./cockroach" if oldV != "" { binary = filepath.Join("v"+oldV, "cockroach") } // Use a monitor so that we fail cleanly if the cluster crashes // during import. m := u.c.NewMonitor(ctx, crdbNodes) m.Go(func(ctx context.Context) error { return u.c.RunE(ctx, u.c.Node(crdbNodes[0]), binary, "workload", "fixtures", "import", "bank", "--payload-bytes=10240", "--rows="+fmt.Sprint(rows), "--seed=4", "--db=bigbank") }) m.Wait() } } func sleepStep(d time.Duration) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { time.Sleep(d) } }
pkg/cmd/roachtest/tests/versionupgrade.go
1
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.9715007543563843, 0.05351685360074043, 0.00016020754992496222, 0.00016966401017270982, 0.2103963941335678 ]
{ "id": 5, "code_window": [ "\t) {\n", "\t\tif c.IsLocal() && runtime.GOARCH == \"arm64\" {\n", "\t\t\tt.Skip(\"Skip under ARM64. See https://github.com/cockroachdb/cockroach/issues/89268\")\n", "\t\t}\n", "\t\tfixtureVersion := strings.TrimPrefix(t.BuildVersion().String(), \"v\")\n", "\t\tmakeVersionFixtureAndFatal(ctx, t, c, fixtureVersion)\n", "\t}\n", "\tspec := registry.TestSpec{\n", "\t\tName: \"generate-fixtures\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tfixtureVersion := os.Getenv(\"FIXTURE_VERSION\")\n", "\t\tif fixtureVersion == \"\" {\n", "\t\t\tt.Fatal(\"FIXTURE_VERSION must be set\")\n", "\t\t}\n" ], "file_path": "pkg/cmd/roachtest/tests/fixtures.go", "type": "replace", "edit_start_line_idx": 61 }
// Copyright 2023 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package opgen import ( "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" ) func init() { opRegistry.register((*scpb.FunctionParamDefaultExpression)(nil), toPublic( scpb.Status_ABSENT, to(scpb.Status_PUBLIC, // TODO(chengxiong): add operations when default value is supported. emit(func(this *scpb.FunctionParamDefaultExpression) *scop.NotImplementedForPublicObjects { return notImplementedForPublicObjects(this) }), ), ), toAbsent( scpb.Status_PUBLIC, to(scpb.Status_ABSENT, // TODO(chengxiong): add operations when default value is supported. emit(func(this *scpb.FunctionParamDefaultExpression) *scop.NotImplemented { return notImplemented(this) }), ), ), ) }
pkg/sql/schemachanger/scplan/internal/opgen/opgen_function_param_default.go
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.000176601271959953, 0.0001755950943334028, 0.0001737733109621331, 0.00017600288265384734, 0.0000011049526165152201 ]
{ "id": 5, "code_window": [ "\t) {\n", "\t\tif c.IsLocal() && runtime.GOARCH == \"arm64\" {\n", "\t\t\tt.Skip(\"Skip under ARM64. See https://github.com/cockroachdb/cockroach/issues/89268\")\n", "\t\t}\n", "\t\tfixtureVersion := strings.TrimPrefix(t.BuildVersion().String(), \"v\")\n", "\t\tmakeVersionFixtureAndFatal(ctx, t, c, fixtureVersion)\n", "\t}\n", "\tspec := registry.TestSpec{\n", "\t\tName: \"generate-fixtures\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tfixtureVersion := os.Getenv(\"FIXTURE_VERSION\")\n", "\t\tif fixtureVersion == \"\" {\n", "\t\t\tt.Fatal(\"FIXTURE_VERSION must be set\")\n", "\t\t}\n" ], "file_path": "pkg/cmd/roachtest/tests/fixtures.go", "type": "replace", "edit_start_line_idx": 61 }
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. include "schema.fbs"; namespace org.apache.arrow.flatbuf; /// ---------------------------------------------------------------------- /// Arrow File metadata /// table Footer { version: org.apache.arrow.flatbuf.MetadataVersion; schema: org.apache.arrow.flatbuf.Schema; dictionaries: [ Block ]; recordBatches: [ Block ]; } struct Block { /// Index to the start of the RecordBlock (note this is past the Message header) offset: long; /// Length of the metadata metaDataLength: int; /// Length of the data (this is aligned so there can be a gap between this and /// the metadata). bodyLength: long; } root_type Footer;
pkg/col/colserde/arrowserde/file.fbs
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.0001785298518370837, 0.00017575705714989454, 0.00017048398149199784, 0.00017637829296290874, 0.000002779691385512706 ]
{ "id": 5, "code_window": [ "\t) {\n", "\t\tif c.IsLocal() && runtime.GOARCH == \"arm64\" {\n", "\t\t\tt.Skip(\"Skip under ARM64. See https://github.com/cockroachdb/cockroach/issues/89268\")\n", "\t\t}\n", "\t\tfixtureVersion := strings.TrimPrefix(t.BuildVersion().String(), \"v\")\n", "\t\tmakeVersionFixtureAndFatal(ctx, t, c, fixtureVersion)\n", "\t}\n", "\tspec := registry.TestSpec{\n", "\t\tName: \"generate-fixtures\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tfixtureVersion := os.Getenv(\"FIXTURE_VERSION\")\n", "\t\tif fixtureVersion == \"\" {\n", "\t\t\tt.Fatal(\"FIXTURE_VERSION must be set\")\n", "\t\t}\n" ], "file_path": "pkg/cmd/roachtest/tests/fixtures.go", "type": "replace", "edit_start_line_idx": 61 }
#!/usr/bin/env bash set -exuo pipefail dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" source "$dir/teamcity-support.sh" if [[ ! -f ~/.ssh/id_rsa.pub ]]; then ssh-keygen -q -C "roachtest-weekly-bazel $(date)" -N "" -f ~/.ssh/id_rsa fi source $root/build/teamcity/cockroach/nightlies/roachtest_compile_bits.sh artifacts=/artifacts source $root/build/teamcity/util/roachtest_util.sh build/teamcity-roachtest-invoke.sh \ tag:aws-weekly \ --cloud="${CLOUD}" \ --cluster-id "${TC_BUILD_ID}" \ --cockroach "$PWD/bin/cockroach" \ --artifacts=/artifacts \ --artifacts-literal="${LITERAL_ARTIFACTS_DIR:-}" \ --slack-token="${SLACK_TOKEN}"
build/teamcity/cockroach/nightlies/roachtest_weekly_aws_impl.sh
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.00016914652951527387, 0.0001649768528295681, 0.00016102584777399898, 0.00016475819575134665, 0.0000033188582619914087 ]
{ "id": 6, "code_window": [ "\t\"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/mixedversion\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/roachprod/install\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/roachprod/logger\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/storage\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/timeutil\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/version\"\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 28 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package tests import ( "context" gosql "database/sql" "fmt" "math/rand" "path/filepath" "runtime" "time" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/clusterupgrade" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/mixedversion" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/roachprod/install" "github.com/cockroachdb/cockroach/pkg/roachprod/logger" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/version" "github.com/stretchr/testify/require" ) type versionFeatureTest struct { name string statement string } // Feature tests that are invoked in mixed-version state during the // upgrade test. A gotcha is that these feature tests are also // invoked when the cluster is in the middle of upgrading -- i.e. a // state where the cluster version has already been bumped, but not // all nodes are aware). This should be considered a feature of this // test, and feature tests that flake because of it need to be fixed. var versionUpgradeTestFeatures = []versionFeatureTest{ // NB: the next four tests are ancient and supported since v2.0. { name: "ObjectAccess", statement: ` -- We should be able to successfully select from objects created in ancient -- versions of CRDB using their FQNs. Prevents bugs such as #43141, where -- databases created before a migration were inaccessible after the -- migration. -- -- NB: the data has been baked into the fixtures. Originally created via: -- create database persistent_db -- create table persistent_db.persistent_table(a int)")) -- on CRDB v1.0 select * from persistent_db.persistent_table; show tables from persistent_db; `, }, { name: "JSONB", statement: ` CREATE DATABASE IF NOT EXISTS test; CREATE TABLE test.t (j JSONB); DROP TABLE test.t; `, }, { name: "Sequences", statement: ` CREATE DATABASE IF NOT EXISTS test; CREATE SEQUENCE test.test_sequence; DROP SEQUENCE test.test_sequence; `, }, { name: "Computed Columns", statement: ` CREATE DATABASE IF NOT EXISTS test; CREATE TABLE test.t (x INT AS (3) STORED); DROP TABLE test.t; `, }, { name: "Split and Merge Ranges", statement: ` CREATE DATABASE IF NOT EXISTS splitmerge; CREATE TABLE splitmerge.t (k INT PRIMARY KEY); ALTER TABLE splitmerge.t SPLIT AT VALUES (1), (2), (3); ALTER TABLE splitmerge.t UNSPLIT AT VALUES (1), (2), (3); DROP TABLE splitmerge.t; `, }, } func runVersionUpgrade(ctx context.Context, t test.Test, c cluster.Cluster) { if c.IsLocal() && runtime.GOARCH == "arm64" { t.Skip("Skip under ARM64. 
See https://github.com/cockroachdb/cockroach/issues/89268") } c.Put(ctx, t.DeprecatedWorkload(), "./workload", c.All()) mvt := mixedversion.NewTest(ctx, t, t.L(), c, c.All()) mvt.OnStartup("setup schema changer workload", func(ctx context.Context, l *logger.Logger, r *rand.Rand, helper *mixedversion.Helper) error { // Execute the workload init. return c.RunE(ctx, c.All(), "./workload init schemachange") }) mvt.InMixedVersion("run backup", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { // Verify that backups can be created in various configurations. This is // important to test because changes in system tables might cause backups to // fail in mixed-version clusters. dest := fmt.Sprintf("nodelocal://1/%d", timeutil.Now().UnixNano()) return h.Exec(rng, `BACKUP TO $1`, dest) }) mvt.InMixedVersion( "test features", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { for _, featureTest := range versionUpgradeTestFeatures { l.Printf("running feature test %q", featureTest.name) if err := h.Exec(rng, featureTest.statement); err != nil { l.Printf("%q: ERROR (%s)", featureTest.name, err) return err } l.Printf("%q: OK", featureTest.name) } return nil }, ) mvt.InMixedVersion( "test schema change step", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { l.Printf("running schema workload step") runCmd := roachtestutil.NewCommand("./workload run schemachange").Flag("verbose", 1).Flag("max-ops", 10).Flag("concurrency", 2).Arg("{pgurl:1-%d}", len(c.All())) randomNode := h.RandomNode(rng, c.All()) return c.RunE(ctx, option.NodeListOption{randomNode}, runCmd.String()) }, ) mvt.AfterUpgradeFinalized( "check if GC TTL is pinned", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { // TODO(irfansharif): This can be removed when the predecessor version // in this test is v23.1, where the default is 4h. This test was only to // make sure that existing clusters that upgrade to 23.1 retained their // existing GC TTL. l.Printf("checking if GC TTL is pinned to 24h") var ttlSeconds int query := ` SELECT (crdb_internal.pb_to_json('cockroach.config.zonepb.ZoneConfig', raw_config_protobuf)->'gc'->'ttlSeconds')::INT FROM crdb_internal.zones WHERE target = 'RANGE default' LIMIT 1 ` if err := h.QueryRow(rng, query).Scan(&ttlSeconds); err != nil { return fmt.Errorf("error querying GC TTL: %w", err) } expectedTTL := 24 * 60 * 60 // NB: 24h is what's used in the fixture if ttlSeconds != expectedTTL { return fmt.Errorf("unexpected GC TTL: actual (%d) != expected (%d)", ttlSeconds, expectedTTL) } return nil }, ) mvt.Run() } func (u *versionUpgradeTest) run(ctx context.Context, t test.Test) { defer func() { for _, db := range u.conns { _ = db.Close() } }() for i, step := range u.steps { if step != nil { t.Status(fmt.Sprintf("versionUpgradeTest: starting step %d", i+1)) step(ctx, t, u) } } } type versionUpgradeTest struct { goOS string c cluster.Cluster steps []versionStep // Cache conns because opening one takes hundreds of ms, and we do it quite // a lot. conns []*gosql.DB } func newVersionUpgradeTest(c cluster.Cluster, steps ...versionStep) *versionUpgradeTest { return &versionUpgradeTest{ goOS: ifLocal(c, runtime.GOOS, "linux"), c: c, steps: steps, } } // Return a cached conn to the given node. Don't call .Close(), the test harness // will do it. 
func (u *versionUpgradeTest) conn(ctx context.Context, t test.Test, i int) *gosql.DB { if u.conns == nil { for _, i := range u.c.All() { u.conns = append(u.conns, u.c.Conn(ctx, t.L(), i)) } } db := u.conns[i-1] // Run a trivial query to shake out errors that can occur when the server has // restarted in the meantime. _ = db.PingContext(ctx) return db } // uploadVersion is a thin wrapper around // `clusterupgrade.UploadVersion` that calls t.Fatal if that call // returns an error func uploadVersion( ctx context.Context, t test.Test, c cluster.Cluster, nodes option.NodeListOption, newVersion string, ) string { path, err := clusterupgrade.UploadVersion(ctx, t, t.L(), c, nodes, newVersion) if err != nil { t.Fatal(err) } return path } // upgradeNodes is a thin wrapper around // `clusterupgrade.RestartNodesWithNewBinary` that calls t.Fatal if // that call returns an errror. func upgradeNodes( ctx context.Context, t test.Test, c cluster.Cluster, nodes option.NodeListOption, startOpts option.StartOpts, newVersion string, ) { if err := clusterupgrade.RestartNodesWithNewBinary( ctx, t, t.L(), c, nodes, startOpts, newVersion, ); err != nil { t.Fatal(err) } } func (u *versionUpgradeTest) binaryVersion( ctx context.Context, t test.Test, i int, ) roachpb.Version { db := u.conn(ctx, t, i) v, err := clusterupgrade.BinaryVersion(db) if err != nil { t.Fatal(err) } return v } // versionStep is an isolated version migration on a running cluster. type versionStep func(ctx context.Context, t test.Test, u *versionUpgradeTest) func uploadAndStartFromCheckpointFixture(nodes option.NodeListOption, v string) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { if err := clusterupgrade.InstallFixtures(ctx, t.L(), u.c, nodes, v); err != nil { t.Fatal(err) } binary := uploadVersion(ctx, t, u.c, nodes, v) startOpts := option.DefaultStartOpts() // NB: can't start sequentially since cluster already bootstrapped. startOpts.RoachprodOpts.Sequential = false clusterupgrade.StartWithBinary(ctx, t.L(), u.c, nodes, binary, startOpts) } } func uploadAndStart(nodes option.NodeListOption, v string) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { binary := uploadVersion(ctx, t, u.c, nodes, v) startOpts := option.DefaultStartOpts() // NB: can't start sequentially since cluster already bootstrapped. startOpts.RoachprodOpts.Sequential = false clusterupgrade.StartWithBinary(ctx, t.L(), u.c, nodes, binary, startOpts) } } // binaryUpgradeStep rolling-restarts the given nodes into the new binary // version. Note that this does *not* wait for the cluster version to upgrade. // Use a waitForUpgradeStep() for that. 
func binaryUpgradeStep(nodes option.NodeListOption, newVersion string) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { if err := clusterupgrade.RestartNodesWithNewBinary( ctx, t, t.L(), u.c, nodes, option.DefaultStartOpts(), newVersion, ); err != nil { t.Fatal(err) } } } func preventAutoUpgradeStep(node int) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { db := u.conn(ctx, t, node) _, err := db.ExecContext(ctx, `SET CLUSTER SETTING cluster.preserve_downgrade_option = $1`, u.binaryVersion(ctx, t, node).String()) if err != nil { t.Fatal(err) } } } func allowAutoUpgradeStep(node int) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { db := u.conn(ctx, t, node) _, err := db.ExecContext(ctx, `RESET CLUSTER SETTING cluster.preserve_downgrade_option`) if err != nil { t.Fatal(err) } } } // NB: this is intentionally kept separate from binaryUpgradeStep because we run // feature tests between the steps, and we want to expose them (at least // heuristically) to the real-world situation in which some nodes have already // learned of a cluster version bump (from Gossip) where others haven't. This // situation tends to exhibit unexpected behavior. func waitForUpgradeStep(nodes option.NodeListOption) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { dbFunc := func(node int) *gosql.DB { return u.conn(ctx, t, node) } if err := clusterupgrade.WaitForClusterUpgrade(ctx, t.L(), nodes, dbFunc); err != nil { t.Fatal(err) } } } // makeVersionFixtureAndFatal creates fixtures from which we can test // mixed-version clusters (i.e. version X mixing with X-1). The fixtures date // back all the way to v1.0; when development begins on version X, we make a // fixture for version X-1 by running a starting the version X-2 cluster from // the X-2 fixtures, upgrading it to version X-1, and copy the resulting store // directories to the log directories (which are part of the artifacts). The // test will then fail on purpose when it's done with instructions on where to // move the files. func makeVersionFixtureAndFatal( ctx context.Context, t test.Test, c cluster.Cluster, makeFixtureVersion string, ) { var useLocalBinary bool if makeFixtureVersion == "" { c.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings(), c.Node(1)) require.NoError(t, c.Conn(ctx, t.L(), 1).QueryRowContext( ctx, `select regexp_extract(value, '^v([0-9]+\.[0-9]+\.[0-9]+)') from crdb_internal.node_build_info where field = 'Version';`, ).Scan(&makeFixtureVersion)) c.Wipe(ctx, c.Node(1)) useLocalBinary = true } predecessorVersion, err := version.PredecessorVersion(*version.MustParse("v" + makeFixtureVersion)) if err != nil { t.Fatal(err) } t.L().Printf("making fixture for %s (starting at %s)", makeFixtureVersion, predecessorVersion) if useLocalBinary { // Make steps below use the main cockroach binary (in particular, don't try // to download the released version for makeFixtureVersion which may not yet // exist) makeFixtureVersion = "" } newVersionUpgradeTest(c, // Start the cluster from a fixture. That fixture's cluster version may // be at the predecessor version (though in practice it's fully up to // date, if it was created via the checkpointer above), so add a // waitForUpgradeStep to make sure we're upgraded all the way before // moving on. // // See the comment on createCheckpoints for details on fixtures. 
uploadAndStartFromCheckpointFixture(c.All(), predecessorVersion), waitForUpgradeStep(c.All()), // NB: at this point, cluster and binary version equal predecessorVersion, // and auto-upgrades are on. binaryUpgradeStep(c.All(), makeFixtureVersion), waitForUpgradeStep(c.All()), func(ctx context.Context, t test.Test, u *versionUpgradeTest) { // If we're taking checkpoints, momentarily stop the cluster (we // need to do that to get the checkpoints to reflect a // consistent cluster state). The binary at this point will be // the new one, but the cluster version was not explicitly // bumped, though auto-update may have taken place already. // For example, if newVersion is 2.1, the cluster version in // the store directories may be 2.0 on some stores and 2.1 on // the others (though if any are on 2.1, then that's what's // stored in system.settings). // This means that when we restart from that version, we're // going to want to use the binary mentioned in the checkpoint, // or at least one compatible with the *predecessor* of the // checkpoint version. For example, for checkpoint-2.1, the // cluster version might be 2.0, so we can only use the 2.0 or // 2.1 binary, but not the 19.1 binary (as 19.1 and 2.0 are not // compatible). name := clusterupgrade.CheckpointName(u.binaryVersion(ctx, t, 1).String()) u.c.Stop(ctx, t.L(), option.DefaultStopOpts(), c.All()) binaryPath := clusterupgrade.BinaryPathFromVersion(makeFixtureVersion) c.Run(ctx, c.All(), binaryPath, "debug", "pebble", "db", "checkpoint", "{store-dir}", "{store-dir}/"+name) // The `cluster-bootstrapped` marker can already be found within // store-dir, but the rocksdb checkpoint step above does not pick it // up as it isn't recognized by RocksDB. We copy the marker // manually, it's necessary for roachprod created clusters. See // #54761. c.Run(ctx, c.Node(1), "cp", "{store-dir}/cluster-bootstrapped", "{store-dir}/"+name) // Similar to the above - newer versions require the min version file to open a store. c.Run(ctx, c.Node(1), "cp", fmt.Sprintf("{store-dir}/%s", storage.MinVersionFilename), "{store-dir}/"+name) c.Run(ctx, c.All(), "tar", "-C", "{store-dir}/"+name, "-czf", "{log-dir}/"+name+".tgz", ".") t.Fatalf(`successfully created checkpoints; failing test on purpose. Invoke the following to move the archives to the right place and commit the result: for i in 1 2 3 4; do mkdir -p pkg/cmd/roachtest/fixtures/${i} && \ mv artifacts/generate-fixtures/run_1/logs/${i}.unredacted/checkpoint-*.tgz \ pkg/cmd/roachtest/fixtures/${i}/ done `) }).run(ctx, t) } // importTPCCStep runs a TPCC import import on the first crdbNode (monitoring them all for // crashes during the import). If oldV is nil, this runs the import using the specified // version (for example "19.2.1", as provided by PredecessorVersion()) using the location // used by c.Stage(). An empty oldV uses the main cockroach binary. func importTPCCStep( oldV string, headroomWarehouses int, crdbNodes option.NodeListOption, ) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { // We need to use the predecessor binary to load into the // predecessor cluster to avoid random breakage. For example, you // can't use 21.1 to import into 20.2 due to some flag changes. // // TODO(tbg): also import a large dataset (for example 2TB bank) // that will provide cold data that may need to be migrated. 
var cmd string if oldV == "" { cmd = tpccImportCmd(headroomWarehouses) } else { cmd = tpccImportCmdWithCockroachBinary(filepath.Join("v"+oldV, "cockroach"), headroomWarehouses, "--checks=false") } // Use a monitor so that we fail cleanly if the cluster crashes // during import. m := u.c.NewMonitor(ctx, crdbNodes) m.Go(func(ctx context.Context) error { return u.c.RunE(ctx, u.c.Node(crdbNodes[0]), cmd) }) m.Wait() } } func importLargeBankStep(oldV string, rows int, crdbNodes option.NodeListOption) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { // Use the predecessor binary to load into the predecessor // cluster to avoid random breakage due to flag changes, etc. binary := "./cockroach" if oldV != "" { binary = filepath.Join("v"+oldV, "cockroach") } // Use a monitor so that we fail cleanly if the cluster crashes // during import. m := u.c.NewMonitor(ctx, crdbNodes) m.Go(func(ctx context.Context) error { return u.c.RunE(ctx, u.c.Node(crdbNodes[0]), binary, "workload", "fixtures", "import", "bank", "--payload-bytes=10240", "--rows="+fmt.Sprint(rows), "--seed=4", "--db=bigbank") }) m.Wait() } } func sleepStep(d time.Duration) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { time.Sleep(d) } }
pkg/cmd/roachtest/tests/versionupgrade.go
1
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.23481255769729614, 0.005231813061982393, 0.00016177192446775734, 0.0001694428938208148, 0.032892242074012756 ]
{ "id": 6, "code_window": [ "\t\"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/mixedversion\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/roachprod/install\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/roachprod/logger\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/storage\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/timeutil\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/version\"\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 28 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colserde import ( "bytes" "context" "encoding/binary" "io" "os" "github.com/apache/arrow/go/arrow/array" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/colserde/arrowserde" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/errors" mmap "github.com/edsrzf/mmap-go" flatbuffers "github.com/google/flatbuffers/go" ) const fileMagic = `ARROW1` var fileMagicPadding [8 - len(fileMagic)]byte type fileBlock struct { offset int64 metadataLen int32 bodyLen int64 } // FileSerializer converts our in-mem columnar batch representation into the // arrow specification's file format. All batches serialized to a file must have // the same schema. type FileSerializer struct { scratch [4]byte w *countingWriter typs []*types.T fb *flatbuffers.Builder a *ArrowBatchConverter rb *RecordBatchSerializer recordBatches []fileBlock } // NewFileSerializer creates a FileSerializer for the given types. The caller is // responsible for closing the given writer as well as the given memory account. func NewFileSerializer( w io.Writer, typs []*types.T, acc *mon.BoundAccount, ) (*FileSerializer, error) { a, err := NewArrowBatchConverter(typs, BatchToArrowOnly, acc) if err != nil { return nil, err } rb, err := NewRecordBatchSerializer(typs) if err != nil { return nil, err } s := &FileSerializer{ typs: typs, fb: flatbuffers.NewBuilder(flatbufferBuilderInitialCapacity), a: a, rb: rb, } return s, s.Reset(w) } // Reset can be called to reuse this FileSerializer with a new io.Writer after // calling Finish. The types will remain the ones passed to the constructor. The // caller is responsible for closing the given writer. func (s *FileSerializer) Reset(w io.Writer) error { if s.w != nil { return errors.New(`Finish must be called before Reset`) } s.w = &countingWriter{wrapped: w} s.recordBatches = s.recordBatches[:0] if _, err := io.WriteString(s.w, fileMagic); err != nil { return err } // Pad to 8 byte boundary. if _, err := s.w.Write(fileMagicPadding[:]); err != nil { return err } // The file format is a wrapper around the streaming format and the streaming // format starts with a Schema message. s.fb.Reset() messageOffset := schemaMessage(s.fb, s.typs) s.fb.Finish(messageOffset) schemaBytes := s.fb.FinishedBytes() if _, err := s.w.Write(schemaBytes); err != nil { return err } _, err := s.w.Write(make([]byte, calculatePadding(len(schemaBytes)))) return err } // AppendBatch adds one batch of columnar data to the file. 
func (s *FileSerializer) AppendBatch(ctx context.Context, batch coldata.Batch) error { offset := int64(s.w.written) arrow, err := s.a.BatchToArrow(ctx, batch) if err != nil { return err } metadataLen, bodyLen, err := s.rb.Serialize(s.w, arrow, batch.Length()) if err != nil { return err } s.recordBatches = append(s.recordBatches, fileBlock{ offset: offset, metadataLen: int32(metadataLen), bodyLen: int64(bodyLen), }) return nil } // Finish writes the footer metadata described by the arrow spec. Nothing can be // called after Finish except Reset. func (s *FileSerializer) Finish() error { defer func() { s.w = nil }() // Write the footer flatbuffer, which has byte offsets of all the record // batch messages in the file. s.fb.Reset() footerOffset := fileFooter(s.fb, s.typs, s.recordBatches) s.fb.Finish(footerOffset) footerBytes := s.fb.FinishedBytes() if _, err := s.w.Write(footerBytes); err != nil { return err } // For the footer, and only the footer, the spec requires the length _after_ // the footer so that it can be read by starting at the back of the file and // working forward. binary.LittleEndian.PutUint32(s.scratch[:], uint32(len(footerBytes))) if _, err := s.w.Write(s.scratch[:]); err != nil { return err } // Spec wants the magic again here. _, err := io.WriteString(s.w, fileMagic) return err } // Close releases the resources of the serializer. func (s *FileSerializer) Close(ctx context.Context) { s.a.Release(ctx) } // FileDeserializer decodes columnar data batches from files encoded according // to the arrow spec. type FileDeserializer struct { buf []byte bufCloseFn func() error recordBatches []fileBlock idx int end int typs []*types.T a *ArrowBatchConverter rb *RecordBatchSerializer arrowScratch []array.Data } // NewFileDeserializerFromBytes constructs a FileDeserializer for an in-memory // buffer. func NewFileDeserializerFromBytes(typs []*types.T, buf []byte) (*FileDeserializer, error) { return newFileDeserializer(typs, buf, func() error { return nil }) } // NewTestFileDeserializerFromPath constructs a FileDeserializer by reading it // from a file. It is only used in tests. func NewTestFileDeserializerFromPath(typs []*types.T, path string) (*FileDeserializer, error) { f, err := os.Open(path) if err != nil { return nil, pgerror.Wrapf(err, pgcode.Io, `opening %s`, path) } // TODO(dan): This is currently using copy on write semantics because we store // the nulls differently in-mem than arrow does and there's an in-place // conversion. If we used the same format that arrow does, this could be // switched to mmap.RDONLY (it's easy to check, the test fails with a SIGBUS // right now with mmap.RDONLY). buf, err := mmap.Map(f, mmap.COPY, 0 /* flags */) if err != nil { return nil, pgerror.Wrapf(err, pgcode.Io, `mmaping %s`, path) } return newFileDeserializer(typs, buf, buf.Unmap) } func newFileDeserializer( typs []*types.T, buf []byte, bufCloseFn func() error, ) (*FileDeserializer, error) { d := &FileDeserializer{ buf: buf, bufCloseFn: bufCloseFn, end: len(buf), } var err error if err = d.init(); err != nil { return nil, err } d.typs = typs if d.a, err = NewArrowBatchConverter(typs, ArrowToBatchOnly, nil /* acc */); err != nil { return nil, err } if d.rb, err = NewRecordBatchSerializer(typs); err != nil { return nil, err } d.arrowScratch = make([]array.Data, 0, len(typs)) return d, nil } // Close releases any resources held by this deserializer. 
func (d *FileDeserializer) Close(ctx context.Context) error { d.a.Release(ctx) return d.bufCloseFn() } // Typs returns the in-memory types for the data stored in this file. func (d *FileDeserializer) Typs() []*types.T { return d.typs } // NumBatches returns the number of record batches stored in this file. func (d *FileDeserializer) NumBatches() int { return len(d.recordBatches) } // GetBatch fills in the given in-mem batch with the requested on-disk data. func (d *FileDeserializer) GetBatch(batchIdx int, b coldata.Batch) error { rb := d.recordBatches[batchIdx] d.idx = int(rb.offset) buf, err := d.read(metadataLengthNumBytes + int(rb.metadataLen) + int(rb.bodyLen)) if err != nil { return err } d.arrowScratch = d.arrowScratch[:0] batchLength, err := d.rb.Deserialize(&d.arrowScratch, buf) if err != nil { return err } return d.a.ArrowToBatch(d.arrowScratch, batchLength, b) } // read gets the next `n` bytes from the start of the buffer, consuming them. func (d *FileDeserializer) read(n int) ([]byte, error) { if d.idx+n > d.end { return nil, io.EOF } start := d.idx d.idx += n return d.buf[start:d.idx], nil } // readBackward gets the `n` bytes from the end of the buffer, consuming them. func (d *FileDeserializer) readBackward(n int) ([]byte, error) { if d.idx+n > d.end { return nil, io.EOF } end := d.end d.end -= n return d.buf[d.end:end], nil } // init verifies the file magic and headers. After init, the `idx` and `end` // fields are set to the range of record batches and dictionary batches // described by the arrow spec's streaming format. func (d *FileDeserializer) init() error { // Check the header magic if magic, err := d.read(8); err != nil { return pgerror.Wrap(err, pgcode.DataException, `verifying arrow file header magic`) } else if !bytes.Equal([]byte(fileMagic), magic[:len(fileMagic)]) { return errors.New(`arrow file header magic mismatch`) } if magic, err := d.readBackward(len(fileMagic)); err != nil { return pgerror.Wrap(err, pgcode.DataException, `verifying arrow file footer magic`) } else if !bytes.Equal([]byte(fileMagic), magic) { return errors.New(`arrow file magic footer mismatch`) } footerSize, err := d.readBackward(4) if err != nil { return pgerror.Wrap(err, pgcode.DataException, `reading arrow file footer`) } footerBytes, err := d.readBackward(int(binary.LittleEndian.Uint32(footerSize))) if err != nil { return pgerror.Wrap(err, pgcode.DataException, `reading arrow file footer`) } footer := arrowserde.GetRootAsFooter(footerBytes, 0) if footer.Version() != arrowserde.MetadataVersionV1 { return errors.Errorf(`only arrow V1 is supported got %d`, footer.Version()) } var block arrowserde.Block d.recordBatches = d.recordBatches[:0] for blockIdx := 0; blockIdx < footer.RecordBatchesLength(); blockIdx++ { footer.RecordBatches(&block, blockIdx) d.recordBatches = append(d.recordBatches, fileBlock{ offset: block.Offset(), metadataLen: block.MetaDataLength(), bodyLen: block.BodyLength(), }) } return nil } type countingWriter struct { wrapped io.Writer written int } func (w *countingWriter) Write(buf []byte) (int, error) { n, err := w.wrapped.Write(buf) w.written += n return n, err } func schema(fb *flatbuffers.Builder, typs []*types.T) flatbuffers.UOffsetT { fieldOffsets := make([]flatbuffers.UOffsetT, len(typs)) for idx, typ := range typs { var fbTyp byte var fbTypOffset flatbuffers.UOffsetT switch typeconv.TypeFamilyToCanonicalTypeFamily(typ.Family()) { case types.BoolFamily: arrowserde.BoolStart(fb) fbTypOffset = arrowserde.BoolEnd(fb) fbTyp = arrowserde.TypeBool case 
types.BytesFamily, types.JsonFamily: arrowserde.BinaryStart(fb) fbTypOffset = arrowserde.BinaryEnd(fb) fbTyp = arrowserde.TypeBinary case types.IntFamily: switch typ.Width() { case 16: arrowserde.IntStart(fb) arrowserde.IntAddBitWidth(fb, 16) arrowserde.IntAddIsSigned(fb, 1) fbTypOffset = arrowserde.IntEnd(fb) fbTyp = arrowserde.TypeInt case 32: arrowserde.IntStart(fb) arrowserde.IntAddBitWidth(fb, 32) arrowserde.IntAddIsSigned(fb, 1) fbTypOffset = arrowserde.IntEnd(fb) fbTyp = arrowserde.TypeInt case 0, 64: arrowserde.IntStart(fb) arrowserde.IntAddBitWidth(fb, 64) arrowserde.IntAddIsSigned(fb, 1) fbTypOffset = arrowserde.IntEnd(fb) fbTyp = arrowserde.TypeInt default: panic(errors.Errorf(`unexpected int width %d`, typ.Width())) } case types.FloatFamily: arrowserde.FloatingPointStart(fb) arrowserde.FloatingPointAddPrecision(fb, arrowserde.PrecisionDOUBLE) fbTypOffset = arrowserde.FloatingPointEnd(fb) fbTyp = arrowserde.TypeFloatingPoint case types.DecimalFamily: // Decimals are marshaled into bytes, so we use binary headers. arrowserde.BinaryStart(fb) fbTypOffset = arrowserde.BinaryEnd(fb) fbTyp = arrowserde.TypeDecimal case types.TimestampTZFamily: // Timestamps are marshaled into bytes, so we use binary headers. arrowserde.BinaryStart(fb) fbTypOffset = arrowserde.BinaryEnd(fb) fbTyp = arrowserde.TypeTimestamp case types.IntervalFamily: // Intervals are marshaled into bytes, so we use binary headers. arrowserde.BinaryStart(fb) fbTypOffset = arrowserde.BinaryEnd(fb) fbTyp = arrowserde.TypeInterval case typeconv.DatumVecCanonicalTypeFamily: // Datums are marshaled into bytes, so we use binary headers. arrowserde.BinaryStart(fb) fbTypOffset = arrowserde.BinaryEnd(fb) fbTyp = arrowserde.TypeUtf8 default: panic(errors.Errorf(`don't know how to map %s`, typ)) } arrowserde.FieldStart(fb) arrowserde.FieldAddTypeType(fb, fbTyp) arrowserde.FieldAddType(fb, fbTypOffset) fieldOffsets[idx] = arrowserde.FieldEnd(fb) } arrowserde.SchemaStartFieldsVector(fb, len(typs)) // flatbuffers adds everything back to front. Reverse iterate so they're in // the right order when they come out. for i := len(fieldOffsets) - 1; i >= 0; i-- { fb.PrependUOffsetT(fieldOffsets[i]) } fields := fb.EndVector(len(typs)) arrowserde.SchemaStart(fb) arrowserde.SchemaAddFields(fb, fields) return arrowserde.SchemaEnd(fb) } func schemaMessage(fb *flatbuffers.Builder, typs []*types.T) flatbuffers.UOffsetT { schemaOffset := schema(fb, typs) arrowserde.MessageStart(fb) arrowserde.MessageAddVersion(fb, arrowserde.MetadataVersionV1) arrowserde.MessageAddHeaderType(fb, arrowserde.MessageHeaderSchema) arrowserde.MessageAddHeader(fb, schemaOffset) return arrowserde.MessageEnd(fb) } func fileFooter( fb *flatbuffers.Builder, typs []*types.T, recordBatches []fileBlock, ) flatbuffers.UOffsetT { schemaOffset := schema(fb, typs) arrowserde.FooterStartRecordBatchesVector(fb, len(recordBatches)) // flatbuffers adds everything back to front. Reverse iterate so they're in // the right order when they come out. for i := len(recordBatches) - 1; i >= 0; i-- { rb := recordBatches[i] arrowserde.CreateBlock(fb, rb.offset, rb.metadataLen, rb.bodyLen) } recordBatchesOffset := fb.EndVector(len(recordBatches)) arrowserde.FooterStart(fb) arrowserde.FooterAddVersion(fb, arrowserde.MetadataVersionV1) arrowserde.FooterAddSchema(fb, schemaOffset) arrowserde.FooterAddRecordBatches(fb, recordBatchesOffset) return arrowserde.FooterEnd(fb) }
pkg/col/colserde/file.go
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.001308003906160593, 0.0001972444006241858, 0.00016379235603380948, 0.00017283800116274506, 0.00016752751253079623 ]
{ "id": 6, "code_window": [ "\t\"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/mixedversion\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/roachprod/install\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/roachprod/logger\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/storage\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/timeutil\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/version\"\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 28 }
#!/usr/bin/env bash

set -euo pipefail

source ./download_binary.sh

if [[ $# -ne 2 ]]
then
  echo "usage: $0 EXPECTED-VERSION EXPECTED-SHA" >&2
  exit 1
fi

COCKROACH_VERSION=$1
COCKROACH_SHA=$2

download_and_extract "$COCKROACH_VERSION" "windows-6.2-amd64.zip"

./bincheck ./mnt/cockroach.exe "$COCKROACH_VERSION" "$COCKROACH_SHA"
build/release/bincheck/test-windows
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.0002567475603427738, 0.00021294341422617435, 0.00016913926810957491, 0.00021294341422617435, 0.00004380414611659944 ]
{ "id": 6, "code_window": [ "\t\"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/mixedversion\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/roachprod/install\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/roachprod/logger\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/storage\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/timeutil\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/version\"\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 28 }
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package invertedexpr import ( "math" "sort" "github.com/cockroachdb/cockroach/pkg/geo/geoindex" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/inverted" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/errors" ) // This file contains functions to encode geoindex.{UnionKeySpans, RPKeyExpr} // into a SpanExpression. These functions are in this package since they // need to use keyside.Encode to convert geoindex.Key to invertedexpr.EncVal and // that cannot be done in the geoindex package as it introduces a circular // dependency. // // TODO(sumeer): change geoindex to produce SpanExpressions directly. func geoKeyToEncInvertedVal(k geoindex.Key, end bool, b []byte) (inverted.EncVal, []byte) { // geoindex.KeySpan.End is inclusive, while InvertedSpan.end is exclusive. // For all but k == math.MaxUint64, we can account for this before the key // encoding. For k == math.MaxUint64, we must PrefixEnd after, which incurs // a separate memory allocation. prefixEnd := false if end { if k < math.MaxUint64 { k++ } else { prefixEnd = true } } prev := len(b) b = encoding.EncodeGeoInvertedAscending(b) b = encoding.EncodeUvarintAscending(b, uint64(k)) // Set capacity so that the caller appending does not corrupt later keys. enc := b[prev:len(b):len(b)] if prefixEnd { enc = roachpb.Key(enc).PrefixEnd() } return enc, b } func geoToSpan(span geoindex.KeySpan, b []byte) (inverted.Span, []byte) { start, b := geoKeyToEncInvertedVal(span.Start, false, b) end, b := geoKeyToEncInvertedVal(span.End, true, b) return inverted.Span{Start: start, End: end}, b } // GeoUnionKeySpansToSpanExpr converts geoindex.UnionKeySpans to a // SpanExpression. func GeoUnionKeySpansToSpanExpr(ukSpans geoindex.UnionKeySpans) inverted.Expression { if len(ukSpans) == 0 { return inverted.NonInvertedColExpression{} } // Avoid per-span heap allocations. Each of the 2 keys in a span is the // geoInvertedIndexMarker (1 byte) followed by a varint. b := make([]byte, 0, len(ukSpans)*(2*encoding.MaxVarintLen+2)) spans := make([]inverted.Span, len(ukSpans)) for i, ukSpan := range ukSpans { spans[i], b = geoToSpan(ukSpan, b) } return &inverted.SpanExpression{ SpansToRead: spans, FactoredUnionSpans: spans, } } // GeoRPKeyExprToSpanExpr converts geoindex.RPKeyExpr to SpanExpression. func GeoRPKeyExprToSpanExpr(rpExpr geoindex.RPKeyExpr) (inverted.Expression, error) { if len(rpExpr) == 0 { return inverted.NonInvertedColExpression{}, nil } spansToRead := make([]inverted.Span, 0, len(rpExpr)) var b []byte // avoid per-expr heap allocations var stack []*inverted.SpanExpression for _, elem := range rpExpr { switch e := elem.(type) { case geoindex.Key: var span inverted.Span span, b = geoToSpan(geoindex.KeySpan{Start: e, End: e}, b) // The keys in the RPKeyExpr are unique, so simply append to spansToRead. 
spansToRead = append(spansToRead, span) stack = append(stack, &inverted.SpanExpression{ FactoredUnionSpans: []inverted.Span{span}, }) case geoindex.RPSetOperator: if len(stack) < 2 { return nil, errors.Errorf("malformed expression: %s", rpExpr) } node0, node1 := stack[len(stack)-1], stack[len(stack)-2] var node *inverted.SpanExpression stack = stack[:len(stack)-2] switch e { case geoindex.RPSetIntersection: node = makeSpanExpression(inverted.SetIntersection, node0, node1) case geoindex.RPSetUnion: if node0.Operator == inverted.None { node0, node1 = node1, node0 } if node1.Operator == inverted.None { // node1 can be discarded after unioning its FactoredUnionSpans. node = node0 // Union into the one with the larger capacity. This optimizes // the case of many unions. We will sort the spans later. if cap(node.FactoredUnionSpans) < cap(node1.FactoredUnionSpans) { node.FactoredUnionSpans = append(node1.FactoredUnionSpans, node.FactoredUnionSpans...) } else { node.FactoredUnionSpans = append(node.FactoredUnionSpans, node1.FactoredUnionSpans...) } } else { node = makeSpanExpression(inverted.SetUnion, node0, node1) } } stack = append(stack, node) } } if len(stack) != 1 { return inverted.NonInvertedColExpression{}, errors.Errorf("malformed expression: %s", rpExpr) } spanExpr := *stack[0] spanExpr.SpansToRead = spansToRead sort.Sort(spanExpr.SpansToRead) // Sort the FactoredUnionSpans of the root. The others are already sorted // in makeSpanExpression. sort.Sort(spanExpr.FactoredUnionSpans) return &spanExpr, nil } func makeSpanExpression( op inverted.SetOperator, n0 *inverted.SpanExpression, n1 *inverted.SpanExpression, ) *inverted.SpanExpression { sort.Sort(n0.FactoredUnionSpans) sort.Sort(n1.FactoredUnionSpans) return &inverted.SpanExpression{ Operator: op, Left: n0, Right: n1, } }
pkg/sql/opt/invertedexpr/geo_expression.go
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.00048114682431332767, 0.0001921124930959195, 0.00016386451898142695, 0.00017329465481452644, 0.00007476320752175525 ]
{ "id": 7, "code_window": [ "\t\"github.com/cockroachdb/cockroach/pkg/roachprod/logger\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/storage\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/timeutil\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/version\"\n", "\t\"github.com/stretchr/testify/require\"\n", ")\n", "\n", "type versionFeatureTest struct {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 33 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package tests import ( "context" "runtime" "strings" "time" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" ) func registerFixtures(r registry.Registry) { // Run this test to create a new fixture for the version upgrade test. This // is necessary after every release. For example, the day `master` becomes // the 20.2 release, this test will fail because it is missing a fixture for // 20.1; run the test (on 20.1). Check it in (instructions will be logged // below) and off we go. // // The version to create/update the fixture for. Must be released (i.e. // can download it from the homepage); if that is not the case use the // empty string which uses the local cockroach binary. Make sure that // this binary then has the correct version. For example, to make a // "v20.2" fixture, you will need a binary that has "v20.2" in the // output of `./cockroach version`, and this process will end up // creating fixtures that have "v20.2" in them. This would be part // of tagging the master branch as v21.1 in the process of going // through the major release for v20.2. The version is passed in as // FIXTURE_VERSION environment variable. // // In the common case, one should populate this with the version (instead of // using the empty string) as this is the most straightforward and least // error-prone way to generate the fixtures. // // Please note that you do *NOT* need to update the fixtures in a patch // release. This only happens as part of preparing the master branch for the // next release. The release team runbooks, at time of writing, reflect // this. // // Example invocation: // roachtest --local run generate-fixtures --debug --cockroach ./cockroach \ // --build-tag v22.1.0-beta.3 tag:fixtures runFixtures := func( ctx context.Context, t test.Test, c cluster.Cluster, ) { if c.IsLocal() && runtime.GOARCH == "arm64" { t.Skip("Skip under ARM64. See https://github.com/cockroachdb/cockroach/issues/89268") } fixtureVersion := strings.TrimPrefix(t.BuildVersion().String(), "v") makeVersionFixtureAndFatal(ctx, t, c, fixtureVersion) } spec := registry.TestSpec{ Name: "generate-fixtures", Timeout: 30 * time.Minute, Tags: registry.Tags("fixtures"), Owner: registry.OwnerDevInf, Cluster: r.MakeClusterSpec(4), Run: runFixtures, } r.Add(spec) }
pkg/cmd/roachtest/tests/fixtures.go
1
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.0005994573584757745, 0.0002650862734299153, 0.0001633634092286229, 0.00017533422214910388, 0.00014472025213763118 ]
{ "id": 7, "code_window": [ "\t\"github.com/cockroachdb/cockroach/pkg/roachprod/logger\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/storage\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/timeutil\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/version\"\n", "\t\"github.com/stretchr/testify/require\"\n", ")\n", "\n", "type versionFeatureTest struct {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 33 }
// Copyright 2022 The Cockroach Authors. // // Licensed as a CockroachDB Enterprise file under the Cockroach Community // License (the "License"); you may not use this file except in compliance with // the License. You may obtain a copy of the License at // // https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt package streamingest import ( "context" "math" "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl" "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/replicationutils" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/multitenant/mtinfopb" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/exprutil" "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/asof" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/errors" ) const alterReplicationJobOp = "ALTER TENANT REPLICATION" var alterReplicationCutoverHeader = colinfo.ResultColumns{ {Name: "cutover_time", Typ: types.Decimal}, } // ResolvedTenantReplicationOptions represents options from an // evaluated CREATE TENANT FROM REPLICATION command. type resolvedTenantReplicationOptions struct { retention *int32 } func evalTenantReplicationOptions( ctx context.Context, options tree.TenantReplicationOptions, eval exprutil.Evaluator, ) (*resolvedTenantReplicationOptions, error) { r := &resolvedTenantReplicationOptions{} if options.Retention != nil { dur, err := eval.Duration(ctx, options.Retention) if err != nil { return nil, err } retSeconds64, ok := dur.AsInt64() if !ok { return nil, errors.Newf("interval conversion error: %v", dur) } if retSeconds64 > math.MaxInt32 || retSeconds64 < 0 { return nil, errors.Newf("retention should result in a number of seconds between 0 and %d", math.MaxInt32) } retSeconds := int32(retSeconds64) r.retention = &retSeconds } return r, nil } func (r *resolvedTenantReplicationOptions) GetRetention() (int32, bool) { if r == nil || r.retention == nil { return 0, false } return *r.retention, true } func alterReplicationJobTypeCheck( ctx context.Context, stmt tree.Statement, p sql.PlanHookState, ) (matched bool, header colinfo.ResultColumns, _ error) { alterStmt, ok := stmt.(*tree.AlterTenantReplication) if !ok { return false, nil, nil } if err := exprutil.TypeCheck( ctx, alterReplicationJobOp, p.SemaCtx(), exprutil.TenantSpec{TenantSpec: alterStmt.TenantSpec}, exprutil.Strings{alterStmt.Options.Retention}, ); err != nil { return false, nil, err } if cutoverTime := alterStmt.Cutover; cutoverTime != nil { if cutoverTime.Timestamp != nil { evalCtx := &p.ExtendedEvalContext().Context if _, err := typeCheckCutoverTime(ctx, evalCtx, p.SemaCtx(), cutoverTime.Timestamp); err != nil { return false, nil, err } } return true, alterReplicationCutoverHeader, nil } return true, nil, nil } func alterReplicationJobHook( ctx context.Context, stmt tree.Statement, p sql.PlanHookState, ) (sql.PlanHookRowFn, colinfo.ResultColumns, []sql.PlanNode, bool, error) { alterTenantStmt, ok := 
stmt.(*tree.AlterTenantReplication) if !ok { return nil, nil, nil, false, nil } if !streamingccl.CrossClusterReplicationEnabled.Get(&p.ExecCfg().Settings.SV) { return nil, nil, nil, false, errors.WithTelemetry( pgerror.WithCandidateCode( errors.WithHint( errors.Newf("cross cluster replication is disabled"), "You can enable cross cluster replication by running `SET CLUSTER SETTING cross_cluster_replication.enabled = true`.", ), pgcode.ExperimentalFeature, ), "cross_cluster_replication.enabled", ) } if !p.ExecCfg().Codec.ForSystemTenant() { return nil, nil, nil, false, pgerror.Newf(pgcode.InsufficientPrivilege, "only the system tenant can alter tenant") } var cutoverTime hlc.Timestamp if alterTenantStmt.Cutover != nil { if !alterTenantStmt.Cutover.Latest { if alterTenantStmt.Cutover.Timestamp == nil { return nil, nil, nil, false, errors.AssertionFailedf("unexpected nil cutover expression") } evalCtx := &p.ExtendedEvalContext().Context ct, err := evalCutoverTime(ctx, evalCtx, p.SemaCtx(), alterTenantStmt.Cutover.Timestamp) if err != nil { return nil, nil, nil, false, err } cutoverTime = ct } } exprEval := p.ExprEvaluator(alterReplicationJobOp) options, err := evalTenantReplicationOptions(ctx, alterTenantStmt.Options, exprEval) if err != nil { return nil, nil, nil, false, err } fn := func(ctx context.Context, _ []sql.PlanNode, resultsCh chan<- tree.Datums) error { if err := utilccl.CheckEnterpriseEnabled( p.ExecCfg().Settings, p.ExecCfg().NodeInfo.LogicalClusterID(), "ALTER TENANT REPLICATION", ); err != nil { return err } if err := sql.CanManageTenant(ctx, p); err != nil { return err } tenInfo, err := p.LookupTenantInfo(ctx, alterTenantStmt.TenantSpec, "ALTER TENANT REPLICATION") if err != nil { return err } if tenInfo.TenantReplicationJobID == 0 { return errors.Newf("tenant %q (%d) does not have an active replication job", tenInfo.Name, tenInfo.ID) } jobRegistry := p.ExecCfg().JobRegistry if alterTenantStmt.Cutover != nil { pts := p.ExecCfg().ProtectedTimestampProvider.WithTxn(p.InternalSQLTxn()) actualCutoverTime, err := alterTenantJobCutover( ctx, p.InternalSQLTxn(), jobRegistry, pts, alterTenantStmt, tenInfo, cutoverTime) if err != nil { return err } resultsCh <- tree.Datums{eval.TimestampToDecimalDatum(actualCutoverTime)} } else if !alterTenantStmt.Options.IsDefault() { if err := alterTenantOptions(ctx, p.InternalSQLTxn(), jobRegistry, options, tenInfo); err != nil { return err } } else { switch alterTenantStmt.Command { case tree.ResumeJob: if err := jobRegistry.Unpause(ctx, p.InternalSQLTxn(), tenInfo.TenantReplicationJobID); err != nil { return err } case tree.PauseJob: if err := jobRegistry.PauseRequested(ctx, p.InternalSQLTxn(), tenInfo.TenantReplicationJobID, "ALTER TENANT PAUSE REPLICATION"); err != nil { return err } default: return errors.New("unsupported job command in ALTER TENANT REPLICATION") } } return nil } if alterTenantStmt.Cutover != nil { return fn, alterReplicationCutoverHeader, nil, false, nil } return fn, nil, nil, false, nil } // alterTenantJobCutover returns the cutover timestamp that was used to initiate // the cutover process - if the command is 'ALTER TENANT .. COMPLETE REPLICATION // TO LATEST' then the frontier high water timestamp is used. 
func alterTenantJobCutover( ctx context.Context, txn isql.Txn, jobRegistry *jobs.Registry, ptp protectedts.Storage, alterTenantStmt *tree.AlterTenantReplication, tenInfo *mtinfopb.TenantInfo, cutoverTime hlc.Timestamp, ) (hlc.Timestamp, error) { if alterTenantStmt == nil || alterTenantStmt.Cutover == nil { return hlc.Timestamp{}, errors.AssertionFailedf("unexpected nil ALTER TENANT cutover expression") } tenantName := tenInfo.Name job, err := jobRegistry.LoadJobWithTxn(ctx, tenInfo.TenantReplicationJobID, txn) if err != nil { return hlc.Timestamp{}, err } details, ok := job.Details().(jobspb.StreamIngestionDetails) if !ok { return hlc.Timestamp{}, errors.Newf("job with id %d is not a stream ingestion job", job.ID()) } progress := job.Progress() if alterTenantStmt.Cutover.Latest { ts := progress.GetHighWater() if ts == nil || ts.IsEmpty() { return hlc.Timestamp{}, errors.Newf("replicated tenant %q has not yet recorded a safe replication time", tenantName) } cutoverTime = *ts } // TODO(ssd): We could use the replication manager here, but // that embeds a priviledge check which is already completed. // // Check that the timestamp is above our retained timestamp. stats, err := replicationutils.GetStreamIngestionStatsNoHeartbeat(ctx, details, progress) if err != nil { return hlc.Timestamp{}, err } if stats.IngestionDetails.ProtectedTimestampRecordID == nil { return hlc.Timestamp{}, errors.Newf("replicated tenant %q (%d) has not yet recorded a retained timestamp", tenantName, tenInfo.ID) } else { record, err := ptp.GetRecord(ctx, *stats.IngestionDetails.ProtectedTimestampRecordID) if err != nil { return hlc.Timestamp{}, err } if cutoverTime.Less(record.Timestamp) { return hlc.Timestamp{}, errors.Newf("cutover time %s is before earliest safe cutover time %s", cutoverTime, record.Timestamp) } } if err := completeStreamIngestion(ctx, jobRegistry, txn, tenInfo.TenantReplicationJobID, cutoverTime); err != nil { return hlc.Timestamp{}, err } return cutoverTime, nil } func alterTenantOptions( ctx context.Context, txn isql.Txn, jobRegistry *jobs.Registry, options *resolvedTenantReplicationOptions, tenInfo *mtinfopb.TenantInfo, ) error { return jobRegistry.UpdateJobWithTxn(ctx, tenInfo.TenantReplicationJobID, txn, false, /* useReadLock */ func(txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { streamIngestionDetails := md.Payload.GetStreamIngestion() if ret, ok := options.GetRetention(); ok { streamIngestionDetails.ReplicationTTLSeconds = ret } ju.UpdatePayload(md.Payload) return nil }) } func typeCheckCutoverTime( ctx context.Context, evalCtx *eval.Context, semaCtx *tree.SemaContext, cutoverExpr tree.Expr, ) (tree.TypedExpr, error) { typedExpr, err := tree.TypeCheckAndRequire(ctx, cutoverExpr, semaCtx, types.Any, alterReplicationJobOp) if err != nil { return nil, err } // TODO(ssd): AOST and SPLIT are restricted to the use of constant expressions // or particular follower-read related functions. Do we want to do that here as well? // One nice side effect of allowing functions is that users can use NOW(). // These are the types currently supported by asof.DatumToHLC. 
switch typedExpr.ResolvedType().Family() { case types.IntervalFamily, types.TimestampTZFamily, types.TimestampFamily, types.StringFamily, types.DecimalFamily, types.IntFamily: return typedExpr, nil default: return nil, errors.Errorf("expected string, timestamp, decimal, interval, or integer, got %s", typedExpr.ResolvedType()) } } func evalCutoverTime( ctx context.Context, evalCtx *eval.Context, semaCtx *tree.SemaContext, cutoverExpr tree.Expr, ) (hlc.Timestamp, error) { typedExpr, err := typeCheckCutoverTime(ctx, evalCtx, semaCtx, cutoverExpr) if err != nil { return hlc.Timestamp{}, err } d, err := eval.Expr(ctx, evalCtx, typedExpr) if err != nil { return hlc.Timestamp{}, err } if d == tree.DNull { return hlc.MaxTimestamp, nil } stmtTimestamp := evalCtx.GetStmtTimestamp() return asof.DatumToHLC(evalCtx, stmtTimestamp, d, asof.ReplicationCutover) } func init() { sql.AddPlanHook("alter replication job", alterReplicationJobHook, alterReplicationJobTypeCheck) }
pkg/ccl/streamingccl/streamingest/alter_replication_job.go
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.00378056219778955, 0.0003028094070032239, 0.00016346361371688545, 0.00017449182632844895, 0.0006111137336120009 ]
{ "id": 7, "code_window": [ "\t\"github.com/cockroachdb/cockroach/pkg/roachprod/logger\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/storage\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/timeutil\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/version\"\n", "\t\"github.com/stretchr/testify/require\"\n", ")\n", "\n", "type versionFeatureTest struct {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 33 }
load("//build/bazelutil/unused_checker:unused.bzl", "get_x_data") load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "mixedversion", srcs = [ "mixedversion.go", "planner.go", "runner.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/mixedversion", visibility = ["//visibility:public"], deps = [ "//pkg/cmd/roachtest/cluster", "//pkg/cmd/roachtest/option", "//pkg/cmd/roachtest/roachtestutil", "//pkg/cmd/roachtest/roachtestutil/clusterupgrade", "//pkg/cmd/roachtest/test", "//pkg/roachpb", "//pkg/roachprod/logger", "//pkg/util/ctxgroup", "//pkg/util/randutil", "//pkg/util/timeutil", "//pkg/util/version", ], ) go_test( name = "mixedversion_test", srcs = ["planner_test.go"], args = ["-test.timeout=295s"], embed = [":mixedversion"], deps = [ "//pkg/cmd/roachtest/cluster", "//pkg/cmd/roachtest/option", "//pkg/cmd/roachtest/roachtestutil", "//pkg/cmd/roachtest/roachtestutil/clusterupgrade", "//pkg/roachprod/logger", "//pkg/util/version", "@com_github_stretchr_testify//require", ], ) get_x_data(name = "get_x_data")
pkg/cmd/roachtest/roachtestutil/mixedversion/BUILD.bazel
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.0025969380512833595, 0.0008881019311957061, 0.00017474782362114638, 0.00017740007024258375, 0.0009618081385269761 ]
{ "id": 7, "code_window": [ "\t\"github.com/cockroachdb/cockroach/pkg/roachprod/logger\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/storage\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/timeutil\"\n", "\t\"github.com/cockroachdb/cockroach/pkg/util/version\"\n", "\t\"github.com/stretchr/testify/require\"\n", ")\n", "\n", "type versionFeatureTest struct {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 33 }
1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 
1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H 1,I 1,J 1,K 1,L 1,M 1,N 1,O 1,P 1,Q 1,R 1,S 1,T 1,U 1,V 1,W 1,X 1,Y 1,Z 1,A 1,B 1,C 1,D 1,E 1,F 1,G 1,H
pkg/sql/importer/testdata/csv/data-4-dup
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.0001751500240061432, 0.00016138116188813, 0.0001599026145413518, 0.00016102046356536448, 0.0000016140559182531433 ]
{ "id": 8, "code_window": [ "// move the files.\n", "func makeVersionFixtureAndFatal(\n", "\tctx context.Context, t test.Test, c cluster.Cluster, makeFixtureVersion string,\n", ") {\n", "\tvar useLocalBinary bool\n", "\tif makeFixtureVersion == \"\" {\n", "\t\tc.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings(), c.Node(1))\n", "\t\trequire.NoError(t, c.Conn(ctx, t.L(), 1).QueryRowContext(\n", "\t\t\tctx,\n", "\t\t\t`select regexp_extract(value, '^v([0-9]+\\.[0-9]+\\.[0-9]+)') from crdb_internal.node_build_info where field = 'Version';`,\n", "\t\t).Scan(&makeFixtureVersion))\n", "\t\tc.Wipe(ctx, c.Node(1))\n", "\t\tuseLocalBinary = true\n", "\t}\n", "\n", "\tpredecessorVersion, err := version.PredecessorVersion(*version.MustParse(\"v\" + makeFixtureVersion))\n", "\tif err != nil {\n", "\t\tt.Fatal(err)\n", "\t}\n", "\n", "\tt.L().Printf(\"making fixture for %s (starting at %s)\", makeFixtureVersion, predecessorVersion)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tpredecessorVersion, err := version.PredecessorVersion(*version.MustParse(makeFixtureVersion))\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 352 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package tests import ( "context" "runtime" "strings" "time" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" ) func registerFixtures(r registry.Registry) { // Run this test to create a new fixture for the version upgrade test. This // is necessary after every release. For example, the day `master` becomes // the 20.2 release, this test will fail because it is missing a fixture for // 20.1; run the test (on 20.1). Check it in (instructions will be logged // below) and off we go. // // The version to create/update the fixture for. Must be released (i.e. // can download it from the homepage); if that is not the case use the // empty string which uses the local cockroach binary. Make sure that // this binary then has the correct version. For example, to make a // "v20.2" fixture, you will need a binary that has "v20.2" in the // output of `./cockroach version`, and this process will end up // creating fixtures that have "v20.2" in them. This would be part // of tagging the master branch as v21.1 in the process of going // through the major release for v20.2. The version is passed in as // FIXTURE_VERSION environment variable. // // In the common case, one should populate this with the version (instead of // using the empty string) as this is the most straightforward and least // error-prone way to generate the fixtures. // // Please note that you do *NOT* need to update the fixtures in a patch // release. This only happens as part of preparing the master branch for the // next release. The release team runbooks, at time of writing, reflect // this. // // Example invocation: // roachtest --local run generate-fixtures --debug --cockroach ./cockroach \ // --build-tag v22.1.0-beta.3 tag:fixtures runFixtures := func( ctx context.Context, t test.Test, c cluster.Cluster, ) { if c.IsLocal() && runtime.GOARCH == "arm64" { t.Skip("Skip under ARM64. See https://github.com/cockroachdb/cockroach/issues/89268") } fixtureVersion := strings.TrimPrefix(t.BuildVersion().String(), "v") makeVersionFixtureAndFatal(ctx, t, c, fixtureVersion) } spec := registry.TestSpec{ Name: "generate-fixtures", Timeout: 30 * time.Minute, Tags: registry.Tags("fixtures"), Owner: registry.OwnerDevInf, Cluster: r.MakeClusterSpec(4), Run: runFixtures, } r.Add(spec) }
pkg/cmd/roachtest/tests/fixtures.go
1
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.9985295534133911, 0.12497499585151672, 0.00016370585944969207, 0.00016895562293939292, 0.330172598361969 ]
{ "id": 8, "code_window": [ "// move the files.\n", "func makeVersionFixtureAndFatal(\n", "\tctx context.Context, t test.Test, c cluster.Cluster, makeFixtureVersion string,\n", ") {\n", "\tvar useLocalBinary bool\n", "\tif makeFixtureVersion == \"\" {\n", "\t\tc.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings(), c.Node(1))\n", "\t\trequire.NoError(t, c.Conn(ctx, t.L(), 1).QueryRowContext(\n", "\t\t\tctx,\n", "\t\t\t`select regexp_extract(value, '^v([0-9]+\\.[0-9]+\\.[0-9]+)') from crdb_internal.node_build_info where field = 'Version';`,\n", "\t\t).Scan(&makeFixtureVersion))\n", "\t\tc.Wipe(ctx, c.Node(1))\n", "\t\tuseLocalBinary = true\n", "\t}\n", "\n", "\tpredecessorVersion, err := version.PredecessorVersion(*version.MustParse(\"v\" + makeFixtureVersion))\n", "\tif err != nil {\n", "\t\tt.Fatal(err)\n", "\t}\n", "\n", "\tt.L().Printf(\"making fixture for %s (starting at %s)\", makeFixtureVersion, predecessorVersion)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tpredecessorVersion, err := version.PredecessorVersion(*version.MustParse(makeFixtureVersion))\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 352 }
This guide is based on using CoreOS's Prometheus Operator, which allows a Prometheus instance to be managed using native Kubernetes concepts. References used: * https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md * https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/alerting.md # Preflight Create and initialize a Cockroach cluster, if you haven't already done so: * `kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml` * `kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml` If you're running on Google Kubernetes Engine, it's necessary to ensure that your Kubernetes user is part of the cluster-admin groups. Edit the following command before running it; the email address should be whatever account you use to access GKE. This is required, regardless of whether or not you are using a secure CockroachDB cluster. * `kubectl create clusterrolebinding $USER-cluster-admin-binding --clusterrole=cluster-admin [email protected]` # Monitoring Edit the cockroachdb service to add the label `prometheus: cockroachdb`. We use this because we don't want to duplicate the monitoring data between the two services that we create. If we don't have a way to distinguish the `cockroachdb` and `cockroachdb-public` services from one another, we'd have two different prometheus jobs that had duplicated backends. * `kubectl label svc cockroachdb prometheus=cockroachdb` Check for the latest Prometheus Operator [release version](https://github.com/prometheus-operator/prometheus-operator/blob/master/RELEASE.md). Specify the version number in the below command. Install Prometheus Operator: * `kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.1/bundle.yaml` Ensure that the instance of prometheus-operator has started before continuing. The `kubectl get` command and its desired output is below: ``` $ kubectl get deploy prometheus-operator NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE prometheus-operator 1 1 1 1 23h ``` Create the various objects necessary to run a prometheus instance: * `kubectl apply -f prometheus.yaml` To view the Prometheus UI locally: * `kubectl port-forward prometheus-cockroachdb-0 9090` * Open http://localhost:9090 in your browser. * Select the `Status -> Targets` menu entry to verify that the CockroachDB instances have been located. ![Targets screenshot](img/targets.png) * Graphing the `sys_uptime` variable will verify that data is being collected. ![Uptime graph screenshot](img/graph.png) # Alerting Edit the template `alertmanager.yaml` with your relevant configuration. What's in the file has a dummy web hook, per the prometheus-operator alerting guide linked from the top of the document. Upload `alertmanager-config.yaml`, renaming it to `alertmanager.yaml` in the process, and labelling it to make it easier to find. * `kubectl create secret generic alertmanager-cockroachdb --from-file=alertmanager.yaml=alertmanager-config.yaml` * `kubectl label secret alertmanager-cockroachdb app=cockroachdb` It's critical that the name of the secret and the `alertmanager.yaml` are given exactly as shown. 
Create an AlertManager object to run a replicated AlertManager instance and create a ClusterIP service so that Prometheus can forward alerts: * `kubectl apply -f alertmanager.yaml` Verify that AlertManager is running: * `kubectl port-forward alertmanager-cockroachdb-0 9093` * Open http://localhost:9093 in your browser. You should see something similar to the following: ![AlertManager screenshot](img/alertmanager.png) * Ensure that the AlertManagers are visible to Prometheus by checking http://localhost:9090/status. It may take a minute for the configuration changes to propagate. If this is successful, you should see something similar to the following: ![AlertManager screenshot](img/status-alertmanagers.png) Upload alert rules: * These are copied from https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml: * `kubectl apply -f alert-rules.yaml` * Check that the rules are visible to Prometheus by opening http://localhost:9090/rules. It may take a minute for the configuration changes to propagate. ![Rule screenshot](img/rules.png) * Verify that the example alert is firing by opening http://localhost:9090/rules ![Alerts screenshot](img/alerts.png) * Remove the example alert by running `kubectl edit prometheusrules prometheus-cockroachdb-rules` and deleting the `dummy.rules` block. # Cleaning Up You can remove the monitoring configurations using the following command: `kubectl delete Alertmanager,Prometheus,PrometheusRule,ServiceMonitor -l app=cockroachdb` # Maintenance The contents of `alert-rules.yaml` are generated from our reference prometheus configs, located in the top-level `cockroach/monitoring` directory. A `wraprules` tool exists to make maintaining this easier. ``` go get github.com/cockroachdb/cockroach/pkg/cmd/wraprules wraprules -o path/to/alert-rules.yaml path/to/cockroach/monitoring/rules/*.rules.yml ```
cloud/kubernetes/prometheus/README.md
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.0001717841805657372, 0.00016781731392256916, 0.00016412613331340253, 0.0001679229608271271, 0.0000022358360638463637 ]
{ "id": 8, "code_window": [ "// move the files.\n", "func makeVersionFixtureAndFatal(\n", "\tctx context.Context, t test.Test, c cluster.Cluster, makeFixtureVersion string,\n", ") {\n", "\tvar useLocalBinary bool\n", "\tif makeFixtureVersion == \"\" {\n", "\t\tc.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings(), c.Node(1))\n", "\t\trequire.NoError(t, c.Conn(ctx, t.L(), 1).QueryRowContext(\n", "\t\t\tctx,\n", "\t\t\t`select regexp_extract(value, '^v([0-9]+\\.[0-9]+\\.[0-9]+)') from crdb_internal.node_build_info where field = 'Version';`,\n", "\t\t).Scan(&makeFixtureVersion))\n", "\t\tc.Wipe(ctx, c.Node(1))\n", "\t\tuseLocalBinary = true\n", "\t}\n", "\n", "\tpredecessorVersion, err := version.PredecessorVersion(*version.MustParse(\"v\" + makeFixtureVersion))\n", "\tif err != nil {\n", "\t\tt.Fatal(err)\n", "\t}\n", "\n", "\tt.L().Printf(\"making fixture for %s (starting at %s)\", makeFixtureVersion, predecessorVersion)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tpredecessorVersion, err := version.PredecessorVersion(*version.MustParse(makeFixtureVersion))\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 352 }
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

export { default as Certificates } from "./containers/certificates";
export { default as CustomChart } from "./containers/customChart";
export {
  default as ConnectedDecommissionedNodeHistory,
  DecommissionedNodeHistory,
} from "./containers/nodeHistory/decommissionedNodeHistory";
export { default as Debug } from "./containers/debug";
export { default as EnqueueRange } from "./containers/enqueueRange";
export { default as ProblemRanges } from "./containers/problemRanges";
export { default as Localities } from "./containers/localities";
export { default as Network } from "./containers/network";
export { default as Nodes } from "./containers/nodes";
export { default as ReduxDebug } from "./containers/redux";
export { default as Range } from "./containers/range";
export { default as Settings } from "./containers/settings";
export { default as Stores } from "./containers/stores";
pkg/ui/workspaces/db-console/src/views/reports/index.ts
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.00017541075067128986, 0.0001717801787890494, 0.00016915427113417536, 0.00017077552911359817, 0.00000265114840658498 ]
{ "id": 8, "code_window": [ "// move the files.\n", "func makeVersionFixtureAndFatal(\n", "\tctx context.Context, t test.Test, c cluster.Cluster, makeFixtureVersion string,\n", ") {\n", "\tvar useLocalBinary bool\n", "\tif makeFixtureVersion == \"\" {\n", "\t\tc.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings(), c.Node(1))\n", "\t\trequire.NoError(t, c.Conn(ctx, t.L(), 1).QueryRowContext(\n", "\t\t\tctx,\n", "\t\t\t`select regexp_extract(value, '^v([0-9]+\\.[0-9]+\\.[0-9]+)') from crdb_internal.node_build_info where field = 'Version';`,\n", "\t\t).Scan(&makeFixtureVersion))\n", "\t\tc.Wipe(ctx, c.Node(1))\n", "\t\tuseLocalBinary = true\n", "\t}\n", "\n", "\tpredecessorVersion, err := version.PredecessorVersion(*version.MustParse(\"v\" + makeFixtureVersion))\n", "\tif err != nil {\n", "\t\tt.Fatal(err)\n", "\t}\n", "\n", "\tt.L().Printf(\"making fixture for %s (starting at %s)\", makeFixtureVersion, predecessorVersion)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tpredecessorVersion, err := version.PredecessorVersion(*version.MustParse(makeFixtureVersion))\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 352 }
// Copyright 2022 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. // package roachtestutil import ( "context" gosql "database/sql" "github.com/cockroachdb/cockroach/pkg/roachprod/logger" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/errors" ) // CheckReplicaDivergenceOnDB runs a stats-only consistency check via the // provided DB. It ignores transient errors that can result from the // implementation of crdb_internal.check_consistency, so a nil result // does not prove anything. func CheckReplicaDivergenceOnDB(ctx context.Context, l *logger.Logger, db *gosql.DB) error { // NB: we set a statement_timeout since context cancellation won't work here, // see: // https://github.com/cockroachdb/cockroach/pull/34520 // // We've seen the consistency checks hang indefinitely in some cases. rows, err := db.QueryContext(ctx, ` SET statement_timeout = '5m'; SELECT t.range_id, t.start_key_pretty, t.status, t.detail FROM crdb_internal.check_consistency(true, '', '') as t WHERE t.status NOT IN ('RANGE_CONSISTENT', 'RANGE_INDETERMINATE')`) if err != nil { // TODO(tbg): the checks can fail for silly reasons like missing gossiped // descriptors, etc. -- not worth failing the test for. Ideally this would // be rock solid. l.Printf("consistency check failed with %v; ignoring", err) return nil } defer rows.Close() var finalErr error for rows.Next() { var rangeID int32 var prettyKey, status, detail string if scanErr := rows.Scan(&rangeID, &prettyKey, &status, &detail); scanErr != nil { l.Printf("consistency check failed with %v; ignoring", scanErr) return nil } finalErr = errors.CombineErrors(finalErr, errors.Newf("r%d (%s) is inconsistent: %s %s\n", rangeID, prettyKey, status, detail)) } if err := rows.Err(); err != nil { l.Printf("consistency check failed with %v; ignoring", err) return nil } return finalErr } // CheckInvalidDescriptors returns an error if there exists any descriptors in // the crdb_internal.invalid_objects virtual table. func CheckInvalidDescriptors(db *gosql.DB) error { // Because crdb_internal.invalid_objects is a virtual table, by default, the // query will take a lease on the database sqlDB is connected to and only run // the query on the given database. The "" prefix prevents this lease // acquisition and allows the query to fetch all descriptors in the cluster. rows, err := db.Query(`SELECT id, obj_name, error FROM "".crdb_internal.invalid_objects`) if err != nil { return err } invalidIDs, err := sqlutils.RowsToDataDrivenOutput(rows) if err != nil { return err } if invalidIDs != "" { return errors.Errorf("the following descriptor ids are invalid\n%v", invalidIDs) } return nil }
pkg/cmd/roachtest/roachtestutil/validation_check.go
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.004329990595579147, 0.0006588458782061934, 0.00016604812117293477, 0.00017336156452074647, 0.001298901392146945 ]
{ "id": 9, "code_window": [ "\tif err != nil {\n", "\t\tt.Fatal(err)\n", "\t}\n", "\n", "\tt.L().Printf(\"making fixture for %s (starting at %s)\", makeFixtureVersion, predecessorVersion)\n", "\n", "\tif useLocalBinary {\n", "\t\t// Make steps below use the main cockroach binary (in particular, don't try\n", "\t\t// to download the released version for makeFixtureVersion which may not yet\n", "\t\t// exist)\n", "\t\tmakeFixtureVersion = \"\"\n", "\t}\n", "\n", "\tnewVersionUpgradeTest(c,\n", "\t\t// Start the cluster from a fixture. That fixture's cluster version may\n", "\t\t// be at the predecessor version (though in practice it's fully up to\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tfixtureVersion := makeFixtureVersion[1:] // drop the leading v\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 369 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package tests import ( "context" "runtime" "strings" "time" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" ) func registerFixtures(r registry.Registry) { // Run this test to create a new fixture for the version upgrade test. This // is necessary after every release. For example, the day `master` becomes // the 20.2 release, this test will fail because it is missing a fixture for // 20.1; run the test (on 20.1). Check it in (instructions will be logged // below) and off we go. // // The version to create/update the fixture for. Must be released (i.e. // can download it from the homepage); if that is not the case use the // empty string which uses the local cockroach binary. Make sure that // this binary then has the correct version. For example, to make a // "v20.2" fixture, you will need a binary that has "v20.2" in the // output of `./cockroach version`, and this process will end up // creating fixtures that have "v20.2" in them. This would be part // of tagging the master branch as v21.1 in the process of going // through the major release for v20.2. The version is passed in as // FIXTURE_VERSION environment variable. // // In the common case, one should populate this with the version (instead of // using the empty string) as this is the most straightforward and least // error-prone way to generate the fixtures. // // Please note that you do *NOT* need to update the fixtures in a patch // release. This only happens as part of preparing the master branch for the // next release. The release team runbooks, at time of writing, reflect // this. // // Example invocation: // roachtest --local run generate-fixtures --debug --cockroach ./cockroach \ // --build-tag v22.1.0-beta.3 tag:fixtures runFixtures := func( ctx context.Context, t test.Test, c cluster.Cluster, ) { if c.IsLocal() && runtime.GOARCH == "arm64" { t.Skip("Skip under ARM64. See https://github.com/cockroachdb/cockroach/issues/89268") } fixtureVersion := strings.TrimPrefix(t.BuildVersion().String(), "v") makeVersionFixtureAndFatal(ctx, t, c, fixtureVersion) } spec := registry.TestSpec{ Name: "generate-fixtures", Timeout: 30 * time.Minute, Tags: registry.Tags("fixtures"), Owner: registry.OwnerDevInf, Cluster: r.MakeClusterSpec(4), Run: runFixtures, } r.Add(spec) }
pkg/cmd/roachtest/tests/fixtures.go
1
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.004523857496678829, 0.0009770271135494113, 0.00016008004604373127, 0.00022153486497700214, 0.001468951697461307 ]
{ "id": 9, "code_window": [ "\tif err != nil {\n", "\t\tt.Fatal(err)\n", "\t}\n", "\n", "\tt.L().Printf(\"making fixture for %s (starting at %s)\", makeFixtureVersion, predecessorVersion)\n", "\n", "\tif useLocalBinary {\n", "\t\t// Make steps below use the main cockroach binary (in particular, don't try\n", "\t\t// to download the released version for makeFixtureVersion which may not yet\n", "\t\t// exist)\n", "\t\tmakeFixtureVersion = \"\"\n", "\t}\n", "\n", "\tnewVersionUpgradeTest(c,\n", "\t\t// Start the cluster from a fixture. That fixture's cluster version may\n", "\t\t// be at the predecessor version (though in practice it's fully up to\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tfixtureVersion := makeFixtureVersion[1:] // drop the leading v\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 369 }
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. import React from "react"; import { Alert, Icon } from "antd"; import "antd/lib/alert/style"; import "antd/lib/icon/style"; import { Link } from "react-router-dom"; import { AlertInfo, AlertLevel } from "src/redux/alerts"; import "./alertMessage.styl"; interface AlertMessageProps extends AlertInfo { autoClose: boolean; autoCloseTimeout: number; closable: boolean; dismiss(): void; } type AlertType = "success" | "info" | "warning" | "error"; const mapAlertLevelToType = (alertLevel: AlertLevel): AlertType => { switch (alertLevel) { case AlertLevel.SUCCESS: return "success"; case AlertLevel.NOTIFICATION: return "info"; case AlertLevel.WARNING: return "warning"; case AlertLevel.CRITICAL: return "error"; default: return "info"; } }; const getIconType = (alertLevel: AlertLevel): string => { switch (alertLevel) { case AlertLevel.SUCCESS: return "check-circle"; case AlertLevel.NOTIFICATION: return "info-circle"; case AlertLevel.WARNING: return "warning"; case AlertLevel.CRITICAL: return "close-circle"; default: return "info-circle"; } }; export class AlertMessage extends React.Component<AlertMessageProps> { static defaultProps = { closable: true, autoCloseTimeout: 6000, }; timeoutHandler: number; componentDidMount() { const { autoClose, dismiss, autoCloseTimeout } = this.props; if (autoClose) { this.timeoutHandler = window.setTimeout(dismiss, autoCloseTimeout); } } componentWillUnmount() { clearTimeout(this.timeoutHandler); } render() { const { level, dismiss, link, title, text, closable } = this.props; let description: React.ReactNode = text; if (link) { description = ( <Link to={link} onClick={dismiss}> {text} </Link> ); } const type = mapAlertLevelToType(level); const iconType = getIconType(level); return ( <Alert className="alert-massage" message={title} description={description} showIcon icon={ <Icon type={iconType} theme="filled" className="alert-massage__icon" /> } closable={closable} onClose={dismiss} closeText={ closable && <div className="alert-massage__close-text">&times;</div> } type={type} /> ); } }
pkg/ui/workspaces/db-console/src/views/shared/components/alertMessage/alertMessage.tsx
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.0001764822518453002, 0.0001732771488605067, 0.00017002325330395252, 0.00017360600759275258, 0.000001787097858141351 ]
{ "id": 9, "code_window": [ "\tif err != nil {\n", "\t\tt.Fatal(err)\n", "\t}\n", "\n", "\tt.L().Printf(\"making fixture for %s (starting at %s)\", makeFixtureVersion, predecessorVersion)\n", "\n", "\tif useLocalBinary {\n", "\t\t// Make steps below use the main cockroach binary (in particular, don't try\n", "\t\t// to download the released version for makeFixtureVersion which may not yet\n", "\t\t// exist)\n", "\t\tmakeFixtureVersion = \"\"\n", "\t}\n", "\n", "\tnewVersionUpgradeTest(c,\n", "\t\t// Start the cluster from a fixture. That fixture's cluster version may\n", "\t\t// be at the predecessor version (though in practice it's fully up to\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tfixtureVersion := makeFixtureVersion[1:] // drop the leading v\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 369 }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package sql import ( "context" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/multiregion" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/types" ) // databaseRegionChangeFinalizer encapsulates the logic and state for finalizing // a region metadata operation on a multi-region database. This includes methods // to update partitions and zone configurations as well as leases on REGIONAL BY // ROW tables. type databaseRegionChangeFinalizer struct { dbID descpb.ID typeID descpb.ID localPlanner *planner cleanupFunc func() regionalByRowTables []*tabledesc.Mutable } // newDatabaseRegionChangeFinalizer returns a databaseRegionChangeFinalizer. // It pre-fetches all REGIONAL BY ROW tables from the database. func newDatabaseRegionChangeFinalizer( ctx context.Context, txn descs.Txn, execCfg *ExecutorConfig, dbID descpb.ID, typeID descpb.ID, ) (*databaseRegionChangeFinalizer, error) { p, cleanup := NewInternalPlanner( "repartition-regional-by-row-tables", txn.KV(), username.RootUserName(), &MemoryMetrics{}, execCfg, txn.SessionData().SessionData, WithDescCollection(txn.Descriptors()), ) localPlanner := p.(*planner) var regionalByRowTables []*tabledesc.Mutable if err := func() error { dbDesc, err := txn.Descriptors().ByID(txn.KV()).WithoutNonPublic().Get().Database(ctx, dbID) if err != nil { return err } return localPlanner.forEachMutableTableInDatabase( ctx, dbDesc, func(ctx context.Context, scName string, tableDesc *tabledesc.Mutable) error { if !tableDesc.IsLocalityRegionalByRow() || tableDesc.Dropped() { // We only need to re-partition REGIONAL BY ROW tables. Even then, we // don't need to (can't) repartition a REGIONAL BY ROW table if it has // been dropped. return nil } regionalByRowTables = append(regionalByRowTables, tableDesc) return nil }, ) }(); err != nil { cleanup() return nil, err } return &databaseRegionChangeFinalizer{ dbID: dbID, typeID: typeID, localPlanner: localPlanner, cleanupFunc: cleanup, regionalByRowTables: regionalByRowTables, }, nil } // cleanup cleans up remaining objects on the databaseRegionChangeFinalizer. func (r *databaseRegionChangeFinalizer) cleanup() { if r.cleanupFunc != nil { r.cleanupFunc() r.cleanupFunc = nil } } // finalize updates the zone configurations of the database and all enclosed // REGIONAL BY ROW tables once the region promotion/demotion is complete. func (r *databaseRegionChangeFinalizer) finalize(ctx context.Context, txn descs.Txn) error { if err := r.updateDatabaseZoneConfig(ctx, txn); err != nil { return err } if err := r.preDrop(ctx, txn); err != nil { return err } return r.updateGlobalTablesZoneConfig(ctx, txn) } // preDrop is called in advance of dropping regions from a multi-region // database. 
This function just re-partitions the REGIONAL BY ROW tables in // advance of the type descriptor change, to ensure that the table and type // descriptors never become incorrect (from a query perspective). For more info, // see the callers. func (r *databaseRegionChangeFinalizer) preDrop(ctx context.Context, txn descs.Txn) error { repartitioned, zoneConfigUpdates, err := r.repartitionRegionalByRowTables(ctx, txn) if err != nil { return err } for _, update := range zoneConfigUpdates { if _, err := writeZoneConfigUpdate( ctx, txn, r.localPlanner.ExtendedEvalContext().Tracing.KVTracingEnabled(), update, ); err != nil { return err } } b := txn.KV().NewBatch() for _, t := range repartitioned { const kvTrace = false if err := r.localPlanner.Descriptors().WriteDescToBatch( ctx, kvTrace, t, b, ); err != nil { return err } } return txn.KV().Run(ctx, b) } // updateGlobalTablesZoneConfig refreshes all global tables' zone configs so // that their zone configs are refreshes after a newly-added region goes out of // being a transitioning region. This function only applies if the database is // in PLACEMENT RESTRICTED because if the database is in PLACEMENT DEFAULT, it // will inherit the database's constraints. In the RESTRICTED case, however, // constraints must be explicitly refreshed when new regions are added/removed. func (r *databaseRegionChangeFinalizer) updateGlobalTablesZoneConfig( ctx context.Context, txn isql.Txn, ) error { regionConfig, err := SynthesizeRegionConfig(ctx, txn.KV(), r.dbID, r.localPlanner.Descriptors()) if err != nil { return err } // If we're not in PLACEMENT RESTRICTED, GLOBAL tables will inherit the // database zone config. Therefore, their constraints do not have to be // refreshed. if !regionConfig.IsPlacementRestricted() { return nil } descsCol := r.localPlanner.Descriptors() dbDesc, err := descsCol.ByID(txn.KV()).WithoutNonPublic().Get().Database(ctx, r.dbID) if err != nil { return err } err = r.localPlanner.refreshZoneConfigsForTables(ctx, dbDesc, WithOnlyGlobalTables) if err != nil { return err } return nil } // updateDatabaseZoneConfig updates the zone config of the database that // encloses the multi-region enum such that there is an entry for all PUBLIC // region values. func (r *databaseRegionChangeFinalizer) updateDatabaseZoneConfig( ctx context.Context, txn descs.Txn, ) error { regionConfig, err := SynthesizeRegionConfig(ctx, txn.KV(), r.dbID, r.localPlanner.Descriptors()) if err != nil { return err } return ApplyZoneConfigFromDatabaseRegionConfig( ctx, r.dbID, regionConfig, txn, r.localPlanner.ExecCfg(), r.localPlanner.extendedEvalCtx.Tracing.KVTracingEnabled(), ) } // repartitionRegionalByRowTables re-partitions all REGIONAL BY ROW tables // contained in the database. repartitionRegionalByRowTables adds a partition // and corresponding zone configuration for all PUBLIC enum members (regions) // on the multi-region enum. // // Note that even if the caller does not write the returned descriptors, the // mutable copies of the descriptor in the collection has been modified and is // being returned. This allows callers to inject the descriptors into a // collection in order to observe the side- effects of such a change. The caller // is responsible for actually writing the repartitioned tables. To re-iterate, // when a mutable descriptor is resolved from a collection subsequently, the // exact same descriptor object is returned. All of the objects descriptors // mutated here are from the underlying collection. 
However, these descriptors // have not been added back to the collection using AddUncommittedDescriptor // (or its friends WriteDesc.*), so immutable resolution of the descriptors // will still yield the original, unmodified version. If users want these // modified versions to be visible for immutable resolution, they must either // write the descriptors through the collection or inject them as synthetic // descriptors. func (r *databaseRegionChangeFinalizer) repartitionRegionalByRowTables( ctx context.Context, txn descs.Txn, ) (repartitioned []*tabledesc.Mutable, zoneConfigUpdates []*zoneConfigUpdate, _ error) { regionConfig, err := SynthesizeRegionConfig(ctx, txn.KV(), r.dbID, r.localPlanner.Descriptors()) if err != nil { return nil, nil, err } for _, tableDesc := range r.regionalByRowTables { // Since we hydrated the columns with the old enum, and now that the enum // has transitioned the read-only members to public, we have to re-hydrate // the table descriptor with the new type metadata. for i := range tableDesc.Columns { col := &tableDesc.Columns[i] if col.Type.UserDefined() { tid := typedesc.UserDefinedTypeOIDToID(col.Type.Oid()) if tid == r.typeID { col.Type.TypeMeta = types.UserDefinedTypeMetadata{} } } } if err := typedesc.HydrateTypesInDescriptor( ctx, tableDesc, r.localPlanner, ); err != nil { return nil, nil, err } colName, err := tableDesc.GetRegionalByRowTableRegionColumnName() if err != nil { return nil, nil, err } partitionAllBy := multiregion.PartitionByForRegionalByRow(regionConfig, colName) // oldPartitionings saves the old partitionings for each // index that is repartitioned. This is later used to remove zone // configurations from any partitions that are removed. oldPartitionings := make(map[descpb.IndexID]catalog.Partitioning) // Update the partitioning on all indexes of the table that aren't being // dropped. for _, index := range tableDesc.NonDropIndexes() { oldPartitionings[index.GetID()] = index.GetPartitioning().DeepCopy() newImplicitCols, newPartitioning, err := CreatePartitioning( ctx, r.localPlanner.extendedEvalCtx.Settings, r.localPlanner.EvalContext(), tableDesc, *index.IndexDesc(), partitionAllBy, nil, /* allowedNewColumnName*/ true, /* allowImplicitPartitioning */ ) if err != nil { return nil, nil, err } tabledesc.UpdateIndexPartitioning(index.IndexDesc(), index.Primary(), newImplicitCols, newPartitioning) } // Update the zone configurations now that the partition's been added. update, err := prepareZoneConfigForMultiRegionTable( ctx, txn, r.localPlanner.ExecCfg(), regionConfig, tableDesc, ApplyZoneConfigForMultiRegionTableOptionTableAndIndexes, ) if err != nil { return nil, nil, err } if update != nil { zoneConfigUpdates = append(zoneConfigUpdates, update) } repartitioned = append(repartitioned, tableDesc) } return repartitioned, zoneConfigUpdates, nil }
pkg/sql/database_region_change_finalizer.go
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.0016595694469287992, 0.0002952718350570649, 0.00015621501370333135, 0.00016659432731103152, 0.00038011212018318474 ]
{ "id": 9, "code_window": [ "\tif err != nil {\n", "\t\tt.Fatal(err)\n", "\t}\n", "\n", "\tt.L().Printf(\"making fixture for %s (starting at %s)\", makeFixtureVersion, predecessorVersion)\n", "\n", "\tif useLocalBinary {\n", "\t\t// Make steps below use the main cockroach binary (in particular, don't try\n", "\t\t// to download the released version for makeFixtureVersion which may not yet\n", "\t\t// exist)\n", "\t\tmakeFixtureVersion = \"\"\n", "\t}\n", "\n", "\tnewVersionUpgradeTest(c,\n", "\t\t// Start the cluster from a fixture. That fixture's cluster version may\n", "\t\t// be at the predecessor version (though in practice it's fully up to\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tfixtureVersion := makeFixtureVersion[1:] // drop the leading v\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 369 }
// Copyright 2022 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. // package kvnemesis import ( "fmt" "sort" "strings" "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/syncutil" ) // SeqTracker is a container that helps kvnemesis map MVCC versions to // operations as identified by their Seq. // // SeqTracker is threadsafe. type SeqTracker struct { syncutil.Mutex seen map[keyTS]kvnemesisutil.Seq } type keyTS struct { key, endKey string ts hlc.Timestamp } func (tr *SeqTracker) String() string { tr.Lock() defer tr.Unlock() var sl []keyTS for k := range tr.seen { sl = append(sl, k) } sort.Slice(sl, func(i, j int) bool { return fmt.Sprintf("%v", sl[i]) < fmt.Sprintf("%v", sl[j]) }) var buf strings.Builder for _, el := range sl { fmt.Fprintf(&buf, "%s %s -> %s\n", roachpb.Span{Key: roachpb.Key(el.key), EndKey: roachpb.Key(el.endKey)}, el.ts, tr.seen[el]) } return buf.String() } // Add associates key@ts with the provided Seq. It is called in two places: // // - For regular point/range key writes, we hook into the server-side rangefeed // processor via RangefeedValueHeaderFilter and read it there. // // - For AddSSTables, we scan the emitted SST in the rangefeed watcher, // which contains the embedded seqnos. func (tr *SeqTracker) Add(key, endKey roachpb.Key, ts hlc.Timestamp, seq kvnemesisutil.Seq) { tr.Lock() defer tr.Unlock() if tr.seen == nil { tr.seen = map[keyTS]kvnemesisutil.Seq{} } tr.seen[keyTS{key: string(key), endKey: string(endKey), ts: ts}] = seq } // Lookup checks whether the version key@ts is associated with a Seq. func (tr *SeqTracker) Lookup(key, endKey roachpb.Key, ts hlc.Timestamp) (kvnemesisutil.Seq, bool) { tr.Lock() defer tr.Unlock() // Rangedels can be split, but the tracker will always see the pre-split // value (since it's reported by the operation's BatchRequest). So this // method checks whether the input span is contained in any span seen // by the tracker. if seq, fastPathOK := tr.seen[keyTS{ key: string(key), endKey: string(endKey), ts: ts, }]; fastPathOK { // Fast path - exact match. Should be the common case outside of MVCC range // deletions. return seq, true } for kts := range tr.seen { if kts.ts != ts { continue } cur := roachpb.Span{Key: roachpb.Key(kts.key), EndKey: roachpb.Key(kts.endKey)} if cur.Contains(roachpb.Span{Key: key, EndKey: endKey}) { return tr.seen[kts], true } } return 0, false }
pkg/kv/kvnemesis/seq_tracker.go
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.00018201534112449735, 0.00017196027329191566, 0.00016562639211770147, 0.00017224554903805256, 0.0000042711731111921836 ]
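Aside (not part of the dataset record above): the seq_tracker.go file in the preceding row documents an exact-match fast path in Lookup plus a containment scan for split range deletions. A minimal usage sketch, assuming it sits inside the kvnemesis package so the imports already present in that file (fmt, hlc, roachpb, kvnemesisutil) are available; the key values and seqno are invented for illustration:

	func exampleSeqTrackerUsage() {
		tr := &SeqTracker{}
		ts := hlc.Timestamp{WallTime: 1}
		// Record the pre-split rangedel span [a, d) under seq 7, as the
		// rangefeed filter would via Add.
		tr.Add(roachpb.Key("a"), roachpb.Key("d"), ts, kvnemesisutil.Seq(7))
		// Exact span and timestamp: served by the fast path.
		seq, ok := tr.Lookup(roachpb.Key("a"), roachpb.Key("d"), ts)
		fmt.Println(seq, ok) // 7 true
		// A post-split fragment [b, c) at the same timestamp is still found,
		// via the containment scan described in Lookup.
		seq, ok = tr.Lookup(roachpb.Key("b"), roachpb.Key("c"), ts)
		fmt.Println(seq, ok) // 7 true
	}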
{ "id": 10, "code_window": [ "\t\twaitForUpgradeStep(c.All()),\n", "\n", "\t\t// NB: at this point, cluster and binary version equal predecessorVersion,\n", "\t\t// and auto-upgrades are on.\n", "\n", "\t\tbinaryUpgradeStep(c.All(), makeFixtureVersion),\n", "\t\twaitForUpgradeStep(c.All()),\n", "\n", "\t\tfunc(ctx context.Context, t test.Test, u *versionUpgradeTest) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\tbinaryUpgradeStep(c.All(), fixtureVersion),\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 391 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package tests import ( "context" "runtime" "strings" "time" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" ) func registerFixtures(r registry.Registry) { // Run this test to create a new fixture for the version upgrade test. This // is necessary after every release. For example, the day `master` becomes // the 20.2 release, this test will fail because it is missing a fixture for // 20.1; run the test (on 20.1). Check it in (instructions will be logged // below) and off we go. // // The version to create/update the fixture for. Must be released (i.e. // can download it from the homepage); if that is not the case use the // empty string which uses the local cockroach binary. Make sure that // this binary then has the correct version. For example, to make a // "v20.2" fixture, you will need a binary that has "v20.2" in the // output of `./cockroach version`, and this process will end up // creating fixtures that have "v20.2" in them. This would be part // of tagging the master branch as v21.1 in the process of going // through the major release for v20.2. The version is passed in as // FIXTURE_VERSION environment variable. // // In the common case, one should populate this with the version (instead of // using the empty string) as this is the most straightforward and least // error-prone way to generate the fixtures. // // Please note that you do *NOT* need to update the fixtures in a patch // release. This only happens as part of preparing the master branch for the // next release. The release team runbooks, at time of writing, reflect // this. // // Example invocation: // roachtest --local run generate-fixtures --debug --cockroach ./cockroach \ // --build-tag v22.1.0-beta.3 tag:fixtures runFixtures := func( ctx context.Context, t test.Test, c cluster.Cluster, ) { if c.IsLocal() && runtime.GOARCH == "arm64" { t.Skip("Skip under ARM64. See https://github.com/cockroachdb/cockroach/issues/89268") } fixtureVersion := strings.TrimPrefix(t.BuildVersion().String(), "v") makeVersionFixtureAndFatal(ctx, t, c, fixtureVersion) } spec := registry.TestSpec{ Name: "generate-fixtures", Timeout: 30 * time.Minute, Tags: registry.Tags("fixtures"), Owner: registry.OwnerDevInf, Cluster: r.MakeClusterSpec(4), Run: runFixtures, } r.Add(spec) }
pkg/cmd/roachtest/tests/fixtures.go
1
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.009982802905142307, 0.0021159625612199306, 0.00016604353731963784, 0.00017311627743765712, 0.003517413977533579 ]
{ "id": 10, "code_window": [ "\t\twaitForUpgradeStep(c.All()),\n", "\n", "\t\t// NB: at this point, cluster and binary version equal predecessorVersion,\n", "\t\t// and auto-upgrades are on.\n", "\n", "\t\tbinaryUpgradeStep(c.All(), makeFixtureVersion),\n", "\t\twaitForUpgradeStep(c.All()),\n", "\n", "\t\tfunc(ctx context.Context, t test.Test, u *versionUpgradeTest) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\tbinaryUpgradeStep(c.All(), fixtureVersion),\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 391 }
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package coldata // Datum is abstract type for elements inside DatumVec, this type in reality // should be tree.Datum. However, in order to avoid pulling in 'tree' package // into the 'coldata' package, we use a runtime cast instead. type Datum interface{} // DatumVec is the interface for a specialized vector that operates on // tree.Datums in the vectorized engine. In order to avoid import of 'tree' // package the implementation of DatumVec lives in 'coldataext' package. type DatumVec interface { // Get returns the datum at index i in the vector. The datum cannot be used // anymore once the vector is modified. Get(i int) Datum // Set sets the datum at index i in the vector. It must check whether the // provided datum is compatible with the type that the DatumVec stores. Set(i int, v Datum) // Window creates a "window" into the vector. It behaves similarly to // Golang's slice. Window(start, end int) DatumVec // CopySlice copies srcStartIdx inclusive and srcEndIdx exclusive // tree.Datum values from src into the vector starting at destIdx. CopySlice(src DatumVec, destIdx, srcStartIdx, srcEndIdx int) // AppendSlice appends srcStartIdx inclusive and srcEndIdx exclusive // tree.Datum values from src into the vector starting at destIdx. AppendSlice(src DatumVec, destIdx, srcStartIdx, srcEndIdx int) // AppendVal appends the given tree.Datum value to the end of the vector. AppendVal(v Datum) // SetLength sets the length of the vector. SetLength(l int) // Len returns the length of the vector. Len() int // Cap returns the underlying capacity of the vector. Cap() int // MarshalAt returns the marshaled representation of datum at index i. MarshalAt(appendTo []byte, i int) ([]byte, error) // UnmarshalTo unmarshals the byte representation of a datum and sets it at // index i. UnmarshalTo(i int, b []byte) error // Size returns the total memory footprint of the vector (including the // internal memory used by tree.Datums) in bytes. It only accounts for the // size of the datum objects starting from the given index. So, Size is // relatively cheap when startIdx >= length, and expensive when // startIdx < length (with a maximum at zero). A nonzero startIdx should only // be used when elements before startIdx are guaranteed not to have been // modified. Size(startIdx int) int64 // SetEvalCtx updates the vector with the provided *eval.Context. SetEvalCtx(evalCtx interface{}) }
pkg/col/coldata/datum_vec.go
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.00036940432619303465, 0.000203793853870593, 0.00016384967602789402, 0.00017437881615478545, 0.00006811296043451875 ]
{ "id": 10, "code_window": [ "\t\twaitForUpgradeStep(c.All()),\n", "\n", "\t\t// NB: at this point, cluster and binary version equal predecessorVersion,\n", "\t\t// and auto-upgrades are on.\n", "\n", "\t\tbinaryUpgradeStep(c.All(), makeFixtureVersion),\n", "\t\twaitForUpgradeStep(c.All()),\n", "\n", "\t\tfunc(ctx context.Context, t test.Test, u *versionUpgradeTest) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\tbinaryUpgradeStep(c.All(), fixtureVersion),\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 391 }
// Copyright 2021 The Cockroach Authors. // // Licensed as a CockroachDB Enterprise file under the Cockroach Community // License (the "License"); you may not use this file except in compliance with // the License. You may obtain a copy of the License at // // https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt package kvevent import ( "context" "fmt" "math/rand" "testing" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) func TestAllocMergeRandomized(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) run := func(t *testing.T, N, P int) { require.True(t, N >= P) // test assumes this invariant pools := make([]*testAllocPool, P) allocs := make([]Alloc, N) // Make P pools. for i := range pools { pools[i] = &testAllocPool{} } // Allocate N allocs from the P pools. poolPerm := rand.Perm(P) for i := range allocs { allocs[i] = pools[poolPerm[i%P]].alloc(1) } // Randomly merge the allocs together. perm := rand.Perm(N) for i := 0; i < N-1; i++ { p := perm[i] toMergeInto := perm[i+1+rand.Intn(N-i-1)] allocs[toMergeInto].Merge(&allocs[p]) } // Ensure that the remaining alloc, which has received all of the // others, has P-1 other allocs. require.Len(t, allocs[perm[N-1]].otherPoolAllocs, P-1) for i := 0; i < N-1; i++ { require.True(t, allocs[perm[i]].isZero()) } // Ensure that all N allocations worth of data are still outstanding sum := func() (ret int) { for _, p := range pools { ret += p.getN() } return ret } require.Equal(t, N, sum()) // Release the remaining alloc. allocs[perm[N-1]].Release(context.Background()) // Ensure it now is zero-valued. require.True(t, allocs[perm[N-1]].isZero()) // Ensure that all of the resources have been released. require.Equal(t, 0, sum()) } for _, np := range []struct{ N, P int }{ {1, 1}, {2, 2}, {1000, 2}, {10000, 1000}, } { t.Run(fmt.Sprintf("N=%d,P=%d", np.N, np.P), func(t *testing.T) { run(t, np.N, np.P) }) } } func TestAllocAdjust(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() p := &testAllocPool{} a := p.alloc(10) require.EqualValues(t, 10, a.Bytes()) require.EqualValues(t, 1, a.Events()) a.AdjustBytesToTarget(ctx, 6) require.EqualValues(t, 6, a.Bytes()) a.AdjustBytesToTarget(ctx, 7) // no-op require.EqualValues(t, 6, a.Bytes()) a.AdjustBytesToTarget(ctx, -5) // no-op require.EqualValues(t, 6, a.Bytes()) a.AdjustBytesToTarget(ctx, 1) require.EqualValues(t, 1, a.Bytes()) a.Release(ctx) } type testAllocPool struct { syncutil.Mutex n int64 } // Release implements kvevent.pool interface. func (ap *testAllocPool) Release(ctx context.Context, bytes, entries int64) { ap.Lock() defer ap.Unlock() if ap.n < bytes { panic(errors.AssertionFailedf("can't release %d bytes from zero resources", bytes)) } ap.n -= bytes } func (ap *testAllocPool) alloc(bytes int64) Alloc { ap.Lock() defer ap.Unlock() ap.n += bytes return TestingMakeAlloc(bytes, ap) } func (ap *testAllocPool) getN() int { ap.Lock() defer ap.Unlock() return int(ap.n) }
pkg/ccl/changefeedccl/kvevent/alloc_test.go
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.0008166696061380208, 0.00023128882457967848, 0.0001656227686908096, 0.00017217150889337063, 0.0001696753897704184 ]
{ "id": 10, "code_window": [ "\t\twaitForUpgradeStep(c.All()),\n", "\n", "\t\t// NB: at this point, cluster and binary version equal predecessorVersion,\n", "\t\t// and auto-upgrades are on.\n", "\n", "\t\tbinaryUpgradeStep(c.All(), makeFixtureVersion),\n", "\t\twaitForUpgradeStep(c.All()),\n", "\n", "\t\tfunc(ctx context.Context, t test.Test, u *versionUpgradeTest) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\tbinaryUpgradeStep(c.All(), fixtureVersion),\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 391 }
#!/usr/bin/env bash set -xeuo pipefail PKGDIR=build/bazelbuilder/packages mkdir $PKGDIR cd $PKGDIR # repackage FIPS-related packages. This operation has to happen on a FIPS-enabled host. for pkg in openssl libssl1.1 libssl1.1-hmac kcapi-tools libkcapi1; do dpkg-repack "$pkg" done cd - TAG=$(cut -d: -f2 build/.bazelbuilderversion) DOCKER_BUILDKIT=1 docker build -t "cockroachdb/bazel-fips:$TAG" \ --build-arg FROM_IMAGE="cockroachdb/bazel:$TAG" \ -f build/bazelbuilder/Dockerfile.fips build/bazelbuilder docker push "cockroachdb/bazel-fips:$TAG" rm -rf $PKGDIR if [[ "$open_pr_on_success" == "true" ]]; then # Trigger "Open New Bazel Builder Image PR". curl -u "$TC_API_USER:$TC_API_PASSWORD" -X POST \ "https://$TC_SERVER_URL/app/rest/buildQueue" \ -H 'Accept: application/json' \ -H 'Content-Type: application/xml' \ -H "Host: $TC_SERVER_URL" \ -d '<build branchName="master"> <buildType id="Internal_Cockroach_Build_Ci_OpenNewBazelBuilderImagePr"/> <properties> <property name="env.BRANCH" value="'"bazel-builder-update-$TAG"'"/> <property name="env.VERSION" value="'"cockroachdb/bazel-fips:$TAG"'"/> </properties> </build>' else echo "No-op - opening a PR was not requested" fi
build/teamcity/internal/cockroach/build/ci/build-and-push-bazel-builder-image-fips.sh
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.00017314744764007628, 0.00016968848649412394, 0.0001655021624173969, 0.00017005216795951128, 0.000003004962309205439 ]
{ "id": 11, "code_window": [ "\t\t\t// compatible).\n", "\t\t\tname := clusterupgrade.CheckpointName(u.binaryVersion(ctx, t, 1).String())\n", "\t\t\tu.c.Stop(ctx, t.L(), option.DefaultStopOpts(), c.All())\n", "\n", "\t\t\tbinaryPath := clusterupgrade.BinaryPathFromVersion(makeFixtureVersion)\n", "\t\t\tc.Run(ctx, c.All(), binaryPath, \"debug\", \"pebble\", \"db\", \"checkpoint\",\n", "\t\t\t\t\"{store-dir}\", \"{store-dir}/\"+name)\n", "\t\t\t// The `cluster-bootstrapped` marker can already be found within\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tbinaryPath := clusterupgrade.BinaryPathFromVersion(fixtureVersion)\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 414 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package tests import ( "context" gosql "database/sql" "fmt" "math/rand" "path/filepath" "runtime" "time" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/clusterupgrade" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/mixedversion" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/roachprod/install" "github.com/cockroachdb/cockroach/pkg/roachprod/logger" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/version" "github.com/stretchr/testify/require" ) type versionFeatureTest struct { name string statement string } // Feature tests that are invoked in mixed-version state during the // upgrade test. A gotcha is that these feature tests are also // invoked when the cluster is in the middle of upgrading -- i.e. a // state where the cluster version has already been bumped, but not // all nodes are aware). This should be considered a feature of this // test, and feature tests that flake because of it need to be fixed. var versionUpgradeTestFeatures = []versionFeatureTest{ // NB: the next four tests are ancient and supported since v2.0. { name: "ObjectAccess", statement: ` -- We should be able to successfully select from objects created in ancient -- versions of CRDB using their FQNs. Prevents bugs such as #43141, where -- databases created before a migration were inaccessible after the -- migration. -- -- NB: the data has been baked into the fixtures. Originally created via: -- create database persistent_db -- create table persistent_db.persistent_table(a int)")) -- on CRDB v1.0 select * from persistent_db.persistent_table; show tables from persistent_db; `, }, { name: "JSONB", statement: ` CREATE DATABASE IF NOT EXISTS test; CREATE TABLE test.t (j JSONB); DROP TABLE test.t; `, }, { name: "Sequences", statement: ` CREATE DATABASE IF NOT EXISTS test; CREATE SEQUENCE test.test_sequence; DROP SEQUENCE test.test_sequence; `, }, { name: "Computed Columns", statement: ` CREATE DATABASE IF NOT EXISTS test; CREATE TABLE test.t (x INT AS (3) STORED); DROP TABLE test.t; `, }, { name: "Split and Merge Ranges", statement: ` CREATE DATABASE IF NOT EXISTS splitmerge; CREATE TABLE splitmerge.t (k INT PRIMARY KEY); ALTER TABLE splitmerge.t SPLIT AT VALUES (1), (2), (3); ALTER TABLE splitmerge.t UNSPLIT AT VALUES (1), (2), (3); DROP TABLE splitmerge.t; `, }, } func runVersionUpgrade(ctx context.Context, t test.Test, c cluster.Cluster) { if c.IsLocal() && runtime.GOARCH == "arm64" { t.Skip("Skip under ARM64. 
See https://github.com/cockroachdb/cockroach/issues/89268") } c.Put(ctx, t.DeprecatedWorkload(), "./workload", c.All()) mvt := mixedversion.NewTest(ctx, t, t.L(), c, c.All()) mvt.OnStartup("setup schema changer workload", func(ctx context.Context, l *logger.Logger, r *rand.Rand, helper *mixedversion.Helper) error { // Execute the workload init. return c.RunE(ctx, c.All(), "./workload init schemachange") }) mvt.InMixedVersion("run backup", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { // Verify that backups can be created in various configurations. This is // important to test because changes in system tables might cause backups to // fail in mixed-version clusters. dest := fmt.Sprintf("nodelocal://1/%d", timeutil.Now().UnixNano()) return h.Exec(rng, `BACKUP TO $1`, dest) }) mvt.InMixedVersion( "test features", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { for _, featureTest := range versionUpgradeTestFeatures { l.Printf("running feature test %q", featureTest.name) if err := h.Exec(rng, featureTest.statement); err != nil { l.Printf("%q: ERROR (%s)", featureTest.name, err) return err } l.Printf("%q: OK", featureTest.name) } return nil }, ) mvt.InMixedVersion( "test schema change step", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { l.Printf("running schema workload step") runCmd := roachtestutil.NewCommand("./workload run schemachange").Flag("verbose", 1).Flag("max-ops", 10).Flag("concurrency", 2).Arg("{pgurl:1-%d}", len(c.All())) randomNode := h.RandomNode(rng, c.All()) return c.RunE(ctx, option.NodeListOption{randomNode}, runCmd.String()) }, ) mvt.AfterUpgradeFinalized( "check if GC TTL is pinned", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { // TODO(irfansharif): This can be removed when the predecessor version // in this test is v23.1, where the default is 4h. This test was only to // make sure that existing clusters that upgrade to 23.1 retained their // existing GC TTL. l.Printf("checking if GC TTL is pinned to 24h") var ttlSeconds int query := ` SELECT (crdb_internal.pb_to_json('cockroach.config.zonepb.ZoneConfig', raw_config_protobuf)->'gc'->'ttlSeconds')::INT FROM crdb_internal.zones WHERE target = 'RANGE default' LIMIT 1 ` if err := h.QueryRow(rng, query).Scan(&ttlSeconds); err != nil { return fmt.Errorf("error querying GC TTL: %w", err) } expectedTTL := 24 * 60 * 60 // NB: 24h is what's used in the fixture if ttlSeconds != expectedTTL { return fmt.Errorf("unexpected GC TTL: actual (%d) != expected (%d)", ttlSeconds, expectedTTL) } return nil }, ) mvt.Run() } func (u *versionUpgradeTest) run(ctx context.Context, t test.Test) { defer func() { for _, db := range u.conns { _ = db.Close() } }() for i, step := range u.steps { if step != nil { t.Status(fmt.Sprintf("versionUpgradeTest: starting step %d", i+1)) step(ctx, t, u) } } } type versionUpgradeTest struct { goOS string c cluster.Cluster steps []versionStep // Cache conns because opening one takes hundreds of ms, and we do it quite // a lot. conns []*gosql.DB } func newVersionUpgradeTest(c cluster.Cluster, steps ...versionStep) *versionUpgradeTest { return &versionUpgradeTest{ goOS: ifLocal(c, runtime.GOOS, "linux"), c: c, steps: steps, } } // Return a cached conn to the given node. Don't call .Close(), the test harness // will do it. 
func (u *versionUpgradeTest) conn(ctx context.Context, t test.Test, i int) *gosql.DB { if u.conns == nil { for _, i := range u.c.All() { u.conns = append(u.conns, u.c.Conn(ctx, t.L(), i)) } } db := u.conns[i-1] // Run a trivial query to shake out errors that can occur when the server has // restarted in the meantime. _ = db.PingContext(ctx) return db } // uploadVersion is a thin wrapper around // `clusterupgrade.UploadVersion` that calls t.Fatal if that call // returns an error func uploadVersion( ctx context.Context, t test.Test, c cluster.Cluster, nodes option.NodeListOption, newVersion string, ) string { path, err := clusterupgrade.UploadVersion(ctx, t, t.L(), c, nodes, newVersion) if err != nil { t.Fatal(err) } return path } // upgradeNodes is a thin wrapper around // `clusterupgrade.RestartNodesWithNewBinary` that calls t.Fatal if // that call returns an errror. func upgradeNodes( ctx context.Context, t test.Test, c cluster.Cluster, nodes option.NodeListOption, startOpts option.StartOpts, newVersion string, ) { if err := clusterupgrade.RestartNodesWithNewBinary( ctx, t, t.L(), c, nodes, startOpts, newVersion, ); err != nil { t.Fatal(err) } } func (u *versionUpgradeTest) binaryVersion( ctx context.Context, t test.Test, i int, ) roachpb.Version { db := u.conn(ctx, t, i) v, err := clusterupgrade.BinaryVersion(db) if err != nil { t.Fatal(err) } return v } // versionStep is an isolated version migration on a running cluster. type versionStep func(ctx context.Context, t test.Test, u *versionUpgradeTest) func uploadAndStartFromCheckpointFixture(nodes option.NodeListOption, v string) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { if err := clusterupgrade.InstallFixtures(ctx, t.L(), u.c, nodes, v); err != nil { t.Fatal(err) } binary := uploadVersion(ctx, t, u.c, nodes, v) startOpts := option.DefaultStartOpts() // NB: can't start sequentially since cluster already bootstrapped. startOpts.RoachprodOpts.Sequential = false clusterupgrade.StartWithBinary(ctx, t.L(), u.c, nodes, binary, startOpts) } } func uploadAndStart(nodes option.NodeListOption, v string) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { binary := uploadVersion(ctx, t, u.c, nodes, v) startOpts := option.DefaultStartOpts() // NB: can't start sequentially since cluster already bootstrapped. startOpts.RoachprodOpts.Sequential = false clusterupgrade.StartWithBinary(ctx, t.L(), u.c, nodes, binary, startOpts) } } // binaryUpgradeStep rolling-restarts the given nodes into the new binary // version. Note that this does *not* wait for the cluster version to upgrade. // Use a waitForUpgradeStep() for that. 
func binaryUpgradeStep(nodes option.NodeListOption, newVersion string) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { if err := clusterupgrade.RestartNodesWithNewBinary( ctx, t, t.L(), u.c, nodes, option.DefaultStartOpts(), newVersion, ); err != nil { t.Fatal(err) } } } func preventAutoUpgradeStep(node int) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { db := u.conn(ctx, t, node) _, err := db.ExecContext(ctx, `SET CLUSTER SETTING cluster.preserve_downgrade_option = $1`, u.binaryVersion(ctx, t, node).String()) if err != nil { t.Fatal(err) } } } func allowAutoUpgradeStep(node int) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { db := u.conn(ctx, t, node) _, err := db.ExecContext(ctx, `RESET CLUSTER SETTING cluster.preserve_downgrade_option`) if err != nil { t.Fatal(err) } } } // NB: this is intentionally kept separate from binaryUpgradeStep because we run // feature tests between the steps, and we want to expose them (at least // heuristically) to the real-world situation in which some nodes have already // learned of a cluster version bump (from Gossip) where others haven't. This // situation tends to exhibit unexpected behavior. func waitForUpgradeStep(nodes option.NodeListOption) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { dbFunc := func(node int) *gosql.DB { return u.conn(ctx, t, node) } if err := clusterupgrade.WaitForClusterUpgrade(ctx, t.L(), nodes, dbFunc); err != nil { t.Fatal(err) } } } // makeVersionFixtureAndFatal creates fixtures from which we can test // mixed-version clusters (i.e. version X mixing with X-1). The fixtures date // back all the way to v1.0; when development begins on version X, we make a // fixture for version X-1 by running a starting the version X-2 cluster from // the X-2 fixtures, upgrading it to version X-1, and copy the resulting store // directories to the log directories (which are part of the artifacts). The // test will then fail on purpose when it's done with instructions on where to // move the files. func makeVersionFixtureAndFatal( ctx context.Context, t test.Test, c cluster.Cluster, makeFixtureVersion string, ) { var useLocalBinary bool if makeFixtureVersion == "" { c.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings(), c.Node(1)) require.NoError(t, c.Conn(ctx, t.L(), 1).QueryRowContext( ctx, `select regexp_extract(value, '^v([0-9]+\.[0-9]+\.[0-9]+)') from crdb_internal.node_build_info where field = 'Version';`, ).Scan(&makeFixtureVersion)) c.Wipe(ctx, c.Node(1)) useLocalBinary = true } predecessorVersion, err := version.PredecessorVersion(*version.MustParse("v" + makeFixtureVersion)) if err != nil { t.Fatal(err) } t.L().Printf("making fixture for %s (starting at %s)", makeFixtureVersion, predecessorVersion) if useLocalBinary { // Make steps below use the main cockroach binary (in particular, don't try // to download the released version for makeFixtureVersion which may not yet // exist) makeFixtureVersion = "" } newVersionUpgradeTest(c, // Start the cluster from a fixture. That fixture's cluster version may // be at the predecessor version (though in practice it's fully up to // date, if it was created via the checkpointer above), so add a // waitForUpgradeStep to make sure we're upgraded all the way before // moving on. // // See the comment on createCheckpoints for details on fixtures. 
uploadAndStartFromCheckpointFixture(c.All(), predecessorVersion), waitForUpgradeStep(c.All()), // NB: at this point, cluster and binary version equal predecessorVersion, // and auto-upgrades are on. binaryUpgradeStep(c.All(), makeFixtureVersion), waitForUpgradeStep(c.All()), func(ctx context.Context, t test.Test, u *versionUpgradeTest) { // If we're taking checkpoints, momentarily stop the cluster (we // need to do that to get the checkpoints to reflect a // consistent cluster state). The binary at this point will be // the new one, but the cluster version was not explicitly // bumped, though auto-update may have taken place already. // For example, if newVersion is 2.1, the cluster version in // the store directories may be 2.0 on some stores and 2.1 on // the others (though if any are on 2.1, then that's what's // stored in system.settings). // This means that when we restart from that version, we're // going to want to use the binary mentioned in the checkpoint, // or at least one compatible with the *predecessor* of the // checkpoint version. For example, for checkpoint-2.1, the // cluster version might be 2.0, so we can only use the 2.0 or // 2.1 binary, but not the 19.1 binary (as 19.1 and 2.0 are not // compatible). name := clusterupgrade.CheckpointName(u.binaryVersion(ctx, t, 1).String()) u.c.Stop(ctx, t.L(), option.DefaultStopOpts(), c.All()) binaryPath := clusterupgrade.BinaryPathFromVersion(makeFixtureVersion) c.Run(ctx, c.All(), binaryPath, "debug", "pebble", "db", "checkpoint", "{store-dir}", "{store-dir}/"+name) // The `cluster-bootstrapped` marker can already be found within // store-dir, but the rocksdb checkpoint step above does not pick it // up as it isn't recognized by RocksDB. We copy the marker // manually, it's necessary for roachprod created clusters. See // #54761. c.Run(ctx, c.Node(1), "cp", "{store-dir}/cluster-bootstrapped", "{store-dir}/"+name) // Similar to the above - newer versions require the min version file to open a store. c.Run(ctx, c.Node(1), "cp", fmt.Sprintf("{store-dir}/%s", storage.MinVersionFilename), "{store-dir}/"+name) c.Run(ctx, c.All(), "tar", "-C", "{store-dir}/"+name, "-czf", "{log-dir}/"+name+".tgz", ".") t.Fatalf(`successfully created checkpoints; failing test on purpose. Invoke the following to move the archives to the right place and commit the result: for i in 1 2 3 4; do mkdir -p pkg/cmd/roachtest/fixtures/${i} && \ mv artifacts/generate-fixtures/run_1/logs/${i}.unredacted/checkpoint-*.tgz \ pkg/cmd/roachtest/fixtures/${i}/ done `) }).run(ctx, t) } // importTPCCStep runs a TPCC import import on the first crdbNode (monitoring them all for // crashes during the import). If oldV is nil, this runs the import using the specified // version (for example "19.2.1", as provided by PredecessorVersion()) using the location // used by c.Stage(). An empty oldV uses the main cockroach binary. func importTPCCStep( oldV string, headroomWarehouses int, crdbNodes option.NodeListOption, ) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { // We need to use the predecessor binary to load into the // predecessor cluster to avoid random breakage. For example, you // can't use 21.1 to import into 20.2 due to some flag changes. // // TODO(tbg): also import a large dataset (for example 2TB bank) // that will provide cold data that may need to be migrated. 
var cmd string if oldV == "" { cmd = tpccImportCmd(headroomWarehouses) } else { cmd = tpccImportCmdWithCockroachBinary(filepath.Join("v"+oldV, "cockroach"), headroomWarehouses, "--checks=false") } // Use a monitor so that we fail cleanly if the cluster crashes // during import. m := u.c.NewMonitor(ctx, crdbNodes) m.Go(func(ctx context.Context) error { return u.c.RunE(ctx, u.c.Node(crdbNodes[0]), cmd) }) m.Wait() } } func importLargeBankStep(oldV string, rows int, crdbNodes option.NodeListOption) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { // Use the predecessor binary to load into the predecessor // cluster to avoid random breakage due to flag changes, etc. binary := "./cockroach" if oldV != "" { binary = filepath.Join("v"+oldV, "cockroach") } // Use a monitor so that we fail cleanly if the cluster crashes // during import. m := u.c.NewMonitor(ctx, crdbNodes) m.Go(func(ctx context.Context) error { return u.c.RunE(ctx, u.c.Node(crdbNodes[0]), binary, "workload", "fixtures", "import", "bank", "--payload-bytes=10240", "--rows="+fmt.Sprint(rows), "--seed=4", "--db=bigbank") }) m.Wait() } } func sleepStep(d time.Duration) versionStep { return func(ctx context.Context, t test.Test, u *versionUpgradeTest) { time.Sleep(d) } }
pkg/cmd/roachtest/tests/versionupgrade.go
1
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.9983434677124023, 0.022420041263103485, 0.00016195466741919518, 0.00022861475008539855, 0.13962215185165405 ]
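Aside (illustrative, not a dataset field): the label-1 rows pair this versionupgrade.go file and the fixtures.go row with hunks 9 through 11. Read together, their after_edit fields drop the useLocalBinary fallback and derive the version by stripping the leading "v". A sketch of the resulting region of makeVersionFixtureAndFatal, reconstructed only from the code_window/after_edit fields above rather than from the actual commit:

	fixtureVersion := makeFixtureVersion[1:] // drop the leading v

	newVersionUpgradeTest(c,
		uploadAndStartFromCheckpointFixture(c.All(), predecessorVersion),
		waitForUpgradeStep(c.All()),

		// NB: at this point, cluster and binary version equal predecessorVersion,
		// and auto-upgrades are on.
		binaryUpgradeStep(c.All(), fixtureVersion),
		waitForUpgradeStep(c.All()),

		// ... the checkpoint-creating step follows, now calling
		// clusterupgrade.BinaryPathFromVersion(fixtureVersion) ...
	).run(ctx, t)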
{ "id": 11, "code_window": [ "\t\t\t// compatible).\n", "\t\t\tname := clusterupgrade.CheckpointName(u.binaryVersion(ctx, t, 1).String())\n", "\t\t\tu.c.Stop(ctx, t.L(), option.DefaultStopOpts(), c.All())\n", "\n", "\t\t\tbinaryPath := clusterupgrade.BinaryPathFromVersion(makeFixtureVersion)\n", "\t\t\tc.Run(ctx, c.All(), binaryPath, \"debug\", \"pebble\", \"db\", \"checkpoint\",\n", "\t\t\t\t\"{store-dir}\", \"{store-dir}/\"+name)\n", "\t\t\t// The `cluster-bootstrapped` marker can already be found within\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tbinaryPath := clusterupgrade.BinaryPathFromVersion(fixtureVersion)\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 414 }
// Copyright 2022 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package kvserver import "github.com/cockroachdb/cockroach/pkg/util/metric" // RaftTransportMetrics is the set of metrics for a given RaftTransport. type RaftTransportMetrics struct { SendQueueSize *metric.Gauge SendQueueBytes *metric.Gauge MessagesDropped *metric.Counter MessagesSent *metric.Counter MessagesRcvd *metric.Counter ReverseSent *metric.Counter ReverseRcvd *metric.Counter } func (t *RaftTransport) initMetrics() { t.metrics = &RaftTransportMetrics{ SendQueueSize: metric.NewFunctionalGauge(metric.Metadata{ Name: "raft.transport.send-queue-size", Help: `Number of pending outgoing messages in the Raft Transport queue. The queue is composed of multiple bounded channels associated with different peers. The overall size of tens of thousands could indicate issues streaming messages to at least one peer. Use this metric in conjunction with send-queue-bytes.`, Measurement: "Messages", Unit: metric.Unit_COUNT, }, t.queueMessageCount), SendQueueBytes: metric.NewFunctionalGauge(metric.Metadata{ Name: "raft.transport.send-queue-bytes", Help: `The total byte size of pending outgoing messages in the queue. The queue is composed of multiple bounded channels associated with different peers. A size higher than the average baseline could indicate issues streaming messages to at least one peer. Use this metric together with send-queue-size, to have a fuller picture.`, Measurement: "Bytes", Unit: metric.Unit_BYTES, }, t.queueByteSize), MessagesDropped: metric.NewCounter(metric.Metadata{ Name: "raft.transport.sends-dropped", Help: "Number of Raft message sends dropped by the Raft Transport", Measurement: "Messages", Unit: metric.Unit_COUNT, }), MessagesSent: metric.NewCounter(metric.Metadata{ Name: "raft.transport.sent", Help: "Number of Raft messages sent by the Raft Transport", Measurement: "Messages", Unit: metric.Unit_COUNT, }), MessagesRcvd: metric.NewCounter(metric.Metadata{ Name: "raft.transport.rcvd", Help: "Number of Raft messages received by the Raft Transport", Measurement: "Messages", Unit: metric.Unit_COUNT, }), ReverseSent: metric.NewCounter(metric.Metadata{ Name: "raft.transport.reverse-sent", Help: `Messages sent in the reverse direction of a stream. These messages should be rare. They are mostly informational, and are not actual responses to Raft messages. Responses are sent over another stream.`, Measurement: "Messages", Unit: metric.Unit_COUNT, }), ReverseRcvd: metric.NewCounter(metric.Metadata{ Name: "raft.transport.reverse-rcvd", Help: `Messages received from the reverse direction of a stream. These messages should be rare. They are mostly informational, and are not actual responses to Raft messages. Responses are received over another stream.`, Measurement: "Messages", Unit: metric.Unit_COUNT, }), } }
pkg/kv/kvserver/raft_transport_metrics.go
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.00017934746574610472, 0.0001728501811157912, 0.00016424730711150914, 0.00017442298121750355, 0.000004346218702266924 ]
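Aside (illustrative): raft_transport_metrics.go above builds several functional gauges whose values are pulled from RaftTransport callbacks. A minimal sketch of declaring one such gauge on its own; the callback signature func() int64 is an assumption inferred from how t.queueMessageCount is passed above, and the metric name is invented:

	// queueLen stands in for a real value source; hypothetical placeholder.
	queueLen := func() int64 { return 0 }
	sendQueueSize := metric.NewFunctionalGauge(metric.Metadata{
		Name:        "example.transport.send-queue-size",
		Help:        "Pending outgoing messages (illustrative only).",
		Measurement: "Messages",
		Unit:        metric.Unit_COUNT,
	}, queueLen)
	_ = sendQueueSize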
{ "id": 11, "code_window": [ "\t\t\t// compatible).\n", "\t\t\tname := clusterupgrade.CheckpointName(u.binaryVersion(ctx, t, 1).String())\n", "\t\t\tu.c.Stop(ctx, t.L(), option.DefaultStopOpts(), c.All())\n", "\n", "\t\t\tbinaryPath := clusterupgrade.BinaryPathFromVersion(makeFixtureVersion)\n", "\t\t\tc.Run(ctx, c.All(), binaryPath, \"debug\", \"pebble\", \"db\", \"checkpoint\",\n", "\t\t\t\t\"{store-dir}\", \"{store-dir}/\"+name)\n", "\t\t\t// The `cluster-bootstrapped` marker can already be found within\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tbinaryPath := clusterupgrade.BinaryPathFromVersion(fixtureVersion)\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 414 }
// Copyright 2014 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package kvserver import ( "context" "reflect" "testing" "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/logstore" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) func newStores(ambientCtx log.AmbientContext, clock *hlc.Clock) *Stores { return NewStores(ambientCtx, clock) } func TestStoresAddStore(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ls := newStores(log.MakeTestingAmbientCtxWithNewTracer(), hlc.NewClockForTesting(nil)) store := Store{ Ident: &roachpb.StoreIdent{StoreID: 123}, } ls.AddStore(&store) if !ls.HasStore(store.Ident.StoreID) { t.Errorf("expected local sender to contain storeID=%d", store.Ident.StoreID) } if ls.HasStore(store.Ident.StoreID + 1) { t.Errorf("expected local sender to not contain storeID=%d", store.Ident.StoreID+1) } } func TestStoresRemoveStore(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ls := newStores(log.MakeTestingAmbientCtxWithNewTracer(), hlc.NewClockForTesting(nil)) storeID := roachpb.StoreID(89) ls.AddStore(&Store{Ident: &roachpb.StoreIdent{StoreID: storeID}}) ls.RemoveStore(&Store{Ident: &roachpb.StoreIdent{StoreID: storeID}}) if ls.HasStore(storeID) { t.Errorf("expted local sender to remove storeID=%d", storeID) } } func TestStoresGetStoreCount(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ls := newStores(log.MakeTestingAmbientCtxWithNewTracer(), hlc.NewClockForTesting(nil)) if ls.GetStoreCount() != 0 { t.Errorf("expected 0 stores in new local sender") } expectedCount := 10 for i := 0; i < expectedCount; i++ { ls.AddStore(&Store{Ident: &roachpb.StoreIdent{StoreID: roachpb.StoreID(i)}}) } if count := ls.GetStoreCount(); count != expectedCount { t.Errorf("expected store count to be %d but was %d", expectedCount, count) } } func TestStoresVisitStores(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ls := newStores(log.MakeTestingAmbientCtxWithNewTracer(), hlc.NewClockForTesting(nil)) numStores := 10 for i := 0; i < numStores; i++ { ls.AddStore(&Store{Ident: &roachpb.StoreIdent{StoreID: roachpb.StoreID(i)}}) } visit := make([]bool, numStores) err := ls.VisitStores(func(s *Store) error { visit[s.Ident.StoreID] = true; return nil }) if err != nil { t.Errorf("unexpected error on visit: %s", err.Error()) } for i, visited := range visit { if !visited { t.Errorf("store %d was not visited", i) } } errBoom := errors.New("boom") if err := ls.VisitStores(func(s *Store) error { return errBoom }); !errors.Is(err, errBoom) { t.Errorf("got unexpected error %v", err) } } func TestStoresGetReplicaForRangeID(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := 
context.Background() stopper := stop.NewStopper() defer stopper.Stop(ctx) clock := hlc.NewClockForTesting(nil) ls := newStores(log.MakeTestingAmbientCtxWithNewTracer(), clock) numStores := 10 for i := 1; i <= numStores; i++ { storeID := roachpb.StoreID(i) rangeID := roachpb.RangeID(i) replicaID := roachpb.ReplicaID(1) memEngine := storage.NewDefaultInMemForTesting() stopper.AddCloser(memEngine) cfg := TestStoreConfig(clock) cfg.Transport = NewDummyRaftTransport(cfg.Settings, cfg.AmbientCtx.Tracer) store := NewStore(ctx, cfg, memEngine, &roachpb.NodeDescriptor{NodeID: 1}) // Fake-set an ident. This is usually read from the engine on store.Start() // which we're not even going to call. store.Ident = &roachpb.StoreIdent{StoreID: storeID} ls.AddStore(store) desc := &roachpb.RangeDescriptor{ RangeID: rangeID, StartKey: roachpb.RKey("a"), EndKey: roachpb.RKey("b"), InternalReplicas: []roachpb.ReplicaDescriptor{ { StoreID: storeID, ReplicaID: replicaID, NodeID: 1, }, }, } require.NoError(t, logstore.NewStateLoader(desc.RangeID).SetRaftReplicaID(ctx, store.TODOEngine(), replicaID)) replica, err := loadInitializedReplicaForTesting(ctx, store, desc, replicaID) if err != nil { t.Fatalf("unexpected error when creating replica: %+v", err) } err2 := store.AddReplica(replica) if err2 != nil { t.Fatalf("unexpected error when adding replica: %v", err2) } } // Test the case where the replica we're looking for exists. rangeID1 := roachpb.RangeID(5) replica1, _, err1 := ls.GetReplicaForRangeID(ctx, rangeID1) if replica1 == nil { t.Fatal("expected replica to be found; was nil") } if err1 != nil { t.Fatalf("expected err to be nil; was %v", err1) } if replica1.RangeID != rangeID1 { t.Fatalf("expected replica's range id to be %v; got %v", rangeID1, replica1.RangeID) } // Test the case where the replica we're looking for doesn't exist. rangeID2 := roachpb.RangeID(1000) replica2, _, err2 := ls.GetReplicaForRangeID(ctx, rangeID2) if replica2 != nil { t.Fatalf("expected replica to be nil; was %v", replica2) } expectedError := kvpb.NewRangeNotFoundError(rangeID2, 0) if err2.Error() != expectedError.Error() { t.Fatalf("expected err to be %v; was %v", expectedError, err2) } } func TestStoresGetStore(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ls := newStores(log.MakeTestingAmbientCtxWithNewTracer(), hlc.NewClockForTesting(nil)) store := Store{Ident: &roachpb.StoreIdent{StoreID: 1}} replica := roachpb.ReplicaDescriptor{StoreID: store.Ident.StoreID} s, pErr := ls.GetStore(replica.StoreID) if s != nil || pErr == nil { t.Errorf("expected no stores in new local sender") } ls.AddStore(&store) s, pErr = ls.GetStore(replica.StoreID) if s == nil { t.Errorf("expected store") } else if s.Ident.StoreID != store.Ident.StoreID { t.Errorf("expected storeID to be %d but was %d", s.Ident.StoreID, store.Ident.StoreID) } else if pErr != nil { t.Errorf("expected no error, instead had err=%s", pErr) } } var storeIDAlloc roachpb.StoreID // createStores creates a slice of count stores. func createStores(count int) (*timeutil.ManualTime, []*Store, *Stores, *stop.Stopper) { stopper := stop.NewStopper() manual := timeutil.NewManualTime(timeutil.Unix(0, 123)) cfg := TestStoreConfig(hlc.NewClockForTesting(manual)) ls := newStores(log.MakeTestingAmbientCtxWithNewTracer(), cfg.Clock) // Create two stores with ranges we care about. 
stores := []*Store{} for i := 0; i < count; i++ { cfg.Transport = NewDummyRaftTransport(cfg.Settings, cfg.AmbientCtx.Tracer) eng := storage.NewDefaultInMemForTesting() stopper.AddCloser(eng) s := NewStore(context.Background(), cfg, eng, &roachpb.NodeDescriptor{NodeID: 1}) storeIDAlloc++ s.Ident = &roachpb.StoreIdent{StoreID: storeIDAlloc} stores = append(stores, s) } return manual, stores, ls, stopper } // TestStoresGossipStorage verifies reading and writing of bootstrap info. func TestStoresGossipStorage(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) manual, stores, ls, stopper := createStores(2) defer stopper.Stop(context.Background()) ls.AddStore(stores[0]) // Verify initial read is empty. var bi gossip.BootstrapInfo if err := ls.ReadBootstrapInfo(&bi); err != nil { t.Fatal(err) } if len(bi.Addresses) != 0 { t.Errorf("expected empty bootstrap info: %+v", bi) } // Add a fake address and write. manual.Advance(1) bi.Addresses = append(bi.Addresses, util.MakeUnresolvedAddr("tcp", "127.0.0.1:8001")) if err := ls.WriteBootstrapInfo(&bi); err != nil { t.Fatal(err) } // Verify on read. manual.Advance(1) var newBI gossip.BootstrapInfo if err := ls.ReadBootstrapInfo(&newBI); err != nil { t.Fatal(err) } if len(newBI.Addresses) != 1 { t.Errorf("expected single bootstrap info address: %+v", newBI) } // Add another store and verify it has bootstrap info written. ls.AddStore(stores[1]) // Create a new stores object to verify read. ls2 := newStores(log.MakeTestingAmbientCtxWithNewTracer(), ls.clock) ls2.AddStore(stores[1]) var verifyBI gossip.BootstrapInfo if err := ls2.ReadBootstrapInfo(&verifyBI); err != nil { t.Fatal(err) } if !reflect.DeepEqual(bi, verifyBI) { t.Errorf("bootstrap info %+v not equal to expected %+v", verifyBI, bi) } } // TestStoresGossipStorageReadLatest verifies that the latest // bootstrap info from multiple stores is returned on Read. func TestStoresGossipStorageReadLatest(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) manual, stores, ls, stopper := createStores(2) defer stopper.Stop(context.Background()) ls.AddStore(stores[0]) // Add a fake address and write. var bi gossip.BootstrapInfo bi.Addresses = append(bi.Addresses, util.MakeUnresolvedAddr("tcp", "127.0.0.1:8001")) if err := ls.WriteBootstrapInfo(&bi); err != nil { t.Fatal(err) } // Now remove store 0 and add store 1. ls.RemoveStore(stores[0]) ls.AddStore(stores[1]) // Increment clock, add another address and write. manual.Advance(1) bi.Addresses = append(bi.Addresses, util.MakeUnresolvedAddr("tcp", "127.0.0.1:8002")) if err := ls.WriteBootstrapInfo(&bi); err != nil { t.Fatal(err) } // Create a new stores object to freshly read. Should get latest // version from store 1. manual.Advance(1) ls2 := newStores(log.MakeTestingAmbientCtxWithNewTracer(), ls.clock) ls2.AddStore(stores[0]) ls2.AddStore(stores[1]) var verifyBI gossip.BootstrapInfo if err := ls2.ReadBootstrapInfo(&verifyBI); err != nil { t.Fatal(err) } if !reflect.DeepEqual(bi, verifyBI) { t.Errorf("bootstrap info %+v not equal to expected %+v", verifyBI, bi) } // Verify that stores[0], which had old info, was updated with // latest bootstrap info during the read. ls3 := newStores(log.MakeTestingAmbientCtxWithNewTracer(), ls.clock) ls3.AddStore(stores[0]) verifyBI.Reset() if err := ls3.ReadBootstrapInfo(&verifyBI); err != nil { t.Fatal(err) } if !reflect.DeepEqual(bi, verifyBI) { t.Errorf("bootstrap info %+v not equal to expected %+v", verifyBI, bi) } }
pkg/kv/kvserver/stores_test.go
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.0002395829069428146, 0.00017299749015364796, 0.00016156212950590998, 0.00017122080316767097, 0.00001247493491973728 ]
{ "id": 11, "code_window": [ "\t\t\t// compatible).\n", "\t\t\tname := clusterupgrade.CheckpointName(u.binaryVersion(ctx, t, 1).String())\n", "\t\t\tu.c.Stop(ctx, t.L(), option.DefaultStopOpts(), c.All())\n", "\n", "\t\t\tbinaryPath := clusterupgrade.BinaryPathFromVersion(makeFixtureVersion)\n", "\t\t\tc.Run(ctx, c.All(), binaryPath, \"debug\", \"pebble\", \"db\", \"checkpoint\",\n", "\t\t\t\t\"{store-dir}\", \"{store-dir}/\"+name)\n", "\t\t\t// The `cluster-bootstrapped` marker can already be found within\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tbinaryPath := clusterupgrade.BinaryPathFromVersion(fixtureVersion)\n" ], "file_path": "pkg/cmd/roachtest/tests/versionupgrade.go", "type": "replace", "edit_start_line_idx": 414 }
// Copyright 2019, OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package opentelemetry.proto.common.v1; option csharp_namespace = "OpenTelemetry.Proto.Common.V1"; option java_multiple_files = true; option java_package = "io.opentelemetry.proto.common.v1"; option java_outer_classname = "CommonProto"; option go_package = "v1"; // AnyValue is used to represent any type of attribute value. AnyValue may contain a // primitive value such as a string or integer or it may contain an arbitrary nested // object containing arrays, key-value lists and primitives. message AnyValue { // The value is one of the listed fields. It is valid for all values to be unspecified // in which case this AnyValue is considered to be "empty". oneof value { string string_value = 1; bool bool_value = 2; int64 int_value = 3; double double_value = 4; ArrayValue array_value = 5; KeyValueList kvlist_value = 6; bytes bytes_value = 7; } } // ArrayValue is a list of AnyValue messages. We need ArrayValue as a message // since oneof in AnyValue does not allow repeated fields. message ArrayValue { // Array of values. The array may be empty (contain 0 elements). repeated AnyValue values = 1; } // KeyValueList is a list of KeyValue messages. We need KeyValueList as a message // since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need // a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to // avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches // are semantically equivalent. message KeyValueList { // A collection of key/value pairs of key-value pairs. The list may be empty (may // contain 0 elements). // The keys MUST be unique (it is not allowed to have more than one // value with the same key). repeated KeyValue values = 1; } // KeyValue is a key-value pair that is used to store Span attributes, Link // attributes, etc. message KeyValue { string key = 1; AnyValue value = 2; } // InstrumentationScope is a message representing the instrumentation scope information // such as the fully qualified name and version. message InstrumentationScope { // An empty instrumentation scope name means the name is unknown. string name = 1; string version = 2; repeated KeyValue attributes = 3; uint32 dropped_attributes_count = 4; }
pkg/obsservice/obspb/opentelemetry-proto/common/v1/common.proto
0
https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298
[ 0.0012662641238421202, 0.00030763426912017167, 0.0001635610533412546, 0.00017128977924585342, 0.00036235060542821884 ]
{ "id": 0, "code_window": [ "import (\n", "\t\"testing\"\n", "\t\"time\"\n", ")\n", "\n", "func TestTxnPanics(t *testing.T) {\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\n", "\t\"github.com/coreos/etcd/pkg/testutil\"\n" ], "file_path": "clientv3/txn_test.go", "type": "add", "edit_start_line_idx": 19 }
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package testutil import ( "fmt" "net/http" "os" "regexp" "runtime" "sort" "strings" "testing" "time" ) /* CheckLeakedGoroutine verifies tests do not leave any leaky goroutines. It returns true when there are goroutines still running(leaking) after all tests. import "github.com/coreos/etcd/pkg/testutil" func TestMain(m *testing.M) { v := m.Run() if v == 0 && testutil.CheckLeakedGoroutine() { os.Exit(1) } os.Exit(v) } func TestSample(t *testing.T) { defer testutil.AfterTest(t) ... } */ func CheckLeakedGoroutine() bool { if testing.Short() { // not counting goroutines for leakage in -short mode return false } gs := interestingGoroutines() if len(gs) == 0 { return false } stackCount := make(map[string]int) re := regexp.MustCompile(`\(0[0-9a-fx, ]*\)`) for _, g := range gs { // strip out pointer arguments in first function of stack dump normalized := string(re.ReplaceAll([]byte(g), []byte("(...)"))) stackCount[normalized]++ } fmt.Fprintf(os.Stderr, "Too many goroutines running after all test(s).\n") for stack, count := range stackCount { fmt.Fprintf(os.Stderr, "%d instances of:\n%s\n", count, stack) } return true } func AfterTest(t *testing.T) { http.DefaultTransport.(*http.Transport).CloseIdleConnections() if testing.Short() { return } var bad string badSubstring := map[string]string{ ").writeLoop(": "a Transport", "created by net/http/httptest.(*Server).Start": "an httptest.Server", "timeoutHandler": "a TimeoutHandler", "net.(*netFD).connect(": "a timing out dial", ").noteClientGone(": "a closenotifier sender", ").readLoop(": "a Transport", } var stacks string for i := 0; i < 6; i++ { bad = "" stacks = strings.Join(interestingGoroutines(), "\n\n") for substr, what := range badSubstring { if strings.Contains(stacks, substr) { bad = what } } if bad == "" { return } // Bad stuff found, but goroutines might just still be // shutting down, so give it some time. time.Sleep(50 * time.Millisecond) } t.Errorf("Test appears to have leaked %s:\n%s", bad, stacks) } func interestingGoroutines() (gs []string) { buf := make([]byte, 2<<20) buf = buf[:runtime.Stack(buf, true)] for _, g := range strings.Split(string(buf), "\n\n") { sl := strings.SplitN(g, "\n", 2) if len(sl) != 2 { continue } stack := strings.TrimSpace(sl[1]) if stack == "" || strings.Contains(stack, "created by testing.RunTests") || strings.Contains(stack, "testing.Main(") || strings.Contains(stack, "runtime.goexit") || strings.Contains(stack, "github.com/coreos/etcd/pkg/testutil.interestingGoroutines") || strings.Contains(stack, "github.com/coreos/etcd/pkg/logutil.(*MergeLogger).outputLoop") || strings.Contains(stack, "github.com/golang/glog.(*loggingT).flushDaemon") || strings.Contains(stack, "created by runtime.gc") || strings.Contains(stack, "runtime.MHeap_Scavenger") { continue } gs = append(gs, stack) } sort.Strings(gs) return }
pkg/testutil/leak.go
1
https://github.com/etcd-io/etcd/commit/984badeb03b2aa2ccf02bb9a763ae6b521f9e6c2
[ 0.0002498944813851267, 0.00018341073882766068, 0.0001669755147304386, 0.0001739694271236658, 0.000023216909539769404 ]
{ "id": 0, "code_window": [ "import (\n", "\t\"testing\"\n", "\t\"time\"\n", ")\n", "\n", "func TestTxnPanics(t *testing.T) {\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\n", "\t\"github.com/coreos/etcd/pkg/testutil\"\n" ], "file_path": "clientv3/txn_test.go", "type": "add", "edit_start_line_idx": 19 }
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package grpcproxy

import (
	"github.com/coreos/etcd/clientv3"

	"golang.org/x/net/context"
)

type watcherSingle struct {
	// ch delivers events received from the etcd server
	ch clientv3.WatchChan

	// cancel is used to cancel the underlying etcd server watcher
	// It should also close the ch.
	cancel context.CancelFunc

	// sws is the stream this watcherSingle is attached to
	sws *serverWatchStream

	w watcher

	rev         int64 // current revision
	lastSeenRev int64

	donec chan struct{}
}

func newWatcherSingle(wch clientv3.WatchChan, c context.CancelFunc, w watcher, sws *serverWatchStream) *watcherSingle {
	return &watcherSingle{
		sws: sws,

		ch:     wch,
		cancel: c,

		w: w,

		donec: make(chan struct{}),
	}
}

func (ws watcherSingle) run() {
	defer close(ws.donec)

	for wr := range ws.ch {
		ws.rev = wr.Header.Revision
		ws.w.send(wr)
		ws.lastSeenRev = wr.Events[len(wr.Events)-1].Kv.ModRevision
		if ws.sws.maybeCoalesceWatcher(ws) {
			return
		}
	}
}

// canPromote returns true if a watcherSingle can promote itself to a watcher group
// when it has already caught up with the current revision.
func (ws watcherSingle) canPromote() bool {
	return ws.rev == ws.lastSeenRev
}

func (ws watcherSingle) stop() {
	ws.cancel()
	<-ws.donec
}
proxy/grpcproxy/watcher_single.go
0
https://github.com/etcd-io/etcd/commit/984badeb03b2aa2ccf02bb9a763ae6b521f9e6c2
[ 0.00017863186076283455, 0.00017158666742034256, 0.00016635430802125484, 0.00017065415158867836, 0.000004149748747295234 ]
{ "id": 0, "code_window": [ "import (\n", "\t\"testing\"\n", "\t\"time\"\n", ")\n", "\n", "func TestTxnPanics(t *testing.T) {\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\n", "\t\"github.com/coreos/etcd/pkg/testutil\"\n" ], "file_path": "clientv3/txn_test.go", "type": "add", "edit_start_line_idx": 19 }
# etcd3 API

TODO: API doc

## Data Model

etcd is designed to reliably store infrequently updated data and provide reliable watch queries. etcd exposes previous versions of key-value pairs to support inexpensive snapshots and watch history events (“time travel queries”). A persistent, multi-version, concurrency-control data model is a good fit for these use cases.

etcd stores data in a multiversion [persistent][persistent-ds] key-value store. The persistent key-value store preserves the previous version of a key-value pair when its value is superseded with new data. The key-value store is effectively immutable; its operations do not update the structure in-place, but instead always generate a new updated structure. All past versions of keys are still accessible and watchable after modification. To prevent the data store from growing indefinitely over time from maintaining old versions, the store may be compacted to shed the oldest versions of superseded data.

### Logical View

The store’s logical view is a flat binary key space. The key space has a lexically sorted index on byte string keys so range queries are inexpensive.

The key space maintains multiple revisions. Each atomic mutative operation (e.g., a transaction operation may contain multiple operations) creates a new revision on the key space. All data held by previous revisions remains unchanged. Old versions of keys can still be accessed through previous revisions. Likewise, revisions are indexed as well; ranging over revisions with watchers is efficient. If the store is compacted to recover space, revisions before the compact revision will be removed.

A key’s lifetime spans a generation. Each key may have one or multiple generations. Creating a key increments the generation of that key, starting at 1 if the key never existed. Deleting a key generates a key tombstone, concluding the key’s current generation. Each modification of a key creates a new version of the key. Once a compaction happens, any generation ended before the given revision will be removed, and values set before the compaction revision, except the latest one, will be removed.

### Physical View

etcd stores the physical data as key-value pairs in a persistent [b+tree][b+tree]. Each revision of the store’s state only contains the delta from its previous revision to be efficient. A single revision may correspond to multiple keys in the tree.

The key of a key-value pair is a 3-tuple (major, sub, type). Major is the store revision holding the key. Sub differentiates among keys within the same revision. Type is an optional suffix for special values (e.g., `t` if the value contains a tombstone). The value of the key-value pair contains the modification from the previous revision, thus one delta from the previous revision. The b+tree is ordered by key in lexical byte-order. Ranged lookups over revision deltas are fast; this enables quickly finding modifications from one specific revision to another. Compaction removes out-of-date key-value pairs.

etcd also keeps a secondary in-memory [btree][btree] index to speed up range queries over keys. The keys in the btree index are the keys of the store exposed to users. The value is a pointer to the modification of the persistent b+tree. Compaction removes dead pointers.

## KV API Guarantees

etcd is a consistent and durable key value store with mini-transaction (TODO: link to txn doc when we have it) support. The key value store is exposed through the KV APIs. etcd tries to ensure the strongest consistency and durability guarantees for a distributed system. This specification enumerates the KV API guarantees made by etcd.

### APIs to consider

* Read APIs
  * range
  * watch
* Write APIs
  * put
  * delete
* Combination (read-modify-write) APIs
  * txn

### etcd Specific Definitions

#### operation completed

An etcd operation is considered complete when it is committed through consensus, and therefore “executed” -- permanently stored -- by the etcd storage engine. The client knows an operation is completed when it receives a response from the etcd server. Note that the client may be uncertain about the status of an operation if it times out, or there is a network disruption between the client and the etcd member. etcd may also abort operations when there is a leader election. etcd does not send `abort` responses to clients’ outstanding requests in this event.

#### revision

An etcd operation that modifies the key value store is assigned a single increasing revision. A transaction operation might modify the key value store multiple times, but only one revision is assigned. The revision attribute of a key value pair that is modified by the operation has the same value as the revision of the operation. The revision can be used as a logical clock for the key value store. A key value pair that has a larger revision is modified after a key value pair with a smaller revision. Two key value pairs that have the same revision are modified by an operation "concurrently".

### Guarantees Provided

#### Atomicity

All API requests are atomic; an operation either completes entirely or not at all. For watch requests, all events generated by one operation will be in one watch response. Watch never observes partial events for a single operation.

#### Consistency

All API calls ensure [sequential consistency][seq_consistency], the strongest consistency guarantee available from distributed systems. No matter which etcd member server a client makes requests to, a client reads the same events in the same order. If two members complete the same number of operations, the state of the two members is consistent.

For watch operations, etcd guarantees to return the same value for the same key across all members for the same revision. For range operations, etcd has a similar guarantee for [linearized][Linearizability] access; serialized access may be behind the quorum state, so that the later revision is not yet available.

As with all distributed systems, it is impossible for etcd to ensure [strict consistency][strict_consistency]. etcd does not guarantee that it will return to a read the “most recent” value (as measured by a wall clock when a request is completed) available on any cluster member.

#### Isolation

etcd ensures [serializable isolation][serializable_isolation], which is the highest isolation level available in distributed systems. Read operations will never observe any intermediate data.

#### Durability

Any completed operations are durable. All accessible data is also durable data. A read will never return data that has not been made durable.

#### Linearizability

Linearizability (also known as Atomic Consistency or External Consistency) is a consistency level between strict consistency and sequential consistency.

For linearizability, suppose each operation receives a timestamp from a loosely synchronized global clock. Operations are linearized if and only if they always complete as though they were executed in a sequential order and each operation appears to complete in the order specified by the program. Likewise, if an operation’s timestamp precedes another, that operation must also precede the other operation in the sequence.

For example, consider a client completing a write at time point 1 (*t1*). A client issuing a read at *t2* (for *t2* > *t1*) should receive a value at least as recent as the previous write, completed at *t1*. However, the read might actually complete only by *t3*, and the returned value, current at *t2* when the read began, might be "stale" by *t3*.

etcd does not ensure linearizability for watch operations. Users are expected to verify the revision of watch responses to ensure correct ordering.

etcd ensures linearizability for all other operations by default. Linearizability comes with a cost, however, because linearized requests must go through the Raft consensus process. To obtain lower latencies and higher throughput for read requests, clients can configure a request’s consistency mode to `serializable`, which may access stale data with respect to quorum, but removes the performance penalty of linearized accesses' reliance on live consensus.

[persistent-ds]: [https://en.wikipedia.org/wiki/Persistent_data_structure]
[btree]: [https://en.wikipedia.org/wiki/B-tree]
[b+tree]: [https://en.wikipedia.org/wiki/B%2B_tree]
[seq_consistency]: [https://en.wikipedia.org/wiki/Consistency_model#Sequential_consistency]
[strict_consistency]: [https://en.wikipedia.org/wiki/Consistency_model#Strict_consistency]
[serializable_isolation]: [https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable]
[Linearizability]: [#Linearizability]
Documentation/v2/api_v3.md
0
https://github.com/etcd-io/etcd/commit/984badeb03b2aa2ccf02bb9a763ae6b521f9e6c2
[ 0.00025548413395881653, 0.00017598114209249616, 0.0001628596510272473, 0.00016693121870048344, 0.000026676361812860705 ]
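The consistency-mode and revision concepts described in the api_v3.md record above can be made concrete with a small client sketch. This is a hedged example, not part of the record: the endpoint address, key name, and timeouts are assumptions, and only public clientv3 calls are used.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"

	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Default reads are linearizable: they go through consensus and never
	// return data older than a previously completed write.
	linResp, err := cli.Get(ctx, "foo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("linearizable read at revision", linResp.Header.Revision)

	// Serializable reads skip the consensus round trip; they are cheaper but
	// may observe a stale (pre-quorum) view of the key space.
	serResp, err := cli.Get(ctx, "foo", clientv3.WithSerializable())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("serializable read at revision", serResp.Header.Revision)

	// Historical ("time travel") reads pin an explicit past revision, as long
	// as that revision has not been compacted away.
	if _, err := cli.Get(ctx, "foo", clientv3.WithRev(linResp.Header.Revision)); err != nil {
		log.Fatal(err)
	}
}
```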
{ "id": 0, "code_window": [ "import (\n", "\t\"testing\"\n", "\t\"time\"\n", ")\n", "\n", "func TestTxnPanics(t *testing.T) {\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\n", "\t\"github.com/coreos/etcd/pkg/testutil\"\n" ], "file_path": "clientv3/txn_test.go", "type": "add", "edit_start_line_idx": 19 }
// Copyright 2016 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package integration import ( "testing" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "github.com/coreos/etcd/integration" "github.com/coreos/etcd/pkg/testutil" "golang.org/x/net/context" ) func TestRoleError(t *testing.T) { defer testutil.AfterTest(t) clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) authapi := clientv3.NewAuth(clus.RandClient()) _, err := authapi.RoleAdd(context.TODO(), "test-role") if err != nil { t.Fatal(err) } _, err = authapi.RoleAdd(context.TODO(), "test-role") if err != rpctypes.ErrRoleAlreadyExist { t.Fatalf("expected %v, got %v", rpctypes.ErrRoleAlreadyExist, err) } }
clientv3/integration/role_test.go
0
https://github.com/etcd-io/etcd/commit/984badeb03b2aa2ccf02bb9a763ae6b521f9e6c2
[ 0.0002876907819882035, 0.00020136777311563492, 0.00016958609921857715, 0.00017863186076283455, 0.00004400647958391346 ]
{ "id": 1, "code_window": [ ")\n", "\n", "func TestTxnPanics(t *testing.T) {\n", "\tkv := &kv{}\n", "\n", "\terrc := make(chan string)\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tdefer testutil.AfterTest(t)\n", "\n" ], "file_path": "clientv3/txn_test.go", "type": "add", "edit_start_line_idx": 22 }
// Copyright 2016 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package clientv3 import ( "testing" "time" ) func TestTxnPanics(t *testing.T) { kv := &kv{} errc := make(chan string) df := func() { if s := recover(); s != nil { errc <- s.(string) } } cmp := Compare(CreateRevision("foo"), "=", 0) op := OpPut("foo", "bar") tests := []struct { f func() err string }{ { f: func() { defer df() kv.Txn(nil).If(cmp).If(cmp) }, err: "cannot call If twice!", }, { f: func() { defer df() kv.Txn(nil).Then(op).If(cmp) }, err: "cannot call If after Then!", }, { f: func() { defer df() kv.Txn(nil).Else(op).If(cmp) }, err: "cannot call If after Else!", }, { f: func() { defer df() kv.Txn(nil).Then(op).Then(op) }, err: "cannot call Then twice!", }, { f: func() { defer df() kv.Txn(nil).Else(op).Then(op) }, err: "cannot call Then after Else!", }, { f: func() { defer df() kv.Txn(nil).Else(op).Else(op) }, err: "cannot call Else twice!", }, } for i, tt := range tests { go tt.f() select { case err := <-errc: if err != tt.err { t.Errorf("#%d: got %s, wanted %s", i, err, tt.err) } case <-time.After(time.Second): t.Errorf("#%d: did not panic, wanted panic %s", i, tt.err) } } }
clientv3/txn_test.go
1
https://github.com/etcd-io/etcd/commit/984badeb03b2aa2ccf02bb9a763ae6b521f9e6c2
[ 0.9985306262969971, 0.6330569982528687, 0.00016913765284698457, 0.9920706152915955, 0.47842562198638916 ]
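The txn_test.go file in the record above exercises the panics that guard the clientv3 transaction builder's call order. As a hedged sketch of the intended, non-panicking order (the function name, key, and value here are illustrative, not from the record):

```go
package example

import (
	"github.com/coreos/etcd/clientv3"

	"golang.org/x/net/context"
)

// putIfAbsent shows the order the panics above protect: If before Then,
// Then before Else, each called at most once, followed by Commit.
func putIfAbsent(ctx context.Context, cli *clientv3.Client, key, val string) (bool, error) {
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)). // key absent?
		Then(clientv3.OpPut(key, val)).                             // create it
		Else(clientv3.OpGet(key)).                                  // otherwise read it
		Commit()
	if err != nil {
		return false, err
	}
	// Succeeded is true when the If comparison held and the Then branch ran.
	return resp.Succeeded, nil
}
```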
{ "id": 1, "code_window": [ ")\n", "\n", "func TestTxnPanics(t *testing.T) {\n", "\tkv := &kv{}\n", "\n", "\terrc := make(chan string)\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tdefer testutil.AfterTest(t)\n", "\n" ], "file_path": "clientv3/txn_test.go", "type": "add", "edit_start_line_idx": 22 }
// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rafttest import ( "math/rand" "sync" "time" "github.com/coreos/etcd/raft/raftpb" ) // a network interface type iface interface { send(m raftpb.Message) recv() chan raftpb.Message disconnect() connect() } // a network type network interface { // drop message at given rate (1.0 drops all messages) drop(from, to uint64, rate float64) // delay message for (0, d] randomly at given rate (1.0 delay all messages) // do we need rate here? delay(from, to uint64, d time.Duration, rate float64) disconnect(id uint64) connect(id uint64) // heal heals the network heal() } type raftNetwork struct { mu sync.Mutex disconnected map[uint64]bool dropmap map[conn]float64 delaymap map[conn]delay recvQueues map[uint64]chan raftpb.Message } type conn struct { from, to uint64 } type delay struct { d time.Duration rate float64 } func newRaftNetwork(nodes ...uint64) *raftNetwork { pn := &raftNetwork{ recvQueues: make(map[uint64]chan raftpb.Message), dropmap: make(map[conn]float64), delaymap: make(map[conn]delay), disconnected: make(map[uint64]bool), } for _, n := range nodes { pn.recvQueues[n] = make(chan raftpb.Message, 1024) } return pn } func (rn *raftNetwork) nodeNetwork(id uint64) iface { return &nodeNetwork{id: id, raftNetwork: rn} } func (rn *raftNetwork) send(m raftpb.Message) { rn.mu.Lock() to := rn.recvQueues[m.To] if rn.disconnected[m.To] { to = nil } drop := rn.dropmap[conn{m.From, m.To}] dl := rn.delaymap[conn{m.From, m.To}] rn.mu.Unlock() if to == nil { return } if drop != 0 && rand.Float64() < drop { return } // TODO: shall we dl without blocking the send call? if dl.d != 0 && rand.Float64() < dl.rate { rd := rand.Int63n(int64(dl.d)) time.Sleep(time.Duration(rd)) } select { case to <- m: default: // drop messages when the receiver queue is full. } } func (rn *raftNetwork) recvFrom(from uint64) chan raftpb.Message { rn.mu.Lock() fromc := rn.recvQueues[from] if rn.disconnected[from] { fromc = nil } rn.mu.Unlock() return fromc } func (rn *raftNetwork) drop(from, to uint64, rate float64) { rn.mu.Lock() defer rn.mu.Unlock() rn.dropmap[conn{from, to}] = rate } func (rn *raftNetwork) delay(from, to uint64, d time.Duration, rate float64) { rn.mu.Lock() defer rn.mu.Unlock() rn.delaymap[conn{from, to}] = delay{d, rate} } func (rn *raftNetwork) heal() { rn.mu.Lock() defer rn.mu.Unlock() rn.dropmap = make(map[conn]float64) rn.delaymap = make(map[conn]delay) } func (rn *raftNetwork) disconnect(id uint64) { rn.mu.Lock() defer rn.mu.Unlock() rn.disconnected[id] = true } func (rn *raftNetwork) connect(id uint64) { rn.mu.Lock() defer rn.mu.Unlock() rn.disconnected[id] = false } type nodeNetwork struct { id uint64 *raftNetwork } func (nt *nodeNetwork) connect() { nt.raftNetwork.connect(nt.id) } func (nt *nodeNetwork) disconnect() { nt.raftNetwork.disconnect(nt.id) } func (nt *nodeNetwork) send(m raftpb.Message) { nt.raftNetwork.send(m) } func (nt *nodeNetwork) recv() chan raftpb.Message { return nt.recvFrom(nt.id) }
raft/rafttest/network.go
0
https://github.com/etcd-io/etcd/commit/984badeb03b2aa2ccf02bb9a763ae6b521f9e6c2
[ 0.0015827232273295522, 0.0003170070704072714, 0.00016595219494774938, 0.00017515811487101018, 0.00034301908453926444 ]
{ "id": 1, "code_window": [ ")\n", "\n", "func TestTxnPanics(t *testing.T) {\n", "\tkv := &kv{}\n", "\n", "\terrc := make(chan string)\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tdefer testutil.AfterTest(t)\n", "\n" ], "file_path": "clientv3/txn_test.go", "type": "add", "edit_start_line_idx": 22 }
// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package command import ( "errors" "time" "github.com/coreos/etcd/client" "github.com/urfave/cli" ) // NewMakeDirCommand returns the CLI command for "mkdir". func NewMakeDirCommand() cli.Command { return cli.Command{ Name: "mkdir", Usage: "make a new directory", ArgsUsage: "<key>", Flags: []cli.Flag{ cli.IntFlag{Name: "ttl", Value: 0, Usage: "key time-to-live"}, }, Action: func(c *cli.Context) error { mkdirCommandFunc(c, mustNewKeyAPI(c), client.PrevNoExist) return nil }, } } // mkdirCommandFunc executes the "mkdir" command. func mkdirCommandFunc(c *cli.Context, ki client.KeysAPI, prevExist client.PrevExistType) { if len(c.Args()) == 0 { handleError(ExitBadArgs, errors.New("key required")) } key := c.Args()[0] ttl := c.Int("ttl") ctx, cancel := contextWithTotalTimeout(c) _, err := ki.Set(ctx, key, "", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: prevExist}) cancel() if err != nil { handleError(ExitServerError, err) } }
etcdctl/ctlv2/command/mkdir_command.go
0
https://github.com/etcd-io/etcd/commit/984badeb03b2aa2ccf02bb9a763ae6b521f9e6c2
[ 0.00021697641932405531, 0.0001802195911295712, 0.00016832933761179447, 0.00017403835954610258, 0.000016852882254170254 ]
{ "id": 1, "code_window": [ ")\n", "\n", "func TestTxnPanics(t *testing.T) {\n", "\tkv := &kv{}\n", "\n", "\terrc := make(chan string)\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tdefer testutil.AfterTest(t)\n", "\n" ], "file_path": "clientv3/txn_test.go", "type": "add", "edit_start_line_idx": 22 }
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Defensive debug-only utility to track that functions run on the // goroutine that they're supposed to. package http2 import ( "bytes" "errors" "fmt" "os" "runtime" "strconv" "sync" ) var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" type goroutineLock uint64 func newGoroutineLock() goroutineLock { if !DebugGoroutines { return 0 } return goroutineLock(curGoroutineID()) } func (g goroutineLock) check() { if !DebugGoroutines { return } if curGoroutineID() != uint64(g) { panic("running on the wrong goroutine") } } func (g goroutineLock) checkNotOn() { if !DebugGoroutines { return } if curGoroutineID() == uint64(g) { panic("running on the wrong goroutine") } } var goroutineSpace = []byte("goroutine ") func curGoroutineID() uint64 { bp := littleBuf.Get().(*[]byte) defer littleBuf.Put(bp) b := *bp b = b[:runtime.Stack(b, false)] // Parse the 4707 out of "goroutine 4707 [" b = bytes.TrimPrefix(b, goroutineSpace) i := bytes.IndexByte(b, ' ') if i < 0 { panic(fmt.Sprintf("No space found in %q", b)) } b = b[:i] n, err := parseUintBytes(b, 10, 64) if err != nil { panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) } return n } var littleBuf = sync.Pool{ New: func() interface{} { buf := make([]byte, 64) return &buf }, } // parseUintBytes is like strconv.ParseUint, but using a []byte. func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { var cutoff, maxVal uint64 if bitSize == 0 { bitSize = int(strconv.IntSize) } s0 := s switch { case len(s) < 1: err = strconv.ErrSyntax goto Error case 2 <= base && base <= 36: // valid base; nothing to do case base == 0: // Look for octal, hex prefix. switch { case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): base = 16 s = s[2:] if len(s) < 1 { err = strconv.ErrSyntax goto Error } case s[0] == '0': base = 8 default: base = 10 } default: err = errors.New("invalid base " + strconv.Itoa(base)) goto Error } n = 0 cutoff = cutoff64(base) maxVal = 1<<uint(bitSize) - 1 for i := 0; i < len(s); i++ { var v byte d := s[i] switch { case '0' <= d && d <= '9': v = d - '0' case 'a' <= d && d <= 'z': v = d - 'a' + 10 case 'A' <= d && d <= 'Z': v = d - 'A' + 10 default: n = 0 err = strconv.ErrSyntax goto Error } if int(v) >= base { n = 0 err = strconv.ErrSyntax goto Error } if n >= cutoff { // n*base overflows n = 1<<64 - 1 err = strconv.ErrRange goto Error } n *= uint64(base) n1 := n + uint64(v) if n1 < n || n1 > maxVal { // n+v overflows n = 1<<64 - 1 err = strconv.ErrRange goto Error } n = n1 } return n, nil Error: return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err} } // Return the first number n such that n*base >= 1<<64. func cutoff64(base int) uint64 { if base < 2 { return 0 } return (1<<64-1)/uint64(base) + 1 }
cmd/vendor/golang.org/x/net/http2/gotrack.go
0
https://github.com/etcd-io/etcd/commit/984badeb03b2aa2ccf02bb9a763ae6b521f9e6c2
[ 0.005852534435689449, 0.0009348296443931758, 0.0001688574266154319, 0.00017569810734130442, 0.0016767680644989014 ]
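The gotrack.go record above pins work to a particular goroutine by parsing the numeric ID out of a runtime.Stack dump. The following is a self-contained sketch of that pattern; it does not import the file's unexported helpers, and the names gidLock/curGID are made up for illustration.

```go
package example

import (
	"bytes"
	"fmt"
	"runtime"
	"strconv"
)

// curGID returns the current goroutine's numeric ID by parsing the first
// line of a stack dump, which looks like "goroutine 42 [running]:".
func curGID() uint64 {
	buf := make([]byte, 64)
	buf = buf[:runtime.Stack(buf, false)]
	buf = bytes.TrimPrefix(buf, []byte("goroutine "))
	if i := bytes.IndexByte(buf, ' '); i >= 0 {
		buf = buf[:i]
	}
	n, err := strconv.ParseUint(string(buf), 10, 64)
	if err != nil {
		panic(fmt.Sprintf("cannot parse goroutine ID from %q: %v", buf, err))
	}
	return n
}

// gidLock remembers the goroutine that created it and panics if check is
// called from any other goroutine -- the same defensive idea as goroutineLock.
type gidLock uint64

func newGIDLock() gidLock { return gidLock(curGID()) }

func (g gidLock) check() {
	if curGID() != uint64(g) {
		panic("running on the wrong goroutine")
	}
}
```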
{ "id": 2, "code_window": [ "\t\t\tcontinue\n", "\t\t}\n", "\t\tstack := strings.TrimSpace(sl[1])\n", "\t\tif stack == \"\" ||\n", "\t\t\tstrings.Contains(stack, \"created by testing.RunTests\") ||\n", "\t\t\tstrings.Contains(stack, \"testing.Main(\") ||\n", "\t\t\tstrings.Contains(stack, \"runtime.goexit\") ||\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tstrings.Contains(stack, \"runtime/panic.go\") ||\n" ], "file_path": "pkg/testutil/leak.go", "type": "add", "edit_start_line_idx": 108 }
// Copyright 2016 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package clientv3 import ( "testing" "time" ) func TestTxnPanics(t *testing.T) { kv := &kv{} errc := make(chan string) df := func() { if s := recover(); s != nil { errc <- s.(string) } } cmp := Compare(CreateRevision("foo"), "=", 0) op := OpPut("foo", "bar") tests := []struct { f func() err string }{ { f: func() { defer df() kv.Txn(nil).If(cmp).If(cmp) }, err: "cannot call If twice!", }, { f: func() { defer df() kv.Txn(nil).Then(op).If(cmp) }, err: "cannot call If after Then!", }, { f: func() { defer df() kv.Txn(nil).Else(op).If(cmp) }, err: "cannot call If after Else!", }, { f: func() { defer df() kv.Txn(nil).Then(op).Then(op) }, err: "cannot call Then twice!", }, { f: func() { defer df() kv.Txn(nil).Else(op).Then(op) }, err: "cannot call Then after Else!", }, { f: func() { defer df() kv.Txn(nil).Else(op).Else(op) }, err: "cannot call Else twice!", }, } for i, tt := range tests { go tt.f() select { case err := <-errc: if err != tt.err { t.Errorf("#%d: got %s, wanted %s", i, err, tt.err) } case <-time.After(time.Second): t.Errorf("#%d: did not panic, wanted panic %s", i, tt.err) } } }
clientv3/txn_test.go
1
https://github.com/etcd-io/etcd/commit/984badeb03b2aa2ccf02bb9a763ae6b521f9e6c2
[ 0.0001888790138764307, 0.0001765621273079887, 0.00016682971909176558, 0.00017646627384237945, 0.000005186974703974556 ]
{ "id": 2, "code_window": [ "\t\t\tcontinue\n", "\t\t}\n", "\t\tstack := strings.TrimSpace(sl[1])\n", "\t\tif stack == \"\" ||\n", "\t\t\tstrings.Contains(stack, \"created by testing.RunTests\") ||\n", "\t\t\tstrings.Contains(stack, \"testing.Main(\") ||\n", "\t\t\tstrings.Contains(stack, \"runtime.goexit\") ||\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tstrings.Contains(stack, \"runtime/panic.go\") ||\n" ], "file_path": "pkg/testutil/leak.go", "type": "add", "edit_start_line_idx": 108 }
Copyright (C) 2014 Kevin Ballard Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
cmd/vendor/github.com/kballard/go-shellquote/LICENSE
0
https://github.com/etcd-io/etcd/commit/984badeb03b2aa2ccf02bb9a763ae6b521f9e6c2
[ 0.00017520514666102827, 0.00017426781414542347, 0.00017333048162981868, 0.00017426781414542347, 9.37332515604794e-7 ]
{ "id": 2, "code_window": [ "\t\t\tcontinue\n", "\t\t}\n", "\t\tstack := strings.TrimSpace(sl[1])\n", "\t\tif stack == \"\" ||\n", "\t\t\tstrings.Contains(stack, \"created by testing.RunTests\") ||\n", "\t\t\tstrings.Contains(stack, \"testing.Main(\") ||\n", "\t\t\tstrings.Contains(stack, \"runtime.goexit\") ||\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tstrings.Contains(stack, \"runtime/panic.go\") ||\n" ], "file_path": "pkg/testutil/leak.go", "type": "add", "edit_start_line_idx": 108 }
// Copyright 2016 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package integration import ( "testing" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "github.com/coreos/etcd/integration" "github.com/coreos/etcd/pkg/testutil" "golang.org/x/net/context" ) func TestRoleError(t *testing.T) { defer testutil.AfterTest(t) clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) authapi := clientv3.NewAuth(clus.RandClient()) _, err := authapi.RoleAdd(context.TODO(), "test-role") if err != nil { t.Fatal(err) } _, err = authapi.RoleAdd(context.TODO(), "test-role") if err != rpctypes.ErrRoleAlreadyExist { t.Fatalf("expected %v, got %v", rpctypes.ErrRoleAlreadyExist, err) } }
clientv3/integration/role_test.go
0
https://github.com/etcd-io/etcd/commit/984badeb03b2aa2ccf02bb9a763ae6b521f9e6c2
[ 0.0001792090042727068, 0.00017525001021567732, 0.00017165351891890168, 0.00017605854372959584, 0.000002618214921312756 ]
{ "id": 2, "code_window": [ "\t\t\tcontinue\n", "\t\t}\n", "\t\tstack := strings.TrimSpace(sl[1])\n", "\t\tif stack == \"\" ||\n", "\t\t\tstrings.Contains(stack, \"created by testing.RunTests\") ||\n", "\t\t\tstrings.Contains(stack, \"testing.Main(\") ||\n", "\t\t\tstrings.Contains(stack, \"runtime.goexit\") ||\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tstrings.Contains(stack, \"runtime/panic.go\") ||\n" ], "file_path": "pkg/testutil/leak.go", "type": "add", "edit_start_line_idx": 108 }
// Code generated by protoc-gen-go. // source: runtime/internal/stream_chunk.proto // DO NOT EDIT! /* Package internal is a generated protocol buffer package. It is generated from these files: runtime/internal/stream_chunk.proto It has these top-level messages: StreamError */ package internal import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // StreamError is a response type which is returned when // streaming rpc returns an error. type StreamError struct { GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode" json:"grpc_code,omitempty"` HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode" json:"http_code,omitempty"` Message string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus" json:"http_status,omitempty"` } func (m *StreamError) Reset() { *m = StreamError{} } func (m *StreamError) String() string { return proto.CompactTextString(m) } func (*StreamError) ProtoMessage() {} func (*StreamError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func init() { proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") } func init() { proto.RegisterFile("runtime/internal/stream_chunk.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 180 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x52, 0x2e, 0x2a, 0xcd, 0x2b, 0xc9, 0xcc, 0x4d, 0xd5, 0xcf, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b, 0xcc, 0xd1, 0x2f, 0x2e, 0x29, 0x4a, 0x4d, 0xcc, 0x8d, 0x4f, 0xce, 0x28, 0xcd, 0xcb, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x49, 0x2f, 0x2a, 0x48, 0xd6, 0x4b, 0x4f, 0x2c, 0x49, 0x2d, 0x4f, 0xac, 0xd4, 0x83, 0xea, 0x50, 0x6a, 0x62, 0xe4, 0xe2, 0x0e, 0x06, 0x2b, 0x76, 0x2d, 0x2a, 0xca, 0x2f, 0x12, 0x92, 0xe6, 0xe2, 0x04, 0xa9, 0x8b, 0x4f, 0xce, 0x4f, 0x49, 0x95, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0d, 0xe2, 0x00, 0x09, 0x38, 0x03, 0xf9, 0x20, 0xc9, 0x8c, 0x92, 0x92, 0x02, 0x88, 0x24, 0x13, 0x44, 0x12, 0x24, 0x00, 0x96, 0x94, 0xe0, 0x62, 0xcf, 0x4d, 0x2d, 0x2e, 0x4e, 0x4c, 0x4f, 0x95, 0x60, 0x06, 0x4a, 0x71, 0x06, 0xc1, 0xb8, 0x42, 0xf2, 0x5c, 0xdc, 0x60, 0x6d, 0xc5, 0x25, 0x89, 0x25, 0xa5, 0xc5, 0x12, 0x2c, 0x60, 0x59, 0x2e, 0x90, 0x50, 0x30, 0x58, 0xc4, 0x89, 0x2b, 0x8a, 0x03, 0xe6, 0xf2, 0x24, 0x36, 0xb0, 0x6b, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa9, 0x07, 0x92, 0xb6, 0xd4, 0x00, 0x00, 0x00, }
cmd/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go
0
https://github.com/etcd-io/etcd/commit/984badeb03b2aa2ccf02bb9a763ae6b521f9e6c2
[ 0.0011460289824754, 0.0003084479831159115, 0.00015989423263818026, 0.00017098328680731356, 0.0003419714339543134 ]
{ "id": 0, "code_window": [ "\t\"hash sharded indexes can only be created on a cluster that has fully migrated to version 20.1\")\n", "\n", "var hashShardedIndexesDisabledError = pgerror.Newf(pgcode.FeatureNotSupported,\n", "\t\"hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\")\n", "\n", "func setupShardedIndex(\n", "\tctx context.Context,\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\"hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\")\n" ], "file_path": "pkg/sql/create_index.go", "type": "replace", "edit_start_line_idx": 275 }
statement ok SET experimental_enable_hash_sharded_indexes = true # Tests for creating a hash sharded primary key statement ok CREATE TABLE sharded_primary (a INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 10) query TT SHOW CREATE TABLE sharded_primary ---- sharded_primary CREATE TABLE public.sharded_primary ( a INT8 NOT NULL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (crdb_internal_a_shard_10, a) ) statement error pgcode 22023 BUCKET_COUNT must be an integer greater than 1 CREATE TABLE invalid_bucket_count (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT=-1) statement error pgcode 22023 BUCKET_COUNT must be an integer greater than 1 CREATE TABLE invalid_bucket_count (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT=1) statement error expected BUCKET_COUNT expression to have type int, but '2.32' has type decimal CREATE TABLE fractional_bucket_count (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT=2.32) statement error variable sub-expressions are not allowed in BUCKET_COUNT CREATE TABLE invalid_bucket_count (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT=(SELECT 1)) # Ensure that this is round-tripable statement ok DROP TABLE sharded_primary statement ok CREATE TABLE sharded_primary ( a INT8 NOT NULL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (crdb_internal_a_shard_10, a) ) query TT SHOW CREATE TABLE sharded_primary ---- sharded_primary CREATE TABLE public.sharded_primary ( a INT8 NOT NULL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (crdb_internal_a_shard_10, a) ) statement ok INSERT INTO sharded_primary values (1), (2), (3) query error pq: duplicate key value \(crdb_internal_a_shard_10,a\)=\(6,1\) violates unique constraint "primary" INSERT INTO sharded_primary values (1) # Ensure that the shard column is assigned into the column family of the first column in # the index column set. 
statement ok CREATE TABLE specific_family ( a INT, b INT, INDEX (b) USING HASH WITH BUCKET_COUNT=10, FAMILY "a_family" (a), FAMILY "b_family" (b) ) query TT SHOW CREATE TABLE specific_family ---- specific_family CREATE TABLE public.specific_family ( a INT8 NULL, b INT8 NULL, INDEX specific_family_crdb_internal_b_shard_10_b_idx (b ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY a_family (a, rowid), FAMILY b_family (b, crdb_internal_b_shard_10) ) # Tests for secondary sharded indexes statement ok CREATE TABLE sharded_secondary (a INT, INDEX (a) USING HASH WITH BUCKET_COUNT=4) query TT SHOW CREATE TABLE sharded_secondary ---- sharded_secondary CREATE TABLE public.sharded_secondary ( a INT8 NULL, INDEX sharded_secondary_crdb_internal_a_shard_4_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 4, FAMILY "primary" (a, crdb_internal_a_shard_4, rowid) ) statement ok DROP TABLE sharded_secondary statement ok CREATE TABLE sharded_secondary ( a INT8 NULL, INDEX sharded_secondary_crdb_internal_a_shard_4_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 4, FAMILY "primary" (a, crdb_internal_a_shard_4, rowid) ) query TT SHOW CREATE TABLE sharded_secondary ---- sharded_secondary CREATE TABLE public.sharded_secondary ( a INT8 NULL, INDEX sharded_secondary_crdb_internal_a_shard_4_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 4, FAMILY "primary" (a, crdb_internal_a_shard_4, rowid) ) statement ok INSERT INTO sharded_secondary values (1), (2), (1) statement ok DROP TABLE sharded_secondary statement ok CREATE TABLE sharded_secondary ( a INT ) statement ok CREATE INDEX ON sharded_secondary (a) USING HASH WITH BUCKET_COUNT = 10 statement ok INSERT INTO sharded_secondary values (1), (2), (1) query TT SHOW CREATE TABLE sharded_secondary ---- sharded_secondary CREATE TABLE public.sharded_secondary ( a INT8 NULL, INDEX sharded_secondary_crdb_internal_a_shard_10_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (a, rowid, crdb_internal_a_shard_10) ) statement ok INSERT INTO sharded_secondary values (3), (2), (1) # Test multiple indexes on the same column set statement ok CREATE INDEX ON sharded_secondary (a) USING HASH WITH BUCKET_COUNT = 4 query TT SHOW CREATE TABLE sharded_secondary ---- sharded_secondary CREATE TABLE public.sharded_secondary ( a INT8 NULL, INDEX sharded_secondary_crdb_internal_a_shard_10_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_secondary_crdb_internal_a_shard_4_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 4, FAMILY "primary" (a, rowid, crdb_internal_a_shard_10, crdb_internal_a_shard_4) ) # Drop a sharded index and ensure that the shard column is dropped with it. statement ok DROP INDEX sharded_secondary_crdb_internal_a_shard_4_a_idx query TT SHOW CREATE TABLE sharded_secondary ---- sharded_secondary CREATE TABLE public.sharded_secondary ( a INT8 NULL, INDEX sharded_secondary_crdb_internal_a_shard_10_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (a, rowid, crdb_internal_a_shard_10) ) statement ok DROP INDEX sharded_secondary_crdb_internal_a_shard_10_a_idx query TT SHOW CREATE TABLE sharded_secondary ---- sharded_secondary CREATE TABLE public.sharded_secondary ( a INT8 NULL, FAMILY "primary" (a, rowid) ) # Ensure that the shard column cannot be used in the same txn if its dropped along with # the sharded index. 
statement ok CREATE INDEX idx on sharded_secondary (a) USING HASH WITH BUCKET_COUNT = 3 statement ok BEGIN statement ok SELECT crdb_internal_a_shard_3 FROM sharded_secondary statement ok DROP INDEX sharded_secondary@idx statement error pq: column "crdb_internal_a_shard_3" does not exist SELECT crdb_internal_a_shard_3 FROM sharded_secondary statement ok ROLLBACK statement ok DROP INDEX sharded_secondary@idx # Ensure that multiple (> 2) identical indexes can be created. statement ok CREATE INDEX ON sharded_secondary (a) USING HASH WITH BUCKET_COUNT=10 statement ok CREATE INDEX ON sharded_secondary (a) USING HASH WITH BUCKET_COUNT=10 statement ok CREATE INDEX ON sharded_secondary (a) USING HASH WITH BUCKET_COUNT=10 query TT SHOW CREATE TABLE sharded_secondary ---- sharded_secondary CREATE TABLE public.sharded_secondary ( a INT8 NULL, INDEX sharded_secondary_crdb_internal_a_shard_10_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_secondary_crdb_internal_a_shard_10_a_idx1 (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_secondary_crdb_internal_a_shard_10_a_idx2 (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (a, rowid, crdb_internal_a_shard_10) ) # Ensure that the table descriptor was left in a "valid" state query I SELECT count(*) FROM sharded_secondary ---- 6 statement ok CREATE INDEX ON sharded_primary (a) USING HASH WITH BUCKET_COUNT = 4; query TT SHOW CREATE TABLE sharded_primary ---- sharded_primary CREATE TABLE public.sharded_primary ( a INT8 NOT NULL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_primary_crdb_internal_a_shard_4_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 4, FAMILY "primary" (crdb_internal_a_shard_10, a, crdb_internal_a_shard_4) ) statement ok DROP INDEX sharded_primary_crdb_internal_a_shard_4_a_idx statement ok SELECT count(*) FROM sharded_primary query TT SHOW CREATE TABLE sharded_primary ---- sharded_primary CREATE TABLE public.sharded_primary ( a INT8 NOT NULL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (crdb_internal_a_shard_10, a) ) statement ok CREATE INDEX on sharded_primary (a) USING HASH WITH BUCKET_COUNT=10; query TT SHOW CREATE TABLE sharded_primary ---- sharded_primary CREATE TABLE public.sharded_primary ( a INT8 NOT NULL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_primary_crdb_internal_a_shard_10_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (crdb_internal_a_shard_10, a) ) statement ok DROP INDEX sharded_primary_crdb_internal_a_shard_10_a_idx # Ensure that the table descriptor was left in a "valid" state statement ok SELECT count(*) FROM sharded_primary statement ok DROP TABLE sharded_secondary statement ok CREATE TABLE sharded_secondary (a INT8, INDEX (a) USING HASH WITH BUCKET_COUNT=12) # Ensure that hash sharded indexes can be created on columns that are added in the same # statement, just like non-sharded indexes. 
statement ok BEGIN TRANSACTION statement ok ALTER TABLE sharded_secondary ADD COLUMN b INT statement ok CREATE INDEX ON sharded_secondary (a, b) USING HASH WITH BUCKET_COUNT=12 statement ok COMMIT TRANSACTION # Ensure that sharded indexes cannot be created on computed columns statement ok ALTER TABLE sharded_secondary ADD COLUMN c INT AS (mod(a, 100)) STORED statement error cannot create a sharded index on a computed column CREATE INDEX ON sharded_secondary (a, c) USING HASH WITH BUCKET_COUNT=12; # Ensure that sharded indexes cannot be created on computed columns # in the same txn statement error cannot create a sharded index on a computed column CREATE TABLE shard_on_computed_column ( a INT, b INT AS (a % 5) STORED, INDEX (b) USING HASH WITH BUCKET_COUNT=10 ) statement ok BEGIN TRANSACTION statement ok ALTER TABLE sharded_secondary ADD COLUMN d INT AS (mod(a, 100)) STORED statement error cannot create a sharded index on a computed column CREATE INDEX ON sharded_secondary (a, d) USING HASH WITH BUCKET_COUNT=12; statement ok ROLLBACK TRANSACTION # Ensure that the shard column isn't dropped even if its being used by a non-sharded index statement ok CREATE TABLE column_used_on_unsharded ( a INT, INDEX foo (a) USING HASH WITH BUCKET_COUNT=10 ) statement ok CREATE INDEX on column_used_on_unsharded (crdb_internal_a_shard_10) statement ok DROP INDEX column_used_on_unsharded@foo query TT SHOW CREATE TABLE column_used_on_unsharded ---- column_used_on_unsharded CREATE TABLE public.column_used_on_unsharded ( a INT8 NULL, INDEX column_used_on_unsharded_crdb_internal_a_shard_10_idx (crdb_internal_a_shard_10 ASC), FAMILY "primary" (a, crdb_internal_a_shard_10, rowid) ) statement ok DROP INDEX column_used_on_unsharded_crdb_internal_a_shard_10_idx statement ok CREATE TABLE column_used_on_unsharded_create_table ( a INT, INDEX foo (a) USING HASH WITH BUCKET_COUNT=10, INDEX (crdb_internal_a_shard_10) ) statement ok DROP INDEX column_used_on_unsharded_create_table@foo query TT SHOW CREATE TABLE column_used_on_unsharded_create_table ---- column_used_on_unsharded_create_table CREATE TABLE public.column_used_on_unsharded_create_table ( a INT8 NULL, INDEX column_used_on_unsharded_create_table_crdb_internal_a_shard_10_idx (crdb_internal_a_shard_10 ASC), FAMILY "primary" (a, crdb_internal_a_shard_10, rowid) ) statement ok DROP INDEX column_used_on_unsharded_create_table_crdb_internal_a_shard_10_idx statement ok DROP TABLE sharded_primary statement ok SET experimental_enable_hash_sharded_indexes = false statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting CREATE TABLE disabled (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 10) statement ok CREATE TABLE disabled_secondary (k INT, v BYTES) statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting CREATE INDEX failure on disabled_secondary (k) USING HASH WITH BUCKET_COUNT = 12 statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting CREATE TABLE disabled (k INT, INDEX (k) USING HASH WITH BUCKET_COUNT = 10) # Ensure everything works with weird column names statement ok SET experimental_enable_hash_sharded_indexes = true statement ok CREATE TABLE weird_names ( "I am a column with spaces" INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 12, "'quotes' in the column's name" INT, FAMILY "primary" ("I am a column with spaces", "'quotes' in the column's name") ) statement ok CREATE INDEX foo on weird_names ("'quotes' 
in the column's name") USING HASH WITH BUCKET_COUNT = 4 statement ok INSERT INTO weird_names VALUES (1, 2) query I SELECT count(*) from weird_names WHERE "'quotes' in the column's name" = 2 ---- 1 query TT SHOW CREATE TABLE weird_names ---- weird_names CREATE TABLE public.weird_names ( "I am a column with spaces" INT8 NOT NULL, "'quotes' in the column's name" INT8 NULL, CONSTRAINT "primary" PRIMARY KEY ("I am a column with spaces" ASC) USING HASH WITH BUCKET_COUNT = 12, INDEX foo ("'quotes' in the column's name" ASC) USING HASH WITH BUCKET_COUNT = 4, FAMILY "primary" ("I am a column with spaces", "'quotes' in the column's name", "crdb_internal_I am a column with spaces_shard_12", "crdb_internal_'quotes' in the column's name_shard_4") ) subtest interleave_disabled statement ok CREATE TABLE parent (x INT PRIMARY KEY); statement error pq: interleaved indexes cannot also be hash sharded CREATE TABLE t (x INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 10) INTERLEAVE IN PARENT parent (x) statement error pq: interleaved indexes cannot also be hash sharded CREATE TABLE t (x INT, y INT, PRIMARY KEY (x, y) USING HASH WITH BUCKET_COUNT = 10) INTERLEAVE IN PARENT parent (x) statement error pq: interleaved indexes cannot also be hash sharded CREATE INDEX ON parent (x) USING HASH WITH BUCKET_COUNT = 10 INTERLEAVE IN PARENT parent(x) statement ok DROP TABLE parent; # This test ensures that the appropriate error is returned when trying to create # a hash sharded index with a column which does not exist. subtest column_does_not_exist statement ok CREATE TABLE t0(); statement error column "c0" does not exist CREATE INDEX ON t0 (c0) USING HASH WITH BUCKET_COUNT = 8; statement ok DROP TABLE t0; # Test that creating an index on a column which is currently being dropped # causes an error. subtest create_hash_index_on_dropping_column statement ok CREATE TABLE create_idx_drop_column (c0 INT PRIMARY KEY, c1 INT); statement ok begin; ALTER TABLE create_idx_drop_column DROP COLUMN c1; statement error column "c1" does not exist CREATE INDEX idx_create_idx_drop_column ON create_idx_drop_column (c1) USING HASH WITH BUCKET_COUNT = 8; statement ok ROLLBACK; statement ok DROP TABLE create_idx_drop_column; # Test that NULL values can be a part of a hash-sharded index. subtest null_values_in_sharded_columns statement ok CREATE TABLE sharded_index_with_nulls ( a INT8 PRIMARY KEY, b INT8, INDEX (b) USING HASH WITH BUCKET_COUNT = 8 ) statement ok INSERT INTO sharded_index_with_nulls VALUES (1, NULL); statement ok DROP TABLE sharded_index_with_nulls; # Test that renaming a column which is a member of a hash sharded index works. subtest rename_column statement ok CREATE TABLE rename_column ( c0 INT, c1 INT, c2 INT, PRIMARY KEY (c0, c1) USING HASH WITH BUCKET_COUNT = 8, INDEX (c2) USING HASH WITH BUCKET_COUNT = 8, FAMILY "primary" (c0, c1, c2) ); statement ok INSERT INTO rename_column VALUES (1, 2, 3); query TT SHOW CREATE TABLE rename_column ---- rename_column CREATE TABLE public.rename_column ( c0 INT8 NOT NULL, c1 INT8 NOT NULL, c2 INT8 NULL, CONSTRAINT "primary" PRIMARY KEY (c0 ASC, c1 ASC) USING HASH WITH BUCKET_COUNT = 8, INDEX rename_column_crdb_internal_c2_shard_8_c2_idx (c2 ASC) USING HASH WITH BUCKET_COUNT = 8, FAMILY "primary" (c0, c1, c2, crdb_internal_c0_c1_shard_8, crdb_internal_c2_shard_8) ) statement ok ALTER TABLE rename_column RENAME c2 TO c3; # Test mucking with primary key columns. 
statement ok ALTER TABLE rename_column RENAME c1 TO c2; statement ok ALTER TABLE rename_column RENAME c0 TO c1; query TT SHOW CREATE TABLE rename_column ---- rename_column CREATE TABLE public.rename_column ( c1 INT8 NOT NULL, c2 INT8 NOT NULL, c3 INT8 NULL, CONSTRAINT "primary" PRIMARY KEY (c1 ASC, c2 ASC) USING HASH WITH BUCKET_COUNT = 8, INDEX rename_column_crdb_internal_c2_shard_8_c2_idx (c3 ASC) USING HASH WITH BUCKET_COUNT = 8, FAMILY "primary" (c1, c2, c3, crdb_internal_c1_c2_shard_8, crdb_internal_c3_shard_8) ) query III SELECT c3, c2, c1 FROM rename_column ---- 3 2 1 # Test both at the same time. statement ok ALTER TABLE rename_column RENAME c1 TO c0, RENAME c2 TO c1, RENAME c3 TO c2; query TT SHOW CREATE TABLE rename_column ---- rename_column CREATE TABLE public.rename_column ( c0 INT8 NOT NULL, c1 INT8 NOT NULL, c2 INT8 NULL, CONSTRAINT "primary" PRIMARY KEY (c0 ASC, c1 ASC) USING HASH WITH BUCKET_COUNT = 8, INDEX rename_column_crdb_internal_c2_shard_8_c2_idx (c2 ASC) USING HASH WITH BUCKET_COUNT = 8, FAMILY "primary" (c0, c1, c2, crdb_internal_c0_c1_shard_8, crdb_internal_c2_shard_8) ) query III SELECT c2, c1, c0 FROM rename_column ---- 3 2 1 # Ensure that renaming a shard column fails. statement error cannot rename shard column ALTER TABLE rename_column RENAME crdb_internal_c2_shard_8 TO foo; statement ok DROP TABLE rename_column;
pkg/sql/logictest/testdata/logic_test/hash_sharded_index
1
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.23841996490955353, 0.008168770000338554, 0.00016145694826263934, 0.0014756423188373446, 0.03154733031988144 ]
{ "id": 0, "code_window": [ "\t\"hash sharded indexes can only be created on a cluster that has fully migrated to version 20.1\")\n", "\n", "var hashShardedIndexesDisabledError = pgerror.Newf(pgcode.FeatureNotSupported,\n", "\t\"hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\")\n", "\n", "func setupShardedIndex(\n", "\tctx context.Context,\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\"hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\")\n" ], "file_path": "pkg/sql/create_index.go", "type": "replace", "edit_start_line_idx": 275 }
select_clause ::= ( simple_select_clause | values_clause | table_clause | set_operation ) | '(' ( simple_select_clause | values_clause | table_clause | set_operation ) ')'
docs/generated/sql/bnf/select_clause.bnf
0
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.000174079614225775, 0.000174079614225775, 0.000174079614225775, 0.000174079614225775, 0 ]
{ "id": 0, "code_window": [ "\t\"hash sharded indexes can only be created on a cluster that has fully migrated to version 20.1\")\n", "\n", "var hashShardedIndexesDisabledError = pgerror.Newf(pgcode.FeatureNotSupported,\n", "\t\"hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\")\n", "\n", "func setupShardedIndex(\n", "\tctx context.Context,\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\"hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\")\n" ], "file_path": "pkg/sql/create_index.go", "type": "replace", "edit_start_line_idx": 275 }
// Copyright 2017 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package ledger import ( "context" gosql "database/sql" "math/rand" "strconv" "strings" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload/histogram" "github.com/cockroachdb/errors" ) type worker struct { config *ledger hists *histogram.Histograms db *gosql.DB rng *rand.Rand deckPerm []int permIdx int } type ledgerTx interface { run(config *ledger, db *gosql.DB, rng *rand.Rand) (interface{}, error) } type tx struct { ledgerTx weight int // percent likelihood that each transaction type is run name string // display name } var allTxs = [...]tx{ { ledgerTx: balance{}, name: "balance", }, { ledgerTx: withdrawal{}, name: "withdrawal", }, { ledgerTx: deposit{}, name: "deposit", }, { ledgerTx: reversal{}, name: "reversal", }, } func initializeMix(config *ledger) error { config.txs = append([]tx(nil), allTxs[0:]...) nameToTx := make(map[string]int, len(allTxs)) for i, tx := range config.txs { nameToTx[tx.name] = i } items := strings.Split(config.mix, `,`) totalWeight := 0 for _, item := range items { kv := strings.Split(item, `=`) if len(kv) != 2 { return errors.Errorf(`Invalid mix %s: %s is not a k=v pair`, config.mix, item) } txName, weightStr := kv[0], kv[1] weight, err := strconv.Atoi(weightStr) if err != nil { return errors.Errorf( `Invalid percentage mix %s: %s is not an integer`, config.mix, weightStr) } i, ok := nameToTx[txName] if !ok { return errors.Errorf( `Invalid percentage mix %s: no such transaction %s`, config.mix, txName) } config.txs[i].weight = weight totalWeight += weight } config.deck = make([]int, 0, totalWeight) for i, t := range config.txs { for j := 0; j < t.weight; j++ { config.deck = append(config.deck, i) } } return nil } func (w *worker) run(ctx context.Context) error { if w.permIdx == len(w.deckPerm) { rand.Shuffle(len(w.deckPerm), func(i, j int) { w.deckPerm[i], w.deckPerm[j] = w.deckPerm[j], w.deckPerm[i] }) w.permIdx = 0 } // Move through our permutation slice until its exhausted, using each value to // to index into our deck of transactions, which contains indexes into the // txs slice. opIdx := w.deckPerm[w.permIdx] t := w.config.txs[opIdx] w.permIdx++ start := timeutil.Now() if _, err := t.run(w.config, w.db, w.rng); err != nil { return errors.Wrapf(err, "error in %s", t.name) } elapsed := timeutil.Since(start) w.hists.Get(t.name).Record(elapsed) return nil }
pkg/workload/ledger/worker.go
0
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.0008315079612657428, 0.00022456393344327807, 0.0001646738382987678, 0.00017257090075872838, 0.0001753922551870346 ]
{ "id": 0, "code_window": [ "\t\"hash sharded indexes can only be created on a cluster that has fully migrated to version 20.1\")\n", "\n", "var hashShardedIndexesDisabledError = pgerror.Newf(pgcode.FeatureNotSupported,\n", "\t\"hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\")\n", "\n", "func setupShardedIndex(\n", "\tctx context.Context,\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\"hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\")\n" ], "file_path": "pkg/sql/create_index.go", "type": "replace", "edit_start_line_idx": 275 }
optgen rulenames test.opt define And { Left Expr Right Expr } define True {} define Join { Left RelExpr Right RelExpr } [SimplifyTrueAnd, Normalize] (And (True) $right:*) => $right [NormalizeNestedAnds, Normalize] (And $left:* (And $innerLeft:* $innerRight:*)) => (And (ConcatLeftDeepAnds $left $innerLeft) $innerRight) [CommuteJoin, Explore] (Join $r:* $s:*) => (Join $s $r) ---- ---- // Code generated by optgen; [omitted] package opt const ( startAutoRule RuleName = iota + NumManualRuleNames // ------------------------------------------------------------ // Normalize Rule Names // ------------------------------------------------------------ SimplifyTrueAnd NormalizeNestedAnds // startExploreRule tracks the number of normalization rules; // all rules greater than this value are exploration rules. startExploreRule // ------------------------------------------------------------ // Explore Rule Names // ------------------------------------------------------------ CommuteJoin // NumRuleNames tracks the total count of rule names. NumRuleNames ) ---- ----
pkg/sql/opt/optgen/cmd/optgen/testdata/rulenames
0
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.0001751062663970515, 0.00017126480815932155, 0.00016577223141212016, 0.00017221507732756436, 0.0000034436368423484964 ]
{ "id": 1, "code_window": [ "statement ok\n", "SET experimental_enable_hash_sharded_indexes = false\n", "\n", "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\n", "CREATE TABLE disabled (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 10)\n", "\n", "statement ok\n", "CREATE TABLE disabled_secondary (k INT, v BYTES)\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\n" ], "file_path": "pkg/sql/logictest/testdata/logic_test/hash_sharded_index", "type": "replace", "edit_start_line_idx": 376 }
statement ok SET experimental_enable_hash_sharded_indexes = true # Tests for creating a hash sharded primary key statement ok CREATE TABLE sharded_primary (a INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 10) query TT SHOW CREATE TABLE sharded_primary ---- sharded_primary CREATE TABLE public.sharded_primary ( a INT8 NOT NULL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (crdb_internal_a_shard_10, a) ) statement error pgcode 22023 BUCKET_COUNT must be an integer greater than 1 CREATE TABLE invalid_bucket_count (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT=-1) statement error pgcode 22023 BUCKET_COUNT must be an integer greater than 1 CREATE TABLE invalid_bucket_count (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT=1) statement error expected BUCKET_COUNT expression to have type int, but '2.32' has type decimal CREATE TABLE fractional_bucket_count (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT=2.32) statement error variable sub-expressions are not allowed in BUCKET_COUNT CREATE TABLE invalid_bucket_count (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT=(SELECT 1)) # Ensure that this is round-tripable statement ok DROP TABLE sharded_primary statement ok CREATE TABLE sharded_primary ( a INT8 NOT NULL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (crdb_internal_a_shard_10, a) ) query TT SHOW CREATE TABLE sharded_primary ---- sharded_primary CREATE TABLE public.sharded_primary ( a INT8 NOT NULL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (crdb_internal_a_shard_10, a) ) statement ok INSERT INTO sharded_primary values (1), (2), (3) query error pq: duplicate key value \(crdb_internal_a_shard_10,a\)=\(6,1\) violates unique constraint "primary" INSERT INTO sharded_primary values (1) # Ensure that the shard column is assigned into the column family of the first column in # the index column set. 
statement ok CREATE TABLE specific_family ( a INT, b INT, INDEX (b) USING HASH WITH BUCKET_COUNT=10, FAMILY "a_family" (a), FAMILY "b_family" (b) ) query TT SHOW CREATE TABLE specific_family ---- specific_family CREATE TABLE public.specific_family ( a INT8 NULL, b INT8 NULL, INDEX specific_family_crdb_internal_b_shard_10_b_idx (b ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY a_family (a, rowid), FAMILY b_family (b, crdb_internal_b_shard_10) ) # Tests for secondary sharded indexes statement ok CREATE TABLE sharded_secondary (a INT, INDEX (a) USING HASH WITH BUCKET_COUNT=4) query TT SHOW CREATE TABLE sharded_secondary ---- sharded_secondary CREATE TABLE public.sharded_secondary ( a INT8 NULL, INDEX sharded_secondary_crdb_internal_a_shard_4_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 4, FAMILY "primary" (a, crdb_internal_a_shard_4, rowid) ) statement ok DROP TABLE sharded_secondary statement ok CREATE TABLE sharded_secondary ( a INT8 NULL, INDEX sharded_secondary_crdb_internal_a_shard_4_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 4, FAMILY "primary" (a, crdb_internal_a_shard_4, rowid) ) query TT SHOW CREATE TABLE sharded_secondary ---- sharded_secondary CREATE TABLE public.sharded_secondary ( a INT8 NULL, INDEX sharded_secondary_crdb_internal_a_shard_4_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 4, FAMILY "primary" (a, crdb_internal_a_shard_4, rowid) ) statement ok INSERT INTO sharded_secondary values (1), (2), (1) statement ok DROP TABLE sharded_secondary statement ok CREATE TABLE sharded_secondary ( a INT ) statement ok CREATE INDEX ON sharded_secondary (a) USING HASH WITH BUCKET_COUNT = 10 statement ok INSERT INTO sharded_secondary values (1), (2), (1) query TT SHOW CREATE TABLE sharded_secondary ---- sharded_secondary CREATE TABLE public.sharded_secondary ( a INT8 NULL, INDEX sharded_secondary_crdb_internal_a_shard_10_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (a, rowid, crdb_internal_a_shard_10) ) statement ok INSERT INTO sharded_secondary values (3), (2), (1) # Test multiple indexes on the same column set statement ok CREATE INDEX ON sharded_secondary (a) USING HASH WITH BUCKET_COUNT = 4 query TT SHOW CREATE TABLE sharded_secondary ---- sharded_secondary CREATE TABLE public.sharded_secondary ( a INT8 NULL, INDEX sharded_secondary_crdb_internal_a_shard_10_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_secondary_crdb_internal_a_shard_4_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 4, FAMILY "primary" (a, rowid, crdb_internal_a_shard_10, crdb_internal_a_shard_4) ) # Drop a sharded index and ensure that the shard column is dropped with it. statement ok DROP INDEX sharded_secondary_crdb_internal_a_shard_4_a_idx query TT SHOW CREATE TABLE sharded_secondary ---- sharded_secondary CREATE TABLE public.sharded_secondary ( a INT8 NULL, INDEX sharded_secondary_crdb_internal_a_shard_10_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (a, rowid, crdb_internal_a_shard_10) ) statement ok DROP INDEX sharded_secondary_crdb_internal_a_shard_10_a_idx query TT SHOW CREATE TABLE sharded_secondary ---- sharded_secondary CREATE TABLE public.sharded_secondary ( a INT8 NULL, FAMILY "primary" (a, rowid) ) # Ensure that the shard column cannot be used in the same txn if its dropped along with # the sharded index. 
statement ok CREATE INDEX idx on sharded_secondary (a) USING HASH WITH BUCKET_COUNT = 3 statement ok BEGIN statement ok SELECT crdb_internal_a_shard_3 FROM sharded_secondary statement ok DROP INDEX sharded_secondary@idx statement error pq: column "crdb_internal_a_shard_3" does not exist SELECT crdb_internal_a_shard_3 FROM sharded_secondary statement ok ROLLBACK statement ok DROP INDEX sharded_secondary@idx # Ensure that multiple (> 2) identical indexes can be created. statement ok CREATE INDEX ON sharded_secondary (a) USING HASH WITH BUCKET_COUNT=10 statement ok CREATE INDEX ON sharded_secondary (a) USING HASH WITH BUCKET_COUNT=10 statement ok CREATE INDEX ON sharded_secondary (a) USING HASH WITH BUCKET_COUNT=10 query TT SHOW CREATE TABLE sharded_secondary ---- sharded_secondary CREATE TABLE public.sharded_secondary ( a INT8 NULL, INDEX sharded_secondary_crdb_internal_a_shard_10_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_secondary_crdb_internal_a_shard_10_a_idx1 (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_secondary_crdb_internal_a_shard_10_a_idx2 (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (a, rowid, crdb_internal_a_shard_10) ) # Ensure that the table descriptor was left in a "valid" state query I SELECT count(*) FROM sharded_secondary ---- 6 statement ok CREATE INDEX ON sharded_primary (a) USING HASH WITH BUCKET_COUNT = 4; query TT SHOW CREATE TABLE sharded_primary ---- sharded_primary CREATE TABLE public.sharded_primary ( a INT8 NOT NULL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_primary_crdb_internal_a_shard_4_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 4, FAMILY "primary" (crdb_internal_a_shard_10, a, crdb_internal_a_shard_4) ) statement ok DROP INDEX sharded_primary_crdb_internal_a_shard_4_a_idx statement ok SELECT count(*) FROM sharded_primary query TT SHOW CREATE TABLE sharded_primary ---- sharded_primary CREATE TABLE public.sharded_primary ( a INT8 NOT NULL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (crdb_internal_a_shard_10, a) ) statement ok CREATE INDEX on sharded_primary (a) USING HASH WITH BUCKET_COUNT=10; query TT SHOW CREATE TABLE sharded_primary ---- sharded_primary CREATE TABLE public.sharded_primary ( a INT8 NOT NULL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_primary_crdb_internal_a_shard_10_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY "primary" (crdb_internal_a_shard_10, a) ) statement ok DROP INDEX sharded_primary_crdb_internal_a_shard_10_a_idx # Ensure that the table descriptor was left in a "valid" state statement ok SELECT count(*) FROM sharded_primary statement ok DROP TABLE sharded_secondary statement ok CREATE TABLE sharded_secondary (a INT8, INDEX (a) USING HASH WITH BUCKET_COUNT=12) # Ensure that hash sharded indexes can be created on columns that are added in the same # statement, just like non-sharded indexes. 
statement ok BEGIN TRANSACTION statement ok ALTER TABLE sharded_secondary ADD COLUMN b INT statement ok CREATE INDEX ON sharded_secondary (a, b) USING HASH WITH BUCKET_COUNT=12 statement ok COMMIT TRANSACTION # Ensure that sharded indexes cannot be created on computed columns statement ok ALTER TABLE sharded_secondary ADD COLUMN c INT AS (mod(a, 100)) STORED statement error cannot create a sharded index on a computed column CREATE INDEX ON sharded_secondary (a, c) USING HASH WITH BUCKET_COUNT=12; # Ensure that sharded indexes cannot be created on computed columns # in the same txn statement error cannot create a sharded index on a computed column CREATE TABLE shard_on_computed_column ( a INT, b INT AS (a % 5) STORED, INDEX (b) USING HASH WITH BUCKET_COUNT=10 ) statement ok BEGIN TRANSACTION statement ok ALTER TABLE sharded_secondary ADD COLUMN d INT AS (mod(a, 100)) STORED statement error cannot create a sharded index on a computed column CREATE INDEX ON sharded_secondary (a, d) USING HASH WITH BUCKET_COUNT=12; statement ok ROLLBACK TRANSACTION # Ensure that the shard column isn't dropped even if its being used by a non-sharded index statement ok CREATE TABLE column_used_on_unsharded ( a INT, INDEX foo (a) USING HASH WITH BUCKET_COUNT=10 ) statement ok CREATE INDEX on column_used_on_unsharded (crdb_internal_a_shard_10) statement ok DROP INDEX column_used_on_unsharded@foo query TT SHOW CREATE TABLE column_used_on_unsharded ---- column_used_on_unsharded CREATE TABLE public.column_used_on_unsharded ( a INT8 NULL, INDEX column_used_on_unsharded_crdb_internal_a_shard_10_idx (crdb_internal_a_shard_10 ASC), FAMILY "primary" (a, crdb_internal_a_shard_10, rowid) ) statement ok DROP INDEX column_used_on_unsharded_crdb_internal_a_shard_10_idx statement ok CREATE TABLE column_used_on_unsharded_create_table ( a INT, INDEX foo (a) USING HASH WITH BUCKET_COUNT=10, INDEX (crdb_internal_a_shard_10) ) statement ok DROP INDEX column_used_on_unsharded_create_table@foo query TT SHOW CREATE TABLE column_used_on_unsharded_create_table ---- column_used_on_unsharded_create_table CREATE TABLE public.column_used_on_unsharded_create_table ( a INT8 NULL, INDEX column_used_on_unsharded_create_table_crdb_internal_a_shard_10_idx (crdb_internal_a_shard_10 ASC), FAMILY "primary" (a, crdb_internal_a_shard_10, rowid) ) statement ok DROP INDEX column_used_on_unsharded_create_table_crdb_internal_a_shard_10_idx statement ok DROP TABLE sharded_primary statement ok SET experimental_enable_hash_sharded_indexes = false statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting CREATE TABLE disabled (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 10) statement ok CREATE TABLE disabled_secondary (k INT, v BYTES) statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting CREATE INDEX failure on disabled_secondary (k) USING HASH WITH BUCKET_COUNT = 12 statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting CREATE TABLE disabled (k INT, INDEX (k) USING HASH WITH BUCKET_COUNT = 10) # Ensure everything works with weird column names statement ok SET experimental_enable_hash_sharded_indexes = true statement ok CREATE TABLE weird_names ( "I am a column with spaces" INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 12, "'quotes' in the column's name" INT, FAMILY "primary" ("I am a column with spaces", "'quotes' in the column's name") ) statement ok CREATE INDEX foo on weird_names ("'quotes' 
in the column's name") USING HASH WITH BUCKET_COUNT = 4 statement ok INSERT INTO weird_names VALUES (1, 2) query I SELECT count(*) from weird_names WHERE "'quotes' in the column's name" = 2 ---- 1 query TT SHOW CREATE TABLE weird_names ---- weird_names CREATE TABLE public.weird_names ( "I am a column with spaces" INT8 NOT NULL, "'quotes' in the column's name" INT8 NULL, CONSTRAINT "primary" PRIMARY KEY ("I am a column with spaces" ASC) USING HASH WITH BUCKET_COUNT = 12, INDEX foo ("'quotes' in the column's name" ASC) USING HASH WITH BUCKET_COUNT = 4, FAMILY "primary" ("I am a column with spaces", "'quotes' in the column's name", "crdb_internal_I am a column with spaces_shard_12", "crdb_internal_'quotes' in the column's name_shard_4") ) subtest interleave_disabled statement ok CREATE TABLE parent (x INT PRIMARY KEY); statement error pq: interleaved indexes cannot also be hash sharded CREATE TABLE t (x INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 10) INTERLEAVE IN PARENT parent (x) statement error pq: interleaved indexes cannot also be hash sharded CREATE TABLE t (x INT, y INT, PRIMARY KEY (x, y) USING HASH WITH BUCKET_COUNT = 10) INTERLEAVE IN PARENT parent (x) statement error pq: interleaved indexes cannot also be hash sharded CREATE INDEX ON parent (x) USING HASH WITH BUCKET_COUNT = 10 INTERLEAVE IN PARENT parent(x) statement ok DROP TABLE parent; # This test ensures that the appropriate error is returned when trying to create # a hash sharded index with a column which does not exist. subtest column_does_not_exist statement ok CREATE TABLE t0(); statement error column "c0" does not exist CREATE INDEX ON t0 (c0) USING HASH WITH BUCKET_COUNT = 8; statement ok DROP TABLE t0; # Test that creating an index on a column which is currently being dropped # causes an error. subtest create_hash_index_on_dropping_column statement ok CREATE TABLE create_idx_drop_column (c0 INT PRIMARY KEY, c1 INT); statement ok begin; ALTER TABLE create_idx_drop_column DROP COLUMN c1; statement error column "c1" does not exist CREATE INDEX idx_create_idx_drop_column ON create_idx_drop_column (c1) USING HASH WITH BUCKET_COUNT = 8; statement ok ROLLBACK; statement ok DROP TABLE create_idx_drop_column; # Test that NULL values can be a part of a hash-sharded index. subtest null_values_in_sharded_columns statement ok CREATE TABLE sharded_index_with_nulls ( a INT8 PRIMARY KEY, b INT8, INDEX (b) USING HASH WITH BUCKET_COUNT = 8 ) statement ok INSERT INTO sharded_index_with_nulls VALUES (1, NULL); statement ok DROP TABLE sharded_index_with_nulls; # Test that renaming a column which is a member of a hash sharded index works. subtest rename_column statement ok CREATE TABLE rename_column ( c0 INT, c1 INT, c2 INT, PRIMARY KEY (c0, c1) USING HASH WITH BUCKET_COUNT = 8, INDEX (c2) USING HASH WITH BUCKET_COUNT = 8, FAMILY "primary" (c0, c1, c2) ); statement ok INSERT INTO rename_column VALUES (1, 2, 3); query TT SHOW CREATE TABLE rename_column ---- rename_column CREATE TABLE public.rename_column ( c0 INT8 NOT NULL, c1 INT8 NOT NULL, c2 INT8 NULL, CONSTRAINT "primary" PRIMARY KEY (c0 ASC, c1 ASC) USING HASH WITH BUCKET_COUNT = 8, INDEX rename_column_crdb_internal_c2_shard_8_c2_idx (c2 ASC) USING HASH WITH BUCKET_COUNT = 8, FAMILY "primary" (c0, c1, c2, crdb_internal_c0_c1_shard_8, crdb_internal_c2_shard_8) ) statement ok ALTER TABLE rename_column RENAME c2 TO c3; # Test mucking with primary key columns. 
statement ok ALTER TABLE rename_column RENAME c1 TO c2; statement ok ALTER TABLE rename_column RENAME c0 TO c1; query TT SHOW CREATE TABLE rename_column ---- rename_column CREATE TABLE public.rename_column ( c1 INT8 NOT NULL, c2 INT8 NOT NULL, c3 INT8 NULL, CONSTRAINT "primary" PRIMARY KEY (c1 ASC, c2 ASC) USING HASH WITH BUCKET_COUNT = 8, INDEX rename_column_crdb_internal_c2_shard_8_c2_idx (c3 ASC) USING HASH WITH BUCKET_COUNT = 8, FAMILY "primary" (c1, c2, c3, crdb_internal_c1_c2_shard_8, crdb_internal_c3_shard_8) ) query III SELECT c3, c2, c1 FROM rename_column ---- 3 2 1 # Test both at the same time. statement ok ALTER TABLE rename_column RENAME c1 TO c0, RENAME c2 TO c1, RENAME c3 TO c2; query TT SHOW CREATE TABLE rename_column ---- rename_column CREATE TABLE public.rename_column ( c0 INT8 NOT NULL, c1 INT8 NOT NULL, c2 INT8 NULL, CONSTRAINT "primary" PRIMARY KEY (c0 ASC, c1 ASC) USING HASH WITH BUCKET_COUNT = 8, INDEX rename_column_crdb_internal_c2_shard_8_c2_idx (c2 ASC) USING HASH WITH BUCKET_COUNT = 8, FAMILY "primary" (c0, c1, c2, crdb_internal_c0_c1_shard_8, crdb_internal_c2_shard_8) ) query III SELECT c2, c1, c0 FROM rename_column ---- 3 2 1 # Ensure that renaming a shard column fails. statement error cannot rename shard column ALTER TABLE rename_column RENAME crdb_internal_c2_shard_8 TO foo; statement ok DROP TABLE rename_column;
pkg/sql/logictest/testdata/logic_test/hash_sharded_index
1
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.9750288724899292, 0.3083515167236328, 0.004147310741245747, 0.24940234422683716, 0.2628679871559143 ]
{ "id": 1, "code_window": [ "statement ok\n", "SET experimental_enable_hash_sharded_indexes = false\n", "\n", "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\n", "CREATE TABLE disabled (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 10)\n", "\n", "statement ok\n", "CREATE TABLE disabled_secondary (k INT, v BYTES)\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\n" ], "file_path": "pkg/sql/logictest/testdata/logic_test/hash_sharded_index", "type": "replace", "edit_start_line_idx": 376 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package workload import ( "testing" "github.com/cockroachdb/cockroach/pkg/util/leaktest" ) func TestDistribute(t *testing.T) { defer leaktest.AfterTest(t)() for _, total := range []int{0, 1, 2, 5, 10, 17, 25} { for _, num := range []int{1, 2, 3, 4, 5, 8, 13, 15} { d := distribute(total, num) // Verify the sum is correct and that the variance is no more than 1. min, max, sum := d[0], d[0], d[0] for i := 1; i < len(d); i++ { sum += d[i] if min > d[i] { min = d[i] } if max < d[i] { max = d[i] } } if sum != total { t.Errorf("%d / %d: incorrect sum %d", total, num, sum) } if max > min+1 { t.Errorf("%d / %d: min value %d, max value %d", total, num, min, max) } } } }
pkg/workload/pgx_helpers_test.go
0
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.0003762702108360827, 0.00021451673819683492, 0.00016913523722905666, 0.0001774734992068261, 0.0000809663615655154 ]
{ "id": 1, "code_window": [ "statement ok\n", "SET experimental_enable_hash_sharded_indexes = false\n", "\n", "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\n", "CREATE TABLE disabled (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 10)\n", "\n", "statement ok\n", "CREATE TABLE disabled_secondary (k INT, v BYTES)\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\n" ], "file_path": "pkg/sql/logictest/testdata/logic_test/hash_sharded_index", "type": "replace", "edit_start_line_idx": 376 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. // Copyright (C) 2013-2018 by Maxim Bublis <[email protected]> // Use of this source code is governed by a MIT-style // license that can be found in licenses/MIT-gofrs.txt. // This code originated in github.com/gofrs/uuid. package uuid import ( "bytes" "encoding/hex" "fmt" ) // FromBytes returns a UUID generated from the raw byte slice input. // It will return an error if the slice isn't 16 bytes long. func FromBytes(input []byte) (UUID, error) { u := UUID{} err := u.UnmarshalBinary(input) return u, err } // FromBytesOrNil returns a UUID generated from the raw byte slice input. // Same behavior as FromBytes(), but returns uuid.Nil instead of an error. func FromBytesOrNil(input []byte) UUID { uuid, err := FromBytes(input) if err != nil { return Nil } return uuid } // FromString returns a UUID parsed from the input string. // Input is expected in a form accepted by UnmarshalText. func FromString(input string) (UUID, error) { u := UUID{} err := u.UnmarshalText([]byte(input)) return u, err } // FromStringOrNil returns a UUID parsed from the input string. // Same behavior as FromString(), but returns uuid.Nil instead of an error. func FromStringOrNil(input string) UUID { uuid, err := FromString(input) if err != nil { return Nil } return uuid } // MarshalText implements the encoding.TextMarshaler interface. // The encoding is the same as returned by the String() method. func (u UUID) MarshalText() ([]byte, error) { return []byte(u.String()), nil } // UnmarshalText implements the encoding.TextUnmarshaler interface. // Following formats are supported: // // "6ba7b810-9dad-11d1-80b4-00c04fd430c8", // "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", // "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" // "6ba7b8109dad11d180b400c04fd430c8" // "{6ba7b8109dad11d180b400c04fd430c8}", // "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" // // ABNF for supported UUID text representation follows: // // URN := 'urn' // UUID-NID := 'uuid' // // hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | // 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | // 'A' | 'B' | 'C' | 'D' | 'E' | 'F' // // hexoct := hexdig hexdig // 2hexoct := hexoct hexoct // 4hexoct := 2hexoct 2hexoct // 6hexoct := 4hexoct 2hexoct // 12hexoct := 6hexoct 6hexoct // // hashlike := 12hexoct // canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct // // plain := canonical | hashlike // uuid := canonical | hashlike | braced | urn // // braced := '{' plain '}' | '{' hashlike '}' // urn := URN ':' UUID-NID ':' plain // func (u *UUID) UnmarshalText(text []byte) error { switch len(text) { case 32: return u.decodeHashLike(text) case 34, 38: return u.decodeBraced(text) case 36: return u.decodeCanonical(text) case 41, 45: return u.decodeURN(text) default: return fmt.Errorf("uuid: incorrect UUID length: %s", text) } } // decodeCanonical decodes UUID strings that are formatted as defined in RFC-4122 (section 3): // "6ba7b810-9dad-11d1-80b4-00c04fd430c8". 
func (u *UUID) decodeCanonical(t []byte) error { if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { return fmt.Errorf("uuid: incorrect UUID format %s", t) } src := t dst := u[:] for i, byteGroup := range byteGroups { if i > 0 { src = src[1:] // skip dash } _, err := hex.Decode(dst[:byteGroup/2], src[:byteGroup]) if err != nil { return err } src = src[byteGroup:] dst = dst[byteGroup/2:] } return nil } // decodeHashLike decodes UUID strings that are using the following format: // "6ba7b8109dad11d180b400c04fd430c8". func (u *UUID) decodeHashLike(t []byte) error { src := t[:] dst := u[:] _, err := hex.Decode(dst, src) return err } // decodeBraced decodes UUID strings that are using the following formats: // "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" // "{6ba7b8109dad11d180b400c04fd430c8}". func (u *UUID) decodeBraced(t []byte) error { l := len(t) if t[0] != '{' || t[l-1] != '}' { return fmt.Errorf("uuid: incorrect UUID format %s", t) } return u.decodePlain(t[1 : l-1]) } // decodeURN decodes UUID strings that are using the following formats: // "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" // "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". func (u *UUID) decodeURN(t []byte) error { total := len(t) urnUUIDPrefix := t[:9] if !bytes.Equal(urnUUIDPrefix, urnPrefix) { return fmt.Errorf("uuid: incorrect UUID format: %s", t) } return u.decodePlain(t[9:total]) } // decodePlain decodes UUID strings that are using the following formats: // "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format // "6ba7b8109dad11d180b400c04fd430c8". func (u *UUID) decodePlain(t []byte) error { switch len(t) { case 32: return u.decodeHashLike(t) case 36: return u.decodeCanonical(t) default: return fmt.Errorf("uuid: incorrrect UUID length: %s", t) } } // MarshalBinary implements the encoding.BinaryMarshaler interface. func (u UUID) MarshalBinary() ([]byte, error) { return u.bytes(), nil } // UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. // It will return an error if the slice isn't 16 bytes long. func (u *UUID) UnmarshalBinary(data []byte) error { if len(data) != Size { return fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) } copy(u[:], data) return nil }
pkg/util/uuid/codec.go
0
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.00020362141367513686, 0.0001748527865856886, 0.00016415461141150445, 0.0001725226902635768, 0.000009664288882049732 ]
{ "id": 1, "code_window": [ "statement ok\n", "SET experimental_enable_hash_sharded_indexes = false\n", "\n", "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\n", "CREATE TABLE disabled (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 10)\n", "\n", "statement ok\n", "CREATE TABLE disabled_secondary (k INT, v BYTES)\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\n" ], "file_path": "pkg/sql/logictest/testdata/logic_test/hash_sharded_index", "type": "replace", "edit_start_line_idx": 376 }
query T SELECT aclexplode(NULL) ---- query T SELECT aclexplode(ARRAY[]::text[]) ---- query T SELECT aclexplode(ARRAY['foo']) ---- query O SELECT pg_my_temp_schema() ---- 0 # Regression test for #49072. statement ok SELECT has_table_privilege('root'::NAME, 0, 'select') # Regression test for #53684. statement ok CREATE TYPE typ AS ENUM ('hello') query T SELECT format_type(oid, 0) FROM pg_catalog.pg_type WHERE typname = 'typ' ---- typ # Nothing breaks if we put a non-existing oid into format_type. query T SELECT format_type(152100, 0) ---- unknown (OID=152100)
pkg/sql/logictest/testdata/logic_test/pg_builtins
0
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.029632100835442543, 0.013128362596035004, 0.0001702545996522531, 0.011355547234416008, 0.013173305429518223 ]
{ "id": 2, "code_window": [ "CREATE TABLE disabled (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 10)\n", "\n", "statement ok\n", "CREATE TABLE disabled_secondary (k INT, v BYTES)\n", "\n", "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\n", "CREATE INDEX failure on disabled_secondary (k) USING HASH WITH BUCKET_COUNT = 12\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\n" ], "file_path": "pkg/sql/logictest/testdata/logic_test/hash_sharded_index", "type": "replace", "edit_start_line_idx": 382 }
// Copyright 2017 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package sql import ( "context" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/geo/geoindex" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/paramparse" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" ) type createIndexNode struct { n *tree.CreateIndex tableDesc *tabledesc.Mutable } // CreateIndex creates an index. // Privileges: CREATE on table. // notes: postgres requires CREATE on the table. // mysql requires INDEX on the table. func (p *planner) CreateIndex(ctx context.Context, n *tree.CreateIndex) (planNode, error) { tableDesc, err := p.ResolveMutableTableDescriptor( ctx, &n.Table, true /*required*/, tree.ResolveRequireTableOrViewDesc, ) if err != nil { return nil, err } if tableDesc.IsView() && !tableDesc.MaterializedView() { return nil, pgerror.Newf(pgcode.WrongObjectType, "%q is not a table or materialized view", tableDesc.Name) } if tableDesc.MaterializedView() { if n.Interleave != nil { return nil, pgerror.New(pgcode.InvalidObjectDefinition, "cannot create interleaved index on materialized view") } if n.Sharded != nil { return nil, pgerror.New(pgcode.InvalidObjectDefinition, "cannot create hash sharded index on materialized view") } } if err := p.CheckPrivilege(ctx, tableDesc, privilege.CREATE); err != nil { return nil, err } return &createIndexNode{tableDesc: tableDesc, n: n}, nil } // setupFamilyAndConstraintForShard adds a newly-created shard column into its appropriate // family (see comment above GetColumnFamilyForShard) and adds a check constraint ensuring // that the shard column's value is within [0..ShardBuckets-1]. This method is called when // a `CREATE INDEX` statement is issued for the creation of a sharded index that *does // not* re-use a pre-existing shard column. func (p *planner) setupFamilyAndConstraintForShard( ctx context.Context, tableDesc *tabledesc.Mutable, shardCol *descpb.ColumnDescriptor, idxColumns []string, buckets int32, ) error { family := tabledesc.GetColumnFamilyForShard(tableDesc, idxColumns) if family == "" { return errors.AssertionFailedf("could not find column family for the first column in the index column set") } // Assign shard column to the family of the first column in its index set, and do it // before `AllocateIDs()` assigns it to the primary column family. 
if err := tableDesc.AddColumnToFamilyMaybeCreate(shardCol.Name, family, false, false); err != nil { return err } // Assign an ID to the newly-added shard column, which is needed for the creation // of a valid check constraint. if err := tableDesc.AllocateIDs(ctx); err != nil { return err } ckDef, err := makeShardCheckConstraintDef(tableDesc, int(buckets), shardCol) if err != nil { return err } info, err := tableDesc.GetConstraintInfo(ctx, nil) if err != nil { return err } inuseNames := make(map[string]struct{}, len(info)) for k := range info { inuseNames[k] = struct{}{} } ckBuilder := schemaexpr.MakeCheckConstraintBuilder(ctx, p.tableName, tableDesc, &p.semaCtx) ckName, err := ckBuilder.DefaultName(ckDef.Expr) if err != nil { return err } // Avoid creating duplicate check constraints. if _, ok := inuseNames[ckName]; !ok { ck, err := ckBuilder.Build(ckDef) if err != nil { return err } ck.Validity = descpb.ConstraintValidity_Validating tableDesc.AddCheckMutation(ck, descpb.DescriptorMutation_ADD) } return nil } // MakeIndexDescriptor creates an index descriptor from a CreateIndex node and optionally // adds a hidden computed shard column (along with its check constraint) in case the index // is hash sharded. Note that `tableDesc` will be modified when this method is called for // a hash sharded index. func MakeIndexDescriptor( params runParams, n *tree.CreateIndex, tableDesc *tabledesc.Mutable, ) (*descpb.IndexDescriptor, error) { // Ensure that the columns we want to index exist before trying to create the // index. if err := validateIndexColumnsExist(tableDesc, n.Columns); err != nil { return nil, err } // Ensure that the index name does not exist before trying to create the index. if err := tableDesc.ValidateIndexNameIsUnique(string(n.Name)); err != nil { return nil, err } indexDesc := descpb.IndexDescriptor{ Name: string(n.Name), Unique: n.Unique, StoreColumnNames: n.Storing.ToStrings(), CreatedExplicitly: true, } if n.Inverted { if n.Interleave != nil { return nil, pgerror.New(pgcode.InvalidSQLStatementName, "inverted indexes don't support interleaved tables") } if n.PartitionBy != nil { return nil, pgerror.New(pgcode.InvalidSQLStatementName, "inverted indexes don't support partitioning") } if n.Sharded != nil { return nil, pgerror.New(pgcode.InvalidSQLStatementName, "inverted indexes don't support hash sharding") } if len(indexDesc.StoreColumnNames) > 0 { return nil, pgerror.New(pgcode.InvalidSQLStatementName, "inverted indexes don't support stored columns") } if n.Unique { return nil, pgerror.New(pgcode.InvalidSQLStatementName, "inverted indexes can't be unique") } indexDesc.Type = descpb.IndexDescriptor_INVERTED columnDesc, _, err := tableDesc.FindColumnByName(n.Columns[0].Column) if err != nil { return nil, err } switch columnDesc.Type.Family() { case types.GeometryFamily: config, err := geoindex.GeometryIndexConfigForSRID(columnDesc.Type.GeoSRIDOrZero()) if err != nil { return nil, err } indexDesc.GeoConfig = *config telemetry.Inc(sqltelemetry.GeometryInvertedIndexCounter) case types.GeographyFamily: indexDesc.GeoConfig = *geoindex.DefaultGeographyIndexConfig() telemetry.Inc(sqltelemetry.GeographyInvertedIndexCounter) } telemetry.Inc(sqltelemetry.InvertedIndexCounter) } if n.Sharded != nil { if n.PartitionBy != nil { return nil, pgerror.New(pgcode.FeatureNotSupported, "sharded indexes don't support partitioning") } if n.Interleave != nil { return nil, pgerror.New(pgcode.FeatureNotSupported, "interleaved indexes cannot also be hash sharded") } shardCol, newColumn, err := 
setupShardedIndex( params.ctx, params.EvalContext(), &params.p.semaCtx, params.SessionData().HashShardedIndexesEnabled, &n.Columns, n.Sharded.ShardBuckets, tableDesc, &indexDesc, false /* isNewTable */) if err != nil { return nil, err } if newColumn { if err := params.p.setupFamilyAndConstraintForShard(params.ctx, tableDesc, shardCol, indexDesc.Sharded.ColumnNames, indexDesc.Sharded.ShardBuckets); err != nil { return nil, err } } telemetry.Inc(sqltelemetry.HashShardedIndexCounter) } if n.Predicate != nil { idxValidator := schemaexpr.MakeIndexPredicateValidator(params.ctx, n.Table, tableDesc, &params.p.semaCtx) expr, err := idxValidator.Validate(n.Predicate) if err != nil { return nil, err } indexDesc.Predicate = expr telemetry.Inc(sqltelemetry.PartialIndexCounter) } if err := indexDesc.FillColumns(n.Columns); err != nil { return nil, err } if err := paramparse.ApplyStorageParameters( params.ctx, params.p.SemaCtx(), params.EvalContext(), n.StorageParams, &paramparse.IndexStorageParamObserver{IndexDesc: &indexDesc}, ); err != nil { return nil, err } return &indexDesc, nil } // validateIndexColumnsExists validates that the columns for an index exist // in the table and are not being dropped prior to attempting to add the index. func validateIndexColumnsExist(desc *tabledesc.Mutable, columns tree.IndexElemList) error { for _, column := range columns { _, dropping, err := desc.FindColumnByName(column.Column) if err != nil { return err } if dropping { return colinfo.NewUndefinedColumnError(string(column.Column)) } } return nil } // ReadingOwnWrites implements the planNodeReadingOwnWrites interface. // This is because CREATE INDEX performs multiple KV operations on descriptors // and expects to see its own writes. func (n *createIndexNode) ReadingOwnWrites() {} var invalidClusterForShardedIndexError = pgerror.Newf(pgcode.FeatureNotSupported, "hash sharded indexes can only be created on a cluster that has fully migrated to version 20.1") var hashShardedIndexesDisabledError = pgerror.Newf(pgcode.FeatureNotSupported, "hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting") func setupShardedIndex( ctx context.Context, evalCtx *tree.EvalContext, semaCtx *tree.SemaContext, shardedIndexEnabled bool, columns *tree.IndexElemList, bucketsExpr tree.Expr, tableDesc *tabledesc.Mutable, indexDesc *descpb.IndexDescriptor, isNewTable bool, ) (shard *descpb.ColumnDescriptor, newColumn bool, err error) { st := evalCtx.Settings if !st.Version.IsActive(ctx, clusterversion.VersionHashShardedIndexes) { return nil, false, invalidClusterForShardedIndexError } if !shardedIndexEnabled { return nil, false, hashShardedIndexesDisabledError } colNames := make([]string, 0, len(*columns)) for _, c := range *columns { colNames = append(colNames, string(c.Column)) } buckets, err := tabledesc.EvalShardBucketCount(ctx, semaCtx, evalCtx, bucketsExpr) if err != nil { return nil, false, err } shardCol, newColumn, err := maybeCreateAndAddShardCol(int(buckets), tableDesc, colNames, isNewTable) if err != nil { return nil, false, err } shardIdxElem := tree.IndexElem{ Column: tree.Name(shardCol.Name), Direction: tree.Ascending, } *columns = append(tree.IndexElemList{shardIdxElem}, *columns...) 
indexDesc.Sharded = descpb.ShardedDescriptor{ IsSharded: true, Name: shardCol.Name, ShardBuckets: buckets, ColumnNames: colNames, } return shardCol, newColumn, nil } // maybeCreateAndAddShardCol adds a new hidden computed shard column (or its mutation) to // `desc`, if one doesn't already exist for the given index column set and number of shard // buckets. func maybeCreateAndAddShardCol( shardBuckets int, desc *tabledesc.Mutable, colNames []string, isNewTable bool, ) (col *descpb.ColumnDescriptor, created bool, err error) { shardCol, err := makeShardColumnDesc(colNames, shardBuckets) if err != nil { return nil, false, err } existingShardCol, dropped, err := desc.FindColumnByName(tree.Name(shardCol.Name)) if err == nil && !dropped { // TODO(ajwerner): In what ways is existingShardCol allowed to differ from // the newly made shardCol? Should there be some validation of // existingShardCol? if !existingShardCol.Hidden { // The user managed to reverse-engineer our crazy shard column name, so // we'll return an error here rather than try to be tricky. return nil, false, pgerror.Newf(pgcode.DuplicateColumn, "column %s already specified; can't be used for sharding", shardCol.Name) } return existingShardCol, false, nil } columnIsUndefined := sqlerrors.IsUndefinedColumnError(err) if err != nil && !columnIsUndefined { return nil, false, err } if columnIsUndefined || dropped { if isNewTable { desc.AddColumn(shardCol) } else { desc.AddColumnMutation(shardCol, descpb.DescriptorMutation_ADD) } created = true } return shardCol, created, nil } func (n *createIndexNode) startExec(params runParams) error { telemetry.Inc(sqltelemetry.SchemaChangeCreateCounter("index")) _, dropped, err := n.tableDesc.FindIndexByName(string(n.n.Name)) if err == nil { if dropped { return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "index %q being dropped, try again later", string(n.n.Name)) } if n.n.IfNotExists { return nil } } if n.n.Concurrently { params.p.BufferClientNotice( params.ctx, pgnotice.Newf("CONCURRENTLY is not required as all indexes are created concurrently"), ) } // Warn against creating a non-partitioned index on a partitioned table, // which is undesirable in most cases. if n.n.PartitionBy == nil && n.tableDesc.PrimaryIndex.Partitioning.NumColumns > 0 { params.p.BufferClientNotice( params.ctx, errors.WithHint( pgnotice.Newf("creating non-partitioned index on partitioned table may not be performant"), "Consider modifying the index such that it is also partitioned.", ), ) } indexDesc, err := MakeIndexDescriptor(params, n.n, n.tableDesc) if err != nil { return err } // Increment the counter if this index could be storing data across multiple column families. if len(indexDesc.StoreColumnNames) > 1 && len(n.tableDesc.Families) > 1 { telemetry.Inc(sqltelemetry.SecondaryIndexColumnFamiliesCounter) } // If all nodes in the cluster know how to handle secondary indexes with column families, // write the new version into the index descriptor. 
encodingVersion := descpb.BaseIndexFormatVersion if params.p.EvalContext().Settings.Version.IsActive(params.ctx, clusterversion.VersionSecondaryIndexColumnFamilies) { encodingVersion = descpb.SecondaryIndexFamilyFormatVersion } indexDesc.Version = encodingVersion if n.n.PartitionBy != nil { partitioning, err := CreatePartitioning(params.ctx, params.p.ExecCfg().Settings, params.EvalContext(), n.tableDesc, indexDesc, n.n.PartitionBy) if err != nil { return err } indexDesc.Partitioning = partitioning } mutationIdx := len(n.tableDesc.Mutations) if err := n.tableDesc.AddIndexMutation(indexDesc, descpb.DescriptorMutation_ADD); err != nil { return err } if err := n.tableDesc.AllocateIDs(params.ctx); err != nil { return err } // The index name may have changed as a result of // AllocateIDs(). Retrieve it for the event log below. index := n.tableDesc.Mutations[mutationIdx].GetIndex() indexName := index.Name if n.n.Interleave != nil { if err := params.p.addInterleave(params.ctx, n.tableDesc, index, n.n.Interleave); err != nil { return err } if err := params.p.finalizeInterleave(params.ctx, n.tableDesc, index); err != nil { return err } } mutationID := n.tableDesc.ClusterVersion.NextMutationID if err := params.p.writeSchemaChange( params.ctx, n.tableDesc, mutationID, tree.AsStringWithFQNames(n.n, params.Ann()), ); err != nil { return err } // Add all newly created type back references. if err := params.p.addBackRefsFromAllTypesInTable(params.ctx, n.tableDesc); err != nil { return err } // Record index creation in the event log. This is an auditable log // event and is recorded in the same transaction as the table descriptor // update. return MakeEventLogger(params.extendedEvalCtx.ExecCfg).InsertEventRecord( params.ctx, params.p.txn, EventLogCreateIndex, int32(n.tableDesc.ID), int32(params.extendedEvalCtx.NodeID.SQLInstanceID()), struct { TableName string IndexName string Statement string User string MutationID uint32 }{ n.n.Table.FQString(), indexName, n.n.String(), params.SessionData().User, uint32(mutationID), }, ) } func (*createIndexNode) Next(runParams) (bool, error) { return false, nil } func (*createIndexNode) Values() tree.Datums { return tree.Datums{} } func (*createIndexNode) Close(context.Context) {}
pkg/sql/create_index.go
1
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.9889354109764099, 0.10075762867927551, 0.00017365289386361837, 0.002049423521384597, 0.2608353793621063 ]
{ "id": 2, "code_window": [ "CREATE TABLE disabled (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 10)\n", "\n", "statement ok\n", "CREATE TABLE disabled_secondary (k INT, v BYTES)\n", "\n", "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\n", "CREATE INDEX failure on disabled_secondary (k) USING HASH WITH BUCKET_COUNT = 12\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\n" ], "file_path": "pkg/sql/logictest/testdata/logic_test/hash_sharded_index", "type": "replace", "edit_start_line_idx": 382 }
// Code generated by TestPretty. DO NOT EDIT. // GENERATED FILE DO NOT EDIT 1: - SELECT a, b, c FROM x UNION ALL SELECT d, e, f FROM y
pkg/sql/sem/tree/testdata/pretty/union_all.ref.golden.short
0
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.00019023494678549469, 0.00019023494678549469, 0.00019023494678549469, 0.00019023494678549469, 0 ]
{ "id": 2, "code_window": [ "CREATE TABLE disabled (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 10)\n", "\n", "statement ok\n", "CREATE TABLE disabled_secondary (k INT, v BYTES)\n", "\n", "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\n", "CREATE INDEX failure on disabled_secondary (k) USING HASH WITH BUCKET_COUNT = 12\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\n" ], "file_path": "pkg/sql/logictest/testdata/logic_test/hash_sharded_index", "type": "replace", "edit_start_line_idx": 382 }
--- ncurses/base/MKlib_gen.sh 2015-08-07 00:48:24.000000000 +0000
+++ ncurses/base/MKlib_gen.sh 2017-10-03 07:12:40.873640729 +0000
@@ -72,7 +72,7 @@
 # appears in gcc 5.0 and (with modification) in 5.1, making it necessary to
 # determine if we are using gcc, and if so, what version because the proposed
 # solution uses a nonstandard option.
-PRG=`echo "$1" | $AWK '{ sub(/^[[:space:]]*/,""); sub(/[[:space:]].*$/, ""); print; }' || exit 0`
+PRG=`echo "$1" | $AWK '{ sub(/^[ ]*/,""); sub(/[ ].*$/, ""); print; }' || exit 0`
 FSF=`"$PRG" --version 2>/dev/null || exit 0 | fgrep "Free Software Foundation" | head -n 1`
 ALL=`"$PRG" -dumpversion 2>/dev/null || exit 0`
 ONE=`echo "$ALL" | sed -e 's/\..*$//'`
build/builder/ncurses.patch
0
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.00026201389846391976, 0.00024265996762551367, 0.0002233060367871076, 0.00024265996762551367, 0.000019353930838406086 ]
{ "id": 2, "code_window": [ "CREATE TABLE disabled (k INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 10)\n", "\n", "statement ok\n", "CREATE TABLE disabled_secondary (k INT, v BYTES)\n", "\n", "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\n", "CREATE INDEX failure on disabled_secondary (k) USING HASH WITH BUCKET_COUNT = 12\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\n" ], "file_path": "pkg/sql/logictest/testdata/logic_test/hash_sharded_index", "type": "replace", "edit_start_line_idx": 382 }
// Copyright 2014 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package gossip import ( "context" "fmt" "math" "reflect" "sort" "sync" "testing" "time" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/gogo/protobuf/proto" ) var emptyAddr = util.MakeUnresolvedAddr("test", "<test-addr>") func newTestInfoStore() (*infoStore, *stop.Stopper) { stopper := stop.NewStopper() nc := &base.NodeIDContainer{} nc.Set(context.Background(), 1) is := newInfoStore(log.AmbientContext{Tracer: tracing.NewTracer()}, nc, emptyAddr, stopper) return is, stopper } // TestZeroDuration verifies that specifying a zero duration sets // TTLStamp to max int64. func TestZeroDuration(t *testing.T) { defer leaktest.AfterTest(t)() is, stopper := newTestInfoStore() defer stopper.Stop(context.Background()) info := is.newInfo(nil, 0) if info.TTLStamp != math.MaxInt64 { t.Errorf("expected zero duration to get max TTLStamp: %d", info.TTLStamp) } } // TestNewInfo creates new info objects. Verify sequence increments. func TestNewInfo(t *testing.T) { defer leaktest.AfterTest(t)() is, stopper := newTestInfoStore() defer stopper.Stop(context.Background()) info1 := is.newInfo(nil, time.Second) info2 := is.newInfo(nil, time.Second) if err := is.addInfo("a", info1); err != nil { t.Error(err) } if err := is.addInfo("b", info2); err != nil { t.Error(err) } if info1.OrigStamp >= info2.OrigStamp { t.Errorf("timestamps should increment %d, %d", info1.OrigStamp, info2.OrigStamp) } } // TestInfoStoreGetInfo adds an info, and makes sure it can be fetched // via getInfo. Also, verifies a non-existent info can't be fetched. func TestInfoStoreGetInfo(t *testing.T) { defer leaktest.AfterTest(t)() is, stopper := newTestInfoStore() defer stopper.Stop(context.Background()) i := is.newInfo(nil, time.Second) i.NodeID = 1 if err := is.addInfo("a", i); err != nil { t.Error(err) } if infoCount := len(is.Infos); infoCount != 1 { t.Errorf("infostore count incorrect %d != 1", infoCount) } if is.highWaterStamps[1] != i.OrigStamp { t.Error("high water timestamps map wasn't updated") } if is.getInfo("a") != i { t.Error("unable to get info") } if is.getInfo("b") != nil { t.Error("erroneously produced non-existent info for key b") } } // Verify TTL is respected on info fetched by key. func TestInfoStoreGetInfoTTL(t *testing.T) { defer leaktest.AfterTest(t)() is, stopper := newTestInfoStore() defer stopper.Stop(context.Background()) i := is.newInfo(nil, time.Nanosecond) if err := is.addInfo("a", i); err != nil { t.Error(err) } time.Sleep(time.Nanosecond) if info := is.getInfo("a"); info != nil { t.Errorf("shouldn't be able to get info with short TTL, got %+v", info) } } // Add infos using same key, same and lesser timestamp; verify no // replacement. 
func TestAddInfoSameKeyLessThanEqualTimestamp(t *testing.T) { defer leaktest.AfterTest(t)() is, stopper := newTestInfoStore() defer stopper.Stop(context.Background()) info1 := is.newInfo(nil, time.Second) if err := is.addInfo("a", info1); err != nil { t.Error(err) } info2 := is.newInfo(nil, time.Second) info2.Value.Timestamp.WallTime = info1.Value.Timestamp.WallTime if err := is.addInfo("a", info2); err == nil { t.Error("able to add info2 with same timestamp") } info2.Value.Timestamp.WallTime-- if err := is.addInfo("a", info2); err == nil { t.Error("able to add info2 with lesser timestamp") } // Verify info2 did not replace info1. if is.getInfo("a") != info1 { t.Error("info1 was replaced, despite same timestamp") } } // Add infos using same key, same timestamp; verify no replacement. func TestAddInfoSameKeyGreaterTimestamp(t *testing.T) { defer leaktest.AfterTest(t)() is, stopper := newTestInfoStore() defer stopper.Stop(context.Background()) info1 := is.newInfo(nil, time.Second) info2 := is.newInfo(nil, time.Second) if err1, err2 := is.addInfo("a", info1), is.addInfo("a", info2); err1 != nil || err2 != nil { t.Error(err1, err2) } } // Verify that adding two infos with different hops but same keys // always chooses the minimum hops. func TestAddInfoSameKeyDifferentHops(t *testing.T) { defer leaktest.AfterTest(t)() is, stopper := newTestInfoStore() defer stopper.Stop(context.Background()) info1 := is.newInfo(nil, time.Second) info1.Hops = 1 info2 := is.newInfo(nil, time.Second) info2.Value.Timestamp.WallTime = info1.Value.Timestamp.WallTime info2.Hops = 2 if err := is.addInfo("a", info1); err != nil { t.Errorf("failed insert: %s", err) } if err := is.addInfo("a", info2); err == nil { t.Errorf("shouldn't have inserted info 2: %s", err) } i := is.getInfo("a") if i.Hops != info1.Hops || !proto.Equal(i, info1) { t.Error("failed to properly combine hops and value", i) } // Try yet another info, with lower hops yet (0). info3 := is.newInfo(nil, time.Second) if err := is.addInfo("a", info3); err != nil { t.Error(err) } i = is.getInfo("a") if i.Hops != info3.Hops || !proto.Equal(i, info3) { t.Error("failed to properly combine hops and value", i) } } func TestCombineInfosRatchetMonotonic(t *testing.T) { defer leaktest.AfterTest(t)() for _, local := range []bool{true, false} { t.Run(fmt.Sprintf("local=%t", local), func(t *testing.T) { is, stopper := newTestInfoStore() defer stopper.Stop(context.Background()) // Generate an info with a timestamp in the future. info := &Info{ NodeID: is.nodeID.Get(), TTLStamp: math.MaxInt64, OrigStamp: monotonicUnixNano() + int64(time.Hour), } if !local { info.NodeID++ } // Reset the monotonic clock. monoTime.Lock() monoTime.last = 0 monoTime.Unlock() fresh, err := is.combine(map[string]*Info{"hello": info}, 2) if err != nil { t.Fatal(err) } if fresh != 1 { t.Fatalf("expected no infos to be added, but found %d", fresh) } // Verify the monotonic clock was ratcheted if the info was generated // locally. monoTime.Lock() last := monoTime.last monoTime.Unlock() var expectedLast int64 if local { expectedLast = info.OrigStamp if now := monotonicUnixNano(); now <= last { t.Fatalf("expected mono-time to increase: %d <= %d", now, last) } } if expectedLast != last { t.Fatalf("expected mono-time %d, but found %d", expectedLast, last) } if i := is.getInfo("hello"); i == nil { t.Fatalf("expected to find info\n%v", is.Infos) } }) } } // Helper method creates an infostore with 10 infos. 
func createTestInfoStore(t *testing.T) *infoStore { is, stopper := newTestInfoStore() defer stopper.Stop(context.Background()) for i := 0; i < 10; i++ { infoA := is.newInfo(nil, time.Second) infoA.NodeID = 1 infoA.Hops = 1 if err := is.addInfo(fmt.Sprintf("a.%d", i), infoA); err != nil { t.Fatal(err) } infoB := is.newInfo(nil, time.Second) infoB.NodeID = 2 infoB.Hops = 2 if err := is.addInfo(fmt.Sprintf("b.%d", i), infoB); err != nil { t.Fatal(err) } infoC := is.newInfo(nil, time.Second) infoC.NodeID = 3 infoC.Hops = 3 if err := is.addInfo(fmt.Sprintf("c.%d", i), infoC); err != nil { t.Fatal(err) } } return is } // Check infostore delta based on info high water timestamps. func TestInfoStoreDelta(t *testing.T) { defer leaktest.AfterTest(t)() is := createTestInfoStore(t) // Verify deltas with successive high water timestamps & min hops. infos := is.delta(map[roachpb.NodeID]int64{}) for i := 0; i < 10; i++ { if i > 0 { infoA := is.getInfo(fmt.Sprintf("a.%d", i-1)) infoB := is.getInfo(fmt.Sprintf("b.%d", i-1)) infoC := is.getInfo(fmt.Sprintf("c.%d", i-1)) infos = is.delta(map[roachpb.NodeID]int64{ 1: infoA.OrigStamp, 2: infoB.OrigStamp, 3: infoC.OrigStamp, }) } for _, node := range []string{"a", "b", "c"} { for j := 0; j < 10; j++ { expected := i <= j if _, ok := infos[fmt.Sprintf("%s.%d", node, j)]; ok != expected { t.Errorf("i,j=%d,%d: expected to fetch info %s.%d? %t; got %t", i, j, node, j, expected, ok) } } } } if infos := is.delta(map[roachpb.NodeID]int64{ 1: math.MaxInt64, 2: math.MaxInt64, 3: math.MaxInt64, }); len(infos) != 0 { t.Errorf("fetching delta of infostore at maximum timestamp should return empty, got %v", infos) } } // TestInfoStoreMostDistant verifies selection of most distant node & // associated hops. func TestInfoStoreMostDistant(t *testing.T) { defer leaktest.AfterTest(t)() nodes := []roachpb.NodeID{ roachpb.NodeID(1), roachpb.NodeID(2), roachpb.NodeID(3), } is, stopper := newTestInfoStore() defer stopper.Stop(context.Background()) // Start with one very distant info that shouldn't affect mostDistant // calculations because it isn't a node ID key. scInfo := is.newInfo(nil, time.Second) scInfo.Hops = 100 scInfo.NodeID = nodes[0] if err := is.addInfo(KeySystemConfig, scInfo); err != nil { t.Fatal(err) } // Add info from each address, with hop count equal to index+1. var expectedNodeID roachpb.NodeID var expectedHops uint32 for i := 0; i < len(nodes); i++ { inf := is.newInfo(nil, time.Second) inf.Hops = uint32(i + 1) inf.NodeID = nodes[i] if err := is.addInfo(MakeNodeIDKey(inf.NodeID), inf); err != nil { t.Fatal(err) } if inf.NodeID != 1 { expectedNodeID = inf.NodeID expectedHops = inf.Hops } nodeID, hops := is.mostDistant(func(roachpb.NodeID) bool { return false }) if expectedNodeID != nodeID { t.Errorf("%d: expected n%d; got %d", i, expectedNodeID, nodeID) } if expectedHops != hops { t.Errorf("%d: expected hops %d; got %d", i, expectedHops, hops) } } // Finally, simulate a Gossip instance that has an outgoing connection // and expect the outgoing connection to not be recommended even though // it's the furthest node away. 
filteredNode := nodes[len(nodes)-1] expectedNode := nodes[len(nodes)-2] expectedHops = uint32(expectedNode) nodeID, hops := is.mostDistant(func(nodeID roachpb.NodeID) bool { return nodeID == filteredNode }) if nodeID != expectedNode { t.Errorf("expected n%d; got %d", expectedNode, nodeID) } if hops != expectedHops { t.Errorf("expected hops %d; got %d", expectedHops, hops) } } // TestLeastUseful verifies that the least-contributing peer node // can be determined. func TestLeastUseful(t *testing.T) { defer leaktest.AfterTest(t)() nodes := []roachpb.NodeID{ roachpb.NodeID(1), roachpb.NodeID(2), } is, stopper := newTestInfoStore() defer stopper.Stop(context.Background()) set := makeNodeSet(3, metric.NewGauge(metric.Metadata{Name: ""})) if is.leastUseful(set) != 0 { t.Error("not expecting a node from an empty set") } inf1 := is.newInfo(nil, time.Second) inf1.NodeID = 1 inf1.PeerID = 1 if err := is.addInfo("a1", inf1); err != nil { t.Fatal(err) } if is.leastUseful(set) != 0 { t.Error("not expecting a node from an empty set") } set.addNode(nodes[0]) if is.leastUseful(set) != nodes[0] { t.Error("expecting nodes[0] as least useful") } inf2 := is.newInfo(nil, time.Second) inf2.NodeID = 2 inf2.PeerID = 1 if err := is.addInfo("a2", inf2); err != nil { t.Fatal(err) } if is.leastUseful(set) != nodes[0] { t.Error("expecting nodes[0] as least useful") } set.addNode(nodes[1]) if is.leastUseful(set) != nodes[1] { t.Error("expecting nodes[1] as least useful") } inf3 := is.newInfo(nil, time.Second) inf3.NodeID = 2 inf3.PeerID = 2 if err := is.addInfo("a3", inf3); err != nil { t.Fatal(err) } if is.leastUseful(set) != nodes[1] { t.Error("expecting nodes[1] as least useful") } } type callbackRecord struct { keys []string wg *sync.WaitGroup syncutil.Mutex } func (cr *callbackRecord) Add(key string, _ roachpb.Value) { cr.Lock() defer cr.Unlock() cr.keys = append(cr.keys, key) cr.wg.Done() } func (cr *callbackRecord) Keys() []string { cr.Lock() defer cr.Unlock() return append([]string(nil), cr.keys...) } func TestCallbacks(t *testing.T) { defer leaktest.AfterTest(t)() is, stopper := newTestInfoStore() defer stopper.Stop(context.Background()) wg := &sync.WaitGroup{} cb1 := callbackRecord{wg: wg} cb2 := callbackRecord{wg: wg} cbAll := callbackRecord{wg: wg} unregisterCB1 := is.registerCallback("key1", cb1.Add) is.registerCallback("key2", cb2.Add) is.registerCallback("key.*", cbAll.Add, Redundant) i1 := is.newInfo(nil, time.Second) i2 := is.newInfo(nil, time.Second) i3 := is.newInfo(nil, time.Second) // Add infos twice and verify callbacks aren't called for same timestamps. for i := 0; i < 2; i++ { for _, test := range []struct { key string info *Info count int }{ {"key1", i1, 2}, {"key2", i2, 2}, {"key3", i3, 1}, } { if i == 0 { wg.Add(test.count) } if err := is.addInfo(test.key, test.info); err != nil { if i == 0 { t.Error(err) } } else if i != 0 { t.Errorf("expected error on run #%d, but didn't get one", i) } wg.Wait() } if expKeys := []string{"key1"}; !reflect.DeepEqual(cb1.Keys(), expKeys) { t.Errorf("expected %v, got %v", expKeys, cb1.Keys()) } if expKeys := []string{"key2"}; !reflect.DeepEqual(cb2.Keys(), expKeys) { t.Errorf("expected %v, got %v", expKeys, cb2.Keys()) } keys := cbAll.Keys() if expKeys := []string{"key1", "key2", "key3"}; !reflect.DeepEqual(keys, expKeys) { t.Errorf("expected %v, got %v", expKeys, keys) } } // Update an info twice. for i := 0; i < 2; i++ { i1 := is.newInfo([]byte("a"), time.Second) // The first time both callbacks will fire because the value has // changed. 
The second time cbAll (created with the Redundant option) will // fire. wg.Add(2 - i) if err := is.addInfo("key1", i1); err != nil { t.Error(err) } wg.Wait() if expKeys := []string{"key1", "key1"}; !reflect.DeepEqual(cb1.Keys(), expKeys) { t.Errorf("expected %v, got %v", expKeys, cb1.Keys()) } if expKeys := []string{"key2"}; !reflect.DeepEqual(cb2.Keys(), expKeys) { t.Errorf("expected %v, got %v", expKeys, cb2.Keys()) } } if expKeys := []string{"key1", "key2", "key3", "key1", "key1"}; !reflect.DeepEqual(cbAll.Keys(), expKeys) { t.Errorf("expected %v, got %v", expKeys, cbAll.Keys()) } const numInfos = 3 // Register another callback with same pattern and verify it is // invoked for all three keys. wg.Add(numInfos) is.registerCallback("key.*", cbAll.Add) wg.Wait() expKeys := []string{"key1", "key2", "key3"} keys := cbAll.Keys() keys = keys[len(keys)-numInfos:] sort.Strings(keys) if !reflect.DeepEqual(keys, expKeys) { t.Errorf("expected %v, got %v", expKeys, keys) } // Unregister a callback and verify nothing is invoked on it. unregisterCB1() iNew := is.newInfo([]byte("b"), time.Second) wg.Add(2) // for the two cbAll callbacks if err := is.addInfo("key1", iNew); err != nil { t.Error(err) } wg.Wait() if len(cb1.Keys()) != 2 { t.Errorf("expected no new cb1 keys, got %v", cb1.Keys()) } } // TestRegisterCallback verifies that a callback is invoked when // registered if there are items which match its regexp in the // infostore. func TestRegisterCallback(t *testing.T) { defer leaktest.AfterTest(t)() is, stopper := newTestInfoStore() defer stopper.Stop(context.Background()) wg := &sync.WaitGroup{} cb := callbackRecord{wg: wg} i1 := is.newInfo(nil, time.Second) i2 := is.newInfo(nil, time.Second) if err := is.addInfo("key1", i1); err != nil { t.Fatal(err) } if err := is.addInfo("key2", i2); err != nil { t.Fatal(err) } wg.Add(2) is.registerCallback("key.*", cb.Add) wg.Wait() actKeys := cb.Keys() sort.Strings(actKeys) if expKeys := []string{"key1", "key2"}; !reflect.DeepEqual(actKeys, expKeys) { t.Errorf("expected %v, got %v", expKeys, cb.Keys()) } }
pkg/gossip/infostore_test.go
0
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.012627516873180866, 0.0005332371802069247, 0.0001736808626446873, 0.00022994671599008143, 0.0016262446297332644 ]
{ "id": 3, "code_window": [ "CREATE INDEX failure on disabled_secondary (k) USING HASH WITH BUCKET_COUNT = 12\n", "\n", "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\n", "CREATE TABLE disabled (k INT, INDEX (k) USING HASH WITH BUCKET_COUNT = 10)\n", "\n", "# Ensure everything works with weird column names\n", "statement ok\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\n" ], "file_path": "pkg/sql/logictest/testdata/logic_test/hash_sharded_index", "type": "replace", "edit_start_line_idx": 385 }
// Copyright 2017 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package sql import ( "context" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/geo/geoindex" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/paramparse" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" ) type createIndexNode struct { n *tree.CreateIndex tableDesc *tabledesc.Mutable } // CreateIndex creates an index. // Privileges: CREATE on table. // notes: postgres requires CREATE on the table. // mysql requires INDEX on the table. func (p *planner) CreateIndex(ctx context.Context, n *tree.CreateIndex) (planNode, error) { tableDesc, err := p.ResolveMutableTableDescriptor( ctx, &n.Table, true /*required*/, tree.ResolveRequireTableOrViewDesc, ) if err != nil { return nil, err } if tableDesc.IsView() && !tableDesc.MaterializedView() { return nil, pgerror.Newf(pgcode.WrongObjectType, "%q is not a table or materialized view", tableDesc.Name) } if tableDesc.MaterializedView() { if n.Interleave != nil { return nil, pgerror.New(pgcode.InvalidObjectDefinition, "cannot create interleaved index on materialized view") } if n.Sharded != nil { return nil, pgerror.New(pgcode.InvalidObjectDefinition, "cannot create hash sharded index on materialized view") } } if err := p.CheckPrivilege(ctx, tableDesc, privilege.CREATE); err != nil { return nil, err } return &createIndexNode{tableDesc: tableDesc, n: n}, nil } // setupFamilyAndConstraintForShard adds a newly-created shard column into its appropriate // family (see comment above GetColumnFamilyForShard) and adds a check constraint ensuring // that the shard column's value is within [0..ShardBuckets-1]. This method is called when // a `CREATE INDEX` statement is issued for the creation of a sharded index that *does // not* re-use a pre-existing shard column. func (p *planner) setupFamilyAndConstraintForShard( ctx context.Context, tableDesc *tabledesc.Mutable, shardCol *descpb.ColumnDescriptor, idxColumns []string, buckets int32, ) error { family := tabledesc.GetColumnFamilyForShard(tableDesc, idxColumns) if family == "" { return errors.AssertionFailedf("could not find column family for the first column in the index column set") } // Assign shard column to the family of the first column in its index set, and do it // before `AllocateIDs()` assigns it to the primary column family. 
if err := tableDesc.AddColumnToFamilyMaybeCreate(shardCol.Name, family, false, false); err != nil { return err } // Assign an ID to the newly-added shard column, which is needed for the creation // of a valid check constraint. if err := tableDesc.AllocateIDs(ctx); err != nil { return err } ckDef, err := makeShardCheckConstraintDef(tableDesc, int(buckets), shardCol) if err != nil { return err } info, err := tableDesc.GetConstraintInfo(ctx, nil) if err != nil { return err } inuseNames := make(map[string]struct{}, len(info)) for k := range info { inuseNames[k] = struct{}{} } ckBuilder := schemaexpr.MakeCheckConstraintBuilder(ctx, p.tableName, tableDesc, &p.semaCtx) ckName, err := ckBuilder.DefaultName(ckDef.Expr) if err != nil { return err } // Avoid creating duplicate check constraints. if _, ok := inuseNames[ckName]; !ok { ck, err := ckBuilder.Build(ckDef) if err != nil { return err } ck.Validity = descpb.ConstraintValidity_Validating tableDesc.AddCheckMutation(ck, descpb.DescriptorMutation_ADD) } return nil } // MakeIndexDescriptor creates an index descriptor from a CreateIndex node and optionally // adds a hidden computed shard column (along with its check constraint) in case the index // is hash sharded. Note that `tableDesc` will be modified when this method is called for // a hash sharded index. func MakeIndexDescriptor( params runParams, n *tree.CreateIndex, tableDesc *tabledesc.Mutable, ) (*descpb.IndexDescriptor, error) { // Ensure that the columns we want to index exist before trying to create the // index. if err := validateIndexColumnsExist(tableDesc, n.Columns); err != nil { return nil, err } // Ensure that the index name does not exist before trying to create the index. if err := tableDesc.ValidateIndexNameIsUnique(string(n.Name)); err != nil { return nil, err } indexDesc := descpb.IndexDescriptor{ Name: string(n.Name), Unique: n.Unique, StoreColumnNames: n.Storing.ToStrings(), CreatedExplicitly: true, } if n.Inverted { if n.Interleave != nil { return nil, pgerror.New(pgcode.InvalidSQLStatementName, "inverted indexes don't support interleaved tables") } if n.PartitionBy != nil { return nil, pgerror.New(pgcode.InvalidSQLStatementName, "inverted indexes don't support partitioning") } if n.Sharded != nil { return nil, pgerror.New(pgcode.InvalidSQLStatementName, "inverted indexes don't support hash sharding") } if len(indexDesc.StoreColumnNames) > 0 { return nil, pgerror.New(pgcode.InvalidSQLStatementName, "inverted indexes don't support stored columns") } if n.Unique { return nil, pgerror.New(pgcode.InvalidSQLStatementName, "inverted indexes can't be unique") } indexDesc.Type = descpb.IndexDescriptor_INVERTED columnDesc, _, err := tableDesc.FindColumnByName(n.Columns[0].Column) if err != nil { return nil, err } switch columnDesc.Type.Family() { case types.GeometryFamily: config, err := geoindex.GeometryIndexConfigForSRID(columnDesc.Type.GeoSRIDOrZero()) if err != nil { return nil, err } indexDesc.GeoConfig = *config telemetry.Inc(sqltelemetry.GeometryInvertedIndexCounter) case types.GeographyFamily: indexDesc.GeoConfig = *geoindex.DefaultGeographyIndexConfig() telemetry.Inc(sqltelemetry.GeographyInvertedIndexCounter) } telemetry.Inc(sqltelemetry.InvertedIndexCounter) } if n.Sharded != nil { if n.PartitionBy != nil { return nil, pgerror.New(pgcode.FeatureNotSupported, "sharded indexes don't support partitioning") } if n.Interleave != nil { return nil, pgerror.New(pgcode.FeatureNotSupported, "interleaved indexes cannot also be hash sharded") } shardCol, newColumn, err := 
setupShardedIndex( params.ctx, params.EvalContext(), &params.p.semaCtx, params.SessionData().HashShardedIndexesEnabled, &n.Columns, n.Sharded.ShardBuckets, tableDesc, &indexDesc, false /* isNewTable */) if err != nil { return nil, err } if newColumn { if err := params.p.setupFamilyAndConstraintForShard(params.ctx, tableDesc, shardCol, indexDesc.Sharded.ColumnNames, indexDesc.Sharded.ShardBuckets); err != nil { return nil, err } } telemetry.Inc(sqltelemetry.HashShardedIndexCounter) } if n.Predicate != nil { idxValidator := schemaexpr.MakeIndexPredicateValidator(params.ctx, n.Table, tableDesc, &params.p.semaCtx) expr, err := idxValidator.Validate(n.Predicate) if err != nil { return nil, err } indexDesc.Predicate = expr telemetry.Inc(sqltelemetry.PartialIndexCounter) } if err := indexDesc.FillColumns(n.Columns); err != nil { return nil, err } if err := paramparse.ApplyStorageParameters( params.ctx, params.p.SemaCtx(), params.EvalContext(), n.StorageParams, &paramparse.IndexStorageParamObserver{IndexDesc: &indexDesc}, ); err != nil { return nil, err } return &indexDesc, nil } // validateIndexColumnsExists validates that the columns for an index exist // in the table and are not being dropped prior to attempting to add the index. func validateIndexColumnsExist(desc *tabledesc.Mutable, columns tree.IndexElemList) error { for _, column := range columns { _, dropping, err := desc.FindColumnByName(column.Column) if err != nil { return err } if dropping { return colinfo.NewUndefinedColumnError(string(column.Column)) } } return nil } // ReadingOwnWrites implements the planNodeReadingOwnWrites interface. // This is because CREATE INDEX performs multiple KV operations on descriptors // and expects to see its own writes. func (n *createIndexNode) ReadingOwnWrites() {} var invalidClusterForShardedIndexError = pgerror.Newf(pgcode.FeatureNotSupported, "hash sharded indexes can only be created on a cluster that has fully migrated to version 20.1") var hashShardedIndexesDisabledError = pgerror.Newf(pgcode.FeatureNotSupported, "hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting") func setupShardedIndex( ctx context.Context, evalCtx *tree.EvalContext, semaCtx *tree.SemaContext, shardedIndexEnabled bool, columns *tree.IndexElemList, bucketsExpr tree.Expr, tableDesc *tabledesc.Mutable, indexDesc *descpb.IndexDescriptor, isNewTable bool, ) (shard *descpb.ColumnDescriptor, newColumn bool, err error) { st := evalCtx.Settings if !st.Version.IsActive(ctx, clusterversion.VersionHashShardedIndexes) { return nil, false, invalidClusterForShardedIndexError } if !shardedIndexEnabled { return nil, false, hashShardedIndexesDisabledError } colNames := make([]string, 0, len(*columns)) for _, c := range *columns { colNames = append(colNames, string(c.Column)) } buckets, err := tabledesc.EvalShardBucketCount(ctx, semaCtx, evalCtx, bucketsExpr) if err != nil { return nil, false, err } shardCol, newColumn, err := maybeCreateAndAddShardCol(int(buckets), tableDesc, colNames, isNewTable) if err != nil { return nil, false, err } shardIdxElem := tree.IndexElem{ Column: tree.Name(shardCol.Name), Direction: tree.Ascending, } *columns = append(tree.IndexElemList{shardIdxElem}, *columns...) 
indexDesc.Sharded = descpb.ShardedDescriptor{ IsSharded: true, Name: shardCol.Name, ShardBuckets: buckets, ColumnNames: colNames, } return shardCol, newColumn, nil } // maybeCreateAndAddShardCol adds a new hidden computed shard column (or its mutation) to // `desc`, if one doesn't already exist for the given index column set and number of shard // buckets. func maybeCreateAndAddShardCol( shardBuckets int, desc *tabledesc.Mutable, colNames []string, isNewTable bool, ) (col *descpb.ColumnDescriptor, created bool, err error) { shardCol, err := makeShardColumnDesc(colNames, shardBuckets) if err != nil { return nil, false, err } existingShardCol, dropped, err := desc.FindColumnByName(tree.Name(shardCol.Name)) if err == nil && !dropped { // TODO(ajwerner): In what ways is existingShardCol allowed to differ from // the newly made shardCol? Should there be some validation of // existingShardCol? if !existingShardCol.Hidden { // The user managed to reverse-engineer our crazy shard column name, so // we'll return an error here rather than try to be tricky. return nil, false, pgerror.Newf(pgcode.DuplicateColumn, "column %s already specified; can't be used for sharding", shardCol.Name) } return existingShardCol, false, nil } columnIsUndefined := sqlerrors.IsUndefinedColumnError(err) if err != nil && !columnIsUndefined { return nil, false, err } if columnIsUndefined || dropped { if isNewTable { desc.AddColumn(shardCol) } else { desc.AddColumnMutation(shardCol, descpb.DescriptorMutation_ADD) } created = true } return shardCol, created, nil } func (n *createIndexNode) startExec(params runParams) error { telemetry.Inc(sqltelemetry.SchemaChangeCreateCounter("index")) _, dropped, err := n.tableDesc.FindIndexByName(string(n.n.Name)) if err == nil { if dropped { return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "index %q being dropped, try again later", string(n.n.Name)) } if n.n.IfNotExists { return nil } } if n.n.Concurrently { params.p.BufferClientNotice( params.ctx, pgnotice.Newf("CONCURRENTLY is not required as all indexes are created concurrently"), ) } // Warn against creating a non-partitioned index on a partitioned table, // which is undesirable in most cases. if n.n.PartitionBy == nil && n.tableDesc.PrimaryIndex.Partitioning.NumColumns > 0 { params.p.BufferClientNotice( params.ctx, errors.WithHint( pgnotice.Newf("creating non-partitioned index on partitioned table may not be performant"), "Consider modifying the index such that it is also partitioned.", ), ) } indexDesc, err := MakeIndexDescriptor(params, n.n, n.tableDesc) if err != nil { return err } // Increment the counter if this index could be storing data across multiple column families. if len(indexDesc.StoreColumnNames) > 1 && len(n.tableDesc.Families) > 1 { telemetry.Inc(sqltelemetry.SecondaryIndexColumnFamiliesCounter) } // If all nodes in the cluster know how to handle secondary indexes with column families, // write the new version into the index descriptor. 
encodingVersion := descpb.BaseIndexFormatVersion if params.p.EvalContext().Settings.Version.IsActive(params.ctx, clusterversion.VersionSecondaryIndexColumnFamilies) { encodingVersion = descpb.SecondaryIndexFamilyFormatVersion } indexDesc.Version = encodingVersion if n.n.PartitionBy != nil { partitioning, err := CreatePartitioning(params.ctx, params.p.ExecCfg().Settings, params.EvalContext(), n.tableDesc, indexDesc, n.n.PartitionBy) if err != nil { return err } indexDesc.Partitioning = partitioning } mutationIdx := len(n.tableDesc.Mutations) if err := n.tableDesc.AddIndexMutation(indexDesc, descpb.DescriptorMutation_ADD); err != nil { return err } if err := n.tableDesc.AllocateIDs(params.ctx); err != nil { return err } // The index name may have changed as a result of // AllocateIDs(). Retrieve it for the event log below. index := n.tableDesc.Mutations[mutationIdx].GetIndex() indexName := index.Name if n.n.Interleave != nil { if err := params.p.addInterleave(params.ctx, n.tableDesc, index, n.n.Interleave); err != nil { return err } if err := params.p.finalizeInterleave(params.ctx, n.tableDesc, index); err != nil { return err } } mutationID := n.tableDesc.ClusterVersion.NextMutationID if err := params.p.writeSchemaChange( params.ctx, n.tableDesc, mutationID, tree.AsStringWithFQNames(n.n, params.Ann()), ); err != nil { return err } // Add all newly created type back references. if err := params.p.addBackRefsFromAllTypesInTable(params.ctx, n.tableDesc); err != nil { return err } // Record index creation in the event log. This is an auditable log // event and is recorded in the same transaction as the table descriptor // update. return MakeEventLogger(params.extendedEvalCtx.ExecCfg).InsertEventRecord( params.ctx, params.p.txn, EventLogCreateIndex, int32(n.tableDesc.ID), int32(params.extendedEvalCtx.NodeID.SQLInstanceID()), struct { TableName string IndexName string Statement string User string MutationID uint32 }{ n.n.Table.FQString(), indexName, n.n.String(), params.SessionData().User, uint32(mutationID), }, ) } func (*createIndexNode) Next(runParams) (bool, error) { return false, nil } func (*createIndexNode) Values() tree.Datums { return tree.Datums{} } func (*createIndexNode) Close(context.Context) {}
pkg/sql/create_index.go
1
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.9885657429695129, 0.07689983397722244, 0.0001735772384563461, 0.0018082167953252792, 0.23283937573432922 ]
{ "id": 3, "code_window": [ "CREATE INDEX failure on disabled_secondary (k) USING HASH WITH BUCKET_COUNT = 12\n", "\n", "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\n", "CREATE TABLE disabled (k INT, INDEX (k) USING HASH WITH BUCKET_COUNT = 10)\n", "\n", "# Ensure everything works with weird column names\n", "statement ok\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\n" ], "file_path": "pkg/sql/logictest/testdata/logic_test/hash_sharded_index", "type": "replace", "edit_start_line_idx": 385 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package pgdate import ( "fmt" "math" "strings" "testing" "time" ) func TestParseDate(t *testing.T) { for _, tc := range []struct { s string err string pgdays int32 }{ { s: "2000-01-01", pgdays: 0, }, { s: "1999-12-31", pgdays: -1, }, { s: "2000-01-02", pgdays: 1, }, { s: "0001-01-01", pgdays: -730119, }, { s: "0001-12-31 BC", pgdays: -730120, }, { s: "0002-01-01 BC", pgdays: -730850, }, { s: "5874897-12-31", pgdays: highDays, }, { s: "4714-11-24 BC", pgdays: lowDays, }, { s: "4714-11-23 BC", err: "date is out of range", }, { s: "5874898-01-01", err: "date is out of range", }, { s: "0000-01-01", err: "year value 0 is out of range", }, } { t.Run(tc.s, func(t *testing.T) { d, depOnCtx, err := ParseDate(time.Time{}, ParseModeYMD, tc.s) if tc.err != "" { if err == nil || !strings.Contains(err.Error(), tc.err) { t.Fatalf("got %v, expected %v", err, tc.err) } return } if depOnCtx { t.Fatalf("should not depend on context") } pg := d.PGEpochDays() if pg != tc.pgdays { t.Fatalf("%d != %d", pg, tc.pgdays) } s := d.String() if s != tc.s { t.Fatalf("%s != %s", s, tc.s) } }) } } func TestMakeCompatibleDateFromDisk(t *testing.T) { for _, tc := range []struct { in, out int64 }{ {0, 0}, {1, 1}, {-1, -1}, {math.MaxInt64, math.MaxInt64}, {math.MinInt64, math.MinInt64}, {math.MaxInt32, math.MaxInt64}, {math.MinInt32, math.MinInt64}, } { t.Run(fmt.Sprint(tc.in), func(t *testing.T) { date := MakeCompatibleDateFromDisk(tc.in) orig := date.UnixEpochDaysWithOrig() if orig != tc.in { t.Fatalf("%d != %d", orig, tc.in) } days := date.UnixEpochDays() if days != tc.out { t.Fatalf("%d != %d", days, tc.out) } }) } } func TestMakeDateFromTime(t *testing.T) { pgEpoch := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) pgEpochWithHourOffset := time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC) // These dates are negative, which makes rounding a little different. dayBeforeUnixEpoch := time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC) dayBeforeUnixEpochWithHourOffset := time.Date(1969, 12, 31, 1, 0, 0, 0, time.UTC) twoDaysBeforeUnixEpoch := time.Date(1969, 12, 30, 0, 0, 0, 0, time.UTC) twoDaysBeforeUnixEpochWithHourOffset := time.Date(1969, 12, 30, 1, 0, 0, 0, time.UTC) for _, tc := range []struct { in time.Time out string }{ {pgEpoch.In(time.FixedZone("secsPerDay", secondsPerDay)), "2000-01-02"}, {pgEpoch.In(time.FixedZone("secsPerDay-1", secondsPerDay-1)), "2000-01-01"}, {pgEpoch.In(time.FixedZone("1", 1)), "2000-01-01"}, {pgEpoch, "2000-01-01"}, {pgEpoch.In(time.FixedZone("-1", -1)), "1999-12-31"}, {pgEpoch.In(time.FixedZone("-secsPerDay", -secondsPerDay)), "1999-12-31"}, {pgEpochWithHourOffset, "2000-01-01"}, {dayBeforeUnixEpoch, "1969-12-31"}, {dayBeforeUnixEpochWithHourOffset, "1969-12-31"}, {twoDaysBeforeUnixEpoch, "1969-12-30"}, {twoDaysBeforeUnixEpochWithHourOffset, "1969-12-30"}, } { t.Run(tc.in.Format(time.RFC3339), func(t *testing.T) { d, err := MakeDateFromTime(tc.in) if err != nil { t.Fatal(err) } exp := tc.in.Format("2006-01-02") // Sanity check our tests. if exp != tc.out { t.Fatalf("got %s, expected %s", exp, tc.out) } s := d.String() if exp != s { t.Fatalf("got %s, expected %s", s, exp) } }) } }
pkg/util/timeutil/pgdate/pgdate_test.go
0
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.0009759305394254625, 0.00024443218717351556, 0.00016883150965441018, 0.00018228715634904802, 0.0001867627870524302 ]
{ "id": 3, "code_window": [ "CREATE INDEX failure on disabled_secondary (k) USING HASH WITH BUCKET_COUNT = 12\n", "\n", "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\n", "CREATE TABLE disabled (k INT, INDEX (k) USING HASH WITH BUCKET_COUNT = 10)\n", "\n", "# Ensure everything works with weird column names\n", "statement ok\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\n" ], "file_path": "pkg/sql/logictest/testdata/logic_test/hash_sharded_index", "type": "replace", "edit_start_line_idx": 385 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colexec import ( "context" "testing" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldatatestutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecbase" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/randutil" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) func TestSerialUnorderedSynchronizer(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() rng, _ := randutil.NewPseudoRand() const numInputs = 3 const numBatches = 4 typs := []*types.T{types.Int} inputs := make([]SynchronizerInput, numInputs) for i := range inputs { batch := coldatatestutils.RandomBatch(testAllocator, rng, typs, coldata.BatchSize(), 0 /* length */, rng.Float64()) source := colexecbase.NewRepeatableBatchSource(testAllocator, batch, typs) source.ResetBatchesToReturn(numBatches) inputIdx := i inputs[i] = SynchronizerInput{ Op: source, MetadataSources: []execinfrapb.MetadataSource{ execinfrapb.CallbackMetadataSource{ DrainMetaCb: func(_ context.Context) []execinfrapb.ProducerMetadata { return []execinfrapb.ProducerMetadata{{Err: errors.Errorf("input %d test-induced metadata", inputIdx)}} }, }, }, } } s := NewSerialUnorderedSynchronizer(inputs) resultBatches := 0 for { b := s.Next(ctx) if b.Length() == 0 { require.Equal(t, len(inputs), len(s.DrainMeta(ctx))) break } resultBatches++ } require.Equal(t, numInputs*numBatches, resultBatches) }
pkg/sql/colexec/serial_unordered_synchronizer_test.go
0
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.00019858691666740924, 0.00018124592315871269, 0.00016848414088599384, 0.00017558930267114192, 0.00001003333818516694 ]
{ "id": 3, "code_window": [ "CREATE INDEX failure on disabled_secondary (k) USING HASH WITH BUCKET_COUNT = 12\n", "\n", "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes cluster setting\n", "CREATE TABLE disabled (k INT, INDEX (k) USING HASH WITH BUCKET_COUNT = 10)\n", "\n", "# Ensure everything works with weird column names\n", "statement ok\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "statement error pq: hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable\n" ], "file_path": "pkg/sql/logictest/testdata/logic_test/hash_sharded_index", "type": "replace", "edit_start_line_idx": 385 }
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: server/status/statuspb/status.proto package statuspb import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" import build "github.com/cockroachdb/cockroach/pkg/build" import roachpb "github.com/cockroachdb/cockroach/pkg/roachpb" import github_com_cockroachdb_cockroach_pkg_roachpb "github.com/cockroachdb/cockroach/pkg/roachpb" import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" import encoding_binary "encoding/binary" import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type HealthAlert_Category int32 const ( HealthAlert_METRICS HealthAlert_Category = 0 HealthAlert_NETWORK HealthAlert_Category = 1 ) var HealthAlert_Category_name = map[int32]string{ 0: "METRICS", 1: "NETWORK", } var HealthAlert_Category_value = map[string]int32{ "METRICS": 0, "NETWORK": 1, } func (x HealthAlert_Category) String() string { return proto.EnumName(HealthAlert_Category_name, int32(x)) } func (HealthAlert_Category) EnumDescriptor() ([]byte, []int) { return fileDescriptor_status_f9872bd1035fefcc, []int{2, 0} } // StoreStatus records the most recent values of metrics for a store. type StoreStatus struct { Desc roachpb.StoreDescriptor `protobuf:"bytes,1,opt,name=desc,proto3" json:"desc"` Metrics map[string]float64 `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` } func (m *StoreStatus) Reset() { *m = StoreStatus{} } func (m *StoreStatus) String() string { return proto.CompactTextString(m) } func (*StoreStatus) ProtoMessage() {} func (*StoreStatus) Descriptor() ([]byte, []int) { return fileDescriptor_status_f9872bd1035fefcc, []int{0} } func (m *StoreStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *StoreStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } func (dst *StoreStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_StoreStatus.Merge(dst, src) } func (m *StoreStatus) XXX_Size() int { return m.Size() } func (m *StoreStatus) XXX_DiscardUnknown() { xxx_messageInfo_StoreStatus.DiscardUnknown(m) } var xxx_messageInfo_StoreStatus proto.InternalMessageInfo // NodeStatus records the most recent values of metrics for a node. 
type NodeStatus struct { Desc roachpb.NodeDescriptor `protobuf:"bytes,1,opt,name=desc,proto3" json:"desc"` BuildInfo build.Info `protobuf:"bytes,2,opt,name=build_info,json=buildInfo,proto3" json:"build_info"` StartedAt int64 `protobuf:"varint,3,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` UpdatedAt int64 `protobuf:"varint,4,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` Metrics map[string]float64 `protobuf:"bytes,5,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` StoreStatuses []StoreStatus `protobuf:"bytes,6,rep,name=store_statuses,json=storeStatuses,proto3" json:"store_statuses"` Args []string `protobuf:"bytes,7,rep,name=args,proto3" json:"args,omitempty"` Env []string `protobuf:"bytes,8,rep,name=env,proto3" json:"env,omitempty"` // latencies is a map of nodeIDs to nanoseconds which is the latency // between this node and the other node. // // NOTE: this is deprecated and is only set if the min supported // cluster version is >= VersionRPCNetworkStats. Latencies map[github_com_cockroachdb_cockroach_pkg_roachpb.NodeID]int64 `protobuf:"bytes,9,rep,name=latencies,proto3,castkey=github.com/cockroachdb/cockroach/pkg/roachpb.NodeID" json:"latencies" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` // activity is a map of nodeIDs to network statistics from this node // to other nodes. Activity map[github_com_cockroachdb_cockroach_pkg_roachpb.NodeID]NodeStatus_NetworkActivity `protobuf:"bytes,10,rep,name=activity,proto3,castkey=github.com/cockroachdb/cockroach/pkg/roachpb.NodeID" json:"activity" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // total_system_memory is the total RAM available to the system // (or, if possible, the memory available to the cgroup this process is in) // in bytes. TotalSystemMemory int64 `protobuf:"varint,11,opt,name=total_system_memory,json=totalSystemMemory,proto3" json:"total_system_memory,omitempty"` // num_cpus is the number of logical CPUs on this machine. 
NumCpus int32 `protobuf:"varint,12,opt,name=num_cpus,json=numCpus,proto3" json:"num_cpus,omitempty"` } func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (m *NodeStatus) String() string { return proto.CompactTextString(m) } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptor_status_f9872bd1035fefcc, []int{1} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *NodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } func (dst *NodeStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_NodeStatus.Merge(dst, src) } func (m *NodeStatus) XXX_Size() int { return m.Size() } func (m *NodeStatus) XXX_DiscardUnknown() { xxx_messageInfo_NodeStatus.DiscardUnknown(m) } var xxx_messageInfo_NodeStatus proto.InternalMessageInfo type NodeStatus_NetworkActivity struct { Incoming int64 `protobuf:"varint,1,opt,name=incoming,proto3" json:"incoming,omitempty"` Outgoing int64 `protobuf:"varint,2,opt,name=outgoing,proto3" json:"outgoing,omitempty"` Latency int64 `protobuf:"varint,3,opt,name=latency,proto3" json:"latency,omitempty"` } func (m *NodeStatus_NetworkActivity) Reset() { *m = NodeStatus_NetworkActivity{} } func (m *NodeStatus_NetworkActivity) String() string { return proto.CompactTextString(m) } func (*NodeStatus_NetworkActivity) ProtoMessage() {} func (*NodeStatus_NetworkActivity) Descriptor() ([]byte, []int) { return fileDescriptor_status_f9872bd1035fefcc, []int{1, 2} } func (m *NodeStatus_NetworkActivity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *NodeStatus_NetworkActivity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } func (dst *NodeStatus_NetworkActivity) XXX_Merge(src proto.Message) { xxx_messageInfo_NodeStatus_NetworkActivity.Merge(dst, src) } func (m *NodeStatus_NetworkActivity) XXX_Size() int { return m.Size() } func (m *NodeStatus_NetworkActivity) XXX_DiscardUnknown() { xxx_messageInfo_NodeStatus_NetworkActivity.DiscardUnknown(m) } var xxx_messageInfo_NodeStatus_NetworkActivity proto.InternalMessageInfo // A HealthAlert is an undesired condition detected by a server which should be // exposed to the operators. type HealthAlert struct { // store_id is zero for alerts not specific to a store (i.e. apply at the node level). 
StoreID github_com_cockroachdb_cockroach_pkg_roachpb.StoreID `protobuf:"varint,1,opt,name=store_id,json=storeId,proto3,customtype=github.com/cockroachdb/cockroach/pkg/roachpb.StoreID" json:"store_id"` Category HealthAlert_Category `protobuf:"varint,2,opt,name=category,proto3,enum=cockroach.server.status.statuspb.HealthAlert_Category" json:"category,omitempty"` Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` Value float64 `protobuf:"fixed64,4,opt,name=value,proto3" json:"value,omitempty"` } func (m *HealthAlert) Reset() { *m = HealthAlert{} } func (m *HealthAlert) String() string { return proto.CompactTextString(m) } func (*HealthAlert) ProtoMessage() {} func (*HealthAlert) Descriptor() ([]byte, []int) { return fileDescriptor_status_f9872bd1035fefcc, []int{2} } func (m *HealthAlert) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *HealthAlert) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } func (dst *HealthAlert) XXX_Merge(src proto.Message) { xxx_messageInfo_HealthAlert.Merge(dst, src) } func (m *HealthAlert) XXX_Size() int { return m.Size() } func (m *HealthAlert) XXX_DiscardUnknown() { xxx_messageInfo_HealthAlert.DiscardUnknown(m) } var xxx_messageInfo_HealthAlert proto.InternalMessageInfo // HealthCheckResult holds a number of HealthAlerts. type HealthCheckResult struct { Alerts []HealthAlert `protobuf:"bytes,1,rep,name=alerts,proto3" json:"alerts"` } func (m *HealthCheckResult) Reset() { *m = HealthCheckResult{} } func (m *HealthCheckResult) String() string { return proto.CompactTextString(m) } func (*HealthCheckResult) ProtoMessage() {} func (*HealthCheckResult) Descriptor() ([]byte, []int) { return fileDescriptor_status_f9872bd1035fefcc, []int{3} } func (m *HealthCheckResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *HealthCheckResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } func (dst *HealthCheckResult) XXX_Merge(src proto.Message) { xxx_messageInfo_HealthCheckResult.Merge(dst, src) } func (m *HealthCheckResult) XXX_Size() int { return m.Size() } func (m *HealthCheckResult) XXX_DiscardUnknown() { xxx_messageInfo_HealthCheckResult.DiscardUnknown(m) } var xxx_messageInfo_HealthCheckResult proto.InternalMessageInfo func init() { proto.RegisterType((*StoreStatus)(nil), "cockroach.server.status.statuspb.StoreStatus") proto.RegisterMapType((map[string]float64)(nil), "cockroach.server.status.statuspb.StoreStatus.MetricsEntry") proto.RegisterType((*NodeStatus)(nil), "cockroach.server.status.statuspb.NodeStatus") proto.RegisterMapType((map[github_com_cockroachdb_cockroach_pkg_roachpb.NodeID]NodeStatus_NetworkActivity)(nil), "cockroach.server.status.statuspb.NodeStatus.ActivityEntry") proto.RegisterMapType((map[github_com_cockroachdb_cockroach_pkg_roachpb.NodeID]int64)(nil), "cockroach.server.status.statuspb.NodeStatus.LatenciesEntry") proto.RegisterMapType((map[string]float64)(nil), "cockroach.server.status.statuspb.NodeStatus.MetricsEntry") proto.RegisterType((*NodeStatus_NetworkActivity)(nil), "cockroach.server.status.statuspb.NodeStatus.NetworkActivity") proto.RegisterType((*HealthAlert)(nil), "cockroach.server.status.statuspb.HealthAlert") proto.RegisterType((*HealthCheckResult)(nil), "cockroach.server.status.statuspb.HealthCheckResult") 
proto.RegisterEnum("cockroach.server.status.statuspb.HealthAlert_Category", HealthAlert_Category_name, HealthAlert_Category_value) } func (m *StoreStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *StoreStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ i = encodeVarintStatus(dAtA, i, uint64(m.Desc.Size())) n1, err := m.Desc.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 if len(m.Metrics) > 0 { keysForMetrics := make([]string, 0, len(m.Metrics)) for k := range m.Metrics { keysForMetrics = append(keysForMetrics, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForMetrics) for _, k := range keysForMetrics { dAtA[i] = 0x12 i++ v := m.Metrics[string(k)] mapSize := 1 + len(k) + sovStatus(uint64(len(k))) + 1 + 8 i = encodeVarintStatus(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa i++ i = encodeVarintStatus(dAtA, i, uint64(len(k))) i += copy(dAtA[i:], k) dAtA[i] = 0x11 i++ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(v)))) i += 8 } } return i, nil } func (m *NodeStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ i = encodeVarintStatus(dAtA, i, uint64(m.Desc.Size())) n2, err := m.Desc.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 dAtA[i] = 0x12 i++ i = encodeVarintStatus(dAtA, i, uint64(m.BuildInfo.Size())) n3, err := m.BuildInfo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n3 if m.StartedAt != 0 { dAtA[i] = 0x18 i++ i = encodeVarintStatus(dAtA, i, uint64(m.StartedAt)) } if m.UpdatedAt != 0 { dAtA[i] = 0x20 i++ i = encodeVarintStatus(dAtA, i, uint64(m.UpdatedAt)) } if len(m.Metrics) > 0 { keysForMetrics := make([]string, 0, len(m.Metrics)) for k := range m.Metrics { keysForMetrics = append(keysForMetrics, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForMetrics) for _, k := range keysForMetrics { dAtA[i] = 0x2a i++ v := m.Metrics[string(k)] mapSize := 1 + len(k) + sovStatus(uint64(len(k))) + 1 + 8 i = encodeVarintStatus(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa i++ i = encodeVarintStatus(dAtA, i, uint64(len(k))) i += copy(dAtA[i:], k) dAtA[i] = 0x11 i++ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(v)))) i += 8 } } if len(m.StoreStatuses) > 0 { for _, msg := range m.StoreStatuses { dAtA[i] = 0x32 i++ i = encodeVarintStatus(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if len(m.Args) > 0 { for _, s := range m.Args { dAtA[i] = 0x3a i++ l = len(s) for l >= 1<<7 { dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } dAtA[i] = uint8(l) i++ i += copy(dAtA[i:], s) } } if len(m.Env) > 0 { for _, s := range m.Env { dAtA[i] = 0x42 i++ l = len(s) for l >= 1<<7 { dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } dAtA[i] = uint8(l) i++ i += copy(dAtA[i:], s) } } if len(m.Latencies) > 0 { keysForLatencies := make([]int32, 0, len(m.Latencies)) for k := range m.Latencies { keysForLatencies = append(keysForLatencies, int32(k)) } github_com_gogo_protobuf_sortkeys.Int32s(keysForLatencies) for _, k := range keysForLatencies { dAtA[i] = 0x4a i++ v := m.Latencies[github_com_cockroachdb_cockroach_pkg_roachpb.NodeID(k)] mapSize := 1 + 
sovStatus(uint64(k)) + 1 + sovStatus(uint64(v)) i = encodeVarintStatus(dAtA, i, uint64(mapSize)) dAtA[i] = 0x8 i++ i = encodeVarintStatus(dAtA, i, uint64(k)) dAtA[i] = 0x10 i++ i = encodeVarintStatus(dAtA, i, uint64(v)) } } if len(m.Activity) > 0 { keysForActivity := make([]int32, 0, len(m.Activity)) for k := range m.Activity { keysForActivity = append(keysForActivity, int32(k)) } github_com_gogo_protobuf_sortkeys.Int32s(keysForActivity) for _, k := range keysForActivity { dAtA[i] = 0x52 i++ v := m.Activity[github_com_cockroachdb_cockroach_pkg_roachpb.NodeID(k)] msgSize := 0 if (&v) != nil { msgSize = (&v).Size() msgSize += 1 + sovStatus(uint64(msgSize)) } mapSize := 1 + sovStatus(uint64(k)) + msgSize i = encodeVarintStatus(dAtA, i, uint64(mapSize)) dAtA[i] = 0x8 i++ i = encodeVarintStatus(dAtA, i, uint64(k)) dAtA[i] = 0x12 i++ i = encodeVarintStatus(dAtA, i, uint64((&v).Size())) n4, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n4 } } if m.TotalSystemMemory != 0 { dAtA[i] = 0x58 i++ i = encodeVarintStatus(dAtA, i, uint64(m.TotalSystemMemory)) } if m.NumCpus != 0 { dAtA[i] = 0x60 i++ i = encodeVarintStatus(dAtA, i, uint64(m.NumCpus)) } return i, nil } func (m *NodeStatus_NetworkActivity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *NodeStatus_NetworkActivity) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Incoming != 0 { dAtA[i] = 0x8 i++ i = encodeVarintStatus(dAtA, i, uint64(m.Incoming)) } if m.Outgoing != 0 { dAtA[i] = 0x10 i++ i = encodeVarintStatus(dAtA, i, uint64(m.Outgoing)) } if m.Latency != 0 { dAtA[i] = 0x18 i++ i = encodeVarintStatus(dAtA, i, uint64(m.Latency)) } return i, nil } func (m *HealthAlert) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *HealthAlert) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.StoreID != 0 { dAtA[i] = 0x8 i++ i = encodeVarintStatus(dAtA, i, uint64(m.StoreID)) } if m.Category != 0 { dAtA[i] = 0x10 i++ i = encodeVarintStatus(dAtA, i, uint64(m.Category)) } if len(m.Description) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintStatus(dAtA, i, uint64(len(m.Description))) i += copy(dAtA[i:], m.Description) } if m.Value != 0 { dAtA[i] = 0x21 i++ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) i += 8 } return i, nil } func (m *HealthCheckResult) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *HealthCheckResult) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Alerts) > 0 { for _, msg := range m.Alerts { dAtA[i] = 0xa i++ i = encodeVarintStatus(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } return i, nil } func encodeVarintStatus(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func (m *StoreStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l l = m.Desc.Size() n += 1 + l + sovStatus(uint64(l)) if len(m.Metrics) > 0 { for k, v := range m.Metrics { _ = k _ = v mapEntrySize := 1 + len(k) + sovStatus(uint64(len(k))) + 1 + 8 n += mapEntrySize + 1 + 
sovStatus(uint64(mapEntrySize)) } } return n } func (m *NodeStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l l = m.Desc.Size() n += 1 + l + sovStatus(uint64(l)) l = m.BuildInfo.Size() n += 1 + l + sovStatus(uint64(l)) if m.StartedAt != 0 { n += 1 + sovStatus(uint64(m.StartedAt)) } if m.UpdatedAt != 0 { n += 1 + sovStatus(uint64(m.UpdatedAt)) } if len(m.Metrics) > 0 { for k, v := range m.Metrics { _ = k _ = v mapEntrySize := 1 + len(k) + sovStatus(uint64(len(k))) + 1 + 8 n += mapEntrySize + 1 + sovStatus(uint64(mapEntrySize)) } } if len(m.StoreStatuses) > 0 { for _, e := range m.StoreStatuses { l = e.Size() n += 1 + l + sovStatus(uint64(l)) } } if len(m.Args) > 0 { for _, s := range m.Args { l = len(s) n += 1 + l + sovStatus(uint64(l)) } } if len(m.Env) > 0 { for _, s := range m.Env { l = len(s) n += 1 + l + sovStatus(uint64(l)) } } if len(m.Latencies) > 0 { for k, v := range m.Latencies { _ = k _ = v mapEntrySize := 1 + sovStatus(uint64(k)) + 1 + sovStatus(uint64(v)) n += mapEntrySize + 1 + sovStatus(uint64(mapEntrySize)) } } if len(m.Activity) > 0 { for k, v := range m.Activity { _ = k _ = v l = v.Size() mapEntrySize := 1 + sovStatus(uint64(k)) + 1 + l + sovStatus(uint64(l)) n += mapEntrySize + 1 + sovStatus(uint64(mapEntrySize)) } } if m.TotalSystemMemory != 0 { n += 1 + sovStatus(uint64(m.TotalSystemMemory)) } if m.NumCpus != 0 { n += 1 + sovStatus(uint64(m.NumCpus)) } return n } func (m *NodeStatus_NetworkActivity) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Incoming != 0 { n += 1 + sovStatus(uint64(m.Incoming)) } if m.Outgoing != 0 { n += 1 + sovStatus(uint64(m.Outgoing)) } if m.Latency != 0 { n += 1 + sovStatus(uint64(m.Latency)) } return n } func (m *HealthAlert) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.StoreID != 0 { n += 1 + sovStatus(uint64(m.StoreID)) } if m.Category != 0 { n += 1 + sovStatus(uint64(m.Category)) } l = len(m.Description) if l > 0 { n += 1 + l + sovStatus(uint64(l)) } if m.Value != 0 { n += 9 } return n } func (m *HealthCheckResult) Size() (n int) { if m == nil { return 0 } var l int _ = l if len(m.Alerts) > 0 { for _, e := range m.Alerts { l = e.Size() n += 1 + l + sovStatus(uint64(l)) } } return n } func sovStatus(x uint64) (n int) { for { n++ x >>= 7 if x == 0 { break } } return n } func sozStatus(x uint64) (n int) { return sovStatus(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *StoreStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: StoreStatus: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: StoreStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Desc", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthStatus } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Desc.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return 
err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthStatus } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Metrics == nil { m.Metrics = make(map[string]float64) } var mapkey string var mapvalue float64 for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) if fieldNum == 1 { var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLenmapkey := int(stringLenmapkey) if intStringLenmapkey < 0 { return ErrInvalidLengthStatus } postStringIndexmapkey := iNdEx + intStringLenmapkey if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { var mapvaluetemp uint64 if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } mapvaluetemp = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 mapvalue = math.Float64frombits(mapvaluetemp) } else { iNdEx = entryPreIndex skippy, err := skipStatus(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthStatus } if (iNdEx + skippy) > postIndex { return io.ErrUnexpectedEOF } iNdEx += skippy } } m.Metrics[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipStatus(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthStatus } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *NodeStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Desc", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthStatus } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Desc.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BuildInfo", wireType) } var msglen int for shift := uint(0); ; 
shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthStatus } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.BuildInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } m.StartedAt = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartedAt |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) } m.UpdatedAt = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.UpdatedAt |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthStatus } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Metrics == nil { m.Metrics = make(map[string]float64) } var mapkey string var mapvalue float64 for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) if fieldNum == 1 { var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLenmapkey := int(stringLenmapkey) if intStringLenmapkey < 0 { return ErrInvalidLengthStatus } postStringIndexmapkey := iNdEx + intStringLenmapkey if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { var mapvaluetemp uint64 if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } mapvaluetemp = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 mapvalue = math.Float64frombits(mapvaluetemp) } else { iNdEx = entryPreIndex skippy, err := skipStatus(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthStatus } if (iNdEx + skippy) > postIndex { return io.ErrUnexpectedEOF } iNdEx += skippy } } m.Metrics[mapkey] = mapvalue iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StoreStatuses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthStatus } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.StoreStatuses = append(m.StoreStatuses, 
StoreStatus{}) if err := m.StoreStatuses[len(m.StoreStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthStatus } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthStatus } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Latencies", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthStatus } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Latencies == nil { m.Latencies = make(map[github_com_cockroachdb_cockroach_pkg_roachpb.NodeID]int64) } var mapkey int32 var mapvalue int64 for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) if fieldNum == 1 { for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ mapkey |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } } else if fieldNum == 2 { for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ mapvalue |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } } else { iNdEx = entryPreIndex skippy, err := skipStatus(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthStatus } if (iNdEx + skippy) > postIndex { return io.ErrUnexpectedEOF } iNdEx += skippy } } m.Latencies[github_com_cockroachdb_cockroach_pkg_roachpb.NodeID(mapkey)] = mapvalue iNdEx = postIndex case 10: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Activity", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthStatus } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Activity == nil { m.Activity = 
make(map[github_com_cockroachdb_cockroach_pkg_roachpb.NodeID]NodeStatus_NetworkActivity) } var mapkey int32 mapvalue := &NodeStatus_NetworkActivity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) if fieldNum == 1 { for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ mapkey |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } } else if fieldNum == 2 { var mapmsglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ mapmsglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if mapmsglen < 0 { return ErrInvalidLengthStatus } postmsgIndex := iNdEx + mapmsglen if mapmsglen < 0 { return ErrInvalidLengthStatus } if postmsgIndex > l { return io.ErrUnexpectedEOF } mapvalue = &NodeStatus_NetworkActivity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } iNdEx = postmsgIndex } else { iNdEx = entryPreIndex skippy, err := skipStatus(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthStatus } if (iNdEx + skippy) > postIndex { return io.ErrUnexpectedEOF } iNdEx += skippy } } m.Activity[github_com_cockroachdb_cockroach_pkg_roachpb.NodeID(mapkey)] = *mapvalue iNdEx = postIndex case 11: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field TotalSystemMemory", wireType) } m.TotalSystemMemory = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.TotalSystemMemory |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } case 12: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field NumCpus", wireType) } m.NumCpus = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.NumCpus |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipStatus(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthStatus } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *NodeStatus_NetworkActivity) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: NetworkActivity: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: NetworkActivity: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Incoming", wireType) } m.Incoming = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Incoming |= (int64(b) & 0x7F) << shift 
if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Outgoing", wireType) } m.Outgoing = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Outgoing |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Latency", wireType) } m.Latency = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Latency |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipStatus(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthStatus } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *HealthAlert) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: HealthAlert: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: HealthAlert: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StoreID", wireType) } m.StoreID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StoreID |= (github_com_cockroachdb_cockroach_pkg_roachpb.StoreID(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Category", wireType) } m.Category = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Category |= (HealthAlert_Category(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthStatus } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 1 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var v uint64 if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 m.Value = float64(math.Float64frombits(v)) default: iNdEx = preIndex skippy, err := skipStatus(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthStatus } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *HealthCheckResult) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx 
:= 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: HealthCheckResult: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: HealthCheckResult: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Alerts", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthStatus } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Alerts = append(m.Alerts, HealthAlert{}) if err := m.Alerts[len(m.Alerts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipStatus(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthStatus } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipStatus(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowStatus } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowStatus } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowStatus } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } iNdEx += length if length < 0 { return 0, ErrInvalidLengthStatus } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowStatus } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipStatus(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthStatus = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowStatus = fmt.Errorf("proto: integer overflow") ) func init() { proto.RegisterFile("server/status/statuspb/status.proto", fileDescriptor_status_f9872bd1035fefcc) } var fileDescriptor_status_f9872bd1035fefcc = []byte{ // 817 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x5f, 0x6f, 0xe3, 0x44, 0x10, 0xcf, 0x36, 0x69, 0xe3, 0x8c, 0xef, 0x4a, 0x6f, 0x39, 0x90, 0x89, 
0x44, 0x6a, 0x02, 0x0f, 0x11, 0x12, 0x8e, 0x94, 0x43, 0x08, 0xda, 0xbe, 0xf4, 0x9f, 0x20, 0x3a, 0x5a, 0xa4, 0x4d, 0x25, 0xa4, 0x7b, 0x09, 0x1b, 0x7b, 0xcf, 0xb5, 0x12, 0x7b, 0x2d, 0xef, 0xba, 0x28, 0xaf, 0x7c, 0x02, 0x24, 0x24, 0xbe, 0x00, 0x5f, 0xa6, 0x8f, 0xf7, 0x78, 0x42, 0xa2, 0x40, 0xfa, 0xce, 0x67, 0x40, 0xbb, 0x5e, 0x27, 0x2e, 0x2a, 0xea, 0x45, 0xf0, 0x94, 0xd9, 0xf9, 0x79, 0x7e, 0x33, 0xbf, 0x99, 0xd9, 0x0d, 0x7c, 0x28, 0x58, 0x76, 0xc5, 0xb2, 0xbe, 0x90, 0x54, 0xe6, 0xc2, 0xfc, 0xa4, 0x13, 0x63, 0x78, 0x69, 0xc6, 0x25, 0xc7, 0xae, 0xcf, 0xfd, 0x69, 0xc6, 0xa9, 0x7f, 0xe9, 0x15, 0x9f, 0x7b, 0x06, 0x2e, 0x3f, 0x6f, 0xbf, 0xab, 0xd1, 0x74, 0xd2, 0x8f, 0x99, 0xa4, 0x01, 0x95, 0xb4, 0x88, 0x6c, 0xef, 0x4c, 0xf2, 0x68, 0x16, 0xf4, 0xa3, 0xe4, 0x25, 0x37, 0x9e, 0xa7, 0x21, 0x0f, 0xb9, 0x36, 0xfb, 0xca, 0x2a, 0xbc, 0xdd, 0xdf, 0x10, 0xd8, 0x23, 0xc9, 0x33, 0x36, 0xd2, 0x8c, 0xf8, 0x00, 0x1a, 0x01, 0x13, 0xbe, 0x83, 0x5c, 0xd4, 0xb3, 0x07, 0x5d, 0x6f, 0x55, 0x80, 0x49, 0xe4, 0xe9, 0xaf, 0x4f, 0x98, 0xf0, 0xb3, 0x28, 0x95, 0x3c, 0x3b, 0x6a, 0x5c, 0xdf, 0xec, 0xd6, 0x88, 0x8e, 0xc2, 0x17, 0xd0, 0x8c, 0x99, 0xcc, 0x22, 0x5f, 0x38, 0x1b, 0x6e, 0xbd, 0x67, 0x0f, 0xf6, 0xbc, 0x87, 0x14, 0x78, 0x95, 0xec, 0xde, 0x59, 0x11, 0x7c, 0x9a, 0xc8, 0x6c, 0x4e, 0x4a, 0xaa, 0xf6, 0x1e, 0x3c, 0xaa, 0x02, 0x78, 0x07, 0xea, 0x53, 0x36, 0xd7, 0x25, 0xb6, 0x88, 0x32, 0xf1, 0x53, 0xd8, 0xbc, 0xa2, 0xb3, 0x9c, 0x39, 0x1b, 0x2e, 0xea, 0x21, 0x52, 0x1c, 0xf6, 0x36, 0x3e, 0x47, 0xdd, 0xbf, 0x2c, 0x80, 0x73, 0x1e, 0x94, 0xf2, 0xf6, 0xef, 0xc8, 0xfb, 0xe0, 0x1e, 0x79, 0xea, 0xe3, 0x7f, 0x51, 0xb7, 0x07, 0xa0, 0xbb, 0x3a, 0x56, 0x5d, 0xd5, 0xa9, 0xec, 0xc1, 0x3b, 0x15, 0x0a, 0x0d, 0x7a, 0xc3, 0xe4, 0x25, 0x37, 0x61, 0x2d, 0xed, 0x51, 0x0e, 0xfc, 0x3e, 0x80, 0x90, 0x34, 0x93, 0x2c, 0x18, 0x53, 0xe9, 0xd4, 0x5d, 0xd4, 0xab, 0x93, 0x96, 0xf1, 0x1c, 0x4a, 0x05, 0xe7, 0x69, 0x40, 0x0d, 0xdc, 0x28, 0x60, 0xe3, 0x39, 0x94, 0x78, 0xb4, 0xea, 0xeb, 0xa6, 0xee, 0xeb, 0x17, 0x0f, 0xf7, 0x75, 0xa5, 0xfa, 0xfe, 0xb6, 0xe2, 0x17, 0xb0, 0x2d, 0x54, 0xef, 0xc7, 0x45, 0x08, 0x13, 0xce, 0x96, 0xe6, 0xfe, 0x64, 0xad, 0x99, 0x19, 0xa9, 0x8f, 0xc5, 0xca, 0xc5, 0x04, 0xc6, 0xd0, 0xa0, 0x59, 0x28, 0x9c, 0xa6, 0x5b, 0xef, 0xb5, 0x88, 0xb6, 0xd5, 0xd8, 0x58, 0x72, 0xe5, 0x58, 0xda, 0xa5, 0x4c, 0xfc, 0x33, 0x82, 0xd6, 0x8c, 0x4a, 0x96, 0xf8, 0x11, 0x13, 0x4e, 0x4b, 0x67, 0xdf, 0x5f, 0x4b, 0xd9, 0xd7, 0x65, 0xb4, 0xd6, 0x76, 0xb4, 0xaf, 0x6a, 0xf9, 0xe1, 0xf7, 0xdd, 0x67, 0x61, 0x24, 0x2f, 0xf3, 0x89, 0xe7, 0xf3, 0xb8, 0xbf, 0xa4, 0x0b, 0x26, 0x2b, 0xbb, 0x9f, 0x4e, 0xc3, 0x7e, 0x75, 0xe4, 0xc3, 0x13, 0xb2, 0x2a, 0x05, 0xff, 0x84, 0xc0, 0xa2, 0xbe, 0x8c, 0xae, 0x22, 0x39, 0x77, 0xe0, 0x4d, 0x37, 0xb9, 0x52, 0xd7, 0xa1, 0x09, 0xfe, 0x1f, 0xca, 0x5a, 0x16, 0x82, 0x3d, 0x78, 0x5b, 0x72, 0x49, 0x67, 0x63, 0x31, 0x17, 0x92, 0xc5, 0xe3, 0x98, 0xc5, 0x3c, 0x9b, 0x3b, 0xb6, 0xde, 0x96, 0x27, 0x1a, 0x1a, 0x69, 0xe4, 0x4c, 0x03, 0xf8, 0x3d, 0xb0, 0x92, 0x3c, 0x1e, 0xfb, 0x69, 0x2e, 0x9c, 0x47, 0x2e, 0xea, 0x6d, 0x92, 0x66, 0x92, 0xc7, 0xc7, 0x69, 0xfe, 0x9f, 0xae, 0x54, 0xfb, 0x00, 0xb6, 0xef, 0xb6, 0xbd, 0x1a, 0xbd, 0x79, 0x4f, 0x74, 0xbd, 0x1a, 0xed, 0xc3, 0x5b, 0xe7, 0x4c, 0x7e, 0xcf, 0xb3, 0x69, 0xd9, 0x23, 0xdc, 0x06, 0x2b, 0x4a, 0x7c, 0x1e, 0x47, 0x49, 0xa8, 0x39, 0xea, 0x64, 0x79, 0x56, 0x18, 0xcf, 0x65, 0xc8, 0x15, 0x56, 0x70, 0x2d, 0xcf, 0xd8, 0x81, 0x66, 0x31, 0xb2, 0xb9, 0xb9, 0x50, 0xe5, 0xb1, 0x3d, 0x87, 0xc7, 0x77, 0x26, 0x70, 0x4f, 0x85, 0xa4, 0x5a, 0xa1, 0x3d, 0x38, 0x58, 0x6b, 0xbc, 0xff, 0x50, 0x50, 0x7d, 0x70, 0x7e, 0xd9, 0x00, 0xfb, 
0x2b, 0x46, 0x67, 0xf2, 0xf2, 0x70, 0xc6, 0x32, 0x89, 0x27, 0x60, 0x15, 0xb7, 0x2c, 0x0a, 0x8a, 0xf4, 0x47, 0x5f, 0xaa, 0x6d, 0xf8, 0xf5, 0x66, 0xf7, 0xd3, 0xb5, 0xb6, 0x41, 0x5f, 0xb9, 0xe1, 0xc9, 0xe2, 0x66, 0xb7, 0x69, 0x4c, 0xd2, 0xd4, 0xc4, 0xc3, 0x00, 0x13, 0xb0, 0x7c, 0x2a, 0x59, 0xa8, 0xb6, 0x41, 0xc9, 0xd9, 0x1e, 0x7c, 0xf6, 0xb0, 0x9c, 0x4a, 0x91, 0xde, 0xb1, 0x89, 0x26, 0x4b, 0x1e, 0xec, 0x82, 0x1d, 0x98, 0x67, 0x30, 0xe2, 0x89, 0x6e, 0x70, 0x8b, 0x54, 0x5d, 0xab, 0x19, 0x37, 0x2a, 0x1b, 0xd2, 0xfd, 0x08, 0xac, 0x92, 0x0d, 0xdb, 0xd0, 0x3c, 0x3b, 0xbd, 0x20, 0xc3, 0xe3, 0xd1, 0x4e, 0x4d, 0x1d, 0xce, 0x4f, 0x2f, 0xbe, 0xfd, 0x86, 0x3c, 0xdf, 0x41, 0xdd, 0xef, 0xe0, 0x49, 0x91, 0xff, 0xf8, 0x92, 0xf9, 0x53, 0xc2, 0x44, 0x3e, 0x93, 0xf8, 0x39, 0x6c, 0x51, 0x55, 0x8e, 0x70, 0xd0, 0x9b, 0x3e, 0x44, 0x15, 0x11, 0xe6, 0x21, 0x32, 0x14, 0x47, 0x1f, 0x5f, 0xff, 0xd9, 0xa9, 0x5d, 0x2f, 0x3a, 0xe8, 0xd5, 0xa2, 0x83, 0x5e, 0x2f, 0x3a, 0xe8, 0x8f, 0x45, 0x07, 0xfd, 0x78, 0xdb, 0xa9, 0xbd, 0xba, 0xed, 0xd4, 0x5e, 0xdf, 0x76, 0x6a, 0x2f, 0xac, 0x92, 0x65, 0xb2, 0xa5, 0xff, 0x0b, 0x9f, 0xfd, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x78, 0x91, 0x14, 0xef, 0x94, 0x07, 0x00, 0x00, }
pkg/server/status/statuspb/status.pb.go
0
https://github.com/cockroachdb/cockroach/commit/e69cbee2e876f8f76799a25701060009c2575672
[ 0.020150240510702133, 0.0005836120108142495, 0.00016481883358210325, 0.0002498840040061623, 0.0016721668653190136 ]
{ "id": 0, "code_window": [ "import https = require('https')\n", "import child_process = require('child_process')\n", "\n", "declare const ESBUILD_VERSION: string\n", "const toPath = path.join(__dirname, 'bin', 'esbuild')\n", "let isToPathJS = true\n", "\n", "function validateBinaryVersion(...command: string[]): void {\n", " command.push('--version')\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "const versionFromPackageJSON: string = require(path.join(__dirname, 'package.json')).version\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 9 }
import { downloadedBinPath, ESBUILD_BINARY_PATH, isValidBinaryPath, pkgAndSubpathForCurrentPlatform } from './node-platform' import fs = require('fs') import os = require('os') import path = require('path') import zlib = require('zlib') import https = require('https') import child_process = require('child_process') declare const ESBUILD_VERSION: string const toPath = path.join(__dirname, 'bin', 'esbuild') let isToPathJS = true function validateBinaryVersion(...command: string[]): void { command.push('--version') let stdout: string try { stdout = child_process.execFileSync(command.shift()!, command, { // Without this, this install script strangely crashes with the error // "EACCES: permission denied, write" but only on Ubuntu Linux when node is // installed from the Snap Store. This is not a problem when you download // the official version of node. The problem appears to be that stderr // (i.e. file descriptor 2) isn't writable? // // More info: // - https://snapcraft.io/ (what the Snap Store is) // - https://nodejs.org/dist/ (download the official version of node) // - https://github.com/evanw/esbuild/issues/1711#issuecomment-1027554035 // stdio: 'pipe', }).toString().trim() } catch (err) { if (os.platform() === 'darwin' && /_SecTrustEvaluateWithError/.test(err + '')) { let os = 'this version of macOS' try { os = 'macOS ' + child_process.execFileSync('sw_vers', ['-productVersion']).toString().trim() } catch { } throw new Error(`The "esbuild" package cannot be installed because ${os} is too outdated. The Go compiler (which esbuild relies on) no longer supports ${os}, which means the "esbuild" binary executable can't be run. You can either: * Update your version of macOS to one that the Go compiler supports * Use the "esbuild-wasm" package instead of the "esbuild" package * Build esbuild yourself using an older version of the Go compiler `) } throw err } if (stdout !== ESBUILD_VERSION) { throw new Error(`Expected ${JSON.stringify(ESBUILD_VERSION)} but got ${JSON.stringify(stdout)}`) } } function isYarn(): boolean { const { npm_config_user_agent } = process.env if (npm_config_user_agent) { return /\byarn\//.test(npm_config_user_agent) } return false } function fetch(url: string): Promise<Buffer> { return new Promise((resolve, reject) => { https.get(url, res => { if ((res.statusCode === 301 || res.statusCode === 302) && res.headers.location) return fetch(res.headers.location).then(resolve, reject) if (res.statusCode !== 200) return reject(new Error(`Server responded with ${res.statusCode}`)) let chunks: Buffer[] = [] res.on('data', chunk => chunks.push(chunk)) res.on('end', () => resolve(Buffer.concat(chunks))) }).on('error', reject) }) } function extractFileFromTarGzip(buffer: Buffer, subpath: string): Buffer { try { buffer = zlib.unzipSync(buffer) } catch (err: any) { throw new Error(`Invalid gzip data in archive: ${err && err.message || err}`) } let str = (i: number, n: number) => String.fromCharCode(...buffer.subarray(i, i + n)).replace(/\0.*$/, '') let offset = 0 subpath = `package/${subpath}` while (offset < buffer.length) { let name = str(offset, 100) let size = parseInt(str(offset + 124, 12), 8) offset += 512 if (!isNaN(size)) { if (name === subpath) return buffer.subarray(offset, offset + size) offset += (size + 511) & ~511 } } throw new Error(`Could not find ${JSON.stringify(subpath)} in archive`) } function installUsingNPM(pkg: string, subpath: string, binPath: string): void { // Erase "npm_config_global" so that "npm install --global esbuild" works. 
// Otherwise this nested "npm install" will also be global, and the install // will deadlock waiting for the global installation lock. const env = { ...process.env, npm_config_global: undefined } // Create a temporary directory inside the "esbuild" package with an empty // "package.json" file. We'll use this to run "npm install" in. const esbuildLibDir = path.dirname(require.resolve('esbuild')) const installDir = path.join(esbuildLibDir, 'npm-install') fs.mkdirSync(installDir) try { fs.writeFileSync(path.join(installDir, 'package.json'), '{}') // Run "npm install" in the temporary directory which should download the // desired package. Try to avoid unnecessary log output. This uses the "npm" // command instead of a HTTP request so that it hopefully works in situations // where HTTP requests are blocked but the "npm" command still works due to, // for example, a custom configured npm registry and special firewall rules. child_process.execSync(`npm install --loglevel=error --prefer-offline --no-audit --progress=false ${pkg}@${ESBUILD_VERSION}`, { cwd: installDir, stdio: 'pipe', env }) // Move the downloaded binary executable into place. The destination path // is the same one that the JavaScript API code uses so it will be able to // find the binary executable here later. const installedBinPath = path.join(installDir, 'node_modules', pkg, subpath) fs.renameSync(installedBinPath, binPath) } finally { // Try to clean up afterward so we don't unnecessarily waste file system // space. Leaving nested "node_modules" directories can also be problematic // for certain tools that scan over the file tree and expect it to have a // certain structure. try { removeRecursive(installDir) } catch { // Removing a file or directory can randomly break on Windows, returning // EBUSY for an arbitrary length of time. I think this happens when some // other program has that file or directory open (e.g. an anti-virus // program). This is fine on Unix because the OS just unlinks the entry // but keeps the reference around until it's unused. There's nothing we // can do in this case so we just leave the directory there. } } } function removeRecursive(dir: string): void { for (const entry of fs.readdirSync(dir)) { const entryPath = path.join(dir, entry) let stats try { stats = fs.lstatSync(entryPath) } catch { continue; // Guard against https://github.com/nodejs/node/issues/4760 } if (stats.isDirectory()) removeRecursive(entryPath) else fs.unlinkSync(entryPath) } fs.rmdirSync(dir) } function applyManualBinaryPathOverride(overridePath: string): void { // Patch the CLI use case (the "esbuild" command) const pathString = JSON.stringify(overridePath) fs.writeFileSync(toPath, `#!/usr/bin/env node\n` + `require('child_process').execFileSync(${pathString}, process.argv.slice(2), { stdio: 'inherit' });\n`) // Patch the JS API use case (the "require('esbuild')" workflow) const libMain = path.join(__dirname, 'lib', 'main.js') const code = fs.readFileSync(libMain, 'utf8') fs.writeFileSync(libMain, `var ESBUILD_BINARY_PATH = ${pathString};\n${code}`) } function maybeOptimizePackage(binPath: string): void { // This package contains a "bin/esbuild" JavaScript file that finds and runs // the appropriate binary executable. However, this means that running the // "esbuild" command runs another instance of "node" which is way slower than // just running the binary executable directly. // // Here we optimize for this by replacing the JavaScript file with the binary // executable at install time. 
This optimization does not work on Windows // because on Windows the binary executable must be called "esbuild.exe" // instead of "esbuild". // // This also doesn't work with Yarn both because of lack of support for binary // files in Yarn 2+ (see https://github.com/yarnpkg/berry/issues/882) and // because Yarn (even Yarn 1?) may run the same install scripts in the same // place multiple times from different platforms, especially when people use // Docker. Avoid idempotency issues by just not optimizing when using Yarn. // // This optimization also doesn't apply when npm's "--ignore-scripts" flag is // used since in that case this install script will not be run. if (os.platform() !== 'win32' && !isYarn()) { const tempPath = path.join(__dirname, 'bin-esbuild') try { // First link the binary with a temporary file. If this fails and throws an // error, then we'll just end up doing nothing. This uses a hard link to // avoid taking up additional space on the file system. fs.linkSync(binPath, tempPath) // Then use rename to atomically replace the target file with the temporary // file. If this fails and throws an error, then we'll just end up leaving // the temporary file there, which is harmless. fs.renameSync(tempPath, toPath) // If we get here, then we know that the target location is now a binary // executable instead of a JavaScript file. isToPathJS = false // If this install script is being re-run, then "renameSync" will fail // since the underlying inode is the same (it just returns without doing // anything, and without throwing an error). In that case we should remove // the file manually. fs.unlinkSync(tempPath) } catch { // Ignore errors here since this optimization is optional } } } async function downloadDirectlyFromNPM(pkg: string, subpath: string, binPath: string): Promise<void> { // If that fails, the user could have npm configured incorrectly or could not // have npm installed. Try downloading directly from npm as a last resort. const url = `https://registry.npmjs.org/${pkg}/-/${pkg.replace('@esbuild/', '')}-${ESBUILD_VERSION}.tgz` console.error(`[esbuild] Trying to download ${JSON.stringify(url)}`) try { fs.writeFileSync(binPath, extractFileFromTarGzip(await fetch(url), subpath)) fs.chmodSync(binPath, 0o755) } catch (e: any) { console.error(`[esbuild] Failed to download ${JSON.stringify(url)}: ${e && e.message || e}`) throw e } } async function checkAndPreparePackage(): Promise<void> { // This feature was added to give external code a way to modify the binary // path without modifying the code itself. Do not remove this because // external code relies on this (in addition to esbuild's own test suite). if (isValidBinaryPath(ESBUILD_BINARY_PATH)) { if (!fs.existsSync(ESBUILD_BINARY_PATH)) { console.warn(`[esbuild] Ignoring bad configuration: ESBUILD_BINARY_PATH=${ESBUILD_BINARY_PATH}`) } else { applyManualBinaryPathOverride(ESBUILD_BINARY_PATH) return } } const { pkg, subpath } = pkgAndSubpathForCurrentPlatform() let binPath: string try { // First check for the binary package from our "optionalDependencies". This // package should have been installed alongside this package at install time. binPath = require.resolve(`${pkg}/${subpath}`) } catch (e) { console.error(`[esbuild] Failed to find package "${pkg}" on the file system This can happen if you use the "--no-optional" flag. The "optionalDependencies" package.json feature is used by esbuild to install the correct binary executable for your current platform. This install script will now attempt to work around this. 
If that fails, you need to remove the "--no-optional" flag to use esbuild. `) // If that didn't work, then someone probably installed esbuild with the // "--no-optional" flag. Attempt to compensate for this by downloading the // package using a nested call to "npm" instead. // // THIS MAY NOT WORK. Package installation uses "optionalDependencies" for // a reason: manually downloading the package has a lot of obscure edge // cases that fail because people have customized their environment in // some strange way that breaks downloading. This code path is just here // to be helpful but it's not the supported way of installing esbuild. binPath = downloadedBinPath(pkg, subpath) try { console.error(`[esbuild] Trying to install package "${pkg}" using npm`) installUsingNPM(pkg, subpath, binPath) } catch (e2: any) { console.error(`[esbuild] Failed to install package "${pkg}" using npm: ${e2 && e2.message || e2}`) // If that didn't also work, then something is likely wrong with the "npm" // command. Attempt to compensate for this by manually downloading the // package from the npm registry over HTTP as a last resort. try { await downloadDirectlyFromNPM(pkg, subpath, binPath) } catch (e3: any) { throw new Error(`Failed to install package "${pkg}"`) } } } maybeOptimizePackage(binPath) } checkAndPreparePackage().then(() => { if (isToPathJS) { // We need "node" before this command since it's a JavaScript file validateBinaryVersion(process.execPath, toPath) } else { // This is no longer a JavaScript file so don't run it using "node" validateBinaryVersion(toPath) } })
lib/npm/node-install.ts
1
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.9992401599884033, 0.46462127566337585, 0.00016709543706383556, 0.25110727548599243, 0.4692474603652954 ]
{ "id": 0, "code_window": [ "import https = require('https')\n", "import child_process = require('child_process')\n", "\n", "declare const ESBUILD_VERSION: string\n", "const toPath = path.join(__dirname, 'bin', 'esbuild')\n", "let isToPathJS = true\n", "\n", "function validateBinaryVersion(...command: string[]): void {\n", " command.push('--version')\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "const versionFromPackageJSON: string = require(path.join(__dirname, 'package.json')).version\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 9 }
{ "dependencies": { "parcel": "2.8.0", "typescript": "4.9.3" } }
require/parcel2/package.json
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.0001692086225375533, 0.0001692086225375533, 0.0001692086225375533, 0.0001692086225375533, 0 ]
{ "id": 0, "code_window": [ "import https = require('https')\n", "import child_process = require('child_process')\n", "\n", "declare const ESBUILD_VERSION: string\n", "const toPath = path.join(__dirname, 'bin', 'esbuild')\n", "let isToPathJS = true\n", "\n", "function validateBinaryVersion(...command: string[]): void {\n", " command.push('--version')\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "const versionFromPackageJSON: string = require(path.join(__dirname, 'package.json')).version\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 9 }
// This test verifies that:
// - a running service will not prevent NodeJS to exit if there is no compilation in progress.
// - the NodeJS process will continue running if there is a serve() active or a transform or build in progress.

const assert = require('assert')
const { fork } = require('child_process');

// The tests to run in the child process
async function tests() {
  const esbuild = require('./esbuild').installForTests()

  async function testTransform() {
    const t1 = await esbuild.transform(`1+2`)
    const t2 = await esbuild.transform(`1+3`)
    assert.strictEqual(t1.code, `1 + 2;\n`)
    assert.strictEqual(t2.code, `1 + 3;\n`)
  }

  async function testServe() {
    const context = await esbuild.context({})
    try {
      const server = await context.serve({})
      assert.strictEqual(server.host, '0.0.0.0')
      assert.strictEqual(typeof server.port, 'number')
    } finally {
      await context.dispose()
    }
  }

  async function testBuild() {
    const context = await esbuild.context({
      stdin: { contents: '1+2' },
      write: false,
    })
    try {
      const result = await context.rebuild()
      assert.deepStrictEqual(result.outputFiles.length, 1);
      assert.deepStrictEqual(result.outputFiles[0].text, '1 + 2;\n');
      const result2 = await context.rebuild()
      assert.deepStrictEqual(result2.outputFiles.length, 1);
      assert.deepStrictEqual(result2.outputFiles[0].text, '1 + 2;\n');
    } finally {
      await context.dispose()
    }
  }

  async function testWatchAndIncremental() {
    const context = await esbuild.context({
      stdin: { contents: '1+2' },
      write: false,
    })
    try {
      await context.watch()
      const result = await context.rebuild()
      assert.deepStrictEqual(result.outputFiles.length, 1);
      assert.deepStrictEqual(result.outputFiles[0].text, '1 + 2;\n');
    } finally {
      await context.dispose()
    }
  }

  await testTransform()
  await testServe()
  await testBuild()
  await testWatchAndIncremental()
}

// Called when this is the child process to run the tests.
function runTests() {
  process.exitCode = 1;
  tests().then(() => {
    process.exitCode = 0;
  }, (error) => {
    console.error('❌', error)
  });
}

// A child process need to be started to verify that a running service is not hanging node.
function startChildProcess() {
  const child = fork(__filename, ['__forked__'], { stdio: 'inherit', env: process.env });

  const timeout = setTimeout(() => {
    console.error('❌ node unref test timeout - child_process.unref() broken?')
    process.exit(1);
  }, 5 * 60 * 1000);

  child.on('error', (error) => {
    console.error('❌', error);
    process.exit(1);
  })

  child.on('exit', (code) => {
    clearTimeout(timeout);
    if (code) {
      console.error(`❌ node unref tests failed: child exited with code ${code}`)
      process.exit(1);
    } else {
      console.log(`✅ node unref tests passed`)
    }
  })
}

if (process.argv[2] === '__forked__') {
  runTests();
} else {
  startChildProcess();
}
scripts/node-unref-tests.js
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.7009143829345703, 0.061087001115083694, 0.00016728386981412768, 0.00031429226510226727, 0.19303667545318604 ]
{ "id": 0, "code_window": [ "import https = require('https')\n", "import child_process = require('child_process')\n", "\n", "declare const ESBUILD_VERSION: string\n", "const toPath = path.join(__dirname, 'bin', 'esbuild')\n", "let isToPathJS = true\n", "\n", "function validateBinaryVersion(...command: string[]): void {\n", " command.push('--version')\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "const versionFromPackageJSON: string = require(path.join(__dirname, 'package.json')).version\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 9 }
{ "name": "@esbuild/linux-arm", "version": "0.17.10", "description": "The Linux ARM binary for esbuild, a JavaScript bundler.", "repository": "https://github.com/evanw/esbuild", "license": "MIT", "preferUnplugged": true, "engines": { "node": ">=12" }, "os": [ "linux" ], "cpu": [ "arm" ] }
npm/@esbuild/linux-arm/package.json
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.0006175145390443504, 0.00039330602157860994, 0.0001690975041128695, 0.00039330602157860994, 0.00022420851746574044 ]
{ "id": 1, "code_window": [ " }\n", " throw err\n", " }\n", " if (stdout !== ESBUILD_VERSION) {\n", " throw new Error(`Expected ${JSON.stringify(ESBUILD_VERSION)} but got ${JSON.stringify(stdout)}`)\n", " }\n", "}\n", "\n", "function isYarn(): boolean {\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ " if (stdout !== versionFromPackageJSON) {\n", " throw new Error(`Expected ${JSON.stringify(versionFromPackageJSON)} but got ${JSON.stringify(stdout)}`)\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 50 }
const childProcess = require('child_process') const path = require('path') const zlib = require('zlib') const fs = require('fs') const os = require('os') const repoDir = path.dirname(__dirname) const denoDir = path.join(repoDir, 'deno') const npmDir = path.join(repoDir, 'npm', 'esbuild') const version = fs.readFileSync(path.join(repoDir, 'version.txt'), 'utf8').trim() const nodeTarget = 'node10'; // See: https://nodejs.org/en/about/releases/ const umdBrowserTarget = 'es2015'; // Transpiles "async" const esmBrowserTarget = 'es2017'; // Preserves "async" const buildNeutralLib = (esbuildPath) => { const libDir = path.join(npmDir, 'lib') const binDir = path.join(npmDir, 'bin') fs.mkdirSync(libDir, { recursive: true }) fs.mkdirSync(binDir, { recursive: true }) // Generate "npm/esbuild/install.js" childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'npm', 'node-install.ts'), '--outfile=' + path.join(npmDir, 'install.js'), '--bundle', '--target=' + nodeTarget, '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--external:esbuild', '--platform=node', '--log-level=warning', ], { cwd: repoDir }) // Generate "npm/esbuild/lib/main.js" childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'npm', 'node.ts'), '--outfile=' + path.join(libDir, 'main.js'), '--bundle', '--target=' + nodeTarget, '--define:WASM=false', '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--external:esbuild', '--platform=node', '--log-level=warning', ], { cwd: repoDir }) // Generate "npm/esbuild/bin/esbuild" childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'npm', 'node-shim.ts'), '--outfile=' + path.join(binDir, 'esbuild'), '--bundle', '--target=' + nodeTarget, '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--external:esbuild', '--platform=node', '--log-level=warning', ], { cwd: repoDir }) // Generate "npm/esbuild/lib/main.d.ts" const types_ts = fs.readFileSync(path.join(repoDir, 'lib', 'shared', 'types.ts'), 'utf8') fs.writeFileSync(path.join(libDir, 'main.d.ts'), types_ts) // Get supported platforms const platforms = { exports: {} } new Function('module', 'exports', 'require', childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'npm', 'node-platform.ts'), '--bundle', '--target=' + nodeTarget, '--external:esbuild', '--platform=node', '--log-level=warning', ], { cwd: repoDir }))(platforms, platforms.exports, require) const optionalDependencies = Object.fromEntries(Object.values({ ...platforms.exports.knownWindowsPackages, ...platforms.exports.knownUnixlikePackages, ...platforms.exports.knownWebAssemblyFallbackPackages, }).sort().map(x => [x, version])) // Update "npm/esbuild/package.json" const pjPath = path.join(npmDir, 'package.json') const package_json = JSON.parse(fs.readFileSync(pjPath, 'utf8')) package_json.optionalDependencies = optionalDependencies fs.writeFileSync(pjPath, JSON.stringify(package_json, null, 2) + '\n') } async function generateWorkerCode({ esbuildPath, wasm_exec_js, minify, target }) { const input = ` let onmessage let globalThis = {} for (let o = self; o; o = Object.getPrototypeOf(o)) for (let k of Object.getOwnPropertyNames(o)) if (!(k in globalThis)) Object.defineProperty(globalThis, k, { get: () => self[k] }) ${wasm_exec_js.replace(/\bfs\./g, 'globalThis.fs.')} ${fs.readFileSync(path.join(repoDir, 'lib', 'shared', 'worker.ts'), 'utf8')} return m => onmessage(m) ` const args = [ '--loader=ts', '--target=' + target, '--define:ESBUILD_VERSION=' + JSON.stringify(version), ].concat(minify ? 
['--minify'] : []) // Note: This uses "execFile" because "execFileSync" in node appears to have // a bug. Specifically when using the "input" option of "execFileSync" to // provide stdin, sometimes (~2% of the time?) node writes all of the input // but then doesn't close the stream. The Go side is stuck reading from stdin // within "ioutil.ReadAll(os.Stdin)" so I suspect it's a bug in node, not in // Go. Explicitly calling "stdin.end()" on the node side appears to fix it. const wasmExecAndWorker = (await new Promise((resolve, reject) => { const proc = childProcess.execFile(esbuildPath, args, { cwd: repoDir }, (err, stdout) => { if (err) reject(err) else resolve(stdout) }) proc.stdin.write(input) proc.stdin.end() })).toString().trim() const commentLines = wasm_exec_js.split('\n') const firstNonComment = commentLines.findIndex(line => !line.startsWith('//')) const commentPrefix = '\n' + commentLines.slice(0, firstNonComment).join('\n') + '\n' if (minify) return `(postMessage=>{${commentPrefix}${wasmExecAndWorker}})` return `((postMessage) => {${(commentPrefix + wasmExecAndWorker).replace(/\n/g, '\n ')}\n })` } exports.buildWasmLib = async (esbuildPath) => { // Asynchronously start building the WebAssembly module const npmWasmDir = path.join(repoDir, 'npm', 'esbuild-wasm') const goBuildPromise = new Promise((resolve, reject) => childProcess.execFile('go', [ 'build', '-o', path.join(npmWasmDir, 'esbuild.wasm'), '-ldflags=-s -w', // This removes ~0.14mb of unnecessary WebAssembly code '-trimpath', path.join(repoDir, 'cmd', 'esbuild'), ], { cwd: repoDir, stdio: 'inherit', env: { ...process.env, GOOS: 'js', GOARCH: 'wasm' } }, err => err ? reject(err) : resolve())) const libDir = path.join(npmWasmDir, 'lib') const esmDir = path.join(npmWasmDir, 'esm') fs.mkdirSync(libDir, { recursive: true }) fs.mkdirSync(esmDir, { recursive: true }) // Generate "npm/esbuild-wasm/wasm_exec.js" const GOROOT = childProcess.execFileSync('go', ['env', 'GOROOT']).toString().trim() let wasm_exec_js = fs.readFileSync(path.join(GOROOT, 'misc', 'wasm', 'wasm_exec.js'), 'utf8') let wasm_exec_node_js = fs.readFileSync(path.join(GOROOT, 'misc', 'wasm', 'wasm_exec_node.js'), 'utf8') fs.writeFileSync(path.join(npmWasmDir, 'wasm_exec.js'), wasm_exec_js) fs.writeFileSync(path.join(npmWasmDir, 'wasm_exec_node.js'), wasm_exec_node_js) // Generate "npm/esbuild-wasm/lib/main.js" childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'npm', 'node.ts'), '--outfile=' + path.join(libDir, 'main.js'), '--bundle', '--target=' + nodeTarget, '--format=cjs', '--define:WASM=true', '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--external:esbuild', '--platform=node', '--log-level=warning', ], { cwd: repoDir }) // Generate "npm/esbuild-wasm/lib/main.d.ts" and "npm/esbuild-wasm/lib/browser.d.ts" const types_ts = fs.readFileSync(path.join(repoDir, 'lib', 'shared', 'types.ts'), 'utf8') fs.writeFileSync(path.join(libDir, 'main.d.ts'), types_ts) fs.writeFileSync(path.join(libDir, 'browser.d.ts'), types_ts) fs.writeFileSync(path.join(esmDir, 'browser.d.ts'), types_ts) for (const minify of [false, true]) { const minifyFlags = minify ? 
['--minify'] : [] const wasmWorkerCodeUMD = await generateWorkerCode({ esbuildPath, wasm_exec_js, minify, target: umdBrowserTarget }) const wasmWorkerCodeESM = await generateWorkerCode({ esbuildPath, wasm_exec_js, minify, target: esmBrowserTarget }) // Generate "npm/esbuild-wasm/lib/browser.*" const umdPrefix = `(module=>{` const umdSuffix = `})(typeof module==="object"?module:{set exports(x){(typeof self!=="undefined"?self:this).esbuild=x}});` const browserCJS = childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'npm', 'browser.ts'), '--bundle', '--target=' + umdBrowserTarget, '--format=cjs', '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--define:WEB_WORKER_SOURCE_CODE=' + JSON.stringify(wasmWorkerCodeUMD), '--banner:js=' + umdPrefix, '--footer:js=' + umdSuffix, '--log-level=warning', ].concat(minifyFlags), { cwd: repoDir }).toString().replace('WEB_WORKER_FUNCTION', wasmWorkerCodeUMD) fs.writeFileSync(path.join(libDir, minify ? 'browser.min.js' : 'browser.js'), browserCJS) // Generate "npm/esbuild-wasm/esm/browser.min.js" const browserESM = childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'npm', 'browser.ts'), '--bundle', '--target=' + esmBrowserTarget, '--format=esm', '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--define:WEB_WORKER_SOURCE_CODE=' + JSON.stringify(wasmWorkerCodeESM), '--log-level=warning', ].concat(minifyFlags), { cwd: repoDir }).toString().replace('WEB_WORKER_FUNCTION', wasmWorkerCodeESM) fs.writeFileSync(path.join(esmDir, minify ? 'browser.min.js' : 'browser.js'), browserESM) } // Join with the asynchronous WebAssembly build await goBuildPromise // Also copy this into the WebAssembly shim directories for (const dir of [ path.join(repoDir, 'npm', '@esbuild', 'android-arm'), path.join(repoDir, 'npm', '@esbuild', 'android-x64'), ]) { fs.mkdirSync(path.join(dir, 'bin'), { recursive: true }) fs.writeFileSync(path.join(dir, 'wasm_exec.js'), wasm_exec_js) fs.writeFileSync(path.join(dir, 'wasm_exec_node.js'), wasm_exec_node_js) fs.copyFileSync(path.join(npmWasmDir, 'bin', 'esbuild'), path.join(dir, 'bin', 'esbuild')) fs.copyFileSync(path.join(npmWasmDir, 'esbuild.wasm'), path.join(dir, 'esbuild.wasm')) } } const buildDenoLib = async (esbuildPath) => { // Generate "deno/esbuild/mod.js" childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'deno', 'mod.ts'), '--bundle', '--outfile=' + path.join(denoDir, 'mod.js'), '--target=esnext', '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--platform=neutral', '--log-level=warning', '--banner:js=/// <reference types="./mod.d.ts" />', ], { cwd: repoDir }) // Generate "deno/esbuild/wasm.js" const GOROOT = childProcess.execFileSync('go', ['env', 'GOROOT']).toString().trim() let wasm_exec_js = fs.readFileSync(path.join(GOROOT, 'misc', 'wasm', 'wasm_exec.js'), 'utf8') const wasmWorkerCode = await generateWorkerCode({ esbuildPath, wasm_exec_js, minify: true, target: 'esnext' }) const modWASM = childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'deno', 'wasm.ts'), '--bundle', '--target=esnext', '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--define:WEB_WORKER_SOURCE_CODE=' + JSON.stringify(wasmWorkerCode), '--platform=neutral', '--log-level=warning', '--banner:js=/// <reference types="./wasm.d.ts" />', ], { cwd: repoDir }).toString().replace('WEB_WORKER_FUNCTION', wasmWorkerCode) fs.writeFileSync(path.join(denoDir, 'wasm.js'), modWASM) // Generate "deno/esbuild/mod.d.ts" const types_ts = fs.readFileSync(path.join(repoDir, 'lib', 'shared', 
'types.ts'), 'utf8') + `\n// Unlike node, Deno lacks the necessary APIs to clean up child processes` + `\n// automatically. You must manually call stop() in Deno when you're done` + `\n// using esbuild or Deno will continue running forever.` + `\nexport function stop(): void;` + `\n` fs.writeFileSync(path.join(denoDir, 'mod.d.ts'), types_ts) fs.writeFileSync(path.join(denoDir, 'wasm.d.ts'), types_ts) // And copy the WebAssembly file over to the Deno library as well fs.copyFileSync(path.join(repoDir, 'npm', 'esbuild-wasm', 'esbuild.wasm'), path.join(repoDir, 'deno', 'esbuild.wasm')) } // Writing a file atomically is important for watch mode tests since we don't // want to read the file after it has been truncated but before the new contents // have been written. exports.writeFileAtomic = (where, contents) => { // Note: Can't use "os.tmpdir()" because that doesn't work on Windows. CI runs // tests on D:\ and the temporary directory is on C:\ or the other way around. // And apparently it's impossible to move files between C:\ and D:\ or something. // So we have to write the file in the same directory as the destination. This is // unfortunate because it will unnecessarily trigger extra watch mode rebuilds. // So we have to make our tests extra robust so they can still work with random // extra rebuilds thrown in. const file = path.join(path.dirname(where), '.esbuild-atomic-file-' + Math.random().toString(36).slice(2)) fs.writeFileSync(file, contents) fs.renameSync(file, where) } exports.buildBinary = () => { childProcess.execFileSync('go', ['build', '-ldflags=-s -w', '-trimpath', './cmd/esbuild'], { cwd: repoDir, stdio: 'ignore' }) return path.join(repoDir, process.platform === 'win32' ? 'esbuild.exe' : 'esbuild') } exports.removeRecursiveSync = path => { try { fs.rmSync(path, { recursive: true }) } catch (e) { // Removing stuff on Windows is flaky and unreliable. Don't fail tests // on CI if Windows is just being a pain. Common causes of flakes include // random EPERM and ENOTEMPTY errors. // // The general "solution" to this is to try asking Windows to redo the // failing operation repeatedly until eventually giving up after a // timeout. But that doesn't guarantee that flakes will be fixed so we // just give up instead. People that want reasonable file system // behavior on Windows should use WSL instead. } } const updateVersionPackageJSON = pathToPackageJSON => { const version = fs.readFileSync(path.join(path.dirname(__dirname), 'version.txt'), 'utf8').trim() const json = JSON.parse(fs.readFileSync(pathToPackageJSON, 'utf8')) if (json.version !== version) { json.version = version fs.writeFileSync(pathToPackageJSON, JSON.stringify(json, null, 2) + '\n') } } exports.installForTests = () => { // Build the "esbuild" binary and library const esbuildPath = exports.buildBinary() buildNeutralLib(esbuildPath) // Install the "esbuild" package to a temporary directory. On Windows, it's // sometimes randomly impossible to delete this installation directory. My // best guess is that this is because the esbuild process is kept alive until // the process exits for "buildSync" and "transformSync", and that sometimes // prevents Windows from deleting the directory it's in. The call in tests to // "rimraf.sync()" appears to hang when this happens. Other operating systems // don't have a problem with this. This has only been a problem on the Windows // VM in GitHub CI. I cannot reproduce this issue myself. 
const installDir = path.join(os.tmpdir(), 'esbuild-' + Math.random().toString(36).slice(2)) const env = { ...process.env, ESBUILD_BINARY_PATH: esbuildPath } fs.mkdirSync(installDir) fs.writeFileSync(path.join(installDir, 'package.json'), '{}') childProcess.execSync(`npm pack --silent "${npmDir}"`, { cwd: installDir, stdio: 'inherit' }) childProcess.execSync(`npm install --silent --no-audit --no-optional --progress=false esbuild-${version}.tgz`, { cwd: installDir, env, stdio: 'inherit' }) // Evaluate the code const ESBUILD_PACKAGE_PATH = path.join(installDir, 'node_modules', 'esbuild') const mod = require(ESBUILD_PACKAGE_PATH) Object.defineProperty(mod, 'ESBUILD_PACKAGE_PATH', { value: ESBUILD_PACKAGE_PATH }) return mod } const updateVersionGo = () => { const version_txt = fs.readFileSync(path.join(repoDir, 'version.txt'), 'utf8').trim() const version_go = `package main\n\nconst esbuildVersion = "${version_txt}"\n` const version_go_path = path.join(repoDir, 'cmd', 'esbuild', 'version.go') // Update this atomically to avoid issues with this being overwritten during use const temp_path = version_go_path + Math.random().toString(36).slice(1) fs.writeFileSync(temp_path, version_go) fs.renameSync(temp_path, version_go_path) } // This is helpful for ES6 modules which don't have access to __dirname exports.dirname = __dirname // The main Makefile invokes this script before publishing if (require.main === module) { if (process.argv.indexOf('--wasm') >= 0) exports.buildWasmLib(process.argv[2]) else if (process.argv.indexOf('--deno') >= 0) buildDenoLib(process.argv[2]) else if (process.argv.indexOf('--version') >= 0) updateVersionPackageJSON(process.argv[2]) else if (process.argv.indexOf('--neutral') >= 0) buildNeutralLib(process.argv[2]) else if (process.argv.indexOf('--update-version-go') >= 0) updateVersionGo() else throw new Error('Expected a flag') }
scripts/esbuild.js
1
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.0243904460221529, 0.0010941318469122052, 0.0001641256531002, 0.00017447177378926426, 0.003940258640795946 ]
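The scripts/esbuild.js record above relies on a write-then-rename pattern in two places (writeFileAtomic and updateVersionGo) so that watchers and concurrent readers never observe a half-written file. A minimal standalone sketch of that pattern follows; the helper name and target path are illustrative and not taken from the repository.

// Sketch of the atomic-write pattern described in the record above: write to a
// temporary file in the same directory, then rename it over the destination.
// rename() replaces the target in one step, so readers never see partial data.
const fs = require('fs')
const path = require('path')

function writeFileAtomicSketch(where, contents) {
  // The temporary file must live next to the destination so the final rename
  // stays on a single file system (cross-device renames fail).
  const tmp = path.join(path.dirname(where), '.tmp-' + Math.random().toString(36).slice(2))
  fs.writeFileSync(tmp, contents)
  fs.renameSync(tmp, where)
}

writeFileAtomicSketch('./version.txt', '0.17.10\n')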
{ "id": 1, "code_window": [ " }\n", " throw err\n", " }\n", " if (stdout !== ESBUILD_VERSION) {\n", " throw new Error(`Expected ${JSON.stringify(ESBUILD_VERSION)} but got ${JSON.stringify(stdout)}`)\n", " }\n", "}\n", "\n", "function isYarn(): boolean {\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ " if (stdout !== versionFromPackageJSON) {\n", " throw new Error(`Expected ${JSON.stringify(versionFromPackageJSON)} but got ${JSON.stringify(stdout)}`)\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 50 }
package logger_test import ( "testing" "github.com/evanw/esbuild/internal/logger" "github.com/evanw/esbuild/internal/test" ) func TestMsgIDs(t *testing.T) { for id := logger.MsgID_None; id <= logger.MsgID_END; id++ { str := logger.MsgIDToString(id) if str == "" { continue } overrides := make(map[logger.MsgID]logger.LogLevel) logger.StringToMsgIDs(str, logger.LevelError, overrides) if len(overrides) == 0 { t.Fatalf("Failed to find message id(s) for the string %q", str) } for k, v := range overrides { test.AssertEqual(t, logger.MsgIDToString(k), str) test.AssertEqual(t, v, logger.LevelError) } } }
internal/logger/logger_test.go
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.00017546344315633178, 0.00017219316214323044, 0.00017043539264705032, 0.000170680636074394, 0.0000023146076273405924 ]
{ "id": 1, "code_window": [ " }\n", " throw err\n", " }\n", " if (stdout !== ESBUILD_VERSION) {\n", " throw new Error(`Expected ${JSON.stringify(ESBUILD_VERSION)} but got ${JSON.stringify(stdout)}`)\n", " }\n", "}\n", "\n", "function isYarn(): boolean {\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ " if (stdout !== versionFromPackageJSON) {\n", " throw new Error(`Expected ${JSON.stringify(versionFromPackageJSON)} but got ${JSON.stringify(stdout)}`)\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 50 }
{ "name": "esbuild-wasm", "version": "0.17.10", "description": "The cross-platform WebAssembly binary for esbuild, a JavaScript bundler.", "repository": "https://github.com/evanw/esbuild", "license": "MIT", "engines": { "node": ">=12" }, "main": "lib/main.js", "browser": "lib/browser.js", "types": "lib/main.d.ts", "directories": { "bin": "bin" } }
npm/esbuild-wasm/package.json
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.00017230816592928022, 0.000171973486430943, 0.00017163879238069057, 0.000171973486430943, 3.346867742948234e-7 ]
{ "id": 1, "code_window": [ " }\n", " throw err\n", " }\n", " if (stdout !== ESBUILD_VERSION) {\n", " throw new Error(`Expected ${JSON.stringify(ESBUILD_VERSION)} but got ${JSON.stringify(stdout)}`)\n", " }\n", "}\n", "\n", "function isYarn(): boolean {\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ " if (stdout !== versionFromPackageJSON) {\n", " throw new Error(`Expected ${JSON.stringify(versionFromPackageJSON)} but got ${JSON.stringify(stdout)}`)\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 50 }
# This file is generated by running "yarn install" inside your project. # Manual changes might be lost - proceed with caution! __metadata: version: 7 cacheKey: 9 "@tokenizer/token@npm:^0.3.0": version: 0.3.0 resolution: "@tokenizer/token@npm:0.3.0" checksum: 0154f4fecd335fc121f78d1a697d90833943c855be7f39f22012f1ac75f6241b37f07025ad4ce6ec2576730e2253d4ef2698364a20d93b749bcdb393dafed93e languageName: node linkType: hard "@vue/tsconfig@npm:0.1.3": version: 0.1.3 resolution: "@vue/tsconfig@npm:0.1.3" peerDependencies: "@types/node": "*" peerDependenciesMeta: "@types/node": optional: true checksum: d086593aea25e7872f28227dec3102d232d47099d16933d483b69ec746ac4dc254b6bd610deeab059d0b8b2d72a0738ee77e4c60f6e8244aedb2e99d633c38d4 languageName: node linkType: hard "aws-jwt-verify@npm:3.1.0": version: 3.1.0 resolution: "aws-jwt-verify@npm:3.1.0" checksum: 7910e4fb3da606bfdb627908d6da02941a5adfc28324b8065e782ba9116eb4ed23bb83fe4d5edc3853897e93ebe1f26719812051459520b2c8187ebf31a2b552 languageName: node linkType: hard "d3-array@npm:2 - 3": version: 3.2.0 resolution: "d3-array@npm:3.2.0" dependencies: internmap: "npm:1 - 2" checksum: d5856c973d5ab72a57b68f3fa843d527683a9ac26af2684766e07df7f9e663ce3623bc8aed68057c54f7009393e6f15680718526cd05f65f811697e38d3da1a8 languageName: node linkType: hard "d3-time@npm:3.0.0": version: 3.0.0 resolution: "d3-time@npm:3.0.0" dependencies: d3-array: "npm:2 - 3" checksum: 2e9a13ad045ab5691edbfff633c602673670aabb160c06aa220932cc2512d66cfbce746cbb035ebc5968fccf6072ae89f27026504d972538698870d026987d36 languageName: node linkType: hard "foo@workspace:*, foo@workspace:foo": version: 0.0.0-use.local resolution: "foo@workspace:foo" languageName: unknown linkType: soft "internmap@npm:1 - 2": version: 2.0.3 resolution: "internmap@npm:2.0.3" checksum: 27c28dc08e432d4bb9fc70d74a59536e893d60fcb6b7078a70e8b0fbe1ed7fe8a01c4b4931ff50bbdab3d1411cb6050f5119d4c513dc663b7af2d2076efa56a6 languageName: node linkType: hard "js-tokens@npm:^3.0.0 || ^4.0.0": version: 4.0.0 resolution: "js-tokens@npm:4.0.0" checksum: 47d1c18dc6b9eed4baf1db3d81b36feb95b463201c82ffce0d7a4d65ede596ba97d6ac2468974199705db9ef8a3433606af41fc7bbe7cb25c1dd601785413d9b languageName: node linkType: hard "loose-envify@npm:^1.1.0": version: 1.4.0 resolution: "loose-envify@npm:1.4.0" dependencies: js-tokens: "npm:^3.0.0 || ^4.0.0" bin: loose-envify: cli.js checksum: 39c5fc44c6a8f7f8a92cccf174554fbb307477ef493760407920fdd4ed5f6cc1aec5b6a5ab3c3767ef79547b3e1aea09d8ca08d773232c662d910cfe473a0590 languageName: node linkType: hard "mime@npm:3.0.0": version: 3.0.0 resolution: "mime@npm:3.0.0" bin: mime: cli.js checksum: b00613ec79e1f14586c970b6651afca77947f972eca6086ccb614c2b7a1a899d0ec38c6f4418370ecb9d0cebeb4ad300999b6b7f2dcbeaf40f9e0d55874b6c81 languageName: node linkType: hard "peek-readable@npm:^5.0.0": version: 5.0.0 resolution: "peek-readable@npm:5.0.0" checksum: 9350acc783b1b01c956e07f1e010d25b7e3f995719fe08d7deb5d2ac782550875ef57006304a566a0cac14e9c5d5ba95c1c09888a79ddf2d8e2682a51e833516 languageName: node linkType: hard "react-dom@npm:18.2.0": version: 18.2.0 resolution: "react-dom@npm:18.2.0" dependencies: loose-envify: "npm:^1.1.0" scheduler: "npm:^0.23.0" peerDependencies: react: ^18.2.0 checksum: 7c5b915fb793d63563cec1f721e059e6ff0e2855ac116ab5cb7450b6c59398f5e25f95c960ce5cb93504cc58ab724a75a78e99282354e702a0e667d0d787d028 languageName: node linkType: hard "react@npm:18.2.0": version: 18.2.0 resolution: "react@npm:18.2.0" dependencies: loose-envify: "npm:^1.1.0" checksum: 
8434e5782c52b3bf18a80b666348977924ee3827895fa03ec3ffb9faca90c460049f14130428dd1546bab6cf3b2c277f2c243d3c2a856501331d2e69c24b2bb9 languageName: node linkType: hard "root-workspace-0b6124@workspace:.": version: 0.0.0-use.local resolution: "root-workspace-0b6124@workspace:." dependencies: "@vue/tsconfig": "npm:0.1.3" aws-jwt-verify: "npm:3.1.0" d3-time: "npm:3.0.0" foo: "workspace:*" mime: "npm:3.0.0" react: "npm:18.2.0" react-dom: "npm:18.2.0" strtok3: "npm:7.0.0" languageName: unknown linkType: soft "scheduler@npm:^0.23.0": version: 0.23.0 resolution: "scheduler@npm:0.23.0" dependencies: loose-envify: "npm:^1.1.0" checksum: f4022b95cdc282668643da4850f55fe70c899aa956d11819f196e2ca892271bdb253613e53997852094f9351f7c72d057eea8b28d9b4bcb93bcb1c6d09985c82 languageName: node linkType: hard "strtok3@npm:7.0.0": version: 7.0.0 resolution: "strtok3@npm:7.0.0" dependencies: "@tokenizer/token": "npm:^0.3.0" peek-readable: "npm:^5.0.0" checksum: 0d3b800599678de1dae6eb103a850080564c31d48222c155aafa51dff05431fdb9f1863b48c094ec39c5b63245f03427a4ab1ad783c8903df44eae4c9c7badf0 languageName: node linkType: hard
require/yarnpnp/yarn.lock
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.00017856374324765056, 0.00017158864648081362, 0.00016370303637813777, 0.00017225689953193069, 0.0000037963452541589504 ]
{ "id": 2, "code_window": [ " // desired package. Try to avoid unnecessary log output. This uses the \"npm\"\n", " // command instead of a HTTP request so that it hopefully works in situations\n", " // where HTTP requests are blocked but the \"npm\" command still works due to,\n", " // for example, a custom configured npm registry and special firewall rules.\n", " child_process.execSync(`npm install --loglevel=error --prefer-offline --no-audit --progress=false ${pkg}@${ESBUILD_VERSION}`,\n", " { cwd: installDir, stdio: 'pipe', env })\n", "\n", " // Move the downloaded binary executable into place. The destination path\n", " // is the same one that the JavaScript API code uses so it will be able to\n", " // find the binary executable here later.\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " child_process.execSync(`npm install --loglevel=error --prefer-offline --no-audit --progress=false ${pkg}@${versionFromPackageJSON}`,\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 117 }
import { downloadedBinPath, ESBUILD_BINARY_PATH, isValidBinaryPath, pkgAndSubpathForCurrentPlatform } from './node-platform' import fs = require('fs') import os = require('os') import path = require('path') import zlib = require('zlib') import https = require('https') import child_process = require('child_process') declare const ESBUILD_VERSION: string const toPath = path.join(__dirname, 'bin', 'esbuild') let isToPathJS = true function validateBinaryVersion(...command: string[]): void { command.push('--version') let stdout: string try { stdout = child_process.execFileSync(command.shift()!, command, { // Without this, this install script strangely crashes with the error // "EACCES: permission denied, write" but only on Ubuntu Linux when node is // installed from the Snap Store. This is not a problem when you download // the official version of node. The problem appears to be that stderr // (i.e. file descriptor 2) isn't writable? // // More info: // - https://snapcraft.io/ (what the Snap Store is) // - https://nodejs.org/dist/ (download the official version of node) // - https://github.com/evanw/esbuild/issues/1711#issuecomment-1027554035 // stdio: 'pipe', }).toString().trim() } catch (err) { if (os.platform() === 'darwin' && /_SecTrustEvaluateWithError/.test(err + '')) { let os = 'this version of macOS' try { os = 'macOS ' + child_process.execFileSync('sw_vers', ['-productVersion']).toString().trim() } catch { } throw new Error(`The "esbuild" package cannot be installed because ${os} is too outdated. The Go compiler (which esbuild relies on) no longer supports ${os}, which means the "esbuild" binary executable can't be run. You can either: * Update your version of macOS to one that the Go compiler supports * Use the "esbuild-wasm" package instead of the "esbuild" package * Build esbuild yourself using an older version of the Go compiler `) } throw err } if (stdout !== ESBUILD_VERSION) { throw new Error(`Expected ${JSON.stringify(ESBUILD_VERSION)} but got ${JSON.stringify(stdout)}`) } } function isYarn(): boolean { const { npm_config_user_agent } = process.env if (npm_config_user_agent) { return /\byarn\//.test(npm_config_user_agent) } return false } function fetch(url: string): Promise<Buffer> { return new Promise((resolve, reject) => { https.get(url, res => { if ((res.statusCode === 301 || res.statusCode === 302) && res.headers.location) return fetch(res.headers.location).then(resolve, reject) if (res.statusCode !== 200) return reject(new Error(`Server responded with ${res.statusCode}`)) let chunks: Buffer[] = [] res.on('data', chunk => chunks.push(chunk)) res.on('end', () => resolve(Buffer.concat(chunks))) }).on('error', reject) }) } function extractFileFromTarGzip(buffer: Buffer, subpath: string): Buffer { try { buffer = zlib.unzipSync(buffer) } catch (err: any) { throw new Error(`Invalid gzip data in archive: ${err && err.message || err}`) } let str = (i: number, n: number) => String.fromCharCode(...buffer.subarray(i, i + n)).replace(/\0.*$/, '') let offset = 0 subpath = `package/${subpath}` while (offset < buffer.length) { let name = str(offset, 100) let size = parseInt(str(offset + 124, 12), 8) offset += 512 if (!isNaN(size)) { if (name === subpath) return buffer.subarray(offset, offset + size) offset += (size + 511) & ~511 } } throw new Error(`Could not find ${JSON.stringify(subpath)} in archive`) } function installUsingNPM(pkg: string, subpath: string, binPath: string): void { // Erase "npm_config_global" so that "npm install --global esbuild" works. 
// Otherwise this nested "npm install" will also be global, and the install // will deadlock waiting for the global installation lock. const env = { ...process.env, npm_config_global: undefined } // Create a temporary directory inside the "esbuild" package with an empty // "package.json" file. We'll use this to run "npm install" in. const esbuildLibDir = path.dirname(require.resolve('esbuild')) const installDir = path.join(esbuildLibDir, 'npm-install') fs.mkdirSync(installDir) try { fs.writeFileSync(path.join(installDir, 'package.json'), '{}') // Run "npm install" in the temporary directory which should download the // desired package. Try to avoid unnecessary log output. This uses the "npm" // command instead of a HTTP request so that it hopefully works in situations // where HTTP requests are blocked but the "npm" command still works due to, // for example, a custom configured npm registry and special firewall rules. child_process.execSync(`npm install --loglevel=error --prefer-offline --no-audit --progress=false ${pkg}@${ESBUILD_VERSION}`, { cwd: installDir, stdio: 'pipe', env }) // Move the downloaded binary executable into place. The destination path // is the same one that the JavaScript API code uses so it will be able to // find the binary executable here later. const installedBinPath = path.join(installDir, 'node_modules', pkg, subpath) fs.renameSync(installedBinPath, binPath) } finally { // Try to clean up afterward so we don't unnecessarily waste file system // space. Leaving nested "node_modules" directories can also be problematic // for certain tools that scan over the file tree and expect it to have a // certain structure. try { removeRecursive(installDir) } catch { // Removing a file or directory can randomly break on Windows, returning // EBUSY for an arbitrary length of time. I think this happens when some // other program has that file or directory open (e.g. an anti-virus // program). This is fine on Unix because the OS just unlinks the entry // but keeps the reference around until it's unused. There's nothing we // can do in this case so we just leave the directory there. } } } function removeRecursive(dir: string): void { for (const entry of fs.readdirSync(dir)) { const entryPath = path.join(dir, entry) let stats try { stats = fs.lstatSync(entryPath) } catch { continue; // Guard against https://github.com/nodejs/node/issues/4760 } if (stats.isDirectory()) removeRecursive(entryPath) else fs.unlinkSync(entryPath) } fs.rmdirSync(dir) } function applyManualBinaryPathOverride(overridePath: string): void { // Patch the CLI use case (the "esbuild" command) const pathString = JSON.stringify(overridePath) fs.writeFileSync(toPath, `#!/usr/bin/env node\n` + `require('child_process').execFileSync(${pathString}, process.argv.slice(2), { stdio: 'inherit' });\n`) // Patch the JS API use case (the "require('esbuild')" workflow) const libMain = path.join(__dirname, 'lib', 'main.js') const code = fs.readFileSync(libMain, 'utf8') fs.writeFileSync(libMain, `var ESBUILD_BINARY_PATH = ${pathString};\n${code}`) } function maybeOptimizePackage(binPath: string): void { // This package contains a "bin/esbuild" JavaScript file that finds and runs // the appropriate binary executable. However, this means that running the // "esbuild" command runs another instance of "node" which is way slower than // just running the binary executable directly. // // Here we optimize for this by replacing the JavaScript file with the binary // executable at install time. 
This optimization does not work on Windows // because on Windows the binary executable must be called "esbuild.exe" // instead of "esbuild". // // This also doesn't work with Yarn both because of lack of support for binary // files in Yarn 2+ (see https://github.com/yarnpkg/berry/issues/882) and // because Yarn (even Yarn 1?) may run the same install scripts in the same // place multiple times from different platforms, especially when people use // Docker. Avoid idempotency issues by just not optimizing when using Yarn. // // This optimization also doesn't apply when npm's "--ignore-scripts" flag is // used since in that case this install script will not be run. if (os.platform() !== 'win32' && !isYarn()) { const tempPath = path.join(__dirname, 'bin-esbuild') try { // First link the binary with a temporary file. If this fails and throws an // error, then we'll just end up doing nothing. This uses a hard link to // avoid taking up additional space on the file system. fs.linkSync(binPath, tempPath) // Then use rename to atomically replace the target file with the temporary // file. If this fails and throws an error, then we'll just end up leaving // the temporary file there, which is harmless. fs.renameSync(tempPath, toPath) // If we get here, then we know that the target location is now a binary // executable instead of a JavaScript file. isToPathJS = false // If this install script is being re-run, then "renameSync" will fail // since the underlying inode is the same (it just returns without doing // anything, and without throwing an error). In that case we should remove // the file manually. fs.unlinkSync(tempPath) } catch { // Ignore errors here since this optimization is optional } } } async function downloadDirectlyFromNPM(pkg: string, subpath: string, binPath: string): Promise<void> { // If that fails, the user could have npm configured incorrectly or could not // have npm installed. Try downloading directly from npm as a last resort. const url = `https://registry.npmjs.org/${pkg}/-/${pkg.replace('@esbuild/', '')}-${ESBUILD_VERSION}.tgz` console.error(`[esbuild] Trying to download ${JSON.stringify(url)}`) try { fs.writeFileSync(binPath, extractFileFromTarGzip(await fetch(url), subpath)) fs.chmodSync(binPath, 0o755) } catch (e: any) { console.error(`[esbuild] Failed to download ${JSON.stringify(url)}: ${e && e.message || e}`) throw e } } async function checkAndPreparePackage(): Promise<void> { // This feature was added to give external code a way to modify the binary // path without modifying the code itself. Do not remove this because // external code relies on this (in addition to esbuild's own test suite). if (isValidBinaryPath(ESBUILD_BINARY_PATH)) { if (!fs.existsSync(ESBUILD_BINARY_PATH)) { console.warn(`[esbuild] Ignoring bad configuration: ESBUILD_BINARY_PATH=${ESBUILD_BINARY_PATH}`) } else { applyManualBinaryPathOverride(ESBUILD_BINARY_PATH) return } } const { pkg, subpath } = pkgAndSubpathForCurrentPlatform() let binPath: string try { // First check for the binary package from our "optionalDependencies". This // package should have been installed alongside this package at install time. binPath = require.resolve(`${pkg}/${subpath}`) } catch (e) { console.error(`[esbuild] Failed to find package "${pkg}" on the file system This can happen if you use the "--no-optional" flag. The "optionalDependencies" package.json feature is used by esbuild to install the correct binary executable for your current platform. This install script will now attempt to work around this. 
If that fails, you need to remove the "--no-optional" flag to use esbuild. `) // If that didn't work, then someone probably installed esbuild with the // "--no-optional" flag. Attempt to compensate for this by downloading the // package using a nested call to "npm" instead. // // THIS MAY NOT WORK. Package installation uses "optionalDependencies" for // a reason: manually downloading the package has a lot of obscure edge // cases that fail because people have customized their environment in // some strange way that breaks downloading. This code path is just here // to be helpful but it's not the supported way of installing esbuild. binPath = downloadedBinPath(pkg, subpath) try { console.error(`[esbuild] Trying to install package "${pkg}" using npm`) installUsingNPM(pkg, subpath, binPath) } catch (e2: any) { console.error(`[esbuild] Failed to install package "${pkg}" using npm: ${e2 && e2.message || e2}`) // If that didn't also work, then something is likely wrong with the "npm" // command. Attempt to compensate for this by manually downloading the // package from the npm registry over HTTP as a last resort. try { await downloadDirectlyFromNPM(pkg, subpath, binPath) } catch (e3: any) { throw new Error(`Failed to install package "${pkg}"`) } } } maybeOptimizePackage(binPath) } checkAndPreparePackage().then(() => { if (isToPathJS) { // We need "node" before this command since it's a JavaScript file validateBinaryVersion(process.execPath, toPath) } else { // This is no longer a JavaScript file so don't run it using "node" validateBinaryVersion(toPath) } })
lib/npm/node-install.ts
1
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.9482646584510803, 0.06033407896757126, 0.00016263665747828782, 0.0004933485761284828, 0.22200246155261993 ]
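The hunk above (and its duplicates earlier in this section) replaces the compile-time ESBUILD_VERSION constant in lib/npm/node-install.ts with a versionFromPackageJSON value. These records do not include the part of the diff that defines that value, so the following is only a plausible sketch of how an install script could derive it, not the commit's actual code.

// Assumption: the install script sits next to the package's own package.json
// and can read the published version from it at install time. The variable
// name matches the after_edit lines above; its real definition is not shown
// in these records.
const fs = require('fs')
const path = require('path')

const versionFromPackageJSON = JSON.parse(
  fs.readFileSync(path.join(__dirname, 'package.json'), 'utf8'),
).version

// The checks in the record compare this value against "esbuild --version"
// output and use it to pin the nested npm install and the tarball download.
console.log('expected esbuild version:', versionFromPackageJSON)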
{ "id": 2, "code_window": [ " // desired package. Try to avoid unnecessary log output. This uses the \"npm\"\n", " // command instead of a HTTP request so that it hopefully works in situations\n", " // where HTTP requests are blocked but the \"npm\" command still works due to,\n", " // for example, a custom configured npm registry and special firewall rules.\n", " child_process.execSync(`npm install --loglevel=error --prefer-offline --no-audit --progress=false ${pkg}@${ESBUILD_VERSION}`,\n", " { cwd: installDir, stdio: 'pipe', env })\n", "\n", " // Move the downloaded binary executable into place. The destination path\n", " // is the same one that the JavaScript API code uses so it will be able to\n", " // find the binary executable here later.\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " child_process.execSync(`npm install --loglevel=error --prefer-offline --no-audit --progress=false ${pkg}@${versionFromPackageJSON}`,\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 117 }
<!DOCTYPE html> <p> This script checks to see what characters need to be escaped in a data URL (in addition to % for percent-encoded hexadecimal escapes) for a browser to parse it correctly. This information is used to implement esbuild's <code>dataurl</code> loader. Here is what your current browser requires: </p> <pre id="result"></pre> <p> The answer that works across Chrome, Firefox, and Safari appears to be: <br> <br>Always percent-encode these values: <code>0x09, 0x0A, 0x0D, 0x23</code> <br>Only percent-encode these values in the trailing position: <code>0x00 to 0x08, 0x0B, 0x0C, 0x0E to 0x20</code> </p> <script> function percentEncode(i) { if (i >= 0x80) return encodeURIComponent(String.fromCharCode(i)) return '%' + (0x100 | i).toString(16).slice(-2) } async function urlDoesDecodeTo(url, to) { return to === await fetch(url).then(r => r.text()) } async function check() { const shouldEncode = [] for (let i = 0; i <= 0xFF; i++) { const ch = String.fromCharCode(i) const chPercent = percentEncode(i) if (!await urlDoesDecodeTo('data:text/plain,' + chPercent, ch)) { throw new Error('Assertion failed: Cannot decode ' + chPercent) } const leading = await urlDoesDecodeTo('data:text/plain,' + ch + 'foo', ch + 'foo') const trailing = await urlDoesDecodeTo('data:text/plain,foo' + ch, 'foo' + ch) const embedded = await urlDoesDecodeTo('data:text/plain,foo' + ch + 'foo', 'foo' + ch + 'foo') if (!leading && !trailing && !embedded) { shouldEncode.push('U+' + i.toString(16) + ' (' + ch + ')') } else { if (!leading) shouldEncode.push('U+' + i.toString(16) + ' (' + ch + ') leading') if (!trailing) shouldEncode.push('U+' + i.toString(16) + ' (' + ch + ') trailing') if (!embedded) shouldEncode.push('U+' + i.toString(16) + ' (' + ch + ') embedded') } } document.getElementById('result').textContent = 'shouldEncode = ' + JSON.stringify(shouldEncode, null, 2) } check() </script>
scripts/dataurl-escapes.html
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.00017622701125219464, 0.00017119287804234773, 0.000160981944645755, 0.00017304570064879954, 0.000004985224222764373 ]
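The page in the record above ends with a concrete cross-browser rule for text data URLs: always percent-encode %, 0x09, 0x0A, 0x0D and '#', and escape the remaining listed control/space characters only when they are the last character of the payload. The sketch below applies that rule directly; it is an illustration, not esbuild's actual dataurl loader code.

// Sketch of a text data URL encoder following the measured rules above.
function encodeDataURLText(text) {
  const esc = ch => '%' + ch.charCodeAt(0).toString(16).toUpperCase().padStart(2, '0')
  // Always escape: %, tab, newline, carriage return and '#'.
  let out = text.replace(/[%\x09\x0A\x0D#]/g, esc)
  // Escape 0x00-0x08, 0x0B, 0x0C and 0x0E-0x20 only in the trailing position.
  out = out.replace(/[\x00-\x08\x0B\x0C\x0E-\x20]$/, esc)
  return 'data:text/plain,' + out
}

console.log(encodeDataURLText('hello #world ')) // data:text/plain,hello %23world%20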
{ "id": 2, "code_window": [ " // desired package. Try to avoid unnecessary log output. This uses the \"npm\"\n", " // command instead of a HTTP request so that it hopefully works in situations\n", " // where HTTP requests are blocked but the \"npm\" command still works due to,\n", " // for example, a custom configured npm registry and special firewall rules.\n", " child_process.execSync(`npm install --loglevel=error --prefer-offline --no-audit --progress=false ${pkg}@${ESBUILD_VERSION}`,\n", " { cwd: installDir, stdio: 'pipe', env })\n", "\n", " // Move the downloaded binary executable into place. The destination path\n", " // is the same one that the JavaScript API code uses so it will be able to\n", " // find the binary executable here later.\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " child_process.execSync(`npm install --loglevel=error --prefer-offline --no-audit --progress=false ${pkg}@${versionFromPackageJSON}`,\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 117 }
# esbuild This is the macOS ARM 64-bit binary for esbuild, a JavaScript bundler and minifier. See https://github.com/evanw/esbuild for details.
npm/@esbuild/darwin-arm64/README.md
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.001277686096727848, 0.001277686096727848, 0.001277686096727848, 0.001277686096727848, 0 ]
{ "id": 2, "code_window": [ " // desired package. Try to avoid unnecessary log output. This uses the \"npm\"\n", " // command instead of a HTTP request so that it hopefully works in situations\n", " // where HTTP requests are blocked but the \"npm\" command still works due to,\n", " // for example, a custom configured npm registry and special firewall rules.\n", " child_process.execSync(`npm install --loglevel=error --prefer-offline --no-audit --progress=false ${pkg}@${ESBUILD_VERSION}`,\n", " { cwd: installDir, stdio: 'pipe', env })\n", "\n", " // Move the downloaded binary executable into place. The destination path\n", " // is the same one that the JavaScript API code uses so it will be able to\n", " // find the binary executable here later.\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " child_process.execSync(`npm install --loglevel=error --prefer-offline --no-audit --progress=false ${pkg}@${versionFromPackageJSON}`,\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 117 }
package helpers import ( "strings" "unicode/utf8" ) func ContainsNonBMPCodePoint(text string) bool { for _, c := range text { if c > 0xFFFF { return true } } return false } // This does "ContainsNonBMPCodePoint(UTF16ToString(text))" without any allocations func ContainsNonBMPCodePointUTF16(text []uint16) bool { if n := len(text); n > 0 { for i, c := range text[:n-1] { // Check for a high surrogate if c >= 0xD800 && c <= 0xDBFF { // Check for a low surrogate if c2 := text[i+1]; c2 >= 0xDC00 && c2 <= 0xDFFF { return true } } } } return false } func StringToUTF16(text string) []uint16 { decoded := make([]uint16, 0, len(text)) for _, c := range text { if c <= 0xFFFF { decoded = append(decoded, uint16(c)) } else { c -= 0x10000 decoded = append(decoded, uint16(0xD800+((c>>10)&0x3FF)), uint16(0xDC00+(c&0x3FF))) } } return decoded } func UTF16ToString(text []uint16) string { var temp [utf8.UTFMax]byte b := strings.Builder{} n := len(text) for i := 0; i < n; i++ { r1 := rune(text[i]) if r1 >= 0xD800 && r1 <= 0xDBFF && i+1 < n { if r2 := rune(text[i+1]); r2 >= 0xDC00 && r2 <= 0xDFFF { r1 = (r1-0xD800)<<10 | (r2 - 0xDC00) + 0x10000 i++ } } width := encodeWTF8Rune(temp[:], r1) b.Write(temp[:width]) } return b.String() } func UTF16ToStringWithValidation(text []uint16) (string, uint16, bool) { var temp [utf8.UTFMax]byte b := strings.Builder{} n := len(text) for i := 0; i < n; i++ { r1 := rune(text[i]) if r1 >= 0xD800 && r1 <= 0xDBFF { if i+1 < n { if r2 := rune(text[i+1]); r2 >= 0xDC00 && r2 <= 0xDFFF { r1 = (r1-0xD800)<<10 | (r2 - 0xDC00) + 0x10000 i++ } else { return "", uint16(r1), false } } else { return "", uint16(r1), false } } else if r1 >= 0xDC00 && r1 <= 0xDFFF { return "", uint16(r1), false } width := encodeWTF8Rune(temp[:], r1) b.Write(temp[:width]) } return b.String(), 0, true } // Does "UTF16ToString(text) == str" without a temporary allocation func UTF16EqualsString(text []uint16, str string) bool { if len(text) > len(str) { // Strings can't be equal if UTF-16 encoding is longer than UTF-8 encoding return false } var temp [utf8.UTFMax]byte n := len(text) j := 0 for i := 0; i < n; i++ { r1 := rune(text[i]) if r1 >= 0xD800 && r1 <= 0xDBFF && i+1 < n { if r2 := rune(text[i+1]); r2 >= 0xDC00 && r2 <= 0xDFFF { r1 = (r1-0xD800)<<10 | (r2 - 0xDC00) + 0x10000 i++ } } width := encodeWTF8Rune(temp[:], r1) if j+width > len(str) { return false } for k := 0; k < width; k++ { if temp[k] != str[j] { return false } j++ } } return j == len(str) } func UTF16EqualsUTF16(a []uint16, b []uint16) bool { if len(a) == len(b) { for i, c := range a { if c != b[i] { return false } } return true } return false } // This is a clone of "utf8.EncodeRune" that has been modified to encode using // WTF-8 instead. See https://simonsapin.github.io/wtf-8/ for more info. func encodeWTF8Rune(p []byte, r rune) int { // Negative values are erroneous. Making it unsigned addresses the problem. 
switch i := uint32(r); { case i <= 0x7F: p[0] = byte(r) return 1 case i <= 0x7FF: _ = p[1] // eliminate bounds checks p[0] = 0xC0 | byte(r>>6) p[1] = 0x80 | byte(r)&0x3F return 2 case i > utf8.MaxRune: r = utf8.RuneError fallthrough case i <= 0xFFFF: _ = p[2] // eliminate bounds checks p[0] = 0xE0 | byte(r>>12) p[1] = 0x80 | byte(r>>6)&0x3F p[2] = 0x80 | byte(r)&0x3F return 3 default: _ = p[3] // eliminate bounds checks p[0] = 0xF0 | byte(r>>18) p[1] = 0x80 | byte(r>>12)&0x3F p[2] = 0x80 | byte(r>>6)&0x3F p[3] = 0x80 | byte(r)&0x3F return 4 } } // This is a clone of "utf8.DecodeRuneInString" that has been modified to // decode using WTF-8 instead. See https://simonsapin.github.io/wtf-8/ for // more info. func DecodeWTF8Rune(s string) (rune, int) { n := len(s) if n < 1 { return utf8.RuneError, 0 } s0 := s[0] if s0 < 0x80 { return rune(s0), 1 } var sz int if (s0 & 0xE0) == 0xC0 { sz = 2 } else if (s0 & 0xF0) == 0xE0 { sz = 3 } else if (s0 & 0xF8) == 0xF0 { sz = 4 } else { return utf8.RuneError, 1 } if n < sz { return utf8.RuneError, 0 } s1 := s[1] if (s1 & 0xC0) != 0x80 { return utf8.RuneError, 1 } if sz == 2 { cp := rune(s0&0x1F)<<6 | rune(s1&0x3F) if cp < 0x80 { return utf8.RuneError, 1 } return cp, 2 } s2 := s[2] if (s2 & 0xC0) != 0x80 { return utf8.RuneError, 1 } if sz == 3 { cp := rune(s0&0x0F)<<12 | rune(s1&0x3F)<<6 | rune(s2&0x3F) if cp < 0x0800 { return utf8.RuneError, 1 } return cp, 3 } s3 := s[3] if (s3 & 0xC0) != 0x80 { return utf8.RuneError, 1 } cp := rune(s0&0x07)<<18 | rune(s1&0x3F)<<12 | rune(s2&0x3F)<<6 | rune(s3&0x3F) if cp < 0x010000 || cp > 0x10FFFF { return utf8.RuneError, 1 } return cp, 4 }
internal/helpers/utf.go
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.00019033417629543692, 0.00017243181355297565, 0.0001633698120713234, 0.00017245222989004105, 0.0000060998777371423785 ]
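StringToUTF16 and UTF16ToString in the Go file above hinge on surrogate-pair arithmetic for code points outside the Basic Multilingual Plane: values above 0xFFFF are split into a high surrogate carrying the top 10 bits and a low surrogate carrying the bottom 10 bits. The same math, restated as a small standalone JavaScript sketch with an illustrative function name:

// Sketch of the surrogate-pair encoding used by StringToUTF16 above.
function toUTF16Units(codePoint) {
  if (codePoint <= 0xFFFF) return [codePoint]
  const c = codePoint - 0x10000
  return [0xD800 + ((c >> 10) & 0x3FF), 0xDC00 + (c & 0x3FF)]
}

// U+1F600 (a non-BMP emoji) becomes the pair [0xD83D, 0xDE00].
console.log(toUTF16Units(0x1F600).map(u => u.toString(16)))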
{ "id": 3, "code_window": [ "\n", "async function downloadDirectlyFromNPM(pkg: string, subpath: string, binPath: string): Promise<void> {\n", " // If that fails, the user could have npm configured incorrectly or could not\n", " // have npm installed. Try downloading directly from npm as a last resort.\n", " const url = `https://registry.npmjs.org/${pkg}/-/${pkg.replace('@esbuild/', '')}-${ESBUILD_VERSION}.tgz`\n", " console.error(`[esbuild] Trying to download ${JSON.stringify(url)}`)\n", " try {\n", " fs.writeFileSync(binPath, extractFileFromTarGzip(await fetch(url), subpath))\n", " fs.chmodSync(binPath, 0o755)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ " const url = `https://registry.npmjs.org/${pkg}/-/${pkg.replace('@esbuild/', '')}-${versionFromPackageJSON}.tgz`\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 220 }
import { downloadedBinPath, ESBUILD_BINARY_PATH, isValidBinaryPath, pkgAndSubpathForCurrentPlatform } from './node-platform' import fs = require('fs') import os = require('os') import path = require('path') import zlib = require('zlib') import https = require('https') import child_process = require('child_process') declare const ESBUILD_VERSION: string const toPath = path.join(__dirname, 'bin', 'esbuild') let isToPathJS = true function validateBinaryVersion(...command: string[]): void { command.push('--version') let stdout: string try { stdout = child_process.execFileSync(command.shift()!, command, { // Without this, this install script strangely crashes with the error // "EACCES: permission denied, write" but only on Ubuntu Linux when node is // installed from the Snap Store. This is not a problem when you download // the official version of node. The problem appears to be that stderr // (i.e. file descriptor 2) isn't writable? // // More info: // - https://snapcraft.io/ (what the Snap Store is) // - https://nodejs.org/dist/ (download the official version of node) // - https://github.com/evanw/esbuild/issues/1711#issuecomment-1027554035 // stdio: 'pipe', }).toString().trim() } catch (err) { if (os.platform() === 'darwin' && /_SecTrustEvaluateWithError/.test(err + '')) { let os = 'this version of macOS' try { os = 'macOS ' + child_process.execFileSync('sw_vers', ['-productVersion']).toString().trim() } catch { } throw new Error(`The "esbuild" package cannot be installed because ${os} is too outdated. The Go compiler (which esbuild relies on) no longer supports ${os}, which means the "esbuild" binary executable can't be run. You can either: * Update your version of macOS to one that the Go compiler supports * Use the "esbuild-wasm" package instead of the "esbuild" package * Build esbuild yourself using an older version of the Go compiler `) } throw err } if (stdout !== ESBUILD_VERSION) { throw new Error(`Expected ${JSON.stringify(ESBUILD_VERSION)} but got ${JSON.stringify(stdout)}`) } } function isYarn(): boolean { const { npm_config_user_agent } = process.env if (npm_config_user_agent) { return /\byarn\//.test(npm_config_user_agent) } return false } function fetch(url: string): Promise<Buffer> { return new Promise((resolve, reject) => { https.get(url, res => { if ((res.statusCode === 301 || res.statusCode === 302) && res.headers.location) return fetch(res.headers.location).then(resolve, reject) if (res.statusCode !== 200) return reject(new Error(`Server responded with ${res.statusCode}`)) let chunks: Buffer[] = [] res.on('data', chunk => chunks.push(chunk)) res.on('end', () => resolve(Buffer.concat(chunks))) }).on('error', reject) }) } function extractFileFromTarGzip(buffer: Buffer, subpath: string): Buffer { try { buffer = zlib.unzipSync(buffer) } catch (err: any) { throw new Error(`Invalid gzip data in archive: ${err && err.message || err}`) } let str = (i: number, n: number) => String.fromCharCode(...buffer.subarray(i, i + n)).replace(/\0.*$/, '') let offset = 0 subpath = `package/${subpath}` while (offset < buffer.length) { let name = str(offset, 100) let size = parseInt(str(offset + 124, 12), 8) offset += 512 if (!isNaN(size)) { if (name === subpath) return buffer.subarray(offset, offset + size) offset += (size + 511) & ~511 } } throw new Error(`Could not find ${JSON.stringify(subpath)} in archive`) } function installUsingNPM(pkg: string, subpath: string, binPath: string): void { // Erase "npm_config_global" so that "npm install --global esbuild" works. 
// Otherwise this nested "npm install" will also be global, and the install // will deadlock waiting for the global installation lock. const env = { ...process.env, npm_config_global: undefined } // Create a temporary directory inside the "esbuild" package with an empty // "package.json" file. We'll use this to run "npm install" in. const esbuildLibDir = path.dirname(require.resolve('esbuild')) const installDir = path.join(esbuildLibDir, 'npm-install') fs.mkdirSync(installDir) try { fs.writeFileSync(path.join(installDir, 'package.json'), '{}') // Run "npm install" in the temporary directory which should download the // desired package. Try to avoid unnecessary log output. This uses the "npm" // command instead of a HTTP request so that it hopefully works in situations // where HTTP requests are blocked but the "npm" command still works due to, // for example, a custom configured npm registry and special firewall rules. child_process.execSync(`npm install --loglevel=error --prefer-offline --no-audit --progress=false ${pkg}@${ESBUILD_VERSION}`, { cwd: installDir, stdio: 'pipe', env }) // Move the downloaded binary executable into place. The destination path // is the same one that the JavaScript API code uses so it will be able to // find the binary executable here later. const installedBinPath = path.join(installDir, 'node_modules', pkg, subpath) fs.renameSync(installedBinPath, binPath) } finally { // Try to clean up afterward so we don't unnecessarily waste file system // space. Leaving nested "node_modules" directories can also be problematic // for certain tools that scan over the file tree and expect it to have a // certain structure. try { removeRecursive(installDir) } catch { // Removing a file or directory can randomly break on Windows, returning // EBUSY for an arbitrary length of time. I think this happens when some // other program has that file or directory open (e.g. an anti-virus // program). This is fine on Unix because the OS just unlinks the entry // but keeps the reference around until it's unused. There's nothing we // can do in this case so we just leave the directory there. } } } function removeRecursive(dir: string): void { for (const entry of fs.readdirSync(dir)) { const entryPath = path.join(dir, entry) let stats try { stats = fs.lstatSync(entryPath) } catch { continue; // Guard against https://github.com/nodejs/node/issues/4760 } if (stats.isDirectory()) removeRecursive(entryPath) else fs.unlinkSync(entryPath) } fs.rmdirSync(dir) } function applyManualBinaryPathOverride(overridePath: string): void { // Patch the CLI use case (the "esbuild" command) const pathString = JSON.stringify(overridePath) fs.writeFileSync(toPath, `#!/usr/bin/env node\n` + `require('child_process').execFileSync(${pathString}, process.argv.slice(2), { stdio: 'inherit' });\n`) // Patch the JS API use case (the "require('esbuild')" workflow) const libMain = path.join(__dirname, 'lib', 'main.js') const code = fs.readFileSync(libMain, 'utf8') fs.writeFileSync(libMain, `var ESBUILD_BINARY_PATH = ${pathString};\n${code}`) } function maybeOptimizePackage(binPath: string): void { // This package contains a "bin/esbuild" JavaScript file that finds and runs // the appropriate binary executable. However, this means that running the // "esbuild" command runs another instance of "node" which is way slower than // just running the binary executable directly. // // Here we optimize for this by replacing the JavaScript file with the binary // executable at install time. 
This optimization does not work on Windows // because on Windows the binary executable must be called "esbuild.exe" // instead of "esbuild". // // This also doesn't work with Yarn both because of lack of support for binary // files in Yarn 2+ (see https://github.com/yarnpkg/berry/issues/882) and // because Yarn (even Yarn 1?) may run the same install scripts in the same // place multiple times from different platforms, especially when people use // Docker. Avoid idempotency issues by just not optimizing when using Yarn. // // This optimization also doesn't apply when npm's "--ignore-scripts" flag is // used since in that case this install script will not be run. if (os.platform() !== 'win32' && !isYarn()) { const tempPath = path.join(__dirname, 'bin-esbuild') try { // First link the binary with a temporary file. If this fails and throws an // error, then we'll just end up doing nothing. This uses a hard link to // avoid taking up additional space on the file system. fs.linkSync(binPath, tempPath) // Then use rename to atomically replace the target file with the temporary // file. If this fails and throws an error, then we'll just end up leaving // the temporary file there, which is harmless. fs.renameSync(tempPath, toPath) // If we get here, then we know that the target location is now a binary // executable instead of a JavaScript file. isToPathJS = false // If this install script is being re-run, then "renameSync" will fail // since the underlying inode is the same (it just returns without doing // anything, and without throwing an error). In that case we should remove // the file manually. fs.unlinkSync(tempPath) } catch { // Ignore errors here since this optimization is optional } } } async function downloadDirectlyFromNPM(pkg: string, subpath: string, binPath: string): Promise<void> { // If that fails, the user could have npm configured incorrectly or could not // have npm installed. Try downloading directly from npm as a last resort. const url = `https://registry.npmjs.org/${pkg}/-/${pkg.replace('@esbuild/', '')}-${ESBUILD_VERSION}.tgz` console.error(`[esbuild] Trying to download ${JSON.stringify(url)}`) try { fs.writeFileSync(binPath, extractFileFromTarGzip(await fetch(url), subpath)) fs.chmodSync(binPath, 0o755) } catch (e: any) { console.error(`[esbuild] Failed to download ${JSON.stringify(url)}: ${e && e.message || e}`) throw e } } async function checkAndPreparePackage(): Promise<void> { // This feature was added to give external code a way to modify the binary // path without modifying the code itself. Do not remove this because // external code relies on this (in addition to esbuild's own test suite). if (isValidBinaryPath(ESBUILD_BINARY_PATH)) { if (!fs.existsSync(ESBUILD_BINARY_PATH)) { console.warn(`[esbuild] Ignoring bad configuration: ESBUILD_BINARY_PATH=${ESBUILD_BINARY_PATH}`) } else { applyManualBinaryPathOverride(ESBUILD_BINARY_PATH) return } } const { pkg, subpath } = pkgAndSubpathForCurrentPlatform() let binPath: string try { // First check for the binary package from our "optionalDependencies". This // package should have been installed alongside this package at install time. binPath = require.resolve(`${pkg}/${subpath}`) } catch (e) { console.error(`[esbuild] Failed to find package "${pkg}" on the file system This can happen if you use the "--no-optional" flag. The "optionalDependencies" package.json feature is used by esbuild to install the correct binary executable for your current platform. This install script will now attempt to work around this. 
If that fails, you need to remove the "--no-optional" flag to use esbuild. `) // If that didn't work, then someone probably installed esbuild with the // "--no-optional" flag. Attempt to compensate for this by downloading the // package using a nested call to "npm" instead. // // THIS MAY NOT WORK. Package installation uses "optionalDependencies" for // a reason: manually downloading the package has a lot of obscure edge // cases that fail because people have customized their environment in // some strange way that breaks downloading. This code path is just here // to be helpful but it's not the supported way of installing esbuild. binPath = downloadedBinPath(pkg, subpath) try { console.error(`[esbuild] Trying to install package "${pkg}" using npm`) installUsingNPM(pkg, subpath, binPath) } catch (e2: any) { console.error(`[esbuild] Failed to install package "${pkg}" using npm: ${e2 && e2.message || e2}`) // If that didn't also work, then something is likely wrong with the "npm" // command. Attempt to compensate for this by manually downloading the // package from the npm registry over HTTP as a last resort. try { await downloadDirectlyFromNPM(pkg, subpath, binPath) } catch (e3: any) { throw new Error(`Failed to install package "${pkg}"`) } } } maybeOptimizePackage(binPath) } checkAndPreparePackage().then(() => { if (isToPathJS) { // We need "node" before this command since it's a JavaScript file validateBinaryVersion(process.execPath, toPath) } else { // This is no longer a JavaScript file so don't run it using "node" validateBinaryVersion(toPath) } })
lib/npm/node-install.ts
1
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.9987595081329346, 0.17564161121845245, 0.00017651525558903813, 0.004032012540847063, 0.3527129888534546 ]
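The downloadDirectlyFromNPM fallback that the hunk above touches builds the registry tarball URL by hand, mirroring npm's naming scheme for scoped packages. A small sketch of that construction, using an assumed platform package name and version purely for illustration:

// Scoped platform packages such as "@esbuild/linux-x64" publish tarballs
// named without the scope prefix, e.g. ".../-/linux-x64-<version>.tgz".
function npmTarballURL(pkg, version) {
  return `https://registry.npmjs.org/${pkg}/-/${pkg.replace('@esbuild/', '')}-${version}.tgz`
}

console.log(npmTarballURL('@esbuild/linux-x64', '0.17.10'))
// https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.17.10.tgz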
{ "id": 3, "code_window": [ "\n", "async function downloadDirectlyFromNPM(pkg: string, subpath: string, binPath: string): Promise<void> {\n", " // If that fails, the user could have npm configured incorrectly or could not\n", " // have npm installed. Try downloading directly from npm as a last resort.\n", " const url = `https://registry.npmjs.org/${pkg}/-/${pkg.replace('@esbuild/', '')}-${ESBUILD_VERSION}.tgz`\n", " console.error(`[esbuild] Trying to download ${JSON.stringify(url)}`)\n", " try {\n", " fs.writeFileSync(binPath, extractFileFromTarGzip(await fetch(url), subpath))\n", " fs.chmodSync(binPath, 0o755)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ " const url = `https://registry.npmjs.org/${pkg}/-/${pkg.replace('@esbuild/', '')}-${versionFromPackageJSON}.tgz`\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 220 }
const { installForTests } = require('./esbuild'); const childProcess = require('child_process'); const assert = require('assert'); const path = require('path'); const fs = require('fs'); const repoDir = path.dirname(__dirname); const testDir = path.join(repoDir, 'scripts', '.uglify-tests'); const uglifyDir = path.join(repoDir, 'demo', 'uglify'); const SKIP = {}; let U; main().catch(e => setTimeout(() => { throw e })); async function main() { // // Terser's stdout comparisons fail if this is true since stdout contains // // terminal color escape codes // process.stdout.isTTY = false; // Make sure the tests are installed childProcess.execSync('make demo/uglify', { cwd: repoDir, stdio: 'pipe' }); U = require(path.join(uglifyDir, 'test', 'node')); // Create a fresh test directory childProcess.execSync(`rm -fr "${testDir}"`); fs.mkdirSync(testDir) // Start the esbuild service const esbuild = installForTests(); // Find test files const compressDir = path.join(uglifyDir, 'test', 'compress'); const files = fs.readdirSync(compressDir).filter(name => name.endsWith('.js')); // Run all tests concurrently let passedTotal = 0; let failedTotal = 0; let skippedTotal = 0; const runTest = file => test_file(esbuild, path.join(compressDir, file)) .then(({ passed, failed, skipped }) => { passedTotal += passed; failedTotal += failed; skippedTotal += skipped; }); await Promise.all(files.map(runTest)); // Clean up test output childProcess.execSync(`rm -fr "${testDir}"`); console.log(`${failedTotal} failed out of ${passedTotal + failedTotal}, with ${skippedTotal} skipped`); if (failedTotal) { process.exit(1); } } async function test_file(esbuild, file) { let passed = 0; let failed = 0; let skipped = 0; const tests = parse_test(file); const runTest = name => test_case(esbuild, tests[name], path.basename(file)) .then(x => { if (x === SKIP) { skipped++; } else { passed++; } }) .catch(e => { failed++; console.error(`❌ ${file}: ${name}: ${(e && e.message || e).trim()}\n`); pass = false; }); await Promise.all(Object.keys(tests).map(runTest)); return { passed, failed, skipped }; } // Modified from "uglify/demo/test/compress.js" async function test_case(esbuild, test, basename) { const sandbox = require(path.join(uglifyDir, 'test', 'sandbox')); const log = (format, args) => { throw new Error(tmpl(format, args)); }; var semver = require(path.join(uglifyDir, 'node_modules', 'semver')); // Generate the input code var input = to_toplevel(test.input, test.mangle); var input_code = make_code(input); var input_formatted = make_code(test.input, { beautify: true, comments: "all", keep_quoted_props: true, quote_style: 3, }); // Make sure it's valid try { U.parse(input_code); } catch (ex) { log([ "!!! Cannot parse input", "---INPUT---", "{input}", "--PARSE ERROR--", "{error}", "", "", ].join("\n"), { input: input_formatted, error: ex, }); } // Ignore tests that no longer pass in modern versions of node. These tests // contain code that is now considered a syntax error. The relevant code is // this: // // try{throw 42}catch(a){console.log(a);function a(){}} // if (test.node_version && !semver.satisfies(process.version, test.node_version)) { console.error("*** skipping test %j with node_version %j", test.name, test.node_version); return SKIP; } // Run esbuild as a minifier try { var { code: output } = await esbuild.transform(input_code, { minify: true, target: 'esnext', }); } catch (e) { // These tests fail because they contain syntax errors. 
These test failures // do not indicate anything wrong with esbuild so the failures are ignored. // Here is one of the tests: // // try{}catch(a){const a="aa"} // if ([ 'const.js: issue_4290_1', 'const.js: issue_4305_2', 'const.js: retain_catch', 'const.js: skip_braces', 'exports.js: defaults', 'exports.js: drop_unused', 'exports.js: hoist_exports_1', 'exports.js: hoist_exports_2', 'exports.js: keep_return_values', 'exports.js: mangle_rename', 'exports.js: mangle', 'exports.js: refs', 'imports.js: issue_4708_1', 'imports.js: issue_4708_2', 'let.js: issue_4290_1', 'let.js: issue_4305_2', 'let.js: retain_catch', 'let.js: skip_braces', 'reduce_vars.js: defun_catch_4', 'reduce_vars.js: defun_catch_5', 'templates.js: malformed_evaluate_1', 'templates.js: malformed_evaluate_2', 'templates.js: malformed_evaluate_3', 'varify.js: issue_4290_1_const', 'varify.js: issue_4290_1_let', ].indexOf(`${basename}: ${test.name}`) >= 0) { console.error(`*** skipping test with known syntax error: ${basename}: ${test.name}`); return SKIP; } // These tests fail because esbuild supports top-level await. Technically // top-level await is only allowed inside a module, and can be used as a // normal identifier in a script. But the script/module distinction causes // a lot of pain due to the need to configure every single tool to say // whether to parse the code as a script or a module, so esbuild mostly // does away with the distinction and enables top-level await everywhere. // This means it fails these tests but the failures are unlikely to matter // in real-world code, so they can be ignored. Here's one test case: // // async function await(){console.log("PASS")}await(); // if ([ 'awaits.js: defun_name', 'awaits.js: drop_fname', 'awaits.js: functions_anonymous', 'awaits.js: functions_inner_var', 'awaits.js: issue_4335_1', 'awaits.js: keep_fname', 'classes.js: await', ].indexOf(`${basename}: ${test.name}`) >= 0) { console.error(`*** skipping test with top-level await as identifier: ${basename}: ${test.name}`); return SKIP; } // These tests fail because esbuild makes assigning to an inlined constant // a compile error to avoid code with incorrect behavior. This is a limitation // due to esbuild's three-pass design but it shouldn't matter in practice. It // just means esbuild rejects bad code at compile time instead of at run time. if ([ 'const.js: issue_4212_1', 'const.js: issue_4212_2', ].indexOf(`${basename}: ${test.name}`) >= 0) { console.error(`*** skipping test with assignment to an inlined constant: ${basename}: ${test.name}`); return SKIP; } log("!!! esbuild failed\n---INPUT---\n{input}\n---ERROR---\n{error}\n", { input: input_code, error: e && e.message || e, }); } // Make sure esbuild generates valid JavaScript try { U.parse(output); } catch (ex) { log([ "!!! Test matched expected result but cannot parse output", "---INPUT---", "{input}", "---OUTPUT---", "{output}", "--REPARSE ERROR--", "{error}", "", "", ].join("\n"), { input: input_formatted, output: output, error: ex && ex.stack || ex, }); } // Verify that the stdout matches our expectations if (test.expect_stdout && (!test.node_version || semver.satisfies(process.version, test.node_version))) { var stdout = [run_code(input_code), run_code(input_code, true)]; var toplevel = sandbox.has_toplevel({ compress: test.options, mangle: test.mangle }); var actual = stdout[toplevel ? 
1 : 0]; if (test.expect_stdout === true) { test.expect_stdout = actual; } actual = run_code(output, toplevel); // Ignore the known failures in CI, but not otherwise const isExpectingFailure = !process.env.CI ? false : [ // Stdout difference 'classes.js: issue_5015_2', 'const.js: issue_4225', 'const.js: issue_4229', 'const.js: issue_4245', 'const.js: use_before_init_3', 'destructured.js: funarg_side_effects_2', 'destructured.js: funarg_side_effects_3', 'let.js: issue_4225', 'let.js: issue_4229', 'let.js: issue_4245', 'let.js: use_before_init_3', // Error difference 'dead-code.js: dead_code_2_should_warn', ].indexOf(`${basename}: ${test.name}`) >= 0 if (!sandbox.same_stdout(test.expect_stdout, actual)) { if (isExpectingFailure) { console.error(`*** skipping test with known esbuild failure: ${basename}: ${test.name}`); return SKIP; } log([ "!!! failed", "---INPUT---", "{input}", "---EXPECTED {expected_type}---", "{expected}", "---ACTUAL {actual_type}---", "{actual}", "", "", ].join("\n"), { input: input_formatted, expected_type: typeof test.expect_stdout == "string" ? "STDOUT" : "ERROR", expected: test.expect_stdout, actual_type: typeof actual == "string" ? "STDOUT" : "ERROR", actual: actual, }); } else if (isExpectingFailure) { throw new Error(`UPDATE NEEDED: expected failure for ${basename}: ${test.name}, please remove this test from known failure list`); } } } //////////////////////////////////////////////////////////////////////////////// // The code below was copied verbatim from "uglify/demo/test/compress.js" // // UglifyJS is released under the BSD license: // // Copyright 2012-2019 (c) Mihai Bazon <[email protected]> // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above // copyright notice, this list of conditions and the following // disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials // provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER “AS IS” AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, // OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF // THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF // SUCH DAMAGE. 
function evaluate(code) { if (code instanceof U.AST_Node) code = make_code(code, { beautify: true }); return new Function("return(" + code + ")")(); } function make_code(ast, options) { var stream = U.OutputStream(options); ast.print(stream); return stream.get(); } function parse_test(file) { var script = fs.readFileSync(file, "utf8"); // TODO try/catch can be removed after fixing https://github.com/mishoo/UglifyJS/issues/348 try { var ast = U.parse(script, { filename: file }); } catch (e) { console.error("Caught error while parsing tests in " + file); console.error(e); process.exit(1); } var tests = Object.create(null); var tw = new U.TreeWalker(function (node, descend) { if (node instanceof U.AST_LabeledStatement && tw.parent() instanceof U.AST_Toplevel) { var name = node.label.name; if (name in tests) { throw new Error('Duplicated test name "' + name + '" in ' + file); } tests[name] = get_one_test(name, node.body); return true; } if (!(node instanceof U.AST_Toplevel)) croak(node); }); ast.walk(tw); return tests; function croak(node) { throw new Error(tmpl("Can't understand test file {file} [{line},{col}]\n{code}", { file: file, line: node.start.line, col: node.start.col, code: make_code(node, { beautify: false }) })); } function read_string(stat) { if (stat.TYPE == "SimpleStatement") { var body = stat.body; switch (body.TYPE) { case "String": return body.value; case "Array": return body.elements.map(function (element) { if (element.TYPE !== "String") throw new Error("Should be array of strings"); return element.value; }).join("\n"); } } throw new Error("Should be string or array of strings"); } function get_one_test(name, block) { var test = { name: name, options: {} }; var tw = new U.TreeWalker(function (node, descend) { if (node instanceof U.AST_Assign) { if (!(node.left instanceof U.AST_SymbolRef)) { croak(node); } var name = node.left.name; test[name] = evaluate(node.right); return true; } if (node instanceof U.AST_LabeledStatement) { var label = node.label; assert.ok([ "input", "expect", "expect_exact", "expect_warnings", "expect_stdout", "node_version", ].indexOf(label.name) >= 0, tmpl("Unsupported label {name} [{line},{col}]", { name: label.name, line: label.start.line, col: label.start.col })); var stat = node.body; if (label.name == "expect_exact" || label.name == "node_version") { test[label.name] = read_string(stat); } else if (label.name == "expect_stdout") { var body = stat.body; if (body instanceof U.AST_Boolean) { test[label.name] = body.value; } else if (body instanceof U.AST_Call) { var ctor = global[body.expression.name]; assert.ok(ctor === Error || ctor.prototype instanceof Error, tmpl("Unsupported expect_stdout format [{line},{col}]", { line: label.start.line, col: label.start.col })); test[label.name] = ctor.apply(null, body.args.map(function (node) { assert.ok(node instanceof U.AST_Constant, tmpl("Unsupported expect_stdout format [{line},{col}]", { line: label.start.line, col: label.start.col })); return node.value; })); } else { test[label.name] = read_string(stat) + "\n"; } } else { test[label.name] = stat; } return true; } }); block.walk(tw); return test; } } function run_code(code, toplevel) { const sandbox = require(path.join(uglifyDir, 'test', 'sandbox')); var result = sandbox.run_code(code, toplevel); return typeof result == "string" ? 
result.replace(/\u001b\[\d+m/g, "") : result; } function tmpl() { return U.string_template.apply(null, arguments); } function to_toplevel(input, mangle_options) { if (!(input instanceof U.AST_BlockStatement)) throw new Error("Unsupported input syntax"); var directive = true; var offset = input.start.line; var tokens = []; var toplevel = new U.AST_Toplevel(input.transform(new U.TreeTransformer(function (node) { if (U.push_uniq(tokens, node.start)) node.start.line -= offset; if (!directive || node === input) return; if (node instanceof U.AST_SimpleStatement && node.body instanceof U.AST_String) { return new U.AST_Directive(node.body); } else { directive = false; } }))); toplevel.figure_out_scope(mangle_options); return toplevel; }
scripts/uglify-tests.js
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.005782359279692173, 0.0004891641438007355, 0.00016559481446165591, 0.0001708105264697224, 0.0009212030563503504 ]
{ "id": 3, "code_window": [ "\n", "async function downloadDirectlyFromNPM(pkg: string, subpath: string, binPath: string): Promise<void> {\n", " // If that fails, the user could have npm configured incorrectly or could not\n", " // have npm installed. Try downloading directly from npm as a last resort.\n", " const url = `https://registry.npmjs.org/${pkg}/-/${pkg.replace('@esbuild/', '')}-${ESBUILD_VERSION}.tgz`\n", " console.error(`[esbuild] Trying to download ${JSON.stringify(url)}`)\n", " try {\n", " fs.writeFileSync(binPath, extractFileFromTarGzip(await fetch(url), subpath))\n", " fs.chmodSync(binPath, 0o755)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ " const url = `https://registry.npmjs.org/${pkg}/-/${pkg.replace('@esbuild/', '')}-${versionFromPackageJSON}.tgz`\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 220 }
[{ "manifest": { "__info": [], "dependencyTreeRoots": [{ "name": "root", "reference": "workspace:." }], "ignorePatternData": null, "enableTopLevelFallback": false, "fallbackPool": [], "fallbackExclusionList": [], "packageRegistryData": [ [null, [ [null, { "packageLocation": "./", "packageDependencies": [["test", "npm:1.0.0"]], "linkType": "SOFT" }] ]], ["root", [ ["workspace:.", { "packageLocation": "./", "packageDependencies": [["test", "npm:1.0.0"]], "linkType": "SOFT" }] ]], ["workspace-alias-dependency", [ ["workspace:workspace-alias-dependency", { "packageLocation": "./workspace-alias-dependency/", "packageDependencies": [["alias", ["test", "npm:1.0.0"]]], "linkType": "SOFT" }] ]], ["workspace-self-dependency", [ ["workspace:workspace-self-dependency", { "packageLocation": "./workspace-self-dependency/", "packageDependencies": [["workspace-self-dependency", "workspace:workspace-self-dependency"]], "linkType": "SOFT" }] ]], ["workspace-unfulfilled-peer-dependency", [ ["workspace:workspace-unfulfilled-peer-dependency", { "packageLocation": "./workspace-unfulfilled-peer-dependency/", "packageDependencies": [["test", null]], "linkType": "SOFT" }] ]], ["longer", [ ["workspace:longer", { "packageLocation": "./longer/", "packageDependencies": [["test", "npm:2.0.0"]], "linkType": "SOFT" }] ]], ["long", [ ["workspace:long", { "packageLocation": "./long/", "packageDependencies": [["test", "npm:1.0.0"]], "linkType": "SOFT" }] ]], ["longerer", [ ["workspace:longerer", { "packageLocation": "./longerer/", "packageDependencies": [["test", "npm:3.0.0"]], "linkType": "SOFT" }] ]], ["test", [ ["npm:1.0.0", { "packageLocation": "./test-1.0.0/", "packageDependencies": [], "linkType": "HARD" }], ["npm:2.0.0", { "packageLocation": "./test-2.0.0/", "packageDependencies": [], "linkType": "HARD" }], ["npm:3.0.0", { "packageLocation": "./test-3.0.0/", "packageDependencies": [], "linkType": "HARD" }] ]] ] }, "tests": [{ "it": "should allow a package to import one of its dependencies", "imported": "test", "importer": "/path/to/project/", "expected": "/path/to/project/test-1.0.0/" }, { "it": "should allow a package to import itself, if specified in its own dependencies", "imported": "workspace-self-dependency", "importer": "/path/to/project/workspace-self-dependency/", "expected": "/path/to/project/workspace-self-dependency/" }, { "it": "should allow a package to import an aliased dependency", "imported": "alias", "importer": "/path/to/project/workspace-alias-dependency/", "expected": "/path/to/project/test-1.0.0/" }, { "it": "shouldn't allow a package to import something that isn't one of its dependencies", "imported": "missing-dependency", "importer": "/path/to/project/", "expected": "error!" }, { "it": "shouldn't accidentally discard the trailing slash from the package locations", "imported": "test", "importer": "/path/to/project/long/", "expected": "/path/to/project/test-1.0.0/" }, { "it": "should throw an exception when trying to access an unfulfilled peer dependency", "imported": "test", "importer": "/path/to/project/workspace-unfulfilled-peer-dependency/", "expected": "error!" }] }, { "manifest": { "__info": [], "dependencyTreeRoots": [{ "name": "root", "reference": "workspace:." 
}], "ignorePatternData": null, "enableTopLevelFallback": true, "fallbackPool": [ ["test-2", "npm:1.0.0"], ["alias", ["test-1", "npm:1.0.0"]] ], "fallbackExclusionList": [[ "workspace-no-fallbacks", ["workspace:workspace-no-fallbacks"] ]], "packageRegistryData": [ [null, [ [null, { "packageLocation": "./", "packageDependencies": [["test-1", "npm:1.0.0"]], "linkType": "SOFT" }] ]], ["root", [ ["workspace:.", { "packageLocation": "./", "packageDependencies": [["test-1", "npm:1.0.0"]], "linkType": "SOFT" }] ]], ["workspace-no-fallbacks", [ ["workspace:workspace-no-fallbacks", { "packageLocation": "./workspace-no-fallbacks/", "packageDependencies": [], "linkType": "SOFT" }] ]], ["workspace-with-fallbacks", [ ["workspace:workspace-with-fallbacks", { "packageLocation": "./workspace-with-fallbacks/", "packageDependencies": [], "linkType": "SOFT" }] ]], ["workspace-unfulfilled-peer-dependency", [ ["workspace:workspace-unfulfilled-peer-dependency", { "packageLocation": "./workspace-unfulfilled-peer-dependency/", "packageDependencies": [ ["test-1", null], ["test-2", null] ], "linkType": "SOFT" }] ]], ["test-1", [ ["npm:1.0.0", { "packageLocation": "./test-1/", "packageDependencies": [], "linkType": "HARD" }] ]], ["test-2", [ ["npm:1.0.0", { "packageLocation": "./test-2/", "packageDependencies": [], "linkType": "HARD" }] ]] ] }, "tests": [{ "it": "should allow resolution coming from the fallback pool if enableTopLevelFallback is set to true", "imported": "test-1", "importer": "/path/to/project/", "expected": "/path/to/project/test-1/" }, { "it": "should allow the fallback pool to contain aliases", "imported": "alias", "importer": "/path/to/project/", "expected": "/path/to/project/test-1/" }, { "it": "shouldn't use the fallback pool when the importer package is listed in fallbackExclusionList", "imported": "test-1", "importer": "/path/to/project/workspace-no-fallbacks/", "expected": "error!" }, { "it": "should implicitly use the top-level package dependencies as part of the fallback pool", "imported": "test-2", "importer": "/path/to/project/workspace-with-fallbacks/", "expected": "/path/to/project/test-2/" }, { "it": "should throw an error if a resolution isn't in in the package dependencies, nor inside the fallback pool", "imported": "test-3", "importer": "/path/to/project/workspace-with-fallbacks/", "expected": "error!" }, { "it": "should use the top-level fallback if a dependency is missing because of an unfulfilled peer dependency", "imported": "test-1", "importer": "/path/to/project/workspace-unfulfilled-peer-dependency/", "expected": "/path/to/project/test-1/" }, { "it": "should use the fallback pool if a dependency is missing because of an unfulfilled peer dependency", "imported": "test-2", "importer": "/path/to/project/workspace-unfulfilled-peer-dependency/", "expected": "/path/to/project/test-2/" }] }, { "manifest": { "__info": [], "dependencyTreeRoots": [{ "name": "root", "reference": "workspace:." 
}], "ignorePatternData": null, "enableTopLevelFallback": false, "fallbackPool": [ ["test", "npm:1.0.0"] ], "fallbackExclusionList": [], "packageRegistryData": [ [null, [ [null, { "packageLocation": "./", "packageDependencies": [], "linkType": "SOFT" }] ]], ["root", [ ["workspace:.", { "packageLocation": "./", "packageDependencies": [], "linkType": "SOFT" }] ]], ["test", [ ["npm:1.0.0", { "packageLocation": "./test-1/", "packageDependencies": [], "linkType": "HARD" }] ]] ] }, "tests": [{ "it": "should ignore the fallback pool if enableTopLevelFallback is set to false", "imported": "test", "importer": "/path/to/project/", "expected": "error!" }] }, { "manifest": { "__info": [], "dependencyTreeRoots": [{ "name": "root", "reference": "workspace:." }], "ignorePatternData": "^not-a-workspace(/|$)", "enableTopLevelFallback": false, "fallbackPool": [], "fallbackExclusionList": [], "packageRegistryData": [ [null, [ [null, { "packageLocation": "./", "packageDependencies": [], "linkType": "SOFT" }] ]], ["root", [ ["workspace:.", { "packageLocation": "./", "packageDependencies": [["test", "npm:1.0.0"]], "linkType": "SOFT" }] ]], ["test", [ ["npm:1.0.0", { "packageLocation": "./test/", "packageDependencies": [], "linkType": "HARD" }] ]] ] }, "tests": [{ "it": "shouldn't go through PnP when trying to resolve dependencies from packages covered by ignorePatternData", "imported": "test", "importer": "/path/to/project/not-a-workspace/", "expected": "error!" }] }]
internal/resolver/testExpectations.json
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.0002560718567110598, 0.0001782057515811175, 0.00016669298929627985, 0.00016866762598510832, 0.000021689116692868993 ]
{ "id": 3, "code_window": [ "\n", "async function downloadDirectlyFromNPM(pkg: string, subpath: string, binPath: string): Promise<void> {\n", " // If that fails, the user could have npm configured incorrectly or could not\n", " // have npm installed. Try downloading directly from npm as a last resort.\n", " const url = `https://registry.npmjs.org/${pkg}/-/${pkg.replace('@esbuild/', '')}-${ESBUILD_VERSION}.tgz`\n", " console.error(`[esbuild] Trying to download ${JSON.stringify(url)}`)\n", " try {\n", " fs.writeFileSync(binPath, extractFileFromTarGzip(await fetch(url), subpath))\n", " fs.chmodSync(binPath, 0o755)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ " const url = `https://registry.npmjs.org/${pkg}/-/${pkg.replace('@esbuild/', '')}-${versionFromPackageJSON}.tgz`\n" ], "file_path": "lib/npm/node-install.ts", "type": "replace", "edit_start_line_idx": 220 }
//go:build windows // +build windows package fs func CheckIfWindows() bool { return true }
internal/fs/iswin_windows.go
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.00036088423803448677, 0.00036088423803448677, 0.00036088423803448677, 0.00036088423803448677, 0 ]
{ "id": 4, "code_window": [ "const childProcess = require('child_process')\n", "const path = require('path')\n", "const zlib = require('zlib')\n", "const fs = require('fs')\n", "const os = require('os')\n", "\n", "const repoDir = path.dirname(__dirname)\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "scripts/esbuild.js", "type": "replace", "edit_start_line_idx": 2 }
const childProcess = require('child_process') const path = require('path') const zlib = require('zlib') const fs = require('fs') const os = require('os') const repoDir = path.dirname(__dirname) const denoDir = path.join(repoDir, 'deno') const npmDir = path.join(repoDir, 'npm', 'esbuild') const version = fs.readFileSync(path.join(repoDir, 'version.txt'), 'utf8').trim() const nodeTarget = 'node10'; // See: https://nodejs.org/en/about/releases/ const umdBrowserTarget = 'es2015'; // Transpiles "async" const esmBrowserTarget = 'es2017'; // Preserves "async" const buildNeutralLib = (esbuildPath) => { const libDir = path.join(npmDir, 'lib') const binDir = path.join(npmDir, 'bin') fs.mkdirSync(libDir, { recursive: true }) fs.mkdirSync(binDir, { recursive: true }) // Generate "npm/esbuild/install.js" childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'npm', 'node-install.ts'), '--outfile=' + path.join(npmDir, 'install.js'), '--bundle', '--target=' + nodeTarget, '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--external:esbuild', '--platform=node', '--log-level=warning', ], { cwd: repoDir }) // Generate "npm/esbuild/lib/main.js" childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'npm', 'node.ts'), '--outfile=' + path.join(libDir, 'main.js'), '--bundle', '--target=' + nodeTarget, '--define:WASM=false', '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--external:esbuild', '--platform=node', '--log-level=warning', ], { cwd: repoDir }) // Generate "npm/esbuild/bin/esbuild" childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'npm', 'node-shim.ts'), '--outfile=' + path.join(binDir, 'esbuild'), '--bundle', '--target=' + nodeTarget, '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--external:esbuild', '--platform=node', '--log-level=warning', ], { cwd: repoDir }) // Generate "npm/esbuild/lib/main.d.ts" const types_ts = fs.readFileSync(path.join(repoDir, 'lib', 'shared', 'types.ts'), 'utf8') fs.writeFileSync(path.join(libDir, 'main.d.ts'), types_ts) // Get supported platforms const platforms = { exports: {} } new Function('module', 'exports', 'require', childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'npm', 'node-platform.ts'), '--bundle', '--target=' + nodeTarget, '--external:esbuild', '--platform=node', '--log-level=warning', ], { cwd: repoDir }))(platforms, platforms.exports, require) const optionalDependencies = Object.fromEntries(Object.values({ ...platforms.exports.knownWindowsPackages, ...platforms.exports.knownUnixlikePackages, ...platforms.exports.knownWebAssemblyFallbackPackages, }).sort().map(x => [x, version])) // Update "npm/esbuild/package.json" const pjPath = path.join(npmDir, 'package.json') const package_json = JSON.parse(fs.readFileSync(pjPath, 'utf8')) package_json.optionalDependencies = optionalDependencies fs.writeFileSync(pjPath, JSON.stringify(package_json, null, 2) + '\n') } async function generateWorkerCode({ esbuildPath, wasm_exec_js, minify, target }) { const input = ` let onmessage let globalThis = {} for (let o = self; o; o = Object.getPrototypeOf(o)) for (let k of Object.getOwnPropertyNames(o)) if (!(k in globalThis)) Object.defineProperty(globalThis, k, { get: () => self[k] }) ${wasm_exec_js.replace(/\bfs\./g, 'globalThis.fs.')} ${fs.readFileSync(path.join(repoDir, 'lib', 'shared', 'worker.ts'), 'utf8')} return m => onmessage(m) ` const args = [ '--loader=ts', '--target=' + target, '--define:ESBUILD_VERSION=' + JSON.stringify(version), ].concat(minify ? 
['--minify'] : []) // Note: This uses "execFile" because "execFileSync" in node appears to have // a bug. Specifically when using the "input" option of "execFileSync" to // provide stdin, sometimes (~2% of the time?) node writes all of the input // but then doesn't close the stream. The Go side is stuck reading from stdin // within "ioutil.ReadAll(os.Stdin)" so I suspect it's a bug in node, not in // Go. Explicitly calling "stdin.end()" on the node side appears to fix it. const wasmExecAndWorker = (await new Promise((resolve, reject) => { const proc = childProcess.execFile(esbuildPath, args, { cwd: repoDir }, (err, stdout) => { if (err) reject(err) else resolve(stdout) }) proc.stdin.write(input) proc.stdin.end() })).toString().trim() const commentLines = wasm_exec_js.split('\n') const firstNonComment = commentLines.findIndex(line => !line.startsWith('//')) const commentPrefix = '\n' + commentLines.slice(0, firstNonComment).join('\n') + '\n' if (minify) return `(postMessage=>{${commentPrefix}${wasmExecAndWorker}})` return `((postMessage) => {${(commentPrefix + wasmExecAndWorker).replace(/\n/g, '\n ')}\n })` } exports.buildWasmLib = async (esbuildPath) => { // Asynchronously start building the WebAssembly module const npmWasmDir = path.join(repoDir, 'npm', 'esbuild-wasm') const goBuildPromise = new Promise((resolve, reject) => childProcess.execFile('go', [ 'build', '-o', path.join(npmWasmDir, 'esbuild.wasm'), '-ldflags=-s -w', // This removes ~0.14mb of unnecessary WebAssembly code '-trimpath', path.join(repoDir, 'cmd', 'esbuild'), ], { cwd: repoDir, stdio: 'inherit', env: { ...process.env, GOOS: 'js', GOARCH: 'wasm' } }, err => err ? reject(err) : resolve())) const libDir = path.join(npmWasmDir, 'lib') const esmDir = path.join(npmWasmDir, 'esm') fs.mkdirSync(libDir, { recursive: true }) fs.mkdirSync(esmDir, { recursive: true }) // Generate "npm/esbuild-wasm/wasm_exec.js" const GOROOT = childProcess.execFileSync('go', ['env', 'GOROOT']).toString().trim() let wasm_exec_js = fs.readFileSync(path.join(GOROOT, 'misc', 'wasm', 'wasm_exec.js'), 'utf8') let wasm_exec_node_js = fs.readFileSync(path.join(GOROOT, 'misc', 'wasm', 'wasm_exec_node.js'), 'utf8') fs.writeFileSync(path.join(npmWasmDir, 'wasm_exec.js'), wasm_exec_js) fs.writeFileSync(path.join(npmWasmDir, 'wasm_exec_node.js'), wasm_exec_node_js) // Generate "npm/esbuild-wasm/lib/main.js" childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'npm', 'node.ts'), '--outfile=' + path.join(libDir, 'main.js'), '--bundle', '--target=' + nodeTarget, '--format=cjs', '--define:WASM=true', '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--external:esbuild', '--platform=node', '--log-level=warning', ], { cwd: repoDir }) // Generate "npm/esbuild-wasm/lib/main.d.ts" and "npm/esbuild-wasm/lib/browser.d.ts" const types_ts = fs.readFileSync(path.join(repoDir, 'lib', 'shared', 'types.ts'), 'utf8') fs.writeFileSync(path.join(libDir, 'main.d.ts'), types_ts) fs.writeFileSync(path.join(libDir, 'browser.d.ts'), types_ts) fs.writeFileSync(path.join(esmDir, 'browser.d.ts'), types_ts) for (const minify of [false, true]) { const minifyFlags = minify ? 
['--minify'] : [] const wasmWorkerCodeUMD = await generateWorkerCode({ esbuildPath, wasm_exec_js, minify, target: umdBrowserTarget }) const wasmWorkerCodeESM = await generateWorkerCode({ esbuildPath, wasm_exec_js, minify, target: esmBrowserTarget }) // Generate "npm/esbuild-wasm/lib/browser.*" const umdPrefix = `(module=>{` const umdSuffix = `})(typeof module==="object"?module:{set exports(x){(typeof self!=="undefined"?self:this).esbuild=x}});` const browserCJS = childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'npm', 'browser.ts'), '--bundle', '--target=' + umdBrowserTarget, '--format=cjs', '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--define:WEB_WORKER_SOURCE_CODE=' + JSON.stringify(wasmWorkerCodeUMD), '--banner:js=' + umdPrefix, '--footer:js=' + umdSuffix, '--log-level=warning', ].concat(minifyFlags), { cwd: repoDir }).toString().replace('WEB_WORKER_FUNCTION', wasmWorkerCodeUMD) fs.writeFileSync(path.join(libDir, minify ? 'browser.min.js' : 'browser.js'), browserCJS) // Generate "npm/esbuild-wasm/esm/browser.min.js" const browserESM = childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'npm', 'browser.ts'), '--bundle', '--target=' + esmBrowserTarget, '--format=esm', '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--define:WEB_WORKER_SOURCE_CODE=' + JSON.stringify(wasmWorkerCodeESM), '--log-level=warning', ].concat(minifyFlags), { cwd: repoDir }).toString().replace('WEB_WORKER_FUNCTION', wasmWorkerCodeESM) fs.writeFileSync(path.join(esmDir, minify ? 'browser.min.js' : 'browser.js'), browserESM) } // Join with the asynchronous WebAssembly build await goBuildPromise // Also copy this into the WebAssembly shim directories for (const dir of [ path.join(repoDir, 'npm', '@esbuild', 'android-arm'), path.join(repoDir, 'npm', '@esbuild', 'android-x64'), ]) { fs.mkdirSync(path.join(dir, 'bin'), { recursive: true }) fs.writeFileSync(path.join(dir, 'wasm_exec.js'), wasm_exec_js) fs.writeFileSync(path.join(dir, 'wasm_exec_node.js'), wasm_exec_node_js) fs.copyFileSync(path.join(npmWasmDir, 'bin', 'esbuild'), path.join(dir, 'bin', 'esbuild')) fs.copyFileSync(path.join(npmWasmDir, 'esbuild.wasm'), path.join(dir, 'esbuild.wasm')) } } const buildDenoLib = async (esbuildPath) => { // Generate "deno/esbuild/mod.js" childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'deno', 'mod.ts'), '--bundle', '--outfile=' + path.join(denoDir, 'mod.js'), '--target=esnext', '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--platform=neutral', '--log-level=warning', '--banner:js=/// <reference types="./mod.d.ts" />', ], { cwd: repoDir }) // Generate "deno/esbuild/wasm.js" const GOROOT = childProcess.execFileSync('go', ['env', 'GOROOT']).toString().trim() let wasm_exec_js = fs.readFileSync(path.join(GOROOT, 'misc', 'wasm', 'wasm_exec.js'), 'utf8') const wasmWorkerCode = await generateWorkerCode({ esbuildPath, wasm_exec_js, minify: true, target: 'esnext' }) const modWASM = childProcess.execFileSync(esbuildPath, [ path.join(repoDir, 'lib', 'deno', 'wasm.ts'), '--bundle', '--target=esnext', '--define:ESBUILD_VERSION=' + JSON.stringify(version), '--define:WEB_WORKER_SOURCE_CODE=' + JSON.stringify(wasmWorkerCode), '--platform=neutral', '--log-level=warning', '--banner:js=/// <reference types="./wasm.d.ts" />', ], { cwd: repoDir }).toString().replace('WEB_WORKER_FUNCTION', wasmWorkerCode) fs.writeFileSync(path.join(denoDir, 'wasm.js'), modWASM) // Generate "deno/esbuild/mod.d.ts" const types_ts = fs.readFileSync(path.join(repoDir, 'lib', 'shared', 
'types.ts'), 'utf8') + `\n// Unlike node, Deno lacks the necessary APIs to clean up child processes` + `\n// automatically. You must manually call stop() in Deno when you're done` + `\n// using esbuild or Deno will continue running forever.` + `\nexport function stop(): void;` + `\n` fs.writeFileSync(path.join(denoDir, 'mod.d.ts'), types_ts) fs.writeFileSync(path.join(denoDir, 'wasm.d.ts'), types_ts) // And copy the WebAssembly file over to the Deno library as well fs.copyFileSync(path.join(repoDir, 'npm', 'esbuild-wasm', 'esbuild.wasm'), path.join(repoDir, 'deno', 'esbuild.wasm')) } // Writing a file atomically is important for watch mode tests since we don't // want to read the file after it has been truncated but before the new contents // have been written. exports.writeFileAtomic = (where, contents) => { // Note: Can't use "os.tmpdir()" because that doesn't work on Windows. CI runs // tests on D:\ and the temporary directory is on C:\ or the other way around. // And apparently it's impossible to move files between C:\ and D:\ or something. // So we have to write the file in the same directory as the destination. This is // unfortunate because it will unnecessarily trigger extra watch mode rebuilds. // So we have to make our tests extra robust so they can still work with random // extra rebuilds thrown in. const file = path.join(path.dirname(where), '.esbuild-atomic-file-' + Math.random().toString(36).slice(2)) fs.writeFileSync(file, contents) fs.renameSync(file, where) } exports.buildBinary = () => { childProcess.execFileSync('go', ['build', '-ldflags=-s -w', '-trimpath', './cmd/esbuild'], { cwd: repoDir, stdio: 'ignore' }) return path.join(repoDir, process.platform === 'win32' ? 'esbuild.exe' : 'esbuild') } exports.removeRecursiveSync = path => { try { fs.rmSync(path, { recursive: true }) } catch (e) { // Removing stuff on Windows is flaky and unreliable. Don't fail tests // on CI if Windows is just being a pain. Common causes of flakes include // random EPERM and ENOTEMPTY errors. // // The general "solution" to this is to try asking Windows to redo the // failing operation repeatedly until eventually giving up after a // timeout. But that doesn't guarantee that flakes will be fixed so we // just give up instead. People that want reasonable file system // behavior on Windows should use WSL instead. } } const updateVersionPackageJSON = pathToPackageJSON => { const version = fs.readFileSync(path.join(path.dirname(__dirname), 'version.txt'), 'utf8').trim() const json = JSON.parse(fs.readFileSync(pathToPackageJSON, 'utf8')) if (json.version !== version) { json.version = version fs.writeFileSync(pathToPackageJSON, JSON.stringify(json, null, 2) + '\n') } } exports.installForTests = () => { // Build the "esbuild" binary and library const esbuildPath = exports.buildBinary() buildNeutralLib(esbuildPath) // Install the "esbuild" package to a temporary directory. On Windows, it's // sometimes randomly impossible to delete this installation directory. My // best guess is that this is because the esbuild process is kept alive until // the process exits for "buildSync" and "transformSync", and that sometimes // prevents Windows from deleting the directory it's in. The call in tests to // "rimraf.sync()" appears to hang when this happens. Other operating systems // don't have a problem with this. This has only been a problem on the Windows // VM in GitHub CI. I cannot reproduce this issue myself. 
const installDir = path.join(os.tmpdir(), 'esbuild-' + Math.random().toString(36).slice(2)) const env = { ...process.env, ESBUILD_BINARY_PATH: esbuildPath } fs.mkdirSync(installDir) fs.writeFileSync(path.join(installDir, 'package.json'), '{}') childProcess.execSync(`npm pack --silent "${npmDir}"`, { cwd: installDir, stdio: 'inherit' }) childProcess.execSync(`npm install --silent --no-audit --no-optional --progress=false esbuild-${version}.tgz`, { cwd: installDir, env, stdio: 'inherit' }) // Evaluate the code const ESBUILD_PACKAGE_PATH = path.join(installDir, 'node_modules', 'esbuild') const mod = require(ESBUILD_PACKAGE_PATH) Object.defineProperty(mod, 'ESBUILD_PACKAGE_PATH', { value: ESBUILD_PACKAGE_PATH }) return mod } const updateVersionGo = () => { const version_txt = fs.readFileSync(path.join(repoDir, 'version.txt'), 'utf8').trim() const version_go = `package main\n\nconst esbuildVersion = "${version_txt}"\n` const version_go_path = path.join(repoDir, 'cmd', 'esbuild', 'version.go') // Update this atomically to avoid issues with this being overwritten during use const temp_path = version_go_path + Math.random().toString(36).slice(1) fs.writeFileSync(temp_path, version_go) fs.renameSync(temp_path, version_go_path) } // This is helpful for ES6 modules which don't have access to __dirname exports.dirname = __dirname // The main Makefile invokes this script before publishing if (require.main === module) { if (process.argv.indexOf('--wasm') >= 0) exports.buildWasmLib(process.argv[2]) else if (process.argv.indexOf('--deno') >= 0) buildDenoLib(process.argv[2]) else if (process.argv.indexOf('--version') >= 0) updateVersionPackageJSON(process.argv[2]) else if (process.argv.indexOf('--neutral') >= 0) buildNeutralLib(process.argv[2]) else if (process.argv.indexOf('--update-version-go') >= 0) updateVersionGo() else throw new Error('Expected a flag') }
scripts/esbuild.js
1
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.9990866184234619, 0.8637593984603882, 0.00016588732250966132, 0.9988182187080383, 0.341362863779068 ]
{ "id": 4, "code_window": [ "const childProcess = require('child_process')\n", "const path = require('path')\n", "const zlib = require('zlib')\n", "const fs = require('fs')\n", "const os = require('os')\n", "\n", "const repoDir = path.dirname(__dirname)\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "scripts/esbuild.js", "type": "replace", "edit_start_line_idx": 2 }
name: CI on: push: branches: ['*'] pull_request: branches: ['*'] permissions: contents: read # to fetch code (actions/checkout) jobs: esbuild-slow: # Split these out into their own runner because they're very slow name: esbuild CI (Slow Tests) runs-on: ubuntu-latest steps: - name: Set up Go 1.x uses: actions/setup-go@v3 with: go-version: 1.20.0 id: go - name: Setup Node.js environment uses: actions/setup-node@v3 with: node-version: 16 - name: Check out code into the Go module directory uses: actions/checkout@v3 - name: Rollup Tests run: make test-rollup - name: Uglify Tests run: CI=1 make uglify - name: Ensure all platforms can be built run: make clean && make platform-all esbuild: name: esbuild CI runs-on: ${{ matrix.os }} strategy: matrix: os: [ubuntu-latest, macos-latest, windows-latest] steps: - name: Set up Go 1.x uses: actions/setup-go@v3 with: go-version: 1.20.0 id: go - name: Setup Node.js environment uses: actions/setup-node@v3 with: node-version: 16 # The version of Deno is pinned because version 1.25.1 was causing test # flakes due to random segfaults. - name: Setup Deno 1.24.0 uses: denoland/setup-deno@main with: deno-version: v1.24.0 - name: Check out code into the Go module directory uses: actions/checkout@v3 - name: go test run: go test -race ./internal/... - name: go vet run: go vet ./cmd/... ./internal/... ./pkg/... - name: Deno Tests (non-Windows) if: matrix.os != 'windows-latest' run: make test-deno - name: Deno Tests (Windows) if: matrix.os == 'windows-latest' run: make test-deno-windows - name: Test for path/filepath if: matrix.os == 'ubuntu-latest' run: make no-filepath - name: Make sure "check-go-version" works (non-Windows) if: matrix.os != 'windows-latest' run: make check-go-version - name: go fmt if: matrix.os == 'macos-latest' run: make fmt-go - name: npm ci run: cd scripts && npm ci - name: Register Test (ESBUILD_WORKER_THREADS=0, non-Windows) if: matrix.os != 'windows-latest' run: ESBUILD_WORKER_THREADS=0 node scripts/register-test.js - name: Register Test run: node scripts/register-test.js - name: Verify Source Map run: node scripts/verify-source-map.js - name: E2E Tests run: node scripts/end-to-end-tests.js - name: JS API Tests (ESBUILD_WORKER_THREADS=0, non-Windows) if: matrix.os != 'windows-latest' run: ESBUILD_WORKER_THREADS=0 node scripts/js-api-tests.js - name: JS API Tests run: node scripts/js-api-tests.js - name: NodeJS Unref Tests run: node scripts/node-unref-tests.js - name: Plugin Tests run: node scripts/plugin-tests.js - name: TypeScript Type Definition Tests if: matrix.os == 'ubuntu-latest' run: node scripts/ts-type-tests.js - name: JS API Type Check if: matrix.os == 'ubuntu-latest' run: make lib-typecheck - name: WebAssembly API Tests (browser) if: matrix.os == 'ubuntu-latest' run: make test-wasm-browser - name: WebAssembly API Tests (node, Linux) if: matrix.os == 'ubuntu-latest' run: make test-wasm-node - name: WebAssembly API Tests (node, non-Linux) if: matrix.os != 'ubuntu-latest' run: node scripts/wasm-tests.js - name: Yarn PnP tests run: make test-yarnpnp - name: Sucrase Tests if: matrix.os == 'ubuntu-latest' run: make test-sucrase - name: Esprima Tests if: matrix.os == 'ubuntu-latest' run: make test-esprima - name: Preact Splitting Tests if: matrix.os == 'ubuntu-latest' run: make test-preact-splitting - name: Check the unicode table generator if: matrix.os == 'ubuntu-latest' run: cd scripts && node gen-unicode-table.js esbuild-old-golang: name: esbuild CI (old versions) runs-on: ubuntu-latest steps: - name: Set up Go 1.13 (the minimum 
required Go version for esbuild) uses: actions/setup-go@v3 with: go-version: 1.13 id: go - name: Check out code into the Go module directory uses: actions/checkout@v3 - name: go build run: go build ./cmd/esbuild - name: go test run: go test ./internal/... - name: make test-old-ts run: make test-old-ts
.github/workflows/ci.yml
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.19853337109088898, 0.012110917828977108, 0.00016634224448353052, 0.000175083740032278, 0.04437444359064102 ]
{ "id": 4, "code_window": [ "const childProcess = require('child_process')\n", "const path = require('path')\n", "const zlib = require('zlib')\n", "const fs = require('fs')\n", "const os = require('os')\n", "\n", "const repoDir = path.dirname(__dirname)\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "scripts/esbuild.js", "type": "replace", "edit_start_line_idx": 2 }
package css_parser import ( "github.com/evanw/esbuild/internal/css_ast" "github.com/evanw/esbuild/internal/css_lexer" "github.com/evanw/esbuild/internal/logger" ) const ( boxTop = iota boxRight boxBottom boxLeft ) type boxSide struct { token css_ast.Token unitSafety unitSafetyTracker ruleIndex uint32 // The index of the originating rule in the rules array wasSingleRule bool // True if the originating rule was just for this side } type boxTracker struct { keyText string sides [4]boxSide allowAuto bool // If true, allow the "auto" keyword important bool // True if all active rules were flagged as "!important" key css_ast.D } type unitSafetyStatus uint8 const ( unitSafe unitSafetyStatus = iota // "margin: 0 1px 2cm 3%;" unitUnsafeSingle // "margin: 0 1vw 2vw 3vw;" unitUnsafeMixed // "margin: 0 1vw 2vh 3ch;" ) // We can only compact rules together if they have the same unit safety level. // We want to avoid a situation where the browser treats some of the original // rules as valid and others as invalid. // // Safe: // top: 1px; left: 0; bottom: 1px; right: 0; // top: 1Q; left: 2Q; bottom: 3Q; right: 4Q; // // Unsafe: // top: 1vh; left: 2vw; bottom: 3vh; right: 4vw; // top: 1Q; left: 2Q; bottom: 3Q; right: 0; // inset: 1Q 0 0 0; top: 0; type unitSafetyTracker struct { unit string status unitSafetyStatus } func (a unitSafetyTracker) isSafeWith(b unitSafetyTracker) bool { return a.status == b.status && a.status != unitUnsafeMixed && (a.status != unitUnsafeSingle || a.unit == b.unit) } func (t *unitSafetyTracker) includeUnitOf(token css_ast.Token) { switch token.Kind { case css_lexer.TNumber: if token.Text == "0" { return } case css_lexer.TPercentage: return case css_lexer.TDimension: if token.DimensionUnitIsSafeLength() { return } else if unit := token.DimensionUnit(); t.status == unitSafe { t.status = unitUnsafeSingle t.unit = unit return } else if t.status == unitUnsafeSingle && t.unit == unit { return } } t.status = unitUnsafeMixed } func (box *boxTracker) updateSide(rules []css_ast.Rule, side int, new boxSide) { if old := box.sides[side]; old.token.Kind != css_lexer.TEndOfFile && (!new.wasSingleRule || old.wasSingleRule) && old.unitSafety.status == unitSafe && new.unitSafety.status == unitSafe { rules[old.ruleIndex] = css_ast.Rule{} } box.sides[side] = new } func (box *boxTracker) mangleSides(rules []css_ast.Rule, decl *css_ast.RDeclaration, index int, minifyWhitespace bool) { // Reset if we see a change in the "!important" flag if box.important != decl.Important { box.sides = [4]boxSide{} box.important = decl.Important } allowedIdent := "" if box.allowAuto { allowedIdent = "auto" } if quad, ok := expandTokenQuad(decl.Value, allowedIdent); ok { // Use a single tracker for the whole rule unitSafety := unitSafetyTracker{} for _, t := range quad { if !box.allowAuto || t.Kind.IsNumeric() { unitSafety.includeUnitOf(t) } } for side, t := range quad { if unitSafety.status == unitSafe { t.TurnLengthIntoNumberIfZero() } box.updateSide(rules, side, boxSide{ token: t, ruleIndex: uint32(index), unitSafety: unitSafety, }) } box.compactRules(rules, decl.KeyRange, minifyWhitespace) } else { box.sides = [4]boxSide{} } } func (box *boxTracker) mangleSide(rules []css_ast.Rule, decl *css_ast.RDeclaration, index int, minifyWhitespace bool, side int) { // Reset if we see a change in the "!important" flag if box.important != decl.Important { box.sides = [4]boxSide{} box.important = decl.Important } if tokens := decl.Value; len(tokens) == 1 { if t := tokens[0]; t.Kind.IsNumeric() || (t.Kind == css_lexer.TIdent && 
box.allowAuto && t.Text == "auto") { unitSafety := unitSafetyTracker{} if !box.allowAuto || t.Kind.IsNumeric() { unitSafety.includeUnitOf(t) } if unitSafety.status == unitSafe && t.TurnLengthIntoNumberIfZero() { tokens[0] = t } box.updateSide(rules, side, boxSide{ token: t, ruleIndex: uint32(index), wasSingleRule: true, unitSafety: unitSafety, }) box.compactRules(rules, decl.KeyRange, minifyWhitespace) return } } box.sides = [4]boxSide{} } func (box *boxTracker) compactRules(rules []css_ast.Rule, keyRange logger.Range, minifyWhitespace bool) { // All tokens must be present if eof := css_lexer.TEndOfFile; box.sides[0].token.Kind == eof || box.sides[1].token.Kind == eof || box.sides[2].token.Kind == eof || box.sides[3].token.Kind == eof { return } // All tokens must have the same unit for _, side := range box.sides[1:] { if !side.unitSafety.isSafeWith(box.sides[0].unitSafety) { return } } // Generate the most minimal representation tokens := compactTokenQuad( box.sides[0].token, box.sides[1].token, box.sides[2].token, box.sides[3].token, minifyWhitespace, ) // Remove all of the existing declarations rules[box.sides[0].ruleIndex] = css_ast.Rule{} rules[box.sides[1].ruleIndex] = css_ast.Rule{} rules[box.sides[2].ruleIndex] = css_ast.Rule{} rules[box.sides[3].ruleIndex] = css_ast.Rule{} // Insert the combined declaration where the last rule was rules[box.sides[3].ruleIndex].Data = &css_ast.RDeclaration{ Key: box.key, KeyText: box.keyText, Value: tokens, KeyRange: keyRange, Important: box.important, } }
internal/css_parser/css_decls_box.go
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.00017832382582128048, 0.00017306863446719944, 0.00016524545208085328, 0.00017331376147922128, 0.0000032200439363805344 ]
{ "id": 4, "code_window": [ "const childProcess = require('child_process')\n", "const path = require('path')\n", "const zlib = require('zlib')\n", "const fs = require('fs')\n", "const os = require('os')\n", "\n", "const repoDir = path.dirname(__dirname)\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "scripts/esbuild.js", "type": "replace", "edit_start_line_idx": 2 }
package helpers import "strings" var builtinTypesLower = map[string]string{ // Text ".css": "text/css; charset=utf-8", ".htm": "text/html; charset=utf-8", ".html": "text/html; charset=utf-8", ".js": "text/javascript; charset=utf-8", ".json": "application/json", ".markdown": "text/markdown; charset=utf-8", ".md": "text/markdown; charset=utf-8", ".mjs": "text/javascript; charset=utf-8", ".xml": "text/xml; charset=utf-8", // Images ".avif": "image/avif", ".gif": "image/gif", ".jpeg": "image/jpeg", ".jpg": "image/jpeg", ".png": "image/png", ".svg": "image/svg+xml", ".webp": "image/webp", // Fonts ".eot": "application/vnd.ms-fontobject", ".otf": "font/otf", ".sfnt": "font/sfnt", ".ttf": "font/ttf", ".woff": "font/woff", ".woff2": "font/woff2", // Other ".pdf": "application/pdf", ".wasm": "application/wasm", ".webmanifest": "application/manifest+json", } // This is used instead of Go's built-in "mime.TypeByExtension" function because // that function is broken on Windows: https://github.com/golang/go/issues/32350. func MimeTypeByExtension(ext string) string { contentType := builtinTypesLower[ext] if contentType == "" { contentType = builtinTypesLower[strings.ToLower(ext)] } return contentType }
internal/helpers/mime.go
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.00017227776697836816, 0.00016974154277704656, 0.0001663930161157623, 0.0001692331425147131, 0.000002204143129347358 ]
{ "id": 5, "code_window": [ " path.join(repoDir, 'lib', 'npm', 'node-install.ts'),\n", " '--outfile=' + path.join(npmDir, 'install.js'),\n", " '--bundle',\n", " '--target=' + nodeTarget,\n", " '--define:ESBUILD_VERSION=' + JSON.stringify(version),\n", " '--external:esbuild',\n", " '--platform=node',\n", " '--log-level=warning',\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ " // Note: https://socket.dev have complained that inlining the version into\n", " // the install script messes up some internal scanning that they do by\n", " // making it seem like esbuild's install script code changes with every\n", " // esbuild release. So now we read it from \"package.json\" instead.\n", " // '--define:ESBUILD_VERSION=' + JSON.stringify(version),\n" ], "file_path": "scripts/esbuild.js", "type": "replace", "edit_start_line_idx": 26 }
import { downloadedBinPath, ESBUILD_BINARY_PATH, isValidBinaryPath, pkgAndSubpathForCurrentPlatform } from './node-platform' import fs = require('fs') import os = require('os') import path = require('path') import zlib = require('zlib') import https = require('https') import child_process = require('child_process') declare const ESBUILD_VERSION: string const toPath = path.join(__dirname, 'bin', 'esbuild') let isToPathJS = true function validateBinaryVersion(...command: string[]): void { command.push('--version') let stdout: string try { stdout = child_process.execFileSync(command.shift()!, command, { // Without this, this install script strangely crashes with the error // "EACCES: permission denied, write" but only on Ubuntu Linux when node is // installed from the Snap Store. This is not a problem when you download // the official version of node. The problem appears to be that stderr // (i.e. file descriptor 2) isn't writable? // // More info: // - https://snapcraft.io/ (what the Snap Store is) // - https://nodejs.org/dist/ (download the official version of node) // - https://github.com/evanw/esbuild/issues/1711#issuecomment-1027554035 // stdio: 'pipe', }).toString().trim() } catch (err) { if (os.platform() === 'darwin' && /_SecTrustEvaluateWithError/.test(err + '')) { let os = 'this version of macOS' try { os = 'macOS ' + child_process.execFileSync('sw_vers', ['-productVersion']).toString().trim() } catch { } throw new Error(`The "esbuild" package cannot be installed because ${os} is too outdated. The Go compiler (which esbuild relies on) no longer supports ${os}, which means the "esbuild" binary executable can't be run. You can either: * Update your version of macOS to one that the Go compiler supports * Use the "esbuild-wasm" package instead of the "esbuild" package * Build esbuild yourself using an older version of the Go compiler `) } throw err } if (stdout !== ESBUILD_VERSION) { throw new Error(`Expected ${JSON.stringify(ESBUILD_VERSION)} but got ${JSON.stringify(stdout)}`) } } function isYarn(): boolean { const { npm_config_user_agent } = process.env if (npm_config_user_agent) { return /\byarn\//.test(npm_config_user_agent) } return false } function fetch(url: string): Promise<Buffer> { return new Promise((resolve, reject) => { https.get(url, res => { if ((res.statusCode === 301 || res.statusCode === 302) && res.headers.location) return fetch(res.headers.location).then(resolve, reject) if (res.statusCode !== 200) return reject(new Error(`Server responded with ${res.statusCode}`)) let chunks: Buffer[] = [] res.on('data', chunk => chunks.push(chunk)) res.on('end', () => resolve(Buffer.concat(chunks))) }).on('error', reject) }) } function extractFileFromTarGzip(buffer: Buffer, subpath: string): Buffer { try { buffer = zlib.unzipSync(buffer) } catch (err: any) { throw new Error(`Invalid gzip data in archive: ${err && err.message || err}`) } let str = (i: number, n: number) => String.fromCharCode(...buffer.subarray(i, i + n)).replace(/\0.*$/, '') let offset = 0 subpath = `package/${subpath}` while (offset < buffer.length) { let name = str(offset, 100) let size = parseInt(str(offset + 124, 12), 8) offset += 512 if (!isNaN(size)) { if (name === subpath) return buffer.subarray(offset, offset + size) offset += (size + 511) & ~511 } } throw new Error(`Could not find ${JSON.stringify(subpath)} in archive`) } function installUsingNPM(pkg: string, subpath: string, binPath: string): void { // Erase "npm_config_global" so that "npm install --global esbuild" works. 
// Otherwise this nested "npm install" will also be global, and the install // will deadlock waiting for the global installation lock. const env = { ...process.env, npm_config_global: undefined } // Create a temporary directory inside the "esbuild" package with an empty // "package.json" file. We'll use this to run "npm install" in. const esbuildLibDir = path.dirname(require.resolve('esbuild')) const installDir = path.join(esbuildLibDir, 'npm-install') fs.mkdirSync(installDir) try { fs.writeFileSync(path.join(installDir, 'package.json'), '{}') // Run "npm install" in the temporary directory which should download the // desired package. Try to avoid unnecessary log output. This uses the "npm" // command instead of a HTTP request so that it hopefully works in situations // where HTTP requests are blocked but the "npm" command still works due to, // for example, a custom configured npm registry and special firewall rules. child_process.execSync(`npm install --loglevel=error --prefer-offline --no-audit --progress=false ${pkg}@${ESBUILD_VERSION}`, { cwd: installDir, stdio: 'pipe', env }) // Move the downloaded binary executable into place. The destination path // is the same one that the JavaScript API code uses so it will be able to // find the binary executable here later. const installedBinPath = path.join(installDir, 'node_modules', pkg, subpath) fs.renameSync(installedBinPath, binPath) } finally { // Try to clean up afterward so we don't unnecessarily waste file system // space. Leaving nested "node_modules" directories can also be problematic // for certain tools that scan over the file tree and expect it to have a // certain structure. try { removeRecursive(installDir) } catch { // Removing a file or directory can randomly break on Windows, returning // EBUSY for an arbitrary length of time. I think this happens when some // other program has that file or directory open (e.g. an anti-virus // program). This is fine on Unix because the OS just unlinks the entry // but keeps the reference around until it's unused. There's nothing we // can do in this case so we just leave the directory there. } } } function removeRecursive(dir: string): void { for (const entry of fs.readdirSync(dir)) { const entryPath = path.join(dir, entry) let stats try { stats = fs.lstatSync(entryPath) } catch { continue; // Guard against https://github.com/nodejs/node/issues/4760 } if (stats.isDirectory()) removeRecursive(entryPath) else fs.unlinkSync(entryPath) } fs.rmdirSync(dir) } function applyManualBinaryPathOverride(overridePath: string): void { // Patch the CLI use case (the "esbuild" command) const pathString = JSON.stringify(overridePath) fs.writeFileSync(toPath, `#!/usr/bin/env node\n` + `require('child_process').execFileSync(${pathString}, process.argv.slice(2), { stdio: 'inherit' });\n`) // Patch the JS API use case (the "require('esbuild')" workflow) const libMain = path.join(__dirname, 'lib', 'main.js') const code = fs.readFileSync(libMain, 'utf8') fs.writeFileSync(libMain, `var ESBUILD_BINARY_PATH = ${pathString};\n${code}`) } function maybeOptimizePackage(binPath: string): void { // This package contains a "bin/esbuild" JavaScript file that finds and runs // the appropriate binary executable. However, this means that running the // "esbuild" command runs another instance of "node" which is way slower than // just running the binary executable directly. // // Here we optimize for this by replacing the JavaScript file with the binary // executable at install time. 
This optimization does not work on Windows // because on Windows the binary executable must be called "esbuild.exe" // instead of "esbuild". // // This also doesn't work with Yarn both because of lack of support for binary // files in Yarn 2+ (see https://github.com/yarnpkg/berry/issues/882) and // because Yarn (even Yarn 1?) may run the same install scripts in the same // place multiple times from different platforms, especially when people use // Docker. Avoid idempotency issues by just not optimizing when using Yarn. // // This optimization also doesn't apply when npm's "--ignore-scripts" flag is // used since in that case this install script will not be run. if (os.platform() !== 'win32' && !isYarn()) { const tempPath = path.join(__dirname, 'bin-esbuild') try { // First link the binary with a temporary file. If this fails and throws an // error, then we'll just end up doing nothing. This uses a hard link to // avoid taking up additional space on the file system. fs.linkSync(binPath, tempPath) // Then use rename to atomically replace the target file with the temporary // file. If this fails and throws an error, then we'll just end up leaving // the temporary file there, which is harmless. fs.renameSync(tempPath, toPath) // If we get here, then we know that the target location is now a binary // executable instead of a JavaScript file. isToPathJS = false // If this install script is being re-run, then "renameSync" will fail // since the underlying inode is the same (it just returns without doing // anything, and without throwing an error). In that case we should remove // the file manually. fs.unlinkSync(tempPath) } catch { // Ignore errors here since this optimization is optional } } } async function downloadDirectlyFromNPM(pkg: string, subpath: string, binPath: string): Promise<void> { // If that fails, the user could have npm configured incorrectly or could not // have npm installed. Try downloading directly from npm as a last resort. const url = `https://registry.npmjs.org/${pkg}/-/${pkg.replace('@esbuild/', '')}-${ESBUILD_VERSION}.tgz` console.error(`[esbuild] Trying to download ${JSON.stringify(url)}`) try { fs.writeFileSync(binPath, extractFileFromTarGzip(await fetch(url), subpath)) fs.chmodSync(binPath, 0o755) } catch (e: any) { console.error(`[esbuild] Failed to download ${JSON.stringify(url)}: ${e && e.message || e}`) throw e } } async function checkAndPreparePackage(): Promise<void> { // This feature was added to give external code a way to modify the binary // path without modifying the code itself. Do not remove this because // external code relies on this (in addition to esbuild's own test suite). if (isValidBinaryPath(ESBUILD_BINARY_PATH)) { if (!fs.existsSync(ESBUILD_BINARY_PATH)) { console.warn(`[esbuild] Ignoring bad configuration: ESBUILD_BINARY_PATH=${ESBUILD_BINARY_PATH}`) } else { applyManualBinaryPathOverride(ESBUILD_BINARY_PATH) return } } const { pkg, subpath } = pkgAndSubpathForCurrentPlatform() let binPath: string try { // First check for the binary package from our "optionalDependencies". This // package should have been installed alongside this package at install time. binPath = require.resolve(`${pkg}/${subpath}`) } catch (e) { console.error(`[esbuild] Failed to find package "${pkg}" on the file system This can happen if you use the "--no-optional" flag. The "optionalDependencies" package.json feature is used by esbuild to install the correct binary executable for your current platform. This install script will now attempt to work around this. 
If that fails, you need to remove the "--no-optional" flag to use esbuild. `) // If that didn't work, then someone probably installed esbuild with the // "--no-optional" flag. Attempt to compensate for this by downloading the // package using a nested call to "npm" instead. // // THIS MAY NOT WORK. Package installation uses "optionalDependencies" for // a reason: manually downloading the package has a lot of obscure edge // cases that fail because people have customized their environment in // some strange way that breaks downloading. This code path is just here // to be helpful but it's not the supported way of installing esbuild. binPath = downloadedBinPath(pkg, subpath) try { console.error(`[esbuild] Trying to install package "${pkg}" using npm`) installUsingNPM(pkg, subpath, binPath) } catch (e2: any) { console.error(`[esbuild] Failed to install package "${pkg}" using npm: ${e2 && e2.message || e2}`) // If that didn't also work, then something is likely wrong with the "npm" // command. Attempt to compensate for this by manually downloading the // package from the npm registry over HTTP as a last resort. try { await downloadDirectlyFromNPM(pkg, subpath, binPath) } catch (e3: any) { throw new Error(`Failed to install package "${pkg}"`) } } } maybeOptimizePackage(binPath) } checkAndPreparePackage().then(() => { if (isToPathJS) { // We need "node" before this command since it's a JavaScript file validateBinaryVersion(process.execPath, toPath) } else { // This is no longer a JavaScript file so don't run it using "node" validateBinaryVersion(toPath) } })
lib/npm/node-install.ts
1
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.004144767299294472, 0.0006238422938622534, 0.00016444067296106368, 0.00018820600234903395, 0.0008989751222543418 ]
{ "id": 5, "code_window": [ " path.join(repoDir, 'lib', 'npm', 'node-install.ts'),\n", " '--outfile=' + path.join(npmDir, 'install.js'),\n", " '--bundle',\n", " '--target=' + nodeTarget,\n", " '--define:ESBUILD_VERSION=' + JSON.stringify(version),\n", " '--external:esbuild',\n", " '--platform=node',\n", " '--log-level=warning',\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ " // Note: https://socket.dev have complained that inlining the version into\n", " // the install script messes up some internal scanning that they do by\n", " // making it seem like esbuild's install script code changes with every\n", " // esbuild release. So now we read it from \"package.json\" instead.\n", " // '--define:ESBUILD_VERSION=' + JSON.stringify(version),\n" ], "file_path": "scripts/esbuild.js", "type": "replace", "edit_start_line_idx": 26 }
{ "name": "@esbuild/android-x64", "version": "0.17.10", "description": "A WebAssembly shim for esbuild on Android x64.", "repository": "https://github.com/evanw/esbuild", "license": "MIT", "preferUnplugged": true, "engines": { "node": ">=12" }, "os": [ "android" ], "cpu": [ "x64" ] }
npm/@esbuild/android-x64/package.json
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.0001826170046115294, 0.00017882550309877843, 0.00017503400158602744, 0.00017882550309877843, 0.0000037915015127509832 ]
{ "id": 5, "code_window": [ " path.join(repoDir, 'lib', 'npm', 'node-install.ts'),\n", " '--outfile=' + path.join(npmDir, 'install.js'),\n", " '--bundle',\n", " '--target=' + nodeTarget,\n", " '--define:ESBUILD_VERSION=' + JSON.stringify(version),\n", " '--external:esbuild',\n", " '--platform=node',\n", " '--log-level=warning',\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ " // Note: https://socket.dev have complained that inlining the version into\n", " // the install script messes up some internal scanning that they do by\n", " // making it seem like esbuild's install script code changes with every\n", " // esbuild release. So now we read it from \"package.json\" instead.\n", " // '--define:ESBUILD_VERSION=' + JSON.stringify(version),\n" ], "file_path": "scripts/esbuild.js", "type": "replace", "edit_start_line_idx": 26 }
const { removeRecursiveSync, buildWasmLib } = require('./esbuild.js'); const child_process = require('child_process'); const assert = require('assert'); const path = require('path'); const fs = require('fs'); const tests = { serveTest({ testDir, esbuildPathWASM }) { try { child_process.execFileSync('node', [ esbuildPathWASM, '--servedir=.', '--log-level=warning', ], { stdio: 'pipe', cwd: testDir, }); throw new Error('Expected an error to be thrown'); } catch (err) { assert.strictEqual(err.stderr + '', `✘ [ERROR] The "serve" API is not supported when using WebAssembly\n\n`) } }, basicStdinTest({ testDir, esbuildPathWASM }) { const stdout = child_process.execFileSync('node', [ esbuildPathWASM, '--format=cjs', '--log-level=warning', ], { stdio: ['pipe', 'pipe', 'inherit'], cwd: testDir, input: `export default 1+2`, }).toString(); // Check that the bundle is valid const module = { exports: {} }; new Function('module', 'exports', stdout)(module, module.exports); assert.deepStrictEqual(module.exports.default, 3); }, stdinOutfileTest({ testDir, esbuildPathWASM }) { const outfile = path.join(testDir, 'out.js') child_process.execFileSync('node', [ esbuildPathWASM, '--bundle', '--format=cjs', '--outfile=' + outfile, '--log-level=warning', ], { stdio: ['pipe', 'pipe', 'inherit'], cwd: testDir, input: `export default 1+2`, }).toString(); // Check that the bundle is valid const exports = require(outfile); assert.deepStrictEqual(exports.default, 3); }, stdinStdoutUnicodeTest({ testDir, esbuildPathWASM }) { const stdout = child_process.execFileSync('node', [ esbuildPathWASM, '--format=cjs', '--log-level=warning', ], { stdio: ['pipe', 'pipe', 'inherit'], cwd: testDir, input: `export default ['π', '🍕']`, }).toString(); // Check that the bundle is valid const module = { exports: {} }; new Function('module', 'exports', stdout)(module, module.exports); assert.deepStrictEqual(module.exports.default, ['π', '🍕']); }, stdinOutfileUnicodeTest({ testDir, esbuildPathWASM }) { const outfile = path.join(testDir, 'out.js') child_process.execFileSync('node', [ esbuildPathWASM, '--bundle', '--format=cjs', '--outfile=' + outfile, '--log-level=warning', ], { stdio: ['pipe', 'pipe', 'inherit'], cwd: testDir, input: `export default ['π', '🍕']`, }).toString(); // Check that the bundle is valid const exports = require(outfile); assert.deepStrictEqual(exports.default, ['π', '🍕']); }, stdoutLargeTest({ testDir, esbuildPathNative, esbuildPathWASM }) { const entryPoint = path.join(__dirname, 'js-api-tests.js'); // Build with native const stdoutNative = child_process.execFileSync(esbuildPathNative, [ entryPoint, '--log-level=warning', ], { stdio: ['pipe', 'pipe', 'inherit'], cwd: testDir, }).toString(); // Build with WASM const stdoutWASM = child_process.execFileSync('node', [ esbuildPathWASM, entryPoint, '--log-level=warning', ], { stdio: ['pipe', 'pipe', 'inherit'], cwd: testDir, }).toString(); // Check that the output is equal assert.deepStrictEqual(stdoutNative.length, stdoutWASM.length); assert.deepStrictEqual(stdoutNative, stdoutWASM); }, outfileLargeTest({ testDir, esbuildPathNative, esbuildPathWASM }) { const entryPoint = path.join(__dirname, 'js-api-tests.js'); // Build with native const outfileNative = path.join(testDir, 'a.js'); const stdoutNative = child_process.execFileSync(esbuildPathNative, [ entryPoint, '--outfile=' + outfileNative, '--log-level=warning', ], { stdio: ['pipe', 'pipe', 'inherit'], cwd: testDir, }).toString(); const jsNative = fs.readFileSync(outfileNative, 'utf8'); // Build with WASM const outfileWASM 
= path.join(testDir, 'b.js'); const stdoutWASM = child_process.execFileSync('node', [ esbuildPathWASM, entryPoint, '--outfile=' + outfileWASM, '--log-level=warning', ], { stdio: ['pipe', 'pipe', 'inherit'], cwd: testDir, }).toString(); const jsWASM = fs.readFileSync(outfileWASM, 'utf8'); // Check that the output is equal assert.deepStrictEqual(jsNative.length, jsWASM.length); assert.deepStrictEqual(jsNative, jsWASM); }, outfileNestedTest({ testDir, esbuildPathWASM }) { const outfile = path.join(testDir, 'a', 'b', 'c', 'd', 'out.js'); child_process.execFileSync('node', [ esbuildPathWASM, '--bundle', '--format=cjs', '--outfile=' + outfile, '--log-level=warning', ], { stdio: ['pipe', 'pipe', 'inherit'], cwd: testDir, input: `export default 123`, }).toString(); // Check that the bundle is valid const exports = require(outfile); assert.deepStrictEqual(exports.default, 123); }, metafileNestedTest({ testDir, esbuildPathWASM }) { const outfile = path.join(testDir, 'out.js'); const metafile = path.join(testDir, 'a', 'b', 'c', 'd', 'meta.json'); const cwd = path.join(testDir, 'a', 'b') fs.mkdirSync(cwd, { recursive: true }) child_process.execFileSync('node', [ esbuildPathWASM, '--bundle', '--format=cjs', '--outfile=' + outfile, '--metafile=' + metafile, '--log-level=warning', ], { stdio: ['pipe', 'pipe', 'inherit'], cwd, input: `export default 123`, }).toString(); // Check that the bundle is valid const exports = require(outfile); assert.deepStrictEqual(exports.default, 123); const json = JSON.parse(fs.readFileSync(metafile, 'utf8')); assert.deepStrictEqual(json.outputs['../../out.js'].entryPoint, '<stdin>'); }, importRelativeFileTest({ testDir, esbuildPathWASM }) { const outfile = path.join(testDir, 'out.js') const packageJSON = path.join(__dirname, '..', 'npm', 'esbuild-wasm', 'package.json'); child_process.execFileSync('node', [ esbuildPathWASM, '--bundle', '--format=cjs', '--outfile=' + outfile, '--log-level=warning', ], { stdio: ['pipe', 'pipe', 'inherit'], cwd: testDir, input: `export {default} from ` + JSON.stringify('./' + path.relative(testDir, packageJSON)), }).toString(); // Check that the bundle is valid const exports = require(outfile); assert.deepStrictEqual(exports.default, require(packageJSON)); }, importAbsoluteFileTest({ testDir, esbuildPathWASM }) { const outfile = path.join(testDir, 'out.js') const packageJSON = path.join(__dirname, '..', 'npm', 'esbuild-wasm', 'package.json'); child_process.execFileSync('node', [ esbuildPathWASM, '--bundle', '--format=cjs', '--outfile=' + outfile, '--log-level=warning', ], { stdio: ['pipe', 'pipe', 'inherit'], cwd: testDir, input: `export {default} from ` + JSON.stringify(packageJSON), }).toString(); // Check that the bundle is valid const exports = require(outfile); assert.deepStrictEqual(exports.default, require(packageJSON)); }, zipFile({ testDir, esbuildPathWASM }) { const entry = path.join(testDir, 'entry.js') fs.writeFileSync(entry, ` import foo from './test.zip/foo.js' import bar from './test.zip/bar/bar.js' import __virtual__1 from './test.zip/__virtual__/ignored/0/foo.js' import __virtual__2 from './test.zip/ignored/__virtual__/ignored/1/foo.js' import __virtual__3 from './test.zip/__virtual__/ignored/1/test.zip/foo.js' import $$virtual1 from './test.zip/$$virtual/ignored/0/foo.js' import $$virtual2 from './test.zip/ignored/$$virtual/ignored/1/foo.js' import $$virtual3 from './test.zip/$$virtual/ignored/1/test.zip/foo.js' console.log({ foo, bar, __virtual__1, __virtual__2, __virtual__3, $$virtual1, $$virtual2, $$virtual3, }) `) // This 
uses the real file system instead of the mock file system so that // we can check that everything works as expected on Windows, which is not // a POSIX environment. fs.writeFileSync(path.join(testDir, 'test.zip'), Buffer.from( `UEsDBAoAAgAAAG1qCFUSAXosFQAAABUAAAAGABwAZm9vLmpzVVQJAAOeRfFioEXxYnV4C` + `wABBPUBAAAEFAAAAGV4cG9ydCBkZWZhdWx0ICdmb28nClBLAwQKAAIAAABzaghVwuDbLR` + `UAAAAVAAAACgAcAGJhci9iYXIuanNVVAkAA6lF8WKrRfFidXgLAAEE9QEAAAQUAAAAZXh` + `wb3J0IGRlZmF1bHQgJ2JhcicKUEsBAh4DCgACAAAAbWoIVRIBeiwVAAAAFQAAAAYAGAAA` + `AAAAAQAAAKSBAAAAAGZvby5qc1VUBQADnkXxYnV4CwABBPUBAAAEFAAAAFBLAQIeAwoAA` + `gAAAHNqCFXC4NstFQAAABUAAAAKABgAAAAAAAEAAACkgVUAAABiYXIvYmFyLmpzVVQFAA` + `OpRfFidXgLAAEE9QEAAAQUAAAAUEsFBgAAAAACAAIAnAAAAK4AAAAAAA==`, 'base64')) const stdout = child_process.execFileSync('node', [ esbuildPathWASM, '--bundle', entry, ], { stdio: 'pipe', cwd: testDir, }).toString(); assert.strictEqual(stdout, `(() => { // test.zip/foo.js var foo_default = "foo"; // test.zip/bar/bar.js var bar_default = "bar"; // test.zip/__virtual__/ignored/0/foo.js var foo_default2 = "foo"; // test.zip/ignored/__virtual__/ignored/1/foo.js var foo_default3 = "foo"; // test.zip/__virtual__/ignored/1/test.zip/foo.js var foo_default4 = "foo"; // test.zip/$$virtual/ignored/0/foo.js var foo_default5 = "foo"; // test.zip/ignored/$$virtual/ignored/1/foo.js var foo_default6 = "foo"; // test.zip/$$virtual/ignored/1/test.zip/foo.js var foo_default7 = "foo"; // entry.js console.log({ foo: foo_default, bar: bar_default, __virtual__1: foo_default2, __virtual__2: foo_default3, __virtual__3: foo_default4, $$virtual1: foo_default5, $$virtual2: foo_default6, $$virtual3: foo_default7 }); })(); `) }, }; function runTest({ testDir, esbuildPathNative, esbuildPathWASM, test }) { try { fs.mkdirSync(testDir, { recursive: true }) test({ testDir, esbuildPathNative, esbuildPathWASM }) return true } catch (e) { console.error(`❌ ${test.name} failed: ${e && e.message || e}`) return false } } async function main() { // Generate the WebAssembly module const esbuildPathNative = path.join(__dirname, '..', process.platform === 'win32' ? 'esbuild.exe' : 'esbuild'); await buildWasmLib(esbuildPathNative); const esbuildPathWASM = path.join(__dirname, '..', 'npm', 'esbuild-wasm', 'bin', 'esbuild'); const testDir = path.join(__dirname, '.wasm-tests') // Run all tests in serial because WebAssembly compilation is a CPU hog let allTestsPassed = true; for (const test in tests) { if (!runTest({ testDir: path.join(testDir, test), test: tests[test], esbuildPathNative, esbuildPathWASM, })) { allTestsPassed = false; } } if (!allTestsPassed) { console.error(`❌ wasm-tests failed`) process.exit(1) } else { console.log(`✅ wasm-tests passed`) removeRecursiveSync(testDir) } } main().catch(e => setTimeout(() => { throw e }))
scripts/wasm-tests.js
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.0016011999687179923, 0.00028479471802711487, 0.00016398502339143306, 0.00019715559028554708, 0.0002562006702646613 ]
{ "id": 5, "code_window": [ " path.join(repoDir, 'lib', 'npm', 'node-install.ts'),\n", " '--outfile=' + path.join(npmDir, 'install.js'),\n", " '--bundle',\n", " '--target=' + nodeTarget,\n", " '--define:ESBUILD_VERSION=' + JSON.stringify(version),\n", " '--external:esbuild',\n", " '--platform=node',\n", " '--log-level=warning',\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ " // Note: https://socket.dev have complained that inlining the version into\n", " // the install script messes up some internal scanning that they do by\n", " // making it seem like esbuild's install script code changes with every\n", " // esbuild release. So now we read it from \"package.json\" instead.\n", " // '--define:ESBUILD_VERSION=' + JSON.stringify(version),\n" ], "file_path": "scripts/esbuild.js", "type": "replace", "edit_start_line_idx": 26 }
package helpers import ( "fmt" "strings" "sync" "time" "github.com/evanw/esbuild/internal/logger" ) type Timer struct { data []timerData mutex sync.Mutex } type timerData struct { time time.Time name string isEnd bool } func (t *Timer) Begin(name string) { if t != nil { t.data = append(t.data, timerData{ name: name, time: time.Now(), }) } } func (t *Timer) End(name string) { if t != nil { t.data = append(t.data, timerData{ name: name, time: time.Now(), isEnd: true, }) } } func (t *Timer) Fork() *Timer { if t != nil { return &Timer{} } return nil } func (t *Timer) Join(other *Timer) { if t != nil && other != nil { t.mutex.Lock() defer t.mutex.Unlock() t.data = append(t.data, other.data...) } } func (t *Timer) Log(log logger.Log) { if t == nil { return } type pair struct { timerData index uint32 } var notes []logger.MsgData var stack []pair indent := 0 for _, item := range t.data { if !item.isEnd { top := pair{timerData: item, index: uint32(len(notes))} notes = append(notes, logger.MsgData{DisableMaximumWidth: true}) stack = append(stack, top) indent++ } else { indent-- last := len(stack) - 1 top := stack[last] stack = stack[:last] if item.name != top.name { panic("Internal error") } notes[top.index].Text = fmt.Sprintf("%s%s: %dms", strings.Repeat(" ", indent), top.name, item.time.Sub(top.time).Milliseconds()) } } log.AddIDWithNotes(logger.MsgID_None, logger.Info, nil, logger.Range{}, "Timing information (times may not nest hierarchically due to parallelism)", notes) }
internal/helpers/timer.go
0
https://github.com/evanw/esbuild/commit/3f5138184a48b69893b7050577c9ea3ac31eab8b
[ 0.00017659150762483478, 0.000170541214174591, 0.00016456899174954742, 0.00017162939184345305, 0.000004153979716647882 ]
{ "id": 0, "code_window": [ "\t\tPermissions: 0644,\n", "\t\tModifiedS: 1234567890, // Sat Feb 14 00:31:30 CET 2009\n", "\t\tDeleted: fi.Deleted,\n", "\t\tVersion: version,\n", "\t\tSequence: fi.Sequence,\n", "\t\tRawBlockSize: fi.RawBlockSize + blockOverhead,\n", "\t\tBlocks: blocks,\n", "\t\tEncrypted: encryptedFI,\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tRawInvalid: fi.IsInvalid(),\n" ], "file_path": "lib/protocol/encryption.go", "type": "add", "edit_start_line_idx": 316 }
// Copyright (C) 2019 The Syncthing Authors. // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at https://mozilla.org/MPL/2.0/. package protocol import ( "bytes" "fmt" "reflect" "regexp" "strings" "sync" "testing" "github.com/syncthing/syncthing/lib/rand" "github.com/syncthing/syncthing/lib/sha256" ) func TestEnDecryptName(t *testing.T) { pattern := regexp.MustCompile( fmt.Sprintf("^[0-9A-V]%s/[0-9A-V]{2}/([0-9A-V]{%d}/)*[0-9A-V]{1,%d}$", regexp.QuoteMeta(encryptedDirExtension), maxPathComponent, maxPathComponent-1)) makeName := func(n int) string { b := make([]byte, n) for i := range b { b[i] = byte('a' + i%26) } return string(b) } var key [32]byte cases := []string{ "", "foo", "a longer name/with/slashes and spaces", makeName(maxPathComponent), makeName(1 + maxPathComponent), makeName(2 * maxPathComponent), makeName(1 + 2*maxPathComponent), } for _, tc := range cases { var prev string for i := 0; i < 5; i++ { enc := encryptName(tc, &key) if prev != "" && prev != enc { t.Error("name should always encrypt the same") } prev = enc if tc != "" && strings.Contains(enc, tc) { t.Error("shouldn't contain plaintext") } if !pattern.MatchString(enc) { t.Fatalf("encrypted name %s doesn't match %s", enc, pattern) } dec, err := decryptName(enc, &key) if err != nil { t.Error(err) } if dec != tc { t.Error("mismatch after decryption") } t.Logf("%q encrypts as %q", tc, enc) } } } func TestDecryptNameInvalid(t *testing.T) { key := new([32]byte) for _, c := range []string{ "T.syncthing-enc/OD", "T.syncthing-enc/OD/", "T.wrong-extension/OD/PHVDD67S7FI2K5QQMPSOFSK", "OD/PHVDD67S7FI2K5QQMPSOFSK", } { if _, err := decryptName(c, key); err == nil { t.Errorf("no error for %q", c) } } } func TestEnDecryptBytes(t *testing.T) { var key [32]byte cases := [][]byte{ {}, {1, 2, 3, 4, 5}, } for _, tc := range cases { var prev []byte for i := 0; i < 5; i++ { enc := encryptBytes(tc, &key) if bytes.Equal(enc, prev) { t.Error("encryption should not repeat") } prev = enc if len(tc) > 0 && bytes.Contains(enc, tc) { t.Error("shouldn't contain plaintext") } dec, err := DecryptBytes(enc, &key) if err != nil { t.Error(err) } if !bytes.Equal(dec, tc) { t.Error("mismatch after decryption") } } } } func TestEnDecryptFileInfo(t *testing.T) { var key [32]byte fi := FileInfo{ Name: "hello", Size: 45, Permissions: 0755, ModifiedS: 8080, Blocks: []BlockInfo{ { Offset: 0, Size: 45, Hash: []byte{1, 2, 3}, }, { Offset: 45, Size: 45, Hash: []byte{1, 2, 3}, }, }, } enc := encryptFileInfo(fi, &key) if bytes.Equal(enc.Blocks[0].Hash, enc.Blocks[1].Hash) { t.Error("block hashes should not repeat when on different offsets") } again := encryptFileInfo(fi, &key) if !bytes.Equal(enc.Blocks[0].Hash, again.Blocks[0].Hash) { t.Error("block hashes should remain stable (0)") } if !bytes.Equal(enc.Blocks[1].Hash, again.Blocks[1].Hash) { t.Error("block hashes should remain stable (1)") } dec, err := DecryptFileInfo(enc, &key) if err != nil { t.Error(err) } if !reflect.DeepEqual(fi, dec) { t.Error("mismatch after decryption") } } func TestIsEncryptedParent(t *testing.T) { comp := rand.String(maxPathComponent) cases := []struct { path string is bool }{ {"", false}, {".", false}, {"/", false}, {"12" + encryptedDirExtension, false}, {"1" + encryptedDirExtension, true}, {"1" + encryptedDirExtension + "/b", false}, {"1" + encryptedDirExtension + "/bc", true}, {"1" + encryptedDirExtension + "/bcd", false}, {"1" + encryptedDirExtension + 
"/bc/foo", false}, {"1.12/22", false}, {"1" + encryptedDirExtension + "/bc/" + comp, true}, {"1" + encryptedDirExtension + "/bc/" + comp + "/" + comp, true}, {"1" + encryptedDirExtension + "/bc/" + comp + "a", false}, {"1" + encryptedDirExtension + "/bc/" + comp + "/a/" + comp, false}, } for _, tc := range cases { if res := IsEncryptedParent(tc.path); res != tc.is { t.Errorf("%v: got %v, expected %v", tc.path, res, tc.is) } } } var benchmarkFileKey struct { key [keySize]byte sync.Once } func BenchmarkFileKey(b *testing.B) { benchmarkFileKey.Do(func() { sha256.SelectAlgo() rand.Read(benchmarkFileKey.key[:]) }) b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { FileKey("a_kind_of_long_filename.ext", &benchmarkFileKey.key) } }
lib/protocol/encryption_test.go
1
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.0018372603226453066, 0.0002776658220682293, 0.00016758512356318533, 0.00017326207307633013, 0.0003565587685443461 ]
{ "id": 0, "code_window": [ "\t\tPermissions: 0644,\n", "\t\tModifiedS: 1234567890, // Sat Feb 14 00:31:30 CET 2009\n", "\t\tDeleted: fi.Deleted,\n", "\t\tVersion: version,\n", "\t\tSequence: fi.Sequence,\n", "\t\tRawBlockSize: fi.RawBlockSize + blockOverhead,\n", "\t\tBlocks: blocks,\n", "\t\tEncrypted: encryptedFI,\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tRawInvalid: fi.IsInvalid(),\n" ], "file_path": "lib/protocol/encryption.go", "type": "add", "edit_start_line_idx": 316 }
// Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file). package client import ( "context" "crypto/tls" "encoding/json" "errors" "fmt" "net/http" "net/url" "sort" "time" "github.com/syncthing/syncthing/lib/osutil" "github.com/syncthing/syncthing/lib/rand" "github.com/syncthing/syncthing/lib/relay/protocol" ) type dynamicClient struct { commonClient pooladdr *url.URL certs []tls.Certificate timeout time.Duration client RelayClient } func newDynamicClient(uri *url.URL, certs []tls.Certificate, invitations chan protocol.SessionInvitation, timeout time.Duration) RelayClient { c := &dynamicClient{ pooladdr: uri, certs: certs, timeout: timeout, } c.commonClient = newCommonClient(invitations, c.serve, fmt.Sprintf("dynamicClient@%p", c)) return c } func (c *dynamicClient) serve(ctx context.Context) error { uri := *c.pooladdr // Trim off the `dynamic+` prefix uri.Scheme = uri.Scheme[8:] l.Debugln(c, "looking up dynamic relays") req, err := http.NewRequest("GET", uri.String(), nil) if err != nil { l.Debugln(c, "failed to lookup dynamic relays", err) return err } req.Cancel = ctx.Done() data, err := http.DefaultClient.Do(req) if err != nil { l.Debugln(c, "failed to lookup dynamic relays", err) return err } var ann dynamicAnnouncement err = json.NewDecoder(data.Body).Decode(&ann) data.Body.Close() if err != nil { l.Debugln(c, "failed to lookup dynamic relays", err) return err } var addrs []string for _, relayAnn := range ann.Relays { ruri, err := url.Parse(relayAnn.URL) if err != nil { l.Debugln(c, "failed to parse dynamic relay address", relayAnn.URL, err) continue } l.Debugln(c, "found", ruri) addrs = append(addrs, ruri.String()) } for _, addr := range relayAddressesOrder(ctx, addrs) { select { case <-ctx.Done(): l.Debugln(c, "stopping") return nil default: ruri, err := url.Parse(addr) if err != nil { l.Debugln(c, "skipping relay", addr, err) continue } client := newStaticClient(ruri, c.certs, c.invitations, c.timeout) c.mut.Lock() c.client = client c.mut.Unlock() c.client.Serve(ctx) c.mut.Lock() c.client = nil c.mut.Unlock() } } l.Debugln(c, "could not find a connectable relay") return errors.New("could not find a connectable relay") } func (c *dynamicClient) Error() error { c.mut.RLock() defer c.mut.RUnlock() if c.client == nil { return c.commonClient.Error() } return c.client.Error() } func (c *dynamicClient) Latency() time.Duration { c.mut.RLock() defer c.mut.RUnlock() if c.client == nil { return time.Hour } return c.client.Latency() } func (c *dynamicClient) String() string { return fmt.Sprintf("DynamicClient:%p:%s@%s", c, c.URI(), c.pooladdr) } func (c *dynamicClient) URI() *url.URL { c.mut.RLock() defer c.mut.RUnlock() if c.client == nil { return nil } return c.client.URI() } // This is the announcement received from the relay server; // {"relays": [{"url": "relay://10.20.30.40:5060"}, ...]} type dynamicAnnouncement struct { Relays []struct { URL string } } // relayAddressesOrder checks the latency to each relay, rounds latency down to // the closest 50ms, and puts them in buckets of 50ms latency ranges. Then // shuffles each bucket, and returns all addresses starting with the ones from // the lowest latency bucket, ending with the highest latency buceket. 
func relayAddressesOrder(ctx context.Context, input []string) []string { buckets := make(map[int][]string) for _, relay := range input { latency, err := osutil.GetLatencyForURL(ctx, relay) if err != nil { latency = time.Hour } id := int(latency/time.Millisecond) / 50 buckets[id] = append(buckets[id], relay) select { case <-ctx.Done(): return nil default: } } var ids []int for id, bucket := range buckets { rand.Shuffle(bucket) ids = append(ids, id) } sort.Ints(ids) addresses := make([]string, 0, len(input)) for _, id := range ids { addresses = append(addresses, buckets[id]...) } return addresses }
lib/relay/client/dynamic.go
0
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.00017643498722463846, 0.0001715089165372774, 0.00016120573855005205, 0.00017319302423857152, 0.000004420346613187576 ]
{ "id": 0, "code_window": [ "\t\tPermissions: 0644,\n", "\t\tModifiedS: 1234567890, // Sat Feb 14 00:31:30 CET 2009\n", "\t\tDeleted: fi.Deleted,\n", "\t\tVersion: version,\n", "\t\tSequence: fi.Sequence,\n", "\t\tRawBlockSize: fi.RawBlockSize + blockOverhead,\n", "\t\tBlocks: blocks,\n", "\t\tEncrypted: encryptedFI,\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tRawInvalid: fi.IsInvalid(),\n" ], "file_path": "lib/protocol/encryption.go", "type": "add", "edit_start_line_idx": 316 }
<!--<div class="grid-container" gdAreas="header header | folders devices | status-list status-list | footer footer" gdGap="16px" gdRows="auto auto auto"> --> <!--<div class="grid-container" fxLayout="row" fxLayoutGap="16px grid" fxLayoutAlign="stretch">--> <div class="progress"> <mat-progress-bar mode="determinate" value="{{progressValue}}" [@progressBar]="isLoading ? 'start' : 'done'"> </mat-progress-bar> </div> <div fxLayout="column" fxLayoutGap="16px" class="grid-container" [@loading]="isLoading ? 'start' : 'done'"> <div fxLayout="row" fxLayoutGap="16px" fxLayoutAlign="space-between stretch"> <app-chart [type]=folderChart fxFlex="50"></app-chart> <app-chart [type]=deviceChart fxFlex="50"></app-chart> </div> <app-status-list gdArea="status-list"></app-status-list> <div></div> </div>
next-gen-gui/src/app/dashboard/dashboard.component.html
0
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.0001764834305504337, 0.00017408811254426837, 0.00017169280909001827, 0.00017408811254426837, 0.0000023953107302077115 ]
{ "id": 0, "code_window": [ "\t\tPermissions: 0644,\n", "\t\tModifiedS: 1234567890, // Sat Feb 14 00:31:30 CET 2009\n", "\t\tDeleted: fi.Deleted,\n", "\t\tVersion: version,\n", "\t\tSequence: fi.Sequence,\n", "\t\tRawBlockSize: fi.RawBlockSize + blockOverhead,\n", "\t\tBlocks: blocks,\n", "\t\tEncrypted: encryptedFI,\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tRawInvalid: fi.IsInvalid(),\n" ], "file_path": "lib/protocol/encryption.go", "type": "add", "edit_start_line_idx": 316 }
import { Component, OnInit, ViewChild, Input, Type } from '@angular/core'; import Folder from '../../folder' import { FolderService } from 'src/app/services/folder.service'; import { DonutChartComponent } from '../donut-chart/donut-chart.component'; import { DeviceService } from 'src/app/services/device.service'; import Device from 'src/app/device'; import { StType } from '../../type'; import { FilterService } from 'src/app/services/filter.service'; import { Observable } from 'rxjs'; export interface ChartItemState { label: string, count: number, color: string, selected: boolean, } @Component({ selector: 'app-chart', templateUrl: './chart.component.html', styleUrls: ['./chart.component.scss'] }) export class ChartComponent implements OnInit { @ViewChild(DonutChartComponent) donutChart: DonutChartComponent; @Input() type: StType; title: string; chartID: string; states: ChartItemState[] = []; private observer: Observable<any>; private activeChartState: ChartItemState; constructor( private folderService: FolderService, private deviceService: DeviceService, private filterService: FilterService, ) { } onItemSelect(s: ChartItemState) { // Send chart item state to filter this.filterService.changeFilter({ type: this.type, text: s.label }); // Deselect all other items this.states.forEach(s => { s.selected = false; }); // Select item only if (s !== this.activeChartState) { s.selected = true; this.activeChartState = s; } else { this.activeChartState = null; this.filterService.changeFilter({ type: this.type, text: "" }) } } ngOnInit(): void { switch (this.type) { case StType.Folder: this.title = "Folders"; this.chartID = 'foldersChart'; this.observer = this.folderService.folderAdded$; break; case StType.Device: this.title = "Devices"; this.chartID = 'devicesChart'; this.observer = this.deviceService.deviceAdded$; break; } } ngAfterViewInit() { let totalCount: number = 0; this.observer.subscribe( t => { // Count the number of folders and set chart totalCount++; this.donutChart.count = totalCount; // Get StateType and convert to string const stateType = t.stateType; const state = t.state; let color; switch (this.type) { case StType.Folder: color = Folder.stateTypeToColor(t.stateType); break; case StType.Device: color = Device.stateTypeToColor(stateType); break; } // Check if state exists let found: boolean = false; this.states.forEach(s => { if (s.label === state) { s.count = s.count + 1; found = true; } }); if (!found) { this.states.push({ label: state, count: 1, color: color, selected: false }); } this.donutChart.updateData(this.states); }, err => console.error('Observer got an error: ' + err), () => { } ); } }
next-gen-gui/src/app/charts/chart/chart.component.ts
0
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.00017743304488249123, 0.00017473312618676573, 0.00017169788770843297, 0.00017489990568719804, 0.0000017022967995217186 ]
{ "id": 1, "code_window": [ "\t}\n", "}\n", "\n", "func TestEnDecryptFileInfo(t *testing.T) {\n", "\tvar key [32]byte\n", "\tfi := FileInfo{\n", "\t\tName: \"hello\",\n", "\t\tSize: 45,\n", "\t\tPermissions: 0755,\n", "\t\tModifiedS: 8080,\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "func encFileInfo() FileInfo {\n", "\treturn FileInfo{\n" ], "file_path": "lib/protocol/encryption_test.go", "type": "replace", "edit_start_line_idx": 115 }
// Copyright (C) 2019 The Syncthing Authors. // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at https://mozilla.org/MPL/2.0/. package protocol import ( "bytes" "fmt" "reflect" "regexp" "strings" "sync" "testing" "github.com/syncthing/syncthing/lib/rand" "github.com/syncthing/syncthing/lib/sha256" ) func TestEnDecryptName(t *testing.T) { pattern := regexp.MustCompile( fmt.Sprintf("^[0-9A-V]%s/[0-9A-V]{2}/([0-9A-V]{%d}/)*[0-9A-V]{1,%d}$", regexp.QuoteMeta(encryptedDirExtension), maxPathComponent, maxPathComponent-1)) makeName := func(n int) string { b := make([]byte, n) for i := range b { b[i] = byte('a' + i%26) } return string(b) } var key [32]byte cases := []string{ "", "foo", "a longer name/with/slashes and spaces", makeName(maxPathComponent), makeName(1 + maxPathComponent), makeName(2 * maxPathComponent), makeName(1 + 2*maxPathComponent), } for _, tc := range cases { var prev string for i := 0; i < 5; i++ { enc := encryptName(tc, &key) if prev != "" && prev != enc { t.Error("name should always encrypt the same") } prev = enc if tc != "" && strings.Contains(enc, tc) { t.Error("shouldn't contain plaintext") } if !pattern.MatchString(enc) { t.Fatalf("encrypted name %s doesn't match %s", enc, pattern) } dec, err := decryptName(enc, &key) if err != nil { t.Error(err) } if dec != tc { t.Error("mismatch after decryption") } t.Logf("%q encrypts as %q", tc, enc) } } } func TestDecryptNameInvalid(t *testing.T) { key := new([32]byte) for _, c := range []string{ "T.syncthing-enc/OD", "T.syncthing-enc/OD/", "T.wrong-extension/OD/PHVDD67S7FI2K5QQMPSOFSK", "OD/PHVDD67S7FI2K5QQMPSOFSK", } { if _, err := decryptName(c, key); err == nil { t.Errorf("no error for %q", c) } } } func TestEnDecryptBytes(t *testing.T) { var key [32]byte cases := [][]byte{ {}, {1, 2, 3, 4, 5}, } for _, tc := range cases { var prev []byte for i := 0; i < 5; i++ { enc := encryptBytes(tc, &key) if bytes.Equal(enc, prev) { t.Error("encryption should not repeat") } prev = enc if len(tc) > 0 && bytes.Contains(enc, tc) { t.Error("shouldn't contain plaintext") } dec, err := DecryptBytes(enc, &key) if err != nil { t.Error(err) } if !bytes.Equal(dec, tc) { t.Error("mismatch after decryption") } } } } func TestEnDecryptFileInfo(t *testing.T) { var key [32]byte fi := FileInfo{ Name: "hello", Size: 45, Permissions: 0755, ModifiedS: 8080, Blocks: []BlockInfo{ { Offset: 0, Size: 45, Hash: []byte{1, 2, 3}, }, { Offset: 45, Size: 45, Hash: []byte{1, 2, 3}, }, }, } enc := encryptFileInfo(fi, &key) if bytes.Equal(enc.Blocks[0].Hash, enc.Blocks[1].Hash) { t.Error("block hashes should not repeat when on different offsets") } again := encryptFileInfo(fi, &key) if !bytes.Equal(enc.Blocks[0].Hash, again.Blocks[0].Hash) { t.Error("block hashes should remain stable (0)") } if !bytes.Equal(enc.Blocks[1].Hash, again.Blocks[1].Hash) { t.Error("block hashes should remain stable (1)") } dec, err := DecryptFileInfo(enc, &key) if err != nil { t.Error(err) } if !reflect.DeepEqual(fi, dec) { t.Error("mismatch after decryption") } } func TestIsEncryptedParent(t *testing.T) { comp := rand.String(maxPathComponent) cases := []struct { path string is bool }{ {"", false}, {".", false}, {"/", false}, {"12" + encryptedDirExtension, false}, {"1" + encryptedDirExtension, true}, {"1" + encryptedDirExtension + "/b", false}, {"1" + encryptedDirExtension + "/bc", true}, {"1" + encryptedDirExtension + "/bcd", false}, {"1" + encryptedDirExtension + 
"/bc/foo", false}, {"1.12/22", false}, {"1" + encryptedDirExtension + "/bc/" + comp, true}, {"1" + encryptedDirExtension + "/bc/" + comp + "/" + comp, true}, {"1" + encryptedDirExtension + "/bc/" + comp + "a", false}, {"1" + encryptedDirExtension + "/bc/" + comp + "/a/" + comp, false}, } for _, tc := range cases { if res := IsEncryptedParent(tc.path); res != tc.is { t.Errorf("%v: got %v, expected %v", tc.path, res, tc.is) } } } var benchmarkFileKey struct { key [keySize]byte sync.Once } func BenchmarkFileKey(b *testing.B) { benchmarkFileKey.Do(func() { sha256.SelectAlgo() rand.Read(benchmarkFileKey.key[:]) }) b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { FileKey("a_kind_of_long_filename.ext", &benchmarkFileKey.key) } }
lib/protocol/encryption_test.go
1
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.9989094734191895, 0.2464451789855957, 0.0001673665246926248, 0.005062528885900974, 0.4110756814479828 ]
{ "id": 1, "code_window": [ "\t}\n", "}\n", "\n", "func TestEnDecryptFileInfo(t *testing.T) {\n", "\tvar key [32]byte\n", "\tfi := FileInfo{\n", "\t\tName: \"hello\",\n", "\t\tSize: 45,\n", "\t\tPermissions: 0755,\n", "\t\tModifiedS: 8080,\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "func encFileInfo() FileInfo {\n", "\treturn FileInfo{\n" ], "file_path": "lib/protocol/encryption_test.go", "type": "replace", "edit_start_line_idx": 115 }
<?xml version="1.0" encoding="utf-8"?> <!-- Generator: Adobe Illustrator 18.1.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) --> <svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 429 117.3" enable-background="new 0 0 429 117.3" xml:space="preserve"> <g> <linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="58.666" y1="117.332" x2="58.666" y2="-9.094947e-13"> <stop offset="0" style="stop-color:#0882C8"/> <stop offset="1" style="stop-color:#26B6DB"/> </linearGradient> <circle fill="url(#SVGID_1_)" cx="58.7" cy="58.7" r="58.7"/> <g> <circle fill="none" stroke="#FFFFFF" stroke-width="6" stroke-miterlimit="10" cx="58.7" cy="58.5" r="43.7"/> <g> <path fill="#FFFFFF" d="M94.7,47.8c4.7,1.6,9.8-0.9,11.4-5.6c1.6-4.7-0.9-9.8-5.6-11.4c-4.7-1.6-9.8,0.9-11.4,5.6 C87.5,41.1,90,46.2,94.7,47.8z"/> <line fill="none" stroke="#FFFFFF" stroke-width="6" stroke-miterlimit="10" x1="97.6" y1="39.4" x2="67.5" y2="64.4"/> </g> <g> <path fill="#FFFFFF" d="M77.6,91c-0.4,4.9,3.2,9.3,8.2,9.8c5,0.4,9.3-3.2,9.8-8.2c0.4-4.9-3.2-9.3-8.2-9.8 C82.4,82.4,78,86,77.6,91z"/> <line fill="none" stroke="#FFFFFF" stroke-width="6" stroke-miterlimit="10" x1="86.5" y1="91.8" x2="67.5" y2="64.4"/> </g> <path fill="#FFFFFF" d="M60,69.3c2.7,4.2,8.3,5.4,12.4,2.7c4.2-2.7,5.4-8.3,2.7-12.4c-2.7-4.2-8.3-5.4-12.4-2.7 C58.5,59.5,57.3,65.1,60,69.3z"/> <g> <path fill="#FFFFFF" d="M21.2,61.4c-4.3-2.5-9.8-1.1-12.3,3.1c-2.5,4.3-1.1,9.8,3.1,12.3c4.3,2.5,9.8,1.1,12.3-3.1 C26.8,69.5,25.4,64,21.2,61.4z"/> <line fill="none" stroke="#FFFFFF" stroke-width="6" stroke-miterlimit="10" x1="16.6" y1="69.1" x2="67.5" y2="64.4"/> </g> </g> </g> <g> <path fill="#0891D1" d="M163.8,50.2c-0.6-0.7-6.3-4.1-11.4-4.1c-3.4,0-5.2,1.2-5.2,3.5c0,2.9,3.2,3.7,8.9,5.2 c8.2,2.2,13.3,5,13.3,12.9c0,9.7-7.8,13-16,13c-6.2,0-13.1-2-18.2-5.3l4.3-8.6c0.8,0.8,7.5,5,14,5c3.5,0,5.2-1.1,5.2-3.2 c0-3.2-4.4-4-10.3-5.8c-7.9-2.4-11.5-5.3-11.5-11.8c0-9,7.2-13.9,15.7-13.9c6.1,0,11.6,2.5,15.4,4.7L163.8,50.2z"/> <path fill="#0891D1" d="M175,85.1c1.7,0.5,3.3,0.8,4.4,0.8c2,0,3.3-1.5,4.2-5.5l-11.9-31.5h9.8l7.4,23.3l6.3-23.3h8.9l-12.1,36.6 c-1.7,5.3-6.2,8.7-11.8,8.8c-1.7,0-3.5-0.2-5.3-0.9V85.1z"/> <path fill="#0891D1" d="M239.3,80.3h-9.6V62.6c0-4.1-1.7-5.9-4.3-5.9c-2.6,0-5.8,2.3-7,5.6v18.1h-9.6V48.8h8.6v5.3 c2.3-3.7,6.8-5.9,12.2-5.9c8.2,0,9.5,6.7,9.5,11.9V80.3z"/> <path fill="#0891D1" d="M261.6,48.2c7.2,0,12.3,3.4,14.8,8.3l-9.4,2.8c-1.2-1.9-3.1-3-5.5-3c-4,0-7,3.2-7,8.2c0,5,3.1,8.3,7,8.3 c2.4,0,4.6-1.3,5.5-3.1l9.4,2.9c-2.3,4.9-7.6,8.3-14.8,8.3c-10.6,0-16.9-7.7-16.9-16.4S250.9,48.2,261.6,48.2z"/> <path fill="#0891D1" d="M302.1,78.7c-2.6,1.1-6.2,2.3-9.7,2.3c-4.7,0-8.8-2.3-8.8-8.4V56.1h-4v-7.3h4v-10h9.6v10h6.4v7.3h-6.4v13.1 c0,2.1,1.2,2.9,2.8,2.9c1.4,0,3-0.6,4.2-1.1L302.1,78.7z"/> <path fill="#0891D1" d="M337.2,80.3h-9.6V62.6c0-4.1-1.8-5.9-4.6-5.9c-2.3,0-5.5,2.2-6.7,5.6v18.1h-9.6V36.5h9.6v17.6 c2.3-3.7,6.3-5.9,10.9-5.9c8.5,0,9.9,6.5,9.9,11.9V80.3z"/> <path fill="#0891D1" d="M343.4,45.2v-8.7h9.6v8.7H343.4z M343.4,80.3V48.8h9.6v31.5H343.4z"/> <path fill="#0891D1" d="M389.9,80.3h-9.6V62.6c0-4.1-1.7-5.9-4.3-5.9c-2.6,0-5.8,2.3-7,5.6v18.1h-9.6V48.8h8.6v5.3 c2.3-3.7,6.8-5.9,12.2-5.9c8.2,0,9.5,6.7,9.5,11.9V80.3z"/> <path fill="#0891D1" d="M395.5,64.6c0-9.2,6-16.3,14.6-16.3c4.7,0,8.4,2.2,10.6,5.8v-5.2h8.3v29.3c0,9.6-7.5,15.5-18.2,15.5 c-6.8,0-11.5-2.3-15-6.3l5.1-5.2c2.3,2.6,6,4.3,9.9,4.3c4.6,0,8.6-2.4,8.6-8.3v-3.1c-1.9,3.5-5.9,5.3-10,5.3 C401.1,80.5,395.5,73.3,395.5,64.6z 
M419.4,68.5v-6.6c-1.3-3.3-4.2-5.5-7.1-5.5c-4.1,0-7,4-7,8.4c0,4.6,3.2,8,7.5,8 C415.7,72.8,418.1,71,419.4,68.5z"/> </g> </svg>
assets/logo-horizontal.svg
0
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.0009145552758127451, 0.00030343272374011576, 0.00016570743173360825, 0.00017134594963863492, 0.0002741913194768131 ]
{ "id": 1, "code_window": [ "\t}\n", "}\n", "\n", "func TestEnDecryptFileInfo(t *testing.T) {\n", "\tvar key [32]byte\n", "\tfi := FileInfo{\n", "\t\tName: \"hello\",\n", "\t\tSize: 45,\n", "\t\tPermissions: 0755,\n", "\t\tModifiedS: 8080,\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "func encFileInfo() FileInfo {\n", "\treturn FileInfo{\n" ], "file_path": "lib/protocol/encryption_test.go", "type": "replace", "edit_start_line_idx": 115 }
// Copyright (C) 2020 The Syncthing Authors. // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at https://mozilla.org/MPL/2.0/. //go:generate counterfeiter -o mocks/manager.go --fake-name Manager . Manager package discover import ( "context" "crypto/tls" "fmt" "sort" "time" "github.com/thejerf/suture/v4" "github.com/syncthing/syncthing/lib/config" "github.com/syncthing/syncthing/lib/events" "github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/svcutil" "github.com/syncthing/syncthing/lib/sync" "github.com/syncthing/syncthing/lib/util" ) // The Manager aggregates results from multiple Finders. Each Finder has // an associated cache time and negative cache time. The cache time sets how // long we cache and return successful lookup results, the negative cache // time sets how long we refrain from asking about the same device ID after // receiving a negative answer. The value of zero disables caching (positive // or negative). type Manager interface { FinderService ChildErrors() map[string]error } type manager struct { *suture.Supervisor myID protocol.DeviceID cfg config.Wrapper cert tls.Certificate evLogger events.Logger addressLister AddressLister finders map[string]cachedFinder mut sync.RWMutex } func NewManager(myID protocol.DeviceID, cfg config.Wrapper, cert tls.Certificate, evLogger events.Logger, lister AddressLister) Manager { m := &manager{ Supervisor: suture.New("discover.Manager", svcutil.SpecWithDebugLogger(l)), myID: myID, cfg: cfg, cert: cert, evLogger: evLogger, addressLister: lister, finders: make(map[string]cachedFinder), mut: sync.NewRWMutex(), } m.Add(svcutil.AsService(m.serve, m.String())) return m } func (m *manager) serve(ctx context.Context) error { m.cfg.Subscribe(m) m.CommitConfiguration(config.Configuration{}, m.cfg.RawCopy()) <-ctx.Done() m.cfg.Unsubscribe(m) return nil } func (m *manager) addLocked(identity string, finder Finder, cacheTime, negCacheTime time.Duration) { entry := cachedFinder{ Finder: finder, cacheTime: cacheTime, negCacheTime: negCacheTime, cache: newCache(), token: nil, } if service, ok := finder.(suture.Service); ok { token := m.Supervisor.Add(service) entry.token = &token } m.finders[identity] = entry l.Infoln("Using discovery mechanism:", identity) } func (m *manager) removeLocked(identity string) { entry, ok := m.finders[identity] if !ok { return } if entry.token != nil { err := m.Supervisor.Remove(*entry.token) if err != nil { l.Warnf("removing discovery %s: %s", identity, err) } } delete(m.finders, identity) l.Infoln("Stopped using discovery mechanism: ", identity) } // Lookup attempts to resolve the device ID using any of the added Finders, // while obeying the cache settings. func (m *manager) Lookup(ctx context.Context, deviceID protocol.DeviceID) (addresses []string, err error) { m.mut.RLock() for _, finder := range m.finders { if cacheEntry, ok := finder.cache.Get(deviceID); ok { // We have a cache entry. Lets see what it says. if cacheEntry.found && time.Since(cacheEntry.when) < finder.cacheTime { // It's a positive, valid entry. Use it. l.Debugln("cached discovery entry for", deviceID, "at", finder) l.Debugln(" cache:", cacheEntry) addresses = append(addresses, cacheEntry.Addresses...) continue } valid := time.Now().Before(cacheEntry.validUntil) || time.Since(cacheEntry.when) < finder.negCacheTime if !cacheEntry.found && valid { // It's a negative, valid entry. 
We should not make another // attempt right now. l.Debugln("negative cache entry for", deviceID, "at", finder, "valid until", cacheEntry.when.Add(finder.negCacheTime), "or", cacheEntry.validUntil) continue } // It's expired. Ignore and continue. } // Perform the actual lookup and cache the result. if addrs, err := finder.Lookup(ctx, deviceID); err == nil { l.Debugln("lookup for", deviceID, "at", finder) l.Debugln(" addresses:", addrs) addresses = append(addresses, addrs...) finder.cache.Set(deviceID, CacheEntry{ Addresses: addrs, when: time.Now(), found: len(addrs) > 0, }) } else { // Lookup returned error, add a negative cache entry. entry := CacheEntry{ when: time.Now(), found: false, } if err, ok := err.(cachedError); ok { entry.validUntil = time.Now().Add(err.CacheFor()) } finder.cache.Set(deviceID, entry) } } m.mut.RUnlock() addresses = util.UniqueTrimmedStrings(addresses) sort.Strings(addresses) l.Debugln("lookup results for", deviceID) l.Debugln(" addresses: ", addresses) return addresses, nil } func (m *manager) String() string { return "discovery cache" } func (m *manager) Error() error { return nil } func (m *manager) ChildErrors() map[string]error { children := make(map[string]error, len(m.finders)) m.mut.RLock() for _, f := range m.finders { children[f.String()] = f.Error() } m.mut.RUnlock() return children } func (m *manager) Cache() map[protocol.DeviceID]CacheEntry { // Res will be the "total" cache, i.e. the union of our cache and all our // children's caches. res := make(map[protocol.DeviceID]CacheEntry) m.mut.RLock() for _, finder := range m.finders { // Each finder[i] has a corresponding cache. Go through // it and populate the total, appending any addresses and keeping // the newest "when" time. We skip any negative cache finders. for k, v := range finder.cache.Cache() { if v.found { cur := res[k] if v.when.After(cur.when) { cur.when = v.when } cur.Addresses = append(cur.Addresses, v.Addresses...) res[k] = cur } } // Then ask the finder itself for its cache and do the same. If this // finder is a global discovery client, it will have no cache. If it's // a local discovery client, this will be its current state. for k, v := range finder.Cache() { if v.found { cur := res[k] if v.when.After(cur.when) { cur.when = v.when } cur.Addresses = append(cur.Addresses, v.Addresses...) res[k] = cur } } } m.mut.RUnlock() for k, v := range res { v.Addresses = util.UniqueTrimmedStrings(v.Addresses) res[k] = v } return res } func (m *manager) VerifyConfiguration(_, _ config.Configuration) error { return nil } func (m *manager) CommitConfiguration(_, to config.Configuration) (handled bool) { m.mut.Lock() defer m.mut.Unlock() toIdentities := make(map[string]struct{}) if to.Options.GlobalAnnEnabled { for _, srv := range to.Options.GlobalDiscoveryServers() { toIdentities[globalDiscoveryIdentity(srv)] = struct{}{} } } if to.Options.LocalAnnEnabled { toIdentities[ipv4Identity(to.Options.LocalAnnPort)] = struct{}{} toIdentities[ipv6Identity(to.Options.LocalAnnMCAddr)] = struct{}{} } // Remove things that we're not expected to have. for identity := range m.finders { if _, ok := toIdentities[identity]; !ok { m.removeLocked(identity) } } // Add things we don't have. if to.Options.GlobalAnnEnabled { for _, srv := range to.Options.GlobalDiscoveryServers() { identity := globalDiscoveryIdentity(srv) // Skip, if it's already running. 
if _, ok := m.finders[identity]; ok { continue } gd, err := NewGlobal(srv, m.cert, m.addressLister, m.evLogger) if err != nil { l.Warnln("Global discovery:", err) continue } // Each global discovery server gets its results cached for five // minutes, and is not asked again for a minute when it's returned // unsuccessfully. m.addLocked(identity, gd, 5*time.Minute, time.Minute) } } if to.Options.LocalAnnEnabled { // v4 broadcasts v4Identity := ipv4Identity(to.Options.LocalAnnPort) if _, ok := m.finders[v4Identity]; !ok { bcd, err := NewLocal(m.myID, fmt.Sprintf(":%d", to.Options.LocalAnnPort), m.addressLister, m.evLogger) if err != nil { l.Warnln("IPv4 local discovery:", err) } else { m.addLocked(v4Identity, bcd, 0, 0) } } // v6 multicasts v6Identity := ipv6Identity(to.Options.LocalAnnMCAddr) if _, ok := m.finders[v6Identity]; !ok { mcd, err := NewLocal(m.myID, to.Options.LocalAnnMCAddr, m.addressLister, m.evLogger) if err != nil { l.Warnln("IPv6 local discovery:", err) } else { m.addLocked(v6Identity, mcd, 0, 0) } } } return true }
lib/discover/manager.go
0
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.000177054651430808, 0.00017080639372579753, 0.00016498503100592643, 0.00017082829435821623, 0.0000027788130410044687 ]
{ "id": 1, "code_window": [ "\t}\n", "}\n", "\n", "func TestEnDecryptFileInfo(t *testing.T) {\n", "\tvar key [32]byte\n", "\tfi := FileInfo{\n", "\t\tName: \"hello\",\n", "\t\tSize: 45,\n", "\t\tPermissions: 0755,\n", "\t\tModifiedS: 8080,\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "func encFileInfo() FileInfo {\n", "\treturn FileInfo{\n" ], "file_path": "lib/protocol/encryption_test.go", "type": "replace", "edit_start_line_idx": 115 }
// Copyright (C) 2014 The Syncthing Authors. // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at https://mozilla.org/MPL/2.0/. package ur import ( "encoding/binary" "syscall" "unsafe" ) var ( kernel32, _ = syscall.LoadLibrary("kernel32.dll") globalMemoryStatusEx, _ = syscall.GetProcAddress(kernel32, "GlobalMemoryStatusEx") ) func memorySize() int64 { var memoryStatusEx [64]byte binary.LittleEndian.PutUint32(memoryStatusEx[:], 64) ret, _, _ := syscall.Syscall(uintptr(globalMemoryStatusEx), 1, uintptr(unsafe.Pointer(&memoryStatusEx[0])), 0, 0) if ret == 0 { return 0 } return int64(binary.LittleEndian.Uint64(memoryStatusEx[8:])) }
lib/ur/memsize_windows.go
0
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.00017693718837108463, 0.0001737523707561195, 0.00017021938401740044, 0.00017392642621416599, 0.0000031787237730895868 ]
{ "id": 2, "code_window": [ "\t\t\t},\n", "\t\t},\n", "\t}\n", "\n", "\tenc := encryptFileInfo(fi, &key)\n", "\tif bytes.Equal(enc.Blocks[0].Hash, enc.Blocks[1].Hash) {\n", "\t\tt.Error(\"block hashes should not repeat when on different offsets\")\n", "\t}\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "}\n", "\n", "func TestEnDecryptFileInfo(t *testing.T) {\n", "\tvar key [32]byte\n", "\tfi := encFileInfo()\n" ], "file_path": "lib/protocol/encryption_test.go", "type": "add", "edit_start_line_idx": 135 }
// Copyright (C) 2019 The Syncthing Authors. // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at https://mozilla.org/MPL/2.0/. package protocol import ( "context" "encoding/base32" "encoding/binary" "errors" "fmt" "io" "strings" "time" "github.com/gogo/protobuf/proto" "github.com/miscreant/miscreant.go" "github.com/syncthing/syncthing/lib/rand" "github.com/syncthing/syncthing/lib/sha256" "golang.org/x/crypto/chacha20poly1305" "golang.org/x/crypto/hkdf" "golang.org/x/crypto/scrypt" ) const ( nonceSize = 24 // chacha20poly1305.NonceSizeX tagSize = 16 // chacha20poly1305.Overhead() keySize = 32 // fits both chacha20poly1305 and AES-SIV minPaddedSize = 1024 // smallest block we'll allow blockOverhead = tagSize + nonceSize maxPathComponent = 200 // characters encryptedDirExtension = ".syncthing-enc" // for top level dirs miscreantAlgo = "AES-SIV" ) // The encryptedModel sits between the encrypted device and the model. It // receives encrypted metadata and requests from the untrusted device, so it // must decrypt those and answer requests by encrypting the data. type encryptedModel struct { model Model folderKeys map[string]*[keySize]byte // folder ID -> key } func (e encryptedModel) Index(deviceID DeviceID, folder string, files []FileInfo) error { if folderKey, ok := e.folderKeys[folder]; ok { // incoming index data to be decrypted if err := decryptFileInfos(files, folderKey); err != nil { return err } } return e.model.Index(deviceID, folder, files) } func (e encryptedModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) error { if folderKey, ok := e.folderKeys[folder]; ok { // incoming index data to be decrypted if err := decryptFileInfos(files, folderKey); err != nil { return err } } return e.model.IndexUpdate(deviceID, folder, files) } func (e encryptedModel) Request(deviceID DeviceID, folder, name string, blockNo, size int32, offset int64, hash []byte, weakHash uint32, fromTemporary bool) (RequestResponse, error) { folderKey, ok := e.folderKeys[folder] if !ok { return e.model.Request(deviceID, folder, name, blockNo, size, offset, hash, weakHash, fromTemporary) } // Figure out the real file name, offset and size from the encrypted / // tweaked values. realName, err := decryptName(name, folderKey) if err != nil { return nil, fmt.Errorf("decrypting name: %w", err) } realSize := size - blockOverhead realOffset := offset - int64(blockNo*blockOverhead) if size < minPaddedSize { return nil, errors.New("short request") } // Decrypt the block hash. fileKey := FileKey(realName, folderKey) var additional [8]byte binary.BigEndian.PutUint64(additional[:], uint64(realOffset)) realHash, err := decryptDeterministic(hash, fileKey, additional[:]) if err != nil { // "Legacy", no offset additional data? realHash, err = decryptDeterministic(hash, fileKey, nil) } if err != nil { return nil, fmt.Errorf("decrypting block hash: %w", err) } // Perform that request and grab the data. resp, err := e.model.Request(deviceID, folder, realName, blockNo, realSize, realOffset, realHash, 0, false) if err != nil { return nil, err } // Encrypt the response. Blocks smaller than minPaddedSize are padded // with random data. 
data := resp.Data()
	if len(data) < minPaddedSize {
		nd := make([]byte, minPaddedSize)
		copy(nd, data)
		if _, err := rand.Read(nd[len(data):]); err != nil {
			panic("catastrophic randomness failure")
		}
		data = nd
	}

	enc := encryptBytes(data, fileKey)
	resp.Close()
	return rawResponse{enc}, nil
}

func (e encryptedModel) DownloadProgress(deviceID DeviceID, folder string, updates []FileDownloadProgressUpdate) error {
	if _, ok := e.folderKeys[folder]; !ok {
		return e.model.DownloadProgress(deviceID, folder, updates)
	}

	// Encrypted devices shouldn't send these - ignore them.
	return nil
}

func (e encryptedModel) ClusterConfig(deviceID DeviceID, config ClusterConfig) error {
	return e.model.ClusterConfig(deviceID, config)
}

func (e encryptedModel) Closed(conn Connection, err error) {
	e.model.Closed(conn, err)
}

// The encryptedConnection sits between the model and the encrypted device. It
// encrypts outgoing metadata and decrypts incoming responses.
type encryptedConnection struct {
	ConnectionInfo
	conn       Connection
	folderKeys map[string]*[keySize]byte // folder ID -> key
}

func (e encryptedConnection) Start() {
	e.conn.Start()
}

func (e encryptedConnection) ID() DeviceID {
	return e.conn.ID()
}

func (e encryptedConnection) Index(ctx context.Context, folder string, files []FileInfo) error {
	if folderKey, ok := e.folderKeys[folder]; ok {
		encryptFileInfos(files, folderKey)
	}
	return e.conn.Index(ctx, folder, files)
}

func (e encryptedConnection) IndexUpdate(ctx context.Context, folder string, files []FileInfo) error {
	if folderKey, ok := e.folderKeys[folder]; ok {
		encryptFileInfos(files, folderKey)
	}
	return e.conn.IndexUpdate(ctx, folder, files)
}

func (e encryptedConnection) Request(ctx context.Context, folder string, name string, blockNo int, offset int64, size int, hash []byte, weakHash uint32, fromTemporary bool) ([]byte, error) {
	folderKey, ok := e.folderKeys[folder]
	if !ok {
		return e.conn.Request(ctx, folder, name, blockNo, offset, size, hash, weakHash, fromTemporary)
	}

	// Encrypt / adjust the request parameters.

	origSize := size
	if size < minPaddedSize {
		// Make a request for minPaddedSize data instead of the smaller
		// block. We'll chop off the extra data later.
		size = minPaddedSize
	}
	encName := encryptName(name, folderKey)
	encOffset := offset + int64(blockNo*blockOverhead)
	encSize := size + blockOverhead

	// Perform that request, getting back an encrypted block.

	bs, err := e.conn.Request(ctx, folder, encName, blockNo, encOffset, encSize, nil, 0, false)
	if err != nil {
		return nil, err
	}

	// Return the decrypted block (or an error if it fails decryption)

	fileKey := FileKey(name, folderKey)
	bs, err = DecryptBytes(bs, fileKey)
	if err != nil {
		return nil, err
	}
	return bs[:origSize], nil
}

func (e encryptedConnection) DownloadProgress(ctx context.Context, folder string, updates []FileDownloadProgressUpdate) {
	if _, ok := e.folderKeys[folder]; !ok {
		e.conn.DownloadProgress(ctx, folder, updates)
	}

	// No need to send these
}

func (e encryptedConnection) ClusterConfig(config ClusterConfig) {
	e.conn.ClusterConfig(config)
}

func (e encryptedConnection) Close(err error) {
	e.conn.Close(err)
}

func (e encryptedConnection) Closed() bool {
	return e.conn.Closed()
}

func (e encryptedConnection) Statistics() Statistics {
	return e.conn.Statistics()
}

func encryptFileInfos(files []FileInfo, folderKey *[keySize]byte) {
	for i, fi := range files {
		files[i] = encryptFileInfo(fi, folderKey)
	}
}

// encryptFileInfo encrypts a FileInfo and wraps it into a new fake FileInfo
// with an encrypted name. 
func encryptFileInfo(fi FileInfo, folderKey *[keySize]byte) FileInfo { fileKey := FileKey(fi.Name, folderKey) // The entire FileInfo is encrypted with a random nonce, and concatenated // with that nonce. bs, err := proto.Marshal(&fi) if err != nil { panic("impossible serialization mishap: " + err.Error()) } encryptedFI := encryptBytes(bs, fileKey) // The vector is set to something that is higher than any other version sent // previously, assuming people's clocks are correct. We do this because // there is no way for the insecure device on the other end to do proper // conflict resolution, so they will simply accept and keep whatever is the // latest version they see. The secure devices will decrypt the real // FileInfo, see the real Version, and act appropriately regardless of what // this fake version happens to be. version := Vector{ Counters: []Counter{ { ID: 1, Value: uint64(time.Now().UnixNano()), }, }, } // Construct the fake block list. Each block will be blockOverhead bytes // larger than the corresponding real one and have an encrypted hash. // Very small blocks will be padded upwards to minPaddedSize. // // The encrypted hash becomes just a "token" for the data -- it doesn't // help verifying it, but it lets the encrypted device do block level // diffs and data reuse properly when it gets a new version of a file. var offset int64 blocks := make([]BlockInfo, len(fi.Blocks)) for i, b := range fi.Blocks { if b.Size < minPaddedSize { b.Size = minPaddedSize } size := b.Size + blockOverhead // The offset goes into the encrypted block hash as additional data, // essentially mixing in with the nonce. This means a block hash // remains stable for the same data at the same offset, but doesn't // reveal the existence of identical data blocks at other offsets. var additional [8]byte binary.BigEndian.PutUint64(additional[:], uint64(b.Offset)) hash := encryptDeterministic(b.Hash, fileKey, additional[:]) blocks[i] = BlockInfo{ Hash: hash, Offset: offset, Size: size, } offset += int64(size) } // Construct the fake FileInfo. This is mostly just a wrapper around the // encrypted FileInfo and fake block list. We'll represent symlinks as // directories, because they need some sort of on disk representation // but have no data outside of the metadata. Deletion and sequence // numbering are handled as usual. typ := FileInfoTypeFile if fi.Type != FileInfoTypeFile { typ = FileInfoTypeDirectory } enc := FileInfo{ Name: encryptName(fi.Name, folderKey), Type: typ, Size: offset, // new total file size Permissions: 0644, ModifiedS: 1234567890, // Sat Feb 14 00:31:30 CET 2009 Deleted: fi.Deleted, Version: version, Sequence: fi.Sequence, RawBlockSize: fi.RawBlockSize + blockOverhead, Blocks: blocks, Encrypted: encryptedFI, } return enc } func decryptFileInfos(files []FileInfo, folderKey *[keySize]byte) error { for i, fi := range files { decFI, err := DecryptFileInfo(fi, folderKey) if err != nil { return err } files[i] = decFI } return nil } // DecryptFileInfo extracts the encrypted portion of a FileInfo, decrypts it // and returns that. 
func DecryptFileInfo(fi FileInfo, folderKey *[keySize]byte) (FileInfo, error) { realName, err := decryptName(fi.Name, folderKey) if err != nil { return FileInfo{}, err } fileKey := FileKey(realName, folderKey) dec, err := DecryptBytes(fi.Encrypted, fileKey) if err != nil { return FileInfo{}, err } var decFI FileInfo if err := proto.Unmarshal(dec, &decFI); err != nil { return FileInfo{}, err } return decFI, nil } var base32Hex = base32.HexEncoding.WithPadding(base32.NoPadding) // encryptName encrypts the given string in a deterministic manner (the // result is always the same for any given string) and encodes it in a // filesystem-friendly manner. func encryptName(name string, key *[keySize]byte) string { enc := encryptDeterministic([]byte(name), key, nil) return slashify(base32Hex.EncodeToString(enc)) } // decryptName decrypts a string from encryptName func decryptName(name string, key *[keySize]byte) (string, error) { name, err := deslashify(name) if err != nil { return "", err } bs, err := base32Hex.DecodeString(name) if err != nil { return "", err } dec, err := decryptDeterministic(bs, key, nil) if err != nil { return "", err } return string(dec), nil } // encryptBytes encrypts bytes with a random nonce func encryptBytes(data []byte, key *[keySize]byte) []byte { nonce := randomNonce() return encrypt(data, nonce, key) } // encryptDeterministic encrypts bytes using AES-SIV func encryptDeterministic(data []byte, key *[keySize]byte, additionalData []byte) []byte { aead, err := miscreant.NewAEAD(miscreantAlgo, key[:], 0) if err != nil { panic("cipher failure: " + err.Error()) } return aead.Seal(nil, nil, data, additionalData) } // decryptDeterministic decrypts bytes using AES-SIV func decryptDeterministic(data []byte, key *[keySize]byte, additionalData []byte) ([]byte, error) { aead, err := miscreant.NewAEAD(miscreantAlgo, key[:], 0) if err != nil { panic("cipher failure: " + err.Error()) } return aead.Open(nil, nil, data, additionalData) } func encrypt(data []byte, nonce *[nonceSize]byte, key *[keySize]byte) []byte { aead, err := chacha20poly1305.NewX(key[:]) if err != nil { // Can only fail if the key is the wrong length panic("cipher failure: " + err.Error()) } if aead.NonceSize() != nonceSize || aead.Overhead() != tagSize { // We want these values to be constant for our type declarations so // we don't use the values returned by the GCM, but we verify them // here. panic("crypto parameter mismatch") } // Data is appended to the nonce return aead.Seal(nonce[:], nonce[:], data, nil) } // DecryptBytes returns the decrypted bytes, or an error if decryption // failed. func DecryptBytes(data []byte, key *[keySize]byte) ([]byte, error) { if len(data) < blockOverhead { return nil, errors.New("data too short") } aead, err := chacha20poly1305.NewX(key[:]) if err != nil { // Can only fail if the key is the wrong length panic("cipher failure: " + err.Error()) } if aead.NonceSize() != nonceSize || aead.Overhead() != tagSize { // We want these values to be constant for our type declarations so // we don't use the values returned by the GCM, but we verify them // here. 
panic("crypto parameter mismatch") } return aead.Open(nil, data[:nonceSize], data[nonceSize:], nil) } // randomNonce is a normal, cryptographically random nonce func randomNonce() *[nonceSize]byte { var nonce [nonceSize]byte if _, err := rand.Read(nonce[:]); err != nil { panic("catastrophic randomness failure: " + err.Error()) } return &nonce } // keysFromPasswords converts a set of folder ID to password into a set of // folder ID to encryption key, using our key derivation function. func keysFromPasswords(passwords map[string]string) map[string]*[keySize]byte { res := make(map[string]*[keySize]byte, len(passwords)) for folder, password := range passwords { res[folder] = KeyFromPassword(folder, password) } return res } func knownBytes(folderID string) []byte { return []byte("syncthing" + folderID) } // KeyFromPassword uses key derivation to generate a stronger key from a // probably weak password. func KeyFromPassword(folderID, password string) *[keySize]byte { bs, err := scrypt.Key([]byte(password), knownBytes(folderID), 32768, 8, 1, keySize) if err != nil { panic("key derivation failure: " + err.Error()) } if len(bs) != keySize { panic("key derivation failure: wrong number of bytes") } var key [keySize]byte copy(key[:], bs) return &key } var hkdfSalt = []byte("syncthing") func FileKey(filename string, folderKey *[keySize]byte) *[keySize]byte { kdf := hkdf.New(sha256.New, append(folderKey[:], filename...), hkdfSalt, nil) var fileKey [keySize]byte n, err := io.ReadFull(kdf, fileKey[:]) if err != nil || n != keySize { panic("hkdf failure") } return &fileKey } func PasswordToken(folderID, password string) []byte { return encryptDeterministic(knownBytes(folderID), KeyFromPassword(folderID, password), nil) } // slashify inserts slashes (and file extension) in the string to create an // appropriate tree. ABCDEFGH... => A.syncthing-enc/BC/DEFGH... We can use // forward slashes here because we're on the outside of native path formats, // the slash is the wire format. func slashify(s string) string { // We somewhat sloppily assume bytes == characters here, but the only // file names we should deal with are those that come from our base32 // encoding. comps := make([]string, 0, len(s)/maxPathComponent+3) comps = append(comps, s[:1]+encryptedDirExtension) s = s[1:] comps = append(comps, s[:2]) s = s[2:] for len(s) > maxPathComponent { comps = append(comps, s[:maxPathComponent]) s = s[maxPathComponent:] } if len(s) > 0 { comps = append(comps, s) } return strings.Join(comps, "/") } // deslashify removes slashes and encrypted file extensions from the string. // This is the inverse of slashify(). func deslashify(s string) (string, error) { if len(s) == 0 || !strings.HasPrefix(s[1:], encryptedDirExtension) { return "", fmt.Errorf("invalid encrypted path: %q", s) } s = s[:1] + s[1+len(encryptedDirExtension):] return strings.ReplaceAll(s, "/", ""), nil } type rawResponse struct { data []byte } func (r rawResponse) Data() []byte { return r.data } func (r rawResponse) Close() {} func (r rawResponse) Wait() {} // IsEncryptedPath returns true if the path points at encrypted data. This is // determined by checking for a sentinel string in the path. func IsEncryptedPath(path string) bool { pathComponents := strings.Split(path, "/") if len(pathComponents) != 3 { return false } return isEncryptedParentFromComponents(pathComponents[:2]) } // IsEncryptedParent returns true if the path points at a parent directory of // encrypted data, i.e. is not a "real" directory. 
This is determined by // checking for a sentinel string in the path. func IsEncryptedParent(path string) bool { return isEncryptedParentFromComponents(strings.Split(path, "/")) } func isEncryptedParentFromComponents(pathComponents []string) bool { l := len(pathComponents) if l == 2 && len(pathComponents[1]) != 2 { return false } else if l == 0 { return false } if len(pathComponents[0]) == 0 { return false } if pathComponents[0][1:] != encryptedDirExtension { return false } if l < 2 { return true } for _, comp := range pathComponents[2:] { if len(comp) != maxPathComponent { return false } } return true }
lib/protocol/encryption.go
1
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.9947119951248169, 0.04886770620942116, 0.0001635019580135122, 0.00019118905765935779, 0.20353208482265472 ]
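The encryption.go record above centres on how plaintext block coordinates are mapped onto the encrypted file: encryptedConnection.Request pads blocks below minPaddedSize up to that size, grows every block by blockOverhead (XChaCha20-Poly1305 nonce plus tag), and shifts the offset by blockOverhead for each preceding block. The standalone sketch below restates those constants and mirrors that arithmetic; encCoords is a hypothetical helper written for illustration only and is not part of the Syncthing API or of this commit.

package main

import "fmt"

// Constants restated from encryption.go above.
const (
	nonceSize     = 24
	tagSize       = 16
	blockOverhead = tagSize + nonceSize // 40 bytes of overhead per block
	minPaddedSize = 1024                // smallest block we'll allow
)

// encCoords mirrors the offset/size adjustment done in
// encryptedConnection.Request: pad small blocks up to minPaddedSize, grow
// the block by blockOverhead, and shift the offset by blockOverhead for
// every block that precedes it.
func encCoords(blockNo int, offset int64, size int) (encOffset int64, encSize int) {
	if size < minPaddedSize {
		size = minPaddedSize
	}
	return offset + int64(blockNo*blockOverhead), size + blockOverhead
}

func main() {
	// Third block (index 2) of a file with 128 KiB blocks.
	encOffset, encSize := encCoords(2, 2*128*1024, 128*1024)
	fmt.Println(encOffset, encSize) // 262224 131112
}

The same mapping is what encryptFileInfo bakes into the fake block list, which is why the encrypted and plaintext sides agree on where each block lives without the untrusted device ever seeing real offsets or sizes.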
{ "id": 2, "code_window": [ "\t\t\t},\n", "\t\t},\n", "\t}\n", "\n", "\tenc := encryptFileInfo(fi, &key)\n", "\tif bytes.Equal(enc.Blocks[0].Hash, enc.Blocks[1].Hash) {\n", "\t\tt.Error(\"block hashes should not repeat when on different offsets\")\n", "\t}\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "}\n", "\n", "func TestEnDecryptFileInfo(t *testing.T) {\n", "\tvar key [32]byte\n", "\tfi := encFileInfo()\n" ], "file_path": "lib/protocol/encryption_test.go", "type": "add", "edit_start_line_idx": 135 }
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package model

import (
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/sync"
)

// deviceFolderFileDownloadState holds current download state of a file that
// a remote device has advertised. blockIndexes represents indexes within
// FileInfo.Blocks that the remote device already has, and version represents
// the version of the file that the remote device is downloading.
type deviceFolderFileDownloadState struct {
	blockIndexes []int
	version      protocol.Vector
	blockSize    int
}

// deviceFolderDownloadState holds current download state of all files that
// a remote device is currently downloading in a specific folder.
type deviceFolderDownloadState struct {
	mut   sync.RWMutex
	files map[string]deviceFolderFileDownloadState
}

// Has returns whether a block at that specific index, and that specific version of the file
// is currently available on the remote device for pulling from a temporary file.
func (p *deviceFolderDownloadState) Has(file string, version protocol.Vector, index int) bool {
	p.mut.RLock()
	defer p.mut.RUnlock()

	local, ok := p.files[file]

	if !ok || !local.version.Equal(version) {
		return false
	}

	for _, existingIndex := range local.blockIndexes {
		if existingIndex == index {
			return true
		}
	}
	return false
}

// Update updates internal state of what has been downloaded into the temporary
// files by the remote device for this specific folder.
func (p *deviceFolderDownloadState) Update(updates []protocol.FileDownloadProgressUpdate) {
	p.mut.Lock()
	defer p.mut.Unlock()

	for _, update := range updates {
		local, ok := p.files[update.Name]
		if update.UpdateType == protocol.FileDownloadProgressUpdateTypeForget && ok && local.version.Equal(update.Version) {
			delete(p.files, update.Name)
		} else if update.UpdateType == protocol.FileDownloadProgressUpdateTypeAppend {
			if !ok {
				local = deviceFolderFileDownloadState{
					blockIndexes: update.BlockIndexes,
					version:      update.Version,
					blockSize:    int(update.BlockSize),
				}
			} else if !local.version.Equal(update.Version) {
				local.blockIndexes = append(local.blockIndexes[:0], update.BlockIndexes...)
				local.version = update.Version
				local.blockSize = int(update.BlockSize)
			} else {
				local.blockIndexes = append(local.blockIndexes, update.BlockIndexes...)
			}
			p.files[update.Name] = local
		}
	}
}

func (p *deviceFolderDownloadState) BytesDownloaded() int64 {
	p.mut.RLock()
	defer p.mut.RUnlock()

	var res int64
	for _, state := range p.files {
		// BlockSize is a new field introduced in 1.4.1, thus a fallback
		// is required (will potentially underrepresent downloaded bytes).
		if state.blockSize != 0 {
			res += int64(len(state.blockIndexes) * state.blockSize)
		} else {
			res += int64(len(state.blockIndexes) * protocol.MinBlockSize)
		}
	}
	return res
}

// GetBlockCounts returns a map filename -> number of blocks downloaded.
func (p *deviceFolderDownloadState) GetBlockCounts() map[string]int {
	p.mut.RLock()
	res := make(map[string]int, len(p.files))
	for name, state := range p.files {
		res[name] = len(state.blockIndexes)
	}
	p.mut.RUnlock()
	return res
}

// deviceDownloadState represents the state of all in progress downloads
// for all folders of a specific device. 
type deviceDownloadState struct { mut sync.RWMutex folders map[string]*deviceFolderDownloadState } // Update updates internal state of what has been downloaded into the temporary // files by the remote device for this specific folder. func (t *deviceDownloadState) Update(folder string, updates []protocol.FileDownloadProgressUpdate) { if t == nil { return } t.mut.RLock() f, ok := t.folders[folder] t.mut.RUnlock() if !ok { f = &deviceFolderDownloadState{ mut: sync.NewRWMutex(), files: make(map[string]deviceFolderFileDownloadState), } t.mut.Lock() t.folders[folder] = f t.mut.Unlock() } f.Update(updates) } // Has returns whether block at that specific index, and that specific version of the file // is currently available on the remote device for pulling from a temporary file. func (t *deviceDownloadState) Has(folder, file string, version protocol.Vector, index int) bool { if t == nil { return false } t.mut.RLock() f, ok := t.folders[folder] t.mut.RUnlock() if !ok { return false } return f.Has(file, version, index) } // GetBlockCounts returns a map filename -> number of blocks downloaded for the // given folder. func (t *deviceDownloadState) GetBlockCounts(folder string) map[string]int { if t == nil { return nil } t.mut.RLock() defer t.mut.RUnlock() for name, state := range t.folders { if name == folder { return state.GetBlockCounts() } } return nil } func (t *deviceDownloadState) BytesDownloaded(folder string) int64 { if t == nil { return 0 } t.mut.RLock() defer t.mut.RUnlock() for name, state := range t.folders { if name == folder { return state.BytesDownloaded() } } return 0 } func newDeviceDownloadState() *deviceDownloadState { return &deviceDownloadState{ mut: sync.NewRWMutex(), folders: make(map[string]*deviceFolderDownloadState), } }
lib/model/devicedownloadstate.go
0
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.0020813250448554754, 0.0003315707726869732, 0.00016360815789084882, 0.00018488689966034144, 0.00041321286698803306 ]
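The devicedownloadstate.go record above tracks which temporary-file blocks a remote device has advertised, keyed by folder, file name and file version. Below is a minimal test-style sketch of the intended usage; it uses only identifiers visible in the code above (newDeviceDownloadState, Update, Has, GetBlockCounts and the protocol progress-update fields), it is not an existing test in the repository, and the 128 KiB block size is just an illustrative literal.

package model

import (
	"testing"

	"github.com/syncthing/syncthing/lib/protocol"
)

// Sketch only: after an "append" progress update for blocks 0 and 1, Has
// reports those blocks as available for the matching version and no others.
func TestDeviceDownloadStateSketch(t *testing.T) {
	s := newDeviceDownloadState()
	v := protocol.Vector{Counters: []protocol.Counter{{ID: 1, Value: 1}}}

	s.Update("default", []protocol.FileDownloadProgressUpdate{{
		Name:         "file",
		Version:      v,
		UpdateType:   protocol.FileDownloadProgressUpdateTypeAppend,
		BlockIndexes: []int{0, 1},
		BlockSize:    128 << 10, // illustrative 128 KiB block size
	}})

	if !s.Has("default", "file", v, 0) || !s.Has("default", "file", v, 1) {
		t.Error("expected blocks 0 and 1 to be available")
	}
	if s.Has("default", "file", v, 2) {
		t.Error("block 2 was never advertised")
	}
	if n := s.GetBlockCounts("default")["file"]; n != 2 {
		t.Errorf("expected 2 downloaded blocks, got %d", n)
	}
}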
{ "id": 2, "code_window": [ "\t\t\t},\n", "\t\t},\n", "\t}\n", "\n", "\tenc := encryptFileInfo(fi, &key)\n", "\tif bytes.Equal(enc.Blocks[0].Hash, enc.Blocks[1].Hash) {\n", "\t\tt.Error(\"block hashes should not repeat when on different offsets\")\n", "\t}\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "}\n", "\n", "func TestEnDecryptFileInfo(t *testing.T) {\n", "\tvar key [32]byte\n", "\tfi := encFileInfo()\n" ], "file_path": "lib/protocol/encryption_test.go", "type": "add", "edit_start_line_idx": 135 }
The files in this directory contain metadata tests - that is, tests on the shape and colour of the code in the rest of the repository. This code is not compiled into the final product.
meta/README.txt
0
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.00017207747441716492, 0.00017207747441716492, 0.00017207747441716492, 0.00017207747441716492, 0 ]
{ "id": 2, "code_window": [ "\t\t\t},\n", "\t\t},\n", "\t}\n", "\n", "\tenc := encryptFileInfo(fi, &key)\n", "\tif bytes.Equal(enc.Blocks[0].Hash, enc.Blocks[1].Hash) {\n", "\t\tt.Error(\"block hashes should not repeat when on different offsets\")\n", "\t}\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "}\n", "\n", "func TestEnDecryptFileInfo(t *testing.T) {\n", "\tvar key [32]byte\n", "\tfi := encFileInfo()\n" ], "file_path": "lib/protocol/encryption_test.go", "type": "add", "edit_start_line_idx": 135 }
<!--<div class="grid-container" gdAreas="header header | folders devices | status-list status-list | footer footer" gdGap="16px" gdRows="auto auto auto"> --> <!--<div class="grid-container" fxLayout="row" fxLayoutGap="16px grid" fxLayoutAlign="stretch">--> <div class="progress"> <mat-progress-bar mode="determinate" value="{{progressValue}}" [@progressBar]="isLoading ? 'start' : 'done'"> </mat-progress-bar> </div> <div fxLayout="column" fxLayoutGap="16px" class="grid-container" [@loading]="isLoading ? 'start' : 'done'"> <div fxLayout="row" fxLayoutGap="16px" fxLayoutAlign="space-between stretch"> <app-chart [type]=folderChart fxFlex="50"></app-chart> <app-chart [type]=deviceChart fxFlex="50"></app-chart> </div> <app-status-list gdArea="status-list"></app-status-list> <div></div> </div>
next-gen-gui/src/app/dashboard/dashboard.component.html
0
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.00017479498637840152, 0.0001687027106527239, 0.00016261044947896153, 0.0001687027106527239, 0.000006092268449719995 ]
{ "id": 3, "code_window": [ "\t\tt.Error(\"mismatch after decryption\")\n", "\t}\n", "}\n", "\n", "func TestIsEncryptedParent(t *testing.T) {\n", "\tcomp := rand.String(maxPathComponent)\n", "\tcases := []struct {\n", "\t\tpath string\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "func TestEncryptedFileInfoConsistency(t *testing.T) {\n", "\tvar key [32]byte\n", "\tfiles := []FileInfo{\n", "\t\tencFileInfo(),\n", "\t\tencFileInfo(),\n", "\t}\n", "\tfiles[1].SetIgnored()\n", "\tfor i, f := range files {\n", "\t\tenc := encryptFileInfo(f, &key)\n", "\t\tif err := checkFileInfoConsistency(enc); err != nil {\n", "\t\t\tt.Errorf(\"%v: %v\", i, err)\n", "\t\t}\n", "\t}\n", "}\n", "\n" ], "file_path": "lib/protocol/encryption_test.go", "type": "add", "edit_start_line_idx": 157 }
// Copyright (C) 2019 The Syncthing Authors. // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at https://mozilla.org/MPL/2.0/. package protocol import ( "bytes" "fmt" "reflect" "regexp" "strings" "sync" "testing" "github.com/syncthing/syncthing/lib/rand" "github.com/syncthing/syncthing/lib/sha256" ) func TestEnDecryptName(t *testing.T) { pattern := regexp.MustCompile( fmt.Sprintf("^[0-9A-V]%s/[0-9A-V]{2}/([0-9A-V]{%d}/)*[0-9A-V]{1,%d}$", regexp.QuoteMeta(encryptedDirExtension), maxPathComponent, maxPathComponent-1)) makeName := func(n int) string { b := make([]byte, n) for i := range b { b[i] = byte('a' + i%26) } return string(b) } var key [32]byte cases := []string{ "", "foo", "a longer name/with/slashes and spaces", makeName(maxPathComponent), makeName(1 + maxPathComponent), makeName(2 * maxPathComponent), makeName(1 + 2*maxPathComponent), } for _, tc := range cases { var prev string for i := 0; i < 5; i++ { enc := encryptName(tc, &key) if prev != "" && prev != enc { t.Error("name should always encrypt the same") } prev = enc if tc != "" && strings.Contains(enc, tc) { t.Error("shouldn't contain plaintext") } if !pattern.MatchString(enc) { t.Fatalf("encrypted name %s doesn't match %s", enc, pattern) } dec, err := decryptName(enc, &key) if err != nil { t.Error(err) } if dec != tc { t.Error("mismatch after decryption") } t.Logf("%q encrypts as %q", tc, enc) } } } func TestDecryptNameInvalid(t *testing.T) { key := new([32]byte) for _, c := range []string{ "T.syncthing-enc/OD", "T.syncthing-enc/OD/", "T.wrong-extension/OD/PHVDD67S7FI2K5QQMPSOFSK", "OD/PHVDD67S7FI2K5QQMPSOFSK", } { if _, err := decryptName(c, key); err == nil { t.Errorf("no error for %q", c) } } } func TestEnDecryptBytes(t *testing.T) { var key [32]byte cases := [][]byte{ {}, {1, 2, 3, 4, 5}, } for _, tc := range cases { var prev []byte for i := 0; i < 5; i++ { enc := encryptBytes(tc, &key) if bytes.Equal(enc, prev) { t.Error("encryption should not repeat") } prev = enc if len(tc) > 0 && bytes.Contains(enc, tc) { t.Error("shouldn't contain plaintext") } dec, err := DecryptBytes(enc, &key) if err != nil { t.Error(err) } if !bytes.Equal(dec, tc) { t.Error("mismatch after decryption") } } } } func TestEnDecryptFileInfo(t *testing.T) { var key [32]byte fi := FileInfo{ Name: "hello", Size: 45, Permissions: 0755, ModifiedS: 8080, Blocks: []BlockInfo{ { Offset: 0, Size: 45, Hash: []byte{1, 2, 3}, }, { Offset: 45, Size: 45, Hash: []byte{1, 2, 3}, }, }, } enc := encryptFileInfo(fi, &key) if bytes.Equal(enc.Blocks[0].Hash, enc.Blocks[1].Hash) { t.Error("block hashes should not repeat when on different offsets") } again := encryptFileInfo(fi, &key) if !bytes.Equal(enc.Blocks[0].Hash, again.Blocks[0].Hash) { t.Error("block hashes should remain stable (0)") } if !bytes.Equal(enc.Blocks[1].Hash, again.Blocks[1].Hash) { t.Error("block hashes should remain stable (1)") } dec, err := DecryptFileInfo(enc, &key) if err != nil { t.Error(err) } if !reflect.DeepEqual(fi, dec) { t.Error("mismatch after decryption") } } func TestIsEncryptedParent(t *testing.T) { comp := rand.String(maxPathComponent) cases := []struct { path string is bool }{ {"", false}, {".", false}, {"/", false}, {"12" + encryptedDirExtension, false}, {"1" + encryptedDirExtension, true}, {"1" + encryptedDirExtension + "/b", false}, {"1" + encryptedDirExtension + "/bc", true}, {"1" + encryptedDirExtension + "/bcd", false}, {"1" + encryptedDirExtension + 
"/bc/foo", false}, {"1.12/22", false}, {"1" + encryptedDirExtension + "/bc/" + comp, true}, {"1" + encryptedDirExtension + "/bc/" + comp + "/" + comp, true}, {"1" + encryptedDirExtension + "/bc/" + comp + "a", false}, {"1" + encryptedDirExtension + "/bc/" + comp + "/a/" + comp, false}, } for _, tc := range cases { if res := IsEncryptedParent(tc.path); res != tc.is { t.Errorf("%v: got %v, expected %v", tc.path, res, tc.is) } } } var benchmarkFileKey struct { key [keySize]byte sync.Once } func BenchmarkFileKey(b *testing.B) { benchmarkFileKey.Do(func() { sha256.SelectAlgo() rand.Read(benchmarkFileKey.key[:]) }) b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { FileKey("a_kind_of_long_filename.ext", &benchmarkFileKey.key) } }
lib/protocol/encryption_test.go
1
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.9987609386444092, 0.19255559146404266, 0.0001592351181898266, 0.0005906485021114349, 0.3908613920211792 ]
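The encryption_test.go record above round-trips names, raw bytes and FileInfos through the folder key. A small complementary sketch (not an existing test in the repository) is shown below; it checks the one property the fake block list in encryptFileInfo relies on, namely that every encrypted blob is exactly blockOverhead bytes (24-byte nonce plus 16-byte tag) longer than its plaintext.

package protocol

import "testing"

// Sketch only: encryptBytes prepends a random nonce and appends an AEAD tag,
// so the ciphertext is always plaintext length plus blockOverhead.
func TestEncryptBytesOverheadSketch(t *testing.T) {
	var key [keySize]byte
	for _, n := range []int{0, 1, 500, minPaddedSize} {
		data := make([]byte, n)
		enc := encryptBytes(data, &key)
		if len(enc) != n+blockOverhead {
			t.Errorf("plaintext length %d: got %d bytes, want %d", n, len(enc), n+blockOverhead)
		}
	}
}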
{ "id": 3, "code_window": [ "\t\tt.Error(\"mismatch after decryption\")\n", "\t}\n", "}\n", "\n", "func TestIsEncryptedParent(t *testing.T) {\n", "\tcomp := rand.String(maxPathComponent)\n", "\tcases := []struct {\n", "\t\tpath string\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "func TestEncryptedFileInfoConsistency(t *testing.T) {\n", "\tvar key [32]byte\n", "\tfiles := []FileInfo{\n", "\t\tencFileInfo(),\n", "\t\tencFileInfo(),\n", "\t}\n", "\tfiles[1].SetIgnored()\n", "\tfor i, f := range files {\n", "\t\tenc := encryptFileInfo(f, &key)\n", "\t\tif err := checkFileInfoConsistency(enc); err != nil {\n", "\t\t\tt.Errorf(\"%v: %v\", i, err)\n", "\t\t}\n", "\t}\n", "}\n", "\n" ], "file_path": "lib/protocol/encryption_test.go", "type": "add", "edit_start_line_idx": 157 }
The files in this directory contain metadata tests - that is, tests on the shape and colour of the code in the rest of the repository. This code is not compiled into the final product.
meta/README.txt
0
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.00017207123164553195, 0.00017207123164553195, 0.00017207123164553195, 0.00017207123164553195, 0 ]
{ "id": 3, "code_window": [ "\t\tt.Error(\"mismatch after decryption\")\n", "\t}\n", "}\n", "\n", "func TestIsEncryptedParent(t *testing.T) {\n", "\tcomp := rand.String(maxPathComponent)\n", "\tcases := []struct {\n", "\t\tpath string\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "func TestEncryptedFileInfoConsistency(t *testing.T) {\n", "\tvar key [32]byte\n", "\tfiles := []FileInfo{\n", "\t\tencFileInfo(),\n", "\t\tencFileInfo(),\n", "\t}\n", "\tfiles[1].SetIgnored()\n", "\tfor i, f := range files {\n", "\t\tenc := encryptFileInfo(f, &key)\n", "\t\tif err := checkFileInfoConsistency(enc); err != nil {\n", "\t\t\tt.Errorf(\"%v: %v\", i, err)\n", "\t\t}\n", "\t}\n", "}\n", "\n" ], "file_path": "lib/protocol/encryption_test.go", "type": "add", "edit_start_line_idx": 157 }
#include excludes bfile dir1/cfile **/efile /ffile lost+found
lib/ignore/testdata/.stignore
0
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.000174170098034665, 0.000174170098034665, 0.000174170098034665, 0.000174170098034665, 0 ]
{ "id": 3, "code_window": [ "\t\tt.Error(\"mismatch after decryption\")\n", "\t}\n", "}\n", "\n", "func TestIsEncryptedParent(t *testing.T) {\n", "\tcomp := rand.String(maxPathComponent)\n", "\tcases := []struct {\n", "\t\tpath string\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "func TestEncryptedFileInfoConsistency(t *testing.T) {\n", "\tvar key [32]byte\n", "\tfiles := []FileInfo{\n", "\t\tencFileInfo(),\n", "\t\tencFileInfo(),\n", "\t}\n", "\tfiles[1].SetIgnored()\n", "\tfor i, f := range files {\n", "\t\tenc := encryptFileInfo(f, &key)\n", "\t\tif err := checkFileInfoConsistency(enc); err != nil {\n", "\t\t\tt.Errorf(\"%v: %v\", i, err)\n", "\t\t}\n", "\t}\n", "}\n", "\n" ], "file_path": "lib/protocol/encryption_test.go", "type": "add", "edit_start_line_idx": 157 }
-----BEGIN RSA PRIVATE KEY----- MIIG5AIBAAKCAYEAypziXAfAGG5mEwTIo+dUt3a4SxKnCVWfTXq7RF5ukCdz3jRR Roqy2ujbW5oe7qmqELTUzkJUeJkuCyvLIhVsKgt2r1peThy5+3/kUst9Faitnke1 /jYtWwSf7WmK9TQBN/tFxdgRSM0LpdvsIJQqB5eodR51KrsjkJcKzEHeE2IQA8EK D1LcfXz8p22zdaZ4CNhz7mgFghkRT6NINDZiMtm6R4qXqYyW1MNlYLw+DqXvE2eC QMQOcqI7SSZTeS3eSTP5NQumAmaLKn3JZQ9G6Ldn0VSWSPlXqVSaFTI9LnYICTDa 4Nj4+L6idwzjmBkxPr8vBjlj15eo6xcU4fKqh2/xHIcZYpas1bwo5ljGdcfRm4L1 zpxDY+nJFIgRv0Ndpltve+T8o3qiqFvIMiphFUtdb2pzgaTi8FW9SHMRFiuj6sNL I0Yb9u8BoN/QsWZnFh6fQoHWPKtkJrPZGw4GMMTHeEQe/eLW9VAH7ywPyc2hSzSr i44aR44s4ErSV+XfAgMBAAECggGBALolpuXsjPUlQIyKoZfMag3gefMnIOW3j5NM hg6LP8MbLB3jLSTFOwtaUmZ3U6HrqP6OVNFnKVpfSWkkBA29ZtG+FH2IZgoX5FsH JgtXPwWOImy/75mtxr/PoOsrQ2qCK/h116WsHD0pfWEVi3xnA7JUCIYJxJXMtyEZ U+dTQKfIOXRpf0eS1lZIZYuhgvY9Shy+WMyZLy5Wv4vONQEbUd7sIHOoBizUqKKz HkngyJcGpn7KY2YDek6hdByBpME5fFRo1DmXPeE+pTycDBP3C55JTB6v6kGkqxAl hTFK+x8tU9N+d+Wmuvfdm4MdQ1CnLjv+TL01OTYJx4nTDRk8PkVxVfrXdSY/LFib 4mhghDPglvPHyFljbwkdiah60dAd2dIUhZwax79oxliyV6Ivf3teB5bvvrWIsMCX harESDSC6sgkHRGb05u1uYOPpsnAVOmi6CVgsKQtTmRZ6tqBA/O97RBWj92ROJi+ 0PqFs+cLXvpd4u36sI0MeWYO7Eko2QKBwQDo1Mc/fUsquniFrkjjXnGC8Q+tUpRC /TS2vjP5zFlgIOD8fSOoNF4VwFapCoG8l5NsOZrFOYvzHKZRtuBHLL0xdAOuxzge 72mJYDsfO+vwOtg8xMrdtlJqDOnQWU2NVgOB+xSQKILEOm422BRyXbOM3hylsL7Y GPSTdqst9jcLODd6VFY4xq9VDebO63z5Ku62K+BerUJsUfrs64ES6U//Pivq4c06 57BQq1AbVXqd8q9aInOsSQ1nJwg28PuBREUCgcEA3sZQRJSpktsdsnlvk4Nvf+KK OdYEAa2hPcflWDn/ZiEzaP0NR5s6NFSYhzGWVAX39lPUuHJ9MZ17CO4GqMojbXJc hneWi7+SWNqZn1rxdCl8FU7wJccX479G45lEL0aIN91i+9K0ZFGZg198EQcpX4kt s8J3O3n81rLnHg4bgMhJ6BGRIAxtYwmdOSfTmdyewVJfNUookswjC0sD5JDL/jd1 Yi4CeaFOqRFVY+4ge+whYz9OcfBBj4kBl1OYoq3TAoHAZe35CJd+l8coykVhjYCk KxIDrfpQA/+72yDrujk3C3l5ZbAXMyUx503b4odCAuFM3f1d/2fRF+579ZwdFavF a+gBULvQmuJvDoA9gdAG25W3Yus4cNXHwLvEhL0D0ZNNV9MmznGdxfBandH9KZBR 8aKvYe49rndGY2R5TlbTBCtpRjmzwYlh6td6Ky42+RJNjR0qTeiGAsvlEWGMkU3p ArIiIeMWqOoTa02EdWL3mjxLfidFArC8mGMjGoJnaNENAoHBAIQ9ZnJ/aPXS+hry uQfw0qQwXuscHr68SeW5nmuz6ea/OJxO8q4Z+AAOY4iFJ/5ymJHxi2l+FND58YoI eY2CiGs0orXzkTsdmgsCoISW4JOa+JxRgn56Y7T+217JoU8K6Ft5IIPpvMl8Ist4 R9Z30Nh0PtvhSRPWQv9TrZwKtcrJmg2XN/W1Ss2qbFj8SkCgVODfO8MPZWxWn0rZ 2dK5HU6nrxd7xl5bIa18q5qpRUEql1spvjAmdVR1+KrSpd2TnwKBwDGptuY2og9y PeEipbGrxww44DMFp6026xn0r8NXSlylD8Rihm9z2wQ0vFPjG1VHjupcQYI5bQLB piYt1PyPzCs27WrnXU6eEFjhVP4CzZHFKHkPeW6uBt7XbHpR14hxJ54FLN33Rewr Vt6xys+K3OHFzgDfBnhtvROYHtNQENiyJ3rMwRZcxXROmR/zj0cVl1R8wdE3XCwA rFPm/n3hzKv2OG3KX0KHh1bH0eXPVhIEmPcgz8RgL3uPGkqh8H+sgQ== -----END RSA PRIVATE KEY-----
test/h3/key.pem
0
https://github.com/syncthing/syncthing/commit/f80ee472c2ab8278319f2932b1851ca5ab2fc62b
[ 0.001978851156309247, 0.0010194273199886084, 0.0002508863981347531, 0.0009239858482033014, 0.0006272970931604505 ]