hunk (dict) | file (string, 0-11.8M chars) | file_path (string, 2-234 chars) | label (int64, 0-1) | commit_url (string, 74-103 chars) | dependency_score (sequence of 5 floats) |
---|---|---|---|---|---|
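Each record pairs a `hunk` (a window of code lines with per-line keep/replace labels, the replacement `after_edit` lines, and edit metadata) with a candidate `file`, its `file_path`, a binary relevance `label`, the originating `commit_url`, and a five-element `dependency_score` vector. The sketch below shows one way such a record could be decoded; it is a minimal illustration that assumes the rows are serialized as JSON objects using the column names above, which is an assumption about the storage format rather than something the table states.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hunk mirrors the "hunk" column: a window of code lines, per-line
// keep/replace labels, the replacement lines, and edit metadata.
type Hunk struct {
	ID               int      `json:"id"`
	CodeWindow       []string `json:"code_window"`
	Labels           []string `json:"labels"`
	AfterEdit        []string `json:"after_edit"`
	FilePath         string   `json:"file_path"`
	Type             string   `json:"type"`
	EditStartLineIdx int      `json:"edit_start_line_idx"`
}

// Record mirrors one dataset row. Field names are assumed from the
// column header; the actual serialization may differ.
type Record struct {
	Hunk            Hunk      `json:"hunk"`
	File            string    `json:"file"`
	FilePath        string    `json:"file_path"`
	Label           int       `json:"label"`
	CommitURL       string    `json:"commit_url"`
	DependencyScore []float64 `json:"dependency_score"`
}

func main() {
	// A tiny inline stand-in for one serialized row (file contents elided).
	raw := `{"hunk":{"id":4,"code_window":["\treturn testHost(host, archive)\n"],"labels":["replace"],"after_edit":["\treturn testHost(host, archive, false)\n"],"file_path":"test/e2e_node/runner/run_e2e.go","type":"replace","edit_start_line_idx":173},"file":"...","file_path":"test/e2e_node/runner/run_e2e.go","label":1,"commit_url":"https://github.com/kubernetes/kubernetes/commit/5155df0287bd4e4a35dfc924750622243e574f84","dependency_score":[0.94,0.03,0.0001,0.0002,0.18]}`

	var rec Record
	if err := json.Unmarshal([]byte(raw), &rec); err != nil {
		panic(err)
	}
	// Window lines labeled "replace" are swapped for the after_edit lines,
	// starting at edit_start_line_idx in the target file.
	fmt.Printf("%s: %d window lines, label=%d, edit starts at line %d\n",
		rec.Hunk.FilePath, len(rec.Hunk.CodeWindow), rec.Label, rec.Hunk.EditStartLineIdx)
}
```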
{
"id": 4,
"code_window": [
"\t\t\terr: fmt.Errorf(\"Unable to create gce instance with running docker daemon for image %s. %v\", image, err),\n",
"\t\t}\n",
"\t}\n",
"\treturn testHost(host, archive)\n",
"}\n",
"\n",
"// Provision a gce instance using image\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testHost(host, archive, false)\n"
],
"file_path": "test/e2e_node/runner/run_e2e.go",
"type": "replace",
"edit_start_line_idx": 173
} | /*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// To run the e2e tests against one or more hosts on gce:
// $ godep go run run_e2e.go --logtostderr --v 2 --ssh-env gce --hosts <comma separated hosts>
// To run the e2e tests against one or more images on gce and provision them:
// $ godep go run run_e2e.go --logtostderr --v 2 --project <project> --zone <zone> --ssh-env gce --images <comma separated images>
package main
import (
"flag"
"fmt"
"math/rand"
"net/http"
"os"
"strings"
"time"
"k8s.io/kubernetes/test/e2e_node"
"github.com/golang/glog"
"github.com/pborman/uuid"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/compute/v1"
)
var instanceNamePrefix = flag.String("instance-name-prefix", "", "prefix for instance names")
var zone = flag.String("zone", "", "gce zone the hosts live in")
var project = flag.String("project", "", "gce project the hosts live in")
var images = flag.String("images", "", "images to test")
var hosts = flag.String("hosts", "", "hosts to test")
var cleanup = flag.Bool("cleanup", true, "If true remove files from remote hosts and delete temporary instances")
var buildOnly = flag.Bool("build-only", false, "If true, build e2e_node_test.tar.gz and exit.")
var computeService *compute.Service
type TestResult struct {
output string
err error
host string
}
func main() {
flag.Parse()
rand.Seed(time.Now().UTC().UnixNano())
if *buildOnly {
// Build the archive and exit
e2e_node.CreateTestArchive()
return
}
if *hosts == "" && *images == "" {
glog.Fatalf("Must specify one of --images or --hosts flag.")
}
if *images != "" && *zone == "" {
glog.Fatal("Must specify --zone flag")
}
if *images != "" && *project == "" {
glog.Fatal("Must specify --project flag")
}
if *instanceNamePrefix == "" {
*instanceNamePrefix = "tmp-node-e2e-" + uuid.NewUUID().String()[:8]
}
// Setup coloring
stat, _ := os.Stdout.Stat()
useColor := (stat.Mode() & os.ModeCharDevice) != 0
blue := ""
noColour := ""
if useColor {
blue = "\033[0;34m"
noColour = "\033[0m"
}
archive := e2e_node.CreateTestArchive()
defer os.Remove(archive)
results := make(chan *TestResult)
running := 0
if *images != "" {
// Setup the gce client for provisioning instances
// Getting credentials on gce jenkins is flaky, so try a couple times
var err error
for i := 0; i < 10; i++ {
var client *http.Client
client, err = google.DefaultClient(oauth2.NoContext, compute.ComputeScope)
if err != nil {
continue
}
computeService, err = compute.New(client)
if err != nil {
continue
}
time.Sleep(time.Second * 6)
}
if err != nil {
glog.Fatalf("Unable to create gcloud compute service using defaults. Make sure you are authenticated. %v", err)
}
for _, image := range strings.Split(*images, ",") {
running++
fmt.Printf("Initializing e2e tests using image %s.\n", image)
go func(image string) { results <- testImage(image, archive) }(image)
}
}
if *hosts != "" {
for _, host := range strings.Split(*hosts, ",") {
fmt.Printf("Initializing e2e tests using host %s.\n", host)
running++
go func(host string) {
results <- testHost(host, archive)
}(host)
}
}
// Wait for all tests to complete and emit the results
errCount := 0
for i := 0; i < running; i++ {
tr := <-results
host := tr.host
fmt.Printf("%s================================================================%s\n", blue, noColour)
if tr.err != nil {
errCount++
fmt.Printf("Failure Finished Host %s Test Suite\n%s\n%v\n", host, tr.output, tr.err)
} else {
fmt.Printf("Success Finished Host %s Test Suite\n%s\n", host, tr.output)
}
fmt.Printf("%s================================================================%s\n", blue, noColour)
}
// Set the exit code if there were failures
if errCount > 0 {
fmt.Printf("Failure: %d errors encountered.", errCount)
os.Exit(1)
}
}
// Run tests in archive against host
func testHost(host, archive string) *TestResult {
output, err := e2e_node.RunRemote(archive, host, *cleanup)
return &TestResult{
output: output,
err: err,
host: host,
}
}
// Provision a gce instance using image and run the tests in archive against the instance.
// Delete the instance afterward.
func testImage(image, archive string) *TestResult {
host, err := createInstance(image)
if *cleanup {
defer deleteInstance(image)
}
if err != nil {
return &TestResult{
err: fmt.Errorf("Unable to create gce instance with running docker daemon for image %s. %v", image, err),
}
}
return testHost(host, archive)
}
// Provision a gce instance using image
func createInstance(image string) (string, error) {
name := imageToInstanceName(image)
i := &compute.Instance{
Name: name,
MachineType: machineType(),
NetworkInterfaces: []*compute.NetworkInterface{
{
AccessConfigs: []*compute.AccessConfig{
{
Type: "ONE_TO_ONE_NAT",
Name: "External NAT",
},
}},
},
Disks: []*compute.AttachedDisk{
{
AutoDelete: true,
Boot: true,
Type: "PERSISTENT",
InitializeParams: &compute.AttachedDiskInitializeParams{
SourceImage: sourceImage(image),
},
},
},
}
op, err := computeService.Instances.Insert(*project, *zone, i).Do()
if err != nil {
return "", err
}
if op.Error != nil {
return "", fmt.Errorf("Could not create instance %s: %+v", name, op.Error)
}
instanceRunning := false
for i := 0; i < 30 && !instanceRunning; i++ {
if i > 0 {
time.Sleep(time.Second * 20)
}
var instance *compute.Instance
instance, err = computeService.Instances.Get(*project, *zone, name).Do()
if err != nil {
continue
}
if strings.ToUpper(instance.Status) != "RUNNING" {
err = fmt.Errorf("Instance %s not in state RUNNING, was %s.", name, instance.Status)
continue
}
var output string
output, err = e2e_node.RunSshCommand("ssh", name, "--", "sudo", "docker", "version")
if err != nil {
err = fmt.Errorf("Instance %s not running docker daemon - Command failed: %s", name, output)
continue
}
if !strings.Contains(output, "Server") {
err = fmt.Errorf("Instance %s not running docker daemon - Server not found: %s", name, output)
continue
}
instanceRunning = true
}
return name, err
}
func deleteInstance(image string) {
_, err := computeService.Instances.Delete(*project, *zone, imageToInstanceName(image)).Do()
if err != nil {
glog.Infof("Error deleting instance %s", imageToInstanceName(image))
}
}
func imageToInstanceName(image string) string {
return *instanceNamePrefix + "-" + image
}
func sourceImage(image string) string {
return fmt.Sprintf("projects/%s/global/images/%s", *project, image)
}
func machineType() string {
return fmt.Sprintf("zones/%s/machineTypes/n1-standard-1", *zone)
}
| test/e2e_node/runner/run_e2e.go | 1 | https://github.com/kubernetes/kubernetes/commit/5155df0287bd4e4a35dfc924750622243e574f84 | [
0.9464727640151978,
0.03800510987639427,
0.000159020273713395,
0.00022428584634326398,
0.1817343831062317
] |
{
"id": 4,
"code_window": [
"\t\t\terr: fmt.Errorf(\"Unable to create gce instance with running docker daemon for image %s. %v\", image, err),\n",
"\t\t}\n",
"\t}\n",
"\treturn testHost(host, archive)\n",
"}\n",
"\n",
"// Provision a gce instance using image\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testHost(host, archive, false)\n"
],
"file_path": "test/e2e_node/runner/run_e2e.go",
"type": "replace",
"edit_start_line_idx": 173
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package credentialprovider
import (
"sync"
"github.com/golang/glog"
)
// All registered credential providers.
var providersMutex sync.Mutex
var providers = make(map[string]DockerConfigProvider)
// RegisterCredentialProvider is called by provider implementations on
// initialization to register themselves, like so:
// func init() {
// RegisterCredentialProvider("name", &myProvider{...})
// }
func RegisterCredentialProvider(name string, provider DockerConfigProvider) {
providersMutex.Lock()
defer providersMutex.Unlock()
_, found := providers[name]
if found {
glog.Fatalf("Credential provider %q was registered twice", name)
}
glog.V(4).Infof("Registered credential provider %q", name)
providers[name] = provider
}
// NewDockerKeyring creates a DockerKeyring to use for resolving credentials,
// which lazily draws from the set of registered credential providers.
func NewDockerKeyring() DockerKeyring {
keyring := &lazyDockerKeyring{
Providers: make([]DockerConfigProvider, 0),
}
// TODO(mattmoor): iterating over the map is non-deterministic. We should
// introduce the notion of priorities for conflict resolution.
for name, provider := range providers {
if provider.Enabled() {
glog.V(4).Infof("Registering credential provider: %v", name)
keyring.Providers = append(keyring.Providers, provider)
}
}
return keyring
}
| pkg/credentialprovider/plugins.go | 0 | https://github.com/kubernetes/kubernetes/commit/5155df0287bd4e4a35dfc924750622243e574f84 | [
0.0001794502604752779,
0.0001736524427542463,
0.0001683420268818736,
0.00017226970521733165,
0.000004389348305267049
] |
{
"id": 4,
"code_window": [
"\t\t\terr: fmt.Errorf(\"Unable to create gce instance with running docker daemon for image %s. %v\", image, err),\n",
"\t\t}\n",
"\t}\n",
"\treturn testHost(host, archive)\n",
"}\n",
"\n",
"// Provision a gce instance using image\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testHost(host, archive, false)\n"
],
"file_path": "test/e2e_node/runner/run_e2e.go",
"type": "replace",
"edit_start_line_idx": 173
} | package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
| Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go | 0 | https://github.com/kubernetes/kubernetes/commit/5155df0287bd4e4a35dfc924750622243e574f84 | [
0.000172011845279485,
0.000172011845279485,
0.000172011845279485,
0.000172011845279485,
0
] |
{
"id": 4,
"code_window": [
"\t\t\terr: fmt.Errorf(\"Unable to create gce instance with running docker daemon for image %s. %v\", image, err),\n",
"\t\t}\n",
"\t}\n",
"\treturn testHost(host, archive)\n",
"}\n",
"\n",
"// Provision a gce instance using image\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testHost(host, archive, false)\n"
],
"file_path": "test/e2e_node/runner/run_e2e.go",
"type": "replace",
"edit_start_line_idx": 173
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
stderrs "errors"
"net/http"
"reflect"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
)
func TestErrorsToAPIStatus(t *testing.T) {
cases := map[error]unversioned.Status{
errors.NewNotFound(unversioned.GroupResource{Group: "legacy.kubernetes.io", Resource: "foos"}, "bar"): {
Status: unversioned.StatusFailure,
Code: http.StatusNotFound,
Reason: unversioned.StatusReasonNotFound,
Message: "foos.legacy.kubernetes.io \"bar\" not found",
Details: &unversioned.StatusDetails{
Group: "legacy.kubernetes.io",
Kind: "foos",
Name: "bar",
},
},
errors.NewAlreadyExists(api.Resource("foos"), "bar"): {
Status: unversioned.StatusFailure,
Code: http.StatusConflict,
Reason: "AlreadyExists",
Message: "foos \"bar\" already exists",
Details: &unversioned.StatusDetails{
Group: "",
Kind: "foos",
Name: "bar",
},
},
errors.NewConflict(api.Resource("foos"), "bar", stderrs.New("failure")): {
Status: unversioned.StatusFailure,
Code: http.StatusConflict,
Reason: "Conflict",
Message: "Operation cannot be fulfilled on foos \"bar\": failure",
Details: &unversioned.StatusDetails{
Group: "",
Kind: "foos",
Name: "bar",
},
},
}
for k, v := range cases {
actual := errToAPIStatus(k)
if !reflect.DeepEqual(actual, &v) {
t.Errorf("%s: Expected %#v, Got %#v", k, v, actual)
}
}
}
| pkg/apiserver/errors_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/5155df0287bd4e4a35dfc924750622243e574f84 | [
0.0001799224701244384,
0.0001753505930537358,
0.0001672149810474366,
0.00017517132801003754,
0.000003914974058716325
] |
{
"id": 0,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/security\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/server\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/sql\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/testutils\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/testutils/serverutils\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/leaktest\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 20
} | // Copyright 2021 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package kvtenantccl_test
import (
"context"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/redact"
"github.com/stretchr/testify/require"
)
// TestTenantTracesAreRedacted is an end-to-end version of
// `kvserver.TestMaybeRedactRecording`.
func TestTenantTracesAreRedacted(t *testing.T) {
defer leaktest.AfterTest(t)()
testutils.RunTrueAndFalse(t, "redactable", func(t *testing.T, redactable bool) {
testTenantTracesAreRedactedImpl(t, redactable)
})
}
const testStmt = "CREATE TABLE kv(k STRING PRIMARY KEY, v STRING)"
func testTenantTracesAreRedactedImpl(t *testing.T, redactable bool) {
defer log.Scope(t).Close(t)
ctx := context.Background()
const (
sensitiveString = "super-secret-stuff"
visibleString = "tenant-can-see-this"
)
recCh := make(chan tracingpb.Recording, 1)
args := base.TestServerArgs{
// Test hangs within a tenant. More investigation is required.
// Tracked with #76378.
DisableDefaultTestTenant: true,
Knobs: base.TestingKnobs{
Store: &kvserver.StoreTestingKnobs{
EvalKnobs: kvserverbase.BatchEvalTestingKnobs{
TestingEvalFilter: func(args kvserverbase.FilterArgs) *roachpb.Error {
log.Eventf(args.Ctx, "%v", sensitiveString)
log.Eventf(args.Ctx, "%v", redact.Safe(visibleString))
return nil
},
},
},
SQLExecutor: &sql.ExecutorTestingKnobs{
WithStatementTrace: func(trace tracingpb.Recording, stmt string) {
if stmt == testStmt {
recCh <- trace
}
},
},
},
}
s, db, _ := serverutils.StartServer(t, args)
if redactable {
runner := sqlutils.MakeSQLRunner(db)
runner.Exec(t, "SET CLUSTER SETTING trace.redactable.enabled = true")
}
defer db.Close()
defer s.Stopper().Stop(ctx)
// Queries from the system tenant will receive unredacted traces
// since the tracer will not have the redactable flag set.
t.Run("system-tenant", func(t *testing.T) {
runner := sqlutils.MakeSQLRunner(db)
runner.Exec(t, testStmt)
trace := <-recCh
require.NotEmpty(t, trace)
var found bool
for _, rs := range trace {
for _, s := range rs.Logs {
if strings.Contains(s.Msg().StripMarkers(), sensitiveString) {
found = true
}
}
}
require.True(t, found, "did not find '%q' in trace:\n%s",
sensitiveString, trace,
)
})
t.Run("regular-tenant", func(t *testing.T) {
_, tenDB := serverutils.StartTenant(t, s, base.TestTenantArgs{
TenantID: roachpb.MakeTenantID(security.EmbeddedTenantIDs()[0]),
TestingKnobs: args.Knobs,
})
defer tenDB.Close()
runner := sqlutils.MakeSQLRunner(tenDB)
runner.Exec(t, testStmt)
trace := <-recCh
require.NotEmpty(t, trace)
var found bool
var foundRedactedMarker bool
for _, rs := range trace {
for _, s := range rs.Logs {
if strings.Contains(s.Msg().StripMarkers(), sensitiveString) {
t.Fatalf(
"trace for tenant contained KV-level trace message '%q':\n%s",
sensitiveString, trace,
)
}
if strings.Contains(s.Msg().StripMarkers(), visibleString) {
found = true
}
if strings.Contains(s.Msg().StripMarkers(), string(server.TraceRedactedMarker)) {
foundRedactedMarker = true
}
}
}
// In both cases we don't expect to see the `TraceRedactedMarker`
// since that's only shown when the server is in an inconsistent
// state or if there's a version mismatch between client and server.
if redactable {
// If redaction was on, we expect the tenant to see safe information in its
// trace.
require.True(t, found, "did not see expected trace message '%q':\n%s",
visibleString, trace)
require.False(t, foundRedactedMarker, "unexpectedly found '%q':\n%s",
string(server.TraceRedactedMarker), trace)
} else {
// Otherwise, expect the opposite: not even safe information makes it through,
// because it gets replaced with foundRedactedMarker.
require.False(t, found, "unexpectedly saw message '%q':\n%s",
visibleString, trace)
require.False(t, foundRedactedMarker, "unexpectedly found '%q':\n%s",
string(server.TraceRedactedMarker), trace)
}
})
}
| pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.17077530920505524,
0.013932163827121258,
0.00016432719712611288,
0.00017147645121440291,
0.042225781828165054
] |
{
"id": 0,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/security\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/server\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/sql\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/testutils\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/testutils/serverutils\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/leaktest\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 20
} | // Code generated by generate-staticcheck; DO NOT EDIT.
//go:build bazel
// +build bazel
package sa1011
import (
util "github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/staticcheck"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/staticcheck"
)
var Analyzer *analysis.Analyzer
func init() {
for _, analyzer := range staticcheck.Analyzers {
if analyzer.Analyzer.Name == "SA1011" {
Analyzer = analyzer.Analyzer
break
}
}
util.MungeAnalyzer(Analyzer)
}
| build/bazelutil/staticcheckanalyzers/sa1011/analyzer.go | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.00019813468679785728,
0.00018157262820750475,
0.00017205005860887468,
0.00017453318287152797,
0.000011754926163121127
] |
{
"id": 0,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/security\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/server\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/sql\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/testutils\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/testutils/serverutils\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/leaktest\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 20
} | // Copyright 2022 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package cdctest
import (
"context"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/require"
)
// MakeRangeFeedValueReader starts rangefeed on the specified table and returns a function
// that returns the next *roachpb.RangeFeedValue from the table.
// This function is intended to be used in tests that wish to read low-level roachpb.KeyValue(s).
// Instead of trying to generate KVs ourselves (subject to encoding restrictions, etc), it is
// simpler to just "INSERT ..." into the table, and then use this function to read next value.
func MakeRangeFeedValueReader(
t *testing.T, execCfgI interface{}, desc catalog.TableDescriptor,
) (func(t *testing.T) *roachpb.RangeFeedValue, func()) {
t.Helper()
execCfg := execCfgI.(sql.ExecutorConfig)
rows := make(chan *roachpb.RangeFeedValue)
ctx, cleanup := context.WithCancel(context.Background())
_, err := execCfg.RangeFeedFactory.RangeFeed(ctx, "feed-"+desc.GetName(),
[]roachpb.Span{desc.PrimaryIndexSpan(keys.SystemSQLCodec)},
execCfg.Clock.Now(),
func(ctx context.Context, value *roachpb.RangeFeedValue) {
select {
case <-ctx.Done():
case rows <- value:
}
},
rangefeed.WithDiff(true),
)
require.NoError(t, err)
var timeout = 5 * time.Second
if util.RaceEnabled {
timeout = 3 * timeout
}
// Helper to read next rangefeed value.
dups := make(map[string]struct{})
return func(t *testing.T) *roachpb.RangeFeedValue {
t.Helper()
for {
select {
case r := <-rows:
rowKey := r.Key.String() + r.Value.String()
if _, isDup := dups[rowKey]; isDup {
log.Infof(context.Background(), "Skip duplicate %s", roachpb.PrettyPrintKey(nil, r.Key))
continue
}
log.Infof(context.Background(), "Read row %s", roachpb.PrettyPrintKey(nil, r.Key))
dups[rowKey] = struct{}{}
return r
case <-time.After(timeout):
t.Fatal("timeout reading row")
return nil
}
}
}, cleanup
}
// GetHydratedTableDescriptor returns a table descriptor for the specified
// table. The descriptor is "hydrated" if it has user defined data types.
func GetHydratedTableDescriptor(
t *testing.T, execCfgI interface{}, parts ...tree.Name,
) (td catalog.TableDescriptor) {
t.Helper()
dbName, scName, tableName := func() (tree.Name, tree.Name, tree.Name) {
switch len(parts) {
case 1:
return "defaultdb", "public", parts[0]
case 2:
return parts[0], "public", parts[1]
case 3:
return parts[0], parts[1], parts[2]
default:
t.Fatal("invalid length")
return "", "", ""
}
}()
execCfg := execCfgI.(sql.ExecutorConfig)
var found bool
require.NoError(t, sql.DescsTxn(context.Background(), &execCfg,
func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) {
found, td, err = col.GetImmutableTableByName(ctx, txn,
tree.NewTableNameWithSchema(dbName, scName, tableName),
tree.ObjectLookupFlags{
CommonLookupFlags: tree.CommonLookupFlags{
Required: true,
AvoidLeased: true,
},
})
return err
}))
require.True(t, found)
return td
}
| pkg/ccl/changefeedccl/cdctest/row.go | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.009637542068958282,
0.0010934576857835054,
0.0001639695547055453,
0.00017320354527328163,
0.0026129288598895073
] |
{
"id": 0,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/security\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/server\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/sql\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/testutils\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/testutils/serverutils\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/leaktest\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 20
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// "make test" would normally test this file, but it should only be tested
// within docker compose.
//go:build compose
// +build compose
package compare
import (
"context"
"flag"
"os"
"path/filepath"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/cmd/cmpconn"
"github.com/cockroachdb/cockroach/pkg/internal/sqlsmith"
"github.com/cockroachdb/cockroach/pkg/sql/randgen"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/jackc/pgx/v4"
)
var (
flagEach = flag.Duration("each", 10*time.Minute, "individual test timeout")
flagArtifacts = flag.String("artifacts", "", "artifact directory")
)
func TestCompare(t *testing.T) {
uris := map[string]struct {
addr string
init []string
}{
"postgres": {
addr: "postgresql://postgres@postgres:5432/postgres",
init: []string{
"drop schema if exists public cascade",
"create schema public",
"CREATE EXTENSION IF NOT EXISTS postgis",
"CREATE EXTENSION IF NOT EXISTS postgis_topology",
"CREATE EXTENSION IF NOT EXISTS fuzzystrmatch;",
"CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";",
"CREATE EXTENSION IF NOT EXISTS pg_trgm;",
},
},
"cockroach1": {
addr: "postgresql://root@cockroach1:26257/postgres?sslmode=disable",
init: []string{
"drop database if exists postgres",
"create database postgres",
},
},
"cockroach2": {
addr: "postgresql://root@cockroach2:26257/postgres?sslmode=disable",
init: []string{
"drop database if exists postgres",
"create database postgres",
},
},
}
configs := map[string]testConfig{
"postgres": {
setup: sqlsmith.Setups[sqlsmith.RandTableSetupName],
setupMutators: []randgen.Mutator{randgen.PostgresCreateTableMutator},
opts: []sqlsmith.SmitherOption{sqlsmith.PostgresMode()},
ignoreSQLErrors: true,
conns: []testConn{
{
name: "cockroach1",
mutators: []randgen.Mutator{},
},
{
name: "postgres",
mutators: []randgen.Mutator{randgen.PostgresMutator},
},
},
},
"mutators": {
setup: sqlsmith.Setups[sqlsmith.RandTableSetupName],
opts: []sqlsmith.SmitherOption{sqlsmith.CompareMode()},
ignoreSQLErrors: true,
conns: []testConn{
{
name: "cockroach1",
mutators: []randgen.Mutator{},
},
{
name: "cockroach2",
mutators: []randgen.Mutator{
randgen.StatisticsMutator,
randgen.ForeignKeyMutator,
randgen.ColumnFamilyMutator,
randgen.StatisticsMutator,
randgen.IndexStoringMutator,
randgen.PartialIndexMutator,
},
},
},
},
}
ctx := context.Background()
// docker-compose requires us to manually check for when a container
// is ready to receive connections.
// See https://docs.docker.com/compose/startup-order/
for name, uri := range uris {
t.Logf("Checking connection to: %s", name)
testutils.SucceedsSoon(t, func() error {
_, err := pgx.Connect(ctx, uri.addr)
return err
})
}
for confName, config := range configs {
t.Run(confName, func(t *testing.T) {
t.Logf("starting test: %s", confName)
rng, _ := randutil.NewTestRand()
setup := config.setup(rng)
for i := range setup {
setup[i], _ = randgen.ApplyString(rng, setup[i], config.setupMutators...)
}
conns := map[string]cmpconn.Conn{}
for _, testCn := range config.conns {
t.Logf("initializing connection: %s", testCn.name)
uri, ok := uris[testCn.name]
if !ok {
t.Fatalf("bad connection name: %s", testCn.name)
}
conn, err := cmpconn.NewConnWithMutators(ctx, uri.addr, rng, testCn.mutators)
if err != nil {
t.Fatal(err)
}
defer func(conn cmpconn.Conn) {
conn.Close(ctx)
}(conn)
for _, init := range uri.init {
if err := conn.Exec(ctx, init); err != nil {
t.Fatalf("%s: %v", testCn.name, err)
}
}
for i := range setup {
stmt, _ := randgen.ApplyString(rng, setup[i], testCn.mutators...)
if err := conn.Exec(ctx, stmt); err != nil {
t.Log(stmt)
t.Fatalf("%s: %v", testCn.name, err)
}
}
conns[testCn.name] = conn
}
smither, err := sqlsmith.NewSmither(conns[config.conns[0].name].DB(), rng, config.opts...)
if err != nil {
t.Fatal(err)
}
ignoredErrCount := 0
totalQueryCount := 0
until := time.After(*flagEach)
for {
select {
case <-until:
t.Logf("done with test. totalQueryCount=%d ignoredErrCount=%d test=%s",
totalQueryCount, ignoredErrCount, confName,
)
return
default:
}
query := smither.Generate()
if ignoredErr, err := cmpconn.CompareConns(
ctx, time.Second*30, conns, "" /* prep */, query, config.ignoreSQLErrors,
); err != nil {
path := filepath.Join(*flagArtifacts, confName+".log")
if err := os.WriteFile(path, []byte(err.Error()), 0666); err != nil {
t.Log(err)
}
t.Fatal(err)
} else if ignoredErr {
ignoredErrCount++
}
totalQueryCount++
// Make sure we can still ping on a connection. If we can't, we may have
// crashed something.
for name, conn := range conns {
if err := conn.Ping(ctx); err != nil {
t.Log(query)
t.Fatalf("%s: ping: %v", name, err)
}
}
}
})
}
}
type testConfig struct {
opts []sqlsmith.SmitherOption
conns []testConn
setup sqlsmith.Setup
setupMutators []randgen.Mutator
ignoreSQLErrors bool
}
type testConn struct {
name string
mutators []randgen.Mutator
}
| pkg/compose/compare/compare/compare_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.006905911490321159,
0.0004953103489242494,
0.00016431324183940887,
0.00016963883535936475,
0.0014016765635460615
] |
{
"id": 1,
"code_window": [
"\t\ttrace := <-recCh\n",
"\n",
"\t\trequire.NotEmpty(t, trace)\n",
"\t\tvar found bool\n",
"\t\tvar foundRedactedMarker bool\n",
"\t\tfor _, rs := range trace {\n",
"\t\t\tfor _, s := range rs.Logs {\n",
"\t\t\t\tif strings.Contains(s.Msg().StripMarkers(), sensitiveString) {\n",
"\t\t\t\t\tt.Fatalf(\n",
"\t\t\t\t\t\t\"trace for tenant contained KV-level trace message '%q':\\n%s\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 119
} | // Copyright 2021 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package kvtenantccl_test
import (
"context"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/redact"
"github.com/stretchr/testify/require"
)
// TestTenantTracesAreRedacted is an end-to-end version of
// `kvserver.TestMaybeRedactRecording`.
func TestTenantTracesAreRedacted(t *testing.T) {
defer leaktest.AfterTest(t)()
testutils.RunTrueAndFalse(t, "redactable", func(t *testing.T, redactable bool) {
testTenantTracesAreRedactedImpl(t, redactable)
})
}
const testStmt = "CREATE TABLE kv(k STRING PRIMARY KEY, v STRING)"
func testTenantTracesAreRedactedImpl(t *testing.T, redactable bool) {
defer log.Scope(t).Close(t)
ctx := context.Background()
const (
sensitiveString = "super-secret-stuff"
visibleString = "tenant-can-see-this"
)
recCh := make(chan tracingpb.Recording, 1)
args := base.TestServerArgs{
// Test hangs within a tenant. More investigation is required.
// Tracked with #76378.
DisableDefaultTestTenant: true,
Knobs: base.TestingKnobs{
Store: &kvserver.StoreTestingKnobs{
EvalKnobs: kvserverbase.BatchEvalTestingKnobs{
TestingEvalFilter: func(args kvserverbase.FilterArgs) *roachpb.Error {
log.Eventf(args.Ctx, "%v", sensitiveString)
log.Eventf(args.Ctx, "%v", redact.Safe(visibleString))
return nil
},
},
},
SQLExecutor: &sql.ExecutorTestingKnobs{
WithStatementTrace: func(trace tracingpb.Recording, stmt string) {
if stmt == testStmt {
recCh <- trace
}
},
},
},
}
s, db, _ := serverutils.StartServer(t, args)
if redactable {
runner := sqlutils.MakeSQLRunner(db)
runner.Exec(t, "SET CLUSTER SETTING trace.redactable.enabled = true")
}
defer db.Close()
defer s.Stopper().Stop(ctx)
// Queries from the system tenant will receive unredacted traces
// since the tracer will not have the redactable flag set.
t.Run("system-tenant", func(t *testing.T) {
runner := sqlutils.MakeSQLRunner(db)
runner.Exec(t, testStmt)
trace := <-recCh
require.NotEmpty(t, trace)
var found bool
for _, rs := range trace {
for _, s := range rs.Logs {
if strings.Contains(s.Msg().StripMarkers(), sensitiveString) {
found = true
}
}
}
require.True(t, found, "did not find '%q' in trace:\n%s",
sensitiveString, trace,
)
})
t.Run("regular-tenant", func(t *testing.T) {
_, tenDB := serverutils.StartTenant(t, s, base.TestTenantArgs{
TenantID: roachpb.MakeTenantID(security.EmbeddedTenantIDs()[0]),
TestingKnobs: args.Knobs,
})
defer tenDB.Close()
runner := sqlutils.MakeSQLRunner(tenDB)
runner.Exec(t, testStmt)
trace := <-recCh
require.NotEmpty(t, trace)
var found bool
var foundRedactedMarker bool
for _, rs := range trace {
for _, s := range rs.Logs {
if strings.Contains(s.Msg().StripMarkers(), sensitiveString) {
t.Fatalf(
"trace for tenant contained KV-level trace message '%q':\n%s",
sensitiveString, trace,
)
}
if strings.Contains(s.Msg().StripMarkers(), visibleString) {
found = true
}
if strings.Contains(s.Msg().StripMarkers(), string(server.TraceRedactedMarker)) {
foundRedactedMarker = true
}
}
}
// In both cases we don't expect to see the `TraceRedactedMarker`
// since that's only shown when the server is in an inconsistent
// state or if there's a version mismatch between client and server.
if redactable {
// If redaction was on, we expect the tenant to see safe information in its
// trace.
require.True(t, found, "did not see expected trace message '%q':\n%s",
visibleString, trace)
require.False(t, foundRedactedMarker, "unexpectedly found '%q':\n%s",
string(server.TraceRedactedMarker), trace)
} else {
// Otherwise, expect the opposite: not even safe information makes it through,
// because it gets replaced with foundRedactedMarker.
require.False(t, found, "unexpectedly saw message '%q':\n%s",
visibleString, trace)
require.False(t, foundRedactedMarker, "unexpectedly found '%q':\n%s",
string(server.TraceRedactedMarker), trace)
}
})
}
| pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.9990154504776001,
0.51058030128479,
0.00016899046022444963,
0.567754864692688,
0.48776543140411377
] |
{
"id": 1,
"code_window": [
"\t\ttrace := <-recCh\n",
"\n",
"\t\trequire.NotEmpty(t, trace)\n",
"\t\tvar found bool\n",
"\t\tvar foundRedactedMarker bool\n",
"\t\tfor _, rs := range trace {\n",
"\t\t\tfor _, s := range rs.Logs {\n",
"\t\t\t\tif strings.Contains(s.Msg().StripMarkers(), sensitiveString) {\n",
"\t\t\t\t\tt.Fatalf(\n",
"\t\t\t\t\t\t\"trace for tenant contained KV-level trace message '%q':\\n%s\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 119
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package gcjobnotifier
import (
"context"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/config"
"github.com/cockroachdb/cockroach/pkg/config/zonepb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/stretchr/testify/require"
)
type testingProvider struct {
syncutil.Mutex
cfg *config.SystemConfig
ch chan struct{}
}
func (t *testingProvider) GetSystemConfig() *config.SystemConfig {
t.Lock()
defer t.Unlock()
return t.cfg
}
func (t *testingProvider) setSystemConfig(cfg *config.SystemConfig) {
t.Lock()
defer t.Unlock()
t.cfg = cfg
}
func (t *testingProvider) RegisterSystemConfigChannel() (<-chan struct{}, func()) {
return t.ch, func() {}
}
var _ config.SystemConfigProvider = (*testingProvider)(nil)
func TestNotifier(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
settings := cluster.MakeTestingClusterSettings()
t.Run("start with stopped stopper leads to nil being returned", func(t *testing.T) {
stopper := stop.NewStopper()
stopper.Stop(ctx)
n := New(settings, &testingProvider{}, keys.SystemSQLCodec, stopper)
n.Start(ctx)
ch, _ := n.AddNotifyee(ctx)
require.Nil(t, ch)
})
stopper := stop.NewStopper()
defer stopper.Stop(ctx)
t.Run("panic on double start", func(t *testing.T) {
n := New(settings, &testingProvider{ch: make(chan struct{})}, keys.SystemSQLCodec, stopper)
n.Start(ctx)
require.Panics(t, func() {
n.Start(ctx)
})
})
t.Run("panic on AddNotifyee before start", func(t *testing.T) {
n := New(settings, &testingProvider{ch: make(chan struct{})}, keys.SystemSQLCodec, stopper)
require.Panics(t, func() {
n.AddNotifyee(ctx)
})
})
t.Run("safe to on AddNotifyee after start before config", func(t *testing.T) {
ch := make(chan struct{}, 1)
cfg := mkSystemConfig(mkZoneConfigKV(1, 1, "1"))
p := &testingProvider{ch: ch}
n := New(settings, p, keys.SystemSQLCodec, stopper)
n.Start(ctx)
n1Ch, cleanup := n.AddNotifyee(ctx)
defer cleanup()
select {
case <-time.After(10 * time.Millisecond):
case <-n1Ch:
t.Fatal("should not have gotten notified")
}
p.setSystemConfig(cfg)
ch <- struct{}{}
<-n1Ch
})
t.Run("notifies on changed delta and cleanup", func(t *testing.T) {
cfg := config.NewSystemConfig(zonepb.DefaultSystemZoneConfigRef())
cfg.Values = []roachpb.KeyValue{
mkZoneConfigKV(1, 1, "1"),
}
ch := make(chan struct{}, 1)
p := &testingProvider{
cfg: mkSystemConfig(mkZoneConfigKV(1, 1, "1")),
ch: ch,
}
n := New(settings, p, keys.SystemSQLCodec, stopper)
n.Start(ctx)
n1Ch, cleanup1 := n.AddNotifyee(ctx)
t.Run("don't receive on new notifyee", func(t *testing.T) {
expectNoSend(t, n1Ch)
})
t.Run("don't receive with no change", func(t *testing.T) {
ch <- struct{}{}
expectNoSend(t, n1Ch)
})
n2Ch, _ := n.AddNotifyee(ctx)
t.Run("receive from all notifyees when data does change", func(t *testing.T) {
p.setSystemConfig(mkSystemConfig(mkZoneConfigKV(1, 2, "2")))
ch <- struct{}{}
expectSend(t, n1Ch)
expectSend(t, n2Ch)
})
t.Run("don't receive after cleanup", func(t *testing.T) {
cleanup1()
p.setSystemConfig(mkSystemConfig(mkZoneConfigKV(1, 3, "3")))
ch <- struct{}{}
expectSend(t, n2Ch)
expectNoSend(t, n1Ch)
})
})
}
const (
// used for timeouts of things which should be fast
longTime = time.Second
// used for sanity check of channel sends which shouldn't happen
shortTime = 10 * time.Millisecond
)
func expectNoSend(t *testing.T, ch <-chan struct{}) {
t.Helper()
select {
case <-ch:
t.Fatal("did not expect to receive")
case <-time.After(shortTime):
}
}
func expectSend(t *testing.T, ch <-chan struct{}) {
t.Helper()
select {
case <-ch:
case <-time.After(longTime):
t.Fatal("expected to receive")
}
}
func mkZoneConfigKV(id descpb.ID, ts int64, value string) roachpb.KeyValue {
kv := roachpb.KeyValue{
Key: config.MakeZoneKey(keys.SystemSQLCodec, id),
Value: roachpb.Value{
Timestamp: hlc.Timestamp{WallTime: ts},
},
}
kv.Value.SetString(value)
return kv
}
func mkSystemConfig(kvs ...roachpb.KeyValue) *config.SystemConfig {
cfg := config.NewSystemConfig(zonepb.DefaultSystemZoneConfigRef())
cfg.Values = kvs
return cfg
}
| pkg/sql/gcjob/gcjobnotifier/notifier_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.0003822431026492268,
0.0001988531876122579,
0.00016418810992036015,
0.00017292308621108532,
0.0000663558894302696
] |
{
"id": 1,
"code_window": [
"\t\ttrace := <-recCh\n",
"\n",
"\t\trequire.NotEmpty(t, trace)\n",
"\t\tvar found bool\n",
"\t\tvar foundRedactedMarker bool\n",
"\t\tfor _, rs := range trace {\n",
"\t\t\tfor _, s := range rs.Logs {\n",
"\t\t\t\tif strings.Contains(s.Msg().StripMarkers(), sensitiveString) {\n",
"\t\t\t\t\tt.Fatalf(\n",
"\t\t\t\t\t\t\"trace for tenant contained KV-level trace message '%q':\\n%s\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 119
} | cancel_queries_stmt ::=
'CANCEL' 'QUERY' query_id
| 'CANCEL' 'QUERY' 'IF' 'EXISTS' query_id
| 'CANCEL' 'QUERIES' select_stmt
| 'CANCEL' 'QUERIES' 'IF' 'EXISTS' select_stmt
| docs/generated/sql/bnf/cancel_query.bnf | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.00017201283480972052,
0.00017201283480972052,
0.00017201283480972052,
0.00017201283480972052,
0
] |
{
"id": 1,
"code_window": [
"\t\ttrace := <-recCh\n",
"\n",
"\t\trequire.NotEmpty(t, trace)\n",
"\t\tvar found bool\n",
"\t\tvar foundRedactedMarker bool\n",
"\t\tfor _, rs := range trace {\n",
"\t\t\tfor _, s := range rs.Logs {\n",
"\t\t\t\tif strings.Contains(s.Msg().StripMarkers(), sensitiveString) {\n",
"\t\t\t\t\tt.Fatalf(\n",
"\t\t\t\t\t\t\"trace for tenant contained KV-level trace message '%q':\\n%s\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 119
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"fmt"
"io"
"strings"
"text/template"
"github.com/cockroachdb/cockroach/pkg/col/typeconv"
"github.com/cockroachdb/cockroach/pkg/sql/types"
)
type vecToDatumTmplInfo struct {
// TypeFamily contains the type family this struct is handling, with
// "types." prefix.
TypeFamily string
// Widths contains all of the type widths that this struct is handling.
// Note that the entry with 'anyWidth' width must be last in the slice.
Widths []vecToDatumWidthTmplInfo
}
type vecToDatumWidthTmplInfo struct {
CanonicalTypeFamily types.Family
Width int32
VecMethod string
// ConversionTmpl is a "format string" for the conversion template. It has
// the same "signature" as AssignConverted, meaning that it should use
// %[1]s for targetElem
// %[2]s for sourceElem
// %[3]s for datumAlloc.
ConversionTmpl string
}
// AssignConverted returns a string that performs a conversion of the element
// sourceElem and assigns the result to the newly declared targetElem.
// datumAlloc is the name of *tree.DatumAlloc struct that can be used to
// allocate new datums.
func (i vecToDatumWidthTmplInfo) AssignConverted(targetElem, sourceElem, datumAlloc string) string {
return fmt.Sprintf(i.ConversionTmpl, targetElem, sourceElem, datumAlloc)
}
// Sliceable returns whether the vector of i.CanonicalTypeFamily can be sliced
// (i.e. whether it is a Golang's slice).
func (i vecToDatumWidthTmplInfo) Sliceable() bool {
return sliceable(i.CanonicalTypeFamily)
}
// Remove unused warnings.
var _ = vecToDatumWidthTmplInfo{}.AssignConverted
var _ = vecToDatumWidthTmplInfo{}.Sliceable
// vecToDatumConversionTmpls maps the type families to the corresponding
// "format" strings (see comment above for details).
// Note that the strings are formatted this way so that generated code doesn't
// have empty lines.
var vecToDatumConversionTmpls = map[types.Family]string{
types.BoolFamily: `%[1]s := tree.MakeDBool(tree.DBool(%[2]s))`,
// Note that currently, regardless of the integer's width, we always return
// INT8, so there is a single conversion template for IntFamily.
types.IntFamily: `%[1]s := %[3]s.NewDInt(tree.DInt(%[2]s))`,
types.FloatFamily: `%[1]s := %[3]s.NewDFloat(tree.DFloat(%[2]s))`,
types.DecimalFamily: `%[1]s := %[3]s.NewDDecimal(tree.DDecimal{Decimal: %[2]s})`,
types.DateFamily: `%[1]s := %[3]s.NewDDate(tree.DDate{Date: pgdate.MakeCompatibleDateFromDisk(%[2]s)})`,
types.BytesFamily: `// Note that there is no need for a copy since DBytes uses a string
// as underlying storage, which will perform the copy for us.
%[1]s := %[3]s.NewDBytes(tree.DBytes(%[2]s))`,
types.EncodedKeyFamily: `// Note that there is no need for a copy since DEncodedKey uses a string
// as underlying storage, which will perform the copy for us.
%[1]s := %[3]s.NewDEncodedKey(tree.DEncodedKey(%[2]s))`,
types.JsonFamily: `
// The following operation deliberately copies the input JSON
// bytes, since FromEncoding is lazy and keeps a handle on the bytes
// it is passed in.
_bytes, _err := json.EncodeJSON(nil, %[2]s)
if _err != nil {
colexecerror.ExpectedError(_err)
}
var _j json.JSON
_j, _err = json.FromEncoding(_bytes)
if _err != nil {
colexecerror.ExpectedError(_err)
}
%[1]s := %[3]s.NewDJSON(tree.DJSON{JSON: _j})`,
types.UuidFamily: ` // Note that there is no need for a copy because uuid.FromBytes
// will perform a copy.
id, err := uuid.FromBytes(%[2]s)
if err != nil {
colexecerror.InternalError(err)
}
%[1]s := %[3]s.NewDUuid(tree.DUuid{UUID: id})`,
types.TimestampFamily: `%[1]s := %[3]s.NewDTimestamp(tree.DTimestamp{Time: %[2]s})`,
types.TimestampTZFamily: `%[1]s := %[3]s.NewDTimestampTZ(tree.DTimestampTZ{Time: %[2]s})`,
types.IntervalFamily: `%[1]s := %[3]s.NewDInterval(tree.DInterval{Duration: %[2]s})`,
typeconv.DatumVecCanonicalTypeFamily: `%[1]s := %[2]s.(tree.Datum)`,
}
const vecToDatumTmpl = "pkg/sql/colconv/vec_to_datum_tmpl.go"
func genVecToDatum(inputFileContents string, wr io.Writer) error {
r := strings.NewReplacer(
"_TYPE_FAMILY", "{{.TypeFamily}}",
"_TYPE_WIDTH", typeWidthReplacement,
"_VEC_METHOD", "{{.VecMethod}}",
)
s := r.Replace(inputFileContents)
assignConvertedRe := makeFunctionRegex("_ASSIGN_CONVERTED", 3)
s = assignConvertedRe.ReplaceAllString(s, makeTemplateFunctionCall("AssignConverted", 3))
tmpl, err := template.New("vec_to_datum").Funcs(template.FuncMap{"buildDict": buildDict}).Parse(s)
if err != nil {
return err
}
var tmplInfos []vecToDatumTmplInfo
// Note that String family is a special case that is handled separately by
// the template explicitly, so it is omitted from this slice.
optimizedTypeFamilies := []types.Family{
types.BoolFamily, types.IntFamily, types.FloatFamily, types.DecimalFamily,
types.DateFamily, types.BytesFamily, types.EncodedKeyFamily, types.JsonFamily,
types.UuidFamily, types.TimestampFamily, types.TimestampTZFamily, types.IntervalFamily,
}
for _, typeFamily := range optimizedTypeFamilies {
canonicalTypeFamily := typeconv.TypeFamilyToCanonicalTypeFamily(typeFamily)
tmplInfo := vecToDatumTmplInfo{TypeFamily: "types." + typeFamily.String()}
widths := supportedWidthsByCanonicalTypeFamily[canonicalTypeFamily]
if typeFamily != canonicalTypeFamily {
// We have a type family that is supported via another's physical
// representation (e.g. dates are the same as INT8s), so we
// override the widths to use only the default one.
widths = []int32{anyWidth}
}
for _, width := range widths {
tmplInfo.Widths = append(tmplInfo.Widths, vecToDatumWidthTmplInfo{
CanonicalTypeFamily: canonicalTypeFamily,
Width: width,
VecMethod: toVecMethod(canonicalTypeFamily, width),
ConversionTmpl: vecToDatumConversionTmpls[typeFamily],
})
}
tmplInfos = append(tmplInfos, tmplInfo)
}
// Datum-backed types require special handling.
tmplInfos = append(tmplInfos, vecToDatumTmplInfo{
// This special "type family" value will result in matching all type
// families that haven't been matched explicitly, i.e a code like this
// will get generated:
// switch typ.Family() {
// case <all types that have optimized physical representation>
// ...
// case typeconv.DatumVecCanonicalTypeFamily:
// default:
// <datum-vec conversion>
// }
// Such structure requires that datum-vec tmpl info is added last.
TypeFamily: "typeconv.DatumVecCanonicalTypeFamily: default",
Widths: []vecToDatumWidthTmplInfo{{
CanonicalTypeFamily: typeconv.DatumVecCanonicalTypeFamily,
Width: anyWidth,
VecMethod: toVecMethod(typeconv.DatumVecCanonicalTypeFamily, anyWidth),
ConversionTmpl: vecToDatumConversionTmpls[typeconv.DatumVecCanonicalTypeFamily],
}},
})
return tmpl.Execute(wr, tmplInfos)
}
func init() {
registerGenerator(genVecToDatum, "vec_to_datum.eg.go", vecToDatumTmpl)
}
| pkg/sql/colexec/execgen/cmd/execgen/vec_to_datum_gen.go | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.005338243208825588,
0.0005941330455243587,
0.00016503712686244398,
0.00017365353414788842,
0.0012096697464585304
] |
{
"id": 2,
"code_window": [
"\t\t\t\t}\n",
"\t\t\t\tif strings.Contains(s.Msg().StripMarkers(), visibleString) {\n",
"\t\t\t\t\tfound = true\n",
"\t\t\t\t}\n",
"\t\t\t\tif strings.Contains(s.Msg().StripMarkers(), string(server.TraceRedactedMarker)) {\n",
"\t\t\t\t\tfoundRedactedMarker = true\n",
"\t\t\t\t}\n",
"\t\t\t}\n",
"\t\t}\n",
"\n",
"\t\t// In both cases we don't expect to see the `TraceRedactedMarker`\n",
"\t\t// since that's only shown when the server is in an inconsistent\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 131
} | // Copyright 2021 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package kvtenantccl_test
import (
"context"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/redact"
"github.com/stretchr/testify/require"
)
// TestTenantTracesAreRedacted is an end-to-end version of
// `kvserver.TestMaybeRedactRecording`.
func TestTenantTracesAreRedacted(t *testing.T) {
defer leaktest.AfterTest(t)()
testutils.RunTrueAndFalse(t, "redactable", func(t *testing.T, redactable bool) {
testTenantTracesAreRedactedImpl(t, redactable)
})
}
const testStmt = "CREATE TABLE kv(k STRING PRIMARY KEY, v STRING)"
func testTenantTracesAreRedactedImpl(t *testing.T, redactable bool) {
defer log.Scope(t).Close(t)
ctx := context.Background()
const (
sensitiveString = "super-secret-stuff"
visibleString = "tenant-can-see-this"
)
recCh := make(chan tracingpb.Recording, 1)
args := base.TestServerArgs{
// Test hangs within a tenant. More investigation is required.
// Tracked with #76378.
DisableDefaultTestTenant: true,
Knobs: base.TestingKnobs{
Store: &kvserver.StoreTestingKnobs{
EvalKnobs: kvserverbase.BatchEvalTestingKnobs{
TestingEvalFilter: func(args kvserverbase.FilterArgs) *roachpb.Error {
log.Eventf(args.Ctx, "%v", sensitiveString)
log.Eventf(args.Ctx, "%v", redact.Safe(visibleString))
return nil
},
},
},
SQLExecutor: &sql.ExecutorTestingKnobs{
WithStatementTrace: func(trace tracingpb.Recording, stmt string) {
if stmt == testStmt {
recCh <- trace
}
},
},
},
}
s, db, _ := serverutils.StartServer(t, args)
if redactable {
runner := sqlutils.MakeSQLRunner(db)
runner.Exec(t, "SET CLUSTER SETTING trace.redactable.enabled = true")
}
defer db.Close()
defer s.Stopper().Stop(ctx)
// Queries from the system tenant will receive unredacted traces
// since the tracer will not have the redactable flag set.
t.Run("system-tenant", func(t *testing.T) {
runner := sqlutils.MakeSQLRunner(db)
runner.Exec(t, testStmt)
trace := <-recCh
require.NotEmpty(t, trace)
var found bool
for _, rs := range trace {
for _, s := range rs.Logs {
if strings.Contains(s.Msg().StripMarkers(), sensitiveString) {
found = true
}
}
}
require.True(t, found, "did not find '%q' in trace:\n%s",
sensitiveString, trace,
)
})
t.Run("regular-tenant", func(t *testing.T) {
_, tenDB := serverutils.StartTenant(t, s, base.TestTenantArgs{
TenantID: roachpb.MakeTenantID(security.EmbeddedTenantIDs()[0]),
TestingKnobs: args.Knobs,
})
defer tenDB.Close()
runner := sqlutils.MakeSQLRunner(tenDB)
runner.Exec(t, testStmt)
trace := <-recCh
require.NotEmpty(t, trace)
var found bool
var foundRedactedMarker bool
for _, rs := range trace {
for _, s := range rs.Logs {
if strings.Contains(s.Msg().StripMarkers(), sensitiveString) {
t.Fatalf(
"trace for tenant contained KV-level trace message '%q':\n%s",
sensitiveString, trace,
)
}
if strings.Contains(s.Msg().StripMarkers(), visibleString) {
found = true
}
if strings.Contains(s.Msg().StripMarkers(), string(server.TraceRedactedMarker)) {
foundRedactedMarker = true
}
}
}
// In both cases we don't expect to see the `TraceRedactedMarker`
// since that's only shown when the server is in an inconsistent
// state or if there's a version mismatch between client and server.
if redactable {
// If redaction was on, we expect the tenant to see safe information in its
// trace.
require.True(t, found, "did not see expected trace message '%q':\n%s",
visibleString, trace)
require.False(t, foundRedactedMarker, "unexpectedly found '%q':\n%s",
string(server.TraceRedactedMarker), trace)
} else {
// Otherwise, expect the opposite: not even safe information makes it through,
// because it gets replaced with foundRedactedMarker.
require.False(t, found, "unexpectedly saw message '%q':\n%s",
visibleString, trace)
require.False(t, foundRedactedMarker, "unexpectedly found '%q':\n%s",
string(server.TraceRedactedMarker), trace)
}
})
}
| pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.9965994954109192,
0.1735074371099472,
0.0001660542911849916,
0.0011923597194254398,
0.3609514832496643
] |
{
"id": 2,
"code_window": [
"\t\t\t\t}\n",
"\t\t\t\tif strings.Contains(s.Msg().StripMarkers(), visibleString) {\n",
"\t\t\t\t\tfound = true\n",
"\t\t\t\t}\n",
"\t\t\t\tif strings.Contains(s.Msg().StripMarkers(), string(server.TraceRedactedMarker)) {\n",
"\t\t\t\t\tfoundRedactedMarker = true\n",
"\t\t\t\t}\n",
"\t\t\t}\n",
"\t\t}\n",
"\n",
"\t\t// In both cases we don't expect to see the `TraceRedactedMarker`\n",
"\t\t// since that's only shown when the server is in an inconsistent\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 131
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import { assert } from "chai";
import {
DurationFitScale,
durationUnits,
BytesFitScale,
byteUnits,
HexStringToInt64String,
} from "./format";
describe("Format utils", () => {
describe("DurationFitScale", () => {
it("converts nanoseconds to provided units", () => {
// test zero values
assert.equal(DurationFitScale(durationUnits[0])(undefined), "0.00 ns");
assert.equal(DurationFitScale(durationUnits[0])(0), "0.00 ns");
// "ns", "µs", "ms", "s"
assert.equal(DurationFitScale(durationUnits[0])(32), "32.00 ns");
assert.equal(DurationFitScale(durationUnits[1])(32120), "32.12 µs");
assert.equal(DurationFitScale(durationUnits[2])(32122300), "32.12 ms");
assert.equal(DurationFitScale(durationUnits[3])(32122343000), "32.12 s");
});
});
describe("BytesFitScale", () => {
it("converts bytes to provided units", () => {
// test zero values
assert.equal(BytesFitScale(byteUnits[0])(undefined), "0.00 B");
assert.equal(BytesFitScale(byteUnits[0])(0), "0.00 B");
// "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"
assert.equal(BytesFitScale(byteUnits[0])(1), "1.00 B");
assert.equal(BytesFitScale(byteUnits[1])(10240), "10.00 KiB");
assert.equal(BytesFitScale(byteUnits[2])(12582912), "12.00 MiB");
assert.equal(BytesFitScale(byteUnits[3])(12884901888), "12.00 GiB");
assert.equal(BytesFitScale(byteUnits[4])(1.319414e13), "12.00 TiB");
assert.equal(BytesFitScale(byteUnits[5])(1.3510799e16), "12.00 PiB");
assert.equal(BytesFitScale(byteUnits[6])(1.3835058e19), "12.00 EiB");
assert.equal(BytesFitScale(byteUnits[7])(1.4167099e22), "12.00 ZiB");
assert.equal(BytesFitScale(byteUnits[8])(1.450711e25), "12.00 YiB");
});
});
describe("HexStringToInt64String", () => {
it("converts hex to int64", () => {
expect(HexStringToInt64String("af6ade04cbbc1c95")).toBe(
"12640159416348056725",
);
expect(HexStringToInt64String("fb9111f22f2213b7")).toBe(
"18127289707013477303",
);
});
});
});
| pkg/ui/workspaces/cluster-ui/src/util/format.spec.ts | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.00017826688417699188,
0.0001720298023428768,
0.0001674551167525351,
0.0001715664693620056,
0.0000034452816635166528
] |
{
"id": 2,
"code_window": [
"\t\t\t\t}\n",
"\t\t\t\tif strings.Contains(s.Msg().StripMarkers(), visibleString) {\n",
"\t\t\t\t\tfound = true\n",
"\t\t\t\t}\n",
"\t\t\t\tif strings.Contains(s.Msg().StripMarkers(), string(server.TraceRedactedMarker)) {\n",
"\t\t\t\t\tfoundRedactedMarker = true\n",
"\t\t\t\t}\n",
"\t\t\t}\n",
"\t\t}\n",
"\n",
"\t\t// In both cases we don't expect to see the `TraceRedactedMarker`\n",
"\t\t// since that's only shown when the server is in an inconsistent\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 131
} | [128 0 0 1 144 0 0 4 240 155 191 191] | pkg/util/json/testdata/encoded/string_nonCharacterInUTF-8_U+1FFFF.json.bytes | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.00016948393022175878,
0.00016948393022175878,
0.00016948393022175878,
0.00016948393022175878,
0
] |
{
"id": 2,
"code_window": [
"\t\t\t\t}\n",
"\t\t\t\tif strings.Contains(s.Msg().StripMarkers(), visibleString) {\n",
"\t\t\t\t\tfound = true\n",
"\t\t\t\t}\n",
"\t\t\t\tif strings.Contains(s.Msg().StripMarkers(), string(server.TraceRedactedMarker)) {\n",
"\t\t\t\t\tfoundRedactedMarker = true\n",
"\t\t\t\t}\n",
"\t\t\t}\n",
"\t\t}\n",
"\n",
"\t\t// In both cases we don't expect to see the `TraceRedactedMarker`\n",
"\t\t// since that's only shown when the server is in an inconsistent\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 131
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package ssmemstorage
import (
"context"
"time"
"unsafe"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/execstats"
"github.com/cockroachdb/cockroach/pkg/sql/sqlstats"
"github.com/cockroachdb/cockroach/pkg/sql/sqlstats/insights"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/errors"
)
var (
// ErrMemoryPressure is returned from the Container when we have reached
// the memory limit allowed.
ErrMemoryPressure = errors.New("insufficient sql stats memory")
// ErrFingerprintLimitReached is returned from the Container when we have
// more fingerprints than the limit specified in the cluster setting.
ErrFingerprintLimitReached = errors.New("sql stats fingerprint limit reached")
// ErrExecStatsFingerprintFlushed is returned from the Container when the
// stats object for the fingerprint has been flushed to system table before
// the roachpb.ExecStats can be recorded.
ErrExecStatsFingerprintFlushed = errors.New("stmtStats flushed before execution stats can be recorded")
)
var timestampSize = int64(unsafe.Sizeof(time.Time{}))
var _ sqlstats.Writer = &Container{}
func getStatus(statementError error) insights.Statement_Status {
if statementError == nil {
return insights.Statement_Completed
}
return insights.Statement_Failed
}
// RecordStatement implements sqlstats.Writer interface.
// RecordStatement saves per-statement statistics.
//
// samplePlanDescription can be nil, as these are only sampled periodically
// per unique fingerprint.
// RecordStatement always returns a valid stmtFingerprintID corresponding to the given
// stmt regardless of whether the statement is actually recorded or not.
//
// If the statement is not actually recorded due to either:
// 1. the memory budget has been exceeded
// 2. the unique statement fingerprint limit has been exceeded
// an error is returned.
// Note: This error is only related to the operation of recording the statement
// statistics into in-memory structs. It is unrelated to the stmtErr in the
// arguments.
func (s *Container) RecordStatement(
ctx context.Context, key roachpb.StatementStatisticsKey, value sqlstats.RecordedStmtStats,
) (roachpb.StmtFingerprintID, error) {
createIfNonExistent := true
// If the statement is below the latency threshold, or stats aren't being
// recorded we don't need to create an entry in the stmts map for it. We do
// still need stmtFingerprintID for transaction level metrics tracking.
t := sqlstats.StatsCollectionLatencyThreshold.Get(&s.st.SV)
if !sqlstats.StmtStatsEnable.Get(&s.st.SV) || (t > 0 && t.Seconds() >= value.ServiceLatency) {
createIfNonExistent = false
}
// Get the statistics object.
stats, statementKey, stmtFingerprintID, created, throttled := s.getStatsForStmt(
key.Query,
key.ImplicitTxn,
key.Database,
key.Failed,
key.PlanHash,
key.TransactionFingerprintID,
createIfNonExistent,
)
// This means we have reached the limit of unique fingerprints. We don't
// record anything and abort the operation.
if throttled {
return stmtFingerprintID, ErrFingerprintLimitReached
}
// This statement was below the latency threshold or sql stats aren't being
// recorded. Either way, we don't need to record anything in the stats object
// for this statement, though we do need to return the statement fingerprint ID for
// transaction level metrics collection.
if !createIfNonExistent {
return stmtFingerprintID, nil
}
// Collect the per-statement statistics.
stats.mu.Lock()
defer stats.mu.Unlock()
stats.mu.data.Count++
if key.Failed {
stats.mu.data.SensitiveInfo.LastErr = value.StatementError.Error()
}
// Only update MostRecentPlanDescription if we sampled a new PlanDescription.
if value.Plan != nil {
stats.mu.data.SensitiveInfo.MostRecentPlanDescription = *value.Plan
stats.mu.data.SensitiveInfo.MostRecentPlanTimestamp = s.getTimeNow()
s.setLogicalPlanLastSampled(statementKey.sampledPlanKey, stats.mu.data.SensitiveInfo.MostRecentPlanTimestamp)
}
if value.AutoRetryCount == 0 {
stats.mu.data.FirstAttemptCount++
} else if int64(value.AutoRetryCount) > stats.mu.data.MaxRetries {
stats.mu.data.MaxRetries = int64(value.AutoRetryCount)
}
stats.mu.data.SQLType = value.StatementType.String()
stats.mu.data.NumRows.Record(stats.mu.data.Count, float64(value.RowsAffected))
stats.mu.data.ParseLat.Record(stats.mu.data.Count, value.ParseLatency)
stats.mu.data.PlanLat.Record(stats.mu.data.Count, value.PlanLatency)
stats.mu.data.RunLat.Record(stats.mu.data.Count, value.RunLatency)
stats.mu.data.ServiceLat.Record(stats.mu.data.Count, value.ServiceLatency)
stats.mu.data.OverheadLat.Record(stats.mu.data.Count, value.OverheadLatency)
stats.mu.data.BytesRead.Record(stats.mu.data.Count, float64(value.BytesRead))
stats.mu.data.RowsRead.Record(stats.mu.data.Count, float64(value.RowsRead))
stats.mu.data.RowsWritten.Record(stats.mu.data.Count, float64(value.RowsWritten))
stats.mu.data.LastExecTimestamp = s.getTimeNow()
stats.mu.data.Nodes = util.CombineUniqueInt64(stats.mu.data.Nodes, value.Nodes)
stats.mu.data.PlanGists = util.CombineUniqueString(stats.mu.data.PlanGists, []string{value.PlanGist})
stats.mu.data.IndexRecommendations = value.IndexRecommendations
// Note that some fields derived from tracing statements (such as
// BytesSentOverNetwork) are not updated here because they are collected
// on-demand.
// TODO(asubiotto): Record the aforementioned fields here when always-on
// tracing is a thing.
stats.mu.vectorized = key.Vec
stats.mu.distSQLUsed = key.DistSQL
stats.mu.fullScan = key.FullScan
stats.mu.database = key.Database
stats.mu.querySummary = key.QuerySummary
if created {
// stats size + stmtKey size + hash of the statementKey
estimatedMemoryAllocBytes := stats.sizeUnsafe() + statementKey.size() + 8
// We also account for the memory used for s.sampledPlanMetadataCache.
// timestamp size + key size + hash.
estimatedMemoryAllocBytes += timestampSize + statementKey.sampledPlanKey.size() + 8
s.mu.Lock()
defer s.mu.Unlock()
// If the monitor is nil, we do not track memory usage.
if s.mu.acc.Monitor() == nil {
return stats.ID, nil
}
// We attempt to account for all the memory we used. If we have exceeded our
// memory budget, delete the entry that we just created and report the error.
if err := s.mu.acc.Grow(ctx, estimatedMemoryAllocBytes); err != nil {
delete(s.mu.stmts, statementKey)
return stats.ID, ErrMemoryPressure
}
}
var autoRetryReason string
if value.AutoRetryReason != nil {
autoRetryReason = value.AutoRetryReason.Error()
}
var contention *time.Duration
if value.ExecStats != nil {
contention = &value.ExecStats.ContentionTime
}
s.insights.ObserveStatement(value.SessionID, &insights.Statement{
ID: value.StatementID,
FingerprintID: stmtFingerprintID,
LatencyInSeconds: value.ServiceLatency,
Query: value.Query,
Status: getStatus(value.StatementError),
StartTime: value.StartTime,
EndTime: value.EndTime,
FullScan: value.FullScan,
User: value.SessionData.User().Normalized(),
ApplicationName: value.SessionData.ApplicationName,
Database: value.SessionData.Database,
PlanGist: value.PlanGist,
Retries: int64(value.AutoRetryCount),
AutoRetryReason: autoRetryReason,
RowsRead: value.RowsRead,
RowsWritten: value.RowsWritten,
Nodes: value.Nodes,
Contention: contention,
IndexRecommendations: value.IndexRecommendations,
})
return stats.ID, nil
}
// RecordStatementExecStats implements sqlstats.Writer interface.
func (s *Container) RecordStatementExecStats(
key roachpb.StatementStatisticsKey, stats execstats.QueryLevelStats,
) error {
stmtStats, _, _, _, _ :=
s.getStatsForStmt(
key.Query,
key.ImplicitTxn,
key.Database,
key.Failed,
key.PlanHash,
key.TransactionFingerprintID,
false, /* createIfNotExists */
)
if stmtStats == nil {
return ErrExecStatsFingerprintFlushed
}
stmtStats.recordExecStats(stats)
return nil
}
// ShouldSaveLogicalPlanDesc implements sqlstats.Writer interface.
func (s *Container) ShouldSaveLogicalPlanDesc(
fingerprint string, implicitTxn bool, database string,
) bool {
lastSampled := s.getLogicalPlanLastSampled(sampledPlanKey{
stmtNoConstants: fingerprint,
implicitTxn: implicitTxn,
database: database,
})
return s.shouldSaveLogicalPlanDescription(lastSampled)
}
// RecordTransaction implements sqlstats.Writer interface and saves
// per-transaction statistics.
func (s *Container) RecordTransaction(
ctx context.Context, key roachpb.TransactionFingerprintID, value sqlstats.RecordedTxnStats,
) error {
s.recordTransactionHighLevelStats(value.TransactionTimeSec, value.Committed, value.ImplicitTxn)
if !sqlstats.TxnStatsEnable.Get(&s.st.SV) {
return nil
}
// Do not collect transaction statistics if the stats collection latency
// threshold is set, since our transaction UI relies on having stats for every
// statement in the transaction.
t := sqlstats.StatsCollectionLatencyThreshold.Get(&s.st.SV)
if t > 0 {
return nil
}
// Get the statistics object.
stats, created, throttled := s.getStatsForTxnWithKey(key, value.StatementFingerprintIDs, true /* createIfNonexistent */)
if throttled {
return ErrFingerprintLimitReached
}
// Collect the per-transaction statistics.
stats.mu.Lock()
defer stats.mu.Unlock()
// If we have created a new entry successfully, we check if we have reached
// the memory limit. If we have, then we delete the newly created entry and
// return the memory allocation error.
// If the entry is not created, this means we have reached the limit of unique
// fingerprints for this app. We also abort the operation and return an error.
if created {
estimatedMemAllocBytes :=
stats.sizeUnsafe() + key.Size() + 8 /* hash of transaction key */
s.mu.Lock()
// If the monitor is nil, we do not track memory usage.
if s.mu.acc.Monitor() != nil {
if err := s.mu.acc.Grow(ctx, estimatedMemAllocBytes); err != nil {
delete(s.mu.txns, key)
s.mu.Unlock()
return ErrMemoryPressure
}
}
s.mu.Unlock()
}
stats.mu.data.Count++
stats.mu.data.NumRows.Record(stats.mu.data.Count, float64(value.RowsAffected))
stats.mu.data.ServiceLat.Record(stats.mu.data.Count, value.ServiceLatency.Seconds())
stats.mu.data.RetryLat.Record(stats.mu.data.Count, value.RetryLatency.Seconds())
stats.mu.data.CommitLat.Record(stats.mu.data.Count, value.CommitLatency.Seconds())
if value.RetryCount > stats.mu.data.MaxRetries {
stats.mu.data.MaxRetries = value.RetryCount
}
stats.mu.data.RowsRead.Record(stats.mu.data.Count, float64(value.RowsRead))
stats.mu.data.RowsWritten.Record(stats.mu.data.Count, float64(value.RowsWritten))
stats.mu.data.BytesRead.Record(stats.mu.data.Count, float64(value.BytesRead))
if value.CollectedExecStats {
stats.mu.data.ExecStats.Count++
stats.mu.data.ExecStats.NetworkBytes.Record(stats.mu.data.ExecStats.Count, float64(value.ExecStats.NetworkBytesSent))
stats.mu.data.ExecStats.MaxMemUsage.Record(stats.mu.data.ExecStats.Count, float64(value.ExecStats.MaxMemUsage))
stats.mu.data.ExecStats.ContentionTime.Record(stats.mu.data.ExecStats.Count, value.ExecStats.ContentionTime.Seconds())
stats.mu.data.ExecStats.NetworkMessages.Record(stats.mu.data.ExecStats.Count, float64(value.ExecStats.NetworkMessages))
stats.mu.data.ExecStats.MaxDiskUsage.Record(stats.mu.data.ExecStats.Count, float64(value.ExecStats.MaxDiskUsage))
}
s.insights.ObserveTransaction(value.SessionID, &insights.Transaction{
ID: value.TransactionID,
FingerprintID: key,
UserPriority: value.Priority.String()})
return nil
}
func (s *Container) recordTransactionHighLevelStats(
transactionTimeSec float64, committed bool, implicit bool,
) {
if !sqlstats.TxnStatsEnable.Get(&s.st.SV) {
return
}
s.txnCounts.recordTransactionCounts(transactionTimeSec, committed, implicit)
}
| pkg/sql/sqlstats/ssmemstorage/ss_mem_writer.go | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.000574316072743386,
0.00018129432282876223,
0.00016237323870882392,
0.00016926736861933023,
0.00006957353616598994
] |
{
"id": 3,
"code_window": [
"\t\t\t// trace.\n",
"\t\t\trequire.True(t, found, \"did not see expected trace message '%q':\\n%s\",\n",
"\t\t\t\tvisibleString, trace)\n",
"\t\t\trequire.False(t, foundRedactedMarker, \"unexpectedly found '%q':\\n%s\",\n",
"\t\t\t\tstring(server.TraceRedactedMarker), trace)\n",
"\t\t} else {\n",
"\t\t\t// Otherwise, expect the opposite: not even safe information makes it through,\n",
"\t\t\t// because it gets replaced with foundRedactedMarker.\n",
"\t\t\trequire.False(t, found, \"unexpectedly saw message '%q':\\n%s\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 145
} | // Copyright 2021 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package kvtenantccl_test
import (
"context"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/redact"
"github.com/stretchr/testify/require"
)
// TestTenantTracesAreRedacted is an end-to-end version of
// `kvserver.TestMaybeRedactRecording`.
func TestTenantTracesAreRedacted(t *testing.T) {
defer leaktest.AfterTest(t)()
testutils.RunTrueAndFalse(t, "redactable", func(t *testing.T, redactable bool) {
testTenantTracesAreRedactedImpl(t, redactable)
})
}
const testStmt = "CREATE TABLE kv(k STRING PRIMARY KEY, v STRING)"
func testTenantTracesAreRedactedImpl(t *testing.T, redactable bool) {
defer log.Scope(t).Close(t)
ctx := context.Background()
const (
sensitiveString = "super-secret-stuff"
visibleString = "tenant-can-see-this"
)
recCh := make(chan tracingpb.Recording, 1)
args := base.TestServerArgs{
// Test hangs within a tenant. More investigation is required.
// Tracked with #76378.
DisableDefaultTestTenant: true,
Knobs: base.TestingKnobs{
Store: &kvserver.StoreTestingKnobs{
EvalKnobs: kvserverbase.BatchEvalTestingKnobs{
TestingEvalFilter: func(args kvserverbase.FilterArgs) *roachpb.Error {
log.Eventf(args.Ctx, "%v", sensitiveString)
log.Eventf(args.Ctx, "%v", redact.Safe(visibleString))
return nil
},
},
},
SQLExecutor: &sql.ExecutorTestingKnobs{
WithStatementTrace: func(trace tracingpb.Recording, stmt string) {
if stmt == testStmt {
recCh <- trace
}
},
},
},
}
s, db, _ := serverutils.StartServer(t, args)
if redactable {
runner := sqlutils.MakeSQLRunner(db)
runner.Exec(t, "SET CLUSTER SETTING trace.redactable.enabled = true")
}
defer db.Close()
defer s.Stopper().Stop(ctx)
// Queries from the system tenant will receive unredacted traces
// since the tracer will not have the redactable flag set.
t.Run("system-tenant", func(t *testing.T) {
runner := sqlutils.MakeSQLRunner(db)
runner.Exec(t, testStmt)
trace := <-recCh
require.NotEmpty(t, trace)
var found bool
for _, rs := range trace {
for _, s := range rs.Logs {
if strings.Contains(s.Msg().StripMarkers(), sensitiveString) {
found = true
}
}
}
require.True(t, found, "did not find '%q' in trace:\n%s",
sensitiveString, trace,
)
})
t.Run("regular-tenant", func(t *testing.T) {
_, tenDB := serverutils.StartTenant(t, s, base.TestTenantArgs{
TenantID: roachpb.MakeTenantID(security.EmbeddedTenantIDs()[0]),
TestingKnobs: args.Knobs,
})
defer tenDB.Close()
runner := sqlutils.MakeSQLRunner(tenDB)
runner.Exec(t, testStmt)
trace := <-recCh
require.NotEmpty(t, trace)
var found bool
var foundRedactedMarker bool
for _, rs := range trace {
for _, s := range rs.Logs {
if strings.Contains(s.Msg().StripMarkers(), sensitiveString) {
t.Fatalf(
"trace for tenant contained KV-level trace message '%q':\n%s",
sensitiveString, trace,
)
}
if strings.Contains(s.Msg().StripMarkers(), visibleString) {
found = true
}
if strings.Contains(s.Msg().StripMarkers(), string(server.TraceRedactedMarker)) {
foundRedactedMarker = true
}
}
}
// In both cases we don't expect to see the `TraceRedactedMarker`
// since that's only shown when the server is in an inconsistent
// state or if there's a version mismatch between client and server.
if redactable {
// If redaction was on, we expect the tenant to see safe information in its
// trace.
require.True(t, found, "did not see expected trace message '%q':\n%s",
visibleString, trace)
require.False(t, foundRedactedMarker, "unexpectedly found '%q':\n%s",
string(server.TraceRedactedMarker), trace)
} else {
// Otherwise, expect the opposite: not even safe information makes it through,
// because it gets replaced with foundRedactedMarker.
require.False(t, found, "unexpectedly saw message '%q':\n%s",
visibleString, trace)
require.False(t, foundRedactedMarker, "unexpectedly found '%q':\n%s",
string(server.TraceRedactedMarker), trace)
}
})
}
| pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.9978121519088745,
0.06851311028003693,
0.00016575500194448978,
0.0005143500166013837,
0.24050608277320862
] |
{
"id": 3,
"code_window": [
"\t\t\t// trace.\n",
"\t\t\trequire.True(t, found, \"did not see expected trace message '%q':\\n%s\",\n",
"\t\t\t\tvisibleString, trace)\n",
"\t\t\trequire.False(t, foundRedactedMarker, \"unexpectedly found '%q':\\n%s\",\n",
"\t\t\t\tstring(server.TraceRedactedMarker), trace)\n",
"\t\t} else {\n",
"\t\t\t// Otherwise, expect the opposite: not even safe information makes it through,\n",
"\t\t\t// because it gets replaced with foundRedactedMarker.\n",
"\t\t\trequire.False(t, found, \"unexpectedly saw message '%q':\\n%s\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 145
} | SELECT CAST('{"a": "b"}'::JSONB AS STRING)
| pkg/sql/sem/tree/testdata/pretty/21.sql | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.00016520700592081994,
0.00016520700592081994,
0.00016520700592081994,
0.00016520700592081994,
0
] |
{
"id": 3,
"code_window": [
"\t\t\t// trace.\n",
"\t\t\trequire.True(t, found, \"did not see expected trace message '%q':\\n%s\",\n",
"\t\t\t\tvisibleString, trace)\n",
"\t\t\trequire.False(t, foundRedactedMarker, \"unexpectedly found '%q':\\n%s\",\n",
"\t\t\t\tstring(server.TraceRedactedMarker), trace)\n",
"\t\t} else {\n",
"\t\t\t// Otherwise, expect the opposite: not even safe information makes it through,\n",
"\t\t\t// because it gets replaced with foundRedactedMarker.\n",
"\t\t\trequire.False(t, found, \"unexpectedly saw message '%q':\\n%s\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 145
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import React from "react";
import "./styles.styl";
interface IChipProps {
title: string;
type?: "green" | "lightgreen" | "grey" | "blue" | "lightblue" | "yellow";
}
export const Chip: React.SFC<IChipProps> = ({ title, type }) => (
<span className={`Chip Chip--${type}`}>{title}</span>
);
| pkg/ui/workspaces/db-console/src/views/app/components/chip/index.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.00017876406491268426,
0.0001741617452353239,
0.00016914153820835054,
0.0001745796180330217,
0.0000039394772102241404
] |
{
"id": 3,
"code_window": [
"\t\t\t// trace.\n",
"\t\t\trequire.True(t, found, \"did not see expected trace message '%q':\\n%s\",\n",
"\t\t\t\tvisibleString, trace)\n",
"\t\t\trequire.False(t, foundRedactedMarker, \"unexpectedly found '%q':\\n%s\",\n",
"\t\t\t\tstring(server.TraceRedactedMarker), trace)\n",
"\t\t} else {\n",
"\t\t\t// Otherwise, expect the opposite: not even safe information makes it through,\n",
"\t\t\t// because it gets replaced with foundRedactedMarker.\n",
"\t\t\trequire.False(t, found, \"unexpectedly saw message '%q':\\n%s\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 145
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tests
var asyncpgBlocklists = blocklistsForVersion{
{"v21.2", "asyncpgBlocklist21_2", asyncpgBlocklist21_2, "asyncpgIgnoreList21_2", asyncpgIgnoreList21_2},
{"v22.1", "asyncpgBlocklist22_1", asyncpgBlocklist22_1, "asyncpgIgnoreList22_1", asyncpgIgnoreList22_1},
{"v22.2", "asyncpgBlocklist22_2", asyncpgBlocklist22_2, "asyncpgIgnoreList22_2", asyncpgIgnoreList22_2},
}
// These are lists of known asyncpg test errors and failures.
// When the asyncpg test suite is run, the results are compared to this list.
// Any failed test that is on this list is reported as FAIL - expected.
// Any failed test that is not on this list is reported as FAIL - unexpected.
//
// Please keep these lists alphabetized for easy diffing.
// After a failed run, an updated version of this blocklist should be available
// in the test log.
var asyncpgBlocklist22_2 = asyncpgBlocklist22_1
var asyncpgBlocklist22_1 = blocklist{
"test_cache_invalidation.TestCacheInvalidation.test_prepare_cache_invalidation_in_pool": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_prepare_cache_invalidation_in_transaction": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_prepare_cache_invalidation_silent": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_prepared_type_cache_invalidation": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_type_cache_invalidation_in_cancelled_transaction": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_type_cache_invalidation_in_pool": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_type_cache_invalidation_in_transaction": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_type_cache_invalidation_on_change_attr": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_type_cache_invalidation_on_drop_type_attr": "unknown",
"test_codecs.TestCodecs.test_array_with_custom_json_text_codec": "unknown",
"test_codecs.TestCodecs.test_composites_in_arrays": "unknown",
"test_codecs.TestCodecs.test_enum": "unknown",
"test_codecs.TestCodecs.test_enum_and_range": "unknown",
"test_codecs.TestCodecs.test_enum_function_return": "unknown",
"test_codecs.TestCodecs.test_enum_in_array": "unknown",
"test_codecs.TestCodecs.test_enum_in_composite": "unknown",
"test_codecs.TestCodecs.test_invalid_input": "unknown",
"test_codecs.TestCodecs.test_relacl_array_type": "unknown",
"test_codecs.TestCodecs.test_timetz_encoding": "unknown",
"test_codecs.TestCodecs.test_unhandled_type_fallback": "unknown",
"test_codecs.TestCodecs.test_unknown_type_text_fallback": "unknown",
"test_codecs.TestCodecs.test_void": "unknown",
"test_connect.TestSettings.test_get_settings_01": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_basics": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_cancellation_explicit": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_cancellation_on_sink_error": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_cancellation_while_waiting_for_data": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_timeout_1": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_timeout_2": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_to_path": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_to_path_like": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_to_sink": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_with_args": "unknown",
"test_copy.TestCopyFrom.test_copy_from_table_basics": "unknown",
"test_copy.TestCopyFrom.test_copy_from_table_large_rows": "unknown",
"test_copy.TestCopyTo.test_copy_records_to_table_1": "unknown",
"test_copy.TestCopyTo.test_copy_records_to_table_async": "unknown",
"test_copy.TestCopyTo.test_copy_to_table_basics": "unknown",
"test_cursor.TestCursor.test_cursor_02": "unknown",
"test_cursor.TestCursor.test_cursor_04": "unknown",
"test_cursor.TestIterableCursor.test_cursor_iterable_02": "unknown",
"test_exceptions.TestExceptions.test_exceptions_str": "unknown",
"test_exceptions.TestExceptions.test_exceptions_unpacking": "unknown",
"test_execute.TestExecuteMany.test_executemany_client_failure_in_transaction": "unknown",
"test_execute.TestExecuteMany.test_executemany_client_server_failure_conflict": "unknown",
"test_execute.TestExecuteMany.test_executemany_prepare": "unknown",
"test_execute.TestExecuteMany.test_executemany_server_failure": "unknown",
"test_execute.TestExecuteMany.test_executemany_server_failure_after_writes": "unknown",
"test_execute.TestExecuteMany.test_executemany_server_failure_during_writes": "unknown",
"test_execute.TestExecuteMany.test_executemany_timeout": "unknown",
"test_execute.TestExecuteMany.test_executemany_timeout_flow_control": "unknown",
"test_execute.TestExecuteScript.test_execute_script_3": "unknown",
"test_execute.TestExecuteScript.test_execute_script_check_transactionality": "unknown",
"test_introspection.TestIntrospection.test_introspection_no_stmt_cache_01": "unknown",
"test_introspection.TestIntrospection.test_introspection_no_stmt_cache_02": "unknown",
"test_introspection.TestIntrospection.test_introspection_no_stmt_cache_03": "unknown",
"test_introspection.TestIntrospection.test_introspection_on_large_db": "unknown",
"test_introspection.TestIntrospection.test_introspection_retries_after_cache_bust": "unknown",
"test_introspection.TestIntrospection.test_introspection_sticks_for_ps": "unknown",
"test_listeners.TestListeners.test_listen_01": "unknown",
"test_listeners.TestListeners.test_listen_02": "unknown",
"test_listeners.TestListeners.test_listen_notletters": "unknown",
"test_listeners.TestLogListeners.test_log_listener_01": "unknown",
"test_listeners.TestLogListeners.test_log_listener_02": "unknown",
"test_listeners.TestLogListeners.test_log_listener_03": "unknown",
"test_pool.TestPool.test_pool_remote_close": "unknown",
"test_prepare.TestPrepare.test_prepare_08_big_result": "unknown",
"test_prepare.TestPrepare.test_prepare_09_raise_error": "unknown",
"test_prepare.TestPrepare.test_prepare_14_explain": "unknown",
"test_prepare.TestPrepare.test_prepare_16_command_result": "unknown",
"test_prepare.TestPrepare.test_prepare_19_concurrent_calls": "unknown",
"test_prepare.TestPrepare.test_prepare_22_empty": "unknown",
"test_prepare.TestPrepare.test_prepare_28_max_args": "unknown",
"test_prepare.TestPrepare.test_prepare_31_pgbouncer_note": "unknown",
"test_prepare.TestPrepare.test_prepare_statement_invalid": "unknown",
"test_record.TestRecord.test_record_subclass_01": "unknown",
"test_record.TestRecord.test_record_subclass_02": "unknown",
"test_record.TestRecord.test_record_subclass_03": "unknown",
"test_record.TestRecord.test_record_subclass_04": "unknown",
"test_utils.TestUtils.test_mogrify_multiple": "unknown",
"test_utils.TestUtils.test_mogrify_simple": "unknown",
}
var asyncpgBlocklist21_2 = blocklist{
"test_cache_invalidation.TestCacheInvalidation.test_prepare_cache_invalidation_in_pool": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_prepare_cache_invalidation_in_transaction": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_prepare_cache_invalidation_silent": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_prepared_type_cache_invalidation": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_type_cache_invalidation_in_cancelled_transaction": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_type_cache_invalidation_in_pool": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_type_cache_invalidation_in_transaction": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_type_cache_invalidation_on_change_attr": "unknown",
"test_cache_invalidation.TestCacheInvalidation.test_type_cache_invalidation_on_drop_type_attr": "unknown",
"test_cancellation.TestCancellation.test_cancellation_03": "unknown",
"test_codecs.TestCodecs.test_array_with_custom_json_text_codec": "unknown",
"test_codecs.TestCodecs.test_composites_in_arrays": "unknown",
"test_codecs.TestCodecs.test_enum": "unknown",
"test_codecs.TestCodecs.test_enum_and_range": "unknown",
"test_codecs.TestCodecs.test_enum_function_return": "unknown",
"test_codecs.TestCodecs.test_enum_in_array": "unknown",
"test_codecs.TestCodecs.test_enum_in_composite": "unknown",
"test_codecs.TestCodecs.test_invalid_input": "unknown",
"test_codecs.TestCodecs.test_relacl_array_type": "unknown",
"test_codecs.TestCodecs.test_timetz_encoding": "unknown",
"test_codecs.TestCodecs.test_unhandled_type_fallback": "unknown",
"test_codecs.TestCodecs.test_unknown_type_text_fallback": "unknown",
"test_codecs.TestCodecs.test_void": "unknown",
"test_connect.TestSettings.test_get_settings_01": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_basics": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_cancellation_explicit": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_cancellation_on_sink_error": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_cancellation_while_waiting_for_data": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_timeout_1": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_timeout_2": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_to_path": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_to_path_like": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_to_sink": "unknown",
"test_copy.TestCopyFrom.test_copy_from_query_with_args": "unknown",
"test_copy.TestCopyFrom.test_copy_from_table_basics": "unknown",
"test_copy.TestCopyFrom.test_copy_from_table_large_rows": "unknown",
"test_copy.TestCopyTo.test_copy_records_to_table_1": "unknown",
"test_copy.TestCopyTo.test_copy_records_to_table_async": "unknown",
"test_copy.TestCopyTo.test_copy_to_table_basics": "unknown",
"test_cursor.TestCursor.test_cursor_02": "unknown",
"test_cursor.TestCursor.test_cursor_04": "unknown",
"test_cursor.TestIterableCursor.test_cursor_iterable_02": "unknown",
"test_exceptions.TestExceptions.test_exceptions_str": "unknown",
"test_exceptions.TestExceptions.test_exceptions_unpacking": "unknown",
"test_execute.TestExecuteMany.test_executemany_client_failure_after_writes": "unknown",
"test_execute.TestExecuteMany.test_executemany_client_failure_in_transaction": "unknown",
"test_execute.TestExecuteMany.test_executemany_client_server_failure_conflict": "unknown",
"test_execute.TestExecuteMany.test_executemany_prepare": "unknown",
"test_execute.TestExecuteMany.test_executemany_server_failure": "unknown",
"test_execute.TestExecuteMany.test_executemany_server_failure_after_writes": "unknown",
"test_execute.TestExecuteMany.test_executemany_server_failure_during_writes": "unknown",
"test_execute.TestExecuteMany.test_executemany_timeout": "unknown",
"test_execute.TestExecuteMany.test_executemany_timeout_flow_control": "unknown",
"test_execute.TestExecuteScript.test_execute_script_3": "unknown",
"test_execute.TestExecuteScript.test_execute_script_check_transactionality": "unknown",
"test_execute.TestExecuteScript.test_execute_script_interrupted_close": "unknown",
"test_introspection.TestIntrospection.test_introspection_no_stmt_cache_01": "unknown",
"test_introspection.TestIntrospection.test_introspection_no_stmt_cache_02": "unknown",
"test_introspection.TestIntrospection.test_introspection_no_stmt_cache_03": "unknown",
"test_introspection.TestIntrospection.test_introspection_on_large_db": "unknown",
"test_introspection.TestIntrospection.test_introspection_retries_after_cache_bust": "unknown",
"test_introspection.TestIntrospection.test_introspection_sticks_for_ps": "unknown",
"test_listeners.TestListeners.test_listen_01": "unknown",
"test_listeners.TestListeners.test_listen_02": "unknown",
"test_listeners.TestListeners.test_listen_notletters": "unknown",
"test_listeners.TestLogListeners.test_log_listener_01": "unknown",
"test_listeners.TestLogListeners.test_log_listener_02": "unknown",
"test_listeners.TestLogListeners.test_log_listener_03": "unknown",
"test_pool.TestPool.test_pool_handles_query_cancel_in_release": "unknown",
"test_pool.TestPool.test_pool_handles_task_cancel_in_release": "unknown",
"test_pool.TestPool.test_pool_init_race": "unknown",
"test_pool.TestPool.test_pool_init_run_until_complete": "unknown",
"test_pool.TestPool.test_pool_remote_close": "unknown",
"test_prepare.TestPrepare.test_prepare_06_interrupted_close": "unknown",
"test_prepare.TestPrepare.test_prepare_08_big_result": "unknown",
"test_prepare.TestPrepare.test_prepare_09_raise_error": "unknown",
"test_prepare.TestPrepare.test_prepare_14_explain": "unknown",
"test_prepare.TestPrepare.test_prepare_16_command_result": "unknown",
"test_prepare.TestPrepare.test_prepare_19_concurrent_calls": "unknown",
"test_prepare.TestPrepare.test_prepare_22_empty": "unknown",
"test_prepare.TestPrepare.test_prepare_28_max_args": "unknown",
"test_prepare.TestPrepare.test_prepare_31_pgbouncer_note": "unknown",
"test_prepare.TestPrepare.test_prepare_statement_invalid": "unknown",
"test_record.TestRecord.test_record_subclass_01": "unknown",
"test_record.TestRecord.test_record_subclass_02": "unknown",
"test_record.TestRecord.test_record_subclass_03": "unknown",
"test_record.TestRecord.test_record_subclass_04": "unknown",
"test_timeout.TestConnectionCommandTimeout.test_command_timeout_01": "unknown",
"test_timeout.TestTimeout.test_timeout_04": "unknown",
"test_timeout.TestTimeout.test_timeout_06": "unknown",
"test_utils.TestUtils.test_mogrify_multiple": "unknown",
"test_utils.TestUtils.test_mogrify_simple": "unknown",
}
var asyncpgIgnoreList22_2 = asyncpgIgnoreList22_1
var asyncpgIgnoreList22_1 = asyncpgIgnoreList21_2
var asyncpgIgnoreList21_2 = blocklist{}
| pkg/cmd/roachtest/tests/asyncpg_blocklist.go | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.0001790593669284135,
0.00016844368656165898,
0.00016353953105863184,
0.00016806999337859452,
0.000003372206947460654
] |
{
"id": 4,
"code_window": [
"\t\t\t// Otherwise, expect the opposite: not even safe information makes it through,\n",
"\t\t\t// because it gets replaced with foundRedactedMarker.\n",
"\t\t\trequire.False(t, found, \"unexpectedly saw message '%q':\\n%s\",\n",
"\t\t\t\tvisibleString, trace)\n",
"\t\t\trequire.False(t, foundRedactedMarker, \"unexpectedly found '%q':\\n%s\",\n",
"\t\t\t\tstring(server.TraceRedactedMarker), trace)\n",
"\t\t}\n",
"\t})\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 152
} | // Copyright 2021 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package kvtenantccl_test
import (
"context"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/redact"
"github.com/stretchr/testify/require"
)
// TestTenantTracesAreRedacted is an end-to-end version of
// `kvserver.TestMaybeRedactRecording`.
func TestTenantTracesAreRedacted(t *testing.T) {
defer leaktest.AfterTest(t)()
testutils.RunTrueAndFalse(t, "redactable", func(t *testing.T, redactable bool) {
testTenantTracesAreRedactedImpl(t, redactable)
})
}
const testStmt = "CREATE TABLE kv(k STRING PRIMARY KEY, v STRING)"
func testTenantTracesAreRedactedImpl(t *testing.T, redactable bool) {
defer log.Scope(t).Close(t)
ctx := context.Background()
const (
sensitiveString = "super-secret-stuff"
visibleString = "tenant-can-see-this"
)
recCh := make(chan tracingpb.Recording, 1)
args := base.TestServerArgs{
// Test hangs within a tenant. More investigation is required.
// Tracked with #76378.
DisableDefaultTestTenant: true,
Knobs: base.TestingKnobs{
Store: &kvserver.StoreTestingKnobs{
EvalKnobs: kvserverbase.BatchEvalTestingKnobs{
TestingEvalFilter: func(args kvserverbase.FilterArgs) *roachpb.Error {
log.Eventf(args.Ctx, "%v", sensitiveString)
log.Eventf(args.Ctx, "%v", redact.Safe(visibleString))
return nil
},
},
},
SQLExecutor: &sql.ExecutorTestingKnobs{
WithStatementTrace: func(trace tracingpb.Recording, stmt string) {
if stmt == testStmt {
recCh <- trace
}
},
},
},
}
s, db, _ := serverutils.StartServer(t, args)
if redactable {
runner := sqlutils.MakeSQLRunner(db)
runner.Exec(t, "SET CLUSTER SETTING trace.redactable.enabled = true")
}
defer db.Close()
defer s.Stopper().Stop(ctx)
// Queries from the system tenant will receive unredacted traces
// since the tracer will not have the redactable flag set.
t.Run("system-tenant", func(t *testing.T) {
runner := sqlutils.MakeSQLRunner(db)
runner.Exec(t, testStmt)
trace := <-recCh
require.NotEmpty(t, trace)
var found bool
for _, rs := range trace {
for _, s := range rs.Logs {
if strings.Contains(s.Msg().StripMarkers(), sensitiveString) {
found = true
}
}
}
require.True(t, found, "did not find '%q' in trace:\n%s",
sensitiveString, trace,
)
})
t.Run("regular-tenant", func(t *testing.T) {
_, tenDB := serverutils.StartTenant(t, s, base.TestTenantArgs{
TenantID: roachpb.MakeTenantID(security.EmbeddedTenantIDs()[0]),
TestingKnobs: args.Knobs,
})
defer tenDB.Close()
runner := sqlutils.MakeSQLRunner(tenDB)
runner.Exec(t, testStmt)
trace := <-recCh
require.NotEmpty(t, trace)
var found bool
var foundRedactedMarker bool
for _, rs := range trace {
for _, s := range rs.Logs {
if strings.Contains(s.Msg().StripMarkers(), sensitiveString) {
t.Fatalf(
"trace for tenant contained KV-level trace message '%q':\n%s",
sensitiveString, trace,
)
}
if strings.Contains(s.Msg().StripMarkers(), visibleString) {
found = true
}
if strings.Contains(s.Msg().StripMarkers(), string(server.TraceRedactedMarker)) {
foundRedactedMarker = true
}
}
}
// In both cases we don't expect to see the `TraceRedactedMarker`
// since that's only shown when the server is in an inconsistent
// state or if there's a version mismatch between client and server.
if redactable {
// If redaction was on, we expect the tenant to see safe information in its
// trace.
require.True(t, found, "did not see expected trace message '%q':\n%s",
visibleString, trace)
require.False(t, foundRedactedMarker, "unexpectedly found '%q':\n%s",
string(server.TraceRedactedMarker), trace)
} else {
// Otherwise, expect the opposite: not even safe information makes it through,
// because it gets replaced with foundRedactedMarker.
require.False(t, found, "unexpectedly saw message '%q':\n%s",
visibleString, trace)
require.False(t, foundRedactedMarker, "unexpectedly found '%q':\n%s",
string(server.TraceRedactedMarker), trace)
}
})
}
| pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.9948081374168396,
0.08538738638162613,
0.0001640565023990348,
0.0005225210334174335,
0.24828964471817017
] |
{
"id": 4,
"code_window": [
"\t\t\t// Otherwise, expect the opposite: not even safe information makes it through,\n",
"\t\t\t// because it gets replaced with foundRedactedMarker.\n",
"\t\t\trequire.False(t, found, \"unexpectedly saw message '%q':\\n%s\",\n",
"\t\t\t\tvisibleString, trace)\n",
"\t\t\trequire.False(t, foundRedactedMarker, \"unexpectedly found '%q':\\n%s\",\n",
"\t\t\t\tstring(server.TraceRedactedMarker), trace)\n",
"\t\t}\n",
"\t})\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 152
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
/* global module, __dirname, process */
/* eslint-disable @typescript-eslint/no-var-requires */
const path = require("path");
const { pathsToModuleNameMapper } = require("ts-jest");
const { compilerOptions } = require("./tsconfig.json");
const isBazel = !!process.env.BAZEL_TARGET;
const bazelOnlySettings = {
haste: {
// Platforms that include a POSIX-compatible `find` binary default to using it for test file
// discovery, but jest-haste-map's invocation of `find` doesn't include `-L` when node was
// started with `--preserve-symlinks`. This causes Jest to be unable to find test files when run
// via Bazel, which uses readonly symlinks for its build sandbox and launches node with
// `--presrve-symlinks`. Use jest's pure-node implementation instead, which respects
// `--preserve-symlinks`.
forceNodeFilesystemAPI: true,
enableSymlinks: true,
},
watchman: false,
};
/*
* For a detailed explanation regarding each configuration property, visit:
* https://jestjs.io/docs/configuration
*/
module.exports = {
// All imported modules in your tests should be mocked automatically
// automock: false,
// Stop running tests after `n` failures
// bail: 0,
// The directory where Jest should store its cached dependency information
// cacheDirectory: "/private/var/folders/hs/c4079dx544nfw1z_951l0nc00000gq/T/jest_dz",
// Automatically clear mock calls, instances, contexts and results before every test
clearMocks: true,
// Indicates whether the coverage information should be collected while executing the test
// collectCoverage: false,
// An array of glob patterns indicating a set of files for which coverage information should be collected
// collectCoverageFrom: undefined,
// The directory where Jest should output its coverage files
// coverageDirectory: undefined,
// An array of regexp pattern strings used to skip coverage collection
// coveragePathIgnorePatterns: [
// "/node_modules/"
// ],
// Indicates which provider should be used to instrument code for coverage
coverageProvider: "v8",
// A list of reporter names that Jest uses when writing coverage reports
// coverageReporters: [
// "json",
// "text",
// "lcov",
// "clover"
// ],
// An object that configures minimum threshold enforcement for coverage results
// coverageThreshold: undefined,
// A path to a custom dependency extractor
// dependencyExtractor: undefined,
// Make calling deprecated APIs throw helpful error messages
// errorOnDeprecated: false,
// The default configuration for fake timers
// fakeTimers: {
// "enableGlobally": false
// },
// Force coverage collection from ignored files using an array of glob patterns
// forceCoverageMatch: [],
// A path to a module which exports an async function that is triggered once before all test suites
// globalSetup: undefined,
// A path to a module which exports an async function that is triggered once after all test suites
// globalTeardown: undefined,
// A set of global variables that need to be available in all test environments
globals: {
"ts-jest": {
tsconfig: path.join(__dirname, "./tsconfig.linting.json"),
},
},
// The maximum amount of workers used to run your tests. Can be specified as % or a number. E.g. maxWorkers: 10% will use 10% of your CPU amount + 1 as the maximum worker number. maxWorkers: 2 will use a maximum of 2 workers.
// maxWorkers: "50%",
// An array of directory names to be searched recursively up from the requiring module's location
moduleDirectories: ["node_modules"],
// An array of file extensions your modules use
moduleFileExtensions: ["ts", "tsx", "js", "jsx", "json", "node"],
// A map from regular expressions to module names or to arrays of module names that allow to stub out resources with a single module
moduleNameMapper: Object.assign(
{},
pathsToModuleNameMapper(
// The TypeScript compiler needs to know how to find Bazel-produced .d.ts
// files, but those overrides break Jest's module loader. Remove the
// @cockroachlabs entries from tsconfig.json's 'paths' object.
Object.fromEntries(
Object.entries(compilerOptions.paths).filter(([name, _paths]) => !name.includes("@cockroachlabs"))
), { prefix: "<rootDir>/" }),
{
"\\.(jpg|ico|jpeg|eot|otf|webp|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga|gif|png|svg)$":
"<rootDir>/src/test-utils/file.mock.js",
"\\.(css|scss|less|styl)$": "identity-obj-proxy",
"^react($|/.+)": "<rootDir>/node_modules/react$1",
},
),
// An alternative API to setting the NODE_PATH env variable, modulePaths is an array of absolute paths to additional locations to search when resolving modules.
modulePaths: ["<rootDir>/", ...module.paths],
// An array of regexp pattern strings, matched against all module paths before considered 'visible' to the module loader
// modulePathIgnorePatterns: [],
// Activates notifications for test results
// notify: false,
// An enum that specifies notification mode. Requires { notify: true }
// notifyMode: "failure-change",
// A preset that is used as a base for Jest's configuration
preset: "ts-jest",
// Run tests from one or more projects
// projects: undefined,
// Use this configuration option to add custom reporters to Jest
// reporters: [],
// Automatically reset mock state before every test
// resetMocks: false,
// Reset the module registry before running each individual test
// resetModules: false,
// A path to a custom resolver
// resolver: undefined,
// Automatically restore mock state and implementation before every test
// restoreMocks: false,
// The root directory that Jest should scan for tests and modules within
// rootDir: undefined,
// A list of paths to directories that Jest should use to search for files in
roots: ["<rootDir>/src", "<rootDir>/ccl"],
// Allows you to use a custom runner instead of Jest's default test runner
// runner: "jest-runner",
// The paths to modules that run some code to configure or set up the testing environment
// before each test.
// setupFiles: [],
// A list of paths to modules that run some code to configure or set up the testing framework
// before each test. These run after the test environment is setup for each test. This
setupFilesAfterEnv: [
"jest-canvas-mock",
"jest-enzyme",
"<rootDir>/src/setupTests.js",
],
// The number of seconds after which a test is considered as slow and reported as such in the results.
// slowTestThreshold: 5,
// A list of paths to snapshot serializer modules Jest should use for snapshot testing
// snapshotSerializers: [],
// The test environment that will be used for testing
testEnvironment: "jsdom",
// Options that will be passed to the testEnvironment
// testEnvironmentOptions: {},
// Adds a location field to test results
// testLocationInResults: false,
// The glob patterns Jest uses to detect test files
// testMatch: [
// "**/__tests__/**/*.[jt]s?(x)",
// "**/?(*.)+(spec|test).[tj]s?(x)"
// ],
// An array of regexp pattern strings that are matched against all test paths, matched tests are skipped
testPathIgnorePatterns: ["/node_modules/"],
// The regexp pattern or array of patterns that Jest uses to detect test files
// testRegex: [],
// This option allows the use of a custom results processor
// testResultsProcessor: undefined,
// This option allows use of a custom test runner
// testRunner: "jest-circus/runner",
// A map from regular expressions to paths to transformers
transform: {
"^.+\\.tsx?$": "ts-jest",
"^.+\\.js?$": [
"babel-jest",
{ configFile: path.resolve(__dirname, "babel.config.js") },
],
},
// An array of regexp pattern strings that are matched against all source file paths, matched files will skip transformation
transformIgnorePatterns: [
"/node_module\\/@cockroachlabs\\/crdb-protobuf-client/",
"/node_module\\/@cockroachlabs\\/cluster-ui/",
"/cluster-ui\\/dist\\/js\\/main.js$/",
],
// An array of regexp pattern strings that are matched against all modules before the module loader will automatically return a mock for them
// unmockedModulePathPatterns: undefined,
// Indicates whether each individual test should be reported during the run
// verbose: undefined,
// An array of regexp patterns that are matched against all source file paths before re-running tests in watch mode
// watchPathIgnorePatterns: [],
// Whether to use watchman for file crawling
// watchman: true,
...(isBazel ? bazelOnlySettings : {}),
};
| pkg/ui/workspaces/db-console/jest.config.js | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.00017825041140895337,
0.00017247750656679273,
0.000166044948855415,
0.000173119391547516,
0.0000031333036076830467
] |
{
"id": 4,
"code_window": [
"\t\t\t// Otherwise, expect the opposite: not even safe information makes it through,\n",
"\t\t\t// because it gets replaced with foundRedactedMarker.\n",
"\t\t\trequire.False(t, found, \"unexpectedly saw message '%q':\\n%s\",\n",
"\t\t\t\tvisibleString, trace)\n",
"\t\t\trequire.False(t, foundRedactedMarker, \"unexpectedly found '%q':\\n%s\",\n",
"\t\t\t\tstring(server.TraceRedactedMarker), trace)\n",
"\t\t}\n",
"\t})\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 152
} | rows,num_cols,estimated,actual
10,2,16.230000,0.011774
1000000,2,1376245.592416,1.651296
2000000,2,3064990.279862,2.977612
3000000,2,5066238.092341,4.109804
4000000,2,7379989.029850,5.443839
5000000,2,10006243.092391,6.691528
6000000,2,12945000.279962,7.883590
7000000,2,15540004.030000,9.237124
8000000,2,17760004.030000,10.576521
9000000,2,19980004.030000,11.711999
10000000,2,22200004.030000,12.932723
11000000,2,24420004.030000,14.126060
12000000,2,26640004.030000,15.369439
10,3,16.430000,0.001015
1000000,3,1396245.592416,1.617916
2000000,3,3104990.279862,3.204402
3000000,3,5126238.092341,4.671913
4000000,3,7459989.029850,6.247394
5000000,3,10106243.092391,7.863606
6000000,3,13065000.279962,9.258101
7000000,3,15680004.030000,10.856210
8000000,3,17920004.030000,12.580753
9000000,3,20160004.030000,14.026172
10000000,3,22400004.030000,15.294526
11000000,3,24640004.030000,16.809612
12000000,3,26880004.030000,18.538041
10,4,16.630000,0.000980
1000000,4,1416245.592416,1.813130
2000000,4,3144990.279862,3.583648
3000000,4,5186238.092341,5.256810
4000000,4,7539989.029850,7.017376
5000000,4,10206243.092391,8.704136
6000000,4,13185000.279962,10.338006
7000000,4,15820004.030000,12.068274
8000000,4,18080004.030000,13.764568
9000000,4,20340004.030000,15.522189
10000000,4,22600004.030000,16.815492
11000000,4,24860004.030000,18.783499
12000000,4,27120004.030000,20.327555
| pkg/sql/opt/opbench/testdata/hash-group-by-lineitem.csv | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.0001756586425472051,
0.0001696630788501352,
0.00016570964362472296,
0.0001692562218522653,
0.000003372953187863459
] |
{
"id": 4,
"code_window": [
"\t\t\t// Otherwise, expect the opposite: not even safe information makes it through,\n",
"\t\t\t// because it gets replaced with foundRedactedMarker.\n",
"\t\t\trequire.False(t, found, \"unexpectedly saw message '%q':\\n%s\",\n",
"\t\t\t\tvisibleString, trace)\n",
"\t\t\trequire.False(t, foundRedactedMarker, \"unexpectedly found '%q':\\n%s\",\n",
"\t\t\t\tstring(server.TraceRedactedMarker), trace)\n",
"\t\t}\n",
"\t})\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go",
"type": "replace",
"edit_start_line_idx": 152
} | load("//build/bazelutil/unused_checker:unused.bzl", "get_x_data")
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "stateloader",
srcs = [
"initial.go",
"stateloader.go",
],
importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvserver/stateloader",
visibility = ["//visibility:public"],
deps = [
"//pkg/keys",
"//pkg/kv/kvserver/kvserverpb",
"//pkg/roachpb",
"//pkg/storage",
"//pkg/storage/enginepb",
"//pkg/util/hlc",
"//pkg/util/log",
"//pkg/util/protoutil",
"@com_github_cockroachdb_errors//:errors",
"@com_github_cockroachdb_redact//:redact",
"@io_etcd_go_etcd_raft_v3//raftpb",
],
)
go_test(
name = "stateloader_test",
size = "small",
srcs = ["initial_test.go"],
args = ["-test.timeout=55s"],
embed = [":stateloader"],
deps = [
"//pkg/roachpb",
"//pkg/storage",
"//pkg/testutils",
"//pkg/util/leaktest",
"//pkg/util/stop",
"@io_etcd_go_etcd_raft_v3//raftpb",
],
)
get_x_data(name = "get_x_data")
| pkg/kv/kvserver/stateloader/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.00018484483007341623,
0.00017490175378043205,
0.0001697352563496679,
0.00017398237832821906,
0.000005214921657170635
] |
{
"id": 5,
"code_window": [
"import (\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb\"\n",
"\t\"github.com/cockroachdb/redact\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/server/node_tenant.go",
"type": "replace",
"edit_start_line_idx": 15
} | // Copyright 2021 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package kvtenantccl_test
import (
"context"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/redact"
"github.com/stretchr/testify/require"
)
// TestTenantTracesAreRedacted is an end-to-end version of
// `kvserver.TestMaybeRedactRecording`.
func TestTenantTracesAreRedacted(t *testing.T) {
defer leaktest.AfterTest(t)()
testutils.RunTrueAndFalse(t, "redactable", func(t *testing.T, redactable bool) {
testTenantTracesAreRedactedImpl(t, redactable)
})
}
const testStmt = "CREATE TABLE kv(k STRING PRIMARY KEY, v STRING)"
func testTenantTracesAreRedactedImpl(t *testing.T, redactable bool) {
defer log.Scope(t).Close(t)
ctx := context.Background()
const (
sensitiveString = "super-secret-stuff"
visibleString = "tenant-can-see-this"
)
recCh := make(chan tracingpb.Recording, 1)
args := base.TestServerArgs{
// Test hangs within a tenant. More investigation is required.
// Tracked with #76378.
DisableDefaultTestTenant: true,
Knobs: base.TestingKnobs{
Store: &kvserver.StoreTestingKnobs{
EvalKnobs: kvserverbase.BatchEvalTestingKnobs{
TestingEvalFilter: func(args kvserverbase.FilterArgs) *roachpb.Error {
log.Eventf(args.Ctx, "%v", sensitiveString)
log.Eventf(args.Ctx, "%v", redact.Safe(visibleString))
return nil
},
},
},
SQLExecutor: &sql.ExecutorTestingKnobs{
WithStatementTrace: func(trace tracingpb.Recording, stmt string) {
if stmt == testStmt {
recCh <- trace
}
},
},
},
}
s, db, _ := serverutils.StartServer(t, args)
if redactable {
runner := sqlutils.MakeSQLRunner(db)
runner.Exec(t, "SET CLUSTER SETTING trace.redactable.enabled = true")
}
defer db.Close()
defer s.Stopper().Stop(ctx)
// Queries from the system tenant will receive unredacted traces
// since the tracer will not have the redactable flag set.
t.Run("system-tenant", func(t *testing.T) {
runner := sqlutils.MakeSQLRunner(db)
runner.Exec(t, testStmt)
trace := <-recCh
require.NotEmpty(t, trace)
var found bool
for _, rs := range trace {
for _, s := range rs.Logs {
if strings.Contains(s.Msg().StripMarkers(), sensitiveString) {
found = true
}
}
}
require.True(t, found, "did not find '%q' in trace:\n%s",
sensitiveString, trace,
)
})
t.Run("regular-tenant", func(t *testing.T) {
_, tenDB := serverutils.StartTenant(t, s, base.TestTenantArgs{
TenantID: roachpb.MakeTenantID(security.EmbeddedTenantIDs()[0]),
TestingKnobs: args.Knobs,
})
defer tenDB.Close()
runner := sqlutils.MakeSQLRunner(tenDB)
runner.Exec(t, testStmt)
trace := <-recCh
require.NotEmpty(t, trace)
var found bool
var foundRedactedMarker bool
for _, rs := range trace {
for _, s := range rs.Logs {
if strings.Contains(s.Msg().StripMarkers(), sensitiveString) {
t.Fatalf(
"trace for tenant contained KV-level trace message '%q':\n%s",
sensitiveString, trace,
)
}
if strings.Contains(s.Msg().StripMarkers(), visibleString) {
found = true
}
if strings.Contains(s.Msg().StripMarkers(), string(server.TraceRedactedMarker)) {
foundRedactedMarker = true
}
}
}
// In both cases we don't expect to see the `TraceRedactedMarker`
// since that's only shown when the server is in an inconsistent
// state or if there's a version mismatch between client and server.
if redactable {
// If redaction was on, we expect the tenant to see safe information in its
// trace.
require.True(t, found, "did not see expected trace message '%q':\n%s",
visibleString, trace)
require.False(t, foundRedactedMarker, "unexpectedly found '%q':\n%s",
string(server.TraceRedactedMarker), trace)
} else {
// Otherwise, expect the opposite: not even safe information makes it through,
// because it gets replaced with foundRedactedMarker.
require.False(t, found, "unexpectedly saw message '%q':\n%s",
visibleString, trace)
require.False(t, foundRedactedMarker, "unexpectedly found '%q':\n%s",
string(server.TraceRedactedMarker), trace)
}
})
}
| pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.018581537529826164,
0.0020715114660561085,
0.00016286819300148636,
0.0001689367782091722,
0.0051542785950005054
] |
{
"id": 5,
"code_window": [
"import (\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb\"\n",
"\t\"github.com/cockroachdb/redact\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/server/node_tenant.go",
"type": "replace",
"edit_start_line_idx": 15
} | // Code generated by TestPretty. DO NOT EDIT.
// GENERATED FILE DO NOT EDIT
1:
-
SELECT 1
| pkg/sql/sem/tree/testdata/pretty/1.ref.golden.short | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.0001746427151374519,
0.0001746427151374519,
0.0001746427151374519,
0.0001746427151374519,
0
] |
{
"id": 5,
"code_window": [
"import (\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb\"\n",
"\t\"github.com/cockroachdb/redact\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/server/node_tenant.go",
"type": "replace",
"edit_start_line_idx": 15
} | // Code generated by generate-staticcheck; DO NOT EDIT.
//go:build bazel
// +build bazel
package s1029
import (
util "github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/staticcheck"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/simple"
)
var Analyzer *analysis.Analyzer
func init() {
for _, analyzer := range simple.Analyzers {
if analyzer.Analyzer.Name == "S1029" {
Analyzer = analyzer.Analyzer
break
}
}
util.MungeAnalyzer(Analyzer)
}
| build/bazelutil/staticcheckanalyzers/s1029/analyzer.go | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.0016993442550301552,
0.0006812541396357119,
0.00017150102939922363,
0.0001729172799969092,
0.0007198986713774502
] |
{
"id": 5,
"code_window": [
"import (\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb\"\n",
"\t\"github.com/cockroachdb/redact\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/server/node_tenant.go",
"type": "replace",
"edit_start_line_idx": 15
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package execinfrapb
import (
"bytes"
"compress/zlib"
"encoding/base64"
"encoding/json"
"fmt"
"net/url"
"sort"
"strings"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/errors"
"github.com/dustin/go-humanize"
)
// DiagramFlags contains diagram settings.
type DiagramFlags struct {
// ShowInputTypes adds input type information.
ShowInputTypes bool
// MakeDeterministic resets all stats that can vary from run to run (like
// execution time), suitable for tests. See CompositeStats.MakeDeterministic.
MakeDeterministic bool
}
type diagramCellType interface {
// summary produces a title and an arbitrary number of lines that describe a
// "cell" in a diagram node (input sync, processor core, or output router).
summary() (title string, details []string)
}
func (ord *Ordering) diagramString() string {
var buf bytes.Buffer
for i, c := range ord.Columns {
if i > 0 {
buf.WriteByte(',')
}
fmt.Fprintf(&buf, "@%d", c.ColIdx+1)
if c.Direction == Ordering_Column_DESC {
buf.WriteByte('-')
} else {
buf.WriteByte('+')
}
}
return buf.String()
}
func colListStr(cols []uint32) string {
var buf bytes.Buffer
for i, c := range cols {
if i > 0 {
buf.WriteByte(',')
}
fmt.Fprintf(&buf, "@%d", c+1)
}
return buf.String()
}
// summary implements the diagramCellType interface.
func (*NoopCoreSpec) summary() (string, []string) {
return "No-op", []string{}
}
// summary implements the diagramCellType interface.
func (f *FiltererSpec) summary() (string, []string) {
return "Filterer", []string{
fmt.Sprintf("Filter: %s", f.Filter),
}
}
// summary implements the diagramCellType interface.
func (v *ValuesCoreSpec) summary() (string, []string) {
var bytes uint64
for _, b := range v.RawBytes {
bytes += uint64(len(b))
}
detail := fmt.Sprintf("%s (%d chunks)", humanize.IBytes(bytes), len(v.RawBytes))
return "Values", []string{detail}
}
// summary implements the diagramCellType interface.
func (a *AggregatorSpec) summary() (string, []string) {
details := make([]string, 0, len(a.Aggregations)+1)
if len(a.GroupCols) > 0 {
details = append(details, colListStr(a.GroupCols))
}
if len(a.OrderedGroupCols) > 0 {
details = append(details, fmt.Sprintf("Ordered: %s", colListStr(a.OrderedGroupCols)))
}
for _, agg := range a.Aggregations {
var buf bytes.Buffer
buf.WriteString(agg.Func.String())
buf.WriteByte('(')
if agg.Distinct {
buf.WriteString("DISTINCT ")
}
buf.WriteString(colListStr(agg.ColIdx))
buf.WriteByte(')')
if agg.FilterColIdx != nil {
fmt.Fprintf(&buf, " FILTER @%d", *agg.FilterColIdx+1)
}
details = append(details, buf.String())
}
return "Aggregator", details
}
func appendColumns(details []string, columns []descpb.IndexFetchSpec_Column) []string {
var b strings.Builder
b.WriteString("Columns:")
const wrapAt = 100
for i := range columns {
if i > 0 {
b.WriteByte(',')
}
name := columns[i].Name
if b.Len()+len(name)+1 > wrapAt {
details = append(details, b.String())
b.Reset()
}
b.WriteByte(' ')
b.WriteString(name)
}
details = append(details, b.String())
return details
}
// summary implements the diagramCellType interface.
func (tr *TableReaderSpec) summary() (string, []string) {
details := make([]string, 0, 3)
details = append(details, fmt.Sprintf("%s@%s", tr.FetchSpec.TableName, tr.FetchSpec.IndexName))
details = appendColumns(details, tr.FetchSpec.FetchedColumns)
if len(tr.Spans) > 0 {
// only show the first span
keyDirs := make([]encoding.Direction, len(tr.FetchSpec.KeyAndSuffixColumns))
for i := range keyDirs {
keyDirs[i] = encoding.Ascending
if tr.FetchSpec.KeyAndSuffixColumns[i].Direction == catpb.IndexColumn_DESC {
keyDirs[i] = encoding.Descending
}
}
var spanStr strings.Builder
spanStr.WriteString("Spans: ")
spanStr.WriteString(catalogkeys.PrettySpan(keyDirs, tr.Spans[0], 2))
if len(tr.Spans) > 1 {
spanStr.WriteString(fmt.Sprintf(" and %d other", len(tr.Spans)-1))
}
if len(tr.Spans) > 2 {
spanStr.WriteString("s") // pluralize the 'other'
}
details = append(details, spanStr.String())
}
return "TableReader", details
}
// summary implements the diagramCellType interface.
func (jr *JoinReaderSpec) summary() (string, []string) {
details := make([]string, 0, 5)
if jr.Type != descpb.InnerJoin {
details = append(details, joinTypeDetail(jr.Type))
}
details = append(details, fmt.Sprintf("%s@%s", jr.FetchSpec.TableName, jr.FetchSpec.IndexName))
if len(jr.LookupColumns) > 0 {
details = append(details, fmt.Sprintf("Lookup join on: %s", colListStr(jr.LookupColumns)))
}
if !jr.LookupExpr.Empty() {
details = append(details, fmt.Sprintf("Lookup join on: %s", jr.LookupExpr))
}
if !jr.RemoteLookupExpr.Empty() {
details = append(details, fmt.Sprintf("Remote lookup join on: %s", jr.RemoteLookupExpr))
}
if !jr.OnExpr.Empty() {
details = append(details, fmt.Sprintf("ON %s", jr.OnExpr))
}
if jr.LeftJoinWithPairedJoiner {
details = append(details, "second join in paired-join")
}
if jr.OutputGroupContinuationForLeftRow {
details = append(details, "first join in paired-join")
}
details = appendColumns(details, jr.FetchSpec.FetchedColumns)
return "JoinReader", details
}
func joinTypeDetail(joinType descpb.JoinType) string {
typeStr := strings.Replace(joinType.String(), "_", " ", -1)
if joinType == descpb.IntersectAllJoin || joinType == descpb.ExceptAllJoin {
return fmt.Sprintf("Type: %s", typeStr)
}
return fmt.Sprintf("Type: %s JOIN", typeStr)
}
// summary implements the diagramCellType interface.
func (hj *HashJoinerSpec) summary() (string, []string) {
name := "HashJoiner"
if hj.Type.IsSetOpJoin() {
name = "HashSetOp"
}
details := make([]string, 0, 4)
if hj.Type != descpb.InnerJoin {
details = append(details, joinTypeDetail(hj.Type))
}
if len(hj.LeftEqColumns) > 0 {
details = append(details, fmt.Sprintf(
"left(%s)=right(%s)",
colListStr(hj.LeftEqColumns), colListStr(hj.RightEqColumns),
))
}
if !hj.OnExpr.Empty() {
details = append(details, fmt.Sprintf("ON %s", hj.OnExpr))
}
return name, details
}
// summary implements the diagramCellType interface.
func (ifs *InvertedFiltererSpec) summary() (string, []string) {
name := "InvertedFilterer"
var b strings.Builder
for i := range ifs.InvertedExpr.SpansToRead {
if i > 0 {
fmt.Fprintf(&b, " and %d others", len(ifs.InvertedExpr.SpansToRead)-1)
break
} else {
fmt.Fprintf(&b, "%s", ifs.InvertedExpr.SpansToRead[i].String())
}
}
details := append([]string(nil), fmt.Sprintf(
"InvertedExpr on @%d: spans %s", ifs.InvertedColIdx, b.String()))
return name, details
}
func orderedJoinDetails(
joinType descpb.JoinType, left, right Ordering, onExpr Expression,
) []string {
details := make([]string, 0, 3)
if joinType != descpb.InnerJoin {
details = append(details, joinTypeDetail(joinType))
}
details = append(details, fmt.Sprintf(
"left(%s)=right(%s)", left.diagramString(), right.diagramString(),
))
if !onExpr.Empty() {
details = append(details, fmt.Sprintf("ON %s", onExpr))
}
return details
}
// summary implements the diagramCellType interface.
func (mj *MergeJoinerSpec) summary() (string, []string) {
name := "MergeJoiner"
if mj.Type.IsSetOpJoin() {
name = "MergeSetOp"
}
return name, orderedJoinDetails(mj.Type, mj.LeftOrdering, mj.RightOrdering, mj.OnExpr)
}
// summary implements the diagramCellType interface.
func (zj *ZigzagJoinerSpec) summary() (string, []string) {
name := "ZigzagJoiner"
details := make([]string, 0, len(zj.Sides)+1)
for i := range zj.Sides {
fetchSpec := &zj.Sides[i].FetchSpec
details = append(details, fmt.Sprintf(
"Side %d: %s@%s", i, fetchSpec.TableName, fetchSpec.IndexName,
))
details = appendColumns(details, fetchSpec.FetchedColumns)
}
if !zj.OnExpr.Empty() {
details = append(details, fmt.Sprintf("ON %s", zj.OnExpr))
}
return name, details
}
// summary implements the diagramCellType interface.
func (ij *InvertedJoinerSpec) summary() (string, []string) {
details := make([]string, 0, 5)
if ij.Type != descpb.InnerJoin {
details = append(details, joinTypeDetail(ij.Type))
}
details = append(details, fmt.Sprintf("%s@%s", ij.FetchSpec.TableName, ij.FetchSpec.IndexName))
details = append(details, fmt.Sprintf("InvertedExpr %s", ij.InvertedExpr))
if !ij.OnExpr.Empty() {
details = append(details, fmt.Sprintf("ON %s", ij.OnExpr))
}
if ij.OutputGroupContinuationForLeftRow {
details = append(details, "first join in paired-join")
}
details = appendColumns(details, ij.FetchSpec.FetchedColumns)
return "InvertedJoiner", details
}
// summary implements the diagramCellType interface.
func (s *SorterSpec) summary() (string, []string) {
details := []string{s.OutputOrdering.diagramString()}
if s.OrderingMatchLen != 0 {
details = append(details, fmt.Sprintf("match len: %d", s.OrderingMatchLen))
}
if s.Limit > 0 {
details = append(details, fmt.Sprintf("TopK: %d", s.Limit))
}
return "Sorter", details
}
// summary implements the diagramCellType interface.
func (bf *BackfillerSpec) summary() (string, []string) {
details := []string{
bf.Table.Name,
fmt.Sprintf("Type: %s", bf.Type.String()),
}
return "Backfiller", details
}
// summary implements the diagramCellType interface.
func (m *BackupDataSpec) summary() (string, []string) {
var spanStr strings.Builder
if len(m.Spans) > 0 {
spanStr.WriteString(fmt.Sprintf("Spans [%d]: ", len(m.Spans)))
const limit = 3
for i := 0; i < len(m.Spans) && i < limit; i++ {
if i > 0 {
spanStr.WriteString(", ")
}
spanStr.WriteString(m.Spans[i].String())
}
if len(m.Spans) > limit {
spanStr.WriteString("...")
}
}
details := []string{
spanStr.String(),
}
return "BACKUP", details
}
// summary implements the diagramCellType interface.
func (d *DistinctSpec) summary() (string, []string) {
details := []string{
colListStr(d.DistinctColumns),
}
if len(d.OrderedColumns) > 0 {
details = append(details, fmt.Sprintf("Ordered: %s", colListStr(d.OrderedColumns)))
}
return "Distinct", details
}
// summary implements the diagramCellType interface.
func (o *OrdinalitySpec) summary() (string, []string) {
return "Ordinality", []string{}
}
// summary implements the diagramCellType interface.
func (d *ProjectSetSpec) summary() (string, []string) {
var details []string
for _, expr := range d.Exprs {
details = append(details, expr.String())
}
return "ProjectSet", details
}
// summary implements the diagramCellType interface.
func (s *SamplerSpec) summary() (string, []string) {
details := []string{fmt.Sprintf("SampleSize: %d", s.SampleSize)}
for _, sk := range s.Sketches {
details = append(details, fmt.Sprintf("Stat: %s", colListStr(sk.Columns)))
}
return "Sampler", details
}
// summary implements the diagramCellType interface.
func (s *SampleAggregatorSpec) summary() (string, []string) {
details := []string{
fmt.Sprintf("SampleSize: %d", s.SampleSize),
}
for _, sk := range s.Sketches {
s := fmt.Sprintf("Stat: %s", colListStr(sk.Columns))
if sk.GenerateHistogram {
s = fmt.Sprintf("%s (%d buckets)", s, sk.HistogramMaxBuckets)
}
details = append(details, s)
}
return "SampleAggregator", details
}
func (is *InputSyncSpec) summary(showTypes bool) (string, []string) {
typs := make([]string, 0, len(is.ColumnTypes)+1)
if showTypes {
for _, typ := range is.ColumnTypes {
typs = append(typs, typ.Name())
}
}
switch is.Type {
case InputSyncSpec_PARALLEL_UNORDERED:
return "unordered", typs
case InputSyncSpec_ORDERED:
return "ordered", append(typs, is.Ordering.diagramString())
case InputSyncSpec_SERIAL_UNORDERED:
return "serial unordered", typs
default:
return "unknown", []string{}
}
}
// summary implements the diagramCellType interface.
func (r *LocalPlanNodeSpec) summary() (string, []string) {
return fmt.Sprintf("local %s %d", r.Name, r.RowSourceIdx), []string{}
}
// summary implements the diagramCellType interface.
func (r *OutputRouterSpec) summary() (string, []string) {
switch r.Type {
case OutputRouterSpec_PASS_THROUGH:
return "", []string{}
case OutputRouterSpec_MIRROR:
return "mirror", []string{}
case OutputRouterSpec_BY_HASH:
return "by hash", []string{colListStr(r.HashColumns)}
case OutputRouterSpec_BY_RANGE:
return "by range", []string{}
default:
return "unknown", []string{}
}
}
// summary implements the diagramCellType interface.
func (post *PostProcessSpec) summary() []string {
var res []string
if post.Projection {
outputColumns := "None"
if len(post.OutputColumns) > 0 {
outputColumns = colListStr(post.OutputColumns)
}
res = append(res, fmt.Sprintf("Out: %s", outputColumns))
}
if len(post.RenderExprs) > 0 {
var buf bytes.Buffer
buf.WriteString("Render: ")
for i, expr := range post.RenderExprs {
if i > 0 {
buf.WriteString(", ")
}
// Remove any spaces in the expression (makes things more compact
// and it's easier to visually separate expressions).
buf.WriteString(strings.Replace(expr.String(), " ", "", -1))
}
res = append(res, buf.String())
}
if post.Limit != 0 || post.Offset != 0 {
var buf bytes.Buffer
if post.Limit != 0 {
fmt.Fprintf(&buf, "Limit %d", post.Limit)
}
if post.Offset != 0 {
if buf.Len() != 0 {
buf.WriteByte(' ')
}
fmt.Fprintf(&buf, "Offset %d", post.Offset)
}
res = append(res, buf.String())
}
return res
}
// summary implements the diagramCellType interface.
func (c *RestoreDataSpec) summary() (string, []string) {
return "RestoreDataSpec", []string{}
}
// summary implements the diagramCellType interface.
func (c *SplitAndScatterSpec) summary() (string, []string) {
detail := fmt.Sprintf("%d chunks", len(c.Chunks))
return "SplitAndScatterSpec", []string{detail}
}
// summary implements the diagramCellType interface.
func (c *ReadImportDataSpec) summary() (string, []string) {
ss := make([]string, 0, len(c.Uri))
for _, s := range c.Uri {
ss = append(ss, s)
}
return "ReadImportData", ss
}
// summary implements the diagramCellType interface.
func (s *StreamIngestionDataSpec) summary() (string, []string) {
return "StreamIngestionData", []string{}
}
// summary implements the diagramCellType interface.
func (s *StreamIngestionFrontierSpec) summary() (string, []string) {
return "StreamIngestionFrontier", []string{}
}
// summary implements the diagramCellType interface.
func (s *IndexBackfillMergerSpec) summary() (string, []string) {
return "IndexBackfillMerger", []string{}
}
// summary implements the diagramCellType interface.
func (s *ExportSpec) summary() (string, []string) {
return "Exporter", []string{s.Destination}
}
// summary implements the diagramCellType interface.
func (s *BulkRowWriterSpec) summary() (string, []string) {
return "BulkRowWriterSpec", []string{}
}
// summary implements the diagramCellType interface.
func (w *WindowerSpec) summary() (string, []string) {
details := make([]string, 0, len(w.WindowFns))
if len(w.PartitionBy) > 0 {
details = append(details, fmt.Sprintf("PARTITION BY: %s", colListStr(w.PartitionBy)))
}
for _, windowFn := range w.WindowFns {
var buf bytes.Buffer
if windowFn.Func.WindowFunc != nil {
buf.WriteString(windowFn.Func.WindowFunc.String())
} else {
buf.WriteString(windowFn.Func.AggregateFunc.String())
}
buf.WriteByte('(')
buf.WriteString(colListStr(windowFn.ArgsIdxs))
buf.WriteByte(')')
if len(windowFn.Ordering.Columns) > 0 {
buf.WriteString(" (ORDER BY ")
buf.WriteString(windowFn.Ordering.diagramString())
buf.WriteByte(')')
}
details = append(details, buf.String())
}
return "Windower", details
}
// summary implements the diagramCellType interface.
func (s *ChangeAggregatorSpec) summary() (string, []string) {
var details []string
for _, watch := range s.Watches {
details = append(details, watch.Span.String())
}
return "ChangeAggregator", details
}
// summary implements the diagramCellType interface.
func (s *ChangeFrontierSpec) summary() (string, []string) {
return "ChangeFrontier", []string{}
}
// summary implements the diagramCellType interface.
func (s *TTLSpec) summary() (string, []string) {
details := s.RowLevelTTLDetails
return "TTL", []string{
fmt.Sprintf("JobID: %d", s.JobID),
fmt.Sprintf("TableID: %d", details.TableID),
fmt.Sprintf("TableVersion: %d", details.TableVersion),
}
}
type diagramCell struct {
Title string `json:"title"`
Details []string `json:"details"`
}
type diagramProcessor struct {
NodeIdx int `json:"nodeIdx"`
Inputs []diagramCell `json:"inputs"`
Core diagramCell `json:"core"`
Outputs []diagramCell `json:"outputs"`
StageID int32 `json:"stage"`
processorID int32
}
type diagramEdge struct {
SourceProc int `json:"sourceProc"`
SourceOutput int `json:"sourceOutput"`
DestProc int `json:"destProc"`
DestInput int `json:"destInput"`
Stats []string `json:"stats,omitempty"`
streamID StreamID
}
// FlowDiagram is a plan diagram that can be made into a URL.
type FlowDiagram interface {
// ToURL generates the json data for a flow diagram and a URL which encodes the
// diagram.
ToURL() (string, url.URL, error)
// AddSpans adds stats extracted from the input spans to the diagram.
AddSpans([]tracingpb.RecordedSpan)
}
type diagramData struct {
SQL string `json:"sql"`
NodeNames []string `json:"nodeNames"`
Processors []diagramProcessor `json:"processors"`
Edges []diagramEdge `json:"edges"`
flags DiagramFlags
flowID FlowID
sqlInstanceIDs []base.SQLInstanceID
}
var _ FlowDiagram = &diagramData{}
// ToURL implements the FlowDiagram interface.
func (d diagramData) ToURL() (string, url.URL, error) {
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(d); err != nil {
return "", url.URL{}, err
}
return encodeJSONToURL(buf)
}
// AddSpans implements the FlowDiagram interface.
func (d *diagramData) AddSpans(spans []tracingpb.RecordedSpan) {
statsMap := ExtractStatsFromSpans(spans, d.flags.MakeDeterministic)
for i := range d.Processors {
p := &d.Processors[i]
sqlInstanceID := d.sqlInstanceIDs[p.NodeIdx]
component := ProcessorComponentID(sqlInstanceID, d.flowID, p.processorID)
if compStats := statsMap[component]; compStats != nil {
p.Core.Details = append(p.Core.Details, compStats.StatsForQueryPlan()...)
}
}
for i := range d.Edges {
originSQLInstanceID := d.sqlInstanceIDs[d.Processors[d.Edges[i].SourceProc].NodeIdx]
component := StreamComponentID(originSQLInstanceID, d.flowID, d.Edges[i].streamID)
if compStats := statsMap[component]; compStats != nil {
d.Edges[i].Stats = compStats.StatsForQueryPlan()
}
}
}
// generateDiagramData generates the diagram data, given a list of flows (one
// per node). The sqlInstanceIDs list corresponds 1-1 to the flows list.
func generateDiagramData(
sql string, flows []FlowSpec, sqlInstanceIDs []base.SQLInstanceID, flags DiagramFlags,
) (FlowDiagram, error) {
d := &diagramData{
SQL: sql,
sqlInstanceIDs: sqlInstanceIDs,
flags: flags,
}
d.NodeNames = make([]string, len(sqlInstanceIDs))
for i := range d.NodeNames {
d.NodeNames[i] = sqlInstanceIDs[i].String()
}
if len(flows) > 0 {
d.flowID = flows[0].FlowID
for i := 1; i < len(flows); i++ {
if flows[i].FlowID != d.flowID {
return nil, errors.AssertionFailedf("flow ID mismatch within a diagram")
}
}
}
// inPorts maps streams to their "destination" attachment point. Only DestProc
// and DestInput are set in each diagramEdge value.
inPorts := make(map[StreamID]diagramEdge)
syncResponseNode := -1
pIdx := 0
for n := range flows {
for _, p := range flows[n].Processors {
proc := diagramProcessor{NodeIdx: n}
proc.Core.Title, proc.Core.Details = p.Core.GetValue().(diagramCellType).summary()
proc.Core.Title += fmt.Sprintf("/%d", p.ProcessorID)
proc.processorID = p.ProcessorID
proc.Core.Details = append(proc.Core.Details, p.Post.summary()...)
// We need explicit synchronizers if we have multiple inputs, or if the
// one input has multiple input streams.
if len(p.Input) > 1 || (len(p.Input) == 1 && len(p.Input[0].Streams) > 1) {
proc.Inputs = make([]diagramCell, len(p.Input))
for i, s := range p.Input {
proc.Inputs[i].Title, proc.Inputs[i].Details = s.summary(flags.ShowInputTypes)
}
} else {
proc.Inputs = []diagramCell{}
}
// Add entries in the map for the inputs.
for i, input := range p.Input {
val := diagramEdge{
DestProc: pIdx,
}
if len(proc.Inputs) > 0 {
val.DestInput = i + 1
}
for _, stream := range input.Streams {
inPorts[stream.StreamID] = val
}
}
for _, r := range p.Output {
for _, o := range r.Streams {
if o.Type == StreamEndpointSpec_SYNC_RESPONSE {
if syncResponseNode != -1 && syncResponseNode != n {
return nil, errors.Errorf("multiple nodes with SyncResponse")
}
syncResponseNode = n
}
}
}
// We need explicit routers if we have multiple outputs, or if the one
// output has multiple input streams.
if len(p.Output) > 1 || (len(p.Output) == 1 && len(p.Output[0].Streams) > 1) {
proc.Outputs = make([]diagramCell, len(p.Output))
for i, r := range p.Output {
proc.Outputs[i].Title, proc.Outputs[i].Details = r.summary()
}
} else {
proc.Outputs = []diagramCell{}
}
proc.StageID = p.StageID
d.Processors = append(d.Processors, proc)
pIdx++
}
}
if syncResponseNode != -1 {
d.Processors = append(d.Processors, diagramProcessor{
NodeIdx: syncResponseNode,
Core: diagramCell{Title: "Response", Details: []string{}},
Inputs: []diagramCell{},
Outputs: []diagramCell{},
// When generating stats, spans are mapped from processor ID in the span
// tags to processor ID in the diagram data. To avoid clashing with
// the processor with ID 0, assign an impossible processorID.
processorID: -1,
})
}
// Produce the edges.
pIdx = 0
for n := range flows {
for _, p := range flows[n].Processors {
for i, output := range p.Output {
srcOutput := 0
if len(d.Processors[pIdx].Outputs) > 0 {
srcOutput = i + 1
}
for _, o := range output.Streams {
edge := diagramEdge{
SourceProc: pIdx,
SourceOutput: srcOutput,
streamID: o.StreamID,
}
if o.Type == StreamEndpointSpec_SYNC_RESPONSE {
edge.DestProc = len(d.Processors) - 1
} else {
to, ok := inPorts[o.StreamID]
if !ok {
return nil, errors.Errorf("stream %d has no destination", o.StreamID)
}
edge.DestProc = to.DestProc
edge.DestInput = to.DestInput
}
d.Edges = append(d.Edges, edge)
}
}
pIdx++
}
}
return d, nil
}
// GeneratePlanDiagram generates the data for a flow diagram. There should be
// one FlowSpec per node. The function assumes that StreamIDs are unique across
// all flows.
func GeneratePlanDiagram(
sql string, flows map[base.SQLInstanceID]*FlowSpec, flags DiagramFlags,
) (FlowDiagram, error) {
// We sort the flows by node because we want the diagram data to be
// deterministic.
sqlInstanceIDs := make([]base.SQLInstanceID, 0, len(flows))
for n := range flows {
sqlInstanceIDs = append(sqlInstanceIDs, n)
}
sort.Slice(sqlInstanceIDs, func(i, j int) bool {
return sqlInstanceIDs[i] < sqlInstanceIDs[j]
})
flowSlice := make([]FlowSpec, len(sqlInstanceIDs))
for i, n := range sqlInstanceIDs {
flowSlice[i] = *flows[n]
}
return generateDiagramData(sql, flowSlice, sqlInstanceIDs, flags)
}
// GeneratePlanDiagramURL generates the json data for a flow diagram and a
// URL which encodes the diagram. There should be one FlowSpec per node. The
// function assumes that StreamIDs are unique across all flows.
func GeneratePlanDiagramURL(
sql string, flows map[base.SQLInstanceID]*FlowSpec, flags DiagramFlags,
) (string, url.URL, error) {
d, err := GeneratePlanDiagram(sql, flows, flags)
if err != nil {
return "", url.URL{}, err
}
return d.ToURL()
}
func encodeJSONToURL(json bytes.Buffer) (string, url.URL, error) {
var compressed bytes.Buffer
jsonStr := json.String()
encoder := base64.NewEncoder(base64.URLEncoding, &compressed)
compressor := zlib.NewWriter(encoder)
if _, err := json.WriteTo(compressor); err != nil {
return "", url.URL{}, err
}
if err := compressor.Close(); err != nil {
return "", url.URL{}, err
}
if err := encoder.Close(); err != nil {
return "", url.URL{}, err
}
url := url.URL{
Scheme: "https",
Host: "cockroachdb.github.io",
Path: "distsqlplan/decode.html",
Fragment: compressed.String(),
}
return jsonStr, url, nil
}
| pkg/sql/execinfrapb/flow_diagram.go | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.006794242653995752,
0.0002554134698584676,
0.00016291261999867857,
0.00017311295960098505,
0.0007055601454339921
] |
{
"id": 6,
"code_window": [
")\n",
"\n",
"// TraceRedactedMarker is used to replace logs that weren't redacted.\n",
"const TraceRedactedMarker = redact.RedactableString(\"verbose trace message redacted\")\n",
"\n",
"// redactRecordingForTenant redacts the sensitive parts of log messages in the\n",
"// recording if the tenant to which this recording is intended is not the system\n",
"// tenant (the system tenant gets an unredacted trace).\n",
"// See https://github.com/cockroachdb/cockroach/issues/70407.\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/server/node_tenant.go",
"type": "replace",
"edit_start_line_idx": 18
} | // Copyright 2021 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package kvtenantccl_test
import (
"context"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/redact"
"github.com/stretchr/testify/require"
)
// TestTenantTracesAreRedacted is an end-to-end version of
// `kvserver.TestMaybeRedactRecording`.
func TestTenantTracesAreRedacted(t *testing.T) {
defer leaktest.AfterTest(t)()
testutils.RunTrueAndFalse(t, "redactable", func(t *testing.T, redactable bool) {
testTenantTracesAreRedactedImpl(t, redactable)
})
}
const testStmt = "CREATE TABLE kv(k STRING PRIMARY KEY, v STRING)"
func testTenantTracesAreRedactedImpl(t *testing.T, redactable bool) {
defer log.Scope(t).Close(t)
ctx := context.Background()
const (
sensitiveString = "super-secret-stuff"
visibleString = "tenant-can-see-this"
)
recCh := make(chan tracingpb.Recording, 1)
args := base.TestServerArgs{
// Test hangs within a tenant. More investigation is required.
// Tracked with #76378.
DisableDefaultTestTenant: true,
Knobs: base.TestingKnobs{
Store: &kvserver.StoreTestingKnobs{
EvalKnobs: kvserverbase.BatchEvalTestingKnobs{
TestingEvalFilter: func(args kvserverbase.FilterArgs) *roachpb.Error {
log.Eventf(args.Ctx, "%v", sensitiveString)
log.Eventf(args.Ctx, "%v", redact.Safe(visibleString))
return nil
},
},
},
SQLExecutor: &sql.ExecutorTestingKnobs{
WithStatementTrace: func(trace tracingpb.Recording, stmt string) {
if stmt == testStmt {
recCh <- trace
}
},
},
},
}
s, db, _ := serverutils.StartServer(t, args)
if redactable {
runner := sqlutils.MakeSQLRunner(db)
runner.Exec(t, "SET CLUSTER SETTING trace.redactable.enabled = true")
}
defer db.Close()
defer s.Stopper().Stop(ctx)
// Queries from the system tenant will receive unredacted traces
// since the tracer will not have the redactable flag set.
t.Run("system-tenant", func(t *testing.T) {
runner := sqlutils.MakeSQLRunner(db)
runner.Exec(t, testStmt)
trace := <-recCh
require.NotEmpty(t, trace)
var found bool
for _, rs := range trace {
for _, s := range rs.Logs {
if strings.Contains(s.Msg().StripMarkers(), sensitiveString) {
found = true
}
}
}
require.True(t, found, "did not find '%q' in trace:\n%s",
sensitiveString, trace,
)
})
t.Run("regular-tenant", func(t *testing.T) {
_, tenDB := serverutils.StartTenant(t, s, base.TestTenantArgs{
TenantID: roachpb.MakeTenantID(security.EmbeddedTenantIDs()[0]),
TestingKnobs: args.Knobs,
})
defer tenDB.Close()
runner := sqlutils.MakeSQLRunner(tenDB)
runner.Exec(t, testStmt)
trace := <-recCh
require.NotEmpty(t, trace)
var found bool
var foundRedactedMarker bool
for _, rs := range trace {
for _, s := range rs.Logs {
if strings.Contains(s.Msg().StripMarkers(), sensitiveString) {
t.Fatalf(
"trace for tenant contained KV-level trace message '%q':\n%s",
sensitiveString, trace,
)
}
if strings.Contains(s.Msg().StripMarkers(), visibleString) {
found = true
}
if strings.Contains(s.Msg().StripMarkers(), string(server.TraceRedactedMarker)) {
foundRedactedMarker = true
}
}
}
// In both cases we don't expect to see the `TraceRedactedMarker`
// since that's only shown when the server is in an inconsistent
// state or if there's a version mismatch between client and server.
if redactable {
// If redaction was on, we expect the tenant to see safe information in its
// trace.
require.True(t, found, "did not see expected trace message '%q':\n%s",
visibleString, trace)
require.False(t, foundRedactedMarker, "unexpectedly found '%q':\n%s",
string(server.TraceRedactedMarker), trace)
} else {
// Otherwise, expect the opposite: not even safe information makes it through,
// because it gets replaced with foundRedactedMarker.
require.False(t, found, "unexpectedly saw message '%q':\n%s",
visibleString, trace)
require.False(t, foundRedactedMarker, "unexpectedly found '%q':\n%s",
string(server.TraceRedactedMarker), trace)
}
})
}
| pkg/ccl/kvccl/kvtenantccl/tenant_trace_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.9986039996147156,
0.19080103933811188,
0.00016180540842469782,
0.002513997256755829,
0.3880159854888916
] |
{
"id": 6,
"code_window": [
")\n",
"\n",
"// TraceRedactedMarker is used to replace logs that weren't redacted.\n",
"const TraceRedactedMarker = redact.RedactableString(\"verbose trace message redacted\")\n",
"\n",
"// redactRecordingForTenant redacts the sensitive parts of log messages in the\n",
"// recording if the tenant to which this recording is intended is not the system\n",
"// tenant (the system tenant gets an unredacted trace).\n",
"// See https://github.com/cockroachdb/cockroach/issues/70407.\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/server/node_tenant.go",
"type": "replace",
"edit_start_line_idx": 18
} | // Code generated by execgen; DO NOT EDIT.
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colconv
import (
"github.com/cockroachdb/cockroach/pkg/col/typeconv"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/errors"
)
// Workaround for bazel auto-generated code. goimports does not automatically
// pick up the right packages when run within the bazel sandbox.
var (
_ encoding.Direction
_ = typeconv.DatumVecCanonicalTypeFamily
)
// GetDatumToPhysicalFn returns a function for converting a datum of the given
// ColumnType to the corresponding Go type. Note that the signature of the
// return function doesn't contain an error since we assume that the conversion
// must succeed. If for some reason it fails, a panic will be emitted and will
// be caught by the panic-catcher mechanism of the vectorized engine and will
// be propagated as an error accordingly.
func GetDatumToPhysicalFn(ct *types.T) func(tree.Datum) interface{} {
switch ct.Family() {
case types.BoolFamily:
switch ct.Width() {
case -1:
default:
return func(datum tree.Datum) interface{} {
return bool(*datum.(*tree.DBool))
}
}
case types.IntFamily:
switch ct.Width() {
case 16:
return func(datum tree.Datum) interface{} {
return int16(*datum.(*tree.DInt))
}
case 32:
return func(datum tree.Datum) interface{} {
return int32(*datum.(*tree.DInt))
}
case -1:
default:
return func(datum tree.Datum) interface{} {
return int64(*datum.(*tree.DInt))
}
}
case types.FloatFamily:
switch ct.Width() {
case -1:
default:
return func(datum tree.Datum) interface{} {
return float64(*datum.(*tree.DFloat))
}
}
case types.DecimalFamily:
switch ct.Width() {
case -1:
default:
return func(datum tree.Datum) interface{} {
return datum.(*tree.DDecimal).Decimal
}
}
case types.DateFamily:
switch ct.Width() {
case -1:
default:
return func(datum tree.Datum) interface{} {
return datum.(*tree.DDate).UnixEpochDaysWithOrig()
}
}
case types.TimestampFamily:
switch ct.Width() {
case -1:
default:
return func(datum tree.Datum) interface{} {
return datum.(*tree.DTimestamp).Time
}
}
case types.IntervalFamily:
switch ct.Width() {
case -1:
default:
return func(datum tree.Datum) interface{} {
return datum.(*tree.DInterval).Duration
}
}
case types.StringFamily:
switch ct.Width() {
case -1:
default:
return func(datum tree.Datum) interface{} {
// Handle other STRING-related OID types, like oid.T_name.
wrapper, ok := datum.(*tree.DOidWrapper)
if ok {
datum = wrapper.Wrapped
}
return encoding.UnsafeConvertStringToBytes(string(*datum.(*tree.DString)))
}
}
case types.BytesFamily:
switch ct.Width() {
case -1:
default:
return func(datum tree.Datum) interface{} {
return encoding.UnsafeConvertStringToBytes(string(*datum.(*tree.DBytes)))
}
}
case types.TimestampTZFamily:
switch ct.Width() {
case -1:
default:
return func(datum tree.Datum) interface{} {
return datum.(*tree.DTimestampTZ).Time
}
}
case types.UuidFamily:
switch ct.Width() {
case -1:
default:
return func(datum tree.Datum) interface{} {
return datum.(*tree.DUuid).UUID.GetBytesMut()
}
}
case types.JsonFamily:
switch ct.Width() {
case -1:
default:
return func(datum tree.Datum) interface{} {
return datum.(*tree.DJSON).JSON
}
}
case types.EncodedKeyFamily:
switch ct.Width() {
case -1:
default:
return func(datum tree.Datum) interface{} {
return encoding.UnsafeConvertStringToBytes(string(*datum.(*tree.DEncodedKey)))
}
}
case typeconv.DatumVecCanonicalTypeFamily:
default:
switch ct.Width() {
case -1:
default:
return func(datum tree.Datum) interface{} {
return datum
}
}
}
colexecerror.InternalError(errors.AssertionFailedf("unexpectedly unhandled type %s", ct.DebugString()))
// This code is unreachable, but the compiler cannot infer that.
return nil
}
| pkg/sql/colconv/datum_to_vec.eg.go | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.0003022812306880951,
0.00017779867630451918,
0.00016193537157960236,
0.0001715950929792598,
0.00002950109774246812
] |
{
"id": 6,
"code_window": [
")\n",
"\n",
"// TraceRedactedMarker is used to replace logs that weren't redacted.\n",
"const TraceRedactedMarker = redact.RedactableString(\"verbose trace message redacted\")\n",
"\n",
"// redactRecordingForTenant redacts the sensitive parts of log messages in the\n",
"// recording if the tenant to which this recording is intended is not the system\n",
"// tenant (the system tenant gets an unredacted trace).\n",
"// See https://github.com/cockroachdb/cockroach/issues/70407.\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/server/node_tenant.go",
"type": "replace",
"edit_start_line_idx": 18
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package grpcinterceptor_test
import (
"context"
"fmt"
"io"
"net"
"testing"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/grpcutils"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/tracing/grpcinterceptor"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/errors"
"github.com/gogo/protobuf/types"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
)
// testStructuredImpl is a testing implementation of Structured event.
type testStructuredImpl struct {
*types.StringValue
}
var _ tracing.Structured = &testStructuredImpl{}
func (t *testStructuredImpl) String() string {
return fmt.Sprintf("structured=%s", t.Value)
}
func newTestStructured(s string) *testStructuredImpl {
return &testStructuredImpl{
&types.StringValue{Value: s},
}
}
// TestGRPCInterceptors verifies that the streaming and unary tracing
// interceptors work as advertised. We expect to see a span on the client side
// and a span on the server side.
func TestGRPCInterceptors(t *testing.T) {
defer leaktest.AfterTest(t)()
const (
magicValue = "magic-value"
)
checkForSpanAndReturnRecording := func(ctx context.Context) (*types.Any, error) {
sp := tracing.SpanFromContext(ctx)
if sp == nil {
return nil, errors.New("no span in ctx")
}
sp.RecordStructured(newTestStructured(magicValue))
recs := sp.GetRecording(tracingpb.RecordingVerbose)
if len(recs) != 1 {
return nil, errors.Newf("expected exactly one recorded span, not %+v", recs)
}
return types.MarshalAny(&recs[0])
}
impl := &grpcutils.TestServerImpl{
UU: func(ctx context.Context, any *types.Any) (*types.Any, error) {
return checkForSpanAndReturnRecording(ctx)
},
US: func(_ *types.Any, server grpcutils.GRPCTest_UnaryStreamServer) error {
any, err := checkForSpanAndReturnRecording(server.Context())
if err != nil {
return err
}
return server.Send(any)
},
SU: func(server grpcutils.GRPCTest_StreamUnaryServer) error {
_, err := server.Recv()
if err != nil {
return err
}
any, err := checkForSpanAndReturnRecording(server.Context())
if err != nil {
return err
}
return server.SendAndClose(any)
},
SS: func(server grpcutils.GRPCTest_StreamStreamServer) error {
_, err := server.Recv()
if err != nil {
return err
}
any, err := checkForSpanAndReturnRecording(server.Context())
if err != nil {
return err
}
return server.Send(any)
},
}
unusedAny, err := types.MarshalAny(&types.Empty{})
require.NoError(t, err)
for _, tc := range []struct {
name string
// expSpanName is the expected name of the RPC spans (client-side and
// server-side). If not specified, the test's name is used.
expSpanName string
do func(context.Context, grpcutils.GRPCTestClient) (*types.Any, error)
}{
{
name: "UnaryUnary",
do: func(ctx context.Context, c grpcutils.GRPCTestClient) (*types.Any, error) {
return c.UnaryUnary(ctx, unusedAny)
},
},
{
name: "UnaryStream",
do: func(ctx context.Context, c grpcutils.GRPCTestClient) (*types.Any, error) {
sc, err := c.UnaryStream(ctx, unusedAny)
if err != nil {
return nil, err
}
if err := sc.CloseSend(); err != nil {
return nil, err
}
var firstResponse *types.Any
// Consume the stream fully, as mandated by the gRPC API.
for {
any, err := sc.Recv()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
if firstResponse == nil {
firstResponse = any
}
}
return firstResponse, nil
},
},
{
// Test that cancelling the client's ctx finishes the client span. The
// client span is usually finished either when Recv() receives an error
// (e.g. when receiving an io.EOF after exhausting the stream). But the
// client is allowed to not read from the stream any more if it cancels
// the ctx.
name: "UnaryStream_ContextCancel",
expSpanName: "UnaryStream",
do: func(ctx context.Context, c grpcutils.GRPCTestClient) (*types.Any, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
sc, err := c.UnaryStream(ctx, unusedAny)
if err != nil {
return nil, err
}
if err := sc.CloseSend(); err != nil {
return nil, err
}
return sc.Recv()
},
},
{
name: "StreamUnary",
do: func(ctx context.Context, c grpcutils.GRPCTestClient) (*types.Any, error) {
sc, err := c.StreamUnary(ctx)
if err != nil {
return nil, err
}
if err := sc.Send(unusedAny); err != nil {
return nil, err
}
return sc.CloseAndRecv()
},
},
{
name: "StreamStream",
do: func(ctx context.Context, c grpcutils.GRPCTestClient) (*types.Any, error) {
sc, err := c.StreamStream(ctx)
if err != nil {
return nil, err
}
if err := sc.Send(unusedAny); err != nil {
return nil, err
}
if err := sc.CloseSend(); err != nil {
return nil, err
}
var firstResponse *types.Any
// Consume the stream fully, as mandated by the gRPC API.
for {
any, err := sc.Recv()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
if firstResponse == nil {
firstResponse = any
}
}
return firstResponse, nil
},
},
} {
t.Run(tc.name, func(t *testing.T) {
bgCtx := context.Background()
s := stop.NewStopper()
defer s.Stop(bgCtx)
tr := tracing.NewTracer()
srv := grpc.NewServer(
grpc.UnaryInterceptor(grpcinterceptor.ServerInterceptor(tr)),
grpc.StreamInterceptor(grpcinterceptor.StreamServerInterceptor(tr)),
)
grpcutils.RegisterGRPCTestServer(srv, impl)
defer srv.GracefulStop()
ln, err := net.Listen(util.TestAddr.Network(), util.TestAddr.String())
require.NoError(t, err)
require.NoError(t, s.RunAsyncTask(bgCtx, "serve", func(ctx context.Context) {
if err := srv.Serve(ln); err != nil {
t.Error(err)
}
}))
conn, err := grpc.DialContext(bgCtx, ln.Addr().String(),
//lint:ignore SA1019 grpc.WithInsecure is deprecated
grpc.WithInsecure(),
grpc.WithUnaryInterceptor(grpcinterceptor.ClientInterceptor(tr, nil /* init */)),
grpc.WithStreamInterceptor(grpcinterceptor.StreamClientInterceptor(tr, nil /* init */)),
)
require.NoError(t, err)
defer func() {
_ = conn.Close() // nolint:grpcconnclose
}()
c := grpcutils.NewGRPCTestClient(conn)
require.NoError(t, err)
ctx, sp := tr.StartSpanCtx(bgCtx, "root", tracing.WithRecording(tracingpb.RecordingVerbose))
recAny, err := tc.do(ctx, c)
require.NoError(t, err)
var rec tracingpb.RecordedSpan
require.NoError(t, types.UnmarshalAny(recAny, &rec))
require.Len(t, rec.StructuredRecords, 1)
sp.ImportRemoteRecording([]tracingpb.RecordedSpan{rec})
var n int
finalRecs := sp.FinishAndGetRecording(tracingpb.RecordingVerbose)
for i := range finalRecs {
rec := &finalRecs[i]
n += len(rec.StructuredRecords)
// Remove all of the _unfinished tags. These crop up because
// in this test we are pulling the recorder in the handler impl,
// but the span is only closed in the interceptor. Additionally,
// this differs between the streaming and unary interceptor, and
// it's not worth having to have a separate expectation for each.
// Note that we check that we're not leaking spans at the end of
// the test.
anonymousTagGroup := rec.FindTagGroup(tracingpb.AnonymousTagGroupName)
if anonymousTagGroup == nil {
continue
}
filteredAnonymousTagGroup := make([]tracingpb.Tag, 0)
for _, tag := range anonymousTagGroup.Tags {
if tag.Key == "_unfinished" {
continue
}
if tag.Key == "_verbose" {
continue
}
filteredAnonymousTagGroup = append(filteredAnonymousTagGroup, tag)
}
anonymousTagGroup.Tags = filteredAnonymousTagGroup
}
require.Equal(t, 1, n)
expSpanName := tc.expSpanName
if expSpanName == "" {
expSpanName = tc.name
}
exp := fmt.Sprintf(`
span: root
span: /cockroach.testutils.grpcutils.GRPCTest/%[1]s
tags: span.kind=client
span: /cockroach.testutils.grpcutils.GRPCTest/%[1]s
tags: span.kind=server
event: structured=magic-value`, expSpanName)
require.NoError(t, tracing.CheckRecordedSpans(finalRecs, exp))
// Check that all the RPC spans (client-side and server-side) have been
// closed. SucceedsSoon because the closing of the span is async (although
// immediate) in the ctx cancellation subtest.
testutils.SucceedsSoon(t, func() error {
return tr.VisitSpans(func(sp tracing.RegistrySpan) error {
rec := sp.GetFullRecording(tracingpb.RecordingVerbose)[0]
return errors.Newf("leaked span: %s %s", rec.Operation, rec.TagGroups)
})
})
})
}
}
| pkg/util/tracing/grpcinterceptor/grpc_interceptor_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.0014946082374081016,
0.0002223659394076094,
0.0001594127097632736,
0.00017027115973178297,
0.00023398554185405374
] |
{
"id": 6,
"code_window": [
")\n",
"\n",
"// TraceRedactedMarker is used to replace logs that weren't redacted.\n",
"const TraceRedactedMarker = redact.RedactableString(\"verbose trace message redacted\")\n",
"\n",
"// redactRecordingForTenant redacts the sensitive parts of log messages in the\n",
"// recording if the tenant to which this recording is intended is not the system\n",
"// tenant (the system tenant gets an unredacted trace).\n",
"// See https://github.com/cockroachdb/cockroach/issues/70407.\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/server/node_tenant.go",
"type": "replace",
"edit_start_line_idx": 18
} | // Code generated by execgen; DO NOT EDIT.
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexecwindow
import (
"context"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/col/typeconv"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/errors"
)
// NewFirstValueOperator creates a new Operator that computes window
// function firstValue. outputColIdx specifies in which coldata.Vec the operator
// should put its output (if there is no such column, a new column is appended).
func NewFirstValueOperator(
args *WindowArgs,
frame *execinfrapb.WindowerSpec_Frame,
ordering *execinfrapb.Ordering,
argIdxs []int,
) (colexecop.ClosableOperator, error) {
framer := newWindowFramer(args.EvalCtx, frame, ordering, args.InputTypes, args.PeersColIdx)
colsToStore := framer.getColsToStore([]int{argIdxs[0]})
// Allow the direct-access buffer 10% of the available memory. The rest will
// be given to the bufferedWindowOp queue. While it is somewhat more important
// for the direct-access buffer tuples to be kept in-memory, it only has to
// store a single column. TODO(drewk): play around with benchmarks to find a
// good empirically-supported fraction to use.
bufferMemLimit := int64(float64(args.MemoryLimit) * 0.10)
mainMemLimit := args.MemoryLimit - bufferMemLimit
buffer := colexecutils.NewSpillingBuffer(
args.BufferAllocator, bufferMemLimit, args.QueueCfg,
args.FdSemaphore, args.InputTypes, args.DiskAcc, colsToStore...)
base := firstValueBase{
partitionSeekerBase: partitionSeekerBase{
buffer: buffer,
partitionColIdx: args.PartitionColIdx,
},
framer: framer,
outputColIdx: args.OutputColIdx,
bufferArgIdx: 0, // The arg column is the first column in the buffer.
}
argType := args.InputTypes[argIdxs[0]]
switch typeconv.TypeFamilyToCanonicalTypeFamily(argType.Family()) {
case types.BoolFamily:
switch argType.Width() {
case -1:
default:
windower := &firstValueBoolWindow{firstValueBase: base}
return newBufferedWindowOperator(args, windower, argType, mainMemLimit), nil
}
case types.BytesFamily:
switch argType.Width() {
case -1:
default:
windower := &firstValueBytesWindow{firstValueBase: base}
return newBufferedWindowOperator(args, windower, argType, mainMemLimit), nil
}
case types.DecimalFamily:
switch argType.Width() {
case -1:
default:
windower := &firstValueDecimalWindow{firstValueBase: base}
return newBufferedWindowOperator(args, windower, argType, mainMemLimit), nil
}
case types.IntFamily:
switch argType.Width() {
case 16:
windower := &firstValueInt16Window{firstValueBase: base}
return newBufferedWindowOperator(args, windower, argType, mainMemLimit), nil
case 32:
windower := &firstValueInt32Window{firstValueBase: base}
return newBufferedWindowOperator(args, windower, argType, mainMemLimit), nil
case -1:
default:
windower := &firstValueInt64Window{firstValueBase: base}
return newBufferedWindowOperator(args, windower, argType, mainMemLimit), nil
}
case types.FloatFamily:
switch argType.Width() {
case -1:
default:
windower := &firstValueFloat64Window{firstValueBase: base}
return newBufferedWindowOperator(args, windower, argType, mainMemLimit), nil
}
case types.TimestampTZFamily:
switch argType.Width() {
case -1:
default:
windower := &firstValueTimestampWindow{firstValueBase: base}
return newBufferedWindowOperator(args, windower, argType, mainMemLimit), nil
}
case types.IntervalFamily:
switch argType.Width() {
case -1:
default:
windower := &firstValueIntervalWindow{firstValueBase: base}
return newBufferedWindowOperator(args, windower, argType, mainMemLimit), nil
}
case types.JsonFamily:
switch argType.Width() {
case -1:
default:
windower := &firstValueJSONWindow{firstValueBase: base}
return newBufferedWindowOperator(args, windower, argType, mainMemLimit), nil
}
case typeconv.DatumVecCanonicalTypeFamily:
switch argType.Width() {
case -1:
default:
windower := &firstValueDatumWindow{firstValueBase: base}
return newBufferedWindowOperator(args, windower, argType, mainMemLimit), nil
}
}
return nil, errors.Errorf("unsupported firstValue window operator type %s", argType.Name())
}
type firstValueBase struct {
partitionSeekerBase
colexecop.CloserHelper
framer windowFramer
outputColIdx int
bufferArgIdx int
}
type firstValueBoolWindow struct {
firstValueBase
}
var _ bufferedWindower = &firstValueBoolWindow{}
// processBatch implements the bufferedWindower interface.
func (w *firstValueBoolWindow) processBatch(batch coldata.Batch, startIdx, endIdx int) {
if startIdx >= endIdx {
// No processing needs to be done for this portion of the current partition.
return
}
outputVec := batch.ColVec(w.outputColIdx)
outputCol := outputVec.Bool()
outputNulls := outputVec.Nulls()
_, _ = outputCol.Get(startIdx), outputCol.Get(endIdx-1)
for i := startIdx; i < endIdx; i++ {
w.framer.next(w.Ctx)
requestedIdx := w.framer.frameFirstIdx()
if requestedIdx == -1 {
// The requested row does not exist.
outputNulls.SetNull(i)
continue
}
vec, idx, _ := w.buffer.GetVecWithTuple(w.Ctx, w.bufferArgIdx, requestedIdx)
if vec.Nulls().MaybeHasNulls() && vec.Nulls().NullAt(idx) {
outputNulls.SetNull(i)
continue
}
col := vec.Bool()
val := col.Get(idx)
//gcassert:bce
outputCol.Set(i, val)
}
}
type firstValueBytesWindow struct {
firstValueBase
}
var _ bufferedWindower = &firstValueBytesWindow{}
// processBatch implements the bufferedWindower interface.
func (w *firstValueBytesWindow) processBatch(batch coldata.Batch, startIdx, endIdx int) {
if startIdx >= endIdx {
// No processing needs to be done for this portion of the current partition.
return
}
outputVec := batch.ColVec(w.outputColIdx)
outputCol := outputVec.Bytes()
outputNulls := outputVec.Nulls()
for i := startIdx; i < endIdx; i++ {
w.framer.next(w.Ctx)
requestedIdx := w.framer.frameFirstIdx()
if requestedIdx == -1 {
// The requested row does not exist.
outputNulls.SetNull(i)
continue
}
vec, idx, _ := w.buffer.GetVecWithTuple(w.Ctx, w.bufferArgIdx, requestedIdx)
if vec.Nulls().MaybeHasNulls() && vec.Nulls().NullAt(idx) {
outputNulls.SetNull(i)
continue
}
col := vec.Bytes()
outputCol.Copy(col, i, idx)
}
}
type firstValueDecimalWindow struct {
firstValueBase
}
var _ bufferedWindower = &firstValueDecimalWindow{}
// processBatch implements the bufferedWindower interface.
func (w *firstValueDecimalWindow) processBatch(batch coldata.Batch, startIdx, endIdx int) {
if startIdx >= endIdx {
// No processing needs to be done for this portion of the current partition.
return
}
outputVec := batch.ColVec(w.outputColIdx)
outputCol := outputVec.Decimal()
outputNulls := outputVec.Nulls()
_, _ = outputCol.Get(startIdx), outputCol.Get(endIdx-1)
for i := startIdx; i < endIdx; i++ {
w.framer.next(w.Ctx)
requestedIdx := w.framer.frameFirstIdx()
if requestedIdx == -1 {
// The requested row does not exist.
outputNulls.SetNull(i)
continue
}
vec, idx, _ := w.buffer.GetVecWithTuple(w.Ctx, w.bufferArgIdx, requestedIdx)
if vec.Nulls().MaybeHasNulls() && vec.Nulls().NullAt(idx) {
outputNulls.SetNull(i)
continue
}
col := vec.Decimal()
val := col.Get(idx)
//gcassert:bce
outputCol.Set(i, val)
}
}
type firstValueInt16Window struct {
firstValueBase
}
var _ bufferedWindower = &firstValueInt16Window{}
// processBatch implements the bufferedWindower interface.
func (w *firstValueInt16Window) processBatch(batch coldata.Batch, startIdx, endIdx int) {
if startIdx >= endIdx {
// No processing needs to be done for this portion of the current partition.
return
}
outputVec := batch.ColVec(w.outputColIdx)
outputCol := outputVec.Int16()
outputNulls := outputVec.Nulls()
_, _ = outputCol.Get(startIdx), outputCol.Get(endIdx-1)
for i := startIdx; i < endIdx; i++ {
w.framer.next(w.Ctx)
requestedIdx := w.framer.frameFirstIdx()
if requestedIdx == -1 {
// The requested row does not exist.
outputNulls.SetNull(i)
continue
}
vec, idx, _ := w.buffer.GetVecWithTuple(w.Ctx, w.bufferArgIdx, requestedIdx)
if vec.Nulls().MaybeHasNulls() && vec.Nulls().NullAt(idx) {
outputNulls.SetNull(i)
continue
}
col := vec.Int16()
val := col.Get(idx)
//gcassert:bce
outputCol.Set(i, val)
}
}
type firstValueInt32Window struct {
firstValueBase
}
var _ bufferedWindower = &firstValueInt32Window{}
// processBatch implements the bufferedWindower interface.
func (w *firstValueInt32Window) processBatch(batch coldata.Batch, startIdx, endIdx int) {
if startIdx >= endIdx {
// No processing needs to be done for this portion of the current partition.
return
}
outputVec := batch.ColVec(w.outputColIdx)
outputCol := outputVec.Int32()
outputNulls := outputVec.Nulls()
_, _ = outputCol.Get(startIdx), outputCol.Get(endIdx-1)
for i := startIdx; i < endIdx; i++ {
w.framer.next(w.Ctx)
requestedIdx := w.framer.frameFirstIdx()
if requestedIdx == -1 {
// The requested row does not exist.
outputNulls.SetNull(i)
continue
}
vec, idx, _ := w.buffer.GetVecWithTuple(w.Ctx, w.bufferArgIdx, requestedIdx)
if vec.Nulls().MaybeHasNulls() && vec.Nulls().NullAt(idx) {
outputNulls.SetNull(i)
continue
}
col := vec.Int32()
val := col.Get(idx)
//gcassert:bce
outputCol.Set(i, val)
}
}
type firstValueInt64Window struct {
firstValueBase
}
var _ bufferedWindower = &firstValueInt64Window{}
// processBatch implements the bufferedWindower interface.
func (w *firstValueInt64Window) processBatch(batch coldata.Batch, startIdx, endIdx int) {
if startIdx >= endIdx {
// No processing needs to be done for this portion of the current partition.
return
}
outputVec := batch.ColVec(w.outputColIdx)
outputCol := outputVec.Int64()
outputNulls := outputVec.Nulls()
_, _ = outputCol.Get(startIdx), outputCol.Get(endIdx-1)
for i := startIdx; i < endIdx; i++ {
w.framer.next(w.Ctx)
requestedIdx := w.framer.frameFirstIdx()
if requestedIdx == -1 {
// The requested row does not exist.
outputNulls.SetNull(i)
continue
}
vec, idx, _ := w.buffer.GetVecWithTuple(w.Ctx, w.bufferArgIdx, requestedIdx)
if vec.Nulls().MaybeHasNulls() && vec.Nulls().NullAt(idx) {
outputNulls.SetNull(i)
continue
}
col := vec.Int64()
val := col.Get(idx)
//gcassert:bce
outputCol.Set(i, val)
}
}
type firstValueFloat64Window struct {
firstValueBase
}
var _ bufferedWindower = &firstValueFloat64Window{}
// processBatch implements the bufferedWindower interface.
func (w *firstValueFloat64Window) processBatch(batch coldata.Batch, startIdx, endIdx int) {
if startIdx >= endIdx {
// No processing needs to be done for this portion of the current partition.
return
}
outputVec := batch.ColVec(w.outputColIdx)
outputCol := outputVec.Float64()
outputNulls := outputVec.Nulls()
_, _ = outputCol.Get(startIdx), outputCol.Get(endIdx-1)
for i := startIdx; i < endIdx; i++ {
w.framer.next(w.Ctx)
requestedIdx := w.framer.frameFirstIdx()
if requestedIdx == -1 {
// The requested row does not exist.
outputNulls.SetNull(i)
continue
}
vec, idx, _ := w.buffer.GetVecWithTuple(w.Ctx, w.bufferArgIdx, requestedIdx)
if vec.Nulls().MaybeHasNulls() && vec.Nulls().NullAt(idx) {
outputNulls.SetNull(i)
continue
}
col := vec.Float64()
val := col.Get(idx)
//gcassert:bce
outputCol.Set(i, val)
}
}
type firstValueTimestampWindow struct {
firstValueBase
}
var _ bufferedWindower = &firstValueTimestampWindow{}
// processBatch implements the bufferedWindower interface.
func (w *firstValueTimestampWindow) processBatch(batch coldata.Batch, startIdx, endIdx int) {
if startIdx >= endIdx {
// No processing needs to be done for this portion of the current partition.
return
}
outputVec := batch.ColVec(w.outputColIdx)
outputCol := outputVec.Timestamp()
outputNulls := outputVec.Nulls()
_, _ = outputCol.Get(startIdx), outputCol.Get(endIdx-1)
for i := startIdx; i < endIdx; i++ {
w.framer.next(w.Ctx)
requestedIdx := w.framer.frameFirstIdx()
if requestedIdx == -1 {
// The requested row does not exist.
outputNulls.SetNull(i)
continue
}
vec, idx, _ := w.buffer.GetVecWithTuple(w.Ctx, w.bufferArgIdx, requestedIdx)
if vec.Nulls().MaybeHasNulls() && vec.Nulls().NullAt(idx) {
outputNulls.SetNull(i)
continue
}
col := vec.Timestamp()
val := col.Get(idx)
//gcassert:bce
outputCol.Set(i, val)
}
}
type firstValueIntervalWindow struct {
firstValueBase
}
var _ bufferedWindower = &firstValueIntervalWindow{}
// processBatch implements the bufferedWindower interface.
func (w *firstValueIntervalWindow) processBatch(batch coldata.Batch, startIdx, endIdx int) {
if startIdx >= endIdx {
// No processing needs to be done for this portion of the current partition.
return
}
outputVec := batch.ColVec(w.outputColIdx)
outputCol := outputVec.Interval()
outputNulls := outputVec.Nulls()
_, _ = outputCol.Get(startIdx), outputCol.Get(endIdx-1)
for i := startIdx; i < endIdx; i++ {
w.framer.next(w.Ctx)
requestedIdx := w.framer.frameFirstIdx()
if requestedIdx == -1 {
// The requested row does not exist.
outputNulls.SetNull(i)
continue
}
vec, idx, _ := w.buffer.GetVecWithTuple(w.Ctx, w.bufferArgIdx, requestedIdx)
if vec.Nulls().MaybeHasNulls() && vec.Nulls().NullAt(idx) {
outputNulls.SetNull(i)
continue
}
col := vec.Interval()
val := col.Get(idx)
//gcassert:bce
outputCol.Set(i, val)
}
}
type firstValueJSONWindow struct {
firstValueBase
}
var _ bufferedWindower = &firstValueJSONWindow{}
// processBatch implements the bufferedWindower interface.
func (w *firstValueJSONWindow) processBatch(batch coldata.Batch, startIdx, endIdx int) {
if startIdx >= endIdx {
// No processing needs to be done for this portion of the current partition.
return
}
outputVec := batch.ColVec(w.outputColIdx)
outputCol := outputVec.JSON()
outputNulls := outputVec.Nulls()
for i := startIdx; i < endIdx; i++ {
w.framer.next(w.Ctx)
requestedIdx := w.framer.frameFirstIdx()
if requestedIdx == -1 {
// The requested row does not exist.
outputNulls.SetNull(i)
continue
}
vec, idx, _ := w.buffer.GetVecWithTuple(w.Ctx, w.bufferArgIdx, requestedIdx)
if vec.Nulls().MaybeHasNulls() && vec.Nulls().NullAt(idx) {
outputNulls.SetNull(i)
continue
}
col := vec.JSON()
outputCol.Copy(col, i, idx)
}
}
type firstValueDatumWindow struct {
firstValueBase
}
var _ bufferedWindower = &firstValueDatumWindow{}
// processBatch implements the bufferedWindower interface.
func (w *firstValueDatumWindow) processBatch(batch coldata.Batch, startIdx, endIdx int) {
if startIdx >= endIdx {
// No processing needs to be done for this portion of the current partition.
return
}
outputVec := batch.ColVec(w.outputColIdx)
outputCol := outputVec.Datum()
outputNulls := outputVec.Nulls()
for i := startIdx; i < endIdx; i++ {
w.framer.next(w.Ctx)
requestedIdx := w.framer.frameFirstIdx()
if requestedIdx == -1 {
// The requested row does not exist.
outputNulls.SetNull(i)
continue
}
vec, idx, _ := w.buffer.GetVecWithTuple(w.Ctx, w.bufferArgIdx, requestedIdx)
if vec.Nulls().MaybeHasNulls() && vec.Nulls().NullAt(idx) {
outputNulls.SetNull(i)
continue
}
col := vec.Datum()
val := col.Get(idx)
outputCol.Set(i, val)
}
}
// transitionToProcessing implements the bufferedWindower interface.
func (b *firstValueBase) transitionToProcessing() {
b.framer.startPartition(b.Ctx, b.partitionSize, b.buffer)
}
// startNewPartition implements the bufferedWindower interface.
func (b *firstValueBase) startNewPartition() {
b.partitionSize = 0
b.buffer.Reset(b.Ctx)
}
// Init implements the bufferedWindower interface.
func (b *firstValueBase) Init(ctx context.Context) {
if !b.InitHelper.Init(ctx) {
return
}
}
// Close implements the bufferedWindower interface.
func (b *firstValueBase) Close(ctx context.Context) {
if !b.CloserHelper.Close() {
return
}
b.buffer.Close(ctx)
}
| pkg/sql/colexec/colexecwindow/first_value.eg.go | 0 | https://github.com/cockroachdb/cockroach/commit/f89261dd76554360960fbb91788d9a541ae80ec3 | [
0.00019875589350704104,
0.0001707297924440354,
0.00016459973994642496,
0.00016891254927031696,
0.000006510952516691759
] |
{
"id": 0,
"code_window": [
"\t\"github.com/rclone/rclone/fs/config/configmap\"\n",
"\t\"github.com/rclone/rclone/fs/config/configstruct\"\n",
"\t\"github.com/rclone/rclone/fs/fshttp\"\n",
"\t\"github.com/rclone/rclone/fs/hash\"\n",
"\t\"github.com/rclone/rclone/fs/walk\"\n",
"\tqsConfig \"github.com/yunify/qingstor-sdk-go/config\"\n",
"\tqsErr \"github.com/yunify/qingstor-sdk-go/request/errors\"\n",
"\tqs \"github.com/yunify/qingstor-sdk-go/service\"\n",
")\n",
"\n",
"// Register with Fs\n",
"func init() {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tqsConfig \"github.com/yunify/qingstor-sdk-go/v3/config\"\n",
"\tqsErr \"github.com/yunify/qingstor-sdk-go/v3/request/errors\"\n",
"\tqs \"github.com/yunify/qingstor-sdk-go/v3/service\"\n"
],
"file_path": "backend/qingstor/qingstor.go",
"type": "replace",
"edit_start_line_idx": 26
} | // Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/
// +build !plan9
package qingstor
import (
"context"
"fmt"
"io"
"net/http"
"path"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
qsConfig "github.com/yunify/qingstor-sdk-go/config"
qsErr "github.com/yunify/qingstor-sdk-go/request/errors"
qs "github.com/yunify/qingstor-sdk-go/service"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "qingstor",
Description: "QingCloud Object Storage",
NewFs: NewFs,
Options: []fs.Option{{
Name: "env_auth",
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter QingStor credentials in the next step",
}, {
Value: "true",
Help: "Get QingStor credentials from the environment (env vars or IAM)",
}},
}, {
Name: "access_key_id",
Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "secret_access_key",
Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "endpoint",
Help: "Enter a endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"",
}, {
Name: "zone",
Help: "Zone to connect to.\nDefault is \"pek3a\".",
Examples: []fs.OptionExample{{
Value: "pek3a",
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
}, {
Value: "sh1a",
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
}, {
Value: "gd2a",
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
}},
}, {
Name: "connection_retries",
Help: "Number of connection retries.",
Default: 3,
Advanced: true,
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).
If you are uploading small numbers of large files over high speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 1,
Advanced: true,
}},
})
}
// Constants
const (
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
minChunkSize = fs.SizeSuffix(minMultiPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)
// Globals
func timestampToTime(tp int64) time.Time {
timeLayout := time.RFC3339Nano
ts := time.Unix(tp, 0).Format(timeLayout)
tm, _ := time.Parse(timeLayout, ts)
return tm.UTC()
}
// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Endpoint string `config:"endpoint"`
Zone string `config:"zone"`
ConnectionRetries int `config:"connection_retries"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
}
// Fs represents a remote qingstor server
type Fs struct {
name string // The name of the remote
root string // The root is a subdir, is a special object
opt Options // parsed options
features *fs.Features // optional features
svc *qs.Service // The connection to the qingstor server
zone string // The zone we are working on
bucket string // The bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucketOK and bucketDeleted
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
}
// Object describes a qingstor object
type Object struct {
// Will definitely have everything but meta which may be nil
//
// List will read everything but meta & mimeType - to fill
// that in you need to call readMetaData
fs *Fs // what this object is part of
remote string // object of remote
etag string // md5sum of the object
size int64 // length of the object content
mimeType string // ContentType of object - may be ""
lastModified time.Time // Last modified
	encrypted    bool      // whether the object is encrypted
algo string // Custom encryption algorithms
}
// ------------------------------------------------------------
// Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// qsParsePath parses a qingstor 'url'
func qsParsePath(path string) (bucket, key string, err error) {
// Pattern to match a qingstor path
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("Couldn't parse bucket out of qingstor path %q", path)
} else {
bucket, key = parts[1], parts[2]
key = strings.Trim(key, "/")
}
return
}
// Split a URL into three parts: protocol, host and port
func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
/*
Pattern to match a endpoint,
eg: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", 443
"http(s)//qingstor.com" --> "http(s)", "qingstor.com", ""
"qingstor.com" --> "", "qingstor.com", ""
*/
defer func() {
if r := recover(); r != nil {
switch x := r.(type) {
case error:
err = x
default:
err = nil
}
}
}()
var mather = regexp.MustCompile(`^(?:(http|https)://)*(\w+\.(?:[\w\.])*)(?::(\d{0,5}))*$`)
parts := mather.FindStringSubmatch(endpoint)
protocol, host, port = parts[1], parts[2], parts[3]
return
}
// qsServiceConnection makes a connection to qingstor
func qsServiceConnection(opt *Options) (*qs.Service, error) {
accessKeyID := opt.AccessKeyID
secretAccessKey := opt.SecretAccessKey
switch {
case opt.EnvAuth:
// No need for empty checks if "env_auth" is true
case accessKeyID == "" && secretAccessKey == "":
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
case accessKeyID == "":
return nil, errors.New("access_key_id not found")
case secretAccessKey == "":
return nil, errors.New("secret_access_key not found")
}
protocol := "https"
host := "qingstor.com"
port := 443
endpoint := opt.Endpoint
if endpoint != "" {
_protocol, _host, _port, err := qsParseEndpoint(endpoint)
if err != nil {
return nil, fmt.Errorf("The endpoint \"%s\" format error", endpoint)
}
if _protocol != "" {
protocol = _protocol
}
host = _host
if _port != "" {
port, _ = strconv.Atoi(_port)
} else if protocol == "http" {
port = 80
}
}
cf, err := qsConfig.NewDefault()
if err != nil {
return nil, err
}
cf.AccessKeyID = accessKeyID
cf.SecretAccessKey = secretAccessKey
cf.Protocol = protocol
cf.Host = host
cf.Port = port
cf.ConnectionRetries = opt.ConnectionRetries
cf.Connection = fshttp.NewClient(fs.Config)
return qs.Init(cf)
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "qingstor: chunk size")
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
}
bucket, key, err := qsParsePath(root)
if err != nil {
return nil, err
}
svc, err := qsServiceConnection(opt)
if err != nil {
return nil, err
}
if opt.Zone == "" {
opt.Zone = "pek3a"
}
f := &Fs{
name: name,
root: key,
opt: *opt,
svc: svc,
zone: opt.Zone,
bucket: bucket,
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
if f.root != "" {
if !strings.HasSuffix(f.root, "/") {
f.root += "/"
}
		// Check to see if the object exists
bucketInit, err := svc.Bucket(bucket, opt.Zone)
if err != nil {
return nil, err
}
_, err = bucketInit.HeadObject(key, &qs.HeadObjectInput{})
if err == nil {
f.root = path.Dir(key)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.root == "" {
return fmt.Sprintf("QingStor bucket %s", f.bucket)
}
return fmt.Sprintf("QingStor bucket %s root %s", f.bucket, f.root)
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
//return time.Nanosecond
	// Not supported temporarily
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
//return hash.HashSet(hash.HashNone)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Put creates a new object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
fsObj := &Object{
fs: f,
remote: src.Remote(),
}
return fsObj, fsObj.Update(ctx, in, src, options...)
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.fs
key := f.root + remote
source := path.Join("/"+srcFs.bucket, srcFs.root+srcObj.remote)
fs.Debugf(f, "Copied, source key is: %s, and dst key is: %s", source, key)
req := qs.PutObjectInput{
XQSCopySource: &source,
}
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return nil, err
}
_, err = bucketInit.PutObject(key, &req)
if err != nil {
fs.Debugf(f, "Copy Failed, API Error: %v", err)
return nil, err
}
return f.NewObject(ctx, remote)
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// Return an Object from a path
//
// If it can't be found it returns the error ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *qs.KeyType) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
// Set info
if info.Size != nil {
o.size = *info.Size
}
if info.Etag != nil {
o.etag = qs.StringValue(info.Etag)
}
if info.Modified == nil {
fs.Logf(o, "Failed to read last modified")
o.lastModified = time.Now()
} else {
o.lastModified = timestampToTime(int64(*info.Modified))
}
if info.MimeType != nil {
o.mimeType = qs.StringValue(info.MimeType)
}
if info.Encrypted != nil {
o.encrypted = qs.BoolValue(info.Encrypted)
}
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// listFn is called from list to handle an object.
type listFn func(remote string, object *qs.KeyType, isDirectory bool) error
// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error {
prefix := f.root
if dir != "" {
prefix += dir + "/"
}
delimiter := ""
if !recurse {
delimiter = "/"
}
maxLimit := int(listLimitSize)
var marker *string
for {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
// FIXME need to implement ALL loop
req := qs.ListObjectsInput{
Delimiter: &delimiter,
Prefix: &prefix,
Limit: &maxLimit,
Marker: marker,
}
resp, err := bucketInit.ListObjects(&req)
if err != nil {
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
err = fs.ErrorDirNotFound
}
}
return err
}
rootLength := len(f.root)
if !recurse {
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix == nil {
fs.Logf(f, "Nil common prefix received")
continue
}
remote := *commonPrefix
if !strings.HasPrefix(remote, f.root) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
err = fn(remote, &qs.KeyType{Key: &remote}, true)
if err != nil {
return err
}
}
}
for _, object := range resp.Keys {
key := qs.StringValue(object.Key)
if !strings.HasPrefix(key, f.root) {
fs.Logf(f, "Odd name received %q", key)
continue
}
remote := key[rootLength:]
err = fn(remote, object, false)
if err != nil {
return err
}
}
// Use NextMarker if set, otherwise use last Key
if resp.NextMarker == nil || *resp.NextMarker == "" {
//marker = resp.Keys[len(resp.Keys)-1].Key
break
} else {
marker = resp.NextMarker
}
}
return nil
}
// Convert a list item into a BasicInfo
func (f *Fs) itemToDirEntry(remote string, object *qs.KeyType, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
size := int64(0)
if object.Size != nil {
size = *object.Size
}
d := fs.NewDir(remote, time.Time{}).SetSize(size)
return d, nil
}
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketDeleted = false
f.bucketOKMu.Unlock()
}
}
// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(ctx, dir, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
})
if err != nil {
return nil, err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return entries, nil
}
// listBuckets lists the buckets to out
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
req := qs.ListBucketsInput{
Location: &f.zone,
}
resp, err := f.svc.ListBuckets(&req)
if err != nil {
return nil, err
}
for _, bucket := range resp.Buckets {
d := fs.NewDir(qs.StringValue(bucket.Name), qs.TimeValue(bucket.Created))
entries = append(entries, d)
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(ctx, dir)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
err = f.list(ctx, dir, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
// Check if the bucket exists
func (f *Fs) dirExists() (bool, error) {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return false, err
}
_, err = bucketInit.Head()
if err == nil {
return true, nil
}
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
err = nil
}
}
return false, err
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
	/* When a bucket is deleted, QingStor needs about 60 seconds to sync its status,
	   so wait for that sync to finish if we try to operate on a just-deleted bucket
	*/
retries := 0
for retries <= 120 {
statistics, err := bucketInit.GetStatistics()
if statistics == nil || err != nil {
break
}
switch *statistics.Status {
case "deleted":
fs.Debugf(f, "Wait for qingstor sync bucket status, retries: %d", retries)
time.Sleep(time.Second * 1)
retries++
continue
default:
break
}
break
}
if !f.bucketDeleted {
exists, err := f.dirExists()
if err == nil {
f.bucketOK = exists
}
if err != nil || exists {
return err
}
}
_, err = bucketInit.Put()
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusConflict {
err = nil
}
}
if err == nil {
f.bucketOK = true
f.bucketDeleted = false
}
return err
}
// dirIsEmpty checks if the bucket is empty
func (f *Fs) dirIsEmpty() (bool, error) {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return true, err
}
statistics, err := bucketInit.GetStatistics()
if err != nil {
return true, err
}
if *statistics.Count == 0 {
return true, nil
}
return false, nil
}
// Rmdir delete a bucket
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
isEmpty, err := f.dirIsEmpty()
if err != nil {
return err
}
if !isEmpty {
fs.Debugf(f, "The bucket %s you tried to delete not empty.", f.bucket)
return errors.New("BucketNotEmpty: The bucket you tried to delete is not empty")
}
fs.Debugf(f, "Tried to delete the bucket %s", f.bucket)
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
retries := 0
for retries <= 10 {
_, delErr := bucketInit.Delete()
if delErr != nil {
if e, ok := delErr.(*qsErr.QingStorError); ok {
switch e.Code {
// The status of "lease" takes a few seconds to "ready" when creating a new bucket
// wait for lease status ready
case "lease_not_ready":
fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries)
retries++
time.Sleep(time.Second * 1)
continue
default:
err = e
break
}
}
} else {
err = delErr
}
break
}
if err == nil {
f.bucketOK = false
f.bucketDeleted = true
}
return err
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
key := o.fs.root + o.remote
fs.Debugf(o, "Read metadata of key: %s", key)
resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{})
if err != nil {
fs.Debugf(o, "Read metadata failed, API Error: %v", err)
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
// Ignore missing Content-Length assuming it is 0
if resp.ContentLength != nil {
o.size = *resp.ContentLength
}
if resp.ETag != nil {
o.etag = qs.StringValue(resp.ETag)
}
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
o.lastModified = time.Now()
} else {
o.lastModified = *resp.LastModified
}
if resp.ContentType != nil {
o.mimeType = qs.StringValue(resp.ContentType)
}
if resp.XQSEncryptionCustomerAlgorithm != nil {
o.algo = qs.StringValue(resp.XQSEncryptionCustomerAlgorithm)
o.encrypted = true
}
return nil
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata, %v", err)
return time.Now()
}
modTime := o.lastModified
return modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
err := o.readMetaData()
if err != nil {
return err
}
o.lastModified = modTime
mimeType := fs.MimeType(ctx, o)
if o.size >= maxSizeForCopy {
fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
return nil
}
// Copy the object to itself to update the metadata
key := o.fs.root + o.remote
sourceKey := path.Join("/", o.fs.bucket, key)
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
req := qs.PutObjectInput{
XQSCopySource: &sourceKey,
ContentType: &mimeType,
}
_, err = bucketInit.PutObject(key, &req)
return err
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return nil, err
}
key := o.fs.root + o.remote
req := qs.GetObjectInput{}
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
_, value := option.Header()
req.Range = &value
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
resp, err := bucketInit.GetObject(key, &req)
if err != nil {
return nil, err
}
return resp.Body, nil
}
// Update in to the object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
// The maximum size of upload object is multipartUploadSize * MaxMultipleParts
err := o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
key := o.fs.root + o.remote
// Guess the content type
mimeType := fs.MimeType(ctx, src)
req := uploadInput{
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
partSize: int64(o.fs.opt.ChunkSize),
concurrency: o.fs.opt.UploadConcurrency,
}
uploader := newUploader(&req)
size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if multipart {
err = uploader.upload()
} else {
err = uploader.singlePartUpload(in, size)
}
if err != nil {
return err
}
// Read Metadata of object
err = o.readMetaData()
return err
}
// Remove this object
func (o *Object) Remove(ctx context.Context) error {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
key := o.fs.root + o.remote
_, err = bucketInit.DeleteObject(key)
return err
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
etag := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(etag) {
fs.Debugf(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
return "", nil
}
return etag, nil
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// String returns a description of the Object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.size
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return ""
}
return o.mimeType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
_ fs.ListRer = &Fs{}
_ fs.MimeTyper = &Object{}
)
| backend/qingstor/qingstor.go | 1 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.7901530861854553,
0.007546928711235523,
0.00016074188170023263,
0.00017021610983647406,
0.07496470957994461
] |
{
"id": 0,
"code_window": [
"\t\"github.com/rclone/rclone/fs/config/configmap\"\n",
"\t\"github.com/rclone/rclone/fs/config/configstruct\"\n",
"\t\"github.com/rclone/rclone/fs/fshttp\"\n",
"\t\"github.com/rclone/rclone/fs/hash\"\n",
"\t\"github.com/rclone/rclone/fs/walk\"\n",
"\tqsConfig \"github.com/yunify/qingstor-sdk-go/config\"\n",
"\tqsErr \"github.com/yunify/qingstor-sdk-go/request/errors\"\n",
"\tqs \"github.com/yunify/qingstor-sdk-go/service\"\n",
")\n",
"\n",
"// Register with Fs\n",
"func init() {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tqsConfig \"github.com/yunify/qingstor-sdk-go/v3/config\"\n",
"\tqsErr \"github.com/yunify/qingstor-sdk-go/v3/request/errors\"\n",
"\tqs \"github.com/yunify/qingstor-sdk-go/v3/service\"\n"
],
"file_path": "backend/qingstor/qingstor.go",
"type": "replace",
"edit_start_line_idx": 26
} | module github.com/ncw/swift
| vendor/github.com/ncw/swift/go.mod | 0 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.00017221501911990345,
0.00017221501911990345,
0.00017221501911990345,
0.00017221501911990345,
0
] |
{
"id": 0,
"code_window": [
"\t\"github.com/rclone/rclone/fs/config/configmap\"\n",
"\t\"github.com/rclone/rclone/fs/config/configstruct\"\n",
"\t\"github.com/rclone/rclone/fs/fshttp\"\n",
"\t\"github.com/rclone/rclone/fs/hash\"\n",
"\t\"github.com/rclone/rclone/fs/walk\"\n",
"\tqsConfig \"github.com/yunify/qingstor-sdk-go/config\"\n",
"\tqsErr \"github.com/yunify/qingstor-sdk-go/request/errors\"\n",
"\tqs \"github.com/yunify/qingstor-sdk-go/service\"\n",
")\n",
"\n",
"// Register with Fs\n",
"func init() {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tqsConfig \"github.com/yunify/qingstor-sdk-go/v3/config\"\n",
"\tqsErr \"github.com/yunify/qingstor-sdk-go/v3/request/errors\"\n",
"\tqs \"github.com/yunify/qingstor-sdk-go/v3/service\"\n"
],
"file_path": "backend/qingstor/qingstor.go",
"type": "replace",
"edit_start_line_idx": 26
} | package fuse
| vendor/bazil.org/fuse/fuse_kernel_std.go | 0 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.0001725226902635768,
0.0001725226902635768,
0.0001725226902635768,
0.0001725226902635768,
0
] |
{
"id": 0,
"code_window": [
"\t\"github.com/rclone/rclone/fs/config/configmap\"\n",
"\t\"github.com/rclone/rclone/fs/config/configstruct\"\n",
"\t\"github.com/rclone/rclone/fs/fshttp\"\n",
"\t\"github.com/rclone/rclone/fs/hash\"\n",
"\t\"github.com/rclone/rclone/fs/walk\"\n",
"\tqsConfig \"github.com/yunify/qingstor-sdk-go/config\"\n",
"\tqsErr \"github.com/yunify/qingstor-sdk-go/request/errors\"\n",
"\tqs \"github.com/yunify/qingstor-sdk-go/service\"\n",
")\n",
"\n",
"// Register with Fs\n",
"func init() {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tqsConfig \"github.com/yunify/qingstor-sdk-go/v3/config\"\n",
"\tqsErr \"github.com/yunify/qingstor-sdk-go/v3/request/errors\"\n",
"\tqs \"github.com/yunify/qingstor-sdk-go/v3/service\"\n"
],
"file_path": "backend/qingstor/qingstor.go",
"type": "replace",
"edit_start_line_idx": 26
} | package serve
import (
"fmt"
"html/template"
"net/http"
"net/url"
"path"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/rest"
)
// DirEntry is a directory entry
type DirEntry struct {
remote string
URL string
Leaf string
}
// Directory represents a directory
type Directory struct {
DirRemote string
Title string
Entries []DirEntry
Query string
HTMLTemplate *template.Template
}
// NewDirectory makes an empty Directory
func NewDirectory(dirRemote string, htmlTemplate *template.Template) *Directory {
d := &Directory{
DirRemote: dirRemote,
Title: fmt.Sprintf("Directory listing of /%s", dirRemote),
HTMLTemplate: htmlTemplate,
}
return d
}
// SetQuery sets the query parameters for each URL
func (d *Directory) SetQuery(queryParams url.Values) *Directory {
d.Query = ""
if len(queryParams) > 0 {
d.Query = "?" + queryParams.Encode()
}
return d
}
// AddEntry adds an entry to that directory
func (d *Directory) AddEntry(remote string, isDir bool) {
leaf := path.Base(remote)
if leaf == "." {
leaf = ""
}
urlRemote := leaf
if isDir {
leaf += "/"
urlRemote += "/"
}
d.Entries = append(d.Entries, DirEntry{
remote: remote,
URL: rest.URLPathEscape(urlRemote) + d.Query,
Leaf: leaf,
})
}
// Error returns an http.StatusInternalServerError and logs the error
func Error(what interface{}, w http.ResponseWriter, text string, err error) {
fs.CountError(err)
fs.Errorf(what, "%s: %v", text, err)
http.Error(w, text+".", http.StatusInternalServerError)
}
// Serve serves a directory
func (d *Directory) Serve(w http.ResponseWriter, r *http.Request) {
// Account the transfer
tr := accounting.Stats(r.Context()).NewTransferRemoteSize(d.DirRemote, -1)
defer tr.Done(nil)
fs.Infof(d.DirRemote, "%s: Serving directory", r.RemoteAddr)
err := d.HTMLTemplate.Execute(w, d)
if err != nil {
Error(d.DirRemote, w, "Failed to render template", err)
return
}
}
| cmd/serve/httplib/serve/dir.go | 0 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.00048725458327680826,
0.00021635265147779137,
0.00016819522716104984,
0.000170752260601148,
0.00009934211266227067
] |
{
"id": 1,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t}\n",
"\t\t// Use NextMarker if set, otherwise use last Key\n",
"\t\tif resp.NextMarker == nil || *resp.NextMarker == \"\" {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\tif resp.HasMore != nil && !*resp.HasMore {\n",
"\t\t\tbreak\n",
"\t\t}\n"
],
"file_path": "backend/qingstor/qingstor.go",
"type": "add",
"edit_start_line_idx": 584
} | // Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/
// +build !plan9
package qingstor
import (
"context"
"fmt"
"io"
"net/http"
"path"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
qsConfig "github.com/yunify/qingstor-sdk-go/config"
qsErr "github.com/yunify/qingstor-sdk-go/request/errors"
qs "github.com/yunify/qingstor-sdk-go/service"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "qingstor",
Description: "QingCloud Object Storage",
NewFs: NewFs,
Options: []fs.Option{{
Name: "env_auth",
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter QingStor credentials in the next step",
}, {
Value: "true",
Help: "Get QingStor credentials from the environment (env vars or IAM)",
}},
}, {
Name: "access_key_id",
Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "secret_access_key",
Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "endpoint",
Help: "Enter a endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"",
}, {
Name: "zone",
Help: "Zone to connect to.\nDefault is \"pek3a\".",
Examples: []fs.OptionExample{{
Value: "pek3a",
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
}, {
Value: "sh1a",
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
}, {
Value: "gd2a",
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
}},
}, {
Name: "connection_retries",
Help: "Number of connection retries.",
Default: 3,
Advanced: true,
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).
If you are uploading small numbers of large files over high speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 1,
Advanced: true,
}},
})
}
// Constants
const (
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
minChunkSize = fs.SizeSuffix(minMultiPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)
// Globals
func timestampToTime(tp int64) time.Time {
timeLayout := time.RFC3339Nano
ts := time.Unix(tp, 0).Format(timeLayout)
tm, _ := time.Parse(timeLayout, ts)
return tm.UTC()
}
// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Endpoint string `config:"endpoint"`
Zone string `config:"zone"`
ConnectionRetries int `config:"connection_retries"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
}
// Fs represents a remote qingstor server
type Fs struct {
name string // The name of the remote
root string // The root is a subdir, is a special object
opt Options // parsed options
features *fs.Features // optional features
svc *qs.Service // The connection to the qingstor server
zone string // The zone we are working on
bucket string // The bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucketOK and bucketDeleted
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
}
// Object describes a qingstor object
type Object struct {
// Will definitely have everything but meta which may be nil
//
// List will read everything but meta & mimeType - to fill
// that in you need to call readMetaData
fs *Fs // what this object is part of
remote string // object of remote
etag string // md5sum of the object
size int64 // length of the object content
mimeType string // ContentType of object - may be ""
lastModified time.Time // Last modified
	encrypted    bool      // whether the object is encrypted
algo string // Custom encryption algorithms
}
// ------------------------------------------------------------
// Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// qsParsePath parses a qingstor 'url'
func qsParsePath(path string) (bucket, key string, err error) {
// Pattern to match a qingstor path
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("Couldn't parse bucket out of qingstor path %q", path)
} else {
bucket, key = parts[1], parts[2]
key = strings.Trim(key, "/")
}
return
}
// Split a URL into three parts: protocol, host and port
func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
/*
Pattern to match a endpoint,
eg: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", 443
"http(s)//qingstor.com" --> "http(s)", "qingstor.com", ""
"qingstor.com" --> "", "qingstor.com", ""
*/
defer func() {
if r := recover(); r != nil {
switch x := r.(type) {
case error:
err = x
default:
err = nil
}
}
}()
var mather = regexp.MustCompile(`^(?:(http|https)://)*(\w+\.(?:[\w\.])*)(?::(\d{0,5}))*$`)
parts := mather.FindStringSubmatch(endpoint)
protocol, host, port = parts[1], parts[2], parts[3]
return
}
// qsServiceConnection makes a connection to qingstor
func qsServiceConnection(opt *Options) (*qs.Service, error) {
accessKeyID := opt.AccessKeyID
secretAccessKey := opt.SecretAccessKey
switch {
case opt.EnvAuth:
// No need for empty checks if "env_auth" is true
case accessKeyID == "" && secretAccessKey == "":
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
case accessKeyID == "":
return nil, errors.New("access_key_id not found")
case secretAccessKey == "":
return nil, errors.New("secret_access_key not found")
}
protocol := "https"
host := "qingstor.com"
port := 443
endpoint := opt.Endpoint
if endpoint != "" {
_protocol, _host, _port, err := qsParseEndpoint(endpoint)
if err != nil {
			return nil, fmt.Errorf("the endpoint %q has an invalid format: %v", endpoint, err)
}
if _protocol != "" {
protocol = _protocol
}
host = _host
if _port != "" {
port, _ = strconv.Atoi(_port)
} else if protocol == "http" {
port = 80
}
}
cf, err := qsConfig.NewDefault()
if err != nil {
return nil, err
}
cf.AccessKeyID = accessKeyID
cf.SecretAccessKey = secretAccessKey
cf.Protocol = protocol
cf.Host = host
cf.Port = port
cf.ConnectionRetries = opt.ConnectionRetries
cf.Connection = fshttp.NewClient(fs.Config)
return qs.Init(cf)
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "qingstor: chunk size")
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
}
bucket, key, err := qsParsePath(root)
if err != nil {
return nil, err
}
svc, err := qsServiceConnection(opt)
if err != nil {
return nil, err
}
if opt.Zone == "" {
opt.Zone = "pek3a"
}
f := &Fs{
name: name,
root: key,
opt: *opt,
svc: svc,
zone: opt.Zone,
bucket: bucket,
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
if f.root != "" {
if !strings.HasSuffix(f.root, "/") {
f.root += "/"
}
//Check to see if the object exists
bucketInit, err := svc.Bucket(bucket, opt.Zone)
if err != nil {
return nil, err
}
_, err = bucketInit.HeadObject(key, &qs.HeadObjectInput{})
if err == nil {
f.root = path.Dir(key)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.root == "" {
return fmt.Sprintf("QingStor bucket %s", f.bucket)
}
return fmt.Sprintf("QingStor bucket %s root %s", f.bucket, f.root)
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
	//return time.Nanosecond
	// Modification times are not supported yet
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
//return hash.HashSet(hash.HashNone)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Put creates a new object with the contents of in
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
fsObj := &Object{
fs: f,
remote: src.Remote(),
}
return fsObj, fsObj.Update(ctx, in, src, options...)
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.fs
key := f.root + remote
source := path.Join("/"+srcFs.bucket, srcFs.root+srcObj.remote)
fs.Debugf(f, "Copied, source key is: %s, and dst key is: %s", source, key)
req := qs.PutObjectInput{
XQSCopySource: &source,
}
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return nil, err
}
_, err = bucketInit.PutObject(key, &req)
if err != nil {
fs.Debugf(f, "Copy Failed, API Error: %v", err)
return nil, err
}
return f.NewObject(ctx, remote)
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *qs.KeyType) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
// Set info
if info.Size != nil {
o.size = *info.Size
}
if info.Etag != nil {
o.etag = qs.StringValue(info.Etag)
}
if info.Modified == nil {
fs.Logf(o, "Failed to read last modified")
o.lastModified = time.Now()
} else {
o.lastModified = timestampToTime(int64(*info.Modified))
}
if info.MimeType != nil {
o.mimeType = qs.StringValue(info.MimeType)
}
if info.Encrypted != nil {
o.encrypted = qs.BoolValue(info.Encrypted)
}
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// listFn is called from list to handle an object.
type listFn func(remote string, object *qs.KeyType, isDirectory bool) error
// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error {
prefix := f.root
if dir != "" {
prefix += dir + "/"
}
delimiter := ""
if !recurse {
delimiter = "/"
}
maxLimit := int(listLimitSize)
var marker *string
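	// Page through the listing using Marker/NextMarker until no more results are returned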
for {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
// FIXME need to implement ALL loop
req := qs.ListObjectsInput{
Delimiter: &delimiter,
Prefix: &prefix,
Limit: &maxLimit,
Marker: marker,
}
resp, err := bucketInit.ListObjects(&req)
if err != nil {
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
err = fs.ErrorDirNotFound
}
}
return err
}
rootLength := len(f.root)
if !recurse {
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix == nil {
fs.Logf(f, "Nil common prefix received")
continue
}
remote := *commonPrefix
if !strings.HasPrefix(remote, f.root) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
err = fn(remote, &qs.KeyType{Key: &remote}, true)
if err != nil {
return err
}
}
}
for _, object := range resp.Keys {
key := qs.StringValue(object.Key)
if !strings.HasPrefix(key, f.root) {
fs.Logf(f, "Odd name received %q", key)
continue
}
remote := key[rootLength:]
err = fn(remote, object, false)
if err != nil {
return err
}
}
		// Continue from NextMarker if set, otherwise we are done
if resp.NextMarker == nil || *resp.NextMarker == "" {
//marker = resp.Keys[len(resp.Keys)-1].Key
break
} else {
marker = resp.NextMarker
}
}
return nil
}
// Convert a list item into a BasicInfo
func (f *Fs) itemToDirEntry(remote string, object *qs.KeyType, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
size := int64(0)
if object.Size != nil {
size = *object.Size
}
d := fs.NewDir(remote, time.Time{}).SetSize(size)
return d, nil
}
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketDeleted = false
f.bucketOKMu.Unlock()
}
}
// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(ctx, dir, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
})
if err != nil {
return nil, err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return entries, nil
}
// listBuckets lists the buckets to out
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
req := qs.ListBucketsInput{
Location: &f.zone,
}
resp, err := f.svc.ListBuckets(&req)
if err != nil {
return nil, err
}
for _, bucket := range resp.Buckets {
d := fs.NewDir(qs.StringValue(bucket.Name), qs.TimeValue(bucket.Created))
entries = append(entries, d)
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(ctx, dir)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
err = f.list(ctx, dir, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
// Check if the bucket exists
func (f *Fs) dirExists() (bool, error) {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return false, err
}
_, err = bucketInit.Head()
if err == nil {
return true, nil
}
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
err = nil
}
}
return false, err
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
	/* When a bucket is deleted, QingStor needs about 60 seconds to sync its status,
	so wait for the sync to finish before operating on a just-deleted bucket.
	*/
retries := 0
for retries <= 120 {
statistics, err := bucketInit.GetStatistics()
if statistics == nil || err != nil {
break
}
switch *statistics.Status {
case "deleted":
fs.Debugf(f, "Wait for qingstor sync bucket status, retries: %d", retries)
time.Sleep(time.Second * 1)
retries++
continue
default:
break
}
break
}
if !f.bucketDeleted {
exists, err := f.dirExists()
if err == nil {
f.bucketOK = exists
}
if err != nil || exists {
return err
}
}
_, err = bucketInit.Put()
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusConflict {
err = nil
}
}
if err == nil {
f.bucketOK = true
f.bucketDeleted = false
}
return err
}
// dirIsEmpty checks if the bucket is empty
func (f *Fs) dirIsEmpty() (bool, error) {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return true, err
}
statistics, err := bucketInit.GetStatistics()
if err != nil {
return true, err
}
if *statistics.Count == 0 {
return true, nil
}
return false, nil
}
// Rmdir deletes the bucket
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
isEmpty, err := f.dirIsEmpty()
if err != nil {
return err
}
if !isEmpty {
fs.Debugf(f, "The bucket %s you tried to delete not empty.", f.bucket)
return errors.New("BucketNotEmpty: The bucket you tried to delete is not empty")
}
fs.Debugf(f, "Tried to delete the bucket %s", f.bucket)
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
retries := 0
for retries <= 10 {
_, delErr := bucketInit.Delete()
if delErr != nil {
if e, ok := delErr.(*qsErr.QingStorError); ok {
switch e.Code {
				// The bucket's "lease" status takes a few seconds to become "ready"
				// after the bucket is created, so wait for it to be ready
case "lease_not_ready":
fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries)
retries++
time.Sleep(time.Second * 1)
continue
default:
err = e
break
}
}
} else {
err = delErr
}
break
}
if err == nil {
f.bucketOK = false
f.bucketDeleted = true
}
return err
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
key := o.fs.root + o.remote
fs.Debugf(o, "Read metadata of key: %s", key)
resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{})
if err != nil {
fs.Debugf(o, "Read metadata failed, API Error: %v", err)
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
// Ignore missing Content-Length assuming it is 0
if resp.ContentLength != nil {
o.size = *resp.ContentLength
}
if resp.ETag != nil {
o.etag = qs.StringValue(resp.ETag)
}
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
o.lastModified = time.Now()
} else {
o.lastModified = *resp.LastModified
}
if resp.ContentType != nil {
o.mimeType = qs.StringValue(resp.ContentType)
}
if resp.XQSEncryptionCustomerAlgorithm != nil {
o.algo = qs.StringValue(resp.XQSEncryptionCustomerAlgorithm)
o.encrypted = true
}
return nil
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata, %v", err)
return time.Now()
}
modTime := o.lastModified
return modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
err := o.readMetaData()
if err != nil {
return err
}
o.lastModified = modTime
mimeType := fs.MimeType(ctx, o)
if o.size >= maxSizeForCopy {
fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
return nil
}
// Copy the object to itself to update the metadata
key := o.fs.root + o.remote
sourceKey := path.Join("/", o.fs.bucket, key)
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
req := qs.PutObjectInput{
XQSCopySource: &sourceKey,
ContentType: &mimeType,
}
_, err = bucketInit.PutObject(key, &req)
return err
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return nil, err
}
key := o.fs.root + o.remote
req := qs.GetObjectInput{}
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
_, value := option.Header()
req.Range = &value
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
resp, err := bucketInit.GetObject(key, &req)
if err != nil {
return nil, err
}
return resp.Body, nil
}
// Update the object with the contents of in
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	// The maximum size of an uploaded object is multipartUploadSize * MaxMultipleParts
err := o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
key := o.fs.root + o.remote
// Guess the content type
mimeType := fs.MimeType(ctx, src)
req := uploadInput{
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
partSize: int64(o.fs.opt.ChunkSize),
concurrency: o.fs.opt.UploadConcurrency,
}
uploader := newUploader(&req)
size := src.Size()
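	// Use multipart upload when the size is unknown or at least upload_cutoff bytes
	// (for example, with the default 200M cutoff a 1 GiB object is uploaded in chunks)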
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if multipart {
err = uploader.upload()
} else {
err = uploader.singlePartUpload(in, size)
}
if err != nil {
return err
}
// Read Metadata of object
err = o.readMetaData()
return err
}
// Remove this object
func (o *Object) Remove(ctx context.Context) error {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
key := o.fs.root + o.remote
_, err = bucketInit.DeleteObject(key)
return err
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
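// matchMd5 matches a lower-case hex encoded MD5 checksum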
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
etag := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(etag) {
fs.Debugf(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
return "", nil
}
return etag, nil
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// String returns a description of the Object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.size
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return ""
}
return o.mimeType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
_ fs.ListRer = &Fs{}
_ fs.MimeTyper = &Object{}
)
| backend/qingstor/qingstor.go | 1 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.9931811690330505,
0.015241489745676517,
0.0001609237806405872,
0.00017119698168244213,
0.10654772818088531
] |
{
"id": 1,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t}\n",
"\t\t// Use NextMarker if set, otherwise use last Key\n",
"\t\tif resp.NextMarker == nil || *resp.NextMarker == \"\" {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\tif resp.HasMore != nil && !*resp.HasMore {\n",
"\t\t\tbreak\n",
"\t\t}\n"
],
"file_path": "backend/qingstor/qingstor.go",
"type": "add",
"edit_start_line_idx": 584
} | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
import (
"bytes"
"context"
"encoding/xml"
"errors"
"fmt"
"io"
"mime"
"net/http"
"os"
"path/filepath"
"strconv"
)
// Proppatch describes a property update instruction as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
type Proppatch struct {
// Remove specifies whether this patch removes properties. If it does not
// remove them, it sets them.
Remove bool
// Props contains the properties to be set or removed.
Props []Property
}
// Propstat describes a XML propstat element as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
type Propstat struct {
// Props contains the properties for which Status applies.
Props []Property
// Status defines the HTTP status code of the properties in Prop.
// Allowed values include, but are not limited to the WebDAV status
// code extensions for HTTP/1.1.
// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
Status int
// XMLError contains the XML representation of the optional error element.
// XML content within this field must not rely on any predefined
// namespace declarations or prefixes. If empty, the XML error element
// is omitted.
XMLError string
// ResponseDescription contains the contents of the optional
// responsedescription field. If empty, the XML element is omitted.
ResponseDescription string
}
// makePropstats returns a slice containing those of x and y whose Props slice
// is non-empty. If both are empty, it returns a slice containing an otherwise
// zero Propstat whose HTTP status code is 200 OK.
func makePropstats(x, y Propstat) []Propstat {
pstats := make([]Propstat, 0, 2)
if len(x.Props) != 0 {
pstats = append(pstats, x)
}
if len(y.Props) != 0 {
pstats = append(pstats, y)
}
if len(pstats) == 0 {
pstats = append(pstats, Propstat{
Status: http.StatusOK,
})
}
return pstats
}
// DeadPropsHolder holds the dead properties of a resource.
//
// Dead properties are those properties that are explicitly defined. In
// comparison, live properties, such as DAV:getcontentlength, are implicitly
// defined by the underlying resource, and cannot be explicitly overridden or
// removed. See the Terminology section of
// http://www.webdav.org/specs/rfc4918.html#rfc.section.3
//
// There is a whitelist of the names of live properties. This package handles
// all live properties, and will only pass non-whitelisted names to the Patch
// method of DeadPropsHolder implementations.
type DeadPropsHolder interface {
// DeadProps returns a copy of the dead properties held.
DeadProps() (map[xml.Name]Property, error)
// Patch patches the dead properties held.
//
// Patching is atomic; either all or no patches succeed. It returns (nil,
// non-nil) if an internal server error occurred, otherwise the Propstats
// collectively contain one Property for each proposed patch Property. If
// all patches succeed, Patch returns a slice of length one and a Propstat
// element with a 200 OK HTTP status code. If none succeed, for reasons
// other than an internal server error, no Propstat has status 200 OK.
//
// For more details on when various HTTP status codes apply, see
// http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status
Patch([]Proppatch) ([]Propstat, error)
}
// liveProps contains all supported, protected DAV: properties.
var liveProps = map[xml.Name]struct {
// findFn implements the propfind function of this property. If nil,
// it indicates a hidden property.
findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error)
// dir is true if the property applies to directories.
dir bool
}{
{Space: "DAV:", Local: "resourcetype"}: {
findFn: findResourceType,
dir: true,
},
{Space: "DAV:", Local: "displayname"}: {
findFn: findDisplayName,
dir: true,
},
{Space: "DAV:", Local: "getcontentlength"}: {
findFn: findContentLength,
dir: false,
},
{Space: "DAV:", Local: "getlastmodified"}: {
findFn: findLastModified,
// http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified
// suggests that getlastmodified should only apply to GETable
// resources, and this package does not support GET on directories.
//
// Nonetheless, some WebDAV clients expect child directories to be
// sortable by getlastmodified date, so this value is true, not false.
// See golang.org/issue/15334.
dir: true,
},
{Space: "DAV:", Local: "creationdate"}: {
findFn: nil,
dir: false,
},
{Space: "DAV:", Local: "getcontentlanguage"}: {
findFn: nil,
dir: false,
},
{Space: "DAV:", Local: "getcontenttype"}: {
findFn: findContentType,
dir: false,
},
{Space: "DAV:", Local: "getetag"}: {
findFn: findETag,
// findETag implements ETag as the concatenated hex values of a file's
// modification time and size. This is not a reliable synchronization
// mechanism for directories, so we do not advertise getetag for DAV
// collections.
dir: false,
},
// TODO: The lockdiscovery property requires LockSystem to list the
// active locks on a resource.
{Space: "DAV:", Local: "lockdiscovery"}: {},
{Space: "DAV:", Local: "supportedlock"}: {
findFn: findSupportedLock,
dir: true,
},
}
// TODO(nigeltao) merge props and allprop?
// Props returns the status of the properties named pnames for resource name.
//
// Each Propstat has a unique status and each property name will only be part
// of one Propstat element.
func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) {
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return nil, err
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return nil, err
}
isDir := fi.IsDir()
var deadProps map[xml.Name]Property
if dph, ok := f.(DeadPropsHolder); ok {
deadProps, err = dph.DeadProps()
if err != nil {
return nil, err
}
}
pstatOK := Propstat{Status: http.StatusOK}
pstatNotFound := Propstat{Status: http.StatusNotFound}
for _, pn := range pnames {
// If this file has dead properties, check if they contain pn.
if dp, ok := deadProps[pn]; ok {
pstatOK.Props = append(pstatOK.Props, dp)
continue
}
// Otherwise, it must either be a live property or we don't know it.
if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) {
innerXML, err := prop.findFn(ctx, fs, ls, name, fi)
if err != nil {
return nil, err
}
pstatOK.Props = append(pstatOK.Props, Property{
XMLName: pn,
InnerXML: []byte(innerXML),
})
} else {
pstatNotFound.Props = append(pstatNotFound.Props, Property{
XMLName: pn,
})
}
}
return makePropstats(pstatOK, pstatNotFound), nil
}
// Propnames returns the property names defined for resource name.
func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) {
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return nil, err
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return nil, err
}
isDir := fi.IsDir()
var deadProps map[xml.Name]Property
if dph, ok := f.(DeadPropsHolder); ok {
deadProps, err = dph.DeadProps()
if err != nil {
return nil, err
}
}
pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps))
for pn, prop := range liveProps {
if prop.findFn != nil && (prop.dir || !isDir) {
pnames = append(pnames, pn)
}
}
for pn := range deadProps {
pnames = append(pnames, pn)
}
return pnames, nil
}
// Allprop returns the properties defined for resource name and the properties
// named in include.
//
// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined
// within the RFC plus dead properties. Other live properties should only be
// returned if they are named in 'include'.
//
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) {
pnames, err := propnames(ctx, fs, ls, name)
if err != nil {
return nil, err
}
// Add names from include if they are not already covered in pnames.
nameset := make(map[xml.Name]bool)
for _, pn := range pnames {
nameset[pn] = true
}
for _, pn := range include {
if !nameset[pn] {
pnames = append(pnames, pn)
}
}
return props(ctx, fs, ls, name, pnames)
}
// Patch patches the properties of resource name. The return values are
// constrained in the same manner as DeadPropsHolder.Patch.
func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) {
conflict := false
loop:
for _, patch := range patches {
for _, p := range patch.Props {
if _, ok := liveProps[p.XMLName]; ok {
conflict = true
break loop
}
}
}
if conflict {
pstatForbidden := Propstat{
Status: http.StatusForbidden,
XMLError: `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`,
}
pstatFailedDep := Propstat{
Status: StatusFailedDependency,
}
for _, patch := range patches {
for _, p := range patch.Props {
if _, ok := liveProps[p.XMLName]; ok {
pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName})
} else {
pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName})
}
}
}
return makePropstats(pstatForbidden, pstatFailedDep), nil
}
f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0)
if err != nil {
return nil, err
}
defer f.Close()
if dph, ok := f.(DeadPropsHolder); ok {
ret, err := dph.Patch(patches)
if err != nil {
return nil, err
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that
// "The contents of the prop XML element must only list the names of
// properties to which the result in the status element applies."
for _, pstat := range ret {
for i, p := range pstat.Props {
pstat.Props[i] = Property{XMLName: p.XMLName}
}
}
return ret, nil
}
// The file doesn't implement the optional DeadPropsHolder interface, so
// all patches are forbidden.
pstat := Propstat{Status: http.StatusForbidden}
for _, patch := range patches {
for _, p := range patch.Props {
pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
}
}
return []Propstat{pstat}, nil
}
func escapeXML(s string) string {
for i := 0; i < len(s); i++ {
// As an optimization, if s contains only ASCII letters, digits or a
// few special characters, the escaped value is s itself and we don't
// need to allocate a buffer and convert between string and []byte.
switch c := s[i]; {
case c == ' ' || c == '_' ||
('+' <= c && c <= '9') || // Digits as well as + , - . and /
('A' <= c && c <= 'Z') ||
('a' <= c && c <= 'z'):
continue
}
// Otherwise, go through the full escaping process.
var buf bytes.Buffer
xml.EscapeText(&buf, []byte(s))
return buf.String()
}
return s
}
func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if fi.IsDir() {
return `<D:collection xmlns:D="DAV:"/>`, nil
}
return "", nil
}
func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if slashClean(name) == "/" {
// Hide the real name of a possibly prefixed root directory.
return "", nil
}
return escapeXML(fi.Name()), nil
}
func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
return strconv.FormatInt(fi.Size(), 10), nil
}
func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
return fi.ModTime().UTC().Format(http.TimeFormat), nil
}
// ErrNotImplemented should be returned by optional interfaces if they
// want the original implementation to be used.
var ErrNotImplemented = errors.New("not implemented")
// ContentTyper is an optional interface for the os.FileInfo
// objects returned by the FileSystem.
//
// If this interface is defined then it will be used to read the
// content type from the object.
//
// If this interface is not defined the file will be opened and the
// content type will be guessed from the initial contents of the file.
type ContentTyper interface {
// ContentType returns the content type for the file.
//
// If this returns error ErrNotImplemented then the error will
// be ignored and the base implementation will be used
// instead.
ContentType(ctx context.Context) (string, error)
}
func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if do, ok := fi.(ContentTyper); ok {
ctype, err := do.ContentType(ctx)
if err != ErrNotImplemented {
return ctype, err
}
}
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return "", err
}
defer f.Close()
// This implementation is based on serveContent's code in the standard net/http package.
ctype := mime.TypeByExtension(filepath.Ext(name))
if ctype != "" {
return ctype, nil
}
// Read a chunk to decide between utf-8 text and binary.
var buf [512]byte
n, err := io.ReadFull(f, buf[:])
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return "", err
}
ctype = http.DetectContentType(buf[:n])
// Rewind file.
_, err = f.Seek(0, os.SEEK_SET)
return ctype, err
}
// ETager is an optional interface for the os.FileInfo objects
// returned by the FileSystem.
//
// If this interface is defined then it will be used to read the ETag
// for the object.
//
// If this interface is not defined an ETag will be computed using the
// ModTime() and the Size() methods of the os.FileInfo object.
type ETager interface {
// ETag returns an ETag for the file. This should be of the
// form "value" or W/"value"
//
// If this returns error ErrNotImplemented then the error will
// be ignored and the base implementation will be used
// instead.
ETag(ctx context.Context) (string, error)
}
func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if do, ok := fi.(ETager); ok {
etag, err := do.ETag(ctx)
if err != ErrNotImplemented {
return etag, err
}
}
// The Apache http 2.4 web server by default concatenates the
// modification time and size of a file. We replicate the heuristic
// with nanosecond granularity.
return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil
}
func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
return `` +
`<D:lockentry xmlns:D="DAV:">` +
`<D:lockscope><D:exclusive/></D:lockscope>` +
`<D:locktype><D:write/></D:locktype>` +
`</D:lockentry>`, nil
}
| vendor/golang.org/x/net/webdav/prop.go | 0 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.0008192651439458132,
0.00019915342272724956,
0.00016294400847982615,
0.00017123747966252267,
0.00009971341205528006
] |
{
"id": 1,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t}\n",
"\t\t// Use NextMarker if set, otherwise use last Key\n",
"\t\tif resp.NextMarker == nil || *resp.NextMarker == \"\" {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\tif resp.HasMore != nil && !*resp.HasMore {\n",
"\t\t\tbreak\n",
"\t\t}\n"
],
"file_path": "backend/qingstor/qingstor.go",
"type": "add",
"edit_start_line_idx": 584
} | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,go1.11,!gccgo,!appengine
#include "textflag.h"
// Implementation of Poly1305 using the vector facility (vx) and the VMSL instruction.
// constants
#define EX0 V1
#define EX1 V2
#define EX2 V3
// temporaries
#define T_0 V4
#define T_1 V5
#define T_2 V6
#define T_3 V7
#define T_4 V8
#define T_5 V9
#define T_6 V10
#define T_7 V11
#define T_8 V12
#define T_9 V13
#define T_10 V14
// r**2 & r**4
#define R_0 V15
#define R_1 V16
#define R_2 V17
#define R5_1 V18
#define R5_2 V19
// key (r)
#define RSAVE_0 R7
#define RSAVE_1 R8
#define RSAVE_2 R9
#define R5SAVE_1 R10
#define R5SAVE_2 R11
// message block
#define M0 V20
#define M1 V21
#define M2 V22
#define M3 V23
#define M4 V24
#define M5 V25
// accumulator
#define H0_0 V26
#define H1_0 V27
#define H2_0 V28
#define H0_1 V29
#define H1_1 V30
#define H2_1 V31
GLOBL ·keyMask<>(SB), RODATA, $16
DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f
DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f
GLOBL ·bswapMask<>(SB), RODATA, $16
DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908
DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100
GLOBL ·constants<>(SB), RODATA, $48
// EX0
DATA ·constants<>+0(SB)/8, $0x18191a1b1c1d1e1f
DATA ·constants<>+8(SB)/8, $0x0000050403020100
// EX1
DATA ·constants<>+16(SB)/8, $0x18191a1b1c1d1e1f
DATA ·constants<>+24(SB)/8, $0x00000a0908070605
// EX2
DATA ·constants<>+32(SB)/8, $0x18191a1b1c1d1e1f
DATA ·constants<>+40(SB)/8, $0x0000000f0e0d0c0b
GLOBL ·c<>(SB), RODATA, $48
// EX0
DATA ·c<>+0(SB)/8, $0x0000050403020100
DATA ·c<>+8(SB)/8, $0x0000151413121110
// EX1
DATA ·c<>+16(SB)/8, $0x00000a0908070605
DATA ·c<>+24(SB)/8, $0x00001a1918171615
// EX2
DATA ·c<>+32(SB)/8, $0x0000000f0e0d0c0b
DATA ·c<>+40(SB)/8, $0x0000001f1e1d1c1b
GLOBL ·reduce<>(SB), RODATA, $32
// 44 bit
DATA ·reduce<>+0(SB)/8, $0x0
DATA ·reduce<>+8(SB)/8, $0xfffffffffff
// 42 bit
DATA ·reduce<>+16(SB)/8, $0x0
DATA ·reduce<>+24(SB)/8, $0x3ffffffffff
// h = (f*g) % (2**130-5) [partial reduction]
// uses T_0...T_9 temporary registers
// input: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
// output: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2
#define MULTIPLY(m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) \
\ // Eliminate the dependency for the last 2 VMSLs
VMSLG m02_0, r_2, m4_2, m4_2 \
VMSLG m13_0, r_2, m5_2, m5_2 \ // 8 VMSLs pipelined
VMSLG m02_0, r_0, m4_0, m4_0 \
VMSLG m02_1, r5_2, V0, T_0 \
VMSLG m02_0, r_1, m4_1, m4_1 \
VMSLG m02_1, r_0, V0, T_1 \
VMSLG m02_1, r_1, V0, T_2 \
VMSLG m02_2, r5_1, V0, T_3 \
VMSLG m02_2, r5_2, V0, T_4 \
VMSLG m13_0, r_0, m5_0, m5_0 \
VMSLG m13_1, r5_2, V0, T_5 \
VMSLG m13_0, r_1, m5_1, m5_1 \
VMSLG m13_1, r_0, V0, T_6 \
VMSLG m13_1, r_1, V0, T_7 \
VMSLG m13_2, r5_1, V0, T_8 \
VMSLG m13_2, r5_2, V0, T_9 \
VMSLG m02_2, r_0, m4_2, m4_2 \
VMSLG m13_2, r_0, m5_2, m5_2 \
VAQ m4_0, T_0, m02_0 \
VAQ m4_1, T_1, m02_1 \
VAQ m5_0, T_5, m13_0 \
VAQ m5_1, T_6, m13_1 \
VAQ m02_0, T_3, m02_0 \
VAQ m02_1, T_4, m02_1 \
VAQ m13_0, T_8, m13_0 \
VAQ m13_1, T_9, m13_1 \
VAQ m4_2, T_2, m02_2 \
VAQ m5_2, T_7, m13_2 \
// SQUARE uses three limbs of r and r_2*5 to output square of r
// uses T_1, T_5 and T_7 temporary registers
// input: r_0, r_1, r_2, r5_2
// temp: TEMP0, TEMP1, TEMP2
// output: p0, p1, p2
#define SQUARE(r_0, r_1, r_2, r5_2, p0, p1, p2, TEMP0, TEMP1, TEMP2) \
VMSLG r_0, r_0, p0, p0 \
VMSLG r_1, r5_2, V0, TEMP0 \
VMSLG r_2, r5_2, p1, p1 \
VMSLG r_0, r_1, V0, TEMP1 \
VMSLG r_1, r_1, p2, p2 \
VMSLG r_0, r_2, V0, TEMP2 \
VAQ TEMP0, p0, p0 \
VAQ TEMP1, p1, p1 \
VAQ TEMP2, p2, p2 \
VAQ TEMP0, p0, p0 \
VAQ TEMP1, p1, p1 \
VAQ TEMP2, p2, p2 \
// carry h0->h1->h2->h0 || h3->h4->h5->h3
// uses T_2, T_4, T_5, T_7, T_8, T_9
// t6, t7, t8, t9, t10, t11
// input: h0, h1, h2, h3, h4, h5
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11
// output: h0, h1, h2, h3, h4, h5
#define REDUCE(h0, h1, h2, h3, h4, h5, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) \
VLM (R12), t6, t7 \ // 44 and 42 bit clear mask
VLEIB $7, $0x28, t10 \ // 5 byte shift mask
VREPIB $4, t8 \ // 4 bit shift mask
VREPIB $2, t11 \ // 2 bit shift mask
VSRLB t10, h0, t0 \ // h0 byte shift
VSRLB t10, h1, t1 \ // h1 byte shift
VSRLB t10, h2, t2 \ // h2 byte shift
VSRLB t10, h3, t3 \ // h3 byte shift
VSRLB t10, h4, t4 \ // h4 byte shift
VSRLB t10, h5, t5 \ // h5 byte shift
VSRL t8, t0, t0 \ // h0 bit shift
VSRL t8, t1, t1 \ // h2 bit shift
VSRL t11, t2, t2 \ // h2 bit shift
VSRL t8, t3, t3 \ // h3 bit shift
VSRL t8, t4, t4 \ // h4 bit shift
VESLG $2, t2, t9 \ // h2 carry x5
VSRL t11, t5, t5 \ // h5 bit shift
VN t6, h0, h0 \ // h0 clear carry
VAQ t2, t9, t2 \ // h2 carry x5
VESLG $2, t5, t9 \ // h5 carry x5
VN t6, h1, h1 \ // h1 clear carry
VN t7, h2, h2 \ // h2 clear carry
VAQ t5, t9, t5 \ // h5 carry x5
VN t6, h3, h3 \ // h3 clear carry
VN t6, h4, h4 \ // h4 clear carry
VN t7, h5, h5 \ // h5 clear carry
VAQ t0, h1, h1 \ // h0->h1
VAQ t3, h4, h4 \ // h3->h4
VAQ t1, h2, h2 \ // h1->h2
VAQ t4, h5, h5 \ // h4->h5
VAQ t2, h0, h0 \ // h2->h0
VAQ t5, h3, h3 \ // h5->h3
VREPG $1, t6, t6 \ // 44 and 42 bit masks across both halves
VREPG $1, t7, t7 \
VSLDB $8, h0, h0, h0 \ // set up [h0/1/2, h3/4/5]
VSLDB $8, h1, h1, h1 \
VSLDB $8, h2, h2, h2 \
VO h0, h3, h3 \
VO h1, h4, h4 \
VO h2, h5, h5 \
VESRLG $44, h3, t0 \ // 44 bit shift right
VESRLG $44, h4, t1 \
VESRLG $42, h5, t2 \
VN t6, h3, h3 \ // clear carry bits
VN t6, h4, h4 \
VN t7, h5, h5 \
VESLG $2, t2, t9 \ // multiply carry by 5
VAQ t9, t2, t2 \
VAQ t0, h4, h4 \
VAQ t1, h5, h5 \
VAQ t2, h3, h3 \
// carry h0->h1->h2->h0
// input: h0, h1, h2
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8
// output: h0, h1, h2
#define REDUCE2(h0, h1, h2, t0, t1, t2, t3, t4, t5, t6, t7, t8) \
VLEIB $7, $0x28, t3 \ // 5 byte shift mask
VREPIB $4, t4 \ // 4 bit shift mask
VREPIB $2, t7 \ // 2 bit shift mask
VGBM $0x003F, t5 \ // mask to clear carry bits
VSRLB t3, h0, t0 \
VSRLB t3, h1, t1 \
VSRLB t3, h2, t2 \
VESRLG $4, t5, t5 \ // 44 bit clear mask
VSRL t4, t0, t0 \
VSRL t4, t1, t1 \
VSRL t7, t2, t2 \
VESRLG $2, t5, t6 \ // 42 bit clear mask
VESLG $2, t2, t8 \
VAQ t8, t2, t2 \
VN t5, h0, h0 \
VN t5, h1, h1 \
VN t6, h2, h2 \
VAQ t0, h1, h1 \
VAQ t1, h2, h2 \
VAQ t2, h0, h0 \
VSRLB t3, h0, t0 \
VSRLB t3, h1, t1 \
VSRLB t3, h2, t2 \
VSRL t4, t0, t0 \
VSRL t4, t1, t1 \
VSRL t7, t2, t2 \
VN t5, h0, h0 \
VN t5, h1, h1 \
VESLG $2, t2, t8 \
VN t6, h2, h2 \
VAQ t0, h1, h1 \
VAQ t8, t2, t2 \
VAQ t1, h2, h2 \
VAQ t2, h0, h0 \
// expands two message blocks into the lower halves of the d registers
// moves the contents of the d registers into upper halves
// input: in1, in2, d0, d1, d2, d3, d4, d5
// temp: TEMP0, TEMP1, TEMP2, TEMP3
// output: d0, d1, d2, d3, d4, d5
#define EXPACC(in1, in2, d0, d1, d2, d3, d4, d5, TEMP0, TEMP1, TEMP2, TEMP3) \
VGBM $0xff3f, TEMP0 \
VGBM $0xff1f, TEMP1 \
VESLG $4, d1, TEMP2 \
VESLG $4, d4, TEMP3 \
VESRLG $4, TEMP0, TEMP0 \
VPERM in1, d0, EX0, d0 \
VPERM in2, d3, EX0, d3 \
VPERM in1, d2, EX2, d2 \
VPERM in2, d5, EX2, d5 \
VPERM in1, TEMP2, EX1, d1 \
VPERM in2, TEMP3, EX1, d4 \
VN TEMP0, d0, d0 \
VN TEMP0, d3, d3 \
VESRLG $4, d1, d1 \
VESRLG $4, d4, d4 \
VN TEMP1, d2, d2 \
VN TEMP1, d5, d5 \
VN TEMP0, d1, d1 \
VN TEMP0, d4, d4 \
// expands one message block into the lower halves of the d registers
// moves the contents of the d registers into upper halves
// input: in, d0, d1, d2
// temp: TEMP0, TEMP1, TEMP2
// output: d0, d1, d2
#define EXPACC2(in, d0, d1, d2, TEMP0, TEMP1, TEMP2) \
VGBM $0xff3f, TEMP0 \
VESLG $4, d1, TEMP2 \
VGBM $0xff1f, TEMP1 \
VPERM in, d0, EX0, d0 \
VESRLG $4, TEMP0, TEMP0 \
VPERM in, d2, EX2, d2 \
VPERM in, TEMP2, EX1, d1 \
VN TEMP0, d0, d0 \
VN TEMP1, d2, d2 \
VESRLG $4, d1, d1 \
VN TEMP0, d1, d1 \
// pack h2:h0 into h1:h0 (no carry)
// input: h0, h1, h2
// output: h0, h1, h2
#define PACK(h0, h1, h2) \
VMRLG h1, h2, h2 \ // copy h1 to upper half h2
VESLG $44, h1, h1 \ // shift limb 1 44 bits, leaving 20
VO h0, h1, h0 \ // combine h0 with 20 bits from limb 1
VESRLG $20, h2, h1 \ // put top 24 bits of limb 1 into h1
VLEIG $1, $0, h1 \ // clear h2 stuff from lower half of h1
VO h0, h1, h0 \ // h0 now has 88 bits (limb 0 and 1)
VLEIG $0, $0, h2 \ // clear upper half of h2
VESRLG $40, h2, h1 \ // h1 now has upper two bits of result
VLEIB $7, $88, h1 \ // for byte shift (11 bytes)
VSLB h1, h2, h2 \ // shift h2 11 bytes to the left
VO h0, h2, h0 \ // combine h0 with 20 bits from limb 1
VLEIG $0, $0, h1 \ // clear upper half of h1
// if h > 2**130-5 then h -= 2**130-5
// input: h0, h1
// temp: t0, t1, t2
// output: h0
#define MOD(h0, h1, t0, t1, t2) \
VZERO t0 \
VLEIG $1, $5, t0 \
VACCQ h0, t0, t1 \
VAQ h0, t0, t0 \
VONE t2 \
VLEIG $1, $-4, t2 \
VAQ t2, t1, t1 \
VACCQ h1, t1, t1 \
VONE t2 \
VAQ t2, t1, t1 \
VN h0, t1, t2 \
VNC t0, t1, t1 \
VO t1, t2, h0 \
// func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]key)
TEXT ·poly1305vmsl(SB), $0-32
// This code processes 6 + up to 4 blocks (32 bytes) per iteration
// using the algorithm described in:
// NEON crypto, Daniel J. Bernstein & Peter Schwabe
// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
// And as modified for VMSL as described in
// Accelerating Poly1305 Cryptographic Message Authentication on the z14
// O'Farrell et al, CASCON 2017, p48-55
// https://ibm.ent.box.com/s/jf9gedj0e9d2vjctfyh186shaztavnht
LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key
VZERO V0 // c
// load EX0, EX1 and EX2
MOVD $·constants<>(SB), R5
VLM (R5), EX0, EX2 // c
// setup r
VL (R4), T_0
MOVD $·keyMask<>(SB), R6
VL (R6), T_1
VN T_0, T_1, T_0
VZERO T_2 // limbs for r
VZERO T_3
VZERO T_4
EXPACC2(T_0, T_2, T_3, T_4, T_1, T_5, T_7)
// T_2, T_3, T_4: [0, r]
// setup r*20
VLEIG $0, $0, T_0
VLEIG $1, $20, T_0 // T_0: [0, 20]
VZERO T_5
VZERO T_6
VMSLG T_0, T_3, T_5, T_5
VMSLG T_0, T_4, T_6, T_6
// store r for final block in GR
VLGVG $1, T_2, RSAVE_0 // c
VLGVG $1, T_3, RSAVE_1 // c
VLGVG $1, T_4, RSAVE_2 // c
VLGVG $1, T_5, R5SAVE_1 // c
VLGVG $1, T_6, R5SAVE_2 // c
// initialize h
VZERO H0_0
VZERO H1_0
VZERO H2_0
VZERO H0_1
VZERO H1_1
VZERO H2_1
// initialize pointer for reduce constants
MOVD $·reduce<>(SB), R12
// calculate r**2 and 20*(r**2)
VZERO R_0
VZERO R_1
VZERO R_2
SQUARE(T_2, T_3, T_4, T_6, R_0, R_1, R_2, T_1, T_5, T_7)
REDUCE2(R_0, R_1, R_2, M0, M1, M2, M3, M4, R5_1, R5_2, M5, T_1)
VZERO R5_1
VZERO R5_2
VMSLG T_0, R_1, R5_1, R5_1
VMSLG T_0, R_2, R5_2, R5_2
// skip r**4 calculation if 3 blocks or less
CMPBLE R3, $48, b4
// calculate r**4 and 20*(r**4)
VZERO T_8
VZERO T_9
VZERO T_10
SQUARE(R_0, R_1, R_2, R5_2, T_8, T_9, T_10, T_1, T_5, T_7)
REDUCE2(T_8, T_9, T_10, M0, M1, M2, M3, M4, T_2, T_3, M5, T_1)
VZERO T_2
VZERO T_3
VMSLG T_0, T_9, T_2, T_2
VMSLG T_0, T_10, T_3, T_3
// put r**2 to the right and r**4 to the left of R_0, R_1, R_2
VSLDB $8, T_8, T_8, T_8
VSLDB $8, T_9, T_9, T_9
VSLDB $8, T_10, T_10, T_10
VSLDB $8, T_2, T_2, T_2
VSLDB $8, T_3, T_3, T_3
VO T_8, R_0, R_0
VO T_9, R_1, R_1
VO T_10, R_2, R_2
VO T_2, R5_1, R5_1
VO T_3, R5_2, R5_2
CMPBLE R3, $80, load // less than or equal to 5 blocks in message
// 6(or 5+1) blocks
SUB $81, R3
VLM (R2), M0, M4
VLL R3, 80(R2), M5
ADD $1, R3
MOVBZ $1, R0
CMPBGE R3, $16, 2(PC)
VLVGB R3, R0, M5
MOVD $96(R2), R2
EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
EXPACC(M2, M3, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
VLEIB $2, $1, H2_0
VLEIB $2, $1, H2_1
VLEIB $10, $1, H2_0
VLEIB $10, $1, H2_1
VZERO M0
VZERO M1
VZERO M2
VZERO M3
VZERO T_4
VZERO T_10
EXPACC(M4, M5, M0, M1, M2, M3, T_4, T_10, T_0, T_1, T_2, T_3)
VLR T_4, M4
VLEIB $10, $1, M2
CMPBLT R3, $16, 2(PC)
VLEIB $10, $1, T_10
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9)
VMRHG V0, H0_1, H0_0
VMRHG V0, H1_1, H1_0
VMRHG V0, H2_1, H2_0
VMRLG V0, H0_1, H0_1
VMRLG V0, H1_1, H1_1
VMRLG V0, H2_1, H2_1
SUB $16, R3
CMPBLE R3, $0, square
load:
// load EX0, EX1 and EX2
MOVD $·c<>(SB), R5
VLM (R5), EX0, EX2
loop:
CMPBLE R3, $64, add // b4 // last 4 or less blocks left
// next 4 full blocks
VLM (R2), M2, M5
SUB $64, R3
MOVD $64(R2), R2
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, T_0, T_1, T_3, T_4, T_5, T_2, T_7, T_8, T_9)
// expacc in-lined to create [m2, m3] limbs
VGBM $0x3f3f, T_0 // 44 bit clear mask
VGBM $0x1f1f, T_1 // 40 bit clear mask
VPERM M2, M3, EX0, T_3
VESRLG $4, T_0, T_0 // 44 bit clear mask ready
VPERM M2, M3, EX1, T_4
VPERM M2, M3, EX2, T_5
VN T_0, T_3, T_3
VESRLG $4, T_4, T_4
VN T_1, T_5, T_5
VN T_0, T_4, T_4
VMRHG H0_1, T_3, H0_0
VMRHG H1_1, T_4, H1_0
VMRHG H2_1, T_5, H2_0
VMRLG H0_1, T_3, H0_1
VMRLG H1_1, T_4, H1_1
VMRLG H2_1, T_5, H2_1
VLEIB $10, $1, H2_0
VLEIB $10, $1, H2_1
VPERM M4, M5, EX0, T_3
VPERM M4, M5, EX1, T_4
VPERM M4, M5, EX2, T_5
VN T_0, T_3, T_3
VESRLG $4, T_4, T_4
VN T_1, T_5, T_5
VN T_0, T_4, T_4
VMRHG V0, T_3, M0
VMRHG V0, T_4, M1
VMRHG V0, T_5, M2
VMRLG V0, T_3, M3
VMRLG V0, T_4, M4
VMRLG V0, T_5, M5
VLEIB $10, $1, M2
VLEIB $10, $1, M5
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
CMPBNE R3, $0, loop
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
VMRHG V0, H0_1, H0_0
VMRHG V0, H1_1, H1_0
VMRHG V0, H2_1, H2_0
VMRLG V0, H0_1, H0_1
VMRLG V0, H1_1, H1_1
VMRLG V0, H2_1, H2_1
// load EX0, EX1, EX2
MOVD $·constants<>(SB), R5
VLM (R5), EX0, EX2
// sum vectors
VAQ H0_0, H0_1, H0_0
VAQ H1_0, H1_1, H1_0
VAQ H2_0, H2_1, H2_0
// h may be >= 2*(2**130-5) so we need to reduce it again
// M0...M4 are used as temps here
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
next: // carry h1->h2
VLEIB $7, $0x28, T_1
VREPIB $4, T_2
VGBM $0x003F, T_3
VESRLG $4, T_3
// byte shift
VSRLB T_1, H1_0, T_4
// bit shift
VSRL T_2, T_4, T_4
// clear h1 carry bits
VN T_3, H1_0, H1_0
// add carry
VAQ T_4, H2_0, H2_0
// h is now < 2*(2**130-5)
// pack h into h1 (hi) and h0 (lo)
PACK(H0_0, H1_0, H2_0)
// if h > 2**130-5 then h -= 2**130-5
MOD(H0_0, H1_0, T_0, T_1, T_2)
// h += s
MOVD $·bswapMask<>(SB), R5
VL (R5), T_1
VL 16(R4), T_0
VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big)
VAQ T_0, H0_0, H0_0
VPERM H0_0, H0_0, T_1, H0_0 // reverse bytes (to little)
VST H0_0, (R1)
RET
add:
// load EX0, EX1, EX2
MOVD $·constants<>(SB), R5
VLM (R5), EX0, EX2
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
VMRHG V0, H0_1, H0_0
VMRHG V0, H1_1, H1_0
VMRHG V0, H2_1, H2_0
VMRLG V0, H0_1, H0_1
VMRLG V0, H1_1, H1_1
VMRLG V0, H2_1, H2_1
CMPBLE R3, $64, b4
b4:
CMPBLE R3, $48, b3 // 3 blocks or less
// 4(3+1) blocks remaining
SUB $49, R3
VLM (R2), M0, M2
VLL R3, 48(R2), M3
ADD $1, R3
MOVBZ $1, R0
CMPBEQ R3, $16, 2(PC)
VLVGB R3, R0, M3
MOVD $64(R2), R2
EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
VLEIB $10, $1, H2_0
VLEIB $10, $1, H2_1
VZERO M0
VZERO M1
VZERO M4
VZERO M5
VZERO T_4
VZERO T_10
EXPACC(M2, M3, M0, M1, M4, M5, T_4, T_10, T_0, T_1, T_2, T_3)
VLR T_4, M2
VLEIB $10, $1, M4
CMPBNE R3, $16, 2(PC)
VLEIB $10, $1, T_10
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M4, M5, M2, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
VMRHG V0, H0_1, H0_0
VMRHG V0, H1_1, H1_0
VMRHG V0, H2_1, H2_0
VMRLG V0, H0_1, H0_1
VMRLG V0, H1_1, H1_1
VMRLG V0, H2_1, H2_1
SUB $16, R3
CMPBLE R3, $0, square // this condition must always hold true!
b3:
CMPBLE R3, $32, b2
// 3 blocks remaining
// setup [r²,r]
VSLDB $8, R_0, R_0, R_0
VSLDB $8, R_1, R_1, R_1
VSLDB $8, R_2, R_2, R_2
VSLDB $8, R5_1, R5_1, R5_1
VSLDB $8, R5_2, R5_2, R5_2
VLVGG $1, RSAVE_0, R_0
VLVGG $1, RSAVE_1, R_1
VLVGG $1, RSAVE_2, R_2
VLVGG $1, R5SAVE_1, R5_1
VLVGG $1, R5SAVE_2, R5_2
// setup [h0, h1]
VSLDB $8, H0_0, H0_0, H0_0
VSLDB $8, H1_0, H1_0, H1_0
VSLDB $8, H2_0, H2_0, H2_0
VO H0_1, H0_0, H0_0
VO H1_1, H1_0, H1_0
VO H2_1, H2_0, H2_0
VZERO H0_1
VZERO H1_1
VZERO H2_1
VZERO M0
VZERO M1
VZERO M2
VZERO M3
VZERO M4
VZERO M5
// H*[r**2, r]
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, T_10, M5)
SUB $33, R3
VLM (R2), M0, M1
VLL R3, 32(R2), M2
ADD $1, R3
MOVBZ $1, R0
CMPBEQ R3, $16, 2(PC)
VLVGB R3, R0, M2
// H += m0
VZERO T_1
VZERO T_2
VZERO T_3
EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)
VLEIB $10, $1, T_3
VAG H0_0, T_1, H0_0
VAG H1_0, T_2, H1_0
VAG H2_0, T_3, H2_0
VZERO M0
VZERO M3
VZERO M4
VZERO M5
VZERO T_10
// (H+m0)*r
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M3, M4, M5, V0, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_10, H0_1, H1_1, H2_1, T_9)
// H += m1
VZERO V0
VZERO T_1
VZERO T_2
VZERO T_3
EXPACC2(M1, T_1, T_2, T_3, T_4, T_5, T_6)
VLEIB $10, $1, T_3
VAQ H0_0, T_1, H0_0
VAQ H1_0, T_2, H1_0
VAQ H2_0, T_3, H2_0
REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10)
// [H, m2] * [r**2, r]
EXPACC2(M2, H0_0, H1_0, H2_0, T_1, T_2, T_3)
CMPBNE R3, $16, 2(PC)
VLEIB $10, $1, H2_0
VZERO M0
VZERO M1
VZERO M2
VZERO M3
VZERO M4
VZERO M5
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, M5, T_10)
SUB $16, R3
CMPBLE R3, $0, next // this condition must always hold true!
b2:
CMPBLE R3, $16, b1
// 2 blocks remaining
// setup [r²,r]
VSLDB $8, R_0, R_0, R_0
VSLDB $8, R_1, R_1, R_1
VSLDB $8, R_2, R_2, R_2
VSLDB $8, R5_1, R5_1, R5_1
VSLDB $8, R5_2, R5_2, R5_2
VLVGG $1, RSAVE_0, R_0
VLVGG $1, RSAVE_1, R_1
VLVGG $1, RSAVE_2, R_2
VLVGG $1, R5SAVE_1, R5_1
VLVGG $1, R5SAVE_2, R5_2
// setup [h0, h1]
VSLDB $8, H0_0, H0_0, H0_0
VSLDB $8, H1_0, H1_0, H1_0
VSLDB $8, H2_0, H2_0, H2_0
VO H0_1, H0_0, H0_0
VO H1_1, H1_0, H1_0
VO H2_1, H2_0, H2_0
VZERO H0_1
VZERO H1_1
VZERO H2_1
VZERO M0
VZERO M1
VZERO M2
VZERO M3
VZERO M4
VZERO M5
// H*[r**2, r]
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9)
VMRHG V0, H0_1, H0_0
VMRHG V0, H1_1, H1_0
VMRHG V0, H2_1, H2_0
VMRLG V0, H0_1, H0_1
VMRLG V0, H1_1, H1_1
VMRLG V0, H2_1, H2_1
// move h to the left and 0s at the right
VSLDB $8, H0_0, H0_0, H0_0
VSLDB $8, H1_0, H1_0, H1_0
VSLDB $8, H2_0, H2_0, H2_0
// get message blocks and append 1 to start
SUB $17, R3
VL (R2), M0
VLL R3, 16(R2), M1
ADD $1, R3
MOVBZ $1, R0
CMPBEQ R3, $16, 2(PC)
VLVGB R3, R0, M1
VZERO T_6
VZERO T_7
VZERO T_8
EXPACC2(M0, T_6, T_7, T_8, T_1, T_2, T_3)
EXPACC2(M1, T_6, T_7, T_8, T_1, T_2, T_3)
VLEIB $2, $1, T_8
CMPBNE R3, $16, 2(PC)
VLEIB $10, $1, T_8
// add [m0, m1] to h
VAG H0_0, T_6, H0_0
VAG H1_0, T_7, H1_0
VAG H2_0, T_8, H2_0
VZERO M2
VZERO M3
VZERO M4
VZERO M5
VZERO T_10
VZERO M0
// at this point R_0 .. R5_2 look like [r**2, r]
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M2, M3, M4, M5, T_10, M0, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE2(H0_0, H1_0, H2_0, M2, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10)
SUB $16, R3, R3
CMPBLE R3, $0, next
b1:
CMPBLE R3, $0, next
// 1 block remaining
// setup [r²,r]
VSLDB $8, R_0, R_0, R_0
VSLDB $8, R_1, R_1, R_1
VSLDB $8, R_2, R_2, R_2
VSLDB $8, R5_1, R5_1, R5_1
VSLDB $8, R5_2, R5_2, R5_2
VLVGG $1, RSAVE_0, R_0
VLVGG $1, RSAVE_1, R_1
VLVGG $1, RSAVE_2, R_2
VLVGG $1, R5SAVE_1, R5_1
VLVGG $1, R5SAVE_2, R5_2
// setup [h0, h1]
VSLDB $8, H0_0, H0_0, H0_0
VSLDB $8, H1_0, H1_0, H1_0
VSLDB $8, H2_0, H2_0, H2_0
VO H0_1, H0_0, H0_0
VO H1_1, H1_0, H1_0
VO H2_1, H2_0, H2_0
VZERO H0_1
VZERO H1_1
VZERO H2_1
VZERO M0
VZERO M1
VZERO M2
VZERO M3
VZERO M4
VZERO M5
// H*[r**2, r]
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
// set up [0, m0] limbs
SUB $1, R3
VLL R3, (R2), M0
ADD $1, R3
MOVBZ $1, R0
CMPBEQ R3, $16, 2(PC)
VLVGB R3, R0, M0
VZERO T_1
VZERO T_2
VZERO T_3
EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)// limbs: [0, m]
CMPBNE R3, $16, 2(PC)
VLEIB $10, $1, T_3
// h+m0
VAQ H0_0, T_1, H0_0
VAQ H1_0, T_2, H1_0
VAQ H2_0, T_3, H2_0
VZERO M0
VZERO M1
VZERO M2
VZERO M3
VZERO M4
VZERO M5
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
BR next
square:
// setup [r²,r]
VSLDB $8, R_0, R_0, R_0
VSLDB $8, R_1, R_1, R_1
VSLDB $8, R_2, R_2, R_2
VSLDB $8, R5_1, R5_1, R5_1
VSLDB $8, R5_2, R5_2, R5_2
VLVGG $1, RSAVE_0, R_0
VLVGG $1, RSAVE_1, R_1
VLVGG $1, RSAVE_2, R_2
VLVGG $1, R5SAVE_1, R5_1
VLVGG $1, R5SAVE_2, R5_2
// setup [h0, h1]
VSLDB $8, H0_0, H0_0, H0_0
VSLDB $8, H1_0, H1_0, H1_0
VSLDB $8, H2_0, H2_0, H2_0
VO H0_1, H0_0, H0_0
VO H1_1, H1_0, H1_0
VO H2_1, H2_0, H2_0
VZERO H0_1
VZERO H1_1
VZERO H2_1
VZERO M0
VZERO M1
VZERO M2
VZERO M3
VZERO M4
VZERO M5
// (h0*r**2) + (h1*r)
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
BR next
| vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s | 0 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.0009320310200564563,
0.000197154629859142,
0.00016152870375663042,
0.00016697232786100358,
0.00010666553134797141
] |
{
"id": 1,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t}\n",
"\t\t// Use NextMarker if set, otherwise use last Key\n",
"\t\tif resp.NextMarker == nil || *resp.NextMarker == \"\" {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\tif resp.HasMore != nil && !*resp.HasMore {\n",
"\t\t\tbreak\n",
"\t\t}\n"
],
"file_path": "backend/qingstor/qingstor.go",
"type": "add",
"edit_start_line_idx": 584
} | package pkcs7
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestPad(t *testing.T) {
for _, test := range []struct {
n int
in string
expected string
}{
{8, "", "\x08\x08\x08\x08\x08\x08\x08\x08"},
{8, "1", "1\x07\x07\x07\x07\x07\x07\x07"},
{8, "12", "12\x06\x06\x06\x06\x06\x06"},
{8, "123", "123\x05\x05\x05\x05\x05"},
{8, "1234", "1234\x04\x04\x04\x04"},
{8, "12345", "12345\x03\x03\x03"},
{8, "123456", "123456\x02\x02"},
{8, "1234567", "1234567\x01"},
{8, "abcdefgh", "abcdefgh\x08\x08\x08\x08\x08\x08\x08\x08"},
{8, "abcdefgh1", "abcdefgh1\x07\x07\x07\x07\x07\x07\x07"},
{8, "abcdefgh12", "abcdefgh12\x06\x06\x06\x06\x06\x06"},
{8, "abcdefgh123", "abcdefgh123\x05\x05\x05\x05\x05"},
{8, "abcdefgh1234", "abcdefgh1234\x04\x04\x04\x04"},
{8, "abcdefgh12345", "abcdefgh12345\x03\x03\x03"},
{8, "abcdefgh123456", "abcdefgh123456\x02\x02"},
{8, "abcdefgh1234567", "abcdefgh1234567\x01"},
{8, "abcdefgh12345678", "abcdefgh12345678\x08\x08\x08\x08\x08\x08\x08\x08"},
{16, "", "\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10"},
{16, "a", "a\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f"},
} {
actual := Pad(test.n, []byte(test.in))
assert.Equal(t, test.expected, string(actual), fmt.Sprintf("Pad %d %q", test.n, test.in))
recovered, err := Unpad(test.n, actual)
assert.NoError(t, err)
assert.Equal(t, []byte(test.in), recovered, fmt.Sprintf("Unpad %d %q", test.n, test.in))
}
assert.Panics(t, func() { Pad(1, []byte("")) }, "bad multiple")
assert.Panics(t, func() { Pad(256, []byte("")) }, "bad multiple")
}
func TestUnpad(t *testing.T) {
// We've tested the OK decoding in TestPad, now test the error cases
for _, test := range []struct {
n int
in string
err error
}{
{8, "", ErrorPaddingNotFound},
{8, "1", ErrorPaddingNotAMultiple},
{8, "12", ErrorPaddingNotAMultiple},
{8, "123", ErrorPaddingNotAMultiple},
{8, "1234", ErrorPaddingNotAMultiple},
{8, "12345", ErrorPaddingNotAMultiple},
{8, "123456", ErrorPaddingNotAMultiple},
{8, "1234567", ErrorPaddingNotAMultiple},
{8, "1234567\xFF", ErrorPaddingTooLong},
{8, "1234567\x09", ErrorPaddingTooLong},
{8, "1234567\x00", ErrorPaddingTooShort},
{8, "123456\x01\x02", ErrorPaddingNotAllTheSame},
{8, "\x07\x08\x08\x08\x08\x08\x08\x08", ErrorPaddingNotAllTheSame},
} {
result, actualErr := Unpad(test.n, []byte(test.in))
assert.Equal(t, test.err, actualErr, fmt.Sprintf("Unpad %d %q", test.n, test.in))
assert.Equal(t, result, []byte(nil))
}
assert.Panics(t, func() { _, _ = Unpad(1, []byte("")) }, "bad multiple")
assert.Panics(t, func() { _, _ = Unpad(256, []byte("")) }, "bad multiple")
}
| backend/crypt/pkcs7/pkcs7_test.go | 0 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.00017451969324611127,
0.00016943085938692093,
0.00016427971422672272,
0.00016932119615375996,
0.0000030924793463782407
] |
{
"id": 2,
"code_window": [
"\t\t// Use NextMarker if set, otherwise use last Key\n",
"\t\tif resp.NextMarker == nil || *resp.NextMarker == \"\" {\n",
"\t\t\t//marker = resp.Keys[len(resp.Keys)-1].Key\n",
"\t\t\tbreak\n",
"\t\t} else {\n",
"\t\t\tmarker = resp.NextMarker\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfs.Errorf(f, \"Expecting NextMarker but didn't find one\")\n"
],
"file_path": "backend/qingstor/qingstor.go",
"type": "replace",
"edit_start_line_idx": 586
} | // Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/
// +build !plan9
package qingstor
import (
"context"
"fmt"
"io"
"net/http"
"path"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
qsConfig "github.com/yunify/qingstor-sdk-go/config"
qsErr "github.com/yunify/qingstor-sdk-go/request/errors"
qs "github.com/yunify/qingstor-sdk-go/service"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "qingstor",
Description: "QingCloud Object Storage",
NewFs: NewFs,
Options: []fs.Option{{
Name: "env_auth",
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter QingStor credentials in the next step",
}, {
Value: "true",
Help: "Get QingStor credentials from the environment (env vars or IAM)",
}},
}, {
Name: "access_key_id",
Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "secret_access_key",
Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "endpoint",
Help: "Enter a endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"",
}, {
Name: "zone",
Help: "Zone to connect to.\nDefault is \"pek3a\".",
Examples: []fs.OptionExample{{
Value: "pek3a",
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
}, {
Value: "sh1a",
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
}, {
Value: "gd2a",
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
}},
}, {
Name: "connection_retries",
Help: "Number of connection retries.",
Default: 3,
Advanced: true,
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).
If you are uploading small numbers of large file over high speed link
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 1,
Advanced: true,
}},
})
}
// Constants
const (
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
minChunkSize = fs.SizeSuffix(minMultiPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)
// Globals
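// timestampToTime converts a Unix timestamp in seconds to a UTC time.Time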
func timestampToTime(tp int64) time.Time {
timeLayout := time.RFC3339Nano
ts := time.Unix(tp, 0).Format(timeLayout)
tm, _ := time.Parse(timeLayout, ts)
return tm.UTC()
}
// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Endpoint string `config:"endpoint"`
Zone string `config:"zone"`
ConnectionRetries int `config:"connection_retries"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
}
// Fs represents a remote qingstor server
type Fs struct {
name string // The name of the remote
root string // The root is a subdir, is a special object
opt Options // parsed options
features *fs.Features // optional features
svc *qs.Service // The connection to the qingstor server
zone string // The zone we are working on
bucket string // The bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucketOK and bucketDeleted
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
}
// Object describes a qingstor object
type Object struct {
// Will definitely have everything but meta which may be nil
//
// List will read everything but meta & mimeType - to fill
// that in you need to call readMetaData
fs *Fs // what this object is part of
remote string // object of remote
etag string // md5sum of the object
size int64 // length of the object content
mimeType string // ContentType of object - may be ""
lastModified time.Time // Last modified
encrypted bool // whether the object is encrypted
algo string // Custom encryption algorithms
}
// ------------------------------------------------------------
// Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// qsParsePath parses a qingstor 'url'
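// e.g. "bucket/path/to/dir" yields bucket "bucket" and key "path/to/dir"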
func qsParsePath(path string) (bucket, key string, err error) {
// Pattern to match a qingstor path
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("Couldn't parse bucket out of qingstor path %q", path)
} else {
bucket, key = parts[1], parts[2]
key = strings.Trim(key, "/")
}
return
}
// Split a URL into three parts: protocol, host and port
func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
/*
Pattern to match an endpoint,
eg: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", 443
"http(s)//qingstor.com" --> "http(s)", "qingstor.com", ""
"qingstor.com" --> "", "qingstor.com", ""
*/
defer func() {
if r := recover(); r != nil {
switch x := r.(type) {
case error:
err = x
default:
err = nil
}
}
}()
var endpointMatcher = regexp.MustCompile(`^(?:(http|https)://)*(\w+\.(?:[\w\.])*)(?::(\d{0,5}))*$`)
parts := endpointMatcher.FindStringSubmatch(endpoint)
protocol, host, port = parts[1], parts[2], parts[3]
return
}
// qsServiceConnection makes a connection to qingstor
func qsServiceConnection(opt *Options) (*qs.Service, error) {
accessKeyID := opt.AccessKeyID
secretAccessKey := opt.SecretAccessKey
switch {
case opt.EnvAuth:
// No need for empty checks if "env_auth" is true
case accessKeyID == "" && secretAccessKey == "":
// if no access key/secret then fall back to anonymous access
case accessKeyID == "":
return nil, errors.New("access_key_id not found")
case secretAccessKey == "":
return nil, errors.New("secret_access_key not found")
}
protocol := "https"
host := "qingstor.com"
port := 443
endpoint := opt.Endpoint
if endpoint != "" {
_protocol, _host, _port, err := qsParseEndpoint(endpoint)
if err != nil {
return nil, fmt.Errorf("The endpoint \"%s\" format error", endpoint)
}
if _protocol != "" {
protocol = _protocol
}
host = _host
if _port != "" {
port, _ = strconv.Atoi(_port)
} else if protocol == "http" {
port = 80
}
}
cf, err := qsConfig.NewDefault()
if err != nil {
return nil, err
}
cf.AccessKeyID = accessKeyID
cf.SecretAccessKey = secretAccessKey
cf.Protocol = protocol
cf.Host = host
cf.Port = port
cf.ConnectionRetries = opt.ConnectionRetries
cf.Connection = fshttp.NewClient(fs.Config)
return qs.Init(cf)
}
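// checkUploadChunkSize returns an error if the chunk size is smaller than the minimum allowed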
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
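// setUploadChunkSize validates the new chunk size and, if valid, swaps it in, returning the old value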
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
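// checkUploadCutoff returns an error if the cutoff is larger than the maximum allowed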
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
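// setUploadCutoff validates the new upload cutoff and, if valid, swaps it in, returning the old value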
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "qingstor: chunk size")
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
}
bucket, key, err := qsParsePath(root)
if err != nil {
return nil, err
}
svc, err := qsServiceConnection(opt)
if err != nil {
return nil, err
}
if opt.Zone == "" {
opt.Zone = "pek3a"
}
f := &Fs{
name: name,
root: key,
opt: *opt,
svc: svc,
zone: opt.Zone,
bucket: bucket,
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
if f.root != "" {
if !strings.HasSuffix(f.root, "/") {
f.root += "/"
}
// Check to see if the object exists
bucketInit, err := svc.Bucket(bucket, opt.Zone)
if err != nil {
return nil, err
}
_, err = bucketInit.HeadObject(key, &qs.HeadObjectInput{})
if err == nil {
f.root = path.Dir(key)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.root == "" {
return fmt.Sprintf("QingStor bucket %s", f.bucket)
}
return fmt.Sprintf("QingStor bucket %s root %s", f.bucket, f.root)
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
//return time.Nanosecond
// Not currently supported
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
//return hash.HashSet(hash.HashNone)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Put creates a new object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
fsObj := &Object{
fs: f,
remote: src.Remote(),
}
return fsObj, fsObj.Update(ctx, in, src, options...)
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.fs
key := f.root + remote
source := path.Join("/"+srcFs.bucket, srcFs.root+srcObj.remote)
fs.Debugf(f, "Copied, source key is: %s, and dst key is: %s", source, key)
req := qs.PutObjectInput{
XQSCopySource: &source,
}
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return nil, err
}
_, err = bucketInit.PutObject(key, &req)
if err != nil {
fs.Debugf(f, "Copy Failed, API Error: %v", err)
return nil, err
}
return f.NewObject(ctx, remote)
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *qs.KeyType) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
// Set info
if info.Size != nil {
o.size = *info.Size
}
if info.Etag != nil {
o.etag = qs.StringValue(info.Etag)
}
if info.Modified == nil {
fs.Logf(o, "Failed to read last modified")
o.lastModified = time.Now()
} else {
o.lastModified = timestampToTime(int64(*info.Modified))
}
if info.MimeType != nil {
o.mimeType = qs.StringValue(info.MimeType)
}
if info.Encrypted != nil {
o.encrypted = qs.BoolValue(info.Encrypted)
}
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// listFn is called from list to handle an object.
type listFn func(remote string, object *qs.KeyType, isDirectory bool) error
// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error {
prefix := f.root
if dir != "" {
prefix += dir + "/"
}
delimiter := ""
if !recurse {
delimiter = "/"
}
maxLimit := int(listLimitSize)
var marker *string
for {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
// FIXME need to implement ALL loop
req := qs.ListObjectsInput{
Delimiter: &delimiter,
Prefix: &prefix,
Limit: &maxLimit,
Marker: marker,
}
resp, err := bucketInit.ListObjects(&req)
if err != nil {
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
err = fs.ErrorDirNotFound
}
}
return err
}
rootLength := len(f.root)
if !recurse {
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix == nil {
fs.Logf(f, "Nil common prefix received")
continue
}
remote := *commonPrefix
if !strings.HasPrefix(remote, f.root) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
err = fn(remote, &qs.KeyType{Key: &remote}, true)
if err != nil {
return err
}
}
}
for _, object := range resp.Keys {
key := qs.StringValue(object.Key)
if !strings.HasPrefix(key, f.root) {
fs.Logf(f, "Odd name received %q", key)
continue
}
remote := key[rootLength:]
err = fn(remote, object, false)
if err != nil {
return err
}
}
// Use NextMarker if set, otherwise use last Key
if resp.NextMarker == nil || *resp.NextMarker == "" {
//marker = resp.Keys[len(resp.Keys)-1].Key
break
} else {
marker = resp.NextMarker
}
}
return nil
}
// Convert a list item into a BasicInfo
func (f *Fs) itemToDirEntry(remote string, object *qs.KeyType, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
size := int64(0)
if object.Size != nil {
size = *object.Size
}
d := fs.NewDir(remote, time.Time{}).SetSize(size)
return d, nil
}
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketDeleted = false
f.bucketOKMu.Unlock()
}
}
// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(ctx, dir, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
})
if err != nil {
return nil, err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return entries, nil
}
// listBuckets lists the buckets to out
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
req := qs.ListBucketsInput{
Location: &f.zone,
}
resp, err := f.svc.ListBuckets(&req)
if err != nil {
return nil, err
}
for _, bucket := range resp.Buckets {
d := fs.NewDir(qs.StringValue(bucket.Name), qs.TimeValue(bucket.Created))
entries = append(entries, d)
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(ctx, dir)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
err = f.list(ctx, dir, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
// Check if the bucket exists
func (f *Fs) dirExists() (bool, error) {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return false, err
}
_, err = bucketInit.Head()
if err == nil {
return true, nil
}
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
err = nil
}
}
return false, err
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
/* When a bucket is deleted, QingStor needs about 60 seconds to sync its status,
so wait for the sync to finish if we try to operate on a just-deleted bucket
*/
retries := 0
for retries <= 120 {
statistics, err := bucketInit.GetStatistics()
if statistics == nil || err != nil {
break
}
switch *statistics.Status {
case "deleted":
fs.Debugf(f, "Wait for qingstor sync bucket status, retries: %d", retries)
time.Sleep(time.Second * 1)
retries++
continue
default:
break
}
break
}
if !f.bucketDeleted {
exists, err := f.dirExists()
if err == nil {
f.bucketOK = exists
}
if err != nil || exists {
return err
}
}
_, err = bucketInit.Put()
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusConflict {
err = nil
}
}
if err == nil {
f.bucketOK = true
f.bucketDeleted = false
}
return err
}
// dirIsEmpty checks if the bucket is empty
func (f *Fs) dirIsEmpty() (bool, error) {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return true, err
}
statistics, err := bucketInit.GetStatistics()
if err != nil {
return true, err
}
if *statistics.Count == 0 {
return true, nil
}
return false, nil
}
// Rmdir deletes a bucket
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
isEmpty, err := f.dirIsEmpty()
if err != nil {
return err
}
if !isEmpty {
fs.Debugf(f, "The bucket %s you tried to delete not empty.", f.bucket)
return errors.New("BucketNotEmpty: The bucket you tried to delete is not empty")
}
fs.Debugf(f, "Tried to delete the bucket %s", f.bucket)
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
retries := 0
for retries <= 10 {
_, delErr := bucketInit.Delete()
if delErr != nil {
if e, ok := delErr.(*qsErr.QingStorError); ok {
switch e.Code {
// The status of "lease" takes a few seconds to "ready" when creating a new bucket
// wait for lease status ready
case "lease_not_ready":
fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries)
retries++
time.Sleep(time.Second * 1)
continue
default:
err = e
break
}
}
} else {
err = delErr
}
break
}
if err == nil {
f.bucketOK = false
f.bucketDeleted = true
}
return err
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
key := o.fs.root + o.remote
fs.Debugf(o, "Read metadata of key: %s", key)
resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{})
if err != nil {
fs.Debugf(o, "Read metadata failed, API Error: %v", err)
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
// Ignore missing Content-Length assuming it is 0
if resp.ContentLength != nil {
o.size = *resp.ContentLength
}
if resp.ETag != nil {
o.etag = qs.StringValue(resp.ETag)
}
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
o.lastModified = time.Now()
} else {
o.lastModified = *resp.LastModified
}
if resp.ContentType != nil {
o.mimeType = qs.StringValue(resp.ContentType)
}
if resp.XQSEncryptionCustomerAlgorithm != nil {
o.algo = qs.StringValue(resp.XQSEncryptionCustomerAlgorithm)
o.encrypted = true
}
return nil
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata, %v", err)
return time.Now()
}
modTime := o.lastModified
return modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
err := o.readMetaData()
if err != nil {
return err
}
o.lastModified = modTime
mimeType := fs.MimeType(ctx, o)
if o.size >= maxSizeForCopy {
fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
return nil
}
// Copy the object to itself to update the metadata
key := o.fs.root + o.remote
sourceKey := path.Join("/", o.fs.bucket, key)
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
req := qs.PutObjectInput{
XQSCopySource: &sourceKey,
ContentType: &mimeType,
}
_, err = bucketInit.PutObject(key, &req)
return err
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return nil, err
}
key := o.fs.root + o.remote
req := qs.GetObjectInput{}
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
_, value := option.Header()
req.Range = &value
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
resp, err := bucketInit.GetObject(key, &req)
if err != nil {
return nil, err
}
return resp.Body, nil
}
// Update in to the object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
// The maximum size of an uploaded object is multipartUploadSize * MaxMultipleParts
err := o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
key := o.fs.root + o.remote
// Guess the content type
mimeType := fs.MimeType(ctx, src)
req := uploadInput{
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
partSize: int64(o.fs.opt.ChunkSize),
concurrency: o.fs.opt.UploadConcurrency,
}
uploader := newUploader(&req)
size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if multipart {
err = uploader.upload()
} else {
err = uploader.singlePartUpload(in, size)
}
if err != nil {
return err
}
// Read Metadata of object
err = o.readMetaData()
return err
}
// Remove this object
func (o *Object) Remove(ctx context.Context) error {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
key := o.fs.root + o.remote
_, err = bucketInit.DeleteObject(key)
return err
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
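// matchMd5 matches a 32 character lowercase hex string as produced for MD5 checksums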
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
etag := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(etag) {
fs.Debugf(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
return "", nil
}
return etag, nil
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// String returns a description of the Object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.size
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return ""
}
return o.mimeType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
_ fs.ListRer = &Fs{}
_ fs.MimeTyper = &Object{}
)
| backend/qingstor/qingstor.go | 1 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.9981489181518555,
0.009498420171439648,
0.0001598710659891367,
0.0001726117916405201,
0.09470382332801819
] |
{
"id": 2,
"code_window": [
"\t\t// Use NextMarker if set, otherwise use last Key\n",
"\t\tif resp.NextMarker == nil || *resp.NextMarker == \"\" {\n",
"\t\t\t//marker = resp.Keys[len(resp.Keys)-1].Key\n",
"\t\t\tbreak\n",
"\t\t} else {\n",
"\t\t\tmarker = resp.NextMarker\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfs.Errorf(f, \"Expecting NextMarker but didn't find one\")\n"
],
"file_path": "backend/qingstor/qingstor.go",
"type": "replace",
"edit_start_line_idx": 586
} | // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_linux.go
package socket
type iovec struct {
Base *byte
Len uint64
}
type msghdr struct {
Name *byte
Namelen uint32
Pad_cgo_0 [4]byte
Iov *iovec
Iovlen uint64
Control *byte
Controllen uint64
Flags int32
Pad_cgo_1 [4]byte
}
type mmsghdr struct {
Hdr msghdr
Len uint32
Pad_cgo_0 [4]byte
}
type cmsghdr struct {
Len uint64
Level int32
Type int32
}
type sockaddrInet struct {
Family uint16
Port uint16
Addr [4]byte /* in_addr */
X__pad [8]uint8
}
type sockaddrInet6 struct {
Family uint16
Port uint16
Flowinfo uint32
Addr [16]byte /* in6_addr */
Scope_id uint32
}
const (
sizeofIovec = 0x10
sizeofMsghdr = 0x38
sizeofMmsghdr = 0x40
sizeofCmsghdr = 0x10
sizeofSockaddrInet = 0x10
sizeofSockaddrInet6 = 0x1c
)
| vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go | 0 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.00022228428861126304,
0.00017996261885855347,
0.00016679138934705406,
0.00017196469707414508,
0.000019231942133046687
] |
{
"id": 2,
"code_window": [
"\t\t// Use NextMarker if set, otherwise use last Key\n",
"\t\tif resp.NextMarker == nil || *resp.NextMarker == \"\" {\n",
"\t\t\t//marker = resp.Keys[len(resp.Keys)-1].Key\n",
"\t\t\tbreak\n",
"\t\t} else {\n",
"\t\t\tmarker = resp.NextMarker\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfs.Errorf(f, \"Expecting NextMarker but didn't find one\")\n"
],
"file_path": "backend/qingstor/qingstor.go",
"type": "replace",
"edit_start_line_idx": 586
} | // go run mksyscall.go -openbsd -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build openbsd,arm64
package unix
import (
"syscall"
"unsafe"
)
var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setgroups(ngid int, gid *_Gid_t) (err error) {
_, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
wpid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socket(domain int, typ int, proto int) (fd int, err error) {
r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
_, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
_, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Shutdown(s int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
_, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
var _p0 unsafe.Pointer
if len(mib) > 0 {
_p0 = unsafe.Pointer(&mib[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func utimes(path string, timeval *[2]Timeval) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func futimes(fd int, timeval *[2]Timeval) (err error) {
_, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntl(fd int, cmd int, arg int) (val int, err error) {
r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Madvise(b []byte, behav int) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlock(b []byte) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlockall(flags int) (err error) {
_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mprotect(b []byte, prot int) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Msync(b []byte, flags int) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlock(b []byte) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlockall() (err error) {
_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe(p *[2]_C_int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getdents(fd int, buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getcwd(buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ioctl(fd int, req uint, arg uintptr) (err error) {
_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Access(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
_, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chflags(path string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chmod(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chown(path string, uid int, gid int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chroot(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup(fd int) (nfd int, err error) {
r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0)
nfd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup2(from int, to int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Exit(code int) {
Syscall(SYS_EXIT, uintptr(code), 0, 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchdir(fd int) (err error) {
_, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchflags(fd int, flags int) (err error) {
_, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmod(fd int, mode uint32) (err error) {
_, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchown(fd int, uid int, gid int) (err error) {
_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Flock(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fpathconf(fd int, name int) (val int, err error) {
r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0)
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatfs(fd int, stat *Statfs_t) (err error) {
_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fsync(fd int) (err error) {
_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Ftruncate(fd int, length int64) (err error) {
_, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getegid() (egid int) {
r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
egid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Geteuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getgid() (gid int) {
r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
gid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgid(pid int) (pgid int, err error) {
r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
pgid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgrp() (pgrp int) {
r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0)
pgrp = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpid() (pid int) {
r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
pid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getppid() (ppid int) {
r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
ppid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpriority(which int, who int) (prio int, err error) {
r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
prio = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrlimit(which int, lim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrtable() (rtable int, err error) {
r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0)
rtable = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrusage(who int, rusage *Rusage) (err error) {
_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getsid(pid int) (sid int, err error) {
r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
sid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Gettimeofday(tv *Timeval) (err error) {
_, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Issetugid() (tainted bool) {
r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0)
tainted = bool(r0 != 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kill(pid int, signum syscall.Signal) (err error) {
_, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kqueue() (fd int, err error) {
r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lchown(path string, uid int, gid int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Link(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(link)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(link)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listen(s int, backlog int) (err error) {
_, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lstat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdir(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkfifo(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkfifoat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknod(path string, mode uint32, dev int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Open(path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pathconf(path string, name int) (val int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pread(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func read(fd int, p []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Readlink(path string, buf []byte) (n int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(buf) > 0 {
_p1 = unsafe.Pointer(&buf[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(buf) > 0 {
_p1 = unsafe.Pointer(&buf[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Rename(from string, to string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(from)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(to)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Renameat(fromfd int, from string, tofd int, to string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(from)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(to)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Revoke(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Rmdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0)
newoffset = int64(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) {
_, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setegid(egid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Seteuid(euid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setgid(gid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setlogin(name string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(name)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setpgid(pid int, pgid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setpriority(which int, who int, prio int) (err error) {
_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setregid(rgid int, egid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setreuid(ruid int, euid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setresgid(rgid int, egid int, sgid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setresuid(ruid int, euid int, suid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setrlimit(which int, lim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setrtable(rtable int) (err error) {
_, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setsid() (pid int, err error) {
r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
pid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Settimeofday(tp *Timeval) (err error) {
_, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setuid(uid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Stat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Statfs(path string, stat *Statfs_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Symlink(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(link)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(oldpath)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(newpath)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Sync() (err error) {
_, _, e1 := Syscall(SYS_SYNC, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Truncate(path string, length int64) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Umask(newmask int) (oldmask int) {
r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0)
oldmask = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unlink(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unlinkat(dirfd int, path string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unmount(path string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func write(fd int, p []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0)
ret = uintptr(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func munmap(addr uintptr, length uintptr) (err error) {
_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
| vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go | 0 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.0012784773716703057,
0.0001841650519054383,
0.0001621250994503498,
0.00016885067452676594,
0.00010204237332800403
] |
{
"id": 2,
"code_window": [
"\t\t// Use NextMarker if set, otherwise use last Key\n",
"\t\tif resp.NextMarker == nil || *resp.NextMarker == \"\" {\n",
"\t\t\t//marker = resp.Keys[len(resp.Keys)-1].Key\n",
"\t\t\tbreak\n",
"\t\t} else {\n",
"\t\t\tmarker = resp.NextMarker\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfs.Errorf(f, \"Expecting NextMarker but didn't find one\")\n"
],
"file_path": "backend/qingstor/qingstor.go",
"type": "replace",
"edit_start_line_idx": 586
} | //+build go1.9,!go1.10
// Fallback deadline setting for pre go1.10
package restic
import "time"
// SetDeadline sets the read/write deadline.
func (s *StdioConn) SetDeadline(t time.Time) error {
return nil
}
// SetReadDeadline sets the read deadline.
func (s *StdioConn) SetReadDeadline(t time.Time) error {
return nil
}
// SetWriteDeadline sets the write deadline.
func (s *StdioConn) SetWriteDeadline(t time.Time) error {
return nil
}
| cmd/serve/restic/stdio_conn_pre_go1.10.go | 0 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.00017820807988755405,
0.0001696735853329301,
0.00016507346299476922,
0.00016573919856455177,
0.0000060409192883525975
] |
{
"id": 3,
"code_window": [
"\t\"sync\"\n",
"\n",
"\t\"github.com/pkg/errors\"\n",
"\t\"github.com/rclone/rclone/fs\"\n",
"\tqs \"github.com/yunify/qingstor-sdk-go/service\"\n",
")\n",
"\n",
"const (\n",
"\t// maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tqs \"github.com/yunify/qingstor-sdk-go/v3/service\"\n"
],
"file_path": "backend/qingstor/upload.go",
"type": "replace",
"edit_start_line_idx": 17
} | // Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/
// +build !plan9
package qingstor
import (
"context"
"fmt"
"io"
"net/http"
"path"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
qsConfig "github.com/yunify/qingstor-sdk-go/config"
qsErr "github.com/yunify/qingstor-sdk-go/request/errors"
qs "github.com/yunify/qingstor-sdk-go/service"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "qingstor",
Description: "QingCloud Object Storage",
NewFs: NewFs,
Options: []fs.Option{{
Name: "env_auth",
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter QingStor credentials in the next step",
}, {
Value: "true",
Help: "Get QingStor credentials from the environment (env vars or IAM)",
}},
}, {
Name: "access_key_id",
Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "secret_access_key",
Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "endpoint",
Help: "Enter a endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"",
}, {
Name: "zone",
Help: "Zone to connect to.\nDefault is \"pek3a\".",
Examples: []fs.OptionExample{{
Value: "pek3a",
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
}, {
Value: "sh1a",
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
}, {
Value: "gd2a",
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
}},
}, {
Name: "connection_retries",
Help: "Number of connection retries.",
Default: 3,
Advanced: true,
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).
If you are uploading small numbers of large files over high speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 1,
Advanced: true,
}},
})
}
// Constants
const (
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
minChunkSize = fs.SizeSuffix(minMultiPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)
// Globals
func timestampToTime(tp int64) time.Time {
timeLayout := time.RFC3339Nano
ts := time.Unix(tp, 0).Format(timeLayout)
tm, _ := time.Parse(timeLayout, ts)
return tm.UTC()
}
// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Endpoint string `config:"endpoint"`
Zone string `config:"zone"`
ConnectionRetries int `config:"connection_retries"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
}
// Fs represents a remote qingstor server
type Fs struct {
name string // The name of the remote
root string // The root is a subdir, is a special object
opt Options // parsed options
features *fs.Features // optional features
svc *qs.Service // The connection to the qingstor server
zone string // The zone we are working on
bucket string // The bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucketOK and bucketDeleted
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
}
// Object describes a qingstor object
type Object struct {
// Will definitely have everything but meta which may be nil
//
// List will read everything but meta & mimeType - to fill
// that in you need to call readMetaData
fs *Fs // what this object is part of
remote string // object of remote
etag string // md5sum of the object
size int64 // length of the object content
mimeType string // ContentType of object - may be ""
lastModified time.Time // Last modified
	encrypted    bool      // whether the object is encrypted
algo string // Custom encryption algorithms
}
// ------------------------------------------------------------
// Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// qsParsePath parses a qingstor 'url'
func qsParsePath(path string) (bucket, key string, err error) {
// Pattern to match a qingstor path
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("Couldn't parse bucket out of qingstor path %q", path)
} else {
bucket, key = parts[1], parts[2]
key = strings.Trim(key, "/")
}
return
}
// qsParseEndpoint splits a URL into three parts: protocol, host and port
func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
/*
	 Pattern to match an endpoint,
eg: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", 443
"http(s)//qingstor.com" --> "http(s)", "qingstor.com", ""
"qingstor.com" --> "", "qingstor.com", ""
*/
defer func() {
if r := recover(); r != nil {
switch x := r.(type) {
case error:
err = x
default:
err = nil
}
}
}()
	var endpointMatcher = regexp.MustCompile(`^(?:(http|https)://)*(\w+\.(?:[\w\.])*)(?::(\d{0,5}))*$`)
	parts := endpointMatcher.FindStringSubmatch(endpoint)
protocol, host, port = parts[1], parts[2], parts[3]
return
}
// qsServiceConnection makes a connection to qingstor
func qsServiceConnection(opt *Options) (*qs.Service, error) {
accessKeyID := opt.AccessKeyID
secretAccessKey := opt.SecretAccessKey
switch {
case opt.EnvAuth:
// No need for empty checks if "env_auth" is true
case accessKeyID == "" && secretAccessKey == "":
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
case accessKeyID == "":
return nil, errors.New("access_key_id not found")
case secretAccessKey == "":
return nil, errors.New("secret_access_key not found")
}
protocol := "https"
host := "qingstor.com"
port := 443
endpoint := opt.Endpoint
if endpoint != "" {
_protocol, _host, _port, err := qsParseEndpoint(endpoint)
if err != nil {
return nil, fmt.Errorf("The endpoint \"%s\" format error", endpoint)
}
if _protocol != "" {
protocol = _protocol
}
host = _host
if _port != "" {
port, _ = strconv.Atoi(_port)
} else if protocol == "http" {
port = 80
}
}
cf, err := qsConfig.NewDefault()
if err != nil {
return nil, err
}
cf.AccessKeyID = accessKeyID
cf.SecretAccessKey = secretAccessKey
cf.Protocol = protocol
cf.Host = host
cf.Port = port
cf.ConnectionRetries = opt.ConnectionRetries
cf.Connection = fshttp.NewClient(fs.Config)
return qs.Init(cf)
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "qingstor: chunk size")
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
}
bucket, key, err := qsParsePath(root)
if err != nil {
return nil, err
}
svc, err := qsServiceConnection(opt)
if err != nil {
return nil, err
}
if opt.Zone == "" {
opt.Zone = "pek3a"
}
f := &Fs{
name: name,
root: key,
opt: *opt,
svc: svc,
zone: opt.Zone,
bucket: bucket,
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
if f.root != "" {
if !strings.HasSuffix(f.root, "/") {
f.root += "/"
}
//Check to see if the object exists
bucketInit, err := svc.Bucket(bucket, opt.Zone)
if err != nil {
return nil, err
}
_, err = bucketInit.HeadObject(key, &qs.HeadObjectInput{})
if err == nil {
f.root = path.Dir(key)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.root == "" {
return fmt.Sprintf("QingStor bucket %s", f.bucket)
}
return fmt.Sprintf("QingStor bucket %s root %s", f.bucket, f.root)
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
//return time.Nanosecond
//Not supported temporary
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
//return hash.HashSet(hash.HashNone)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Put creates a new object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
fsObj := &Object{
fs: f,
remote: src.Remote(),
}
return fsObj, fsObj.Update(ctx, in, src, options...)
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.fs
key := f.root + remote
source := path.Join("/"+srcFs.bucket, srcFs.root+srcObj.remote)
fs.Debugf(f, "Copied, source key is: %s, and dst key is: %s", source, key)
req := qs.PutObjectInput{
XQSCopySource: &source,
}
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return nil, err
}
_, err = bucketInit.PutObject(key, &req)
if err != nil {
fs.Debugf(f, "Copy Failed, API Error: %v", err)
return nil, err
}
return f.NewObject(ctx, remote)
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// Return an Object from a path
//
//If it can't be found it returns the error ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *qs.KeyType) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
// Set info
if info.Size != nil {
o.size = *info.Size
}
if info.Etag != nil {
o.etag = qs.StringValue(info.Etag)
}
if info.Modified == nil {
fs.Logf(o, "Failed to read last modified")
o.lastModified = time.Now()
} else {
o.lastModified = timestampToTime(int64(*info.Modified))
}
if info.MimeType != nil {
o.mimeType = qs.StringValue(info.MimeType)
}
if info.Encrypted != nil {
o.encrypted = qs.BoolValue(info.Encrypted)
}
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// listFn is called from list to handle an object.
type listFn func(remote string, object *qs.KeyType, isDirectory bool) error
// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error {
prefix := f.root
if dir != "" {
prefix += dir + "/"
}
delimiter := ""
if !recurse {
delimiter = "/"
}
maxLimit := int(listLimitSize)
var marker *string
for {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
// FIXME need to implement ALL loop
req := qs.ListObjectsInput{
Delimiter: &delimiter,
Prefix: &prefix,
Limit: &maxLimit,
Marker: marker,
}
resp, err := bucketInit.ListObjects(&req)
if err != nil {
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
err = fs.ErrorDirNotFound
}
}
return err
}
rootLength := len(f.root)
if !recurse {
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix == nil {
fs.Logf(f, "Nil common prefix received")
continue
}
remote := *commonPrefix
if !strings.HasPrefix(remote, f.root) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
err = fn(remote, &qs.KeyType{Key: &remote}, true)
if err != nil {
return err
}
}
}
for _, object := range resp.Keys {
key := qs.StringValue(object.Key)
if !strings.HasPrefix(key, f.root) {
fs.Logf(f, "Odd name received %q", key)
continue
}
remote := key[rootLength:]
err = fn(remote, object, false)
if err != nil {
return err
}
}
// Use NextMarker if set, otherwise use last Key
if resp.NextMarker == nil || *resp.NextMarker == "" {
//marker = resp.Keys[len(resp.Keys)-1].Key
break
} else {
marker = resp.NextMarker
}
}
return nil
}
// Convert a list item into a BasicInfo
func (f *Fs) itemToDirEntry(remote string, object *qs.KeyType, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
size := int64(0)
if object.Size != nil {
size = *object.Size
}
d := fs.NewDir(remote, time.Time{}).SetSize(size)
return d, nil
}
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketDeleted = false
f.bucketOKMu.Unlock()
}
}
// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(ctx, dir, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
})
if err != nil {
return nil, err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return entries, nil
}
// listBuckets lists the buckets to out
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
req := qs.ListBucketsInput{
Location: &f.zone,
}
resp, err := f.svc.ListBuckets(&req)
if err != nil {
return nil, err
}
for _, bucket := range resp.Buckets {
d := fs.NewDir(qs.StringValue(bucket.Name), qs.TimeValue(bucket.Created))
entries = append(entries, d)
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(ctx, dir)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
err = f.list(ctx, dir, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
// Check if the bucket exists
func (f *Fs) dirExists() (bool, error) {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return false, err
}
_, err = bucketInit.Head()
if err == nil {
return true, nil
}
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
err = nil
}
}
return false, err
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
/* When delete a bucket, qingstor need about 60 second to sync status;
So, need wait for it sync end if we try to operation a just deleted bucket
*/
retries := 0
for retries <= 120 {
statistics, err := bucketInit.GetStatistics()
if statistics == nil || err != nil {
break
}
switch *statistics.Status {
case "deleted":
fs.Debugf(f, "Wait for qingstor sync bucket status, retries: %d", retries)
time.Sleep(time.Second * 1)
retries++
continue
default:
break
}
break
}
if !f.bucketDeleted {
exists, err := f.dirExists()
if err == nil {
f.bucketOK = exists
}
if err != nil || exists {
return err
}
}
_, err = bucketInit.Put()
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusConflict {
err = nil
}
}
if err == nil {
f.bucketOK = true
f.bucketDeleted = false
}
return err
}
// dirIsEmpty check if the bucket empty
func (f *Fs) dirIsEmpty() (bool, error) {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return true, err
}
statistics, err := bucketInit.GetStatistics()
if err != nil {
return true, err
}
if *statistics.Count == 0 {
return true, nil
}
return false, nil
}
// Rmdir delete a bucket
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
isEmpty, err := f.dirIsEmpty()
if err != nil {
return err
}
if !isEmpty {
fs.Debugf(f, "The bucket %s you tried to delete not empty.", f.bucket)
return errors.New("BucketNotEmpty: The bucket you tried to delete is not empty")
}
fs.Debugf(f, "Tried to delete the bucket %s", f.bucket)
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
retries := 0
for retries <= 10 {
_, delErr := bucketInit.Delete()
if delErr != nil {
if e, ok := delErr.(*qsErr.QingStorError); ok {
switch e.Code {
// The status of "lease" takes a few seconds to "ready" when creating a new bucket
// wait for lease status ready
case "lease_not_ready":
fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries)
retries++
time.Sleep(time.Second * 1)
continue
default:
err = e
break
}
}
} else {
err = delErr
}
break
}
if err == nil {
f.bucketOK = false
f.bucketDeleted = true
}
return err
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
key := o.fs.root + o.remote
fs.Debugf(o, "Read metadata of key: %s", key)
resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{})
if err != nil {
fs.Debugf(o, "Read metadata failed, API Error: %v", err)
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
// Ignore missing Content-Length assuming it is 0
if resp.ContentLength != nil {
o.size = *resp.ContentLength
}
if resp.ETag != nil {
o.etag = qs.StringValue(resp.ETag)
}
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
o.lastModified = time.Now()
} else {
o.lastModified = *resp.LastModified
}
if resp.ContentType != nil {
o.mimeType = qs.StringValue(resp.ContentType)
}
if resp.XQSEncryptionCustomerAlgorithm != nil {
o.algo = qs.StringValue(resp.XQSEncryptionCustomerAlgorithm)
o.encrypted = true
}
return nil
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata, %v", err)
return time.Now()
}
modTime := o.lastModified
return modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
err := o.readMetaData()
if err != nil {
return err
}
o.lastModified = modTime
mimeType := fs.MimeType(ctx, o)
if o.size >= maxSizeForCopy {
fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
return nil
}
// Copy the object to itself to update the metadata
key := o.fs.root + o.remote
sourceKey := path.Join("/", o.fs.bucket, key)
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
req := qs.PutObjectInput{
XQSCopySource: &sourceKey,
ContentType: &mimeType,
}
_, err = bucketInit.PutObject(key, &req)
return err
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return nil, err
}
key := o.fs.root + o.remote
req := qs.GetObjectInput{}
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
_, value := option.Header()
req.Range = &value
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
resp, err := bucketInit.GetObject(key, &req)
if err != nil {
return nil, err
}
return resp.Body, nil
}
// Update in to the object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	// The maximum size of an uploaded object is multipartUploadSize * MaxMultipleParts
err := o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
key := o.fs.root + o.remote
// Guess the content type
mimeType := fs.MimeType(ctx, src)
req := uploadInput{
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
partSize: int64(o.fs.opt.ChunkSize),
concurrency: o.fs.opt.UploadConcurrency,
}
uploader := newUploader(&req)
size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if multipart {
err = uploader.upload()
} else {
err = uploader.singlePartUpload(in, size)
}
if err != nil {
return err
}
// Read Metadata of object
err = o.readMetaData()
return err
}
// Remove this object
func (o *Object) Remove(ctx context.Context) error {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
key := o.fs.root + o.remote
_, err = bucketInit.DeleteObject(key)
return err
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
etag := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(etag) {
fs.Debugf(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
return "", nil
}
return etag, nil
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// String returns a description of the Object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.size
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return ""
}
return o.mimeType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
_ fs.ListRer = &Fs{}
_ fs.MimeTyper = &Object{}
)
| backend/qingstor/qingstor.go | 1 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.9201679229736328,
0.008952874690294266,
0.00016178225632756948,
0.0001733337703626603,
0.08729267865419388
] |
{
"id": 3,
"code_window": [
"\t\"sync\"\n",
"\n",
"\t\"github.com/pkg/errors\"\n",
"\t\"github.com/rclone/rclone/fs\"\n",
"\tqs \"github.com/yunify/qingstor-sdk-go/service\"\n",
")\n",
"\n",
"const (\n",
"\t// maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tqs \"github.com/yunify/qingstor-sdk-go/v3/service\"\n"
],
"file_path": "backend/qingstor/upload.go",
"type": "replace",
"edit_start_line_idx": 17
} | // This file contains the albums abstraction
package googlephotos
import (
"path"
"strings"
"sync"
"github.com/rclone/rclone/backend/googlephotos/api"
)
// All the albums
type albums struct {
mu sync.Mutex
dupes map[string][]*api.Album // duplicated names
byID map[string]*api.Album //..indexed by ID
byTitle map[string]*api.Album //..indexed by Title
path map[string][]string // partial album names to directory
}
// newAlbums creates a new empty albums structure
func newAlbums() *albums {
return &albums{
dupes: map[string][]*api.Album{},
byID: map[string]*api.Album{},
byTitle: map[string]*api.Album{},
path: map[string][]string{},
}
}
// add an album
func (as *albums) add(album *api.Album) {
// Munge the name of the album into a sensible path name
album.Title = path.Clean(album.Title)
if album.Title == "." || album.Title == "/" {
album.Title = addID("", album.ID)
}
as.mu.Lock()
as._add(album)
as.mu.Unlock()
}
// _add an album - call with lock held
func (as *albums) _add(album *api.Album) {
// update dupes by title
dupes := as.dupes[album.Title]
dupes = append(dupes, album)
as.dupes[album.Title] = dupes
// Dedupe the album name if necessary
if len(dupes) >= 2 {
// If this is the first dupe, then need to adjust the first one
if len(dupes) == 2 {
firstAlbum := dupes[0]
as._del(firstAlbum)
as._add(firstAlbum)
// undo add of firstAlbum to dupes
as.dupes[album.Title] = dupes
}
album.Title = addID(album.Title, album.ID)
}
// Store the new album
as.byID[album.ID] = album
as.byTitle[album.Title] = album
// Store the partial paths
dir, leaf := album.Title, ""
for dir != "" {
i := strings.LastIndex(dir, "/")
if i >= 0 {
dir, leaf = dir[:i], dir[i+1:]
} else {
dir, leaf = "", dir
}
dirs := as.path[dir]
found := false
for _, dir := range dirs {
if dir == leaf {
found = true
}
}
if !found {
as.path[dir] = append(as.path[dir], leaf)
}
}
}
// del an album
func (as *albums) del(album *api.Album) {
as.mu.Lock()
as._del(album)
as.mu.Unlock()
}
// _del an album - call with lock held
func (as *albums) _del(album *api.Album) {
// We leave in dupes so it doesn't cause albums to get renamed
// Remove from byID and byTitle
delete(as.byID, album.ID)
delete(as.byTitle, album.Title)
// Remove from paths
dir, leaf := album.Title, ""
for dir != "" {
// Can't delete if this dir exists anywhere in the path structure
if _, found := as.path[dir]; found {
break
}
i := strings.LastIndex(dir, "/")
if i >= 0 {
dir, leaf = dir[:i], dir[i+1:]
} else {
dir, leaf = "", dir
}
dirs := as.path[dir]
for i, dir := range dirs {
if dir == leaf {
dirs = append(dirs[:i], dirs[i+1:]...)
break
}
}
if len(dirs) == 0 {
delete(as.path, dir)
} else {
as.path[dir] = dirs
}
}
}
// get an album by title
func (as *albums) get(title string) (album *api.Album, ok bool) {
as.mu.Lock()
defer as.mu.Unlock()
album, ok = as.byTitle[title]
return album, ok
}
// getDirs gets directories below an album path
func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) {
as.mu.Lock()
defer as.mu.Unlock()
dirs, ok = as.path[albumPath]
return dirs, ok
}
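The de-duplication behaviour above is easiest to see with a concrete call sequence. The following sketch is hypothetical usage written as if it lived in the same package; it is not part of the original file, the album IDs and titles are made up, and the exact de-duplicated titles depend on the addID helper defined elsewhere in this backend.

package googlephotos

import (
	"fmt"

	"github.com/rclone/rclone/backend/googlephotos/api"
)

// exampleAlbums shows how two albums with the same title are stored.
func exampleAlbums() {
	as := newAlbums()
	as.add(&api.Album{ID: "a1", Title: "Holiday"})
	as.add(&api.Album{ID: "a2", Title: "Holiday"}) // duplicate title

	// After the second add, both entries have been renamed via addID so their
	// titles are unique; the root listing contains the de-duplicated leaf names.
	dirs, ok := as.getDirs("")
	fmt.Println(ok, dirs)
}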
| backend/googlephotos/albums.go | 0 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.00025685629225336015,
0.0001762738829711452,
0.00016575815971009433,
0.0001701745786704123,
0.000021727561033912934
] |
{
"id": 3,
"code_window": [
"\t\"sync\"\n",
"\n",
"\t\"github.com/pkg/errors\"\n",
"\t\"github.com/rclone/rclone/fs\"\n",
"\tqs \"github.com/yunify/qingstor-sdk-go/service\"\n",
")\n",
"\n",
"const (\n",
"\t// maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tqs \"github.com/yunify/qingstor-sdk-go/v3/service\"\n"
],
"file_path": "backend/qingstor/upload.go",
"type": "replace",
"edit_start_line_idx": 17
} | ---
date: 2019-06-20T16:09:42+01:00
title: "rclone check"
slug: rclone_check
url: /commands/rclone_check/
---
## rclone check
Checks the files in the source and destination match.
### Synopsis
Checks the files in the source and destination match. It compares
sizes and hashes (MD5 or SHA1) and logs a report of files which don't
match. It doesn't alter the source or destination.
If you supply the --size-only flag, it will only compare the sizes, not
the hashes. Use this for a quick check.
If you supply the --download flag, it will download the data from
both remotes and check them against each other on the fly. This can
be useful for remotes that don't support hashes or if you really want
to check all the data.
If you supply the --one-way flag, it will only check that files in the source
match the files in the destination, not the other way around. This means extra files in
the destination that are not in the source will not trigger an error.
```
rclone check source:path dest:path [flags]
```
### Options
```
--download Check by downloading rather than with hash.
-h, --help help for check
--one-way Check one way only, source files must exist on remote
```
See the [global flags page](/flags/) for global options not listed here.
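For example, a quick size-only comparison that only flags source files missing from the destination (the paths below are placeholders) could be run as:

```
rclone check /path/to/local remote:backup --size-only --one-way
```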
### SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
| docs/content/commands/rclone_check.md | 0 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.0002769594721030444,
0.0001900641218526289,
0.00016593288455624133,
0.0001691916841082275,
0.00004348767834017053
] |
{
"id": 3,
"code_window": [
"\t\"sync\"\n",
"\n",
"\t\"github.com/pkg/errors\"\n",
"\t\"github.com/rclone/rclone/fs\"\n",
"\tqs \"github.com/yunify/qingstor-sdk-go/service\"\n",
")\n",
"\n",
"const (\n",
"\t// maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tqs \"github.com/yunify/qingstor-sdk-go/v3/service\"\n"
],
"file_path": "backend/qingstor/upload.go",
"type": "replace",
"edit_start_line_idx": 17
} | // Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
/*
Package tag contains OpenCensus tags.
Tags are key-value pairs. Tags provide additional cardinality to
the OpenCensus instrumentation data.
Tags can be propagated on the wire and in the same
process via context.Context. Encode and Decode should be
used to represent tags into their binary propagation form.
*/
package tag // import "go.opencensus.io/tag"
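To make the package comment above concrete, here is a small, hedged sketch of the API it describes: create a key, attach a value to a context, and round-trip the resulting tag map through its binary form with Encode/Decode. This example is editorial, not part of the vendored file, and the key name and value are arbitrary.

package main

import (
	"context"
	"log"

	"go.opencensus.io/tag"
)

func main() {
	// Create a tag key and insert a value for it into the context.
	frontendKey, err := tag.NewKey("frontend")
	if err != nil {
		log.Fatal(err)
	}
	ctx, err := tag.New(context.Background(), tag.Insert(frontendKey, "mobile-ios"))
	if err != nil {
		log.Fatal(err)
	}

	// Encode the context's tag map for wire propagation, then decode it again.
	encoded := tag.Encode(tag.FromContext(ctx))
	decoded, err := tag.Decode(encoded)
	if err != nil {
		log.Fatal(err)
	}
	_ = decoded // carries the same key/value pairs as the tag map in ctx
}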
| vendor/go.opencensus.io/tag/doc.go | 0 | https://github.com/rclone/rclone/commit/c2050172aa3b1e9c2a323cbd98b4df0d66450360 | [
0.00017809748533181846,
0.0001725236652418971,
0.0001633266656426713,
0.0001761468593031168,
0.000006551839760504663
] |
{
"id": 0,
"code_window": [
"\tdefer func() {\n",
"\t\t// If we acquired the semaphore but the cancellation request failed, then\n",
"\t\t// hold on to the semaphore for longer. This helps mitigate a DoS attack\n",
"\t\t// of random cancellation requests.\n",
"\t\tif !resp.Canceled {\n",
"\t\t\ttime.Sleep(1 * time.Second)\n",
"\t\t}\n",
"\t\talloc.Release()\n",
"\t}()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif err != nil || (resp != nil && !resp.Canceled) {\n"
],
"file_path": "pkg/server/tenant_status.go",
"type": "replace",
"edit_start_line_idx": 289
} | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package pgwire
import (
"context"
"crypto/tls"
"encoding/base64"
"fmt"
"io"
"net"
"net/url"
"strconv"
"strings"
"sync/atomic"
"time"
"unicode"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catconstants"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/hba"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/identmap"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirebase"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirecancel"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/contextutil"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/log/eventpb"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/logtags"
"github.com/cockroachdb/redact"
"go.opentelemetry.io/otel/attribute"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// ATTENTION: After changing this value in a unit test, you probably want to
// open a new connection pool since the connections in the existing one are not
// affected.
//
// The "results_buffer_size" connection parameter can be used to override this
// default for an individual connection.
var connResultsBufferSize = settings.RegisterByteSizeSetting(
settings.TenantWritable,
"sql.defaults.results_buffer.size",
"default size of the buffer that accumulates results for a statement or a batch "+
"of statements before they are sent to the client. This can be overridden on "+
"an individual connection with the 'results_buffer_size' parameter. Note that auto-retries "+
"generally only happen while no results have been delivered to the client, so "+
"reducing this size can increase the number of retriable errors a client "+
"receives. On the other hand, increasing the buffer size can increase the "+
"delay until the client receives the first result row. "+
"Updating the setting only affects new connections. "+
"Setting to 0 disables any buffering.",
16<<10, // 16 KiB
).WithPublic()
var logConnAuth = settings.RegisterBoolSetting(
settings.TenantWritable,
sql.ConnAuditingClusterSettingName,
"if set, log SQL client connect and disconnect events (note: may hinder performance on loaded nodes)",
false).WithPublic()
var logSessionAuth = settings.RegisterBoolSetting(
settings.TenantWritable,
sql.AuthAuditingClusterSettingName,
"if set, log SQL session login/disconnection events (note: may hinder performance on loaded nodes)",
false).WithPublic()
const (
// ErrSSLRequired is returned when a client attempts to connect to a
// secure server in cleartext.
ErrSSLRequired = "node is running secure mode, SSL connection required"
// ErrDrainingNewConn is returned when a client attempts to connect to a server
// which is not accepting client connections.
ErrDrainingNewConn = "server is not accepting clients, try another node"
// ErrDrainingExistingConn is returned when a connection is shut down because
// the server is draining.
ErrDrainingExistingConn = "server is shutting down"
)
// Fully-qualified names for metrics.
var (
MetaConns = metric.Metadata{
Name: "sql.conns",
Help: "Number of active sql connections",
Measurement: "Connections",
Unit: metric.Unit_COUNT,
}
MetaNewConns = metric.Metadata{
Name: "sql.new_conns",
Help: "Counter of the number of sql connections created",
Measurement: "Connections",
Unit: metric.Unit_COUNT,
}
MetaBytesIn = metric.Metadata{
Name: "sql.bytesin",
Help: "Number of sql bytes received",
Measurement: "SQL Bytes",
Unit: metric.Unit_BYTES,
}
MetaBytesOut = metric.Metadata{
Name: "sql.bytesout",
Help: "Number of sql bytes sent",
Measurement: "SQL Bytes",
Unit: metric.Unit_BYTES,
}
MetaConnLatency = metric.Metadata{
Name: "sql.conn.latency",
Help: "Latency to establish and authenticate a SQL connection",
Measurement: "Nanoseconds",
Unit: metric.Unit_NANOSECONDS,
}
MetaPGWireCancelTotal = metric.Metadata{
Name: "sql.pgwire_cancel.total",
Help: "Counter of the number of pgwire query cancel requests",
Measurement: "Requests",
Unit: metric.Unit_COUNT,
}
MetaPGWireCancelIgnored = metric.Metadata{
Name: "sql.pgwire_cancel.ignored",
Help: "Counter of the number of pgwire query cancel requests that were ignored due to rate limiting",
Measurement: "Requests",
Unit: metric.Unit_COUNT,
}
MetaPGWireCancelSuccessful = metric.Metadata{
Name: "sql.pgwire_cancel.successful",
Help: "Counter of the number of pgwire query cancel requests that were successful",
Measurement: "Requests",
Unit: metric.Unit_COUNT,
}
)
const (
// The below constants can occur during the first message a client
// sends to the server. There are two categories: protocol version and
// request code. The protocol version is (major version number << 16)
// + minor version number. Request codes are (1234 << 16) + 5678 + N,
	// where N starts at 0 and increases by 1 for every new request
// code added, which happens rarely during major or minor Postgres
// releases.
//
// See: https://www.postgresql.org/docs/current/protocol-message-formats.html
version30 = 196608 // (3 << 16) + 0
versionCancel = 80877102 // (1234 << 16) + 5678
versionSSL = 80877103 // (1234 << 16) + 5679
versionGSSENC = 80877104 // (1234 << 16) + 5680
)
// cancelMaxWait is the amount of time a draining server gives to sessions to
// react to cancellation and return before a forceful shutdown.
const cancelMaxWait = 1 * time.Second
// baseSQLMemoryBudget is the amount of memory pre-allocated in each connection.
var baseSQLMemoryBudget = envutil.EnvOrDefaultInt64("COCKROACH_BASE_SQL_MEMORY_BUDGET",
int64(2.1*float64(mon.DefaultPoolAllocationSize)))
// connReservationBatchSize determines for how many connections memory
// is pre-reserved at once.
var connReservationBatchSize = 5
var (
sslSupported = []byte{'S'}
sslUnsupported = []byte{'N'}
)
// cancelChanMap keeps track of channels that are closed after the associated
// cancellation function has been called and the cancellation has taken place.
type cancelChanMap map[chan struct{}]context.CancelFunc
// Server implements the server side of the PostgreSQL wire protocol.
type Server struct {
AmbientCtx log.AmbientContext
cfg *base.Config
SQLServer *sql.Server
execCfg *sql.ExecutorConfig
metrics ServerMetrics
mu struct {
syncutil.Mutex
// connCancelMap entries represent connections started when the server
// was not draining. Each value is a function that can be called to
// cancel the associated connection. The corresponding key is a channel
// that is closed when the connection is done.
connCancelMap cancelChanMap
draining bool
}
auth struct {
syncutil.RWMutex
conf *hba.Conf
identityMap *identmap.Conf
}
sqlMemoryPool *mon.BytesMonitor
connMonitor *mon.BytesMonitor
// testing{Conn,Auth}LogEnabled is used in unit tests in this
// package to force-enable conn/auth logging without dancing around
// the asynchronicity of cluster settings.
testingConnLogEnabled int32
testingAuthLogEnabled int32
// trustClientProvidedRemoteAddr indicates whether the server should honor
// a `crdb:remote_addr` status parameter provided by the client during
// session authentication. This status parameter can be set by SQL proxies
// to feed the "real" client address, where otherwise the CockroachDB SQL
// server would only see the address of the proxy.
//
// This setting is security-sensitive and should not be enabled
// without a SQL proxy that carefully scrubs any client-provided
// `crdb:remote_addr` field. In particular, this setting should never
// be set when there is no SQL proxy at all. Otherwise, a malicious
// client could use this field to pretend being from another address
// than its own and defeat the HBA rules.
//
// TODO(knz,ben): It would be good to have something more specific
// than a boolean, i.e. to accept the provided address only from
// certain peer IPs, or with certain certificates. (could it be a
// special hba.conf directive?)
trustClientProvidedRemoteAddr syncutil.AtomicBool
}
// ServerMetrics is the set of metrics for the pgwire server.
type ServerMetrics struct {
BytesInCount *metric.Counter
BytesOutCount *metric.Counter
Conns *metric.Gauge
NewConns *metric.Counter
ConnLatency *metric.Histogram
PGWireCancelTotalCount *metric.Counter
PGWireCancelIgnoredCount *metric.Counter
PGWireCancelSuccessfulCount *metric.Counter
ConnMemMetrics sql.BaseMemoryMetrics
SQLMemMetrics sql.MemoryMetrics
}
func makeServerMetrics(
sqlMemMetrics sql.MemoryMetrics, histogramWindow time.Duration,
) ServerMetrics {
return ServerMetrics{
BytesInCount: metric.NewCounter(MetaBytesIn),
BytesOutCount: metric.NewCounter(MetaBytesOut),
Conns: metric.NewGauge(MetaConns),
NewConns: metric.NewCounter(MetaNewConns),
ConnLatency: metric.NewLatency(MetaConnLatency, histogramWindow),
PGWireCancelTotalCount: metric.NewCounter(MetaPGWireCancelTotal),
PGWireCancelIgnoredCount: metric.NewCounter(MetaPGWireCancelIgnored),
PGWireCancelSuccessfulCount: metric.NewCounter(MetaPGWireCancelSuccessful),
ConnMemMetrics: sql.MakeBaseMemMetrics("conns", histogramWindow),
SQLMemMetrics: sqlMemMetrics,
}
}
// noteworthySQLMemoryUsageBytes is the minimum size tracked by the
// client SQL pool before the pool starts explicitly logging overall
// usage growth in the log.
var noteworthySQLMemoryUsageBytes = envutil.EnvOrDefaultInt64("COCKROACH_NOTEWORTHY_SQL_MEMORY_USAGE", 100*1024*1024)
// noteworthyConnMemoryUsageBytes is the minimum size tracked by the
// connection monitor before the monitor starts explicitly logging overall
// usage growth in the log.
var noteworthyConnMemoryUsageBytes = envutil.EnvOrDefaultInt64("COCKROACH_NOTEWORTHY_CONN_MEMORY_USAGE", 2*1024*1024)
// MakeServer creates a Server.
//
// Start() needs to be called on the Server so it begins processing.
func MakeServer(
ambientCtx log.AmbientContext,
cfg *base.Config,
st *cluster.Settings,
sqlMemMetrics sql.MemoryMetrics,
parentMemoryMonitor *mon.BytesMonitor,
histogramWindow time.Duration,
executorConfig *sql.ExecutorConfig,
) *Server {
server := &Server{
AmbientCtx: ambientCtx,
cfg: cfg,
execCfg: executorConfig,
metrics: makeServerMetrics(sqlMemMetrics, histogramWindow),
}
server.sqlMemoryPool = mon.NewMonitor("sql",
mon.MemoryResource,
// Note that we don't report metrics on this monitor. The reason for this is
// that we report metrics on the sum of all the child monitors of this pool.
// This monitor is the "main sql" monitor. It's a child of the root memory
// monitor. Its children are the sql monitors for each new connection. The
// sum of those children, plus the extra memory in the "conn" monitor below,
// is more than enough metrics information about the monitors.
nil, /* curCount */
nil, /* maxHist */
0, noteworthySQLMemoryUsageBytes, st)
server.sqlMemoryPool.Start(context.Background(), parentMemoryMonitor, mon.BoundAccount{})
server.SQLServer = sql.NewServer(executorConfig, server.sqlMemoryPool)
// TODO(knz,ben): Use a cluster setting for this.
server.trustClientProvidedRemoteAddr.Set(trustClientProvidedRemoteAddrOverride)
server.connMonitor = mon.NewMonitor("conn",
mon.MemoryResource,
server.metrics.ConnMemMetrics.CurBytesCount,
server.metrics.ConnMemMetrics.MaxBytesHist,
int64(connReservationBatchSize)*baseSQLMemoryBudget, noteworthyConnMemoryUsageBytes, st)
server.connMonitor.Start(context.Background(), server.sqlMemoryPool, mon.BoundAccount{})
server.mu.Lock()
server.mu.connCancelMap = make(cancelChanMap)
server.mu.Unlock()
connAuthConf.SetOnChange(&st.SV, func(ctx context.Context) {
loadLocalHBAConfigUponRemoteSettingChange(
ambientCtx.AnnotateCtx(context.Background()), server, st)
})
connIdentityMapConf.SetOnChange(&st.SV, func(ctx context.Context) {
loadLocalIdentityMapUponRemoteSettingChange(
ambientCtx.AnnotateCtx(context.Background()), server, st)
})
return server
}
// BytesOut returns the total number of bytes transmitted from this server.
func (s *Server) BytesOut() uint64 {
return uint64(s.metrics.BytesOutCount.Count())
}
// AnnotateCtxForIncomingConn annotates the provided context with a
// tag that reports the peer's address. In the common case, the
// context is annotated with a "client" tag. When the server is
// configured to recognize client-specified remote addresses, it is
// annotated with a "peer" tag and the "client" tag is added later
// when the session is set up.
func (s *Server) AnnotateCtxForIncomingConn(ctx context.Context, conn net.Conn) context.Context {
tag := "client"
if s.trustClientProvidedRemoteAddr.Get() {
tag = "peer"
}
return logtags.AddTag(ctx, tag, conn.RemoteAddr().String())
}
// Match returns true if rd appears to be a Postgres connection.
func Match(rd io.Reader) bool {
buf := pgwirebase.MakeReadBuffer()
_, err := buf.ReadUntypedMsg(rd)
if err != nil {
return false
}
version, err := buf.GetUint32()
if err != nil {
return false
}
return version == version30 || version == versionSSL || version == versionCancel || version == versionGSSENC
}
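// A hedged usage sketch (the surrounding listener plumbing is hypothetical):
// a protocol-sniffing mux can buffer the first packet of an incoming
// connection, call Match on those bytes, and hand the connection to this
// server only when a recognized pgwire version code is present. Because Match
// consumes the startup message from rd, such a caller must replay the buffered
// bytes before invoking ServeConn, which re-reads the startup message itself.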
// Start makes the Server ready for serving connections.
func (s *Server) Start(ctx context.Context, stopper *stop.Stopper) {
s.SQLServer.Start(ctx, stopper)
}
// IsDraining returns true if the server is not currently accepting
// connections.
func (s *Server) IsDraining() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.mu.draining
}
// Metrics returns the set of metrics structs.
func (s *Server) Metrics() (res []interface{}) {
return []interface{}{
&s.metrics,
&s.SQLServer.Metrics.StartedStatementCounters,
&s.SQLServer.Metrics.ExecutedStatementCounters,
&s.SQLServer.Metrics.EngineMetrics,
&s.SQLServer.Metrics.GuardrailMetrics,
&s.SQLServer.InternalMetrics.StartedStatementCounters,
&s.SQLServer.InternalMetrics.ExecutedStatementCounters,
&s.SQLServer.InternalMetrics.EngineMetrics,
&s.SQLServer.InternalMetrics.GuardrailMetrics,
&s.SQLServer.ServerMetrics.StatsMetrics,
&s.SQLServer.ServerMetrics.ContentionSubsystemMetrics,
}
}
// Drain prevents new connections from being served and waits for drainWait for
// open connections to terminate before canceling them.
// An error will be returned when connections that have been canceled have not
// responded to this cancellation and closed themselves in time. The server
// will remain in draining state, though open connections may continue to
// exist.
// The RFC on drain modes has more information regarding the specifics of
// what will happen to connections in different states:
// https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160425_drain_modes.md
//
// The reporter callback, if non-nil, is called on a best effort basis
// to report work that needed to be done and which may or may not have
// been done by the time this call returns. See the explanation in
// pkg/server/drain.go for details.
func (s *Server) Drain(
ctx context.Context, drainWait time.Duration, reporter func(int, redact.SafeString),
) error {
return s.drainImpl(ctx, drainWait, cancelMaxWait, reporter)
}
// Undrain switches the server back to the normal mode of operation in which
// connections are accepted.
func (s *Server) Undrain() {
s.mu.Lock()
s.setDrainingLocked(false)
s.mu.Unlock()
}
// setDrainingLocked sets the server's draining state and returns whether the
// state changed (i.e. drain != s.mu.draining). s.mu must be locked.
func (s *Server) setDrainingLocked(drain bool) bool {
if s.mu.draining == drain {
return false
}
s.mu.draining = drain
return true
}
// drainImpl drains the SQL clients.
//
// The queryWait duration is used to wait on clients to
// self-disconnect after their session has been canceled. The
// cancelWait is used to wait after the queryWait timer has expired
// and there are still clients connected, and their context.Context is
// canceled.
//
// The reporter callback, if non-nil, is called on a best effort basis
// to report work that needed to be done and which may or may not have
// been done by the time this call returns. See the explanation in
// pkg/server/drain.go for details.
func (s *Server) drainImpl(
ctx context.Context,
queryWait time.Duration,
cancelWait time.Duration,
reporter func(int, redact.SafeString),
) error {
// This anonymous function returns a copy of s.mu.connCancelMap if there are
// any active connections to cancel. We will only attempt to cancel
// connections that were active at the moment the draining switch happened.
// It is enough to do this because:
// 1) If no new connections are added to the original map all connections
// will be canceled.
// 2) If new connections are added to the original map, it follows that they
// were added when s.mu.draining = false, thus not requiring cancellation.
// These connections are not our responsibility and will be handled when the
// server starts draining again.
connCancelMap := func() cancelChanMap {
s.mu.Lock()
defer s.mu.Unlock()
if !s.setDrainingLocked(true) {
// We are already draining.
return nil
}
connCancelMap := make(cancelChanMap)
for done, cancel := range s.mu.connCancelMap {
connCancelMap[done] = cancel
}
return connCancelMap
}()
if len(connCancelMap) == 0 {
return nil
}
if reporter != nil {
// Report progress to the Drain RPC.
reporter(len(connCancelMap), "SQL clients")
}
// Spin off a goroutine that waits for all connections to signal that they
// are done and reports it on allConnsDone. The main goroutine signals this
// goroutine to stop work through quitWaitingForConns.
allConnsDone := make(chan struct{})
quitWaitingForConns := make(chan struct{})
defer close(quitWaitingForConns)
go func() {
defer close(allConnsDone)
for done := range connCancelMap {
select {
case <-done:
case <-quitWaitingForConns:
return
}
}
}()
	// Wait for all connections to finish, up to queryWait.
select {
case <-time.After(queryWait):
log.Ops.Warningf(ctx, "canceling all sessions after waiting %s", queryWait)
case <-allConnsDone:
}
// Cancel the contexts of all sessions if the server is still in draining
// mode.
if stop := func() bool {
s.mu.Lock()
defer s.mu.Unlock()
if !s.mu.draining {
return true
}
for _, cancel := range connCancelMap {
// There is a possibility that different calls to SetDraining have
// overlapping connCancelMaps, but context.CancelFunc calls are
// idempotent.
cancel()
}
return false
}(); stop {
return nil
}
select {
case <-time.After(cancelWait):
return errors.Errorf("some sessions did not respond to cancellation within %s", cancelWait)
case <-allConnsDone:
}
return nil
}
// SocketType indicates the connection type. This is an optimization to
// prevent a comparison against conn.LocalAddr().Network().
type SocketType bool
const (
// SocketTCP is used for TCP sockets. The standard.
SocketTCP SocketType = true
	// SocketUnix is used for unix domain sockets.
SocketUnix SocketType = false
)
func (s SocketType) asConnType() (hba.ConnType, error) {
switch s {
case SocketTCP:
return hba.ConnHostNoSSL, nil
case SocketUnix:
return hba.ConnLocal, nil
default:
return 0, errors.AssertionFailedf("unimplemented socket type: %v", errors.Safe(s))
}
}
func (s *Server) connLogEnabled() bool {
return atomic.LoadInt32(&s.testingConnLogEnabled) != 0 || logConnAuth.Get(&s.execCfg.Settings.SV)
}
// TestingEnableConnLogging is exported for use in tests.
func (s *Server) TestingEnableConnLogging() {
atomic.StoreInt32(&s.testingConnLogEnabled, 1)
}
// TestingEnableAuthLogging is exported for use in tests.
func (s *Server) TestingEnableAuthLogging() {
atomic.StoreInt32(&s.testingAuthLogEnabled, 1)
}
// ServeConn serves a single connection, driving the handshake process and
// delegating to the appropriate connection type.
//
// The socketType argument is an optimization to avoid a string
// compare on conn.LocalAddr().Network(). When the socket type is
// a unix domain socket (local filesystem), SSL negotiation is disabled
// even when the server is running securely with certificates.
// This has the effect of forcing password auth, also in a way
// compatible with postgres.
//
// An error is returned if the initial handshake of the connection fails.
func (s *Server) ServeConn(ctx context.Context, conn net.Conn, socketType SocketType) error {
ctx, draining, onCloseFn := s.registerConn(ctx)
defer onCloseFn()
connDetails := eventpb.CommonConnectionDetails{
InstanceID: int32(s.execCfg.NodeID.SQLInstanceID()),
Network: conn.RemoteAddr().Network(),
RemoteAddress: conn.RemoteAddr().String(),
}
// Some bookkeeping, for security-minded administrators.
// This registers the connection to the authentication log.
connStart := timeutil.Now()
if s.connLogEnabled() {
ev := &eventpb.ClientConnectionStart{
CommonEventDetails: eventpb.CommonEventDetails{Timestamp: connStart.UnixNano()},
CommonConnectionDetails: connDetails,
}
log.StructuredEvent(ctx, ev)
}
defer func() {
// The duration of the session is logged at the end so that the
// reader of the log file can know how much to look back in time
// to find when the connection was opened. This is important
// because the log files may have been rotated since.
if s.connLogEnabled() {
endTime := timeutil.Now()
ev := &eventpb.ClientConnectionEnd{
CommonEventDetails: eventpb.CommonEventDetails{Timestamp: endTime.UnixNano()},
CommonConnectionDetails: connDetails,
Duration: endTime.Sub(connStart).Nanoseconds(),
}
log.StructuredEvent(ctx, ev)
}
}()
// In any case, first check the command in the start-up message.
//
// We're assuming that a client is not willing/able to receive error
// packets before we drain that message.
version, buf, err := s.readVersion(conn)
if err != nil {
return err
}
switch version {
case versionCancel:
// The cancel message is rather peculiar: it is sent without
// authentication, always over an unencrypted channel.
s.handleCancel(ctx, conn, &buf)
return nil
case versionGSSENC:
// This is a request for an unsupported feature: GSS encryption.
// https://github.com/cockroachdb/cockroach/issues/52184
//
// Ensure the right SQLSTATE is sent to the SQL client.
err := pgerror.New(pgcode.ProtocolViolation, "GSS encryption is not yet supported")
// Annotate a telemetry key. These objects
// are treated specially by sendErr: they increase a
// telemetry counter to indicate an attempt was made
// to use this feature.
err = errors.WithTelemetry(err, "#52184")
return s.sendErr(ctx, conn, err)
}
// If the server is shutting down, terminate the connection early.
if draining {
log.Ops.Info(ctx, "rejecting new connection while server is draining")
return s.sendErr(ctx, conn, newAdminShutdownErr(ErrDrainingNewConn))
}
// Compute the initial connType.
connType, err := socketType.asConnType()
if err != nil {
return err
}
// If the client requests SSL, upgrade the connection to use TLS.
var clientErr error
conn, connType, version, clientErr, err = s.maybeUpgradeToSecureConn(ctx, conn, connType, version, &buf)
if err != nil {
return err
}
if clientErr != nil {
return s.sendErr(ctx, conn, clientErr)
}
sp := tracing.SpanFromContext(ctx)
sp.SetTag("conn_type", attribute.StringValue(connType.String()))
// What does the client want to do?
switch version {
case version30:
// Normal SQL connection. Proceed normally below.
case versionCancel:
// The PostgreSQL protocol definition says that cancel payloads
// must be sent *prior to upgrading the connection to use TLS*.
// Yet, we've found clients in the wild that send the cancel
// after the TLS handshake, for example at
// https://github.com/cockroachlabs/support/issues/600.
s.handleCancel(ctx, conn, &buf)
return nil
default:
// We don't know this protocol.
err := pgerror.Newf(pgcode.ProtocolViolation, "unknown protocol version %d", version)
err = errors.WithTelemetry(err, fmt.Sprintf("protocol-version-%d", version))
return s.sendErr(ctx, conn, err)
}
// Reserve some memory for this connection using the server's monitor. This
// reduces pressure on the shared pool because the server monitor allocates in
// chunks from the shared pool and these chunks should be larger than
// baseSQLMemoryBudget.
reserved := s.connMonitor.MakeBoundAccount()
if err := reserved.Grow(ctx, baseSQLMemoryBudget); err != nil {
return errors.Wrapf(err, "unable to pre-allocate %d bytes for this connection",
baseSQLMemoryBudget)
}
// Load the client-provided session parameters.
var sArgs sql.SessionArgs
if sArgs, err = parseClientProvidedSessionParameters(ctx, &s.execCfg.Settings.SV, &buf,
conn.RemoteAddr(), s.trustClientProvidedRemoteAddr.Get()); err != nil {
return s.sendErr(ctx, conn, err)
}
// Populate the client address field in the context tags and the
// shared struct for structured logging.
// Only now do we know the remote client address for sure (it may have
// been overridden by a status parameter).
connDetails.RemoteAddress = sArgs.RemoteAddr.String()
ctx = logtags.AddTag(ctx, "client", connDetails.RemoteAddress)
sp.SetTag("client", attribute.StringValue(connDetails.RemoteAddress))
// If a test is hooking in some authentication option, load it.
var testingAuthHook func(context.Context) error
if k := s.execCfg.PGWireTestingKnobs; k != nil {
testingAuthHook = k.AuthHook
}
hbaConf, identMap := s.GetAuthenticationConfiguration()
// Defer the rest of the processing to the connection handler.
// This includes authentication.
s.serveConn(
ctx, conn, sArgs,
reserved,
connStart,
authOptions{
connType: connType,
connDetails: connDetails,
insecure: s.cfg.Insecure,
ie: s.execCfg.InternalExecutor,
auth: hbaConf,
identMap: identMap,
testingAuthHook: testingAuthHook,
})
return nil
}
// handleCancel handles a pgwire query cancellation request. Note that the
// request is unauthenticated. To mitigate the security risk (i.e., a
// malicious actor spamming this endpoint with random data to try to cancel
// a query), the logic is rate-limited by a semaphore. Refer to the comments
// in the pgwirecancel package for more information.
//
// This function does not return an error, so the caller (and possible
// attacker) will not know if the cancellation attempt succeeded. Errors are
// logged so that an operator can be aware of any possibly malicious requests.
func (s *Server) handleCancel(ctx context.Context, conn net.Conn, buf *pgwirebase.ReadBuffer) {
telemetry.Inc(sqltelemetry.CancelRequestCounter)
s.metrics.PGWireCancelTotalCount.Inc(1)
resp, err := func() (*serverpb.CancelQueryByKeyResponse, error) {
backendKeyDataBits, err := buf.GetUint64()
// The connection that issued the cancel is not a SQL session -- it's an
// entirely new connection that's created just to send the cancel. We close
// the connection as soon as possible after reading the data, since there
// is nothing to send back to the client.
_ = conn.Close()
if err != nil {
return nil, err
}
cancelKey := pgwirecancel.BackendKeyData(backendKeyDataBits)
// The request is forwarded to the appropriate node.
req := &serverpb.CancelQueryByKeyRequest{
SQLInstanceID: cancelKey.GetSQLInstanceID(),
CancelQueryKey: cancelKey,
}
resp, err := s.execCfg.SQLStatusServer.CancelQueryByKey(ctx, req)
if len(resp.Error) > 0 {
err = errors.CombineErrors(err, errors.Newf("error from CancelQueryByKeyResponse: %s", resp.Error))
}
return resp, err
}()
if resp != nil && resp.Canceled {
s.metrics.PGWireCancelSuccessfulCount.Inc(1)
} else if err != nil {
if status := status.Convert(err); status.Code() == codes.ResourceExhausted {
s.metrics.PGWireCancelIgnoredCount.Inc(1)
}
log.Sessions.Warningf(ctx, "unexpected while handling pgwire cancellation request: %v", err)
}
}
// parseClientProvidedSessionParameters reads the incoming k/v pairs
// in the startup message into a sql.SessionArgs struct.
func parseClientProvidedSessionParameters(
ctx context.Context,
sv *settings.Values,
buf *pgwirebase.ReadBuffer,
origRemoteAddr net.Addr,
trustClientProvidedRemoteAddr bool,
) (sql.SessionArgs, error) {
args := sql.SessionArgs{
SessionDefaults: make(map[string]string),
CustomOptionSessionDefaults: make(map[string]string),
RemoteAddr: origRemoteAddr,
}
foundBufferSize := false
for {
// Read a key-value pair from the client.
key, err := buf.GetString()
if err != nil {
return sql.SessionArgs{}, pgerror.Wrap(
err, pgcode.ProtocolViolation,
"error reading option key",
)
}
if len(key) == 0 {
// End of parameter list.
break
}
value, err := buf.GetString()
if err != nil {
return sql.SessionArgs{}, pgerror.Wrapf(
err, pgcode.ProtocolViolation,
"error reading option value for key %q", key,
)
}
// Case-fold for the key for easier comparison.
key = strings.ToLower(key)
// Load the parameter.
switch key {
case "user":
// In CockroachDB SQL, unlike in PostgreSQL, usernames are
// case-insensitive. Therefore we need to normalize the username
// here, so that further lookups for authentication have the correct
// identifier.
args.User, _ = security.MakeSQLUsernameFromUserInput(value, security.UsernameValidation)
// IsSuperuser will get updated later when we load the user's session
// initialization information.
args.IsSuperuser = args.User.IsRootUser()
case "crdb:session_revival_token_base64":
token, err := base64.StdEncoding.DecodeString(value)
if err != nil {
return sql.SessionArgs{}, pgerror.Wrapf(
err, pgcode.ProtocolViolation,
"%s", key,
)
}
args.SessionRevivalToken = token
case "results_buffer_size":
if args.ConnResultsBufferSize, err = humanizeutil.ParseBytes(value); err != nil {
return sql.SessionArgs{}, errors.WithSecondaryError(
pgerror.Newf(pgcode.ProtocolViolation,
"error parsing results_buffer_size option value '%s' as bytes", value), err)
}
if args.ConnResultsBufferSize < 0 {
return sql.SessionArgs{}, pgerror.Newf(pgcode.ProtocolViolation,
"results_buffer_size option value '%s' cannot be negative", value)
}
foundBufferSize = true
case "crdb:remote_addr":
if !trustClientProvidedRemoteAddr {
return sql.SessionArgs{}, pgerror.Newf(pgcode.ProtocolViolation,
"server not configured to accept remote address override (requested: %q)", value)
}
hostS, portS, err := net.SplitHostPort(value)
if err != nil {
return sql.SessionArgs{}, pgerror.Wrap(
err, pgcode.ProtocolViolation,
"invalid address format",
)
}
port, err := strconv.Atoi(portS)
if err != nil {
return sql.SessionArgs{}, pgerror.Wrap(
err, pgcode.ProtocolViolation,
"remote port is not numeric",
)
}
ip := net.ParseIP(hostS)
if ip == nil {
return sql.SessionArgs{}, pgerror.New(pgcode.ProtocolViolation,
"remote address is not numeric")
}
args.RemoteAddr = &net.TCPAddr{IP: ip, Port: port}
case "options":
opts, err := parseOptions(value)
if err != nil {
return sql.SessionArgs{}, err
}
for _, opt := range opts {
err = loadParameter(ctx, opt.key, opt.value, &args)
if err != nil {
return sql.SessionArgs{}, pgerror.Wrapf(err, pgerror.GetPGCode(err), "options")
}
}
default:
err = loadParameter(ctx, key, value, &args)
if err != nil {
return sql.SessionArgs{}, err
}
}
}
if !foundBufferSize && sv != nil {
		// The client did not provide results_buffer_size; use the cluster setting as the default.
args.ConnResultsBufferSize = connResultsBufferSize.Get(sv)
}
// TODO(richardjcai): When connecting to the database, we'll want to
// check for CONNECT privilege on the database. #59875.
if _, ok := args.SessionDefaults["database"]; !ok {
// CockroachDB-specific behavior: if no database is specified,
// default to "defaultdb". In PostgreSQL this would be "postgres".
args.SessionDefaults["database"] = catalogkeys.DefaultDatabaseName
}
// The client might override the application name,
// which would prevent it from being counted in telemetry.
// We've decided that this noise in the data is acceptable.
if appName, ok := args.SessionDefaults["application_name"]; ok {
if appName == catconstants.ReportableAppNamePrefix+catconstants.InternalSQLAppName {
telemetry.Inc(sqltelemetry.CockroachShellCounter)
}
}
return args, nil
}
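// To make the flow above concrete (hypothetical values): a startup message
// carrying the pairs
//
//	user     -> "Alice"
//	database -> "mydb"
//	options  -> "-c search_path=public"
//
// yields SessionArgs whose User is normalized to "alice", whose
// SessionDefaults map contains "database" -> "mydb" and "search_path" ->
// "public", and whose ConnResultsBufferSize falls back to the
// sql.defaults.results_buffer.size cluster setting because no
// results_buffer_size parameter was sent.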
func loadParameter(ctx context.Context, key, value string, args *sql.SessionArgs) error {
key = strings.ToLower(key)
exists, configurable := sql.IsSessionVariableConfigurable(key)
switch {
case exists && configurable:
args.SessionDefaults[key] = value
case sql.IsCustomOptionSessionVariable(key):
args.CustomOptionSessionDefaults[key] = value
case !exists:
if _, ok := sql.UnsupportedVars[key]; ok {
counter := sqltelemetry.UnimplementedClientStatusParameterCounter(key)
telemetry.Inc(counter)
}
log.Warningf(ctx, "unknown configuration parameter: %q", key)
case !configurable:
return pgerror.Newf(pgcode.CantChangeRuntimeParam,
"parameter %q cannot be changed", key)
}
return nil
}
// option represents an option argument passed in the connection URL.
type option struct {
key string
value string
}
// parseOptions parses the given string into the options. The options must be
// separated by space and have one of the following patterns:
// '-c key=value', '-ckey=value', '--key=value'
func parseOptions(optionsString string) ([]option, error) {
var res []option
optionsRaw, err := url.QueryUnescape(optionsString)
if err != nil {
return nil, pgerror.Newf(pgcode.ProtocolViolation, "failed to unescape options %q", optionsString)
}
lastWasDashC := false
opts := splitOptions(optionsRaw)
for i := 0; i < len(opts); i++ {
prefix := ""
if len(opts[i]) > 1 {
prefix = opts[i][:2]
}
switch {
case opts[i] == "-c":
lastWasDashC = true
continue
case lastWasDashC:
lastWasDashC = false
			// If the last option was '-c', parse the current option without
			// regard to the prefix.
prefix = ""
case prefix == "--" || prefix == "-c":
lastWasDashC = false
default:
return nil, pgerror.Newf(pgcode.ProtocolViolation,
"option %q is invalid, must have prefix '-c' or '--'", opts[i])
}
opt, err := splitOption(opts[i], prefix)
if err != nil {
return nil, err
}
res = append(res, opt)
}
return res, nil
}
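// For example (an illustrative sketch, not taken from the test suite):
//
//	opts, _ := parseOptions("-c search_path=public --default-transaction-isolation=serializable")
//	// opts == []option{
//	//	{key: "search_path", value: "public"},
//	//	{key: "default_transaction_isolation", value: "serializable"},
//	// }
//
// The '--' prefix is stripped and '-' in key names becomes '_' (see
// splitOption below).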
// splitOptions slices the given string into substrings separated by space
// unless the space is escaped using backslashes '\\'. It also skips multiple
// subsequent spaces.
func splitOptions(options string) []string {
var res []string
var sb strings.Builder
i := 0
for i < len(options) {
sb.Reset()
// skip leading space
for i < len(options) && unicode.IsSpace(rune(options[i])) {
i++
}
if i == len(options) {
break
}
lastWasEscape := false
for i < len(options) {
if unicode.IsSpace(rune(options[i])) && !lastWasEscape {
break
}
if !lastWasEscape && options[i] == '\\' {
lastWasEscape = true
} else {
lastWasEscape = false
sb.WriteByte(options[i])
}
i++
}
res = append(res, sb.String())
}
return res
}
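// For instance (illustrative), splitOptions(`-c foo=a\ b  -c bar=c`) returns
// ["-c", "foo=a b", "-c", "bar=c"]: the backslash-escaped space stays inside
// its token and the run of spaces between tokens is collapsed.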
// splitOption splits the given opt argument into substrings separated by '='.
// It returns an error if the given option does not comply with the pattern
// "key=value" and the number of elements in the result is not two.
// splitOption removes the prefix from the key and replaces '-' with '_' so
// "--option-name=value" becomes [option_name, value].
func splitOption(opt, prefix string) (option, error) {
kv := strings.Split(opt, "=")
if len(kv) != 2 {
return option{}, pgerror.Newf(pgcode.ProtocolViolation,
"option %q is invalid, check '='", opt)
}
kv[0] = strings.TrimPrefix(kv[0], prefix)
return option{key: strings.ReplaceAll(kv[0], "-", "_"), value: kv[1]}, nil
}
// Note: Usage of an env var here makes it possible to unconditionally
// enable this feature when cluster settings do not work reliably,
// e.g. in multi-tenant setups in v20.2. This override mechanism can
// be removed after all of CC is moved to use v21.1 or a version which
// supports cluster settings.
var trustClientProvidedRemoteAddrOverride = envutil.EnvOrDefaultBool("COCKROACH_TRUST_CLIENT_PROVIDED_SQL_REMOTE_ADDR", false)
// TestingSetTrustClientProvidedRemoteAddr is used in tests.
func (s *Server) TestingSetTrustClientProvidedRemoteAddr(b bool) func() {
prev := s.trustClientProvidedRemoteAddr.Get()
s.trustClientProvidedRemoteAddr.Set(b)
return func() { s.trustClientProvidedRemoteAddr.Set(prev) }
}
// maybeUpgradeToSecureConn upgrades the connection to TLS/SSL if
// requested by the client, and available in the server configuration.
func (s *Server) maybeUpgradeToSecureConn(
ctx context.Context,
conn net.Conn,
connType hba.ConnType,
version uint32,
buf *pgwirebase.ReadBuffer,
) (newConn net.Conn, newConnType hba.ConnType, newVersion uint32, clientErr, serverErr error) {
// By default, this is a no-op.
newConn = conn
newConnType = connType
newVersion = version
var n int // byte counts
if version != versionSSL {
// The client did not require a SSL connection.
// Insecure mode: nothing to say, nothing to do.
// TODO(knz): Remove this condition - see
// https://github.com/cockroachdb/cockroach/issues/53404
if s.cfg.Insecure {
return
}
// Secure mode: disallow if TCP and the user did not opt into
// non-TLS SQL conns.
if !s.cfg.AcceptSQLWithoutTLS && connType != hba.ConnLocal {
clientErr = pgerror.New(pgcode.ProtocolViolation, ErrSSLRequired)
}
return
}
if connType == hba.ConnLocal {
// No existing PostgreSQL driver ever tries to activate TLS over
// a unix socket. But in case someone, sometime, somewhere, makes
// that mistake, let them know that we don't want it.
clientErr = pgerror.New(pgcode.ProtocolViolation,
"cannot use SSL/TLS over local connections")
return
}
// Protocol sanity check.
if len(buf.Msg) > 0 {
serverErr = errors.Errorf("unexpected data after SSLRequest: %q", buf.Msg)
return
}
// The client has requested SSL. We're going to try and upgrade the
// connection to use TLS/SSL.
// Do we have a TLS configuration?
tlsConfig, serverErr := s.execCfg.RPCContext.GetServerTLSConfig()
if serverErr != nil {
return
}
if tlsConfig == nil {
// We don't have a TLS configuration available, so we can't honor
// the client's request.
n, serverErr = conn.Write(sslUnsupported)
if serverErr != nil {
return
}
} else {
// We have a TLS configuration. Upgrade the connection.
n, serverErr = conn.Write(sslSupported)
if serverErr != nil {
return
}
newConn = tls.Server(conn, tlsConfig)
newConnType = hba.ConnHostSSL
}
s.metrics.BytesOutCount.Inc(int64(n))
// Finally, re-read the version/command from the client.
newVersion, *buf, serverErr = s.readVersion(newConn)
return
}
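// In short, the negotiation above mirrors the Postgres SSLRequest dance: the
// server answers the request with a single byte, 'S' when a TLS configuration
// is available or 'N' when it is not, performs the TLS handshake in the 'S'
// case, and then re-reads the client's real startup message via readVersion.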
// registerConn registers the incoming connection to the map of active connections,
// which can be canceled by a concurrent server drain. It also returns
// the current draining status of the server.
//
// The onCloseFn() callback must be called at the end of the
// connection by the caller.
func (s *Server) registerConn(
ctx context.Context,
) (newCtx context.Context, draining bool, onCloseFn func()) {
onCloseFn = func() {}
newCtx = ctx
s.mu.Lock()
draining = s.mu.draining
if !draining {
var cancel context.CancelFunc
newCtx, cancel = contextutil.WithCancel(ctx)
done := make(chan struct{})
s.mu.connCancelMap[done] = cancel
onCloseFn = func() {
cancel()
close(done)
s.mu.Lock()
delete(s.mu.connCancelMap, done)
s.mu.Unlock()
}
}
s.mu.Unlock()
// If the Server is draining, we will use the connection only to send an
// error, so we don't count it in the stats. This makes sense since
// DrainClient() waits for that number to drop to zero,
// so we don't want it to oscillate unnecessarily.
if !draining {
s.metrics.NewConns.Inc(1)
s.metrics.Conns.Inc(1)
prevOnCloseFn := onCloseFn
onCloseFn = func() { prevOnCloseFn(); s.metrics.Conns.Dec(1) }
}
return
}
// readVersion reads the start-up message, then returns the version
// code (first uint32 in message) and the buffer containing the rest
// of the payload.
func (s *Server) readVersion(
conn io.Reader,
) (version uint32, buf pgwirebase.ReadBuffer, err error) {
var n int
buf = pgwirebase.MakeReadBuffer(
pgwirebase.ReadBufferOptionWithClusterSettings(&s.execCfg.Settings.SV),
)
n, err = buf.ReadUntypedMsg(conn)
if err != nil {
return
}
version, err = buf.GetUint32()
if err != nil {
return
}
s.metrics.BytesInCount.Inc(int64(n))
return
}
// sendErr sends errors to the client during the connection startup
// sequence. Later error sends during/after authentication are handled
// in conn.go.
func (s *Server) sendErr(ctx context.Context, conn net.Conn, err error) error {
msgBuilder := newWriteBuffer(s.metrics.BytesOutCount)
// We could, but do not, report server-side network errors while
// trying to send the client error. This is because clients that
	// receive an error payload are highly correlated with clients
// disconnecting abruptly.
_ /* err */ = writeErr(ctx, &s.execCfg.Settings.SV, err, msgBuilder, conn)
_ = conn.Close()
return err
}
func newAdminShutdownErr(msg string) error {
return pgerror.New(pgcode.AdminShutdown, msg)
}
| pkg/sql/pgwire/server.go | 1 | https://github.com/cockroachdb/cockroach/commit/648299bb6843159e625dacbee11ef177b8e2ced5 | [
0.9716281294822693,
0.024803446605801582,
0.00016171856259461492,
0.00017157464753836393,
0.13747034966945648
] |
{
"id": 0,
"code_window": [
"\tdefer func() {\n",
"\t\t// If we acquired the semaphore but the cancellation request failed, then\n",
"\t\t// hold on to the semaphore for longer. This helps mitigate a DoS attack\n",
"\t\t// of random cancellation requests.\n",
"\t\tif !resp.Canceled {\n",
"\t\t\ttime.Sleep(1 * time.Second)\n",
"\t\t}\n",
"\t\talloc.Release()\n",
"\t}()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif err != nil || (resp != nil && !resp.Canceled) {\n"
],
"file_path": "pkg/server/tenant_status.go",
"type": "replace",
"edit_start_line_idx": 289
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// {{/*
//go:build execgen_template
// +build execgen_template
//
// This file is the execgen template for const.eg.go. It's formatted in a
// special way, so it's both valid Go and a valid text/template input. This
// permits editing this file with editor support.
//
// */}}
package colexecbase
import (
"github.com/cockroachdb/apd/v3"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/col/typeconv"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/colmem"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/json"
"github.com/cockroachdb/errors"
)
// Workaround for bazel auto-generated code. goimports does not automatically
// pick up the right packages when run within the bazel sandbox.
var (
_ apd.Context
_ duration.Duration
_ json.JSON
)
// {{/*
// Declarations to make the template compile properly.
// _GOTYPE is the template variable.
type _GOTYPE interface{}
// _CANONICAL_TYPE_FAMILY is the template variable.
const _CANONICAL_TYPE_FAMILY = types.UnknownFamily
// _TYPE_WIDTH is the template variable.
const _TYPE_WIDTH = 0
// */}}
// NewConstOp creates a new operator that produces a constant value constVal of
// type t at index outputIdx.
func NewConstOp(
allocator *colmem.Allocator,
input colexecop.Operator,
t *types.T,
constVal interface{},
outputIdx int,
) (colexecop.Operator, error) {
input = colexecutils.NewVectorTypeEnforcer(allocator, input, t, outputIdx)
switch typeconv.TypeFamilyToCanonicalTypeFamily(t.Family()) {
// {{range .}}
case _CANONICAL_TYPE_FAMILY:
switch t.Width() {
// {{range .WidthOverloads}}
case _TYPE_WIDTH:
return &const_TYPEOp{
OneInputHelper: colexecop.MakeOneInputHelper(input),
allocator: allocator,
outputIdx: outputIdx,
constVal: constVal.(_GOTYPE),
}, nil
// {{end}}
}
// {{end}}
}
return nil, errors.Errorf("unsupported const type %s", t.Name())
}
// {{range .}}
// {{range .WidthOverloads}}
type const_TYPEOp struct {
colexecop.OneInputHelper
allocator *colmem.Allocator
outputIdx int
constVal _GOTYPE
}
func (c const_TYPEOp) Next() coldata.Batch {
batch := c.Input.Next()
n := batch.Length()
if n == 0 {
return coldata.ZeroBatch
}
vec := batch.ColVec(c.outputIdx)
col := vec.TemplateType()
if vec.MaybeHasNulls() {
// We need to make sure that there are no left over null values in the
// output vector.
vec.Nulls().UnsetNulls()
}
c.allocator.PerformOperation(
[]coldata.Vec{vec},
func() {
// Shallow copy col to work around Go issue
// https://github.com/golang/go/issues/39756 which prevents bound check
// elimination from working in this case.
col := col
if sel := batch.Selection(); sel != nil {
for _, i := range sel[:n] {
col.Set(i, c.constVal)
}
} else {
_ = col.Get(n - 1)
for i := 0; i < n; i++ {
// {{if .Sliceable}}
//gcassert:bce
// {{end}}
col.Set(i, c.constVal)
}
}
},
)
return batch
}
// {{end}}
// {{end}}
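// As a rough sketch of what execgen generates from the template above (the
// concrete identifiers are assumptions, not verified here): for the canonical
// Int family at width 64 the operator becomes a constInt64Op whose constVal
// field has Go type int64 and whose Next body uses the int64-typed Get/Set
// accessors on the output vector.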
// NewConstNullOp creates a new operator that produces a constant (untyped) NULL
// value at index outputIdx.
func NewConstNullOp(
allocator *colmem.Allocator, input colexecop.Operator, outputIdx int,
) colexecop.Operator {
input = colexecutils.NewVectorTypeEnforcer(allocator, input, types.Unknown, outputIdx)
return &constNullOp{
OneInputHelper: colexecop.MakeOneInputHelper(input),
outputIdx: outputIdx,
}
}
type constNullOp struct {
colexecop.OneInputHelper
outputIdx int
}
var _ colexecop.Operator = &constNullOp{}
func (c constNullOp) Next() coldata.Batch {
batch := c.Input.Next()
n := batch.Length()
if n == 0 {
return coldata.ZeroBatch
}
batch.ColVec(c.outputIdx).Nulls().SetNulls()
return batch
}
| pkg/sql/colexec/colexecbase/const_tmpl.go | 0 | https://github.com/cockroachdb/cockroach/commit/648299bb6843159e625dacbee11ef177b8e2ced5 | [
0.0015815490623936057,
0.0003373119980096817,
0.00016406962822657079,
0.00017156990361399949,
0.000370890338672325
] |
{
"id": 0,
"code_window": [
"\tdefer func() {\n",
"\t\t// If we acquired the semaphore but the cancellation request failed, then\n",
"\t\t// hold on to the semaphore for longer. This helps mitigate a DoS attack\n",
"\t\t// of random cancellation requests.\n",
"\t\tif !resp.Canceled {\n",
"\t\t\ttime.Sleep(1 * time.Second)\n",
"\t\t}\n",
"\t\talloc.Release()\n",
"\t}()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif err != nil || (resp != nil && !resp.Canceled) {\n"
],
"file_path": "pkg/server/tenant_status.go",
"type": "replace",
"edit_start_line_idx": 289
} | load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "catprivilege",
srcs = [
"default_privilege.go",
"fix.go",
"system.go",
"validate.go",
],
importpath = "github.com/cockroachdb/cockroach/pkg/sql/catalog/catprivilege",
visibility = ["//visibility:public"],
deps = [
"//pkg/keys",
"//pkg/security",
"//pkg/sql/catalog",
"//pkg/sql/catalog/catconstants",
"//pkg/sql/catalog/catpb",
"//pkg/sql/catalog/descpb",
"//pkg/sql/privilege",
"//pkg/sql/sem/tree",
],
)
go_test(
name = "catprivilege_test",
srcs = [
"default_privilege_test.go",
"fix_test.go",
],
embed = [":catprivilege"],
deps = [
"//pkg/keys",
"//pkg/security",
"//pkg/sql/catalog/bootstrap",
"//pkg/sql/catalog/catpb",
"//pkg/sql/catalog/descpb",
"//pkg/sql/privilege",
"//pkg/sql/sem/tree",
"//pkg/util/leaktest",
"@com_github_stretchr_testify//require",
],
)
| pkg/sql/catalog/catprivilege/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/648299bb6843159e625dacbee11ef177b8e2ced5 | [
0.00017508106247987598,
0.00017157153342850506,
0.000165541612659581,
0.0001725376641843468,
0.000003345843879287713
] |
{
"id": 0,
"code_window": [
"\tdefer func() {\n",
"\t\t// If we acquired the semaphore but the cancellation request failed, then\n",
"\t\t// hold on to the semaphore for longer. This helps mitigate a DoS attack\n",
"\t\t// of random cancellation requests.\n",
"\t\tif !resp.Canceled {\n",
"\t\t\ttime.Sleep(1 * time.Second)\n",
"\t\t}\n",
"\t\talloc.Release()\n",
"\t}()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif err != nil || (resp != nil && !resp.Canceled) {\n"
],
"file_path": "pkg/server/tenant_status.go",
"type": "replace",
"edit_start_line_idx": 289
} | // Copyright 2022 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package importccl_test
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/ccl/importccl"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/randgen"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
goparquet "github.com/fraugster/parquet-go"
"github.com/fraugster/parquet-go/parquet"
"github.com/stretchr/testify/require"
)
const parquetExportFilePattern = "export*-n*.0.parquet"
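// The glob above matches the files written by EXPORT, which (as assumed
// here) are named like export<unique-id>-n<node-id>.0.parquet, plus a
// compression suffix such as .gz or .snappy when compression is requested.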
// parquetTest provides information to validate a test of EXPORT PARQUET. All
// fields below the stmt validate some aspect of the exported parquet file.
type parquetTest struct {
// filePrefix provides the parquet file name in front of the parquetExportFilePattern.
filePrefix string
// fileSuffix provides the compression type, if any, of the parquet file.
fileSuffix string
// dir is the temp directory the parquet file will be in.
dir string
// dbName is the name of the exported table's database.
dbName string
// prep contains sql commands that will execute before the stmt.
prep []string
// stmt contains the EXPORT PARQUET sql statement to test.
stmt string
// cols provides the expected column name and type
cols colinfo.ResultColumns
// colFieldRepType provides the expected parquet repetition type of each column in
// the parquet file.
colFieldRepType []parquet.FieldRepetitionType
// datums provides the expected values of the parquet file.
datums []tree.Datums
}
// validateParquetFile reads the parquet file and validates various aspects of
// the parquet file.
func validateParquetFile(
t *testing.T, ctx context.Context, ie *sql.InternalExecutor, test parquetTest,
) error {
paths, err := filepath.Glob(filepath.Join(test.dir, test.filePrefix, parquetExportFilePattern+test.fileSuffix))
require.NoError(t, err)
require.Equal(t, 1, len(paths))
r, err := os.Open(paths[0])
if err != nil {
return err
}
defer r.Close()
fr, err := goparquet.NewFileReader(r)
if err != nil {
return err
}
t.Logf("Schema: %s", fr.GetSchemaDefinition())
cols := fr.SchemaReader.GetSchemaDefinition().RootColumn.Children
if test.colFieldRepType != nil {
for i, col := range cols {
require.Equal(t, *col.SchemaElement.RepetitionType, test.colFieldRepType[i])
}
}
// Get the datums returned by the SELECT statement called in the EXPORT
// PARQUET statement to validate the data in the parquet file.
validationStmt := strings.SplitN(test.stmt, "FROM ", 2)[1]
test.datums, test.cols, err = ie.QueryBufferedExWithCols(
ctx,
"",
nil,
sessiondata.InternalExecutorOverride{
User: security.RootUserName(),
Database: test.dbName},
validationStmt)
require.NoError(t, err)
for j, col := range cols {
require.Equal(t, col.SchemaElement.Name, test.cols[j].Name)
}
i := 0
for {
row, err := fr.NextRow()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("reading record failed: %w", err)
}
for j := 0; j < len(cols); j++ {
if test.datums[i][j].ResolvedType() == types.Unknown {
// If we expect a null value, the row created by the parquet reader
// will not have the associated column.
_, ok := row[cols[j].SchemaElement.Name]
require.Equal(t, ok, false)
continue
}
parquetCol, err := importccl.NewParquetColumn(test.cols[j].Typ, "", false)
if err != nil {
return err
}
datum, err := parquetCol.DecodeFn(row[cols[j].SchemaElement.Name])
if err != nil {
return err
}
tester := test.datums[i][j]
switch tester.ResolvedType().Family() {
case types.DateFamily:
// pgDate.orig property doesn't matter and can cause the test to fail
require.Equal(t, tester.(*tree.DDate).Date.UnixEpochDays(),
datum.(*tree.DDate).Date.UnixEpochDays())
case types.JsonFamily:
				// Only the value of the JSON object matters, not the additional properties.
require.Equal(t, tester.(*tree.DJSON).JSON.String(),
datum.(*tree.DJSON).JSON.String())
case types.FloatFamily:
if tester.(*tree.DFloat).String() == "NaN" {
// NaN != NaN, therefore stringify the comparison.
require.Equal(t, "NaN", datum.(*tree.DFloat).String())
continue
}
require.Equal(t, tester, datum)
default:
require.Equal(t, tester, datum)
}
}
i++
}
return nil
}
func TestRandomParquetExports(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
dir, dirCleanupFn := testutils.TempDir(t)
defer dirCleanupFn()
dbName := "rand"
params := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
UseDatabase: dbName,
ExternalIODir: dir,
},
}
ctx := context.Background()
tc := testcluster.StartTestCluster(t, 3, params)
defer tc.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
sqlDB.Exec(t, fmt.Sprintf("CREATE DATABASE %s", dbName))
tableName := "table_1"
// TODO (butler): Randomly generate additional table(s) using tools from PR
// #75677. The function below only creates a deterministic table with a bunch
// of interesting corner case values.
err := randgen.GenerateRandInterestingTable(tc.Conns[0], dbName, tableName)
require.NoError(t, err)
s0 := tc.Server(0)
ie := s0.ExecutorConfig().(sql.ExecutorConfig).InternalExecutor
{
// Ensure table only contains columns supported by EXPORT Parquet
_, cols, err := ie.QueryRowExWithCols(
ctx,
"",
nil,
sessiondata.InternalExecutorOverride{
User: security.RootUserName(),
Database: dbName},
fmt.Sprintf("SELECT * FROM %s LIMIT 1", tableName))
require.NoError(t, err)
for _, col := range cols {
_, err := importccl.NewParquetColumn(col.Typ, "", false)
if err != nil {
t.Logf("Column type %s not supported in parquet, dropping", col.Typ.String())
sqlDB.Exec(t, fmt.Sprintf(`ALTER TABLE %s DROP COLUMN %s`, tableName, col.Name))
}
}
}
// TODO (butler): iterate over random select statements
test := parquetTest{
filePrefix: tableName,
dbName: dbName,
dir: dir,
stmt: fmt.Sprintf("EXPORT INTO PARQUET 'nodelocal://0/%s' FROM SELECT * FROM %s",
tableName, tableName),
}
sqlDB.Exec(t, test.stmt)
err = validateParquetFile(t, ctx, ie, test)
require.NoError(t, err)
}
// TestBasicParquetTypes exports a variety of relations into parquet files, and
// then asserts that the parquet exporter properly encoded the values of the
// crdb relations.
func TestBasicParquetTypes(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
dir, dirCleanupFn := testutils.TempDir(t)
defer dirCleanupFn()
dbName := "baz"
params := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
UseDatabase: dbName,
ExternalIODir: dir,
},
}
ctx := context.Background()
tc := testcluster.StartTestCluster(t, 3, params)
defer tc.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
sqlDB.Exec(t, fmt.Sprintf("CREATE DATABASE %s", dbName))
// instantiating an internal executor to easily get datums from the table
s0 := tc.Server(0)
ie := s0.ExecutorConfig().(sql.ExecutorConfig).InternalExecutor
sqlDB.Exec(t, `CREATE TABLE foo (i INT PRIMARY KEY, x STRING, y INT, z FLOAT NOT NULL, a BOOL,
INDEX (y))`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'Alice', 3, 14.3, true), (2, 'Bob', 2, 24.1,
false),(3, 'Carl', 1, 34.214,true),(4, 'Alex', 3, 14.3, NULL), (5, 'Bobby', 2, 3.4,false),
(6, NULL, NULL, 4.5, NULL)`)
tests := []parquetTest{
{
filePrefix: "basic",
stmt: `EXPORT INTO PARQUET 'nodelocal://0/basic' FROM SELECT *
FROM foo WHERE y IS NOT NULL ORDER BY y ASC LIMIT 2 `,
},
{
filePrefix: "null_vals",
stmt: `EXPORT INTO PARQUET 'nodelocal://0/null_vals' FROM SELECT *
FROM foo ORDER BY x ASC LIMIT 2`,
},
{
filePrefix: "colname",
stmt: `EXPORT INTO PARQUET 'nodelocal://0/colname' FROM SELECT avg(z), min(y) AS baz
FROM foo`,
},
{
filePrefix: "nullable",
stmt: `EXPORT INTO PARQUET 'nodelocal://0/nullable' FROM SELECT y,z,x
FROM foo`,
colFieldRepType: []parquet.FieldRepetitionType{
parquet.FieldRepetitionType_OPTIONAL,
parquet.FieldRepetitionType_REQUIRED,
parquet.FieldRepetitionType_OPTIONAL},
},
{
// TODO (mb): switch one of the values in the array to NULL once the
// vendor's parquet file reader bug resolves.
// https://github.com/fraugster/parquet-go/issues/60
filePrefix: "arrays",
prep: []string{
"CREATE TABLE atable (i INT PRIMARY KEY, x INT[])",
"INSERT INTO atable VALUES (1, ARRAY[1,2]), (2, ARRAY[2]), (3,ARRAY[1,13,5]),(4, NULL),(5, ARRAY[])"},
stmt: `EXPORT INTO PARQUET 'nodelocal://0/arrays' FROM SELECT * FROM atable`,
},
{
filePrefix: "user_types",
prep: []string{
"CREATE TYPE greeting AS ENUM ('hello', 'hi')",
"CREATE TABLE greeting_table (x greeting, y greeting)",
"INSERT INTO greeting_table VALUES ('hello', 'hello'), ('hi', 'hi')",
},
stmt: `EXPORT INTO PARQUET 'nodelocal://0/user_types' FROM SELECT * FROM greeting_table`,
},
{
filePrefix: "collate",
prep: []string{
"CREATE TABLE de_names (name STRING COLLATE de PRIMARY KEY)",
"INSERT INTO de_names VALUES ('Backhaus' COLLATE de), ('Bär' COLLATE de), ('Baz' COLLATE de)",
},
stmt: `EXPORT INTO PARQUET 'nodelocal://0/collate' FROM SELECT * FROM de_names ORDER BY name`,
},
{
filePrefix: "ints_floats",
prep: []string{
"CREATE TABLE nums (int_2 INT2, int_4 INT4, int_8 INT8, real_0 REAL, double_0 DOUBLE PRECISION)",
"INSERT INTO nums VALUES (2, 2, 2, 3.2, 3.2)",
},
stmt: `EXPORT INTO PARQUET 'nodelocal://0/ints_floats' FROM SELECT * FROM nums`,
},
{
filePrefix: "compress_gzip",
fileSuffix: ".gz",
stmt: `EXPORT INTO PARQUET 'nodelocal://0/compress_gzip' WITH compression = gzip
FROM SELECT * FROM foo`,
},
{
filePrefix: "compress_snappy",
fileSuffix: ".snappy",
stmt: `EXPORT INTO PARQUET 'nodelocal://0/compress_snappy' WITH compression = snappy
FROM SELECT * FROM foo `,
},
{
filePrefix: "uncompress",
stmt: `EXPORT INTO PARQUET 'nodelocal://0/uncompress'
FROM SELECT * FROM foo `,
},
}
for _, test := range tests {
t.Logf("Test %s", test.filePrefix)
if test.prep != nil {
for _, cmd := range test.prep {
sqlDB.Exec(t, cmd)
}
}
sqlDB.Exec(t, test.stmt)
test.dir = dir
test.dbName = dbName
err := validateParquetFile(t, ctx, ie, test)
require.NoError(t, err)
}
}
| pkg/ccl/importccl/exportparquet_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/648299bb6843159e625dacbee11ef177b8e2ced5 | [
0.0006308017182163894,
0.00019834689737763256,
0.00016445557412225753,
0.00017096014926210046,
0.00010318091517547145
] |
{
"id": 1,
"code_window": [
"\t\treq := &serverpb.CancelQueryByKeyRequest{\n",
"\t\t\tSQLInstanceID: cancelKey.GetSQLInstanceID(),\n",
"\t\t\tCancelQueryKey: cancelKey,\n",
"\t\t}\n",
"\t\tresp, err := s.execCfg.SQLStatusServer.CancelQueryByKey(ctx, req)\n",
"\t\tif len(resp.Error) > 0 {\n",
"\t\t\terr = errors.CombineErrors(err, errors.Newf(\"error from CancelQueryByKeyResponse: %s\", resp.Error))\n",
"\t\t}\n",
"\t\treturn resp, err\n",
"\t}()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif resp != nil && len(resp.Error) > 0 {\n"
],
"file_path": "pkg/sql/pgwire/server.go",
"type": "replace",
"edit_start_line_idx": 789
} | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package pgwire
import (
"context"
"crypto/tls"
"encoding/base64"
"fmt"
"io"
"net"
"net/url"
"strconv"
"strings"
"sync/atomic"
"time"
"unicode"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catconstants"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/hba"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/identmap"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirebase"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirecancel"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/contextutil"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/log/eventpb"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/logtags"
"github.com/cockroachdb/redact"
"go.opentelemetry.io/otel/attribute"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// ATTENTION: After changing this value in a unit test, you probably want to
// open a new connection pool since the connections in the existing one are not
// affected.
//
// The "results_buffer_size" connection parameter can be used to override this
// default for an individual connection.
var connResultsBufferSize = settings.RegisterByteSizeSetting(
settings.TenantWritable,
"sql.defaults.results_buffer.size",
"default size of the buffer that accumulates results for a statement or a batch "+
"of statements before they are sent to the client. This can be overridden on "+
"an individual connection with the 'results_buffer_size' parameter. Note that auto-retries "+
"generally only happen while no results have been delivered to the client, so "+
"reducing this size can increase the number of retriable errors a client "+
"receives. On the other hand, increasing the buffer size can increase the "+
"delay until the client receives the first result row. "+
"Updating the setting only affects new connections. "+
"Setting to 0 disables any buffering.",
16<<10, // 16 KiB
).WithPublic()
var logConnAuth = settings.RegisterBoolSetting(
settings.TenantWritable,
sql.ConnAuditingClusterSettingName,
"if set, log SQL client connect and disconnect events (note: may hinder performance on loaded nodes)",
false).WithPublic()
var logSessionAuth = settings.RegisterBoolSetting(
settings.TenantWritable,
sql.AuthAuditingClusterSettingName,
"if set, log SQL session login/disconnection events (note: may hinder performance on loaded nodes)",
false).WithPublic()
const (
// ErrSSLRequired is returned when a client attempts to connect to a
// secure server in cleartext.
ErrSSLRequired = "node is running secure mode, SSL connection required"
// ErrDrainingNewConn is returned when a client attempts to connect to a server
// which is not accepting client connections.
ErrDrainingNewConn = "server is not accepting clients, try another node"
// ErrDrainingExistingConn is returned when a connection is shut down because
// the server is draining.
ErrDrainingExistingConn = "server is shutting down"
)
// Fully-qualified names for metrics.
var (
MetaConns = metric.Metadata{
Name: "sql.conns",
Help: "Number of active sql connections",
Measurement: "Connections",
Unit: metric.Unit_COUNT,
}
MetaNewConns = metric.Metadata{
Name: "sql.new_conns",
Help: "Counter of the number of sql connections created",
Measurement: "Connections",
Unit: metric.Unit_COUNT,
}
MetaBytesIn = metric.Metadata{
Name: "sql.bytesin",
Help: "Number of sql bytes received",
Measurement: "SQL Bytes",
Unit: metric.Unit_BYTES,
}
MetaBytesOut = metric.Metadata{
Name: "sql.bytesout",
Help: "Number of sql bytes sent",
Measurement: "SQL Bytes",
Unit: metric.Unit_BYTES,
}
MetaConnLatency = metric.Metadata{
Name: "sql.conn.latency",
Help: "Latency to establish and authenticate a SQL connection",
Measurement: "Nanoseconds",
Unit: metric.Unit_NANOSECONDS,
}
MetaPGWireCancelTotal = metric.Metadata{
Name: "sql.pgwire_cancel.total",
Help: "Counter of the number of pgwire query cancel requests",
Measurement: "Requests",
Unit: metric.Unit_COUNT,
}
MetaPGWireCancelIgnored = metric.Metadata{
Name: "sql.pgwire_cancel.ignored",
Help: "Counter of the number of pgwire query cancel requests that were ignored due to rate limiting",
Measurement: "Requests",
Unit: metric.Unit_COUNT,
}
MetaPGWireCancelSuccessful = metric.Metadata{
Name: "sql.pgwire_cancel.successful",
Help: "Counter of the number of pgwire query cancel requests that were successful",
Measurement: "Requests",
Unit: metric.Unit_COUNT,
}
)
const (
// The below constants can occur during the first message a client
// sends to the server. There are two categories: protocol version and
// request code. The protocol version is (major version number << 16)
// + minor version number. Request codes are (1234 << 16) + 5678 + N,
// where N started at 0 and is increased by 1 for every new request
// code added, which happens rarely during major or minor Postgres
// releases.
//
// See: https://www.postgresql.org/docs/current/protocol-message-formats.html
version30 = 196608 // (3 << 16) + 0
versionCancel = 80877102 // (1234 << 16) + 5678
versionSSL = 80877103 // (1234 << 16) + 5679
versionGSSENC = 80877104 // (1234 << 16) + 5680
)
// cancelMaxWait is the amount of time a draining server gives to sessions to
// react to cancellation and return before a forceful shutdown.
const cancelMaxWait = 1 * time.Second
// baseSQLMemoryBudget is the amount of memory pre-allocated in each connection.
var baseSQLMemoryBudget = envutil.EnvOrDefaultInt64("COCKROACH_BASE_SQL_MEMORY_BUDGET",
int64(2.1*float64(mon.DefaultPoolAllocationSize)))
// connReservationBatchSize determines for how many connections memory
// is pre-reserved at once.
var connReservationBatchSize = 5
var (
sslSupported = []byte{'S'}
sslUnsupported = []byte{'N'}
)
// cancelChanMap keeps track of channels that are closed after the associated
// cancellation function has been called and the cancellation has taken place.
type cancelChanMap map[chan struct{}]context.CancelFunc
// Server implements the server side of the PostgreSQL wire protocol.
type Server struct {
AmbientCtx log.AmbientContext
cfg *base.Config
SQLServer *sql.Server
execCfg *sql.ExecutorConfig
metrics ServerMetrics
mu struct {
syncutil.Mutex
// connCancelMap entries represent connections started when the server
// was not draining. Each value is a function that can be called to
// cancel the associated connection. The corresponding key is a channel
// that is closed when the connection is done.
connCancelMap cancelChanMap
draining bool
}
auth struct {
syncutil.RWMutex
conf *hba.Conf
identityMap *identmap.Conf
}
sqlMemoryPool *mon.BytesMonitor
connMonitor *mon.BytesMonitor
// testing{Conn,Auth}LogEnabled is used in unit tests in this
// package to force-enable conn/auth logging without dancing around
// the asynchronicity of cluster settings.
testingConnLogEnabled int32
testingAuthLogEnabled int32
// trustClientProvidedRemoteAddr indicates whether the server should honor
// a `crdb:remote_addr` status parameter provided by the client during
// session authentication. This status parameter can be set by SQL proxies
// to feed the "real" client address, where otherwise the CockroachDB SQL
// server would only see the address of the proxy.
//
// This setting is security-sensitive and should not be enabled
// without a SQL proxy that carefully scrubs any client-provided
// `crdb:remote_addr` field. In particular, this setting should never
// be set when there is no SQL proxy at all. Otherwise, a malicious
// client could use this field to pretend to be from an address other
// than its own and defeat the HBA rules.
//
// TODO(knz,ben): It would be good to have something more specific
// than a boolean, i.e. to accept the provided address only from
// certain peer IPs, or with certain certificates. (could it be a
// special hba.conf directive?)
trustClientProvidedRemoteAddr syncutil.AtomicBool
}
// ServerMetrics is the set of metrics for the pgwire server.
type ServerMetrics struct {
BytesInCount *metric.Counter
BytesOutCount *metric.Counter
Conns *metric.Gauge
NewConns *metric.Counter
ConnLatency *metric.Histogram
PGWireCancelTotalCount *metric.Counter
PGWireCancelIgnoredCount *metric.Counter
PGWireCancelSuccessfulCount *metric.Counter
ConnMemMetrics sql.BaseMemoryMetrics
SQLMemMetrics sql.MemoryMetrics
}
func makeServerMetrics(
sqlMemMetrics sql.MemoryMetrics, histogramWindow time.Duration,
) ServerMetrics {
return ServerMetrics{
BytesInCount: metric.NewCounter(MetaBytesIn),
BytesOutCount: metric.NewCounter(MetaBytesOut),
Conns: metric.NewGauge(MetaConns),
NewConns: metric.NewCounter(MetaNewConns),
ConnLatency: metric.NewLatency(MetaConnLatency, histogramWindow),
PGWireCancelTotalCount: metric.NewCounter(MetaPGWireCancelTotal),
PGWireCancelIgnoredCount: metric.NewCounter(MetaPGWireCancelIgnored),
PGWireCancelSuccessfulCount: metric.NewCounter(MetaPGWireCancelSuccessful),
ConnMemMetrics: sql.MakeBaseMemMetrics("conns", histogramWindow),
SQLMemMetrics: sqlMemMetrics,
}
}
// noteworthySQLMemoryUsageBytes is the minimum size tracked by the
// client SQL pool before the pool starts explicitly logging overall
// usage growth in the log.
var noteworthySQLMemoryUsageBytes = envutil.EnvOrDefaultInt64("COCKROACH_NOTEWORTHY_SQL_MEMORY_USAGE", 100*1024*1024)
// noteworthyConnMemoryUsageBytes is the minimum size tracked by the
// connection monitor before the monitor starts explicitly logging overall
// usage growth in the log.
var noteworthyConnMemoryUsageBytes = envutil.EnvOrDefaultInt64("COCKROACH_NOTEWORTHY_CONN_MEMORY_USAGE", 2*1024*1024)
// MakeServer creates a Server.
//
// Start() needs to be called on the Server so it begins processing.
func MakeServer(
ambientCtx log.AmbientContext,
cfg *base.Config,
st *cluster.Settings,
sqlMemMetrics sql.MemoryMetrics,
parentMemoryMonitor *mon.BytesMonitor,
histogramWindow time.Duration,
executorConfig *sql.ExecutorConfig,
) *Server {
server := &Server{
AmbientCtx: ambientCtx,
cfg: cfg,
execCfg: executorConfig,
metrics: makeServerMetrics(sqlMemMetrics, histogramWindow),
}
server.sqlMemoryPool = mon.NewMonitor("sql",
mon.MemoryResource,
// Note that we don't report metrics on this monitor. The reason for this is
// that we report metrics on the sum of all the child monitors of this pool.
// This monitor is the "main sql" monitor. It's a child of the root memory
// monitor. Its children are the sql monitors for each new connection. The
// sum of those children, plus the extra memory in the "conn" monitor below,
// is more than enough metrics information about the monitors.
nil, /* curCount */
nil, /* maxHist */
0, noteworthySQLMemoryUsageBytes, st)
server.sqlMemoryPool.Start(context.Background(), parentMemoryMonitor, mon.BoundAccount{})
server.SQLServer = sql.NewServer(executorConfig, server.sqlMemoryPool)
// TODO(knz,ben): Use a cluster setting for this.
server.trustClientProvidedRemoteAddr.Set(trustClientProvidedRemoteAddrOverride)
server.connMonitor = mon.NewMonitor("conn",
mon.MemoryResource,
server.metrics.ConnMemMetrics.CurBytesCount,
server.metrics.ConnMemMetrics.MaxBytesHist,
int64(connReservationBatchSize)*baseSQLMemoryBudget, noteworthyConnMemoryUsageBytes, st)
server.connMonitor.Start(context.Background(), server.sqlMemoryPool, mon.BoundAccount{})
server.mu.Lock()
server.mu.connCancelMap = make(cancelChanMap)
server.mu.Unlock()
connAuthConf.SetOnChange(&st.SV, func(ctx context.Context) {
loadLocalHBAConfigUponRemoteSettingChange(
ambientCtx.AnnotateCtx(context.Background()), server, st)
})
connIdentityMapConf.SetOnChange(&st.SV, func(ctx context.Context) {
loadLocalIdentityMapUponRemoteSettingChange(
ambientCtx.AnnotateCtx(context.Background()), server, st)
})
return server
}
// BytesOut returns the total number of bytes transmitted from this server.
func (s *Server) BytesOut() uint64 {
return uint64(s.metrics.BytesOutCount.Count())
}
// AnnotateCtxForIncomingConn annotates the provided context with a
// tag that reports the peer's address. In the common case, the
// context is annotated with a "client" tag. When the server is
// configured to recognize client-specified remote addresses, it is
// annotated with a "peer" tag and the "client" tag is added later
// when the session is set up.
func (s *Server) AnnotateCtxForIncomingConn(ctx context.Context, conn net.Conn) context.Context {
tag := "client"
if s.trustClientProvidedRemoteAddr.Get() {
tag = "peer"
}
return logtags.AddTag(ctx, tag, conn.RemoteAddr().String())
}
// Match returns true if rd appears to be a Postgres connection.
func Match(rd io.Reader) bool {
buf := pgwirebase.MakeReadBuffer()
_, err := buf.ReadUntypedMsg(rd)
if err != nil {
return false
}
version, err := buf.GetUint32()
if err != nil {
return false
}
return version == version30 || version == versionSSL || version == versionCancel || version == versionGSSENC
}
// Start makes the Server ready for serving connections.
func (s *Server) Start(ctx context.Context, stopper *stop.Stopper) {
s.SQLServer.Start(ctx, stopper)
}
// IsDraining returns true if the server is not currently accepting
// connections.
func (s *Server) IsDraining() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.mu.draining
}
// Metrics returns the set of metrics structs.
func (s *Server) Metrics() (res []interface{}) {
return []interface{}{
&s.metrics,
&s.SQLServer.Metrics.StartedStatementCounters,
&s.SQLServer.Metrics.ExecutedStatementCounters,
&s.SQLServer.Metrics.EngineMetrics,
&s.SQLServer.Metrics.GuardrailMetrics,
&s.SQLServer.InternalMetrics.StartedStatementCounters,
&s.SQLServer.InternalMetrics.ExecutedStatementCounters,
&s.SQLServer.InternalMetrics.EngineMetrics,
&s.SQLServer.InternalMetrics.GuardrailMetrics,
&s.SQLServer.ServerMetrics.StatsMetrics,
&s.SQLServer.ServerMetrics.ContentionSubsystemMetrics,
}
}
// Drain prevents new connections from being served and waits for drainWait for
// open connections to terminate before canceling them.
// An error will be returned when connections that have been canceled have not
// responded to this cancellation and closed themselves in time. The server
// will remain in draining state, though open connections may continue to
// exist.
// The RFC on drain modes has more information regarding the specifics of
// what will happen to connections in different states:
// https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160425_drain_modes.md
//
// The reporter callback, if non-nil, is called on a best effort basis
// to report work that needed to be done and which may or may not have
// been done by the time this call returns. See the explanation in
// pkg/server/drain.go for details.
func (s *Server) Drain(
ctx context.Context, drainWait time.Duration, reporter func(int, redact.SafeString),
) error {
return s.drainImpl(ctx, drainWait, cancelMaxWait, reporter)
}
// Undrain switches the server back to the normal mode of operation in which
// connections are accepted.
func (s *Server) Undrain() {
s.mu.Lock()
s.setDrainingLocked(false)
s.mu.Unlock()
}
// setDrainingLocked sets the server's draining state and returns whether the
// state changed (i.e. drain != s.mu.draining). s.mu must be locked.
func (s *Server) setDrainingLocked(drain bool) bool {
if s.mu.draining == drain {
return false
}
s.mu.draining = drain
return true
}
// drainImpl drains the SQL clients.
//
// The queryWait duration is used to wait on clients to
// self-disconnect after their session has been canceled. The
// cancelWait is used to wait after the queryWait timer has expired
// and there are still clients connected, and their context.Context is
// canceled.
//
// The reporter callback, if non-nil, is called on a best effort basis
// to report work that needed to be done and which may or may not have
// been done by the time this call returns. See the explanation in
// pkg/server/drain.go for details.
func (s *Server) drainImpl(
ctx context.Context,
queryWait time.Duration,
cancelWait time.Duration,
reporter func(int, redact.SafeString),
) error {
// This anonymous function returns a copy of s.mu.connCancelMap if there are
// any active connections to cancel. We will only attempt to cancel
// connections that were active at the moment the draining switch happened.
// It is enough to do this because:
// 1) If no new connections are added to the original map all connections
// will be canceled.
// 2) If new connections are added to the original map, it follows that they
// were added when s.mu.draining = false, thus not requiring cancellation.
// These connections are not our responsibility and will be handled when the
// server starts draining again.
connCancelMap := func() cancelChanMap {
s.mu.Lock()
defer s.mu.Unlock()
if !s.setDrainingLocked(true) {
// We are already draining.
return nil
}
connCancelMap := make(cancelChanMap)
for done, cancel := range s.mu.connCancelMap {
connCancelMap[done] = cancel
}
return connCancelMap
}()
if len(connCancelMap) == 0 {
return nil
}
if reporter != nil {
// Report progress to the Drain RPC.
reporter(len(connCancelMap), "SQL clients")
}
// Spin off a goroutine that waits for all connections to signal that they
// are done and reports it on allConnsDone. The main goroutine signals this
// goroutine to stop work through quitWaitingForConns.
allConnsDone := make(chan struct{})
quitWaitingForConns := make(chan struct{})
defer close(quitWaitingForConns)
go func() {
defer close(allConnsDone)
for done := range connCancelMap {
select {
case <-done:
case <-quitWaitingForConns:
return
}
}
}()
// Wait for all connections to finish up to drainWait.
select {
case <-time.After(queryWait):
log.Ops.Warningf(ctx, "canceling all sessions after waiting %s", queryWait)
case <-allConnsDone:
}
// Cancel the contexts of all sessions if the server is still in draining
// mode.
if stop := func() bool {
s.mu.Lock()
defer s.mu.Unlock()
if !s.mu.draining {
return true
}
for _, cancel := range connCancelMap {
// There is a possibility that different calls to SetDraining have
// overlapping connCancelMaps, but context.CancelFunc calls are
// idempotent.
cancel()
}
return false
}(); stop {
return nil
}
select {
case <-time.After(cancelWait):
return errors.Errorf("some sessions did not respond to cancellation within %s", cancelWait)
case <-allConnsDone:
}
return nil
}
// SocketType indicates the connection type. This is an optimization to
// prevent a comparison against conn.LocalAddr().Network().
type SocketType bool
const (
// SocketTCP is used for TCP sockets. The standard.
SocketTCP SocketType = true
// SocketUnix is used for unix datagram sockets.
SocketUnix SocketType = false
)
func (s SocketType) asConnType() (hba.ConnType, error) {
switch s {
case SocketTCP:
return hba.ConnHostNoSSL, nil
case SocketUnix:
return hba.ConnLocal, nil
default:
return 0, errors.AssertionFailedf("unimplemented socket type: %v", errors.Safe(s))
}
}
func (s *Server) connLogEnabled() bool {
return atomic.LoadInt32(&s.testingConnLogEnabled) != 0 || logConnAuth.Get(&s.execCfg.Settings.SV)
}
// TestingEnableConnLogging is exported for use in tests.
func (s *Server) TestingEnableConnLogging() {
atomic.StoreInt32(&s.testingConnLogEnabled, 1)
}
// TestingEnableAuthLogging is exported for use in tests.
func (s *Server) TestingEnableAuthLogging() {
atomic.StoreInt32(&s.testingAuthLogEnabled, 1)
}
// ServeConn serves a single connection, driving the handshake process and
// delegating to the appropriate connection type.
//
// The socketType argument is an optimization to avoid a string
// compare on conn.LocalAddr().Network(). When the socket type is
// unix datagram (local filesystem), SSL negotiation is disabled
// even when the server is running securely with certificates.
// This has the effect of forcing password auth, also in a way
// compatible with postgres.
//
// An error is returned if the initial handshake of the connection fails.
func (s *Server) ServeConn(ctx context.Context, conn net.Conn, socketType SocketType) error {
ctx, draining, onCloseFn := s.registerConn(ctx)
defer onCloseFn()
connDetails := eventpb.CommonConnectionDetails{
InstanceID: int32(s.execCfg.NodeID.SQLInstanceID()),
Network: conn.RemoteAddr().Network(),
RemoteAddress: conn.RemoteAddr().String(),
}
// Some bookkeeping, for security-minded administrators.
// This registers the connection to the authentication log.
connStart := timeutil.Now()
if s.connLogEnabled() {
ev := &eventpb.ClientConnectionStart{
CommonEventDetails: eventpb.CommonEventDetails{Timestamp: connStart.UnixNano()},
CommonConnectionDetails: connDetails,
}
log.StructuredEvent(ctx, ev)
}
defer func() {
// The duration of the session is logged at the end so that the
// reader of the log file can know how much to look back in time
// to find when the connection was opened. This is important
// because the log files may have been rotated since.
if s.connLogEnabled() {
endTime := timeutil.Now()
ev := &eventpb.ClientConnectionEnd{
CommonEventDetails: eventpb.CommonEventDetails{Timestamp: endTime.UnixNano()},
CommonConnectionDetails: connDetails,
Duration: endTime.Sub(connStart).Nanoseconds(),
}
log.StructuredEvent(ctx, ev)
}
}()
// In any case, first check the command in the start-up message.
//
// We're assuming that a client is not willing/able to receive error
// packets before we drain that message.
version, buf, err := s.readVersion(conn)
if err != nil {
return err
}
switch version {
case versionCancel:
// The cancel message is rather peculiar: it is sent without
// authentication, always over an unencrypted channel.
s.handleCancel(ctx, conn, &buf)
return nil
case versionGSSENC:
// This is a request for an unsupported feature: GSS encryption.
// https://github.com/cockroachdb/cockroach/issues/52184
//
// Ensure the right SQLSTATE is sent to the SQL client.
err := pgerror.New(pgcode.ProtocolViolation, "GSS encryption is not yet supported")
// Annotate a telemetry key. These objects
// are treated specially by sendErr: they increase a
// telemetry counter to indicate an attempt was made
// to use this feature.
err = errors.WithTelemetry(err, "#52184")
return s.sendErr(ctx, conn, err)
}
// If the server is shutting down, terminate the connection early.
if draining {
log.Ops.Info(ctx, "rejecting new connection while server is draining")
return s.sendErr(ctx, conn, newAdminShutdownErr(ErrDrainingNewConn))
}
// Compute the initial connType.
connType, err := socketType.asConnType()
if err != nil {
return err
}
// If the client requests SSL, upgrade the connection to use TLS.
var clientErr error
conn, connType, version, clientErr, err = s.maybeUpgradeToSecureConn(ctx, conn, connType, version, &buf)
if err != nil {
return err
}
if clientErr != nil {
return s.sendErr(ctx, conn, clientErr)
}
sp := tracing.SpanFromContext(ctx)
sp.SetTag("conn_type", attribute.StringValue(connType.String()))
// What does the client want to do?
switch version {
case version30:
// Normal SQL connection. Proceed normally below.
case versionCancel:
// The PostgreSQL protocol definition says that cancel payloads
// must be sent *prior to upgrading the connection to use TLS*.
// Yet, we've found clients in the wild that send the cancel
// after the TLS handshake, for example at
// https://github.com/cockroachlabs/support/issues/600.
s.handleCancel(ctx, conn, &buf)
return nil
default:
// We don't know this protocol.
err := pgerror.Newf(pgcode.ProtocolViolation, "unknown protocol version %d", version)
err = errors.WithTelemetry(err, fmt.Sprintf("protocol-version-%d", version))
return s.sendErr(ctx, conn, err)
}
// Reserve some memory for this connection using the server's monitor. This
// reduces pressure on the shared pool because the server monitor allocates in
// chunks from the shared pool and these chunks should be larger than
// baseSQLMemoryBudget.
reserved := s.connMonitor.MakeBoundAccount()
if err := reserved.Grow(ctx, baseSQLMemoryBudget); err != nil {
return errors.Wrapf(err, "unable to pre-allocate %d bytes for this connection",
baseSQLMemoryBudget)
}
// Load the client-provided session parameters.
var sArgs sql.SessionArgs
if sArgs, err = parseClientProvidedSessionParameters(ctx, &s.execCfg.Settings.SV, &buf,
conn.RemoteAddr(), s.trustClientProvidedRemoteAddr.Get()); err != nil {
return s.sendErr(ctx, conn, err)
}
// Populate the client address field in the context tags and the
// shared struct for structured logging.
// Only now do we know the remote client address for sure (it may have
// been overridden by a status parameter).
connDetails.RemoteAddress = sArgs.RemoteAddr.String()
ctx = logtags.AddTag(ctx, "client", connDetails.RemoteAddress)
sp.SetTag("client", attribute.StringValue(connDetails.RemoteAddress))
// If a test is hooking in some authentication option, load it.
var testingAuthHook func(context.Context) error
if k := s.execCfg.PGWireTestingKnobs; k != nil {
testingAuthHook = k.AuthHook
}
hbaConf, identMap := s.GetAuthenticationConfiguration()
// Defer the rest of the processing to the connection handler.
// This includes authentication.
s.serveConn(
ctx, conn, sArgs,
reserved,
connStart,
authOptions{
connType: connType,
connDetails: connDetails,
insecure: s.cfg.Insecure,
ie: s.execCfg.InternalExecutor,
auth: hbaConf,
identMap: identMap,
testingAuthHook: testingAuthHook,
})
return nil
}
// handleCancel handles a pgwire query cancellation request. Note that the
// request is unauthenticated. To mitigate the security risk (i.e., a
// malicious actor spamming this endpoint with random data to try to cancel
// a query), the logic is rate-limited by a semaphore. Refer to the comments
// in the pgwirecancel package for more information.
//
// This function does not return an error, so the caller (and possible
// attacker) will not know if the cancellation attempt succeeded. Errors are
// logged so that an operator can be aware of any possibly malicious requests.
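// The cancel payload read below is the 64-bit BackendKeyData that was
// previously issued to the client; its SQLInstanceID component identifies
// the node that owns the session, so the request can be forwarded there.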
func (s *Server) handleCancel(ctx context.Context, conn net.Conn, buf *pgwirebase.ReadBuffer) {
telemetry.Inc(sqltelemetry.CancelRequestCounter)
s.metrics.PGWireCancelTotalCount.Inc(1)
resp, err := func() (*serverpb.CancelQueryByKeyResponse, error) {
backendKeyDataBits, err := buf.GetUint64()
// The connection that issued the cancel is not a SQL session -- it's an
// entirely new connection that's created just to send the cancel. We close
// the connection as soon as possible after reading the data, since there
// is nothing to send back to the client.
_ = conn.Close()
if err != nil {
return nil, err
}
cancelKey := pgwirecancel.BackendKeyData(backendKeyDataBits)
// The request is forwarded to the appropriate node.
req := &serverpb.CancelQueryByKeyRequest{
SQLInstanceID: cancelKey.GetSQLInstanceID(),
CancelQueryKey: cancelKey,
}
resp, err := s.execCfg.SQLStatusServer.CancelQueryByKey(ctx, req)
if len(resp.Error) > 0 {
err = errors.CombineErrors(err, errors.Newf("error from CancelQueryByKeyResponse: %s", resp.Error))
}
return resp, err
}()
if resp != nil && resp.Canceled {
s.metrics.PGWireCancelSuccessfulCount.Inc(1)
} else if err != nil {
if status := status.Convert(err); status.Code() == codes.ResourceExhausted {
s.metrics.PGWireCancelIgnoredCount.Inc(1)
}
log.Sessions.Warningf(ctx, "unexpected while handling pgwire cancellation request: %v", err)
}
}
// parseClientProvidedSessionParameters reads the incoming k/v pairs
// in the startup message into a sql.SessionArgs struct.
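// Typical keys include "user", "database", "application_name", "options",
// "results_buffer_size", and the CockroachDB-specific "crdb:remote_addr"
// and "crdb:session_revival_token_base64" parameters handled below.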
func parseClientProvidedSessionParameters(
ctx context.Context,
sv *settings.Values,
buf *pgwirebase.ReadBuffer,
origRemoteAddr net.Addr,
trustClientProvidedRemoteAddr bool,
) (sql.SessionArgs, error) {
args := sql.SessionArgs{
SessionDefaults: make(map[string]string),
CustomOptionSessionDefaults: make(map[string]string),
RemoteAddr: origRemoteAddr,
}
foundBufferSize := false
for {
// Read a key-value pair from the client.
key, err := buf.GetString()
if err != nil {
return sql.SessionArgs{}, pgerror.Wrap(
err, pgcode.ProtocolViolation,
"error reading option key",
)
}
if len(key) == 0 {
// End of parameter list.
break
}
value, err := buf.GetString()
if err != nil {
return sql.SessionArgs{}, pgerror.Wrapf(
err, pgcode.ProtocolViolation,
"error reading option value for key %q", key,
)
}
// Case-fold for the key for easier comparison.
key = strings.ToLower(key)
// Load the parameter.
switch key {
case "user":
// In CockroachDB SQL, unlike in PostgreSQL, usernames are
// case-insensitive. Therefore we need to normalize the username
// here, so that further lookups for authentication have the correct
// identifier.
args.User, _ = security.MakeSQLUsernameFromUserInput(value, security.UsernameValidation)
// IsSuperuser will get updated later when we load the user's session
// initialization information.
args.IsSuperuser = args.User.IsRootUser()
case "crdb:session_revival_token_base64":
token, err := base64.StdEncoding.DecodeString(value)
if err != nil {
return sql.SessionArgs{}, pgerror.Wrapf(
err, pgcode.ProtocolViolation,
"%s", key,
)
}
args.SessionRevivalToken = token
case "results_buffer_size":
if args.ConnResultsBufferSize, err = humanizeutil.ParseBytes(value); err != nil {
return sql.SessionArgs{}, errors.WithSecondaryError(
pgerror.Newf(pgcode.ProtocolViolation,
"error parsing results_buffer_size option value '%s' as bytes", value), err)
}
if args.ConnResultsBufferSize < 0 {
return sql.SessionArgs{}, pgerror.Newf(pgcode.ProtocolViolation,
"results_buffer_size option value '%s' cannot be negative", value)
}
foundBufferSize = true
case "crdb:remote_addr":
if !trustClientProvidedRemoteAddr {
return sql.SessionArgs{}, pgerror.Newf(pgcode.ProtocolViolation,
"server not configured to accept remote address override (requested: %q)", value)
}
hostS, portS, err := net.SplitHostPort(value)
if err != nil {
return sql.SessionArgs{}, pgerror.Wrap(
err, pgcode.ProtocolViolation,
"invalid address format",
)
}
port, err := strconv.Atoi(portS)
if err != nil {
return sql.SessionArgs{}, pgerror.Wrap(
err, pgcode.ProtocolViolation,
"remote port is not numeric",
)
}
ip := net.ParseIP(hostS)
if ip == nil {
return sql.SessionArgs{}, pgerror.New(pgcode.ProtocolViolation,
"remote address is not numeric")
}
args.RemoteAddr = &net.TCPAddr{IP: ip, Port: port}
case "options":
opts, err := parseOptions(value)
if err != nil {
return sql.SessionArgs{}, err
}
for _, opt := range opts {
err = loadParameter(ctx, opt.key, opt.value, &args)
if err != nil {
return sql.SessionArgs{}, pgerror.Wrapf(err, pgerror.GetPGCode(err), "options")
}
}
default:
err = loadParameter(ctx, key, value, &args)
if err != nil {
return sql.SessionArgs{}, err
}
}
}
if !foundBufferSize && sv != nil {
// The client did not provide buffer_size; use the cluster setting as default.
args.ConnResultsBufferSize = connResultsBufferSize.Get(sv)
}
// TODO(richardjcai): When connecting to the database, we'll want to
// check for CONNECT privilege on the database. #59875.
if _, ok := args.SessionDefaults["database"]; !ok {
// CockroachDB-specific behavior: if no database is specified,
// default to "defaultdb". In PostgreSQL this would be "postgres".
args.SessionDefaults["database"] = catalogkeys.DefaultDatabaseName
}
// The client might override the application name,
// which would prevent it from being counted in telemetry.
// We've decided that this noise in the data is acceptable.
if appName, ok := args.SessionDefaults["application_name"]; ok {
if appName == catconstants.ReportableAppNamePrefix+catconstants.InternalSQLAppName {
telemetry.Inc(sqltelemetry.CockroachShellCounter)
}
}
return args, nil
}
func loadParameter(ctx context.Context, key, value string, args *sql.SessionArgs) error {
key = strings.ToLower(key)
exists, configurable := sql.IsSessionVariableConfigurable(key)
switch {
case exists && configurable:
args.SessionDefaults[key] = value
case sql.IsCustomOptionSessionVariable(key):
args.CustomOptionSessionDefaults[key] = value
case !exists:
if _, ok := sql.UnsupportedVars[key]; ok {
counter := sqltelemetry.UnimplementedClientStatusParameterCounter(key)
telemetry.Inc(counter)
}
log.Warningf(ctx, "unknown configuration parameter: %q", key)
case !configurable:
return pgerror.Newf(pgcode.CantChangeRuntimeParam,
"parameter %q cannot be changed", key)
}
return nil
}
// option represents an option argument passed in the connection URL.
type option struct {
key string
value string
}
// parseOptions parses the given string into the options. The options must be
// separated by space and have one of the following patterns:
// '-c key=value', '-ckey=value', '--key=value'
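// For example, after URL-unescaping, the options string
// "-c search_path=public --application_name=myapp" parses into
// [{search_path public} {application_name myapp}].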
func parseOptions(optionsString string) ([]option, error) {
var res []option
optionsRaw, err := url.QueryUnescape(optionsString)
if err != nil {
return nil, pgerror.Newf(pgcode.ProtocolViolation, "failed to unescape options %q", optionsString)
}
lastWasDashC := false
opts := splitOptions(optionsRaw)
for i := 0; i < len(opts); i++ {
prefix := ""
if len(opts[i]) > 1 {
prefix = opts[i][:2]
}
switch {
case opts[i] == "-c":
lastWasDashC = true
continue
case lastWasDashC:
lastWasDashC = false
// if the last option was '-c' parse current option with no regard to
// the prefix
prefix = ""
case prefix == "--" || prefix == "-c":
lastWasDashC = false
default:
return nil, pgerror.Newf(pgcode.ProtocolViolation,
"option %q is invalid, must have prefix '-c' or '--'", opts[i])
}
opt, err := splitOption(opts[i], prefix)
if err != nil {
return nil, err
}
res = append(res, opt)
}
return res, nil
}
// splitOptions slices the given string into substrings separated by space
// unless the space is escaped using backslashes '\\'. It also skips multiple
// subsequent spaces.
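// For example, "a\ b  c" is split into ["a b", "c"]: the escaped space is
// kept inside the first element and the run of spaces acts as one separator.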
func splitOptions(options string) []string {
var res []string
var sb strings.Builder
i := 0
for i < len(options) {
sb.Reset()
// skip leading space
for i < len(options) && unicode.IsSpace(rune(options[i])) {
i++
}
if i == len(options) {
break
}
lastWasEscape := false
for i < len(options) {
if unicode.IsSpace(rune(options[i])) && !lastWasEscape {
break
}
if !lastWasEscape && options[i] == '\\' {
lastWasEscape = true
} else {
lastWasEscape = false
sb.WriteByte(options[i])
}
i++
}
res = append(res, sb.String())
}
return res
}
// splitOption splits the given opt argument into substrings separated by '='.
// It returns an error if the given option does not comply with the pattern
// "key=value", i.e. if splitting on '=' does not yield exactly two elements.
// splitOption removes the prefix from the key and replaces '-' with '_' so
// "--option-name=value" becomes [option_name, value].
func splitOption(opt, prefix string) (option, error) {
kv := strings.Split(opt, "=")
if len(kv) != 2 {
return option{}, pgerror.Newf(pgcode.ProtocolViolation,
"option %q is invalid, check '='", opt)
}
kv[0] = strings.TrimPrefix(kv[0], prefix)
return option{key: strings.ReplaceAll(kv[0], "-", "_"), value: kv[1]}, nil
}
// Note: Usage of an env var here makes it possible to unconditionally
// enable this feature when cluster settings do not work reliably,
// e.g. in multi-tenant setups in v20.2. This override mechanism can
// be removed after all of CC is moved to use v21.1 or a version which
// supports cluster settings.
var trustClientProvidedRemoteAddrOverride = envutil.EnvOrDefaultBool("COCKROACH_TRUST_CLIENT_PROVIDED_SQL_REMOTE_ADDR", false)
// TestingSetTrustClientProvidedRemoteAddr is used in tests.
func (s *Server) TestingSetTrustClientProvidedRemoteAddr(b bool) func() {
prev := s.trustClientProvidedRemoteAddr.Get()
s.trustClientProvidedRemoteAddr.Set(b)
return func() { s.trustClientProvidedRemoteAddr.Set(prev) }
}
// maybeUpgradeToSecureConn upgrades the connection to TLS/SSL if
// requested by the client, and available in the server configuration.
func (s *Server) maybeUpgradeToSecureConn(
ctx context.Context,
conn net.Conn,
connType hba.ConnType,
version uint32,
buf *pgwirebase.ReadBuffer,
) (newConn net.Conn, newConnType hba.ConnType, newVersion uint32, clientErr, serverErr error) {
// By default, this is a no-op.
newConn = conn
newConnType = connType
newVersion = version
var n int // byte counts
if version != versionSSL {
// The client did not require a SSL connection.
// Insecure mode: nothing to say, nothing to do.
// TODO(knz): Remove this condition - see
// https://github.com/cockroachdb/cockroach/issues/53404
if s.cfg.Insecure {
return
}
// Secure mode: disallow if TCP and the user did not opt into
// non-TLS SQL conns.
if !s.cfg.AcceptSQLWithoutTLS && connType != hba.ConnLocal {
clientErr = pgerror.New(pgcode.ProtocolViolation, ErrSSLRequired)
}
return
}
if connType == hba.ConnLocal {
// No existing PostgreSQL driver ever tries to activate TLS over
// a unix socket. But in case someone, sometime, somewhere, makes
// that mistake, let them know that we don't want it.
clientErr = pgerror.New(pgcode.ProtocolViolation,
"cannot use SSL/TLS over local connections")
return
}
// Protocol sanity check.
if len(buf.Msg) > 0 {
serverErr = errors.Errorf("unexpected data after SSLRequest: %q", buf.Msg)
return
}
// The client has requested SSL. We're going to try and upgrade the
// connection to use TLS/SSL.
// Do we have a TLS configuration?
tlsConfig, serverErr := s.execCfg.RPCContext.GetServerTLSConfig()
if serverErr != nil {
return
}
if tlsConfig == nil {
// We don't have a TLS configuration available, so we can't honor
// the client's request.
n, serverErr = conn.Write(sslUnsupported)
if serverErr != nil {
return
}
} else {
// We have a TLS configuration. Upgrade the connection.
n, serverErr = conn.Write(sslSupported)
if serverErr != nil {
return
}
newConn = tls.Server(conn, tlsConfig)
newConnType = hba.ConnHostSSL
}
s.metrics.BytesOutCount.Inc(int64(n))
// Finally, re-read the version/command from the client.
newVersion, *buf, serverErr = s.readVersion(newConn)
return
}
// registerConn registers the incoming connection to the map of active connections,
// which can be canceled by a concurrent server drain. It also returns
// the current draining status of the server.
//
// The onCloseFn() callback must be called at the end of the
// connection by the caller.
func (s *Server) registerConn(
ctx context.Context,
) (newCtx context.Context, draining bool, onCloseFn func()) {
onCloseFn = func() {}
newCtx = ctx
s.mu.Lock()
draining = s.mu.draining
if !draining {
var cancel context.CancelFunc
newCtx, cancel = contextutil.WithCancel(ctx)
done := make(chan struct{})
s.mu.connCancelMap[done] = cancel
onCloseFn = func() {
cancel()
close(done)
s.mu.Lock()
delete(s.mu.connCancelMap, done)
s.mu.Unlock()
}
}
s.mu.Unlock()
// If the Server is draining, we will use the connection only to send an
// error, so we don't count it in the stats. This makes sense since
// DrainClient() waits for that number to drop to zero,
// so we don't want it to oscillate unnecessarily.
if !draining {
s.metrics.NewConns.Inc(1)
s.metrics.Conns.Inc(1)
prevOnCloseFn := onCloseFn
onCloseFn = func() { prevOnCloseFn(); s.metrics.Conns.Dec(1) }
}
return
}
// readVersion reads the start-up message, then returns the version
// code (first uint32 in message) and the buffer containing the rest
// of the payload.
func (s *Server) readVersion(
conn io.Reader,
) (version uint32, buf pgwirebase.ReadBuffer, err error) {
var n int
buf = pgwirebase.MakeReadBuffer(
pgwirebase.ReadBufferOptionWithClusterSettings(&s.execCfg.Settings.SV),
)
n, err = buf.ReadUntypedMsg(conn)
if err != nil {
return
}
version, err = buf.GetUint32()
if err != nil {
return
}
s.metrics.BytesInCount.Inc(int64(n))
return
}
// sendErr sends errors to the client during the connection startup
// sequence. Later error sends during/after authentication are handled
// in conn.go.
func (s *Server) sendErr(ctx context.Context, conn net.Conn, err error) error {
msgBuilder := newWriteBuffer(s.metrics.BytesOutCount)
// We could, but do not, report server-side network errors while
// trying to send the client error. This is because clients that
// receive error payload are highly correlated with clients
// disconnecting abruptly.
_ /* err */ = writeErr(ctx, &s.execCfg.Settings.SV, err, msgBuilder, conn)
_ = conn.Close()
return err
}
func newAdminShutdownErr(msg string) error {
return pgerror.New(pgcode.AdminShutdown, msg)
}
| pkg/sql/pgwire/server.go | 1 | https://github.com/cockroachdb/cockroach/commit/648299bb6843159e625dacbee11ef177b8e2ced5 | [
0.997463583946228,
0.016368892043828964,
0.00015948298096191138,
0.00017094254144467413,
0.12453582137823105
] |
{
"id": 1,
"code_window": [
"\t\treq := &serverpb.CancelQueryByKeyRequest{\n",
"\t\t\tSQLInstanceID: cancelKey.GetSQLInstanceID(),\n",
"\t\t\tCancelQueryKey: cancelKey,\n",
"\t\t}\n",
"\t\tresp, err := s.execCfg.SQLStatusServer.CancelQueryByKey(ctx, req)\n",
"\t\tif len(resp.Error) > 0 {\n",
"\t\t\terr = errors.CombineErrors(err, errors.Newf(\"error from CancelQueryByKeyResponse: %s\", resp.Error))\n",
"\t\t}\n",
"\t\treturn resp, err\n",
"\t}()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif resp != nil && len(resp.Error) > 0 {\n"
],
"file_path": "pkg/sql/pgwire/server.go",
"type": "replace",
"edit_start_line_idx": 789
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "st1011",
srcs = ["analyzer.go"],
importpath = "github.com/cockroachdb/cockroach/build/bazelutil/staticcheckanalyzers/st1011",
visibility = ["//visibility:public"],
deps = [
"//pkg/testutils/lint/passes/staticcheck",
"@co_honnef_go_tools//stylecheck",
"@org_golang_x_tools//go/analysis",
],
)
| build/bazelutil/staticcheckanalyzers/st1011/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/648299bb6843159e625dacbee11ef177b8e2ced5 | [
0.00017460575327277184,
0.00017433654284104705,
0.00017406733240932226,
0.00017433654284104705,
2.6921043172478676e-7
] |
{
"id": 1,
"code_window": [
"\t\treq := &serverpb.CancelQueryByKeyRequest{\n",
"\t\t\tSQLInstanceID: cancelKey.GetSQLInstanceID(),\n",
"\t\t\tCancelQueryKey: cancelKey,\n",
"\t\t}\n",
"\t\tresp, err := s.execCfg.SQLStatusServer.CancelQueryByKey(ctx, req)\n",
"\t\tif len(resp.Error) > 0 {\n",
"\t\t\terr = errors.CombineErrors(err, errors.Newf(\"error from CancelQueryByKeyResponse: %s\", resp.Error))\n",
"\t\t}\n",
"\t\treturn resp, err\n",
"\t}()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif resp != nil && len(resp.Error) > 0 {\n"
],
"file_path": "pkg/sql/pgwire/server.go",
"type": "replace",
"edit_start_line_idx": 789
} | # LogicTest: local
statement ok
CREATE TABLE foo (x CHAR PRIMARY KEY); INSERT INTO foo(x) VALUES ('a'), ('b')
query T
EXPLAIN (VERBOSE) SELECT max(ordinality) FROM foo WITH ORDINALITY
----
distribution: local
vectorized: true
·
• group (scalar)
│ columns: (max)
│ estimated row count: 1 (missing stats)
│ aggregate 0: max(ordinality)
│
└── • ordinality
│ columns: ("ordinality")
│ estimated row count: 1,000 (missing stats)
│
└── • scan
columns: ()
estimated row count: 1,000 (missing stats)
table: foo@foo_pkey
spans: FULL SCAN
query T
EXPLAIN (VERBOSE) SELECT * FROM foo WITH ORDINALITY WHERE ordinality > 1 ORDER BY ordinality
----
distribution: local
vectorized: true
·
• filter
│ columns: (x, "ordinality")
│ ordering: +"ordinality"
│ estimated row count: 333 (missing stats)
│ filter: "ordinality" > 1
│
└── • ordinality
│ columns: (x, "ordinality")
│ estimated row count: 1,000 (missing stats)
│
└── • scan
columns: (x)
estimated row count: 1,000 (missing stats)
table: foo@foo_pkey
spans: FULL SCAN
query T
EXPLAIN (VERBOSE) SELECT * FROM foo WITH ORDINALITY WHERE ordinality > 1 ORDER BY ordinality DESC
----
distribution: local
vectorized: true
·
• sort
│ columns: (x, "ordinality")
│ ordering: -"ordinality"
│ estimated row count: 333 (missing stats)
│ order: -"ordinality"
│
└── • filter
│ columns: (x, "ordinality")
│ estimated row count: 333 (missing stats)
│ filter: "ordinality" > 1
│
└── • ordinality
│ columns: (x, "ordinality")
│ estimated row count: 1,000 (missing stats)
│
└── • scan
columns: (x)
estimated row count: 1,000 (missing stats)
table: foo@foo_pkey
spans: FULL SCAN
# Show that the primary key is used under ordinalityNode.
query T
EXPLAIN (VERBOSE) SELECT * FROM (SELECT * FROM foo WHERE x > 'a') WITH ORDINALITY
----
distribution: local
vectorized: true
·
• ordinality
│ columns: (x, "ordinality")
│ estimated row count: 333 (missing stats)
│
└── • scan
columns: (x)
estimated row count: 333 (missing stats)
table: foo@foo_pkey
spans: /"a\x00"-
# Show that the primary key cannot be used with a PK predicate
# outside of ordinalityNode.
query T
EXPLAIN (VERBOSE) SELECT * FROM foo WITH ORDINALITY WHERE x > 'a'
----
distribution: local
vectorized: true
·
• filter
│ columns: (x, "ordinality")
│ estimated row count: 333 (missing stats)
│ filter: x > 'a'
│
└── • ordinality
│ columns: (x, "ordinality")
│ estimated row count: 1,000 (missing stats)
│
└── • scan
columns: (x)
estimated row count: 1,000 (missing stats)
table: foo@foo_pkey
spans: FULL SCAN
| pkg/sql/opt/exec/execbuilder/testdata/ordinality | 0 | https://github.com/cockroachdb/cockroach/commit/648299bb6843159e625dacbee11ef177b8e2ced5 | [
0.00017687394574750215,
0.00017293274868279696,
0.00016208212764468044,
0.00017535084043629467,
0.0000049082395889854524
] |
{
"id": 1,
"code_window": [
"\t\treq := &serverpb.CancelQueryByKeyRequest{\n",
"\t\t\tSQLInstanceID: cancelKey.GetSQLInstanceID(),\n",
"\t\t\tCancelQueryKey: cancelKey,\n",
"\t\t}\n",
"\t\tresp, err := s.execCfg.SQLStatusServer.CancelQueryByKey(ctx, req)\n",
"\t\tif len(resp.Error) > 0 {\n",
"\t\t\terr = errors.CombineErrors(err, errors.Newf(\"error from CancelQueryByKeyResponse: %s\", resp.Error))\n",
"\t\t}\n",
"\t\treturn resp, err\n",
"\t}()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif resp != nil && len(resp.Error) > 0 {\n"
],
"file_path": "pkg/sql/pgwire/server.go",
"type": "replace",
"edit_start_line_idx": 789
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
@require nib
@require 'base/layout-vars.styl'
@require 'base/palette.styl'
@require 'base/typography.styl'
@require 'utils/fonts.styl'
@require 'base/reset.styl'
@require 'pages/reports.styl'
@require 'shame.styl'
| pkg/ui/workspaces/db-console/styl/app.styl | 0 | https://github.com/cockroachdb/cockroach/commit/648299bb6843159e625dacbee11ef177b8e2ced5 | [
0.00017770785780157894,
0.00017500687681604177,
0.0001722546003293246,
0.000175058186869137,
0.0000022265785446506925
] |
{
"id": 2,
"code_window": [
"\t}()\n",
"\n",
"\tif resp != nil && resp.Canceled {\n",
"\t\ts.metrics.PGWireCancelSuccessfulCount.Inc(1)\n",
"\t} else if err != nil {\n",
"\t\tif status := status.Convert(err); status.Code() == codes.ResourceExhausted {\n",
"\t\t\ts.metrics.PGWireCancelIgnoredCount.Inc(1)\n",
"\t\t}\n",
"\t\tlog.Sessions.Warningf(ctx, \"unexpected while handling pgwire cancellation request: %v\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif respStatus := status.Convert(err); respStatus.Code() == codes.ResourceExhausted {\n"
],
"file_path": "pkg/sql/pgwire/server.go",
"type": "replace",
"edit_start_line_idx": 798
} | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package pgwire
import (
"context"
"crypto/tls"
"encoding/base64"
"fmt"
"io"
"net"
"net/url"
"strconv"
"strings"
"sync/atomic"
"time"
"unicode"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catconstants"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/hba"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/identmap"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirebase"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirecancel"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/contextutil"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/log/eventpb"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/logtags"
"github.com/cockroachdb/redact"
"go.opentelemetry.io/otel/attribute"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// ATTENTION: After changing this value in a unit test, you probably want to
// open a new connection pool since the connections in the existing one are not
// affected.
//
// The "results_buffer_size" connection parameter can be used to override this
// default for an individual connection.
var connResultsBufferSize = settings.RegisterByteSizeSetting(
settings.TenantWritable,
"sql.defaults.results_buffer.size",
"default size of the buffer that accumulates results for a statement or a batch "+
"of statements before they are sent to the client. This can be overridden on "+
"an individual connection with the 'results_buffer_size' parameter. Note that auto-retries "+
"generally only happen while no results have been delivered to the client, so "+
"reducing this size can increase the number of retriable errors a client "+
"receives. On the other hand, increasing the buffer size can increase the "+
"delay until the client receives the first result row. "+
"Updating the setting only affects new connections. "+
"Setting to 0 disables any buffering.",
16<<10, // 16 KiB
).WithPublic()
var logConnAuth = settings.RegisterBoolSetting(
settings.TenantWritable,
sql.ConnAuditingClusterSettingName,
"if set, log SQL client connect and disconnect events (note: may hinder performance on loaded nodes)",
false).WithPublic()
var logSessionAuth = settings.RegisterBoolSetting(
settings.TenantWritable,
sql.AuthAuditingClusterSettingName,
"if set, log SQL session login/disconnection events (note: may hinder performance on loaded nodes)",
false).WithPublic()
const (
// ErrSSLRequired is returned when a client attempts to connect to a
// secure server in cleartext.
ErrSSLRequired = "node is running secure mode, SSL connection required"
// ErrDrainingNewConn is returned when a client attempts to connect to a server
// which is not accepting client connections.
ErrDrainingNewConn = "server is not accepting clients, try another node"
// ErrDrainingExistingConn is returned when a connection is shut down because
// the server is draining.
ErrDrainingExistingConn = "server is shutting down"
)
// Fully-qualified names for metrics.
var (
MetaConns = metric.Metadata{
Name: "sql.conns",
Help: "Number of active sql connections",
Measurement: "Connections",
Unit: metric.Unit_COUNT,
}
MetaNewConns = metric.Metadata{
Name: "sql.new_conns",
Help: "Counter of the number of sql connections created",
Measurement: "Connections",
Unit: metric.Unit_COUNT,
}
MetaBytesIn = metric.Metadata{
Name: "sql.bytesin",
Help: "Number of sql bytes received",
Measurement: "SQL Bytes",
Unit: metric.Unit_BYTES,
}
MetaBytesOut = metric.Metadata{
Name: "sql.bytesout",
Help: "Number of sql bytes sent",
Measurement: "SQL Bytes",
Unit: metric.Unit_BYTES,
}
MetaConnLatency = metric.Metadata{
Name: "sql.conn.latency",
Help: "Latency to establish and authenticate a SQL connection",
Measurement: "Nanoseconds",
Unit: metric.Unit_NANOSECONDS,
}
MetaPGWireCancelTotal = metric.Metadata{
Name: "sql.pgwire_cancel.total",
Help: "Counter of the number of pgwire query cancel requests",
Measurement: "Requests",
Unit: metric.Unit_COUNT,
}
MetaPGWireCancelIgnored = metric.Metadata{
Name: "sql.pgwire_cancel.ignored",
Help: "Counter of the number of pgwire query cancel requests that were ignored due to rate limiting",
Measurement: "Requests",
Unit: metric.Unit_COUNT,
}
MetaPGWireCancelSuccessful = metric.Metadata{
Name: "sql.pgwire_cancel.successful",
Help: "Counter of the number of pgwire query cancel requests that were successful",
Measurement: "Requests",
Unit: metric.Unit_COUNT,
}
)
const (
// The below constants can occur during the first message a client
// sends to the server. There are two categories: protocol version and
// request code. The protocol version is (major version number << 16)
// + minor version number. Request codes are (1234 << 16) + 5678 + N,
// where N started at 0 and is increased by 1 for every new request
// code added, which happens rarely during major or minor Postgres
// releases.
//
// See: https://www.postgresql.org/docs/current/protocol-message-formats.html
version30 = 196608 // (3 << 16) + 0
versionCancel = 80877102 // (1234 << 16) + 5678
versionSSL = 80877103 // (1234 << 16) + 5679
versionGSSENC = 80877104 // (1234 << 16) + 5680
)
// cancelMaxWait is the amount of time a draining server gives to sessions to
// react to cancellation and return before a forceful shutdown.
const cancelMaxWait = 1 * time.Second
// baseSQLMemoryBudget is the amount of memory pre-allocated in each connection.
var baseSQLMemoryBudget = envutil.EnvOrDefaultInt64("COCKROACH_BASE_SQL_MEMORY_BUDGET",
int64(2.1*float64(mon.DefaultPoolAllocationSize)))
// connReservationBatchSize determines for how many connections memory
// is pre-reserved at once.
var connReservationBatchSize = 5
var (
sslSupported = []byte{'S'}
sslUnsupported = []byte{'N'}
)
// cancelChanMap keeps track of channels that are closed after the associated
// cancellation function has been called and the cancellation has taken place.
type cancelChanMap map[chan struct{}]context.CancelFunc
// Server implements the server side of the PostgreSQL wire protocol.
type Server struct {
AmbientCtx log.AmbientContext
cfg *base.Config
SQLServer *sql.Server
execCfg *sql.ExecutorConfig
metrics ServerMetrics
mu struct {
syncutil.Mutex
// connCancelMap entries represent connections started when the server
// was not draining. Each value is a function that can be called to
// cancel the associated connection. The corresponding key is a channel
// that is closed when the connection is done.
connCancelMap cancelChanMap
draining bool
}
auth struct {
syncutil.RWMutex
conf *hba.Conf
identityMap *identmap.Conf
}
sqlMemoryPool *mon.BytesMonitor
connMonitor *mon.BytesMonitor
// testing{Conn,Auth}LogEnabled is used in unit tests in this
// package to force-enable conn/auth logging without dancing around
// the asynchronicity of cluster settings.
testingConnLogEnabled int32
testingAuthLogEnabled int32
// trustClientProvidedRemoteAddr indicates whether the server should honor
// a `crdb:remote_addr` status parameter provided by the client during
// session authentication. This status parameter can be set by SQL proxies
// to feed the "real" client address, where otherwise the CockroachDB SQL
// server would only see the address of the proxy.
//
// This setting is security-sensitive and should not be enabled
// without a SQL proxy that carefully scrubs any client-provided
// `crdb:remote_addr` field. In particular, this setting should never
// be set when there is no SQL proxy at all. Otherwise, a malicious
// client could use this field to pretend to be from an address other
// than its own and defeat the HBA rules.
//
// TODO(knz,ben): It would be good to have something more specific
// than a boolean, i.e. to accept the provided address only from
// certain peer IPs, or with certain certificates. (could it be a
// special hba.conf directive?)
trustClientProvidedRemoteAddr syncutil.AtomicBool
}
// ServerMetrics is the set of metrics for the pgwire server.
type ServerMetrics struct {
BytesInCount *metric.Counter
BytesOutCount *metric.Counter
Conns *metric.Gauge
NewConns *metric.Counter
ConnLatency *metric.Histogram
PGWireCancelTotalCount *metric.Counter
PGWireCancelIgnoredCount *metric.Counter
PGWireCancelSuccessfulCount *metric.Counter
ConnMemMetrics sql.BaseMemoryMetrics
SQLMemMetrics sql.MemoryMetrics
}
func makeServerMetrics(
sqlMemMetrics sql.MemoryMetrics, histogramWindow time.Duration,
) ServerMetrics {
return ServerMetrics{
BytesInCount: metric.NewCounter(MetaBytesIn),
BytesOutCount: metric.NewCounter(MetaBytesOut),
Conns: metric.NewGauge(MetaConns),
NewConns: metric.NewCounter(MetaNewConns),
ConnLatency: metric.NewLatency(MetaConnLatency, histogramWindow),
PGWireCancelTotalCount: metric.NewCounter(MetaPGWireCancelTotal),
PGWireCancelIgnoredCount: metric.NewCounter(MetaPGWireCancelIgnored),
PGWireCancelSuccessfulCount: metric.NewCounter(MetaPGWireCancelSuccessful),
ConnMemMetrics: sql.MakeBaseMemMetrics("conns", histogramWindow),
SQLMemMetrics: sqlMemMetrics,
}
}
// noteworthySQLMemoryUsageBytes is the minimum size tracked by the
// client SQL pool before the pool starts explicitly logging overall
// usage growth in the log.
var noteworthySQLMemoryUsageBytes = envutil.EnvOrDefaultInt64("COCKROACH_NOTEWORTHY_SQL_MEMORY_USAGE", 100*1024*1024)
// noteworthyConnMemoryUsageBytes is the minimum size tracked by the
// connection monitor before the monitor starts explicitly logging overall
// usage growth in the log.
var noteworthyConnMemoryUsageBytes = envutil.EnvOrDefaultInt64("COCKROACH_NOTEWORTHY_CONN_MEMORY_USAGE", 2*1024*1024)
// MakeServer creates a Server.
//
// Start() needs to be called on the Server so it begins processing.
func MakeServer(
ambientCtx log.AmbientContext,
cfg *base.Config,
st *cluster.Settings,
sqlMemMetrics sql.MemoryMetrics,
parentMemoryMonitor *mon.BytesMonitor,
histogramWindow time.Duration,
executorConfig *sql.ExecutorConfig,
) *Server {
server := &Server{
AmbientCtx: ambientCtx,
cfg: cfg,
execCfg: executorConfig,
metrics: makeServerMetrics(sqlMemMetrics, histogramWindow),
}
server.sqlMemoryPool = mon.NewMonitor("sql",
mon.MemoryResource,
// Note that we don't report metrics on this monitor. The reason for this is
// that we report metrics on the sum of all the child monitors of this pool.
// This monitor is the "main sql" monitor. It's a child of the root memory
// monitor. Its children are the sql monitors for each new connection. The
// sum of those children, plus the extra memory in the "conn" monitor below,
// is more than enough metrics information about the monitors.
nil, /* curCount */
nil, /* maxHist */
0, noteworthySQLMemoryUsageBytes, st)
server.sqlMemoryPool.Start(context.Background(), parentMemoryMonitor, mon.BoundAccount{})
server.SQLServer = sql.NewServer(executorConfig, server.sqlMemoryPool)
// TODO(knz,ben): Use a cluster setting for this.
server.trustClientProvidedRemoteAddr.Set(trustClientProvidedRemoteAddrOverride)
server.connMonitor = mon.NewMonitor("conn",
mon.MemoryResource,
server.metrics.ConnMemMetrics.CurBytesCount,
server.metrics.ConnMemMetrics.MaxBytesHist,
int64(connReservationBatchSize)*baseSQLMemoryBudget, noteworthyConnMemoryUsageBytes, st)
server.connMonitor.Start(context.Background(), server.sqlMemoryPool, mon.BoundAccount{})
server.mu.Lock()
server.mu.connCancelMap = make(cancelChanMap)
server.mu.Unlock()
connAuthConf.SetOnChange(&st.SV, func(ctx context.Context) {
loadLocalHBAConfigUponRemoteSettingChange(
ambientCtx.AnnotateCtx(context.Background()), server, st)
})
connIdentityMapConf.SetOnChange(&st.SV, func(ctx context.Context) {
loadLocalIdentityMapUponRemoteSettingChange(
ambientCtx.AnnotateCtx(context.Background()), server, st)
})
return server
}
// BytesOut returns the total number of bytes transmitted from this server.
func (s *Server) BytesOut() uint64 {
return uint64(s.metrics.BytesOutCount.Count())
}
// AnnotateCtxForIncomingConn annotates the provided context with a
// tag that reports the peer's address. In the common case, the
// context is annotated with a "client" tag. When the server is
// configured to recognize client-specified remote addresses, it is
// annotated with a "peer" tag and the "client" tag is added later
// when the session is set up.
func (s *Server) AnnotateCtxForIncomingConn(ctx context.Context, conn net.Conn) context.Context {
tag := "client"
if s.trustClientProvidedRemoteAddr.Get() {
tag = "peer"
}
return logtags.AddTag(ctx, tag, conn.RemoteAddr().String())
}
// Match returns true if rd appears to be a Postgres connection.
func Match(rd io.Reader) bool {
buf := pgwirebase.MakeReadBuffer()
_, err := buf.ReadUntypedMsg(rd)
if err != nil {
return false
}
version, err := buf.GetUint32()
if err != nil {
return false
}
return version == version30 || version == versionSSL || version == versionCancel || version == versionGSSENC
}
// Start makes the Server ready for serving connections.
func (s *Server) Start(ctx context.Context, stopper *stop.Stopper) {
s.SQLServer.Start(ctx, stopper)
}
// IsDraining returns true if the server is not currently accepting
// connections.
func (s *Server) IsDraining() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.mu.draining
}
// Metrics returns the set of metrics structs.
func (s *Server) Metrics() (res []interface{}) {
return []interface{}{
&s.metrics,
&s.SQLServer.Metrics.StartedStatementCounters,
&s.SQLServer.Metrics.ExecutedStatementCounters,
&s.SQLServer.Metrics.EngineMetrics,
&s.SQLServer.Metrics.GuardrailMetrics,
&s.SQLServer.InternalMetrics.StartedStatementCounters,
&s.SQLServer.InternalMetrics.ExecutedStatementCounters,
&s.SQLServer.InternalMetrics.EngineMetrics,
&s.SQLServer.InternalMetrics.GuardrailMetrics,
&s.SQLServer.ServerMetrics.StatsMetrics,
&s.SQLServer.ServerMetrics.ContentionSubsystemMetrics,
}
}
// Drain prevents new connections from being served and waits up to drainWait
// for open connections to terminate before canceling them.
// An error is returned if connections that have been canceled do not respond
// to the cancellation and close themselves in time. The server
// will remain in draining state, though open connections may continue to
// exist.
// The RFC on drain modes has more information regarding the specifics of
// what will happen to connections in different states:
// https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160425_drain_modes.md
//
// The reporter callback, if non-nil, is called on a best effort basis
// to report work that needed to be done and which may or may not have
// been done by the time this call returns. See the explanation in
// pkg/server/drain.go for details.
func (s *Server) Drain(
ctx context.Context, drainWait time.Duration, reporter func(int, redact.SafeString),
) error {
return s.drainImpl(ctx, drainWait, cancelMaxWait, reporter)
}
// Undrain switches the server back to the normal mode of operation in which
// connections are accepted.
func (s *Server) Undrain() {
s.mu.Lock()
s.setDrainingLocked(false)
s.mu.Unlock()
}
// setDrainingLocked sets the server's draining state and returns whether the
// state changed (i.e. drain != s.mu.draining). s.mu must be locked.
func (s *Server) setDrainingLocked(drain bool) bool {
if s.mu.draining == drain {
return false
}
s.mu.draining = drain
return true
}
// drainImpl drains the SQL clients.
//
// The queryWait duration is used to wait on clients to
// self-disconnect after their session has been canceled. The
// cancelWait duration is used to wait, once the queryWait timer has
// expired and there are still clients connected, for those clients to
// terminate after their context.Context has been canceled.
//
// The reporter callback, if non-nil, is called on a best effort basis
// to report work that needed to be done and which may or may not have
// been done by the time this call returns. See the explanation in
// pkg/server/drain.go for details.
func (s *Server) drainImpl(
ctx context.Context,
queryWait time.Duration,
cancelWait time.Duration,
reporter func(int, redact.SafeString),
) error {
// This anonymous function returns a copy of s.mu.connCancelMap if there are
// any active connections to cancel. We will only attempt to cancel
// connections that were active at the moment the draining switch happened.
// It is enough to do this because:
// 1) If no new connections are added to the original map all connections
// will be canceled.
// 2) If new connections are added to the original map, it follows that they
// were added when s.mu.draining = false, thus not requiring cancellation.
// These connections are not our responsibility and will be handled when the
// server starts draining again.
connCancelMap := func() cancelChanMap {
s.mu.Lock()
defer s.mu.Unlock()
if !s.setDrainingLocked(true) {
// We are already draining.
return nil
}
connCancelMap := make(cancelChanMap)
for done, cancel := range s.mu.connCancelMap {
connCancelMap[done] = cancel
}
return connCancelMap
}()
if len(connCancelMap) == 0 {
return nil
}
if reporter != nil {
// Report progress to the Drain RPC.
reporter(len(connCancelMap), "SQL clients")
}
// Spin off a goroutine that waits for all connections to signal that they
// are done and reports it on allConnsDone. The main goroutine signals this
// goroutine to stop work through quitWaitingForConns.
allConnsDone := make(chan struct{})
quitWaitingForConns := make(chan struct{})
defer close(quitWaitingForConns)
go func() {
defer close(allConnsDone)
for done := range connCancelMap {
select {
case <-done:
case <-quitWaitingForConns:
return
}
}
}()
// Wait for all connections to finish up to drainWait.
select {
case <-time.After(queryWait):
log.Ops.Warningf(ctx, "canceling all sessions after waiting %s", queryWait)
case <-allConnsDone:
}
// Cancel the contexts of all sessions if the server is still in draining
// mode.
if stop := func() bool {
s.mu.Lock()
defer s.mu.Unlock()
if !s.mu.draining {
return true
}
for _, cancel := range connCancelMap {
// There is a possibility that different calls to SetDraining have
// overlapping connCancelMaps, but context.CancelFunc calls are
// idempotent.
cancel()
}
return false
}(); stop {
return nil
}
select {
case <-time.After(cancelWait):
return errors.Errorf("some sessions did not respond to cancellation within %s", cancelWait)
case <-allConnsDone:
}
return nil
}
// SocketType indicates the connection type. This is an optimization to
// prevent a comparison against conn.LocalAddr().Network().
type SocketType bool
const (
// SocketTCP is used for TCP sockets. The standard.
SocketTCP SocketType = true
// SocketUnix is used for unix datagram sockets.
SocketUnix SocketType = false
)
func (s SocketType) asConnType() (hba.ConnType, error) {
switch s {
case SocketTCP:
return hba.ConnHostNoSSL, nil
case SocketUnix:
return hba.ConnLocal, nil
default:
return 0, errors.AssertionFailedf("unimplemented socket type: %v", errors.Safe(s))
}
}
func (s *Server) connLogEnabled() bool {
return atomic.LoadInt32(&s.testingConnLogEnabled) != 0 || logConnAuth.Get(&s.execCfg.Settings.SV)
}
// TestingEnableConnLogging is exported for use in tests.
func (s *Server) TestingEnableConnLogging() {
atomic.StoreInt32(&s.testingConnLogEnabled, 1)
}
// TestingEnableAuthLogging is exported for use in tests.
func (s *Server) TestingEnableAuthLogging() {
atomic.StoreInt32(&s.testingAuthLogEnabled, 1)
}
// ServeConn serves a single connection, driving the handshake process and
// delegating to the appropriate connection type.
//
// The socketType argument is an optimization to avoid a string
// compare on conn.LocalAddr().Network(). When the socket type is
// unix datagram (local filesystem), SSL negotiation is disabled
// even when the server is running securely with certificates.
// This has the effect of forcing password auth, also in a way
// compatible with postgres.
//
// An error is returned if the initial handshake of the connection fails.
func (s *Server) ServeConn(ctx context.Context, conn net.Conn, socketType SocketType) error {
ctx, draining, onCloseFn := s.registerConn(ctx)
defer onCloseFn()
connDetails := eventpb.CommonConnectionDetails{
InstanceID: int32(s.execCfg.NodeID.SQLInstanceID()),
Network: conn.RemoteAddr().Network(),
RemoteAddress: conn.RemoteAddr().String(),
}
// Some bookkeeping, for security-minded administrators.
// This registers the connection to the authentication log.
connStart := timeutil.Now()
if s.connLogEnabled() {
ev := &eventpb.ClientConnectionStart{
CommonEventDetails: eventpb.CommonEventDetails{Timestamp: connStart.UnixNano()},
CommonConnectionDetails: connDetails,
}
log.StructuredEvent(ctx, ev)
}
defer func() {
// The duration of the session is logged at the end so that the
// reader of the log file can know how much to look back in time
// to find when the connection was opened. This is important
// because the log files may have been rotated since.
if s.connLogEnabled() {
endTime := timeutil.Now()
ev := &eventpb.ClientConnectionEnd{
CommonEventDetails: eventpb.CommonEventDetails{Timestamp: endTime.UnixNano()},
CommonConnectionDetails: connDetails,
Duration: endTime.Sub(connStart).Nanoseconds(),
}
log.StructuredEvent(ctx, ev)
}
}()
// In any case, first check the command in the start-up message.
//
// We're assuming that a client is not willing/able to receive error
// packets before we drain that message.
version, buf, err := s.readVersion(conn)
if err != nil {
return err
}
switch version {
case versionCancel:
// The cancel message is rather peculiar: it is sent without
// authentication, always over an unencrypted channel.
s.handleCancel(ctx, conn, &buf)
return nil
case versionGSSENC:
// This is a request for an unsupported feature: GSS encryption.
// https://github.com/cockroachdb/cockroach/issues/52184
//
// Ensure the right SQLSTATE is sent to the SQL client.
err := pgerror.New(pgcode.ProtocolViolation, "GSS encryption is not yet supported")
// Annotate a telemetry key. These objects
// are treated specially by sendErr: they increase a
// telemetry counter to indicate an attempt was made
// to use this feature.
err = errors.WithTelemetry(err, "#52184")
return s.sendErr(ctx, conn, err)
}
// If the server is shutting down, terminate the connection early.
if draining {
log.Ops.Info(ctx, "rejecting new connection while server is draining")
return s.sendErr(ctx, conn, newAdminShutdownErr(ErrDrainingNewConn))
}
// Compute the initial connType.
connType, err := socketType.asConnType()
if err != nil {
return err
}
// If the client requests SSL, upgrade the connection to use TLS.
var clientErr error
conn, connType, version, clientErr, err = s.maybeUpgradeToSecureConn(ctx, conn, connType, version, &buf)
if err != nil {
return err
}
if clientErr != nil {
return s.sendErr(ctx, conn, clientErr)
}
sp := tracing.SpanFromContext(ctx)
sp.SetTag("conn_type", attribute.StringValue(connType.String()))
// What does the client want to do?
switch version {
case version30:
// Normal SQL connection. Proceed normally below.
case versionCancel:
// The PostgreSQL protocol definition says that cancel payloads
// must be sent *prior to upgrading the connection to use TLS*.
// Yet, we've found clients in the wild that send the cancel
// after the TLS handshake, for example at
// https://github.com/cockroachlabs/support/issues/600.
s.handleCancel(ctx, conn, &buf)
return nil
default:
// We don't know this protocol.
err := pgerror.Newf(pgcode.ProtocolViolation, "unknown protocol version %d", version)
err = errors.WithTelemetry(err, fmt.Sprintf("protocol-version-%d", version))
return s.sendErr(ctx, conn, err)
}
// Reserve some memory for this connection using the server's monitor. This
// reduces pressure on the shared pool because the server monitor allocates in
// chunks from the shared pool and these chunks should be larger than
// baseSQLMemoryBudget.
reserved := s.connMonitor.MakeBoundAccount()
if err := reserved.Grow(ctx, baseSQLMemoryBudget); err != nil {
return errors.Wrapf(err, "unable to pre-allocate %d bytes for this connection",
baseSQLMemoryBudget)
}
// Load the client-provided session parameters.
var sArgs sql.SessionArgs
if sArgs, err = parseClientProvidedSessionParameters(ctx, &s.execCfg.Settings.SV, &buf,
conn.RemoteAddr(), s.trustClientProvidedRemoteAddr.Get()); err != nil {
return s.sendErr(ctx, conn, err)
}
// Populate the client address field in the context tags and the
// shared struct for structured logging.
// Only now do we know the remote client address for sure (it may have
// been overridden by a status parameter).
connDetails.RemoteAddress = sArgs.RemoteAddr.String()
ctx = logtags.AddTag(ctx, "client", connDetails.RemoteAddress)
sp.SetTag("client", attribute.StringValue(connDetails.RemoteAddress))
// If a test is hooking in some authentication option, load it.
var testingAuthHook func(context.Context) error
if k := s.execCfg.PGWireTestingKnobs; k != nil {
testingAuthHook = k.AuthHook
}
hbaConf, identMap := s.GetAuthenticationConfiguration()
// Defer the rest of the processing to the connection handler.
// This includes authentication.
s.serveConn(
ctx, conn, sArgs,
reserved,
connStart,
authOptions{
connType: connType,
connDetails: connDetails,
insecure: s.cfg.Insecure,
ie: s.execCfg.InternalExecutor,
auth: hbaConf,
identMap: identMap,
testingAuthHook: testingAuthHook,
})
return nil
}
// handleCancel handles a pgwire query cancellation request. Note that the
// request is unauthenticated. To mitigate the security risk (i.e., a
// malicious actor spamming this endpoint with random data to try to cancel
// a query), the logic is rate-limited by a semaphore. Refer to the comments
// in the pgwirecancel package for more information.
//
// This function does not return an error, so the caller (and possible
// attacker) will not know if the cancellation attempt succeeded. Errors are
// logged so that an operator can be aware of any possibly malicious requests.
func (s *Server) handleCancel(ctx context.Context, conn net.Conn, buf *pgwirebase.ReadBuffer) {
telemetry.Inc(sqltelemetry.CancelRequestCounter)
s.metrics.PGWireCancelTotalCount.Inc(1)
resp, err := func() (*serverpb.CancelQueryByKeyResponse, error) {
backendKeyDataBits, err := buf.GetUint64()
// The connection that issued the cancel is not a SQL session -- it's an
// entirely new connection that's created just to send the cancel. We close
// the connection as soon as possible after reading the data, since there
// is nothing to send back to the client.
_ = conn.Close()
if err != nil {
return nil, err
}
cancelKey := pgwirecancel.BackendKeyData(backendKeyDataBits)
// The request is forwarded to the appropriate node.
req := &serverpb.CancelQueryByKeyRequest{
SQLInstanceID: cancelKey.GetSQLInstanceID(),
CancelQueryKey: cancelKey,
}
resp, err := s.execCfg.SQLStatusServer.CancelQueryByKey(ctx, req)
		if resp != nil && len(resp.Error) > 0 {
err = errors.CombineErrors(err, errors.Newf("error from CancelQueryByKeyResponse: %s", resp.Error))
}
return resp, err
}()
if resp != nil && resp.Canceled {
s.metrics.PGWireCancelSuccessfulCount.Inc(1)
} else if err != nil {
if status := status.Convert(err); status.Code() == codes.ResourceExhausted {
s.metrics.PGWireCancelIgnoredCount.Inc(1)
}
log.Sessions.Warningf(ctx, "unexpected while handling pgwire cancellation request: %v", err)
}
}
// parseClientProvidedSessionParameters reads the incoming k/v pairs
// in the startup message into a sql.SessionArgs struct.
func parseClientProvidedSessionParameters(
ctx context.Context,
sv *settings.Values,
buf *pgwirebase.ReadBuffer,
origRemoteAddr net.Addr,
trustClientProvidedRemoteAddr bool,
) (sql.SessionArgs, error) {
args := sql.SessionArgs{
SessionDefaults: make(map[string]string),
CustomOptionSessionDefaults: make(map[string]string),
RemoteAddr: origRemoteAddr,
}
foundBufferSize := false
for {
// Read a key-value pair from the client.
key, err := buf.GetString()
if err != nil {
return sql.SessionArgs{}, pgerror.Wrap(
err, pgcode.ProtocolViolation,
"error reading option key",
)
}
if len(key) == 0 {
// End of parameter list.
break
}
value, err := buf.GetString()
if err != nil {
return sql.SessionArgs{}, pgerror.Wrapf(
err, pgcode.ProtocolViolation,
"error reading option value for key %q", key,
)
}
		// Case-fold the key for easier comparison.
key = strings.ToLower(key)
// Load the parameter.
switch key {
case "user":
// In CockroachDB SQL, unlike in PostgreSQL, usernames are
// case-insensitive. Therefore we need to normalize the username
// here, so that further lookups for authentication have the correct
// identifier.
args.User, _ = security.MakeSQLUsernameFromUserInput(value, security.UsernameValidation)
// IsSuperuser will get updated later when we load the user's session
// initialization information.
args.IsSuperuser = args.User.IsRootUser()
case "crdb:session_revival_token_base64":
token, err := base64.StdEncoding.DecodeString(value)
if err != nil {
return sql.SessionArgs{}, pgerror.Wrapf(
err, pgcode.ProtocolViolation,
"%s", key,
)
}
args.SessionRevivalToken = token
case "results_buffer_size":
if args.ConnResultsBufferSize, err = humanizeutil.ParseBytes(value); err != nil {
return sql.SessionArgs{}, errors.WithSecondaryError(
pgerror.Newf(pgcode.ProtocolViolation,
"error parsing results_buffer_size option value '%s' as bytes", value), err)
}
if args.ConnResultsBufferSize < 0 {
return sql.SessionArgs{}, pgerror.Newf(pgcode.ProtocolViolation,
"results_buffer_size option value '%s' cannot be negative", value)
}
foundBufferSize = true
case "crdb:remote_addr":
if !trustClientProvidedRemoteAddr {
return sql.SessionArgs{}, pgerror.Newf(pgcode.ProtocolViolation,
"server not configured to accept remote address override (requested: %q)", value)
}
hostS, portS, err := net.SplitHostPort(value)
if err != nil {
return sql.SessionArgs{}, pgerror.Wrap(
err, pgcode.ProtocolViolation,
"invalid address format",
)
}
port, err := strconv.Atoi(portS)
if err != nil {
return sql.SessionArgs{}, pgerror.Wrap(
err, pgcode.ProtocolViolation,
"remote port is not numeric",
)
}
ip := net.ParseIP(hostS)
if ip == nil {
return sql.SessionArgs{}, pgerror.New(pgcode.ProtocolViolation,
"remote address is not numeric")
}
args.RemoteAddr = &net.TCPAddr{IP: ip, Port: port}
case "options":
opts, err := parseOptions(value)
if err != nil {
return sql.SessionArgs{}, err
}
for _, opt := range opts {
err = loadParameter(ctx, opt.key, opt.value, &args)
if err != nil {
return sql.SessionArgs{}, pgerror.Wrapf(err, pgerror.GetPGCode(err), "options")
}
}
default:
err = loadParameter(ctx, key, value, &args)
if err != nil {
return sql.SessionArgs{}, err
}
}
}
if !foundBufferSize && sv != nil {
// The client did not provide buffer_size; use the cluster setting as default.
args.ConnResultsBufferSize = connResultsBufferSize.Get(sv)
}
// TODO(richardjcai): When connecting to the database, we'll want to
// check for CONNECT privilege on the database. #59875.
if _, ok := args.SessionDefaults["database"]; !ok {
// CockroachDB-specific behavior: if no database is specified,
// default to "defaultdb". In PostgreSQL this would be "postgres".
args.SessionDefaults["database"] = catalogkeys.DefaultDatabaseName
}
// The client might override the application name,
// which would prevent it from being counted in telemetry.
// We've decided that this noise in the data is acceptable.
if appName, ok := args.SessionDefaults["application_name"]; ok {
if appName == catconstants.ReportableAppNamePrefix+catconstants.InternalSQLAppName {
telemetry.Inc(sqltelemetry.CockroachShellCounter)
}
}
return args, nil
}
func loadParameter(ctx context.Context, key, value string, args *sql.SessionArgs) error {
key = strings.ToLower(key)
exists, configurable := sql.IsSessionVariableConfigurable(key)
switch {
case exists && configurable:
args.SessionDefaults[key] = value
case sql.IsCustomOptionSessionVariable(key):
args.CustomOptionSessionDefaults[key] = value
case !exists:
if _, ok := sql.UnsupportedVars[key]; ok {
counter := sqltelemetry.UnimplementedClientStatusParameterCounter(key)
telemetry.Inc(counter)
}
log.Warningf(ctx, "unknown configuration parameter: %q", key)
case !configurable:
return pgerror.Newf(pgcode.CantChangeRuntimeParam,
"parameter %q cannot be changed", key)
}
return nil
}
// option represents an option argument passed in the connection URL.
type option struct {
key string
value string
}
// parseOptions parses the given string into the options. The options must be
// separated by space and have one of the following patterns:
// '-c key=value', '-ckey=value', '--key=value'
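// For example, after unescaping, the options string
// "-c search_path=public --default-transaction-isolation=serializable"
// yields the pairs {search_path, public} and
// {default_transaction_isolation, serializable}.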
func parseOptions(optionsString string) ([]option, error) {
var res []option
optionsRaw, err := url.QueryUnescape(optionsString)
if err != nil {
return nil, pgerror.Newf(pgcode.ProtocolViolation, "failed to unescape options %q", optionsString)
}
lastWasDashC := false
opts := splitOptions(optionsRaw)
for i := 0; i < len(opts); i++ {
prefix := ""
if len(opts[i]) > 1 {
prefix = opts[i][:2]
}
switch {
case opts[i] == "-c":
lastWasDashC = true
continue
case lastWasDashC:
lastWasDashC = false
			// If the last option was '-c', parse the current option without
			// regard to the prefix.
prefix = ""
case prefix == "--" || prefix == "-c":
lastWasDashC = false
default:
return nil, pgerror.Newf(pgcode.ProtocolViolation,
"option %q is invalid, must have prefix '-c' or '--'", opts[i])
}
opt, err := splitOption(opts[i], prefix)
if err != nil {
return nil, err
}
res = append(res, opt)
}
return res, nil
}
// splitOptions slices the given string into substrings separated by space
// unless the space is escaped using backslashes '\\'. It also skips multiple
// subsequent spaces.
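// For example, "-c a=b\ c --d=e" is split into ["-c", "a=b c", "--d=e"].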
func splitOptions(options string) []string {
var res []string
var sb strings.Builder
i := 0
for i < len(options) {
sb.Reset()
// skip leading space
for i < len(options) && unicode.IsSpace(rune(options[i])) {
i++
}
if i == len(options) {
break
}
lastWasEscape := false
for i < len(options) {
if unicode.IsSpace(rune(options[i])) && !lastWasEscape {
break
}
if !lastWasEscape && options[i] == '\\' {
lastWasEscape = true
} else {
lastWasEscape = false
sb.WriteByte(options[i])
}
i++
}
res = append(res, sb.String())
}
return res
}
// splitOption splits the given opt argument into substrings separated by '='.
// It returns an error if the given option does not comply with the pattern
// "key=value" and the number of elements in the result is not two.
// splitOption removes the prefix from the key and replaces '-' with '_' so
// "--option-name=value" becomes [option_name, value].
func splitOption(opt, prefix string) (option, error) {
kv := strings.Split(opt, "=")
if len(kv) != 2 {
return option{}, pgerror.Newf(pgcode.ProtocolViolation,
"option %q is invalid, check '='", opt)
}
kv[0] = strings.TrimPrefix(kv[0], prefix)
return option{key: strings.ReplaceAll(kv[0], "-", "_"), value: kv[1]}, nil
}
// Note: Usage of an env var here makes it possible to unconditionally
// enable this feature when cluster settings do not work reliably,
// e.g. in multi-tenant setups in v20.2. This override mechanism can
// be removed after all of CC is moved to use v21.1 or a version which
// supports cluster settings.
var trustClientProvidedRemoteAddrOverride = envutil.EnvOrDefaultBool("COCKROACH_TRUST_CLIENT_PROVIDED_SQL_REMOTE_ADDR", false)
// TestingSetTrustClientProvidedRemoteAddr is used in tests.
func (s *Server) TestingSetTrustClientProvidedRemoteAddr(b bool) func() {
prev := s.trustClientProvidedRemoteAddr.Get()
s.trustClientProvidedRemoteAddr.Set(b)
return func() { s.trustClientProvidedRemoteAddr.Set(prev) }
}
// maybeUpgradeToSecureConn upgrades the connection to TLS/SSL if
// requested by the client, and available in the server configuration.
func (s *Server) maybeUpgradeToSecureConn(
ctx context.Context,
conn net.Conn,
connType hba.ConnType,
version uint32,
buf *pgwirebase.ReadBuffer,
) (newConn net.Conn, newConnType hba.ConnType, newVersion uint32, clientErr, serverErr error) {
// By default, this is a no-op.
newConn = conn
newConnType = connType
newVersion = version
var n int // byte counts
if version != versionSSL {
		// The client did not require an SSL connection.
// Insecure mode: nothing to say, nothing to do.
// TODO(knz): Remove this condition - see
// https://github.com/cockroachdb/cockroach/issues/53404
if s.cfg.Insecure {
return
}
// Secure mode: disallow if TCP and the user did not opt into
// non-TLS SQL conns.
if !s.cfg.AcceptSQLWithoutTLS && connType != hba.ConnLocal {
clientErr = pgerror.New(pgcode.ProtocolViolation, ErrSSLRequired)
}
return
}
if connType == hba.ConnLocal {
// No existing PostgreSQL driver ever tries to activate TLS over
// a unix socket. But in case someone, sometime, somewhere, makes
// that mistake, let them know that we don't want it.
clientErr = pgerror.New(pgcode.ProtocolViolation,
"cannot use SSL/TLS over local connections")
return
}
// Protocol sanity check.
if len(buf.Msg) > 0 {
serverErr = errors.Errorf("unexpected data after SSLRequest: %q", buf.Msg)
return
}
// The client has requested SSL. We're going to try and upgrade the
// connection to use TLS/SSL.
// Do we have a TLS configuration?
tlsConfig, serverErr := s.execCfg.RPCContext.GetServerTLSConfig()
if serverErr != nil {
return
}
if tlsConfig == nil {
// We don't have a TLS configuration available, so we can't honor
// the client's request.
n, serverErr = conn.Write(sslUnsupported)
if serverErr != nil {
return
}
} else {
// We have a TLS configuration. Upgrade the connection.
n, serverErr = conn.Write(sslSupported)
if serverErr != nil {
return
}
newConn = tls.Server(conn, tlsConfig)
newConnType = hba.ConnHostSSL
}
s.metrics.BytesOutCount.Inc(int64(n))
// Finally, re-read the version/command from the client.
newVersion, *buf, serverErr = s.readVersion(newConn)
return
}
// registerConn registers the incoming connection to the map of active connections,
// which can be canceled by a concurrent server drain. It also returns
// the current draining status of the server.
//
// The onCloseFn() callback must be called at the end of the
// connection by the caller.
func (s *Server) registerConn(
ctx context.Context,
) (newCtx context.Context, draining bool, onCloseFn func()) {
onCloseFn = func() {}
newCtx = ctx
s.mu.Lock()
draining = s.mu.draining
if !draining {
var cancel context.CancelFunc
newCtx, cancel = contextutil.WithCancel(ctx)
done := make(chan struct{})
s.mu.connCancelMap[done] = cancel
onCloseFn = func() {
cancel()
close(done)
s.mu.Lock()
delete(s.mu.connCancelMap, done)
s.mu.Unlock()
}
}
s.mu.Unlock()
// If the Server is draining, we will use the connection only to send an
// error, so we don't count it in the stats. This makes sense since
// DrainClient() waits for that number to drop to zero,
// so we don't want it to oscillate unnecessarily.
if !draining {
s.metrics.NewConns.Inc(1)
s.metrics.Conns.Inc(1)
prevOnCloseFn := onCloseFn
onCloseFn = func() { prevOnCloseFn(); s.metrics.Conns.Dec(1) }
}
return
}
// readVersion reads the start-up message, then returns the version
// code (first uint32 in message) and the buffer containing the rest
// of the payload.
func (s *Server) readVersion(
conn io.Reader,
) (version uint32, buf pgwirebase.ReadBuffer, err error) {
var n int
buf = pgwirebase.MakeReadBuffer(
pgwirebase.ReadBufferOptionWithClusterSettings(&s.execCfg.Settings.SV),
)
n, err = buf.ReadUntypedMsg(conn)
if err != nil {
return
}
version, err = buf.GetUint32()
if err != nil {
return
}
s.metrics.BytesInCount.Inc(int64(n))
return
}
// sendErr sends errors to the client during the connection startup
// sequence. Later error sends during/after authentication are handled
// in conn.go.
func (s *Server) sendErr(ctx context.Context, conn net.Conn, err error) error {
msgBuilder := newWriteBuffer(s.metrics.BytesOutCount)
// We could, but do not, report server-side network errors while
// trying to send the client error. This is because clients that
	// receive an error payload are highly correlated with clients
// disconnecting abruptly.
_ /* err */ = writeErr(ctx, &s.execCfg.Settings.SV, err, msgBuilder, conn)
_ = conn.Close()
return err
}
func newAdminShutdownErr(msg string) error {
return pgerror.New(pgcode.AdminShutdown, msg)
}
| pkg/sql/pgwire/server.go | 1 | https://github.com/cockroachdb/cockroach/commit/648299bb6843159e625dacbee11ef177b8e2ced5 | [
0.9981561303138733,
0.009388886392116547,
0.00015930423978716135,
0.00017355124873574823,
0.08861704915761948
] |
{
"id": 2,
"code_window": [
"\t}()\n",
"\n",
"\tif resp != nil && resp.Canceled {\n",
"\t\ts.metrics.PGWireCancelSuccessfulCount.Inc(1)\n",
"\t} else if err != nil {\n",
"\t\tif status := status.Convert(err); status.Code() == codes.ResourceExhausted {\n",
"\t\t\ts.metrics.PGWireCancelIgnoredCount.Inc(1)\n",
"\t\t}\n",
"\t\tlog.Sessions.Warningf(ctx, \"unexpected while handling pgwire cancellation request: %v\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif respStatus := status.Convert(err); respStatus.Code() == codes.ResourceExhausted {\n"
],
"file_path": "pkg/sql/pgwire/server.go",
"type": "replace",
"edit_start_line_idx": 798
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package pgnotice
import (
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/errors"
)
// Notice is a wrapper around errors that are intended to be notices.
type Notice error
// Newf generates a Notice with a format string.
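//
// For example, pgnotice.Newf("relation %q already exists, skipping", name)
// produces a notice with severity NOTICE and the "successful completion"
// SQLSTATE (00000).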
func Newf(format string, args ...interface{}) Notice {
err := errors.NewWithDepthf(1, format, args...)
err = pgerror.WithCandidateCode(err, pgcode.SuccessfulCompletion)
err = pgerror.WithSeverity(err, "NOTICE")
return Notice(err)
}
// NewWithSeverityf generates a Notice with a format string and severity.
func NewWithSeverityf(severity string, format string, args ...interface{}) Notice {
err := errors.NewWithDepthf(1, format, args...)
err = pgerror.WithCandidateCode(err, pgcode.SuccessfulCompletion)
err = pgerror.WithSeverity(err, severity)
return Notice(err)
}
| pkg/sql/pgwire/pgnotice/pgnotice.go | 0 | https://github.com/cockroachdb/cockroach/commit/648299bb6843159e625dacbee11ef177b8e2ced5 | [
0.0009522616164758801,
0.00038099600351415575,
0.00016253790818154812,
0.00020459223014768213,
0.0003307612205389887
] |
{
"id": 2,
"code_window": [
"\t}()\n",
"\n",
"\tif resp != nil && resp.Canceled {\n",
"\t\ts.metrics.PGWireCancelSuccessfulCount.Inc(1)\n",
"\t} else if err != nil {\n",
"\t\tif status := status.Convert(err); status.Code() == codes.ResourceExhausted {\n",
"\t\t\ts.metrics.PGWireCancelIgnoredCount.Inc(1)\n",
"\t\t}\n",
"\t\tlog.Sessions.Warningf(ctx, \"unexpected while handling pgwire cancellation request: %v\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif respStatus := status.Convert(err); respStatus.Code() == codes.ResourceExhausted {\n"
],
"file_path": "pkg/sql/pgwire/server.go",
"type": "replace",
"edit_start_line_idx": 798
} | <svg version="1.1" xmlns="http://www.w3.org/2000/svg" x="0px" y="0px" viewBox="0 0 24 24" xml:space="preserve" width="24" height="24">
<g class="nc-icon-wrapper" fill="#5f6c87">
<polyline data-color="color-2" fill="none" stroke="#5f6c87" stroke-width="2" stroke-linecap="round" stroke-miterlimit="10" points=" 8,5 8,1 16,1 16,5 " stroke-linejoin="round"/> <polyline fill="none" stroke="#5f6c87" stroke-width="2" stroke-linecap="round" stroke-miterlimit="10" points="9,15 1,15 1,5 23,5 23,15 15,15 " stroke-linejoin="round"></polyline> <polyline fill="none" stroke="#5f6c87" stroke-width="2" stroke-linecap="round" stroke-miterlimit="10" points="22,18 22,23 2,23 2,18 " stroke-linejoin="round"></polyline> <rect data-color="color-2" x="9" y="13" fill="none" stroke="#5f6c87" stroke-width="2" stroke-linecap="round" stroke-miterlimit="10" width="6" height="4" stroke-linejoin="round"/>
</g>
</svg>
| pkg/ui/workspaces/cluster-ui/src/assets/sidebarIcons/jobs.svg | 0 | https://github.com/cockroachdb/cockroach/commit/648299bb6843159e625dacbee11ef177b8e2ced5 | [
0.0001675247331149876,
0.0001675247331149876,
0.0001675247331149876,
0.0001675247331149876,
0
] |
{
"id": 2,
"code_window": [
"\t}()\n",
"\n",
"\tif resp != nil && resp.Canceled {\n",
"\t\ts.metrics.PGWireCancelSuccessfulCount.Inc(1)\n",
"\t} else if err != nil {\n",
"\t\tif status := status.Convert(err); status.Code() == codes.ResourceExhausted {\n",
"\t\t\ts.metrics.PGWireCancelIgnoredCount.Inc(1)\n",
"\t\t}\n",
"\t\tlog.Sessions.Warningf(ctx, \"unexpected while handling pgwire cancellation request: %v\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif respStatus := status.Convert(err); respStatus.Code() == codes.ResourceExhausted {\n"
],
"file_path": "pkg/sql/pgwire/server.go",
"type": "replace",
"edit_start_line_idx": 798
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
//go:build !windows
// +build !windows
package sdnotify
import (
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"strings"
)
const (
envName = "NOTIFY_SOCKET"
readyMsg = "READY=1"
netType = "unixgram"
)
func ready() error {
return notifyEnv(readyMsg)
}
func notifyEnv(msg string) error {
if path, ok := os.LookupEnv(envName); ok {
return notify(path, msg)
}
return nil
}
func notify(path, msg string) error {
addr := net.UnixAddr{
Net: netType,
Name: path,
}
conn, err := net.DialUnix(netType, nil, &addr)
if err != nil {
return err
}
defer net.Conn(conn).Close()
_, err = conn.Write([]byte(msg))
return err
}
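// bgExec starts the given command with a fresh NOTIFY_SOCKET listener in its
// environment, then blocks until the child either reports readiness (READY=1)
// on that socket or terminates, whichever happens first.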
func bgExec(cmd *exec.Cmd) error {
l, err := listen()
if err != nil {
return err
}
defer func() { _ = l.close() }()
if cmd.Env == nil {
// Default the environment to the parent process's, minus any
// existing versions of our variable.
varPrefix := fmt.Sprintf("%s=", envName)
for _, v := range os.Environ() {
if !strings.HasPrefix(v, varPrefix) {
cmd.Env = append(cmd.Env, v)
}
}
}
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", envName, l.Path))
if err := cmd.Start(); err != nil {
return err
}
// This can leak goroutines, but we don't really care because we
// always exit after calling this function.
ch := make(chan error, 2)
go func() {
ch <- l.wait()
}()
go func() {
ch <- cmd.Wait()
}()
return <-ch
}
type listener struct {
Path string
tempDir string
conn *net.UnixConn
}
func listen() (listener, error) {
dir, err := ioutil.TempDir("", "sdnotify")
if err != nil {
return listener{}, err
}
path := filepath.Join(dir, "notify.sock")
conn, err := net.ListenUnixgram(netType, &net.UnixAddr{
Net: netType,
Name: path,
})
if err != nil {
return listener{}, err
}
l := listener{
Path: path,
tempDir: dir,
conn: conn}
return l, nil
}
func (l listener) wait() error {
buf := make([]byte, len(readyMsg))
for {
n, err := l.conn.Read(buf)
if err != nil {
return err
}
if string(buf[:n]) == readyMsg {
return nil
}
}
}
func (l listener) close() error {
net.Conn(l.conn).Close()
return os.RemoveAll(l.tempDir)
}
| pkg/util/sdnotify/sdnotify_unix.go | 0 | https://github.com/cockroachdb/cockroach/commit/648299bb6843159e625dacbee11ef177b8e2ced5 | [
0.00019535126921255141,
0.00017026123532559723,
0.0001582358672749251,
0.0001694493694230914,
0.000008582203008700162
] |
{
"id": 0,
"code_window": [
"\t\t\t// the logger and close it. No need to also do it here.\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// Apply the stderr sink configuration.\n",
"\tlogging.stderrSink.noColor = config.Sinks.Stderr.NoColor\n",
"\tif err := logging.stderrSinkInfoTemplate.applyConfig(config.Sinks.Stderr.CommonSinkConfig); err != nil {\n",
"\t\tcleanupFn()\n",
"\t\treturn nil, err\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlogging.stderrSink.noColor.Set(config.Sinks.Stderr.NoColor)\n"
],
"file_path": "pkg/util/log/flags.go",
"type": "replace",
"edit_start_line_idx": 227
} | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package log
import (
"context"
"fmt"
"math"
"strings"
"github.com/cockroachdb/cockroach/pkg/util/log/channel"
"github.com/cockroachdb/cockroach/pkg/util/log/logconfig"
"github.com/cockroachdb/cockroach/pkg/util/log/logflags"
"github.com/cockroachdb/cockroach/pkg/util/log/logpb"
"github.com/cockroachdb/cockroach/pkg/util/log/severity"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/errors"
)
type config struct {
// showLogs reflects the use of -show-logs on the command line and is
// used for testing.
showLogs bool
// syncWrites can be set asynchronously to force all file output to
// synchronize to disk. This is set via SetSync() and used e.g. in
// start.go upon encountering errors.
syncWrites syncutil.AtomicBool
}
var debugLog *loggerT
func init() {
logflags.InitFlags(
&logging.showLogs,
&logging.vmoduleConfig.mu.vmodule,
)
	// Set up a default configuration in case the package is imported but not
	// further initialized.
defaultConfig := logconfig.DefaultConfig()
if err := defaultConfig.Validate(nil /* no default directory */); err != nil {
panic(err)
}
// Default stderrThreshold to log everything to the process'
// external stderr (OrigStderr).
defaultConfig.Sinks.Stderr.Filter = severity.INFO
// We only register it for the DEV channels. No other
// channels get a configuration, whereby every channel
// ends up sharing the DEV logger (debugLog).
defaultConfig.Sinks.Stderr.Channels.Channels = []logpb.Channel{channel.DEV}
// We also don't capture internal writes to fd2 by default:
// let the writes go to the external stderr.
defaultConfig.CaptureFd2.Enable = false
// Since we are letting writes go to the external stderr,
// we cannot keep redaction markers there.
*defaultConfig.Sinks.Stderr.Redactable = false
// Remove all sinks other than stderr.
defaultConfig.Sinks.FileGroups = nil
if _, err := ApplyConfig(defaultConfig); err != nil {
panic(err)
}
	// Reset the "active" flag so that the main commands can reset the
// configuration.
logging.mu.active = false
}
// IsActive returns true iff the main logger already has some events
// logged, or some secondary logger was created with configuration
// taken from the main logger.
//
// This is used to assert that configuration is performed
// before logging has been used for the first time.
func IsActive() (active bool, firstUse string) {
logging.mu.Lock()
defer logging.mu.Unlock()
return logging.mu.active, logging.mu.firstUseStack
}
// ApplyConfig applies the given configuration.
//
// The returned cleanup fn can be invoked by the caller to close
// asynchronous processes.
// NB: This is only useful in tests: for a long-running server process the
// cleanup function should likely not be called, to ensure that the
// file used to capture internal fd2 writes remains open up until the
// process entirely terminates. This ensures that any Go runtime
// assertion failures on the way to termination can be properly
// captured.
func ApplyConfig(config logconfig.Config) (cleanupFn func(), err error) {
// Sanity check.
if active, firstUse := IsActive(); active {
panic(errors.Newf("logging already active; first use:\n%s", firstUse))
}
// Our own cancellable context to stop the secondary loggers below.
//
// Note: we don't want to take a cancellable context from the
// caller, because in the usual case we don't want to stop the
// logger when the remainder of the process stops. See the
// discussion on cancel at the top of the function.
secLoggersCtx, secLoggersCancel := context.WithCancel(context.Background())
// secLoggers collects the secondary loggers derived by the configuration.
var secLoggers []*loggerT
// sinkInfos collects the sinkInfos derived by the configuration.
var sinkInfos []*sinkInfo
// fd2CaptureCleanupFn is the cleanup function for the fd2 capture,
// which is populated if fd2 capture is enabled, below.
fd2CaptureCleanupFn := func() {}
// cleanupFn is the returned cleanup function, whose purpose
// is to tear down the work we are doing here.
cleanupFn = func() {
// Reset the logging channels to default.
si := logging.stderrSinkInfoTemplate
logging.setChannelLoggers(make(map[Channel]*loggerT), &si)
fd2CaptureCleanupFn()
secLoggersCancel()
for _, l := range secLoggers {
allLoggers.del(l)
}
for _, l := range sinkInfos {
allSinkInfos.del(l)
}
}
// If capture of internal fd2 writes is enabled, set it up here.
if config.CaptureFd2.Enable {
if logging.testingFd2CaptureLogger != nil {
cleanupFn()
return nil, errors.New("fd2 capture already set up. Maybe use TestLogScope?")
}
// We use a secondary logger, even though no logging *event* will ever
// be logged to it, for the convenience of getting a standard log
// file header at the beginning of the file (which will contain
// a timestamp, command-line arguments, etc.).
secLogger := &loggerT{}
allLoggers.put(secLogger)
secLoggers = append(secLoggers, secLogger)
// A pseudo file sink. Again, for convenience, so we don't need
// to implement separate file management.
bt, bf := true, false
mf := logconfig.ByteSize(math.MaxInt64)
f := logconfig.DefaultFileFormat
fakeConfig := logconfig.FileConfig{
CommonSinkConfig: logconfig.CommonSinkConfig{
Filter: severity.INFO,
Criticality: &bt,
Format: &f,
Redact: &bf,
// Be careful about stripping the redaction markers from log
// entries. The captured fd2 writes are inherently unsafe, so
// we don't want the header entry to give a mistaken
// impression to the entry parser.
Redactable: &bf,
},
Dir: config.CaptureFd2.Dir,
MaxGroupSize: config.CaptureFd2.MaxGroupSize,
MaxFileSize: &mf,
SyncWrites: &bt,
}
fileSinkInfo, fileSink, err := newFileSinkInfo("stderr", fakeConfig)
if err != nil {
cleanupFn()
return nil, err
}
sinkInfos = append(sinkInfos, fileSinkInfo)
allSinkInfos.put(fileSinkInfo)
if fileSink.logFilesCombinedMaxSize > 0 {
			// Do an initial round of GC, to clean up files accumulated in past runs.
fileSink.gcOldFiles()
// Start the GC process. This ensures that old capture files get
// erased as new files get created.
go fileSink.gcDaemon(secLoggersCtx)
}
// Connect the sink to the logger.
secLogger.sinkInfos = []*sinkInfo{fileSinkInfo}
// Force a log entry. This does two things: it forces the creation
// of a file and it also introduces a timestamp marker.
entry := MakeEntry(secLoggersCtx, severity.INFO, channel.DEV, 0, false,
"stderr capture started")
secLogger.outputLogEntry(entry)
// Now tell this logger to capture internal stderr writes.
if err := fileSink.takeOverInternalStderr(secLogger); err != nil {
// Oof, it turns out we can't use this logger after all. Give up
// on everything we did.
cleanupFn()
return nil, err
}
// Now inform the other functions using stderrLog that we
// have a new logger for it.
logging.testingFd2CaptureLogger = secLogger
fd2CaptureCleanupFn = func() {
// Relinquish the stderr redirect.
if err := secLogger.getFileSink().relinquishInternalStderr(); err != nil {
// This should not fail. If it does, some caller messed up by
// switching over stderr redirection to a different logger
// without our involvement. That's invalid API usage.
panic(err)
}
// Restore the apparent stderr logger used by Shout() and tests.
logging.testingFd2CaptureLogger = nil
// Note: the remainder of the code in cleanupFn() will remove
// the logger and close it. No need to also do it here.
}
}
// Apply the stderr sink configuration.
logging.stderrSink.noColor = config.Sinks.Stderr.NoColor
if err := logging.stderrSinkInfoTemplate.applyConfig(config.Sinks.Stderr.CommonSinkConfig); err != nil {
cleanupFn()
return nil, err
}
// Create the per-channel loggers.
chans := make(map[Channel]*loggerT, len(logpb.Channel_name))
for chi := range logpb.Channel_name {
ch := Channel(chi)
chans[ch] = &loggerT{}
if ch == channel.DEV {
debugLog = chans[ch]
}
}
// Make a copy of the template so that any subsequent config
// changes don't race with logging operations.
stderrSinkInfo := logging.stderrSinkInfoTemplate
// Connect the stderr channels.
for _, ch := range config.Sinks.Stderr.Channels.Channels {
// Note: we connect stderr even if the severity is NONE
// so that tests can raise the severity after configuration.
l := chans[ch]
l.sinkInfos = append(l.sinkInfos, &stderrSinkInfo)
}
// Create the file sinks.
for prefix, fc := range config.Sinks.FileGroups {
if fc.Filter == severity.NONE || fc.Dir == nil {
continue
}
if prefix == "default" {
prefix = ""
}
fileSinkInfo, _, err := newFileSinkInfo(prefix, *fc)
if err != nil {
cleanupFn()
return nil, err
}
sinkInfos = append(sinkInfos, fileSinkInfo)
allSinkInfos.put(fileSinkInfo)
// Connect the channels for this sink.
for _, ch := range fc.Channels.Channels {
l := chans[ch]
l.sinkInfos = append(l.sinkInfos, fileSinkInfo)
}
}
logging.setChannelLoggers(chans, &stderrSinkInfo)
setActive()
return cleanupFn, nil
}
// newFileSinkInfo creates a new fileSink and its accompanying sinkInfo
// from the provided configuration.
func newFileSinkInfo(fileNamePrefix string, c logconfig.FileConfig) (*sinkInfo, *fileSink, error) {
info := &sinkInfo{}
if err := info.applyConfig(c.CommonSinkConfig); err != nil {
return nil, nil, err
}
fileSink := newFileSink(
*c.Dir,
fileNamePrefix,
*c.SyncWrites,
int64(*c.MaxFileSize),
int64(*c.MaxGroupSize),
info.getStartLines)
info.sink = fileSink
return info, fileSink, nil
}
// applyConfig applies a common sink configuration to a sinkInfo.
func (l *sinkInfo) applyConfig(c logconfig.CommonSinkConfig) error {
l.threshold = c.Filter
l.redact = *c.Redact
l.redactable = *c.Redactable
l.editor = getEditor(SelectEditMode(*c.Redact, *c.Redactable))
l.criticality = *c.Criticality
f, ok := formatters[*c.Format]
if !ok {
return errors.Newf("unknown format: %q", *c.Format)
}
l.formatter = f
return nil
}
// describeAppliedConfig reports a sinkInfo's configuration as a
// CommonSinkConfig. Note that the returned config object
// holds onto the sinkInfo parameters by reference and thus should
// not be reused if the configuration can change asynchronously.
func (l *sinkInfo) describeAppliedConfig() (c logconfig.CommonSinkConfig) {
c.Filter = l.threshold
c.Redact = &l.redact
c.Redactable = &l.redactable
c.Criticality = &l.criticality
f := l.formatter.formatterName()
c.Format = &f
return c
}
// TestingResetActive clears the active bit. This is for use in tests
// that use stderr redirection alongside other tests that use
// logging.
func TestingResetActive() {
logging.mu.Lock()
defer logging.mu.Unlock()
logging.mu.active = false
}
// DescribeAppliedConfig describes the current setup as effected by
// ApplyConfig(). This is useful in tests and also to check
// when something may be wrong with the logging configuration.
func DescribeAppliedConfig() string {
var config logconfig.Config
// Describe the fd2 capture, if installed.
if logging.testingFd2CaptureLogger != nil {
config.CaptureFd2.Enable = true
fs := logging.testingFd2CaptureLogger.sinkInfos[0].sink.(*fileSink)
fs.mu.Lock()
dir := fs.mu.logDir
fs.mu.Unlock()
config.CaptureFd2.Dir = &dir
m := logconfig.ByteSize(fs.logFilesCombinedMaxSize)
config.CaptureFd2.MaxGroupSize = &m
}
// Describe the stderr sink.
config.Sinks.Stderr.NoColor = logging.stderrSink.noColor
config.Sinks.Stderr.CommonSinkConfig = logging.stderrSinkInfoTemplate.describeAppliedConfig()
describeConnections := func(l *loggerT, ch Channel,
target *sinkInfo, list *logconfig.ChannelList) {
for _, s := range l.sinkInfos {
if s == target {
list.Channels = append(list.Channels, ch)
}
}
list.Sort()
}
// Describe the connections to the stderr sink.
logging.rmu.RLock()
chans := logging.rmu.channels
stderrSinkInfo := logging.rmu.currentStderrSinkInfo
logging.rmu.RUnlock()
for ch, logger := range chans {
describeConnections(logger, ch,
stderrSinkInfo, &config.Sinks.Stderr.Channels)
}
// Describe the file sinks.
config.Sinks.FileGroups = make(map[string]*logconfig.FileConfig)
_ = allSinkInfos.iter(func(l *sinkInfo) error {
if cl := logging.testingFd2CaptureLogger; cl != nil && cl.sinkInfos[0] == l {
// Not a real sink. Omit.
return nil
}
fileSink, ok := l.sink.(*fileSink)
if !ok {
return nil
}
fc := &logconfig.FileConfig{}
fc.CommonSinkConfig = l.describeAppliedConfig()
mf := logconfig.ByteSize(fileSink.logFileMaxSize)
fc.MaxFileSize = &mf
mg := logconfig.ByteSize(fileSink.logFilesCombinedMaxSize)
fc.MaxGroupSize = &mg
fileSink.mu.Lock()
dir := fileSink.mu.logDir
fileSink.mu.Unlock()
fc.Dir = &dir
fc.SyncWrites = &fileSink.syncWrites
// Describe the connections to this file sink.
for ch, logger := range chans {
describeConnections(logger, ch, l, &fc.Channels)
}
prefix := strings.TrimPrefix(fileSink.prefix, program)
if prefix == "" {
prefix = "default"
} else {
prefix = strings.TrimPrefix(prefix, "-")
}
if prev, ok := config.Sinks.FileGroups[prefix]; ok {
fmt.Fprintf(OrigStderr,
"warning: multiple file loggers with prefix %q, previous: %+v\n",
prefix, prev)
}
config.Sinks.FileGroups[prefix] = fc
return nil
})
// Note: we cannot return 'config' directly, because this captures
// certain variables from the loggers by reference and thus could be
// invalidated by concurrent uses of ApplyConfig().
return config.String()
}
| pkg/util/log/flags.go | 1 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.9983525276184082,
0.051143087446689606,
0.00016199173114728183,
0.0018236697651445866,
0.19967058300971985
] |
{
"id": 0,
"code_window": [
"\t\t\t// the logger and close it. No need to also do it here.\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// Apply the stderr sink configuration.\n",
"\tlogging.stderrSink.noColor = config.Sinks.Stderr.NoColor\n",
"\tif err := logging.stderrSinkInfoTemplate.applyConfig(config.Sinks.Stderr.CommonSinkConfig); err != nil {\n",
"\t\tcleanupFn()\n",
"\t\treturn nil, err\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlogging.stderrSink.noColor.Set(config.Sinks.Stderr.NoColor)\n"
],
"file_path": "pkg/util/log/flags.go",
"type": "replace",
"edit_start_line_idx": 227
} | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tree
// AlterSequence represents an ALTER SEQUENCE statement, except in the case of
// ALTER SEQUENCE <seqName> RENAME TO <newSeqName>, which is represented by a
// RenameTable node.
type AlterSequence struct {
IfExists bool
Name *UnresolvedObjectName
Options SequenceOptions
}
// Format implements the NodeFormatter interface.
func (node *AlterSequence) Format(ctx *FmtCtx) {
ctx.WriteString("ALTER SEQUENCE ")
if node.IfExists {
ctx.WriteString("IF EXISTS ")
}
ctx.FormatNode(node.Name)
ctx.FormatNode(&node.Options)
}
| pkg/sql/sem/tree/alter_sequence.go | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.00018022207950707525,
0.00017112161731347442,
0.00016175296332221478,
0.0001712556986603886,
0.00000659208762954222
] |
{
"id": 0,
"code_window": [
"\t\t\t// the logger and close it. No need to also do it here.\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// Apply the stderr sink configuration.\n",
"\tlogging.stderrSink.noColor = config.Sinks.Stderr.NoColor\n",
"\tif err := logging.stderrSinkInfoTemplate.applyConfig(config.Sinks.Stderr.CommonSinkConfig); err != nil {\n",
"\t\tcleanupFn()\n",
"\t\treturn nil, err\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlogging.stderrSink.noColor.Set(config.Sinks.Stderr.NoColor)\n"
],
"file_path": "pkg/util/log/flags.go",
"type": "replace",
"edit_start_line_idx": 227
} | // Code generated by TestPretty. DO NOT EDIT.
// GENERATED FILE DO NOT EDIT
1:
-
IMPORT
TABLE
t (
i
INT8,
s
STRING
)
CSV
DATA (
$1
)
6:
------
IMPORT
TABLE t (
i
INT8,
s
STRING
)
CSV DATA (
$1
)
15:
---------------
IMPORT
TABLE t (
i INT8,
s STRING
)
CSV DATA (
$1
)
16:
----------------
IMPORT
TABLE t (
i INT8,
s STRING
)
CSV DATA ($1)
24:
------------------------
IMPORT
TABLE t (
i INT8, s STRING
)
CSV DATA ($1)
27:
---------------------------
IMPORT
TABLE t (i INT8, s STRING)
CSV DATA ($1)
47:
-----------------------------------------------
IMPORT TABLE t (i INT8, s STRING) CSV DATA ($1)
| pkg/sql/sem/tree/testdata/pretty/import4.align-only.golden | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.0001744999026414007,
0.00016980736108962446,
0.0001608124002814293,
0.00016994985344354063,
0.000004057644673594041
] |
{
"id": 0,
"code_window": [
"\t\t\t// the logger and close it. No need to also do it here.\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// Apply the stderr sink configuration.\n",
"\tlogging.stderrSink.noColor = config.Sinks.Stderr.NoColor\n",
"\tif err := logging.stderrSinkInfoTemplate.applyConfig(config.Sinks.Stderr.CommonSinkConfig); err != nil {\n",
"\t\tcleanupFn()\n",
"\t\treturn nil, err\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlogging.stderrSink.noColor.Set(config.Sinks.Stderr.NoColor)\n"
],
"file_path": "pkg/util/log/flags.go",
"type": "replace",
"edit_start_line_idx": 227
} | # This logic test fails in the 3node-tenant configuration because the keys are
# prefixed with the tenant ID if run by a tenant:
# https://github.com/cockroachdb/cockroach/issues/49582
# LogicTest: !3node-tenant
statement ok
CREATE TABLE b(
a int primary key,
geog geography(geometry, 4326),
geom geometry(point),
FAMILY (a, geog, geom)
)
statement ok
CREATE TABLE c(
a int primary key,
geog geography(geometry, 4326),
geom geometry(point),
FAMILY (a, geog, geom),
INVERTED INDEX (geog),
INVERTED INDEX (geom)
)
query T kvtrace
INSERT INTO b VALUES
(1, 'POINT(1.0 1.0)', 'POINT(2.0 2.0)'),
(2, 'LINESTRING(1.0 1.0, 2.0 2.0)', 'POINT(1.0 1.0)')
----
CPut /Table/53/1/1/0 -> /TUPLE/
CPut /Table/53/1/2/0 -> /TUPLE/
query T kvtrace
INSERT INTO c VALUES
(1, 'POINT(1.0 1.0)', 'POINT(2.0 2.0)'),
(2, 'LINESTRING(1.0 1.0, 2.0 2.0)', 'POINT(1.0 1.0)')
----
CPut /Table/54/1/1/0 -> /TUPLE/
InitPut /Table/54/2/"B\xfd\x10\x01D\x15@\x80K\xd5\x01?\x91\xdfF\xa2R\x9d9?\x91\xdfF\xa2R\x9d9\x89\x88" -> /BYTES/
InitPut /Table/54/3/"B\xfd\x10\x00\x00\x00\x00\x00\x00\x01\x01@\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x89\x88" -> /BYTES/
CPut /Table/54/1/2/0 -> /TUPLE/
InitPut /Table/54/2/"B\xfd\x10\x01P\x00\x00\x00\x00\x00\x00?\x91\xdfF\xa2R\x9d8?\x91\xdfF\xa2R\x9c\xb9?\xa1\xdfF\xa2R\x9d9?\xa1\xdfF\xa2R\x9dx\x8a\x88" -> /BYTES/
InitPut /Table/54/2/"B\xfd\x10\x03\xff\xff\xfc\x00\x00\x00\x00?\x91\xdfF\xa2R\x9d8?\x91\xdfF\xa2R\x9c\xb9?\xa1\xdfF\xa2R\x9d9?\xa1\xdfF\xa2R\x9dx\x8a\x88" -> /BYTES/
InitPut /Table/54/2/"B\xfd\x10\x05\x00\x00\x00\x00\x00\x00\x00?\x91\xdfF\xa2R\x9d8?\x91\xdfF\xa2R\x9c\xb9?\xa1\xdfF\xa2R\x9d9?\xa1\xdfF\xa2R\x9dx\x8a\x88" -> /BYTES/
InitPut /Table/54/3/"B\xfd\x10\x00\x00\x00\x00\x00\x00\x01\x01?\xf0\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00\x8a\x88" -> /BYTES/
statement ok
CREATE INVERTED INDEX geog_idx ON b(geog)
statement ok
CREATE INVERTED INDEX geom_idx ON b(geom)
query T kvtrace
INSERT INTO b VALUES
(3, 'POINT(1.0 1.0)', 'POINT(2.0 2.0)'),
(4, 'LINESTRING(1.0 1.0, 2.0 2.0)', 'POINT(1.0 1.0)')
----
CPut /Table/53/1/3/0 -> /TUPLE/
InitPut /Table/53/2/"B\xfd\x10\x01D\x15@\x80K\xd5\x01?\x91\xdfF\xa2R\x9d9?\x91\xdfF\xa2R\x9d9\x8b\x88" -> /BYTES/
InitPut /Table/53/3/"B\xfd\x10\x00\x00\x00\x00\x00\x00\x01\x01@\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x8b\x88" -> /BYTES/
CPut /Table/53/1/4/0 -> /TUPLE/
InitPut /Table/53/2/"B\xfd\x10\x01P\x00\x00\x00\x00\x00\x00?\x91\xdfF\xa2R\x9d8?\x91\xdfF\xa2R\x9c\xb9?\xa1\xdfF\xa2R\x9d9?\xa1\xdfF\xa2R\x9dx\x8c\x88" -> /BYTES/
InitPut /Table/53/2/"B\xfd\x10\x03\xff\xff\xfc\x00\x00\x00\x00?\x91\xdfF\xa2R\x9d8?\x91\xdfF\xa2R\x9c\xb9?\xa1\xdfF\xa2R\x9d9?\xa1\xdfF\xa2R\x9dx\x8c\x88" -> /BYTES/
InitPut /Table/53/2/"B\xfd\x10\x05\x00\x00\x00\x00\x00\x00\x00?\x91\xdfF\xa2R\x9d8?\x91\xdfF\xa2R\x9c\xb9?\xa1\xdfF\xa2R\x9d9?\xa1\xdfF\xa2R\x9dx\x8c\x88" -> /BYTES/
InitPut /Table/53/3/"B\xfd\x10\x00\x00\x00\x00\x00\x00\x01\x01?\xf0\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00\x8c\x88" -> /BYTES/
statement ok
CREATE TABLE ltable(
k int primary key,
geom geometry
)
statement ok
CREATE TABLE rtable(
k int primary key,
geom geometry,
INVERTED INDEX geom_index(geom)
)
statement ok
SELECT url FROM [EXPLAIN (DISTSQL)
SELECT ltable.k, rtable.k FROM ltable JOIN rtable@geom_index ON ST_Intersects(ltable.geom, rtable.geom)]
| pkg/sql/opt/exec/execbuilder/testdata/geospatial | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.001789439469575882,
0.0005239194142632186,
0.00016312883235514164,
0.00016853652778081596,
0.0006647986592724919
] |
{
"id": 1,
"code_window": [
"\t\tm := logconfig.ByteSize(fs.logFilesCombinedMaxSize)\n",
"\t\tconfig.CaptureFd2.MaxGroupSize = &m\n",
"\t}\n",
"\n",
"\t// Describe the stderr sink.\n",
"\tconfig.Sinks.Stderr.NoColor = logging.stderrSink.noColor\n",
"\tconfig.Sinks.Stderr.CommonSinkConfig = logging.stderrSinkInfoTemplate.describeAppliedConfig()\n",
"\n",
"\tdescribeConnections := func(l *loggerT, ch Channel,\n",
"\t\ttarget *sinkInfo, list *logconfig.ChannelList) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tconfig.Sinks.Stderr.NoColor = logging.stderrSink.noColor.Get()\n"
],
"file_path": "pkg/util/log/flags.go",
"type": "replace",
"edit_start_line_idx": 358
} | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package log
import (
"context"
"fmt"
"math"
"strings"
"github.com/cockroachdb/cockroach/pkg/util/log/channel"
"github.com/cockroachdb/cockroach/pkg/util/log/logconfig"
"github.com/cockroachdb/cockroach/pkg/util/log/logflags"
"github.com/cockroachdb/cockroach/pkg/util/log/logpb"
"github.com/cockroachdb/cockroach/pkg/util/log/severity"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/errors"
)
type config struct {
// showLogs reflects the use of -show-logs on the command line and is
// used for testing.
showLogs bool
// syncWrites can be set asynchronously to force all file output to
// synchronize to disk. This is set via SetSync() and used e.g. in
// start.go upon encountering errors.
syncWrites syncutil.AtomicBool
}
var debugLog *loggerT
func init() {
logflags.InitFlags(
&logging.showLogs,
&logging.vmoduleConfig.mu.vmodule,
)
	// Set up a sensible default configuration for the case where the
	// package is imported but not further initialized.
defaultConfig := logconfig.DefaultConfig()
if err := defaultConfig.Validate(nil /* no default directory */); err != nil {
panic(err)
}
// Default stderrThreshold to log everything to the process'
// external stderr (OrigStderr).
defaultConfig.Sinks.Stderr.Filter = severity.INFO
	// We only register it for the DEV channel. No other
	// channels get a configuration, so every channel
// ends up sharing the DEV logger (debugLog).
defaultConfig.Sinks.Stderr.Channels.Channels = []logpb.Channel{channel.DEV}
// We also don't capture internal writes to fd2 by default:
// let the writes go to the external stderr.
defaultConfig.CaptureFd2.Enable = false
// Since we are letting writes go to the external stderr,
// we cannot keep redaction markers there.
*defaultConfig.Sinks.Stderr.Redactable = false
// Remove all sinks other than stderr.
defaultConfig.Sinks.FileGroups = nil
if _, err := ApplyConfig(defaultConfig); err != nil {
panic(err)
}
	// Reset the "active" flag so that the main commands can reset the
// configuration.
logging.mu.active = false
}
// IsActive returns true iff the main logger already has some events
// logged, or some secondary logger was created with configuration
// taken from the main logger.
//
// This is used to assert that configuration is performed
// before logging has been used for the first time.
func IsActive() (active bool, firstUse string) {
logging.mu.Lock()
defer logging.mu.Unlock()
return logging.mu.active, logging.mu.firstUseStack
}
// ApplyConfig applies the given configuration.
//
// The returned cleanup fn can be invoked by the caller to close
// asynchronous processes.
// NB: This is only useful in tests: for a long-running server process the
// cleanup function should likely not be called, to ensure that the
// file used to capture internal fd2 writes remains open up until the
// process entirely terminates. This ensures that any Go runtime
// assertion failures on the way to termination can be properly
// captured.
func ApplyConfig(config logconfig.Config) (cleanupFn func(), err error) {
// Sanity check.
if active, firstUse := IsActive(); active {
panic(errors.Newf("logging already active; first use:\n%s", firstUse))
}
// Our own cancellable context to stop the secondary loggers below.
//
// Note: we don't want to take a cancellable context from the
// caller, because in the usual case we don't want to stop the
// logger when the remainder of the process stops. See the
// discussion on cancel at the top of the function.
secLoggersCtx, secLoggersCancel := context.WithCancel(context.Background())
// secLoggers collects the secondary loggers derived by the configuration.
var secLoggers []*loggerT
// sinkInfos collects the sinkInfos derived by the configuration.
var sinkInfos []*sinkInfo
// fd2CaptureCleanupFn is the cleanup function for the fd2 capture,
// which is populated if fd2 capture is enabled, below.
fd2CaptureCleanupFn := func() {}
// cleanupFn is the returned cleanup function, whose purpose
// is to tear down the work we are doing here.
cleanupFn = func() {
// Reset the logging channels to default.
si := logging.stderrSinkInfoTemplate
logging.setChannelLoggers(make(map[Channel]*loggerT), &si)
fd2CaptureCleanupFn()
secLoggersCancel()
for _, l := range secLoggers {
allLoggers.del(l)
}
for _, l := range sinkInfos {
allSinkInfos.del(l)
}
}
// If capture of internal fd2 writes is enabled, set it up here.
if config.CaptureFd2.Enable {
if logging.testingFd2CaptureLogger != nil {
cleanupFn()
return nil, errors.New("fd2 capture already set up. Maybe use TestLogScope?")
}
// We use a secondary logger, even though no logging *event* will ever
// be logged to it, for the convenience of getting a standard log
// file header at the beginning of the file (which will contain
// a timestamp, command-line arguments, etc.).
secLogger := &loggerT{}
allLoggers.put(secLogger)
secLoggers = append(secLoggers, secLogger)
// A pseudo file sink. Again, for convenience, so we don't need
// to implement separate file management.
bt, bf := true, false
mf := logconfig.ByteSize(math.MaxInt64)
f := logconfig.DefaultFileFormat
fakeConfig := logconfig.FileConfig{
CommonSinkConfig: logconfig.CommonSinkConfig{
Filter: severity.INFO,
Criticality: &bt,
Format: &f,
Redact: &bf,
// Be careful about stripping the redaction markers from log
// entries. The captured fd2 writes are inherently unsafe, so
// we don't want the header entry to give a mistaken
// impression to the entry parser.
Redactable: &bf,
},
Dir: config.CaptureFd2.Dir,
MaxGroupSize: config.CaptureFd2.MaxGroupSize,
MaxFileSize: &mf,
SyncWrites: &bt,
}
fileSinkInfo, fileSink, err := newFileSinkInfo("stderr", fakeConfig)
if err != nil {
cleanupFn()
return nil, err
}
sinkInfos = append(sinkInfos, fileSinkInfo)
allSinkInfos.put(fileSinkInfo)
if fileSink.logFilesCombinedMaxSize > 0 {
			// Do an initial round of GC to clear up previously accumulated files.
fileSink.gcOldFiles()
// Start the GC process. This ensures that old capture files get
// erased as new files get created.
go fileSink.gcDaemon(secLoggersCtx)
}
// Connect the sink to the logger.
secLogger.sinkInfos = []*sinkInfo{fileSinkInfo}
// Force a log entry. This does two things: it forces the creation
// of a file and it also introduces a timestamp marker.
entry := MakeEntry(secLoggersCtx, severity.INFO, channel.DEV, 0, false,
"stderr capture started")
secLogger.outputLogEntry(entry)
// Now tell this logger to capture internal stderr writes.
if err := fileSink.takeOverInternalStderr(secLogger); err != nil {
// Oof, it turns out we can't use this logger after all. Give up
// on everything we did.
cleanupFn()
return nil, err
}
// Now inform the other functions using stderrLog that we
// have a new logger for it.
logging.testingFd2CaptureLogger = secLogger
fd2CaptureCleanupFn = func() {
// Relinquish the stderr redirect.
if err := secLogger.getFileSink().relinquishInternalStderr(); err != nil {
// This should not fail. If it does, some caller messed up by
// switching over stderr redirection to a different logger
// without our involvement. That's invalid API usage.
panic(err)
}
// Restore the apparent stderr logger used by Shout() and tests.
logging.testingFd2CaptureLogger = nil
// Note: the remainder of the code in cleanupFn() will remove
// the logger and close it. No need to also do it here.
}
}
// Apply the stderr sink configuration.
logging.stderrSink.noColor = config.Sinks.Stderr.NoColor
if err := logging.stderrSinkInfoTemplate.applyConfig(config.Sinks.Stderr.CommonSinkConfig); err != nil {
cleanupFn()
return nil, err
}
// Create the per-channel loggers.
chans := make(map[Channel]*loggerT, len(logpb.Channel_name))
for chi := range logpb.Channel_name {
ch := Channel(chi)
chans[ch] = &loggerT{}
if ch == channel.DEV {
debugLog = chans[ch]
}
}
// Make a copy of the template so that any subsequent config
// changes don't race with logging operations.
stderrSinkInfo := logging.stderrSinkInfoTemplate
// Connect the stderr channels.
for _, ch := range config.Sinks.Stderr.Channels.Channels {
// Note: we connect stderr even if the severity is NONE
// so that tests can raise the severity after configuration.
l := chans[ch]
l.sinkInfos = append(l.sinkInfos, &stderrSinkInfo)
}
// Create the file sinks.
for prefix, fc := range config.Sinks.FileGroups {
if fc.Filter == severity.NONE || fc.Dir == nil {
continue
}
if prefix == "default" {
prefix = ""
}
fileSinkInfo, _, err := newFileSinkInfo(prefix, *fc)
if err != nil {
cleanupFn()
return nil, err
}
sinkInfos = append(sinkInfos, fileSinkInfo)
allSinkInfos.put(fileSinkInfo)
// Connect the channels for this sink.
for _, ch := range fc.Channels.Channels {
l := chans[ch]
l.sinkInfos = append(l.sinkInfos, fileSinkInfo)
}
}
logging.setChannelLoggers(chans, &stderrSinkInfo)
setActive()
return cleanupFn, nil
}
// newFileSinkInfo creates a new fileSink and its accompanying sinkInfo
// from the provided configuration.
func newFileSinkInfo(fileNamePrefix string, c logconfig.FileConfig) (*sinkInfo, *fileSink, error) {
info := &sinkInfo{}
if err := info.applyConfig(c.CommonSinkConfig); err != nil {
return nil, nil, err
}
fileSink := newFileSink(
*c.Dir,
fileNamePrefix,
*c.SyncWrites,
int64(*c.MaxFileSize),
int64(*c.MaxGroupSize),
info.getStartLines)
info.sink = fileSink
return info, fileSink, nil
}
// applyConfig applies a common sink configuration to a sinkInfo.
func (l *sinkInfo) applyConfig(c logconfig.CommonSinkConfig) error {
l.threshold = c.Filter
l.redact = *c.Redact
l.redactable = *c.Redactable
l.editor = getEditor(SelectEditMode(*c.Redact, *c.Redactable))
l.criticality = *c.Criticality
f, ok := formatters[*c.Format]
if !ok {
return errors.Newf("unknown format: %q", *c.Format)
}
l.formatter = f
return nil
}
// describeAppliedConfig reports a sinkInfo's configuration as a
// CommonSinkConfig. Note that the returned config object
// holds references to the sinkInfo parameters and thus should
// not be reused if the configuration can change asynchronously.
func (l *sinkInfo) describeAppliedConfig() (c logconfig.CommonSinkConfig) {
c.Filter = l.threshold
c.Redact = &l.redact
c.Redactable = &l.redactable
c.Criticality = &l.criticality
f := l.formatter.formatterName()
c.Format = &f
return c
}
// TestingResetActive clears the active bit. This is for use in tests
// that use stderr redirection alongside other tests that use
// logging.
func TestingResetActive() {
logging.mu.Lock()
defer logging.mu.Unlock()
logging.mu.active = false
}
// DescribeAppliedConfig describes the current setup as effected by
// ApplyConfig(). This is useful in tests and also to check
// when something may be wrong with the logging configuration.
func DescribeAppliedConfig() string {
var config logconfig.Config
// Describe the fd2 capture, if installed.
if logging.testingFd2CaptureLogger != nil {
config.CaptureFd2.Enable = true
fs := logging.testingFd2CaptureLogger.sinkInfos[0].sink.(*fileSink)
fs.mu.Lock()
dir := fs.mu.logDir
fs.mu.Unlock()
config.CaptureFd2.Dir = &dir
m := logconfig.ByteSize(fs.logFilesCombinedMaxSize)
config.CaptureFd2.MaxGroupSize = &m
}
// Describe the stderr sink.
config.Sinks.Stderr.NoColor = logging.stderrSink.noColor
config.Sinks.Stderr.CommonSinkConfig = logging.stderrSinkInfoTemplate.describeAppliedConfig()
describeConnections := func(l *loggerT, ch Channel,
target *sinkInfo, list *logconfig.ChannelList) {
for _, s := range l.sinkInfos {
if s == target {
list.Channels = append(list.Channels, ch)
}
}
list.Sort()
}
// Describe the connections to the stderr sink.
logging.rmu.RLock()
chans := logging.rmu.channels
stderrSinkInfo := logging.rmu.currentStderrSinkInfo
logging.rmu.RUnlock()
for ch, logger := range chans {
describeConnections(logger, ch,
stderrSinkInfo, &config.Sinks.Stderr.Channels)
}
// Describe the file sinks.
config.Sinks.FileGroups = make(map[string]*logconfig.FileConfig)
_ = allSinkInfos.iter(func(l *sinkInfo) error {
if cl := logging.testingFd2CaptureLogger; cl != nil && cl.sinkInfos[0] == l {
// Not a real sink. Omit.
return nil
}
fileSink, ok := l.sink.(*fileSink)
if !ok {
return nil
}
fc := &logconfig.FileConfig{}
fc.CommonSinkConfig = l.describeAppliedConfig()
mf := logconfig.ByteSize(fileSink.logFileMaxSize)
fc.MaxFileSize = &mf
mg := logconfig.ByteSize(fileSink.logFilesCombinedMaxSize)
fc.MaxGroupSize = &mg
fileSink.mu.Lock()
dir := fileSink.mu.logDir
fileSink.mu.Unlock()
fc.Dir = &dir
fc.SyncWrites = &fileSink.syncWrites
// Describe the connections to this file sink.
for ch, logger := range chans {
describeConnections(logger, ch, l, &fc.Channels)
}
prefix := strings.TrimPrefix(fileSink.prefix, program)
if prefix == "" {
prefix = "default"
} else {
prefix = strings.TrimPrefix(prefix, "-")
}
if prev, ok := config.Sinks.FileGroups[prefix]; ok {
fmt.Fprintf(OrigStderr,
"warning: multiple file loggers with prefix %q, previous: %+v\n",
prefix, prev)
}
config.Sinks.FileGroups[prefix] = fc
return nil
})
// Note: we cannot return 'config' directly, because this captures
// certain variables from the loggers by reference and thus could be
// invalidated by concurrent uses of ApplyConfig().
return config.String()
}
| pkg/util/log/flags.go | 1 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.9979130625724792,
0.09022220969200134,
0.00016075430903583765,
0.002066795714199543,
0.27677270770072937
] |
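This row's label marks flags.go as the file the hunk actually edits: DescribeAppliedConfig reads the flag back with noColor.Get(), mirroring the Set() on the apply path. As an aside, the ApplyConfig doc comment in that file spells out the intended test-only use of the returned cleanup function; a hedged sketch of that usage, built only from calls visible in the file (logconfig.DefaultConfig, Validate, TestingResetActive, ApplyConfig):
// Sketch only: how a test might apply and then tear down a logging config.
cfg := logconfig.DefaultConfig()
if err := cfg.Validate(nil /* no default log directory */); err != nil {
	panic(err)
}
log.TestingResetActive() // ApplyConfig panics if logging is already active.
cleanup, err := log.ApplyConfig(cfg)
if err != nil {
	panic(err)
}
defer cleanup()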
{
"id": 1,
"code_window": [
"\t\tm := logconfig.ByteSize(fs.logFilesCombinedMaxSize)\n",
"\t\tconfig.CaptureFd2.MaxGroupSize = &m\n",
"\t}\n",
"\n",
"\t// Describe the stderr sink.\n",
"\tconfig.Sinks.Stderr.NoColor = logging.stderrSink.noColor\n",
"\tconfig.Sinks.Stderr.CommonSinkConfig = logging.stderrSinkInfoTemplate.describeAppliedConfig()\n",
"\n",
"\tdescribeConnections := func(l *loggerT, ch Channel,\n",
"\t\ttarget *sinkInfo, list *logconfig.ChannelList) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tconfig.Sinks.Stderr.NoColor = logging.stderrSink.noColor.Get()\n"
],
"file_path": "pkg/util/log/flags.go",
"type": "replace",
"edit_start_line_idx": 358
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
syntax = "proto3";
package cockroach.kv.kvserver.concurrency.lock;
option go_package = "lock";
import "gogoproto/gogo.proto";
// Strength represents the different locking modes that determine how key-values
// can be accessed by concurrent transactions.
//
// Locking modes apply to locks that are held with a per-key granularity. It is
// up to users of the key-value layer to decide on which keys to acquire locks
// for when imposing structure that can span multiple keys, such as SQL rows
// (see column families and secondary indexes).
//
// Locking modes have differing levels of strength, growing from "weakest" to
// "strongest" in the order that the variants are presented in the enumeration.
// The "stronger" a locking mode, the more protection it provides for the lock
// holder but the more restrictive it is to concurrent transactions attempting
// to access the same keys.
//
// Compatibility Matrix
//
// The following matrix presents the compatibility of locking strengths with one
// another. A cell with an X means that the two strengths are incompatible with
// each other and that they can not both be held on a given key by different
// transactions, concurrently. A cell without an X means that the two strengths
// are compatible with each other and that they can be held on a given key by
// different transactions, concurrently.
//
// +-----------+-----------+-----------+-----------+-----------+
// | | None | Shared | Upgrade | Exclusive |
// +-----------+-----------+-----------+-----------+-----------+
// | None | | | | X^† |
// +-----------+-----------+-----------+-----------+-----------+
// | Shared | | | X | X |
// +-----------+-----------+-----------+-----------+-----------+
// | Upgrade | | X | X | X |
// +-----------+-----------+-----------+-----------+-----------+
// | Exclusive | X^† | X | X | X |
// +-----------+-----------+-----------+-----------+-----------+
//
// [†] reads under optimistic concurrency control in CockroachDB only conflict
// with Exclusive locks if the read's timestamp is equal to or greater than the
// lock's timestamp. If the read's timestamp is below the Exclusive lock's
// timestamp then the two are compatible.
enum Strength {
option (gogoproto.goproto_enum_prefix) = false;
// None represents the absence of a lock or the intention to acquire locks.
// It corresponds to the behavior of transactions performing key-value reads
// under optimistic concurrency control. No locks are acquired on the keys
// read by these requests when they evaluate. However, the reads do respect
// Exclusive locks already held by other transactions at timestamps equal to
// or less than their read timestamp.
//
// Optimistic concurrency control (OCC) can improve performance under some
// workloads because it avoids the need to perform any locking during reads.
// This can increase the amount of concurrency that the system can permit
// between ongoing transactions. However, OCC does mandate a read validation
// phase if/when transactions need to commit at a different timestamp than
// they performed all reads at. CockroachDB calls this a "read refresh",
// which is implemented by the txnSpanRefresher. If a read refresh fails due
// to new key-value writes that invalidate what was previously read,
// transactions are forced to restart. See the comment on txnSpanRefresher
// for more.
None = 0;
// Shared (S) locks are used by read-only operations and allow concurrent
// transactions to read under pessimistic concurrency control. Shared locks
// are compatible with each other but are not compatible with Upgrade or
// Exclusive locks. This means that multiple transactions can hold a Shared
// lock on the same key at the same time, but no other transaction can
// modify the key at the same time. A holder of a Shared lock on a key is
// only permitted to read the key's value while the lock is held.
//
  // Shared locks are currently unused, as all KV reads are performed
  // optimistically (see None).
Shared = 1;
// Upgrade (U) locks are a hybrid of Shared and Exclusive locks which are
// used to prevent a common form of deadlock. When a transaction intends to
// modify existing KVs, it is often the case that it reads the KVs first and
// then attempts to modify them. Under pessimistic concurrency control, this
// would correspond to first acquiring a Shared lock on the keys and then
// converting the lock to an Exclusive lock when modifying the keys. If two
// transactions were to acquire the Shared lock initially and then attempt
// to update the keys concurrently, both transactions would get stuck
// waiting for the other to release its Shared lock and a deadlock would
// occur. To resolve the deadlock, one of the two transactions would need to
// be aborted.
//
// To avoid this potential deadlock problem, an Upgrade lock can be used in
// place of a Shared lock. Upgrade locks are not compatible with any other
  // form of locking. As with Shared locks, the holder of an Upgrade lock
// on a key is only allowed to read from the key while the lock is held.
// This resolves the deadlock scenario presented above because only one of
// the transactions would have been able to acquire an Upgrade lock at a
// time while reading the initial state of the KVs. This means that the
// Shared-to-Exclusive lock upgrade would never need to wait on another
// transaction to release its locks.
//
// Under pure pessimistic concurrency control, an Upgrade lock is equivalent
// to an Exclusive lock. However, unlike with Exclusive locks, reads under
// optimistic concurrency control do not conflict with Upgrade locks. This
// is because a transaction can only hold an Upgrade lock on keys that it
// has not yet modified. This improves concurrency between read and write
// transactions compared to if the writing transaction had immediately
// acquired an Exclusive lock.
//
// The trade-off here is twofold. First, if the Upgrade lock holder does
// convert its lock on a key to an Exclusive lock after an optimistic read
// has observed the state of the key, the transaction that performed the
// optimistic read may be unable to perform a successful read refresh if it
// attempts to refresh to a timestamp at or past the timestamp of the lock
// conversion. Second, the optimistic reads permitted while the Upgrade lock
// is held will bump the timestamp cache. This may result in the Upgrade
// lock holder being forced to increase its write timestamp when converting
// to an Exclusive lock, which in turn may force it to restart if its read
// refresh fails.
Upgrade = 2;
// Exclusive (X) locks are used by read-write and read-only operations and
// provide a transaction with exclusive access to a key. When an Exclusive
// lock is held by a transaction on a given key, no other transaction can
// read from or write to that key. The lock holder is free to read from and
// write to the key as frequently as it would like.
Exclusive = 3;
}
// Durability represents the different durability properties of a lock acquired
// by a transaction. Durability levels provide varying degrees of survivability,
// often in exchange for the cost of lock acquisition.
enum Durability {
option (gogoproto.goproto_enum_prefix) = false;
// Replicated locks are held on at least a quorum of Replicas in a Range.
// They are slower to acquire and release than Unreplicated locks because
// updating them requires both cross-node coordination and interaction with
// durable storage. In exchange, Replicated locks provide a guarantee of
// survivability across lease transfers, leaseholder crashes, and other
// forms of failure events. They will remain available as long as their
// Range remains available and they will never be lost.
Replicated = 0;
// Unreplicated locks are held only on a single Replica in a Range, which is
// typically the leaseholder. Unreplicated locks are very fast to acquire
// and release because they are held in memory or on fast local storage and
// require no cross-node coordination to update. In exchange, Unreplicated
// locks provide no guarantee of survivability across lease transfers or
// leaseholder crashes. They should therefore be thought of as best-effort
// and should not be relied upon for correctness.
Unreplicated = 1;
}
// WaitPolicy specifies the behavior of a request when it encounters conflicting
// locks held by other active transactions. The default behavior is to block
// until the conflicting lock is released, but other policies can make sense in
// special situations.
enum WaitPolicy {
  // Block indicates that if a request encounters a conflicting lock held by
// another active transaction, it should wait for the conflicting lock to be
// released before proceeding.
Block = 0;
  // Error indicates that if a request encounters a conflicting lock held by
// another active transaction, it should raise an error instead of blocking.
// If the request encounters a conflicting lock that was abandoned by an
// inactive transaction, which is likely due to a transaction coordinator
// crash, the lock is removed and no error is raised.
Error = 1;
}
| pkg/kv/kvserver/concurrency/lock/locking.proto | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.00032270682277157903,
0.0001771497045410797,
0.00016014798893593252,
0.00016967409464996308,
0.00003456184276728891
] |
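The compatibility matrix in the locking.proto comments above pins down which Strength pairs may coexist on a key. A small sketch of that matrix as a predicate, assuming the generated Go package lock with the un-prefixed enum names declared there; the † timestamp caveat for None vs. Exclusive is deliberately ignored:
// conflicts reports whether two lock strengths cannot be held on the same
// key by different transactions, per the matrix in locking.proto.
func conflicts(a, b lock.Strength) bool {
	if a == lock.None && b == lock.None {
		return false
	}
	if a == lock.None || b == lock.None {
		// None only conflicts with Exclusive (and then only per the † caveat).
		return a == lock.Exclusive || b == lock.Exclusive
	}
	// Among held locks, only Shared is compatible with Shared; Upgrade and
	// Exclusive conflict with every other strength.
	return !(a == lock.Shared && b == lock.Shared)
}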
{
"id": 1,
"code_window": [
"\t\tm := logconfig.ByteSize(fs.logFilesCombinedMaxSize)\n",
"\t\tconfig.CaptureFd2.MaxGroupSize = &m\n",
"\t}\n",
"\n",
"\t// Describe the stderr sink.\n",
"\tconfig.Sinks.Stderr.NoColor = logging.stderrSink.noColor\n",
"\tconfig.Sinks.Stderr.CommonSinkConfig = logging.stderrSinkInfoTemplate.describeAppliedConfig()\n",
"\n",
"\tdescribeConnections := func(l *loggerT, ch Channel,\n",
"\t\ttarget *sinkInfo, list *logconfig.ChannelList) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tconfig.Sinks.Stderr.NoColor = logging.stderrSink.noColor.Get()\n"
],
"file_path": "pkg/util/log/flags.go",
"type": "replace",
"edit_start_line_idx": 358
} | load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "exprgen",
srcs = [
"custom_funcs.go",
"expr_gen.go",
"parse_type.go",
"private.go",
],
importpath = "github.com/cockroachdb/cockroach/pkg/sql/opt/optgen/exprgen",
visibility = ["//visibility:public"],
deps = [
"//pkg/sql/opt",
"//pkg/sql/opt/cat",
"//pkg/sql/opt/memo",
"//pkg/sql/opt/norm",
"//pkg/sql/opt/optgen/lang",
"//pkg/sql/opt/ordering",
"//pkg/sql/opt/props",
"//pkg/sql/opt/props/physical",
"//pkg/sql/opt/xform",
"//pkg/sql/parser",
"//pkg/sql/sem/tree",
"//pkg/sql/stats",
"//pkg/sql/types",
"//vendor/github.com/cockroachdb/errors",
],
)
go_test(
name = "exprgen_test",
srcs = ["expr_gen_test.go"],
data = glob(["testdata/**"]),
deps = [
"//pkg/sql/opt/testutils/opttester",
"//pkg/sql/opt/testutils/testcat",
"//vendor/github.com/cockroachdb/datadriven",
],
)
| pkg/sql/opt/optgen/exprgen/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.00017605838365852833,
0.0001720262662274763,
0.00016917330503929406,
0.00017009314615279436,
0.0000030355449780472554
] |
{
"id": 1,
"code_window": [
"\t\tm := logconfig.ByteSize(fs.logFilesCombinedMaxSize)\n",
"\t\tconfig.CaptureFd2.MaxGroupSize = &m\n",
"\t}\n",
"\n",
"\t// Describe the stderr sink.\n",
"\tconfig.Sinks.Stderr.NoColor = logging.stderrSink.noColor\n",
"\tconfig.Sinks.Stderr.CommonSinkConfig = logging.stderrSinkInfoTemplate.describeAppliedConfig()\n",
"\n",
"\tdescribeConnections := func(l *loggerT, ch Channel,\n",
"\t\ttarget *sinkInfo, list *logconfig.ChannelList) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tconfig.Sinks.Stderr.NoColor = logging.stderrSink.noColor.Get()\n"
],
"file_path": "pkg/util/log/flags.go",
"type": "replace",
"edit_start_line_idx": 358
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tree_test
import (
"bytes"
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
)
func TestClassifyTablePattern(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testCases := []struct {
in, out string
expanded string
err string
}{
{`a`, `a`, `""."".a`, ``},
{`a.b`, `a.b`, `"".a.b`, ``},
{`a.b.c`, `a.b.c`, `a.b.c`, ``},
{`a.b.c.d`, ``, ``, `at or near "\.": syntax error`},
{`a.""`, ``, ``, `invalid table name: a\.""`},
{`a.b.""`, ``, ``, `invalid table name: a\.b\.""`},
{`a.b.c.""`, ``, ``, `at or near "\.": syntax error`},
{`a."".c`, ``, ``, `invalid table name: a\.""\.c`},
// CockroachDB extension: empty catalog name.
{`"".b.c`, `"".b.c`, `"".b.c`, ``},
// Check keywords: disallowed in first position, ok afterwards.
{`user.x.y`, ``, ``, `syntax error`},
{`"user".x.y`, `"user".x.y`, `"user".x.y`, ``},
{`x.user.y`, `x."user".y`, `x."user".y`, ``},
{`x.user`, `x."user"`, `"".x."user"`, ``},
{`*`, `*`, `""."".*`, ``},
{`a.*`, `a.*`, `"".a.*`, ``},
{`a.b.*`, `a.b.*`, `a.b.*`, ``},
{`a.b.c.*`, ``, ``, `at or near "\.": syntax error`},
{`a.b.*.c`, ``, ``, `at or near "\.": syntax error`},
{`a.*.b`, ``, ``, `at or near "\.": syntax error`},
{`*.b`, ``, ``, `at or near "\.": syntax error`},
{`"".*`, ``, ``, `invalid table name: "".\*`},
{`a."".*`, ``, ``, `invalid table name: a\.""\.\*`},
{`a.b."".*`, ``, ``, `invalid table name: a.b.""`},
// CockroachDB extension: empty catalog name.
{`"".b.*`, `"".b.*`, `"".b.*`, ``},
// Check keywords: disallowed in first position, ok afterwards.
{`user.x.*`, ``, ``, `syntax error`},
{`"user".x.*`, `"user".x.*`, `"user".x.*`, ``},
{`x.user.*`, `x."user".*`, `x."user".*`, ``},
{`foo@bar`, ``, ``, `at or near "@": syntax error`},
}
for _, tc := range testCases {
t.Run(tc.in, func(t *testing.T) {
tp, err := func() (tree.TablePattern, error) {
stmt, err := parser.ParseOne(fmt.Sprintf("GRANT SELECT ON %s TO foo", tc.in))
if err != nil {
return nil, err
}
tp, err := stmt.AST.(*tree.Grant).Targets.Tables[0].NormalizeTablePattern()
if err != nil {
return nil, err
}
return tp, nil
}()
if !testutils.IsError(err, tc.err) {
t.Fatalf("%s: expected %s, but found %v", tc.in, tc.err, err)
}
if tc.err != "" {
return
}
if out := tp.String(); tc.out != out {
t.Fatalf("%s: expected %s, but found %s", tc.in, tc.out, out)
}
switch tpv := tp.(type) {
case *tree.AllTablesSelector:
tpv.ExplicitSchema = true
tpv.ExplicitCatalog = true
case *tree.TableName:
tpv.ExplicitSchema = true
tpv.ExplicitCatalog = true
default:
t.Fatalf("%s: unknown pattern type: %T", tc.in, tp)
}
if out := tp.String(); tc.expanded != out {
t.Fatalf("%s: expected full %s, but found %s", tc.in, tc.expanded, out)
}
})
}
}
func TestClassifyColumnName(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testCases := []struct {
in, out string
err string
}{
{`a`, `a`, ``},
{`a.b`, `a.b`, ``},
{`a.b.c`, `a.b.c`, ``},
{`a.b.c.d`, `a.b.c.d`, ``},
{`a.b.c.d.e`, ``, `at or near "\.": syntax error`},
{`""`, ``, `invalid column name: ""`},
{`a.""`, ``, `invalid column name: a\.""`},
{`a.b.""`, ``, `invalid column name: a\.b\.""`},
{`a.b.c.""`, ``, `invalid column name: a\.b\.c\.""`},
{`a.b.c.d.""`, ``, `at or near "\.": syntax error`},
{`"".a`, ``, `invalid column name: ""\.a`},
{`"".a.b`, ``, `invalid column name: ""\.a\.b`},
// CockroachDB extension: empty catalog name.
{`"".a.b.c`, `"".a.b.c`, ``},
{`a.b."".d`, ``, `invalid column name: a\.b\.""\.d`},
{`a."".c.d`, ``, `invalid column name: a\.""\.c\.d`},
// Check keywords: disallowed in first position, ok afterwards.
{`user.x.y`, ``, `syntax error`},
{`"user".x.y`, `"user".x.y`, ``},
{`x.user.y`, `x.user.y`, ``},
{`x.user`, `x."user"`, ``},
{`*`, `*`, ``},
{`a.*`, `a.*`, ``},
{`a.b.*`, `a.b.*`, ``},
{`a.b.c.*`, `a.b.c.*`, ``},
{`a.b.c.d.*`, ``, `at or near "\.": syntax error`},
{`a.b.*.c`, ``, `at or near "\.": syntax error`},
{`a.*.b`, ``, `at or near "\.": syntax error`},
{`*.b`, ``, `at or near "\.": syntax error`},
{`"".*`, ``, `invalid column name: "".\*`},
{`a."".*`, ``, `invalid column name: a\.""\.\*`},
{`a.b."".*`, ``, `invalid column name: a\.b\.""\.\*`},
{`a.b.c."".*`, ``, `at or near "\.": syntax error`},
{`"".a.*`, ``, `invalid column name: ""\.a.*`},
// CockroachDB extension: empty catalog name.
{`"".a.b.*`, `"".a.b.*`, ``},
{`a."".c.*`, ``, `invalid column name: a\.""\.c\.*`},
// Check keywords: disallowed in first position, ok afterwards.
{`user.x.*`, ``, `syntax error`},
{`"user".x.*`, `"user".x.*`, ``},
{`x.user.*`, `x.user.*`, ``},
{`foo@bar`, ``, `at or near "@": syntax error`},
}
for _, tc := range testCases {
t.Run(tc.in, func(t *testing.T) {
v, err := func() (tree.VarName, error) {
stmt, err := parser.ParseOne(fmt.Sprintf("SELECT %s", tc.in))
if err != nil {
return nil, err
}
v := stmt.AST.(*tree.Select).Select.(*tree.SelectClause).Exprs[0].Expr.(tree.VarName)
return v.NormalizeVarName()
}()
if !testutils.IsError(err, tc.err) {
t.Fatalf("%s: expected %s, but found %v", tc.in, tc.err, err)
}
if tc.err != "" {
return
}
if out := v.String(); tc.out != out {
t.Fatalf("%s: expected %s, but found %s", tc.in, tc.out, out)
}
switch v.(type) {
case *tree.AllColumnsSelector:
case tree.UnqualifiedStar:
case *tree.ColumnItem:
default:
t.Fatalf("%s: unknown var type: %T", tc.in, v)
}
})
}
}
// fakeSource represents a fake column resolution environment for tests.
type fakeSource struct {
t *testing.T
knownTables []knownTable
}
type knownTable struct {
srcName tree.TableName
columns []tree.Name
}
type colsRes tree.NameList
func (c colsRes) ColumnSourceMeta() {}
// FindSourceMatchingName is part of the ColumnItemResolver interface.
func (f *fakeSource) FindSourceMatchingName(
_ context.Context, tn tree.TableName,
) (
res tree.NumResolutionResults,
prefix *tree.TableName,
srcMeta tree.ColumnSourceMeta,
err error,
) {
defer func() {
f.t.Logf("FindSourceMatchingName(%s) -> res %d prefix %s meta %v err %v",
&tn, res, prefix, srcMeta, err)
}()
found := false
var columns colsRes
for i := range f.knownTables {
t := &f.knownTables[i]
if t.srcName.ObjectName != tn.ObjectName {
continue
}
if tn.ExplicitSchema {
if !t.srcName.ExplicitSchema || t.srcName.SchemaName != tn.SchemaName {
continue
}
if tn.ExplicitCatalog {
if !t.srcName.ExplicitCatalog || t.srcName.CatalogName != tn.CatalogName {
continue
}
}
}
if found {
return tree.MoreThanOne, nil, nil, fmt.Errorf("ambiguous source name: %q", &tn)
}
found = true
prefix = &t.srcName
columns = colsRes(t.columns)
}
if !found {
return tree.NoResults, nil, nil, nil
}
return tree.ExactlyOne, prefix, columns, nil
}
// FindSourceProvidingColumn is part of the ColumnItemResolver interface.
func (f *fakeSource) FindSourceProvidingColumn(
_ context.Context, col tree.Name,
) (prefix *tree.TableName, srcMeta tree.ColumnSourceMeta, colHint int, err error) {
defer func() {
f.t.Logf("FindSourceProvidingColumn(%s) -> prefix %s meta %v hint %d err %v",
col, prefix, srcMeta, colHint, err)
}()
found := false
var columns colsRes
for i := range f.knownTables {
t := &f.knownTables[i]
for c, cn := range t.columns {
if cn != col {
continue
}
if found {
return nil, nil, -1, f.ambiguousColumnErr(col)
}
found = true
colHint = c
columns = colsRes(t.columns)
prefix = &t.srcName
break
}
}
if !found {
return nil, nil, -1, fmt.Errorf("column %q does not exist", &col)
}
return prefix, columns, colHint, nil
}
func (f *fakeSource) ambiguousColumnErr(col tree.Name) error {
var candidates bytes.Buffer
sep := ""
for i := range f.knownTables {
t := &f.knownTables[i]
for _, cn := range t.columns {
if cn == col {
fmt.Fprintf(&candidates, "%s%s.%s", sep, tree.ErrString(&t.srcName), cn)
sep = ", "
}
}
}
return fmt.Errorf("column reference %q is ambiguous (candidates: %s)", &col, candidates.String())
}
type colRes string
func (c colRes) ColumnResolutionResult() {}
// Resolve is part of the ColumnItemResolver interface.
func (f *fakeSource) Resolve(
_ context.Context,
prefix *tree.TableName,
srcMeta tree.ColumnSourceMeta,
colHint int,
col tree.Name,
) (tree.ColumnResolutionResult, error) {
f.t.Logf("in Resolve: prefix %s meta %v colHint %d col %s",
prefix, srcMeta, colHint, col)
columns, ok := srcMeta.(colsRes)
if !ok {
return nil, fmt.Errorf("programming error: srcMeta invalid")
}
if colHint >= 0 {
// Resolution succeeded. Let's do some sanity checking.
if columns[colHint] != col {
return nil, fmt.Errorf("programming error: invalid colHint %d", colHint)
}
return colRes(fmt.Sprintf("%s.%s", prefix, col)), nil
}
for _, cn := range columns {
if col == cn {
// Resolution succeeded.
return colRes(fmt.Sprintf("%s.%s", prefix, col)), nil
}
}
return nil, fmt.Errorf("unknown column name: %s", &col)
}
var _ sqlutils.ColumnItemResolverTester = &fakeSource{}
// GetColumnItemResolver is part of the sqlutils.ColumnItemResolverTester
// interface.
func (f *fakeSource) GetColumnItemResolver() tree.ColumnItemResolver {
return f
}
// AddTable is part of the sqlutils.ColumnItemResolverTester interface.
func (f *fakeSource) AddTable(tabName tree.TableName, colNames []tree.Name) {
f.knownTables = append(f.knownTables, knownTable{srcName: tabName, columns: colNames})
}
// ResolveQualifiedStarTestResults is part of the
// sqlutils.ColumnItemResolverTester interface.
func (f *fakeSource) ResolveQualifiedStarTestResults(
srcName *tree.TableName, srcMeta tree.ColumnSourceMeta,
) (string, string, error) {
cs, ok := srcMeta.(colsRes)
if !ok {
return "", "", fmt.Errorf("fake resolver did not return colsRes, found %T instead", srcMeta)
}
nl := tree.NameList(cs)
return srcName.String(), nl.String(), nil
}
// ResolveColumnItemTestResults is part of the
// sqlutils.ColumnItemResolverTester interface.
func (f *fakeSource) ResolveColumnItemTestResults(res tree.ColumnResolutionResult) (string, error) {
c, ok := res.(colRes)
if !ok {
return "", fmt.Errorf("fake resolver did not return colRes, found %T instead", res)
}
return string(c), nil
}
func TestResolveQualifiedStar(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
f := &fakeSource{t: t}
sqlutils.RunResolveQualifiedStarTest(t, f)
}
func TestResolveColumnItem(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
f := &fakeSource{t: t}
sqlutils.RunResolveColumnItemTest(t, f)
}
// fakeMetadata represents a fake table resolution environment for tests.
type fakeMetadata struct {
t *testing.T
knownVSchemas []knownSchema
knownCatalogs []knownCatalog
}
type knownSchema struct {
scName tree.Name
tables []tree.Name
}
func (*knownSchema) SchemaMeta() {}
type knownCatalog struct {
ctName tree.Name
schemas []knownSchema
}
// LookupSchema implements the TableNameResolver interface.
func (f *fakeMetadata) LookupSchema(
_ context.Context, dbName, scName string,
) (found bool, scMeta tree.SchemaMeta, err error) {
defer func() {
f.t.Logf("LookupSchema(%s, %s) -> found %v meta %v err %v",
dbName, scName, found, scMeta, err)
}()
for i := range f.knownVSchemas {
v := &f.knownVSchemas[i]
if scName == string(v.scName) {
// Virtual schema found, check that the db exists.
// The empty database is valid.
if dbName == "" {
return true, v, nil
}
for j := range f.knownCatalogs {
c := &f.knownCatalogs[j]
if dbName == string(c.ctName) {
return true, v, nil
}
}
// No valid database, schema is invalid.
return false, nil, nil
}
}
for i := range f.knownCatalogs {
c := &f.knownCatalogs[i]
if dbName == string(c.ctName) {
for j := range c.schemas {
s := &c.schemas[j]
if scName == string(s.scName) {
return true, s, nil
}
}
break
}
}
return false, nil, nil
}
type fakeResResult int
func (fakeResResult) NameResolutionResult() {}
// LookupObject implements the TableNameResolver interface.
func (f *fakeMetadata) LookupObject(
_ context.Context, lookupFlags tree.ObjectLookupFlags, dbName, scName, tbName string,
) (found bool, obMeta tree.NameResolutionResult, err error) {
defer func() {
f.t.Logf("LookupObject(%s, %s, %s) -> found %v meta %v err %v",
dbName, scName, tbName, found, obMeta, err)
}()
foundV := false
for i := range f.knownVSchemas {
v := &f.knownVSchemas[i]
if scName == string(v.scName) {
// Virtual schema found, check that the db exists.
// The empty database is valid.
if dbName != "" {
hasDb := false
for j := range f.knownCatalogs {
c := &f.knownCatalogs[j]
if dbName == string(c.ctName) {
hasDb = true
break
}
}
if !hasDb {
return false, nil, nil
}
}
// Db valid, check the table name.
for tbIdx, tb := range v.tables {
if tbName == string(tb) {
return true, fakeResResult(tbIdx), nil
}
}
foundV = true
break
}
}
if foundV {
// Virtual schema matched, but there was no table. Fail.
return false, nil, nil
}
for i := range f.knownCatalogs {
c := &f.knownCatalogs[i]
if dbName == string(c.ctName) {
for j := range c.schemas {
s := &c.schemas[j]
if scName == string(s.scName) {
for tbIdx, tb := range s.tables {
if tbName == string(tb) {
return true, fakeResResult(tbIdx), nil
}
}
break
}
}
break
}
}
return false, nil, nil
}
func newFakeMetadata() *fakeMetadata {
return &fakeMetadata{
knownVSchemas: []knownSchema{
{"pg_catalog", []tree.Name{"pg_tables"}},
},
knownCatalogs: []knownCatalog{
{"db1", []knownSchema{{"public", []tree.Name{"foo", "kv"}}}},
{"db2", []knownSchema{
{"public", []tree.Name{"foo"}},
{"extended", []tree.Name{"bar", "pg_tables"}},
}},
{"db3", []knownSchema{
{"public", []tree.Name{"foo", "bar"}},
{"pg_temp_123", []tree.Name{"foo", "baz"}},
}},
},
}
}
func TestResolveTablePatternOrName(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
type spath = sessiondata.SearchPath
var mpath = func(args ...string) spath {
return sessiondata.MakeSearchPath(args)
}
var tpath = func(tempSchemaName string, args ...string) spath {
return sessiondata.MakeSearchPath(args).WithTemporarySchemaName(tempSchemaName)
}
testCases := []struct {
// Test inputs.
in string // The table name or pattern.
curDb string // The current database.
searchPath spath // The current search path.
expected bool // If non-star, whether the object is expected to exist already.
// Expected outputs.
out string // The prefix after resolution.
expanded string // The prefix after resolution, with hidden fields revealed.
scName string // The schema name after resolution.
err string // Error, if expected.
}{
//
// Tests for table names.
//
// Names of length 1.
{`kv`, `db1`, mpath("public", "pg_catalog"), true, `kv`, `db1.public.kv`, `db1.public[1]`, ``},
{`foo`, `db1`, mpath("public", "pg_catalog"), true, `foo`, `db1.public.foo`, `db1.public[0]`, ``},
{`blix`, `db1`, mpath("public", "pg_catalog"), true, ``, ``, ``, `prefix or object not found`},
{`pg_tables`, `db1`, mpath("public", "pg_catalog"), true, `pg_tables`, `db1.pg_catalog.pg_tables`, `db1.pg_catalog[0]`, ``},
{`blix`, `db1`, mpath("public", "pg_catalog"), false, `blix`, `db1.public.blix`, `db1.public`, ``},
// A valid table is invisible if "public" is not in the search path.
{`kv`, `db1`, mpath(), true, ``, ``, ``, `prefix or object not found`},
// But pg_catalog is magic and "always there".
{`pg_tables`, `db1`, mpath(), true, `pg_tables`, `db1.pg_catalog.pg_tables`, `db1.pg_catalog[0]`, ``},
{`blix`, `db1`, mpath(), false, ``, ``, ``, `prefix or object not found`},
// If there's a table with the same name as a pg_catalog table, then search path order matters.
{`pg_tables`, `db2`, mpath("extended", "pg_catalog"), true, `pg_tables`, `db2.extended.pg_tables`, `db2.extended[1]`, ``},
{`pg_tables`, `db2`, mpath("pg_catalog", "extended"), true, `pg_tables`, `db2.pg_catalog.pg_tables`, `db2.pg_catalog[0]`, ``},
// When pg_catalog is not explicitly mentioned in the search path, it is searched first.
{`pg_tables`, `db2`, mpath("foo"), true, `pg_tables`, `db2.pg_catalog.pg_tables`, `db2.pg_catalog[0]`, ``},
// Names of length 2.
{`public.kv`, `db1`, mpath("public", "pg_catalog"), true, `public.kv`, `db1.public.kv`, `db1.public[1]`, ``},
{`public.foo`, `db1`, mpath("public", "pg_catalog"), true, `public.foo`, `db1.public.foo`, `db1.public[0]`, ``},
{`public.blix`, `db1`, mpath("public", "pg_catalog"), true, ``, ``, ``, `prefix or object not found`},
{`public.pg_tables`, `db1`, mpath("public", "pg_catalog"), true, ``, ``, ``, `prefix or object not found`},
{`extended.pg_tables`, `db2`, mpath("public", "pg_catalog"), true, `extended.pg_tables`, `db2.extended.pg_tables`, `db2.extended[1]`, ``},
{`pg_catalog.pg_tables`, `db1`, mpath("public", "pg_catalog"), true, `pg_catalog.pg_tables`, `db1.pg_catalog.pg_tables`, `db1.pg_catalog[0]`, ``},
{`public.blix`, `db1`, mpath("public", "pg_catalog"), false, `public.blix`, `db1.public.blix`, `db1.public`, ``},
// Compat with CockroachDB v1.x.
{`db1.kv`, `db1`, mpath("public", "pg_catalog"), true, `db1.public.kv`, `db1.public.kv`, `db1.public[1]`, ``},
{`blix.foo`, `db1`, mpath("public", "pg_catalog"), true, ``, ``, ``, `prefix or object not found`},
{`blix.pg_tables`, `db1`, mpath("public", "pg_catalog"), true, ``, ``, ``, `prefix or object not found`},
// Names of length 3.
{`db1.public.foo`, `db1`, mpath("public", "pg_catalog"), true, `db1.public.foo`, `db1.public.foo`, `db1.public[0]`, ``},
{`db1.public.kv`, `db1`, mpath(), true, `db1.public.kv`, `db1.public.kv`, `db1.public[1]`, ``},
{`db1.public.blix`, `db1`, mpath(), false, `db1.public.blix`, `db1.public.blix`, `db1.public`, ``},
{`blix.public.foo`, `db1`, mpath("public"), true, ``, ``, ``, `prefix or object not found`},
{`blix.public.foo`, `db1`, mpath("public"), false, ``, ``, ``, `prefix or object not found`},
// Beware: vtables only exist in valid databases and the empty database name.
{`db1.pg_catalog.pg_tables`, `db1`, mpath(), true, `db1.pg_catalog.pg_tables`, `db1.pg_catalog.pg_tables`, `db1.pg_catalog[0]`, ``},
{`"".pg_catalog.pg_tables`, `db1`, mpath(), true, `"".pg_catalog.pg_tables`, `"".pg_catalog.pg_tables`, `.pg_catalog[0]`, ``},
{`blix.pg_catalog.pg_tables`, `db1`, mpath("public"), true, ``, ``, ``, `prefix or object not found`},
{`blix.pg_catalog.pg_tables`, `db1`, mpath("public"), false, ``, ``, ``, `prefix or object not found`},
{`"".pg_catalog.blix`, `db1`, mpath(), false, `"".pg_catalog.blix`, `"".pg_catalog.blix`, `.pg_catalog`, ``},
//
// Tests for table names with no current database.
//
{`kv`, ``, mpath("public", "pg_catalog"), true, ``, ``, ``, `prefix or object not found`},
{`pg_tables`, ``, mpath("public", "pg_catalog"), true, `pg_tables`, `"".pg_catalog.pg_tables`, `.pg_catalog[0]`, ``},
{`pg_tables`, ``, mpath(), true, `pg_tables`, `"".pg_catalog.pg_tables`, `.pg_catalog[0]`, ``},
{`blix`, ``, mpath("public"), false, ``, ``, ``, `prefix or object not found`},
{`blix`, ``, mpath("public", "pg_catalog"), false, `blix`, `"".pg_catalog.blix`, `.pg_catalog`, ``},
// Names of length 2.
{`public.kv`, ``, mpath("public", "pg_catalog"), true, ``, ``, ``, `prefix or object not found`},
{`pg_catalog.pg_tables`, ``, mpath("public", "pg_catalog"), true, `pg_catalog.pg_tables`, `"".pg_catalog.pg_tables`, `.pg_catalog[0]`, ``},
// Compat with CockroachDB v1.x.
{`db1.kv`, ``, mpath("public", "pg_catalog"), true, `db1.public.kv`, `db1.public.kv`, `db1.public[1]`, ``},
{`db1.blix`, ``, mpath("public", "pg_catalog"), true, ``, ``, ``, `prefix or object not found`},
{`blix.pg_tables`, ``, mpath("public", "pg_catalog"), true, ``, ``, ``, `prefix or object not found`},
// Names of length 3.
{`db1.public.foo`, ``, mpath("public", "pg_catalog"), true, `db1.public.foo`, `db1.public.foo`, `db1.public[0]`, ``},
{`db1.public.kv`, ``, mpath(), true, `db1.public.kv`, `db1.public.kv`, `db1.public[1]`, ``},
{`db1.public.blix`, ``, mpath(), false, `db1.public.blix`, `db1.public.blix`, `db1.public`, ``},
{`blix.public.foo`, ``, mpath("public"), true, ``, ``, ``, `prefix or object not found`},
{`blix.public.foo`, ``, mpath("public"), false, ``, ``, ``, `prefix or object not found`},
// Beware: vtables only exist in valid databases and the empty database name.
{`db1.pg_catalog.pg_tables`, ``, mpath(), true, `db1.pg_catalog.pg_tables`, `db1.pg_catalog.pg_tables`, `db1.pg_catalog[0]`, ``},
{`"".pg_catalog.pg_tables`, ``, mpath(), true, `"".pg_catalog.pg_tables`, `"".pg_catalog.pg_tables`, `.pg_catalog[0]`, ``},
{`blix.pg_catalog.pg_tables`, ``, mpath("public"), true, ``, ``, ``, `prefix or object not found`},
{`blix.pg_catalog.pg_tables`, ``, mpath("public"), false, ``, ``, ``, `prefix or object not found`},
{`"".pg_catalog.blix`, ``, mpath(), false, `"".pg_catalog.blix`, `"".pg_catalog.blix`, `.pg_catalog`, ``},
//
// Tests for table patterns.
//
// Patterns of length 1.
{`*`, `db1`, mpath("public", "pg_catalog"), false, `*`, `db1.public.*`, `db1.public`, ``},
// Patterns of length 2.
{`public.*`, `db1`, mpath("public"), false, `public.*`, `db1.public.*`, `db1.public`, ``},
{`public.*`, `db1`, mpath("public", "pg_catalog"), false, `public.*`, `db1.public.*`, `db1.public`, ``},
{`public.*`, `db1`, mpath(), false, `public.*`, `db1.public.*`, `db1.public`, ``},
{`blix.*`, `db1`, mpath("public"), false, ``, ``, ``, `prefix or object not found`},
{`pg_catalog.*`, `db1`, mpath("public"), false, `pg_catalog.*`, `db1.pg_catalog.*`, `db1.pg_catalog`, ``},
{`pg_catalog.*`, `db1`, mpath("public", "pg_catalog"), false, `pg_catalog.*`, `db1.pg_catalog.*`, `db1.pg_catalog`, ``},
{`pg_catalog.*`, `db1`, mpath(), false, `pg_catalog.*`, `db1.pg_catalog.*`, `db1.pg_catalog`, ``},
//
// Tests for table patterns with no current database.
//
// Patterns of length 1.
{`*`, ``, mpath("public"), false, ``, ``, ``, `prefix or object not found`},
{`*`, ``, mpath("public", "pg_catalog"), false, `*`, `"".pg_catalog.*`, `.pg_catalog`, ``},
// Patterns of length 2.
{`public.*`, ``, mpath("public", "pg_catalog"), false, ``, ``, ``, `prefix or object not found`},
// vtables exist also in the empty database.
{`pg_catalog.*`, ``, mpath("public", "pg_catalog"), false, `pg_catalog.*`, `"".pg_catalog.*`, `.pg_catalog`, ``},
{`pg_catalog.*`, ``, mpath(), false, `pg_catalog.*`, `"".pg_catalog.*`, `.pg_catalog`, ``},
// Compat with CockroachDB v1.x.
{`db1.*`, ``, mpath("public", "pg_catalog"), false, `db1.public.*`, `db1.public.*`, `db1.public`, ``},
{`blix.*`, ``, mpath("public"), false, ``, ``, ``, `prefix or object not found`},
{`blix.*`, ``, mpath("public", "pg_catalog"), false, ``, ``, ``, `prefix or object not found`},
{`blix.*`, ``, mpath(), false, ``, ``, ``, `prefix or object not found`},
// Patterns of length 3.
{`db1.public.*`, ``, mpath("public", "pg_catalog"), false, `db1.public.*`, `db1.public.*`, `db1.public`, ``},
{`db1.public.*`, ``, mpath(), false, `db1.public.*`, `db1.public.*`, `db1.public`, ``},
{`blix.public.*`, ``, mpath("public"), false, ``, ``, ``, `prefix or object not found`},
{`blix.public.*`, ``, mpath("public", "pg_catalog"), false, ``, ``, ``, `prefix or object not found`},
// Beware: vtables only exist in valid databases and the empty database name.
{`db1.pg_catalog.*`, ``, mpath(), false, `db1.pg_catalog.*`, `db1.pg_catalog.*`, `db1.pg_catalog`, ``},
{`"".pg_catalog.*`, ``, mpath(), false, `"".pg_catalog.*`, `"".pg_catalog.*`, `.pg_catalog`, ``},
{`blix.pg_catalog.*`, ``, mpath("public"), false, ``, ``, ``, `prefix or object not found`},
//
// Tests for temporary table resolution
//
// Names of length 1
{`foo`, `db3`, tpath("pg_temp_123", "public"), true, `foo`, `db3.pg_temp_123.foo`, `db3.pg_temp_123[0]`, ``},
{`foo`, `db3`, tpath("pg_temp_123", "public", "pg_temp"), true, `foo`, `db3.public.foo`, `db3.public[0]`, ``},
{`baz`, `db3`, tpath("pg_temp_123", "public"), true, `baz`, `db3.pg_temp_123.baz`, `db3.pg_temp_123[1]`, ``},
{`bar`, `db3`, tpath("pg_temp_123", "public"), true, `bar`, `db3.public.bar`, `db3.public[1]`, ``},
{`bar`, `db3`, tpath("pg_temp_123", "public", "pg_temp"), true, `bar`, `db3.public.bar`, `db3.public[1]`, ``},
// Names of length 2
{`public.foo`, `db3`, tpath("pg_temp_123", "public"), true, `public.foo`, `db3.public.foo`, `db3.public[0]`, ``},
{`pg_temp.foo`, `db3`, tpath("pg_temp_123", "public"), true, `pg_temp_123.foo`, `db3.pg_temp_123.foo`, `db3.pg_temp_123[0]`, ``},
{`pg_temp_123.foo`, `db3`, tpath("pg_temp_123", "public"), true, `pg_temp_123.foo`, `db3.pg_temp_123.foo`, `db3.pg_temp_123[0]`, ``},
// Wrongly qualifying a TT/PT as a PT/TT results in an error.
{`pg_temp.bar`, `db3`, tpath("pg_temp_123", "public"), true, ``, ``, ``, `prefix or object not found`},
{`public.baz`, `db3`, tpath("pg_temp_123", "public"), true, ``, ``, ``, `prefix or object not found`},
// Cases where a session tries to access a temporary table of another session.
{`pg_temp_111.foo`, `db3`, tpath("pg_temp_123", "public"), true, ``, ``, ``, `cannot access temporary tables of other sessions`},
{`pg_temp_111.foo`, `db3`, tpath("pg_temp_123", "public"), false, ``, ``, ``, `cannot access temporary tables of other sessions`},
// Case where the temporary table being created has the same name as an
// existing persistent table.
{`pg_temp.bar`, `db3`, tpath("pg_temp_123", "public"), false, `pg_temp_123.bar`, `db3.pg_temp_123.bar`, `db3.pg_temp_123`, ``},
// Case where the persistent table being created has the same name as an
// existing temporary table.
{`public.baz`, `db3`, tpath("pg_temp_123", "public"), false, `public.baz`, `db3.public.baz`, `db3.public`, ``},
// Cases where the temporary schema has not been created yet
{`pg_temp.foo`, `db3`, mpath("public"), false, ``, ``, ``, `prefix or object not found`},
// Names of length 3
{`db3.public.foo`, `db3`, tpath("pg_temp_123", "public"), true, `db3.public.foo`, `db3.public.foo`, `db3.public[0]`, ``},
{`db3.pg_temp.foo`, `db3`, tpath("pg_temp_123", "public"), true, `db3.pg_temp_123.foo`, `db3.pg_temp_123.foo`, `db3.pg_temp_123[0]`, ``},
{`db3.pg_temp_123.foo`, `db3`, tpath("pg_temp_123", "public"), true, `db3.pg_temp_123.foo`, `db3.pg_temp_123.foo`, `db3.pg_temp_123[0]`, ``},
// Wrongly qualifying a TT/PT as a PT/TT results in an error.
{`db3.pg_temp.bar`, `db3`, tpath("pg_temp_123", "public"), true, ``, ``, ``, `prefix or object not found`},
{`db3.public.baz`, `db3`, tpath("pg_temp_123", "public"), true, ``, ``, ``, `prefix or object not found`},
// Cases where a session tries to access a temporary table of another session.
{`db3.pg_temp_111.foo`, `db3`, tpath("pg_temp_123", "public"), true, ``, ``, ``, `cannot access temporary tables of other sessions`},
{`db3.pg_temp_111.foo`, `db3`, tpath("pg_temp_123", "public"), false, ``, ``, ``, `cannot access temporary tables of other sessions`},
// Case where the temporary table being created has the same name as an
// existing persistent table.
{`db3.pg_temp.bar`, `db3`, tpath("pg_temp_123", "public"), false, `db3.pg_temp_123.bar`, `db3.pg_temp_123.bar`, `db3.pg_temp_123`, ``},
// Case where the persistent table being created has the same name as an
// existing temporary table.
{`db3.public.baz`, `db3`, tpath("pg_temp_123", "public"), false, `db3.public.baz`, `db3.public.baz`, `db3.public`, ``},
// Cases where the temporary schema has not been created yet
{`db3.pg_temp.foo`, `db3`, mpath("public"), false, ``, ``, ``, `prefix or object not found`},
}
fakeResolver := newFakeMetadata()
for _, tc := range testCases {
t.Run(fmt.Sprintf("%s/%s/%s/%v", tc.in, tc.curDb, tc.searchPath, tc.expected), func(t *testing.T) {
fakeResolver.t = t
tp, sc, err := func() (tree.TablePattern, string, error) {
stmt, err := parser.ParseOne(fmt.Sprintf("GRANT SELECT ON TABLE %s TO foo", tc.in))
if err != nil {
return nil, "", err
}
tp, err := stmt.AST.(*tree.Grant).Targets.Tables[0].NormalizeTablePattern()
if err != nil {
return nil, "", err
}
var found bool
var scPrefix, ctPrefix string
var scMeta interface{}
var obMeta interface{}
ctx := context.Background()
switch tpv := tp.(type) {
case *tree.AllTablesSelector:
found, scMeta, err = tpv.ObjectNamePrefix.Resolve(ctx, fakeResolver, tc.curDb, tc.searchPath)
scPrefix = tpv.Schema()
ctPrefix = tpv.Catalog()
case *tree.TableName:
var prefix tree.ObjectNamePrefix
if tc.expected {
flags := tree.ObjectLookupFlags{}
// TODO: As part of work for #34240, we should be operating on
// UnresolvedObjectNames here, rather than TableNames.
un := tpv.ToUnresolvedObjectName()
found, prefix, obMeta, err = tree.ResolveExisting(ctx, un, fakeResolver, flags, tc.curDb, tc.searchPath)
} else {
// TODO: As part of work for #34240, we should be operating on
// UnresolvedObjectNames here, rather than TableNames.
un := tpv.ToUnresolvedObjectName()
found, prefix, scMeta, err = tree.ResolveTarget(ctx, un, fakeResolver, tc.curDb, tc.searchPath)
}
tpv.ObjectNamePrefix = prefix
scPrefix = tpv.Schema()
ctPrefix = tpv.Catalog()
default:
t.Fatalf("%s: unknown pattern type: %T", t.Name(), tp)
}
if err != nil {
return nil, "", err
}
var scRes string
if scMeta != nil {
sc, ok := scMeta.(*knownSchema)
if !ok {
t.Fatalf("%s: scMeta not of correct type: %v", t.Name(), scMeta)
}
scRes = fmt.Sprintf("%s.%s", ctPrefix, sc.scName)
}
if obMeta != nil {
obIdx, ok := obMeta.(fakeResResult)
if !ok {
t.Fatalf("%s: obMeta not of correct type: %v", t.Name(), obMeta)
}
scRes = fmt.Sprintf("%s.%s[%d]", ctPrefix, scPrefix, obIdx)
}
if !found {
return nil, "", fmt.Errorf("prefix or object not found")
}
return tp, scRes, nil
}()
if !testutils.IsError(err, tc.err) {
t.Fatalf("%s: expected %s, but found %v", t.Name(), tc.err, err)
}
if tc.err != "" {
return
}
if out := tp.String(); tc.out != out {
t.Errorf("%s: expected %s, but found %s", t.Name(), tc.out, out)
}
switch tpv := tp.(type) {
case *tree.AllTablesSelector:
tpv.ObjectNamePrefix.ExplicitCatalog = true
tpv.ObjectNamePrefix.ExplicitSchema = true
case *tree.TableName:
tpv.ObjectNamePrefix.ExplicitCatalog = true
tpv.ObjectNamePrefix.ExplicitSchema = true
}
if out := tp.String(); tc.expanded != out {
t.Errorf("%s: expected full %s, but found %s", t.Name(), tc.expanded, out)
}
if tc.scName != sc {
t.Errorf("%s: expected schema %s, but found %s", t.Name(), tc.scName, sc)
}
})
}
}
| pkg/sql/sem/tree/name_resolution_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.00042230976396240294,
0.00017438440409023315,
0.00016120758664328605,
0.00017226125055458397,
0.00002683876118680928
] |
{
"id": 2,
"code_window": [
"func (formatCrdbV1TTY) formatterName() string { return \"crdb-v1-tty\" }\n",
"\n",
"func (formatCrdbV1TTY) formatEntry(entry logpb.Entry, stacks []byte) *buffer {\n",
"\tcp := ttycolor.StderrProfile\n",
"\tif logging.stderrSink.noColor {\n",
"\t\tcp = nil\n",
"\t}\n",
"\treturn formatLogEntryInternal(entry, false /*showCounter*/, cp, stacks)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif logging.stderrSink.noColor.Get() {\n"
],
"file_path": "pkg/util/log/format_crdb_v1.go",
"type": "replace",
"edit_start_line_idx": 64
} | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package log
import (
"context"
"fmt"
"math"
"strings"
"github.com/cockroachdb/cockroach/pkg/util/log/channel"
"github.com/cockroachdb/cockroach/pkg/util/log/logconfig"
"github.com/cockroachdb/cockroach/pkg/util/log/logflags"
"github.com/cockroachdb/cockroach/pkg/util/log/logpb"
"github.com/cockroachdb/cockroach/pkg/util/log/severity"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/errors"
)
type config struct {
// showLogs reflects the use of -show-logs on the command line and is
// used for testing.
showLogs bool
// syncWrites can be set asynchronously to force all file output to
// synchronize to disk. This is set via SetSync() and used e.g. in
// start.go upon encountering errors.
syncWrites syncutil.AtomicBool
}
var debugLog *loggerT
func init() {
logflags.InitFlags(
&logging.showLogs,
&logging.vmoduleConfig.mu.vmodule,
)
	// Set up a default configuration that applies when the logging
	// package is imported but not further initialized.
defaultConfig := logconfig.DefaultConfig()
if err := defaultConfig.Validate(nil /* no default directory */); err != nil {
panic(err)
}
// Default stderrThreshold to log everything to the process'
// external stderr (OrigStderr).
defaultConfig.Sinks.Stderr.Filter = severity.INFO
// We only register it for the DEV channels. No other
// channels get a configuration, whereby every channel
// ends up sharing the DEV logger (debugLog).
defaultConfig.Sinks.Stderr.Channels.Channels = []logpb.Channel{channel.DEV}
// We also don't capture internal writes to fd2 by default:
// let the writes go to the external stderr.
defaultConfig.CaptureFd2.Enable = false
// Since we are letting writes go to the external stderr,
// we cannot keep redaction markers there.
*defaultConfig.Sinks.Stderr.Redactable = false
// Remove all sinks other than stderr.
defaultConfig.Sinks.FileGroups = nil
if _, err := ApplyConfig(defaultConfig); err != nil {
panic(err)
}
	// Reset the "active" flag so that the main commands can reset the
// configuration.
logging.mu.active = false
}
// IsActive returns true iff the main logger already has some events
// logged, or some secondary logger was created with configuration
// taken from the main logger.
//
// This is used to assert that configuration is performed
// before logging has been used for the first time.
func IsActive() (active bool, firstUse string) {
logging.mu.Lock()
defer logging.mu.Unlock()
return logging.mu.active, logging.mu.firstUseStack
}
// ApplyConfig applies the given configuration.
//
// The returned cleanup fn can be invoked by the caller to close
// asynchronous processes.
// NB: This is only useful in tests: for a long-running server process the
// cleanup function should likely not be called, to ensure that the
// file used to capture internal fd2 writes remains open up until the
// process entirely terminates. This ensures that any Go runtime
// assertion failures on the way to termination can be properly
// captured.
func ApplyConfig(config logconfig.Config) (cleanupFn func(), err error) {
// Sanity check.
if active, firstUse := IsActive(); active {
panic(errors.Newf("logging already active; first use:\n%s", firstUse))
}
// Our own cancellable context to stop the secondary loggers below.
//
// Note: we don't want to take a cancellable context from the
// caller, because in the usual case we don't want to stop the
// logger when the remainder of the process stops. See the
// discussion on cancel at the top of the function.
secLoggersCtx, secLoggersCancel := context.WithCancel(context.Background())
// secLoggers collects the secondary loggers derived by the configuration.
var secLoggers []*loggerT
// sinkInfos collects the sinkInfos derived by the configuration.
var sinkInfos []*sinkInfo
// fd2CaptureCleanupFn is the cleanup function for the fd2 capture,
// which is populated if fd2 capture is enabled, below.
fd2CaptureCleanupFn := func() {}
// cleanupFn is the returned cleanup function, whose purpose
// is to tear down the work we are doing here.
cleanupFn = func() {
// Reset the logging channels to default.
si := logging.stderrSinkInfoTemplate
logging.setChannelLoggers(make(map[Channel]*loggerT), &si)
fd2CaptureCleanupFn()
secLoggersCancel()
for _, l := range secLoggers {
allLoggers.del(l)
}
for _, l := range sinkInfos {
allSinkInfos.del(l)
}
}
// If capture of internal fd2 writes is enabled, set it up here.
if config.CaptureFd2.Enable {
if logging.testingFd2CaptureLogger != nil {
cleanupFn()
return nil, errors.New("fd2 capture already set up. Maybe use TestLogScope?")
}
// We use a secondary logger, even though no logging *event* will ever
// be logged to it, for the convenience of getting a standard log
// file header at the beginning of the file (which will contain
// a timestamp, command-line arguments, etc.).
secLogger := &loggerT{}
allLoggers.put(secLogger)
secLoggers = append(secLoggers, secLogger)
// A pseudo file sink. Again, for convenience, so we don't need
// to implement separate file management.
bt, bf := true, false
mf := logconfig.ByteSize(math.MaxInt64)
f := logconfig.DefaultFileFormat
fakeConfig := logconfig.FileConfig{
CommonSinkConfig: logconfig.CommonSinkConfig{
Filter: severity.INFO,
Criticality: &bt,
Format: &f,
Redact: &bf,
// Be careful about stripping the redaction markers from log
// entries. The captured fd2 writes are inherently unsafe, so
// we don't want the header entry to give a mistaken
// impression to the entry parser.
Redactable: &bf,
},
Dir: config.CaptureFd2.Dir,
MaxGroupSize: config.CaptureFd2.MaxGroupSize,
MaxFileSize: &mf,
SyncWrites: &bt,
}
fileSinkInfo, fileSink, err := newFileSinkInfo("stderr", fakeConfig)
if err != nil {
cleanupFn()
return nil, err
}
sinkInfos = append(sinkInfos, fileSinkInfo)
allSinkInfos.put(fileSinkInfo)
if fileSink.logFilesCombinedMaxSize > 0 {
			// Do a first round of GC to clear up past accumulated files.
fileSink.gcOldFiles()
// Start the GC process. This ensures that old capture files get
// erased as new files get created.
go fileSink.gcDaemon(secLoggersCtx)
}
// Connect the sink to the logger.
secLogger.sinkInfos = []*sinkInfo{fileSinkInfo}
// Force a log entry. This does two things: it forces the creation
// of a file and it also introduces a timestamp marker.
entry := MakeEntry(secLoggersCtx, severity.INFO, channel.DEV, 0, false,
"stderr capture started")
secLogger.outputLogEntry(entry)
// Now tell this logger to capture internal stderr writes.
if err := fileSink.takeOverInternalStderr(secLogger); err != nil {
// Oof, it turns out we can't use this logger after all. Give up
// on everything we did.
cleanupFn()
return nil, err
}
// Now inform the other functions using stderrLog that we
// have a new logger for it.
logging.testingFd2CaptureLogger = secLogger
fd2CaptureCleanupFn = func() {
// Relinquish the stderr redirect.
if err := secLogger.getFileSink().relinquishInternalStderr(); err != nil {
// This should not fail. If it does, some caller messed up by
// switching over stderr redirection to a different logger
// without our involvement. That's invalid API usage.
panic(err)
}
// Restore the apparent stderr logger used by Shout() and tests.
logging.testingFd2CaptureLogger = nil
// Note: the remainder of the code in cleanupFn() will remove
// the logger and close it. No need to also do it here.
}
}
// Apply the stderr sink configuration.
logging.stderrSink.noColor = config.Sinks.Stderr.NoColor
if err := logging.stderrSinkInfoTemplate.applyConfig(config.Sinks.Stderr.CommonSinkConfig); err != nil {
cleanupFn()
return nil, err
}
// Create the per-channel loggers.
chans := make(map[Channel]*loggerT, len(logpb.Channel_name))
for chi := range logpb.Channel_name {
ch := Channel(chi)
chans[ch] = &loggerT{}
if ch == channel.DEV {
debugLog = chans[ch]
}
}
// Make a copy of the template so that any subsequent config
// changes don't race with logging operations.
stderrSinkInfo := logging.stderrSinkInfoTemplate
// Connect the stderr channels.
for _, ch := range config.Sinks.Stderr.Channels.Channels {
// Note: we connect stderr even if the severity is NONE
// so that tests can raise the severity after configuration.
l := chans[ch]
l.sinkInfos = append(l.sinkInfos, &stderrSinkInfo)
}
// Create the file sinks.
for prefix, fc := range config.Sinks.FileGroups {
if fc.Filter == severity.NONE || fc.Dir == nil {
continue
}
if prefix == "default" {
prefix = ""
}
fileSinkInfo, _, err := newFileSinkInfo(prefix, *fc)
if err != nil {
cleanupFn()
return nil, err
}
sinkInfos = append(sinkInfos, fileSinkInfo)
allSinkInfos.put(fileSinkInfo)
// Connect the channels for this sink.
for _, ch := range fc.Channels.Channels {
l := chans[ch]
l.sinkInfos = append(l.sinkInfos, fileSinkInfo)
}
}
logging.setChannelLoggers(chans, &stderrSinkInfo)
setActive()
return cleanupFn, nil
}
// newFileSinkInfo creates a new fileSink and its accompanying sinkInfo
// from the provided configuration.
func newFileSinkInfo(fileNamePrefix string, c logconfig.FileConfig) (*sinkInfo, *fileSink, error) {
info := &sinkInfo{}
if err := info.applyConfig(c.CommonSinkConfig); err != nil {
return nil, nil, err
}
fileSink := newFileSink(
*c.Dir,
fileNamePrefix,
*c.SyncWrites,
int64(*c.MaxFileSize),
int64(*c.MaxGroupSize),
info.getStartLines)
info.sink = fileSink
return info, fileSink, nil
}
// applyConfig applies a common sink configuration to a sinkInfo.
func (l *sinkInfo) applyConfig(c logconfig.CommonSinkConfig) error {
l.threshold = c.Filter
l.redact = *c.Redact
l.redactable = *c.Redactable
l.editor = getEditor(SelectEditMode(*c.Redact, *c.Redactable))
l.criticality = *c.Criticality
f, ok := formatters[*c.Format]
if !ok {
return errors.Newf("unknown format: %q", *c.Format)
}
l.formatter = f
return nil
}
// describeAppliedConfig reports a sinkInfo's configuration as a
// CommonSinkConfig. Note that the returned config object
// holds into the sinkInfo parameters by reference and thus should
// not be reused if the configuration can change asynchronously.
func (l *sinkInfo) describeAppliedConfig() (c logconfig.CommonSinkConfig) {
c.Filter = l.threshold
c.Redact = &l.redact
c.Redactable = &l.redactable
c.Criticality = &l.criticality
f := l.formatter.formatterName()
c.Format = &f
return c
}
// TestingResetActive clears the active bit. This is for use in tests
// that use stderr redirection alongside other tests that use
// logging.
func TestingResetActive() {
logging.mu.Lock()
defer logging.mu.Unlock()
logging.mu.active = false
}
// DescribeAppliedConfig describes the current setup as effected by
// ApplyConfig(). This is useful in tests and also to check
// when something may be wrong with the logging configuration.
func DescribeAppliedConfig() string {
var config logconfig.Config
// Describe the fd2 capture, if installed.
if logging.testingFd2CaptureLogger != nil {
config.CaptureFd2.Enable = true
fs := logging.testingFd2CaptureLogger.sinkInfos[0].sink.(*fileSink)
fs.mu.Lock()
dir := fs.mu.logDir
fs.mu.Unlock()
config.CaptureFd2.Dir = &dir
m := logconfig.ByteSize(fs.logFilesCombinedMaxSize)
config.CaptureFd2.MaxGroupSize = &m
}
// Describe the stderr sink.
config.Sinks.Stderr.NoColor = logging.stderrSink.noColor
config.Sinks.Stderr.CommonSinkConfig = logging.stderrSinkInfoTemplate.describeAppliedConfig()
describeConnections := func(l *loggerT, ch Channel,
target *sinkInfo, list *logconfig.ChannelList) {
for _, s := range l.sinkInfos {
if s == target {
list.Channels = append(list.Channels, ch)
}
}
list.Sort()
}
// Describe the connections to the stderr sink.
logging.rmu.RLock()
chans := logging.rmu.channels
stderrSinkInfo := logging.rmu.currentStderrSinkInfo
logging.rmu.RUnlock()
for ch, logger := range chans {
describeConnections(logger, ch,
stderrSinkInfo, &config.Sinks.Stderr.Channels)
}
// Describe the file sinks.
config.Sinks.FileGroups = make(map[string]*logconfig.FileConfig)
_ = allSinkInfos.iter(func(l *sinkInfo) error {
if cl := logging.testingFd2CaptureLogger; cl != nil && cl.sinkInfos[0] == l {
// Not a real sink. Omit.
return nil
}
fileSink, ok := l.sink.(*fileSink)
if !ok {
return nil
}
fc := &logconfig.FileConfig{}
fc.CommonSinkConfig = l.describeAppliedConfig()
mf := logconfig.ByteSize(fileSink.logFileMaxSize)
fc.MaxFileSize = &mf
mg := logconfig.ByteSize(fileSink.logFilesCombinedMaxSize)
fc.MaxGroupSize = &mg
fileSink.mu.Lock()
dir := fileSink.mu.logDir
fileSink.mu.Unlock()
fc.Dir = &dir
fc.SyncWrites = &fileSink.syncWrites
// Describe the connections to this file sink.
for ch, logger := range chans {
describeConnections(logger, ch, l, &fc.Channels)
}
prefix := strings.TrimPrefix(fileSink.prefix, program)
if prefix == "" {
prefix = "default"
} else {
prefix = strings.TrimPrefix(prefix, "-")
}
if prev, ok := config.Sinks.FileGroups[prefix]; ok {
fmt.Fprintf(OrigStderr,
"warning: multiple file loggers with prefix %q, previous: %+v\n",
prefix, prev)
}
config.Sinks.FileGroups[prefix] = fc
return nil
})
// Note: we cannot return 'config' directly, because this captures
// certain variables from the loggers by reference and thus could be
// invalidated by concurrent uses of ApplyConfig().
return config.String()
}
| pkg/util/log/flags.go | 1 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.6083219051361084,
0.02115599997341633,
0.00016318593407049775,
0.0001961647067219019,
0.0988863930106163
] |
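// The flags.go snapshot above documents that ApplyConfig returns a cleanup
// function that is mainly useful in tests. A minimal, hypothetical sketch of
// that test-only usage, based solely on the doc comments and init() shown in
// the file (the helper name and test wiring are assumptions, not part of the
// commit):
package log_test

import (
	"testing"

	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/log/logconfig"
)

// applyDefaultConfigForTest validates the default configuration and applies
// it, registering the returned cleanup function as the ApplyConfig comment
// recommends for tests.
func applyDefaultConfigForTest(t *testing.T) {
	// Tests that reconfigure logging may need to clear the "active" bit first,
	// as described on TestingResetActive in the file above.
	log.TestingResetActive()

	cfg := logconfig.DefaultConfig()
	// Validate with no default directory, mirroring init() in flags.go.
	if err := cfg.Validate(nil); err != nil {
		t.Fatal(err)
	}
	cleanup, err := log.ApplyConfig(cfg)
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(cleanup)
}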
{
"id": 2,
"code_window": [
"func (formatCrdbV1TTY) formatterName() string { return \"crdb-v1-tty\" }\n",
"\n",
"func (formatCrdbV1TTY) formatEntry(entry logpb.Entry, stacks []byte) *buffer {\n",
"\tcp := ttycolor.StderrProfile\n",
"\tif logging.stderrSink.noColor {\n",
"\t\tcp = nil\n",
"\t}\n",
"\treturn formatLogEntryInternal(entry, false /*showCounter*/, cp, stacks)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif logging.stderrSink.noColor.Get() {\n"
],
"file_path": "pkg/util/log/format_crdb_v1.go",
"type": "replace",
"edit_start_line_idx": 64
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in licenses/BSD-golang.txt.
// Package csv reads and writes comma-separated values (CSV) files.
// There are many kinds of CSV files; this package supports the format
// described in RFC 4180.
//
// A csv file contains zero or more records of one or more fields per record.
// Each record is separated by the newline character. The final record may
// optionally be followed by a newline character.
//
// field1,field2,field3
//
// White space is considered part of a field.
//
// Carriage returns before newline characters are silently removed.
//
// Blank lines are ignored. A line with only whitespace characters (excluding
// the ending newline character) is not considered a blank line.
//
// Fields which start and stop with the quote character " are called
// quoted-fields. The beginning and ending quote are not part of the
// field.
//
// The source:
//
// normal string,"quoted-field"
//
// results in the fields
//
// {`normal string`, `quoted-field`}
//
// Within a quoted-field a quote character followed by a second quote
// character is considered a single quote.
//
// "the ""word"" is true","a ""quoted-field"""
//
// results in
//
// {`the "word" is true`, `a "quoted-field"`}
//
// Newlines and commas may be included in a quoted-field
//
// "Multi-line
// field","comma is ,"
//
// results in
//
// {`Multi-line
// field`, `comma is ,`}
package csv
import (
"bufio"
"bytes"
"fmt"
"io"
"unicode"
"unicode/utf8"
"github.com/cockroachdb/errors"
)
// A ParseError is returned for parsing errors.
// Line numbers are 1-indexed and columns are 0-indexed.
type ParseError struct {
StartLine int // Line where the record starts
Line int // Line where the error occurred
Column int // Column (rune index) where the error occurred
Err error // The actual error
}
var _ error = (*ParseError)(nil)
var _ fmt.Formatter = (*ParseError)(nil)
var _ errors.Formatter = (*ParseError)(nil)
// Error implements error.
func (e *ParseError) Error() string { return fmt.Sprintf("%v", e) }
// Cause implements causer.
func (e *ParseError) Cause() error { return e.Err }
// Format implements fmt.Formatter.
func (e *ParseError) Format(s fmt.State, verb rune) { errors.FormatError(e, s, verb) }
// FormatError implements errors.Formatter.
func (e *ParseError) FormatError(p errors.Printer) error {
if errors.Is(e.Err, ErrFieldCount) {
p.Printf("record on line %d", e.Line)
} else if e.StartLine != e.Line {
p.Printf("record on line %d; parse error on line %d, column %d", e.StartLine, e.Line, e.Column)
} else {
p.Printf("parse error on line %d, column %d", e.Line, e.Column)
}
return e.Err
}
// These are the errors that can be returned in ParseError.Err.
var (
ErrBareQuote = errors.New("bare \" in non-quoted-field")
ErrQuote = errors.New("extraneous or missing \" in quoted-field")
ErrFieldCount = errors.New("wrong number of fields")
)
var errInvalidDelim = errors.New("csv: invalid field or comment delimiter")
func validDelim(r rune) bool {
return r != 0 && r != '\r' && r != '\n' && utf8.ValidRune(r) && r != utf8.RuneError
}
// A Reader reads records from a CSV-encoded file.
//
// As returned by NewReader, a Reader expects input conforming to RFC 4180.
// The exported fields can be changed to customize the details before the
// first call to Read or ReadAll.
type Reader struct {
// Comma is the field delimiter.
// It is set to comma (',') by NewReader.
Comma rune
// Comment, if not 0, is the comment character. Lines beginning with the
// Comment character without preceding whitespace are ignored.
// With leading whitespace the Comment character becomes part of the
// field, even if TrimLeadingSpace is true.
Comment rune
// FieldsPerRecord is the number of expected fields per record.
// If FieldsPerRecord is positive, Read requires each record to
// have the given number of fields. If FieldsPerRecord is 0, Read sets it to
// the number of fields in the first record, so that future records must
// have the same field count. If FieldsPerRecord is negative, no check is
// made and records may have a variable number of fields.
FieldsPerRecord int
// If LazyQuotes is true, a quote may appear in an unquoted field and a
// non-doubled quote may appear in a quoted field.
LazyQuotes bool
// If TrimLeadingSpace is true, leading white space in a field is ignored.
// This is done even if the field delimiter, Comma, is white space.
TrimLeadingSpace bool
// ReuseRecord controls whether calls to Read may return a slice sharing
// the backing array of the previous call's returned slice for performance.
// By default, each call to Read returns newly allocated memory owned by the caller.
ReuseRecord bool
r *bufio.Reader
// numLine is the current line being read in the CSV file.
numLine int
// rawBuffer is a line buffer only used by the readLine method.
rawBuffer []byte
// recordBuffer holds the unescaped fields, one after another.
// The fields can be accessed by using the indexes in fieldIndexes.
// E.g., For the row `a,"b","c""d",e`, recordBuffer will contain `abc"de`
// and fieldIndexes will contain the indexes [1, 2, 5, 6].
recordBuffer []byte
// fieldIndexes is an index of fields inside recordBuffer.
// The i'th field ends at offset fieldIndexes[i] in recordBuffer.
fieldIndexes []int
// lastRecord is a record cache and only used when ReuseRecord == true.
lastRecord []string
}
// NewReader returns a new Reader that reads from r.
func NewReader(r io.Reader) *Reader {
return &Reader{
Comma: ',',
r: bufio.NewReader(r),
}
}
// Read reads one record (a slice of fields) from r.
// If the record has an unexpected number of fields,
// Read returns the record along with the error ErrFieldCount.
// Except for that case, Read always returns either a non-nil
// record or a non-nil error, but not both.
// If there is no data left to be read, Read returns nil, io.EOF.
// If ReuseRecord is true, the returned slice may be shared
// between multiple calls to Read.
func (r *Reader) Read() (record []string, err error) {
if r.ReuseRecord {
record, err = r.readRecord(r.lastRecord)
r.lastRecord = record
} else {
record, err = r.readRecord(nil)
}
return record, err
}
// ReadAll reads all the remaining records from r.
// Each record is a slice of fields.
// A successful call returns err == nil, not err == io.EOF. Because ReadAll is
// defined to read until EOF, it does not treat end of file as an error to be
// reported.
func (r *Reader) ReadAll() (records [][]string, err error) {
for {
record, err := r.readRecord(nil)
if err == io.EOF {
return records, nil
}
if err != nil {
return nil, err
}
records = append(records, record)
}
}
// readLine reads the next line (with the trailing endline).
// If EOF is hit without a trailing endline, it will be omitted.
// If some bytes were read, then the error is never io.EOF.
// The result is only valid until the next call to readLine.
func (r *Reader) readLine() ([]byte, error) {
line, err := r.r.ReadSlice('\n')
if errors.Is(err, bufio.ErrBufferFull) {
r.rawBuffer = append(r.rawBuffer[:0], line...)
for errors.Is(err, bufio.ErrBufferFull) {
line, err = r.r.ReadSlice('\n')
r.rawBuffer = append(r.rawBuffer, line...)
}
line = r.rawBuffer
}
if len(line) > 0 && err == io.EOF {
err = nil
// For backwards compatibility, drop trailing \r before EOF.
if line[len(line)-1] == '\r' {
line = line[:len(line)-1]
}
}
r.numLine++
return line, err
}
// lengthCRLF reports the number of bytes for a trailing "\r\n".
func lengthCRLF(b []byte) int {
if j := len(b) - 1; j >= 0 && b[j] == '\n' {
if j := len(b) - 2; j >= 0 && b[j] == '\r' {
return 2
}
return 1
}
return 0
}
// nextRune returns the next rune in b or utf8.RuneError.
func nextRune(b []byte) rune {
r, _ := utf8.DecodeRune(b)
return r
}
func (r *Reader) readRecord(dst []string) ([]string, error) {
if r.Comma == r.Comment || !validDelim(r.Comma) || (r.Comment != 0 && !validDelim(r.Comment)) {
return nil, errInvalidDelim
}
// Read line (automatically skipping past empty lines and any comments).
var line, fullLine []byte
var errRead error
for errRead == nil {
line, errRead = r.readLine()
if r.Comment != 0 && nextRune(line) == r.Comment {
line = nil
continue // Skip comment lines
}
if errRead == nil && len(line) == lengthCRLF(line) {
line = nil
continue // Skip empty lines
}
fullLine = line
break
}
if errRead == io.EOF {
return nil, errRead
}
// Parse each field in the record.
var err error
const quoteLen = len(`"`)
commaLen := utf8.RuneLen(r.Comma)
recLine := r.numLine // Starting line for record
r.recordBuffer = r.recordBuffer[:0]
r.fieldIndexes = r.fieldIndexes[:0]
parseField:
for {
if r.TrimLeadingSpace {
line = bytes.TrimLeftFunc(line, unicode.IsSpace)
}
if len(line) == 0 || line[0] != '"' {
// Non-quoted string field
i := bytes.IndexRune(line, r.Comma)
field := line
if i >= 0 {
field = field[:i]
} else {
field = field[:len(field)-lengthCRLF(field)]
}
// Check to make sure a quote does not appear in field.
if !r.LazyQuotes {
if j := bytes.IndexByte(field, '"'); j >= 0 {
col := utf8.RuneCount(fullLine[:len(fullLine)-len(line[j:])])
err = &ParseError{StartLine: recLine, Line: r.numLine, Column: col, Err: ErrBareQuote}
break parseField
}
}
r.recordBuffer = append(r.recordBuffer, field...)
r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
if i >= 0 {
line = line[i+commaLen:]
continue parseField
}
break parseField
} else {
// Quoted string field
line = line[quoteLen:]
for {
i := bytes.IndexByte(line, '"')
if i >= 0 {
// Hit next quote.
r.recordBuffer = append(r.recordBuffer, line[:i]...)
line = line[i+quoteLen:]
switch rn := nextRune(line); {
case rn == '"':
// `""` sequence (append quote).
r.recordBuffer = append(r.recordBuffer, '"')
line = line[quoteLen:]
case rn == r.Comma:
// `",` sequence (end of field).
line = line[commaLen:]
r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
continue parseField
case lengthCRLF(line) == len(line):
// `"\n` sequence (end of line).
r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
break parseField
case r.LazyQuotes:
// `"` sequence (bare quote).
r.recordBuffer = append(r.recordBuffer, '"')
default:
// `"*` sequence (invalid non-escaped quote).
col := utf8.RuneCount(fullLine[:len(fullLine)-len(line)-quoteLen])
err = &ParseError{StartLine: recLine, Line: r.numLine, Column: col, Err: ErrQuote}
break parseField
}
} else if len(line) > 0 {
// Hit end of line (copy all data so far).
r.recordBuffer = append(r.recordBuffer, line...)
if errRead != nil {
break parseField
}
line, errRead = r.readLine()
if errRead == io.EOF {
errRead = nil
}
fullLine = line
} else {
// Abrupt end of file (EOF or error).
if !r.LazyQuotes && errRead == nil {
col := utf8.RuneCount(fullLine)
err = &ParseError{StartLine: recLine, Line: r.numLine, Column: col, Err: ErrQuote}
break parseField
}
r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
break parseField
}
}
}
}
if err == nil {
err = errRead
}
// Create a single string and create slices out of it.
// This pins the memory of the fields together, but allocates once.
str := string(r.recordBuffer) // Convert to string once to batch allocations
dst = dst[:0]
if cap(dst) < len(r.fieldIndexes) {
dst = make([]string, len(r.fieldIndexes))
}
dst = dst[:len(r.fieldIndexes)]
var preIdx int
for i, idx := range r.fieldIndexes {
dst[i] = str[preIdx:idx]
preIdx = idx
}
// Check or update the expected fields per record.
if r.FieldsPerRecord > 0 {
if len(dst) != r.FieldsPerRecord && err == nil {
err = &ParseError{StartLine: recLine, Line: recLine, Err: ErrFieldCount}
}
} else if r.FieldsPerRecord == 0 {
r.FieldsPerRecord = len(dst)
}
return dst, err
}
| pkg/util/encoding/csv/reader.go | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.0006935102283023298,
0.0001884291268652305,
0.00016323730233125389,
0.00016942276852205396,
0.00008247925870819017
] |
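// The csv Reader above documents quoted fields, FieldsPerRecord checking, and
// ReadAll in its comments. A small, hypothetical usage sketch of that API (the
// input string is made up for illustration):
package main

import (
	"fmt"
	"strings"

	"github.com/cockroachdb/cockroach/pkg/util/encoding/csv"
)

func main() {
	// Quoted fields may contain commas and doubled quotes, per the package
	// comment in reader.go.
	const in = "a,\"b,1\",c\nd,\"e \"\"quoted\"\"\",f\n"

	r := csv.NewReader(strings.NewReader(in))
	r.FieldsPerRecord = 3 // require exactly three fields per record

	records, err := r.ReadAll()
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for _, rec := range records {
		fmt.Println(rec) // [a b,1 c] then [d e "quoted" f]
	}
}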
{
"id": 2,
"code_window": [
"func (formatCrdbV1TTY) formatterName() string { return \"crdb-v1-tty\" }\n",
"\n",
"func (formatCrdbV1TTY) formatEntry(entry logpb.Entry, stacks []byte) *buffer {\n",
"\tcp := ttycolor.StderrProfile\n",
"\tif logging.stderrSink.noColor {\n",
"\t\tcp = nil\n",
"\t}\n",
"\treturn formatLogEntryInternal(entry, false /*showCounter*/, cp, stacks)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif logging.stderrSink.noColor.Get() {\n"
],
"file_path": "pkg/util/log/format_crdb_v1.go",
"type": "replace",
"edit_start_line_idx": 64
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package ts
import (
"context"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/mon"
)
// ContainsTimeSeries returns true if the given key range overlaps the
// range of possible time series keys.
func (tsdb *DB) ContainsTimeSeries(start, end roachpb.RKey) bool {
return !lastTSRKey.Less(start) && !end.Less(firstTSRKey)
}
// MaintainTimeSeries provides a function that can be called from an external
// process periodically in order to perform "maintenance" work on time series
// data. Currently, this includes computing rollups and pruning data which has
// exceeded its retention threshold, as well as computing low-resolution rollups
// of data. This system was designed specifically to be used by the scanner queue
// from the storage package.
//
// The snapshot should be supplied by a local store, and is used only to
// discover the names of time series which are stored in that snapshot. The KV
// client is then used to interact with data from the time series that are
// discovered; this may result in data being deleted, but may also write new
// data in the form of rollups.
//
// The snapshot is used for key discovery (as opposed to the KV client) because
// the task of pruning time series is distributed across the cluster to the
// individual ranges which contain that time series data. Because replicas of
// those ranges are guaranteed to have time series data locally, we can use the
// snapshot to quickly obtain a set of keys to be pruned with no network calls.
func (tsdb *DB) MaintainTimeSeries(
ctx context.Context,
snapshot storage.Reader,
start, end roachpb.RKey,
db *kv.DB,
mem *mon.BytesMonitor,
budgetBytes int64,
now hlc.Timestamp,
) error {
series, err := tsdb.findTimeSeries(snapshot, start, end, now)
if err != nil {
return err
}
if tsdb.WriteRollups() {
qmc := MakeQueryMemoryContext(mem, mem, QueryMemoryOptions{
BudgetBytes: budgetBytes,
})
if err := tsdb.rollupTimeSeries(ctx, series, now, qmc); err != nil {
return err
}
}
return tsdb.pruneTimeSeries(ctx, db, series, now)
}
// Assert that DB implements the necessary interface from the storage package.
var _ kvserver.TimeSeriesDataStore = (*DB)(nil)
| pkg/ts/maintenance.go | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.00018122929031960666,
0.00017291904077865183,
0.00016547390259802341,
0.000171384061104618,
0.00000556193663214799
] |
{
"id": 2,
"code_window": [
"func (formatCrdbV1TTY) formatterName() string { return \"crdb-v1-tty\" }\n",
"\n",
"func (formatCrdbV1TTY) formatEntry(entry logpb.Entry, stacks []byte) *buffer {\n",
"\tcp := ttycolor.StderrProfile\n",
"\tif logging.stderrSink.noColor {\n",
"\t\tcp = nil\n",
"\t}\n",
"\treturn formatLogEntryInternal(entry, false /*showCounter*/, cp, stacks)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif logging.stderrSink.noColor.Get() {\n"
],
"file_path": "pkg/util/log/format_crdb_v1.go",
"type": "replace",
"edit_start_line_idx": 64
} | show_stmt ::=
| docs/generated/sql/bnf/show_constraints.bnf | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.00016757220146246254,
0.00016757220146246254,
0.00016757220146246254,
0.00016757220146246254,
0
] |
{
"id": 3,
"code_window": [
"func (formatCrdbV1TTYWithCounter) formatterName() string { return \"crdb-v1-tty-count\" }\n",
"\n",
"func (formatCrdbV1TTYWithCounter) formatEntry(entry logpb.Entry, stacks []byte) *buffer {\n",
"\tcp := ttycolor.StderrProfile\n",
"\tif logging.stderrSink.noColor {\n",
"\t\tcp = nil\n",
"\t}\n",
"\treturn formatLogEntryInternal(entry, true /*showCounter*/, cp, stacks)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif logging.stderrSink.noColor.Get() {\n"
],
"file_path": "pkg/util/log/format_crdb_v1.go",
"type": "replace",
"edit_start_line_idx": 79
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package log
import "github.com/cockroachdb/cockroach/pkg/cli/exit"
// Type of a stderr copy sink.
type stderrSink struct {
	// the --no-color flag. When set, it disables escape codes on the
// stderr copy.
noColor bool
}
// active implements the logSink interface.
func (l *stderrSink) active() bool { return true }
// attachHints implements the logSink interface.
func (l *stderrSink) attachHints(stacks []byte) []byte {
return stacks
}
// output implements the logSink interface.
func (l *stderrSink) output(_ bool, b []byte) error {
_, err := OrigStderr.Write(b)
return err
}
// exitCode implements the logSink interface.
func (l *stderrSink) exitCode() exit.Code {
return exit.LoggingStderrUnavailable()
}
// emergencyOutput implements the logSink interface.
func (l *stderrSink) emergencyOutput(b []byte) {
_, _ = OrigStderr.Write(b)
}
| pkg/util/log/stderr_sink.go | 1 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.00624628271907568,
0.0032172941137105227,
0.00017793267033994198,
0.0034792853984981775,
0.0021616241429001093
] |
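// The hunk records above repeatedly rewrite `logging.stderrSink.noColor` into
// `logging.stderrSink.noColor.Get()`, which implies the plain bool field on
// stderrSink becomes an atomically readable flag. A minimal sketch of that
// accessor pattern, assuming syncutil.AtomicBool (the type flags.go already
// uses for syncWrites); this is an illustration, not the commit's actual diff:
package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/util/syncutil"
)

// stderrSinkSketch mirrors the shape implied by the after_edit snippets: the
// no-color setting is read through an atomic accessor so it can be flipped
// concurrently with log writes.
type stderrSinkSketch struct {
	noColor syncutil.AtomicBool
}

func main() {
	var s stderrSinkSketch
	s.noColor.Set(true) // e.g. applied from the stderr sink configuration
	if s.noColor.Get() {
		fmt.Println("color escape codes disabled")
	}
}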
{
"id": 3,
"code_window": [
"func (formatCrdbV1TTYWithCounter) formatterName() string { return \"crdb-v1-tty-count\" }\n",
"\n",
"func (formatCrdbV1TTYWithCounter) formatEntry(entry logpb.Entry, stacks []byte) *buffer {\n",
"\tcp := ttycolor.StderrProfile\n",
"\tif logging.stderrSink.noColor {\n",
"\t\tcp = nil\n",
"\t}\n",
"\treturn formatLogEntryInternal(entry, true /*showCounter*/, cp, stacks)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif logging.stderrSink.noColor.Get() {\n"
],
"file_path": "pkg/util/log/format_crdb_v1.go",
"type": "replace",
"edit_start_line_idx": 79
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package opttester
import (
"fmt"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical"
"github.com/cockroachdb/cockroach/pkg/sql/opt/xform"
)
// forcingOptimizer is a wrapper around an Optimizer which adds low-level
// control, like restricting rule application or the expressions that can be
// part of the final expression.
type forcingOptimizer struct {
o xform.Optimizer
groups memoGroups
coster forcingCoster
// remaining is the number of "unused" steps remaining.
remaining int
// lastMatched records the name of the rule that was most recently matched
// by the optimizer.
lastMatched opt.RuleName
// lastApplied records the name of the rule that was most recently applied by
	// the optimizer. This is not necessarily the same as lastMatched because
// normalization rules can run in-between the match and the application of an
// exploration rule.
lastApplied opt.RuleName
// lastAppliedSource is the expression matched by an exploration rule, or is
// nil for a normalization rule.
lastAppliedSource opt.Expr
// lastAppliedTarget is the new expression constructed by a normalization or
// exploration rule. For an exploration rule, it can be nil if no expressions
// were constructed, or can have additional expressions beyond the first that
// are accessible via NextExpr links.
lastAppliedTarget opt.Expr
}
// newForcingOptimizer creates a forcing optimizer that stops applying any rules
// after <steps> rules are matched. If ignoreNormRules is true, normalization
// rules don't count against this limit.
func newForcingOptimizer(
tester *OptTester, steps int, ignoreNormRules bool,
) (*forcingOptimizer, error) {
fo := &forcingOptimizer{
remaining: steps,
lastMatched: opt.InvalidRuleName,
}
fo.o.Init(&tester.evalCtx, tester.catalog)
fo.o.Factory().FoldingControl().AllowStableFolds()
fo.coster.Init(&fo.o, &fo.groups)
fo.o.SetCoster(&fo.coster)
fo.o.NotifyOnMatchedRule(func(ruleName opt.RuleName) bool {
if ignoreNormRules && ruleName.IsNormalize() {
return true
}
if fo.remaining == 0 {
return false
}
if tester.Flags.DisableRules.Contains(int(ruleName)) {
return false
}
fo.remaining--
fo.lastMatched = ruleName
return true
})
// Hook the AppliedRule notification in order to track the portion of the
// expression tree affected by each transformation rule.
fo.o.NotifyOnAppliedRule(
func(ruleName opt.RuleName, source, target opt.Expr) {
if ignoreNormRules && ruleName.IsNormalize() {
return
}
fo.lastApplied = ruleName
fo.lastAppliedSource = source
fo.lastAppliedTarget = target
},
)
fo.o.Memo().NotifyOnNewGroup(func(expr opt.Expr) {
fo.groups.AddGroup(expr)
})
if err := tester.buildExpr(fo.o.Factory()); err != nil {
return nil, err
}
return fo, nil
}
func (fo *forcingOptimizer) Optimize() opt.Expr {
expr, err := fo.o.Optimize()
if err != nil {
// Print the full error (it might contain a stack trace).
fmt.Printf("%+v\n", err)
panic(err)
}
return expr
}
// LookupPath returns the path of the given node.
func (fo *forcingOptimizer) LookupPath(target opt.Expr) []memoLoc {
return fo.groups.FindPath(fo.o.Memo().RootExpr(), target)
}
// RestrictToExpr sets up the optimizer to restrict the result to only those
// expression trees which include the given expression path.
func (fo *forcingOptimizer) RestrictToExpr(path []memoLoc) {
for _, l := range path {
fo.coster.RestrictGroupToMember(l)
}
}
// forcingCoster implements the xform.Coster interface so that it can suppress
// expressions in the memo that can't be part of the output tree.
type forcingCoster struct {
o *xform.Optimizer
groups *memoGroups
inner xform.Coster
restricted map[groupID]memberOrd
}
func (fc *forcingCoster) Init(o *xform.Optimizer, groups *memoGroups) {
fc.o = o
fc.groups = groups
fc.inner = o.Coster()
}
// RestrictGroupToMember forces the expression in the given location to be the
// best expression for its group.
func (fc *forcingCoster) RestrictGroupToMember(loc memoLoc) {
if fc.restricted == nil {
fc.restricted = make(map[groupID]memberOrd)
}
fc.restricted[loc.group] = loc.member
}
// ComputeCost is part of the xform.Coster interface.
func (fc *forcingCoster) ComputeCost(e memo.RelExpr, required *physical.Required) memo.Cost {
if fc.restricted != nil {
loc := fc.groups.MemoLoc(e)
if mIdx, ok := fc.restricted[loc.group]; ok && loc.member != mIdx {
return memo.MaxCost
}
}
return fc.inner.ComputeCost(e, required)
}
| pkg/sql/opt/testutils/opttester/forcing_opt.go | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.0022733998484909534,
0.0003804763255175203,
0.00016476257587783039,
0.0001702555746305734,
0.000527700234670192
] |
{
"id": 3,
"code_window": [
"func (formatCrdbV1TTYWithCounter) formatterName() string { return \"crdb-v1-tty-count\" }\n",
"\n",
"func (formatCrdbV1TTYWithCounter) formatEntry(entry logpb.Entry, stacks []byte) *buffer {\n",
"\tcp := ttycolor.StderrProfile\n",
"\tif logging.stderrSink.noColor {\n",
"\t\tcp = nil\n",
"\t}\n",
"\treturn formatLogEntryInternal(entry, true /*showCounter*/, cp, stacks)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif logging.stderrSink.noColor.Get() {\n"
],
"file_path": "pkg/util/log/format_crdb_v1.go",
"type": "replace",
"edit_start_line_idx": 79
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvcoord_test
import (
"context"
"fmt"
"reflect"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
)
// Test that a transaction gets cleaned up when the heartbeat loop finds out
// that it has already been aborted (by a 3rd party). That is, we don't wait for
// the client to find out before the intents are removed.
// This relies on the TxnCoordSender's heartbeat loop to notice the changed
// transaction status and do an async abort.
// After the heartbeat loop finds out about the abort, subsequent requests sent
// through the TxnCoordSender return TransactionAbortedErrors. On those errors,
// the contract is that the client.Txn creates a new transaction internally and
// switches the TxnCoordSender instance. The expectation is that the old
// transaction has been cleaned up by that point.
func TestHeartbeatFindsOutAboutAbortedTransaction(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
var cleanupSeen int64
key := roachpb.Key("a")
key2 := roachpb.Key("b")
s, _, origDB := serverutils.StartServer(t, base.TestServerArgs{
Knobs: base.TestingKnobs{
Store: &kvserver.StoreTestingKnobs{
TestingProposalFilter: func(args kvserverbase.ProposalFilterArgs) *roachpb.Error {
// We'll eventually expect to see an EndTxn(commit=false)
// with the right intents.
if args.Req.IsSingleEndTxnRequest() {
et := args.Req.Requests[0].GetInner().(*roachpb.EndTxnRequest)
if !et.Commit && et.Key.Equal(key) &&
reflect.DeepEqual(et.LockSpans, []roachpb.Span{{Key: key}, {Key: key2}}) {
atomic.StoreInt64(&cleanupSeen, 1)
}
}
return nil
},
},
},
})
ctx := context.Background()
defer s.Stopper().Stop(ctx)
push := func(ctx context.Context, key roachpb.Key) error {
// Conflicting transaction that pushes the above transaction.
conflictTxn := kv.NewTxn(ctx, origDB, 0 /* gatewayNodeID */)
// We need to explicitly set a high priority for the push to happen.
if err := conflictTxn.SetUserPriority(roachpb.MaxUserPriority); err != nil {
return err
}
// Push through a Put, as opposed to a Get, so that the pushee gets aborted.
if err := conflictTxn.Put(ctx, key, "pusher was here"); err != nil {
return err
}
return conflictTxn.CommitOrCleanup(ctx)
}
// Make a db with a short heartbeat interval.
ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
tsf := kvcoord.NewTxnCoordSenderFactory(
kvcoord.TxnCoordSenderFactoryConfig{
AmbientCtx: ambient,
// Short heartbeat interval.
HeartbeatInterval: time.Millisecond,
Settings: s.ClusterSettings(),
Clock: s.Clock(),
Stopper: s.Stopper(),
},
s.DistSenderI().(*kvcoord.DistSender),
)
db := kv.NewDB(ambient, tsf, s.Clock(), s.Stopper())
txn := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */)
if err := txn.Put(ctx, key, "val"); err != nil {
t.Fatal(err)
}
if err := txn.Put(ctx, key2, "val"); err != nil {
t.Fatal(err)
}
if err := push(ctx, key); err != nil {
t.Fatal(err)
}
// Now wait until the heartbeat loop notices that the transaction is aborted.
testutils.SucceedsSoon(t, func() error {
if txn.Sender().(*kvcoord.TxnCoordSender).IsTracking() {
return fmt.Errorf("txn heartbeat loop running")
}
return nil
})
// Check that an EndTxn(commit=false) with the right intents has been sent.
testutils.SucceedsSoon(t, func() error {
if atomic.LoadInt64(&cleanupSeen) == 0 {
return fmt.Errorf("no cleanup sent yet")
}
return nil
})
// Check that further sends through the aborted txn are rejected. The
// TxnCoordSender is supposed to synthesize a TransactionAbortedError.
if err := txn.CommitOrCleanup(ctx); !testutils.IsError(
err, "TransactionRetryWithProtoRefreshError: TransactionAbortedError",
) {
t.Fatalf("expected aborted error, got: %s", err)
}
}
// Test that, when a transaction restarts, we don't get a second heartbeat loop
// for it. This bug happened in the past.
//
// The test traces the restarting transaction and looks in it to see how many
// times a heartbeat loop was started.
func TestNoDuplicateHeartbeatLoops(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
s, _, db := serverutils.StartServer(t, base.TestServerArgs{})
ctx := context.Background()
defer s.Stopper().Stop(ctx)
key := roachpb.Key("a")
tracer := tracing.NewTracer()
sp := tracer.StartSpan("test", tracing.WithForceRealSpan())
sp.StartRecording(tracing.SingleNodeRecording)
txnCtx := tracing.ContextWithSpan(context.Background(), sp)
push := func(ctx context.Context, key roachpb.Key) error {
return db.Put(ctx, key, "push")
}
var attempts int
err := db.Txn(txnCtx, func(ctx context.Context, txn *kv.Txn) error {
attempts++
if attempts == 1 {
if err := push(context.Background() /* keep the contexts separate */, key); err != nil {
return err
}
}
if _, err := txn.Get(ctx, key); err != nil {
return err
}
return txn.Put(ctx, key, "val")
})
if err != nil {
t.Fatal(err)
}
if attempts != 2 {
t.Fatalf("expected 2 attempts, got: %d", attempts)
}
sp.Finish()
recording := sp.GetRecording()
var foundHeartbeatLoop bool
for _, sp := range recording {
if strings.Contains(sp.Operation, "heartbeat loop") {
if foundHeartbeatLoop {
t.Fatal("second heartbeat loop found")
}
foundHeartbeatLoop = true
}
}
if !foundHeartbeatLoop {
t.Fatal("no heartbeat loop found. Test rotted?")
}
}
| pkg/kv/kvclient/kvcoord/txn_coord_sender_server_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.00019163073739036918,
0.0001707507617538795,
0.00016535593022126704,
0.00016976549522951245,
0.000005455593054648489
] |
{
"id": 3,
"code_window": [
"func (formatCrdbV1TTYWithCounter) formatterName() string { return \"crdb-v1-tty-count\" }\n",
"\n",
"func (formatCrdbV1TTYWithCounter) formatEntry(entry logpb.Entry, stacks []byte) *buffer {\n",
"\tcp := ttycolor.StderrProfile\n",
"\tif logging.stderrSink.noColor {\n",
"\t\tcp = nil\n",
"\t}\n",
"\treturn formatLogEntryInternal(entry, true /*showCounter*/, cp, stacks)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif logging.stderrSink.noColor.Get() {\n"
],
"file_path": "pkg/util/log/format_crdb_v1.go",
"type": "replace",
"edit_start_line_idx": 79
} | // Copyright 2014 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvserver_test
import (
"context"
"reflect"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/raft/v3/raftpb"
"google.golang.org/grpc/metadata"
)
// testStream is a mock implementation of roachpb.Internal_RangeFeedServer.
type testStream struct {
ctx context.Context
cancel func()
mu struct {
syncutil.Mutex
events []*roachpb.RangeFeedEvent
}
}
func newTestStream() *testStream {
ctx, cancel := context.WithCancel(context.Background())
return &testStream{ctx: ctx, cancel: cancel}
}
func (s *testStream) SendMsg(m interface{}) error { panic("unimplemented") }
func (s *testStream) RecvMsg(m interface{}) error { panic("unimplemented") }
func (s *testStream) SetHeader(metadata.MD) error { panic("unimplemented") }
func (s *testStream) SendHeader(metadata.MD) error { panic("unimplemented") }
func (s *testStream) SetTrailer(metadata.MD) { panic("unimplemented") }
func (s *testStream) Context() context.Context {
return s.ctx
}
func (s *testStream) Cancel() {
s.cancel()
}
func (s *testStream) Send(e *roachpb.RangeFeedEvent) error {
s.mu.Lock()
defer s.mu.Unlock()
s.mu.events = append(s.mu.events, e)
return nil
}
func (s *testStream) Events() []*roachpb.RangeFeedEvent {
s.mu.Lock()
defer s.mu.Unlock()
return s.mu.events
}
func TestReplicaRangefeed(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
const numNodes = 3
args := base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgsPerNode: make(map[int]base.TestServerArgs, numNodes),
}
for i := 0; i < numNodes; i++ {
// Disable closed timestamps as this test was designed assuming no closed
// timestamps would get propagated.
settings := cluster.MakeTestingClusterSettings()
closedts.TargetDuration.Override(&settings.SV, 24*time.Hour)
kvserver.RangefeedEnabled.Override(&settings.SV, true)
args.ServerArgsPerNode[i] = base.TestServerArgs{Settings: settings}
}
tc := testcluster.StartTestCluster(t, numNodes, args)
defer tc.Stopper().Stop(ctx)
ts := tc.Servers[0]
firstStore, pErr := ts.Stores().GetStore(ts.GetFirstStoreID())
if pErr != nil {
t.Fatal(pErr)
}
db := firstStore.DB().NonTransactionalSender()
// Split the range so that the RHS uses epoch-based leases.
startKey := []byte("a")
tc.SplitRangeOrFatal(t, startKey)
tc.AddVotersOrFatal(t, startKey, tc.Target(1), tc.Target(2))
if pErr := tc.WaitForVoters(startKey, tc.Target(1), tc.Target(2)); pErr != nil {
t.Fatalf("Unexpected error waiting for replication: %v", pErr)
}
rangeID := firstStore.LookupReplica(startKey).RangeID
// Insert a key before starting the rangefeeds.
initTime := ts.Clock().Now()
ts1 := initTime.Add(0, 1)
incArgs := incrementArgs(roachpb.Key("b"), 9)
if _, pErr := kv.SendWrappedWith(ctx, db, roachpb.Header{Timestamp: ts1}, incArgs); pErr != nil {
t.Fatal(pErr)
}
tc.WaitForValues(t, roachpb.Key("b"), []int64{9, 9, 9})
replNum := 3
streams := make([]*testStream, replNum)
streamErrC := make(chan *roachpb.Error, replNum)
rangefeedSpan := roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("z")}
for i := 0; i < replNum; i++ {
stream := newTestStream()
streams[i] = stream
ts := tc.Servers[i]
store, err := ts.Stores().GetStore(ts.GetFirstStoreID())
if err != nil {
t.Fatal(err)
}
go func(i int) {
req := roachpb.RangeFeedRequest{
Header: roachpb.Header{
Timestamp: initTime,
RangeID: rangeID,
},
Span: rangefeedSpan,
WithDiff: true,
}
pErr := store.RangeFeed(&req, stream)
streamErrC <- pErr
}(i)
}
checkForExpEvents := func(expEvents []*roachpb.RangeFeedEvent) {
t.Helper()
for i, stream := range streams {
var events []*roachpb.RangeFeedEvent
testutils.SucceedsSoon(t, func() error {
if len(streamErrC) > 0 {
// Break if the error channel is already populated.
return nil
}
events = stream.Events()
if len(events) < len(expEvents) {
return errors.Errorf("too few events: %v", events)
}
return nil
})
if len(streamErrC) > 0 {
t.Fatalf("unexpected error from stream: %v", <-streamErrC)
}
if !reflect.DeepEqual(events, expEvents) {
t.Fatalf("incorrect events on stream %d, found %v, want %v", i, events, expEvents)
}
}
}
// Wait for all streams to observe the catch-up related events.
expVal1 := roachpb.Value{Timestamp: ts1}
expVal1.SetInt(9)
expVal1.InitChecksum(roachpb.Key("b"))
expEvents := []*roachpb.RangeFeedEvent{
{Val: &roachpb.RangeFeedValue{
Key: roachpb.Key("b"), Value: expVal1,
}},
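// The catch-up scan is followed by a checkpoint; its resolved timestamp is empty because closed timestamps are effectively disabled above.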
{Checkpoint: &roachpb.RangeFeedCheckpoint{
Span: rangefeedSpan,
ResolvedTS: hlc.Timestamp{},
}},
}
checkForExpEvents(expEvents)
// Insert a key non-transactionally.
ts2 := initTime.Add(0, 2)
pArgs := putArgs(roachpb.Key("c"), []byte("val2"))
_, err := kv.SendWrappedWith(ctx, db, roachpb.Header{Timestamp: ts2}, pArgs)
if err != nil {
t.Fatal(err)
}
server1 := tc.Servers[1]
store1, pErr := server1.Stores().GetStore(server1.GetFirstStoreID())
if pErr != nil {
t.Fatal(pErr)
}
// Insert a second key transactionally.
ts3 := initTime.Add(0, 3)
if err := store1.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
txn.SetFixedTimestamp(ctx, ts3)
return txn.Put(ctx, roachpb.Key("m"), []byte("val3"))
}); err != nil {
t.Fatal(err)
}
// Read to force intent resolution.
if _, err := store1.DB().Get(ctx, roachpb.Key("m")); err != nil {
t.Fatal(err)
}
// Update the originally incremented key non-transactionally.
ts4 := initTime.Add(0, 4)
_, err = kv.SendWrappedWith(ctx, db, roachpb.Header{Timestamp: ts4}, incArgs)
if err != nil {
t.Fatal(err)
}
// Update the originally incremented key transactionally.
ts5 := initTime.Add(0, 5)
if err := store1.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
txn.SetFixedTimestamp(ctx, ts5)
_, err := txn.Inc(ctx, incArgs.Key, 7)
return err
}); err != nil {
t.Fatal(err)
}
// Read to force intent resolution.
if _, err := store1.DB().Get(ctx, roachpb.Key("b")); err != nil {
t.Fatal(err)
}
// Wait for all streams to observe the expected events.
expVal2 := roachpb.MakeValueFromBytesAndTimestamp([]byte("val2"), ts2)
expVal3 := roachpb.MakeValueFromBytesAndTimestamp([]byte("val3"), ts3)
expVal3.InitChecksum([]byte("m")) // client.Txn sets value checksum
expVal4 := roachpb.Value{Timestamp: ts4}
expVal4.SetInt(18)
expVal4.InitChecksum(roachpb.Key("b"))
expVal5 := roachpb.Value{Timestamp: ts5}
expVal5.SetInt(25)
expVal5.InitChecksum(roachpb.Key("b"))
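// WithDiff previous values are reported without timestamps, so strip the timestamps from the copies used as PrevValue below.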
expVal1NoTS, expVal4NoTS := expVal1, expVal4
expVal1NoTS.Timestamp, expVal4NoTS.Timestamp = hlc.Timestamp{}, hlc.Timestamp{}
expEvents = append(expEvents, []*roachpb.RangeFeedEvent{
{Val: &roachpb.RangeFeedValue{
Key: roachpb.Key("c"), Value: expVal2,
}},
{Val: &roachpb.RangeFeedValue{
Key: roachpb.Key("m"), Value: expVal3,
}},
{Val: &roachpb.RangeFeedValue{
Key: roachpb.Key("b"), Value: expVal4, PrevValue: expVal1NoTS,
}},
{Val: &roachpb.RangeFeedValue{
Key: roachpb.Key("b"), Value: expVal5, PrevValue: expVal4NoTS,
}},
}...)
checkForExpEvents(expEvents)
// Cancel each of the rangefeed streams.
for _, stream := range streams {
stream.Cancel()
pErr := <-streamErrC
if !testutils.IsPError(pErr, "context canceled") {
t.Fatalf("got error for RangeFeed: %v", pErr)
}
}
// Bump the GC threshold and assert that RangeFeed below the timestamp will
// catch an error.
gcReq := &roachpb.GCRequest{
Threshold: initTime.Add(0, 1),
}
gcReq.Key = startKey
gcReq.EndKey = firstStore.LookupReplica(startKey).Desc().EndKey.AsRawKey()
var ba roachpb.BatchRequest
ba.RangeID = rangeID
ba.Add(gcReq)
if _, pErr := firstStore.Send(ctx, ba); pErr != nil {
t.Fatal(pErr)
}
req := roachpb.RangeFeedRequest{
Header: roachpb.Header{
Timestamp: initTime,
RangeID: rangeID,
},
Span: roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("z")},
}
testutils.SucceedsSoon(t, func() error {
for i := 0; i < replNum; i++ {
ts := tc.Servers[i]
store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID())
if pErr != nil {
t.Fatal(pErr)
}
repl := store.LookupReplica(startKey)
if repl == nil {
return errors.Errorf("replica not found on node #%d", i+1)
}
if cur := repl.GetGCThreshold(); cur.Less(gcReq.Threshold) {
return errors.Errorf("%s has GCThreshold %s < %s; hasn't applied the bump yet", repl, cur, gcReq.Threshold)
}
stream := newTestStream()
timer := time.AfterFunc(10*time.Second, stream.Cancel)
defer timer.Stop()
defer stream.Cancel()
if pErr := store.RangeFeed(&req, stream); !testutils.IsPError(
pErr, `must be after replica GC threshold`,
) {
return pErr.GoError()
}
}
return nil
})
}
func TestReplicaRangefeedExpiringLeaseError(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
stopper := stop.NewStopper()
defer stopper.Stop(context.Background())
store, _ := createTestStore(t, stopper)
// Attempt to establish a rangefeed on the range; this should fail because the range uses an expiration-based lease.
stream := newTestStream()
req := roachpb.RangeFeedRequest{
Header: roachpb.Header{
RangeID: store.LookupReplica(roachpb.RKey("a")).RangeID,
},
Span: roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("z")},
}
// Cancel the stream's context so that RangeFeed would return
// immediately even if it didn't return the correct error.
stream.Cancel()
kvserver.RangefeedEnabled.Override(&store.ClusterSettings().SV, true)
pErr := store.RangeFeed(&req, stream)
const exp = "expiration-based leases are incompatible with rangefeeds"
if !testutils.IsPError(pErr, exp) {
t.Errorf("expected error %q, found %v", exp, pErr)
}
}
func TestReplicaRangefeedRetryErrors(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
startKey := []byte("a")
setup := func(subT *testing.T) (
*testcluster.TestCluster, roachpb.RangeID) {
subT.Helper()
tc := testcluster.StartTestCluster(t, 3,
base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
},
)
ts := tc.Servers[0]
store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID())
if pErr != nil {
t.Fatal(pErr)
}
tc.SplitRangeOrFatal(t, startKey)
tc.AddVotersOrFatal(t, startKey, tc.Target(1), tc.Target(2))
rangeID := store.LookupReplica(startKey).RangeID
// Write to the RHS of the split and wait for all replicas to process it.
// This ensures that all replicas have seen the split before we move on.
incArgs := incrementArgs(roachpb.Key("a"), 9)
if _, pErr := kv.SendWrapped(ctx, store.TestSender(), incArgs); pErr != nil {
t.Fatal(pErr)
}
tc.WaitForValues(t, roachpb.Key("a"), []int64{9, 9, 9})
return tc, rangeID
}
waitForInitialCheckpointAcrossSpan := func(
subT *testing.T, stream *testStream, streamErrC <-chan *roachpb.Error, span roachpb.Span,
) {
subT.Helper()
noResolveTimestampEvent := roachpb.RangeFeedEvent{
Checkpoint: &roachpb.RangeFeedCheckpoint{
Span: span,
ResolvedTS: hlc.Timestamp{},
},
}
resolveTimestampEvent := roachpb.RangeFeedEvent{
Checkpoint: &roachpb.RangeFeedCheckpoint{
Span: span,
},
}
var events []*roachpb.RangeFeedEvent
testutils.SucceedsSoon(t, func() error {
if len(streamErrC) > 0 {
// Break if the error channel is already populated.
return nil
}
events = stream.Events()
if len(events) < 1 {
return errors.Errorf("too few events: %v", events)
}
return nil
})
if len(streamErrC) > 0 {
subT.Fatalf("unexpected error from stream: %v", <-streamErrC)
}
expEvents := []*roachpb.RangeFeedEvent{&noResolveTimestampEvent}
if len(events) > 1 {
// Unfortunately there is a timing issue here and the rangefeed may
// publish two checkpoints, one with a ResolvedTS and one without, so we
// check for either case.
resolveTimestampEvent.Checkpoint.ResolvedTS = events[1].Checkpoint.ResolvedTS
expEvents = []*roachpb.RangeFeedEvent{
&noResolveTimestampEvent,
&resolveTimestampEvent,
}
}
if !reflect.DeepEqual(events, expEvents) {
subT.Fatalf("incorrect events on stream, found %v, want %v", events, expEvents)
}
}
assertRangefeedRetryErr := func(
subT *testing.T, pErr *roachpb.Error, expReason roachpb.RangeFeedRetryError_Reason,
) {
subT.Helper()
expErr := roachpb.NewRangeFeedRetryError(expReason)
if pErr == nil {
subT.Fatalf("got nil error for RangeFeed: expecting %v", expErr)
}
rfErr, ok := pErr.GetDetail().(*roachpb.RangeFeedRetryError)
if !ok {
subT.Fatalf("got incorrect error for RangeFeed: %v; expecting %v", pErr, expErr)
}
if rfErr.Reason != expReason {
subT.Fatalf("got incorrect RangeFeedRetryError reason for RangeFeed: %v; expecting %v",
rfErr.Reason, expReason)
}
}
t.Run(roachpb.RangeFeedRetryError_REASON_REPLICA_REMOVED.String(), func(t *testing.T) {
const removeStore = 2
tc, rangeID := setup(t)
defer tc.Stopper().Stop(ctx)
// Establish a rangefeed on the replica we plan to remove.
stream := newTestStream()
streamErrC := make(chan *roachpb.Error, 1)
rangefeedSpan := roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("z")}
ts := tc.Servers[removeStore]
store, err := ts.Stores().GetStore(ts.GetFirstStoreID())
if err != nil {
t.Fatal(err)
}
go func() {
req := roachpb.RangeFeedRequest{
Header: roachpb.Header{
RangeID: rangeID,
},
Span: rangefeedSpan,
}
pErr := store.RangeFeed(&req, stream)
streamErrC <- pErr
}()
// Wait for the first checkpoint event.
waitForInitialCheckpointAcrossSpan(t, stream, streamErrC, rangefeedSpan)
// Remove the replica from the range.
tc.RemoveVotersOrFatal(t, startKey, tc.Target(removeStore))
// Check the error.
pErr := <-streamErrC
assertRangefeedRetryErr(t, pErr, roachpb.RangeFeedRetryError_REASON_REPLICA_REMOVED)
})
t.Run(roachpb.RangeFeedRetryError_REASON_RANGE_SPLIT.String(), func(t *testing.T) {
tc, rangeID := setup(t)
defer tc.Stopper().Stop(ctx)
// Establish a rangefeed on the replica we plan to split.
stream := newTestStream()
streamErrC := make(chan *roachpb.Error, 1)
rangefeedSpan := roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("z")}
ts := tc.Servers[0]
store, err := ts.Stores().GetStore(ts.GetFirstStoreID())
if err != nil {
t.Fatal(err)
}
go func() {
req := roachpb.RangeFeedRequest{
Header: roachpb.Header{
RangeID: rangeID,
},
Span: rangefeedSpan,
}
streamErrC <- store.RangeFeed(&req, stream)
}()
// Wait for the first checkpoint event.
waitForInitialCheckpointAcrossSpan(t, stream, streamErrC, rangefeedSpan)
// Split the range.
tc.SplitRangeOrFatal(t, []byte("m"))
// Check the error.
pErr := <-streamErrC
assertRangefeedRetryErr(t, pErr, roachpb.RangeFeedRetryError_REASON_RANGE_SPLIT)
})
t.Run(roachpb.RangeFeedRetryError_REASON_RANGE_MERGED.String(), func(t *testing.T) {
tc, rangeID := setup(t)
defer tc.Stopper().Stop(ctx)
ts := tc.Servers[0]
store, err := ts.Stores().GetStore(ts.GetFirstStoreID())
if err != nil {
t.Fatal(err)
}
// Split the range.
splitKey := []byte("m")
tc.SplitRangeOrFatal(t, splitKey)
if pErr := tc.WaitForSplitAndInitialization(splitKey); pErr != nil {
t.Fatalf("Unexpected error waiting for range split: %v", pErr)
}
rightRangeID := store.LookupReplica(splitKey).RangeID
// Establish a rangefeed on the left replica.
streamLeft := newTestStream()
streamLeftErrC := make(chan *roachpb.Error, 1)
rangefeedLeftSpan := roachpb.Span{Key: roachpb.Key("a"), EndKey: splitKey}
go func() {
req := roachpb.RangeFeedRequest{
Header: roachpb.Header{
RangeID: rangeID,
},
Span: rangefeedLeftSpan,
}
pErr := store.RangeFeed(&req, streamLeft)
streamLeftErrC <- pErr
}()
// Establish a rangefeed on the right replica.
streamRight := newTestStream()
streamRightErrC := make(chan *roachpb.Error, 1)
rangefeedRightSpan := roachpb.Span{Key: splitKey, EndKey: roachpb.Key("z")}
go func() {
req := roachpb.RangeFeedRequest{
Header: roachpb.Header{
RangeID: rightRangeID,
},
Span: rangefeedRightSpan,
}
pErr := store.RangeFeed(&req, streamRight)
streamRightErrC <- pErr
}()
// Wait for the first checkpoint event on each stream.
waitForInitialCheckpointAcrossSpan(t, streamLeft, streamLeftErrC, rangefeedLeftSpan)
waitForInitialCheckpointAcrossSpan(t, streamRight, streamRightErrC, rangefeedRightSpan)
// Merge the ranges back together.
mergeArgs := adminMergeArgs(startKey)
if _, pErr := kv.SendWrapped(ctx, store.TestSender(), mergeArgs); pErr != nil {
t.Fatalf("merge saw unexpected error: %v", pErr)
}
// Check the errors.
pErrLeft, pErrRight := <-streamLeftErrC, <-streamRightErrC
assertRangefeedRetryErr(t, pErrLeft, roachpb.RangeFeedRetryError_REASON_RANGE_MERGED)
assertRangefeedRetryErr(t, pErrRight, roachpb.RangeFeedRetryError_REASON_RANGE_MERGED)
})
t.Run(roachpb.RangeFeedRetryError_REASON_RAFT_SNAPSHOT.String(), func(t *testing.T) {
tc, rangeID := setup(t)
defer tc.Stopper().Stop(ctx)
ts2 := tc.Servers[2]
partitionStore, err := ts2.Stores().GetStore(ts2.GetFirstStoreID())
if err != nil {
t.Fatal(err)
}
ts := tc.Servers[0]
firstStore, err := ts.Stores().GetStore(ts.GetFirstStoreID())
if err != nil {
t.Fatal(err)
}
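// Disable the replica GC queue so the partitioned replica is not destroyed while it is cut off from the rest of the range.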
for _, server := range tc.Servers {
store, err := server.Stores().GetStore(server.GetFirstStoreID())
if err != nil {
t.Fatal(err)
}
store.SetReplicaGCQueueActive(false)
}
// Establish a rangefeed on the replica we plan to partition.
stream := newTestStream()
streamErrC := make(chan *roachpb.Error, 1)
rangefeedSpan := roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("z")}
go func() {
req := roachpb.RangeFeedRequest{
Header: roachpb.Header{
RangeID: rangeID,
},
Span: rangefeedSpan,
}
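// Cancel the stream if the rangefeed does not return within 10s so the test fails fast instead of hanging.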
timer := time.AfterFunc(10*time.Second, stream.Cancel)
defer timer.Stop()
pErr := partitionStore.RangeFeed(&req, stream)
streamErrC <- pErr
}()
// Wait for the first checkpoint event.
waitForInitialCheckpointAcrossSpan(t, stream, streamErrC, rangefeedSpan)
// Force the leader off the replica on partitionStore. If it were the
// leader, this test would fall over when the replica is cut off from
// Raft traffic.
testutils.SucceedsSoon(t, func() error {
repl, err := partitionStore.GetReplica(rangeID)
if err != nil {
return err
}
raftStatus := repl.RaftStatus()
if raftStatus != nil && raftStatus.RaftState == raft.StateFollower {
return nil
}
err = repl.AdminTransferLease(ctx, roachpb.StoreID(1))
return errors.Errorf("not raft follower: %+v, transferred lease: %v", raftStatus, err)
})
// Partition the replica from the rest of its range.
partitionStore.Transport().Listen(partitionStore.Ident.StoreID, &unreliableRaftHandler{
rangeID: rangeID,
RaftMessageHandler: partitionStore,
})
// Perform a write on the range.
pArgs := putArgs(roachpb.Key("c"), []byte("val2"))
if _, pErr := kv.SendWrapped(ctx, firstStore.TestSender(), pArgs); pErr != nil {
t.Fatal(pErr)
}
// Get that command's log index.
repl, err := firstStore.GetReplica(rangeID)
if err != nil {
t.Fatal(err)
}
index, err := repl.GetLastIndex()
if err != nil {
t.Fatal(err)
}
// Truncate the log at index+1 (log entries < N are removed, so this
// includes the put). This necessitates a snapshot when the partitioned
// replica rejoins the rest of the range.
truncArgs := truncateLogArgs(index+1, rangeID)
truncArgs.Key = startKey
if _, err := kv.SendWrapped(ctx, firstStore.TestSender(), truncArgs); err != nil {
t.Fatal(err)
}
// Remove the partition. Snapshot should follow.
partitionStore.Transport().Listen(partitionStore.Ident.StoreID, &unreliableRaftHandler{
rangeID: rangeID,
RaftMessageHandler: partitionStore,
unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{
dropReq: func(req *kvserver.RaftMessageRequest) bool {
// Make sure that even going forward no MsgApp for what we just truncated can
// make it through. The Raft transport is asynchronous so this is necessary
// to make the test pass reliably.
// NB: the Index on the message is the log index that _precedes_ any of the
// entries in the MsgApp, so filter where msg.Index < index, not <= index.
return req.Message.Type == raftpb.MsgApp && req.Message.Index < index
},
dropHB: func(*kvserver.RaftHeartbeat) bool { return false },
dropResp: func(*kvserver.RaftMessageResponse) bool { return false },
},
})
// Check the error.
pErr := <-streamErrC
assertRangefeedRetryErr(t, pErr, roachpb.RangeFeedRetryError_REASON_RAFT_SNAPSHOT)
})
t.Run(roachpb.RangeFeedRetryError_REASON_LOGICAL_OPS_MISSING.String(), func(t *testing.T) {
tc, _ := setup(t)
defer tc.Stopper().Stop(ctx)
ts := tc.Servers[0]
store, err := ts.Stores().GetStore(ts.GetFirstStoreID())
if err != nil {
t.Fatal(err)
}
// Split the range so that the RHS is not a system range and thus will
// respect the rangefeed_enabled cluster setting.
startKey := keys.UserTableDataMin
tc.SplitRangeOrFatal(t, startKey)
rightRangeID := store.LookupReplica(roachpb.RKey(startKey)).RangeID
// Establish a rangefeed.
stream := newTestStream()
streamErrC := make(chan *roachpb.Error, 1)
endKey := keys.TableDataMax
rangefeedSpan := roachpb.Span{Key: startKey, EndKey: endKey}
go func() {
req := roachpb.RangeFeedRequest{
Header: roachpb.Header{
RangeID: rightRangeID,
},
Span: rangefeedSpan,
}
kvserver.RangefeedEnabled.Override(&store.ClusterSettings().SV, true)
pErr := store.RangeFeed(&req, stream)
streamErrC <- pErr
}()
// Wait for the first checkpoint event.
waitForInitialCheckpointAcrossSpan(t, stream, streamErrC, rangefeedSpan)
// Disable rangefeeds, which stops logical op logs from being provided
// with Raft commands.
kvserver.RangefeedEnabled.Override(&store.ClusterSettings().SV, false)
// Perform a write on the range.
writeKey := encoding.EncodeStringAscending(keys.SystemSQLCodec.TablePrefix(55), "c")
pArgs := putArgs(writeKey, []byte("val2"))
if _, pErr := kv.SendWrapped(ctx, store.TestSender(), pArgs); pErr != nil {
t.Fatal(pErr)
}
// Check the error.
pErr := <-streamErrC
assertRangefeedRetryErr(t, pErr, roachpb.RangeFeedRetryError_REASON_LOGICAL_OPS_MISSING)
})
}
// TestReplicaRangefeedPushesTransactions tests that rangefeed detects intents
// that are holding up its resolved timestamp and periodically pushes them to
// ensure that its resolved timestamp continues to advance.
func TestReplicaRangefeedPushesTransactions(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
tc, db, _, repls := setupClusterForClosedTimestampTesting(ctx, t, testingTargetDuration, testingCloseFraction, aggressiveResolvedTimestampClusterArgs)
defer tc.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `SET CLUSTER SETTING kv.rangefeed.enabled = true`)
// While we're here, drop the target duration. This was set to
// testingTargetDuration above, but this is higher than it needs to be now
// that cluster and schema setup is complete.
sqlDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '10ms'`)
// Make sure all the nodes have gotten the rangefeed enabled setting from
// gossip, so that they will immediately be able to accept RangeFeeds. The
// target_duration one is just to speed up the test, we don't care if it has
// propagated everywhere yet.
testutils.SucceedsSoon(t, func() error {
for i := 0; i < tc.NumServers(); i++ {
var enabled bool
if err := tc.ServerConn(i).QueryRow(
`SHOW CLUSTER SETTING kv.rangefeed.enabled`,
).Scan(&enabled); err != nil {
return err
}
if !enabled {
return errors.Errorf(`waiting for rangefeed to be enabled on node %d`, i)
}
}
return nil
})
ts1 := tc.Server(0).Clock().Now()
rangeFeedCtx, rangeFeedCancel := context.WithCancel(ctx)
defer rangeFeedCancel()
rangeFeedChs := make([]chan *roachpb.RangeFeedEvent, len(repls))
rangeFeedErrC := make(chan error, len(repls))
for i := range repls {
desc := repls[i].Desc()
ds := tc.Server(i).DistSenderI().(*kvcoord.DistSender)
rangeFeedCh := make(chan *roachpb.RangeFeedEvent)
rangeFeedChs[i] = rangeFeedCh
go func() {
span := roachpb.Span{
Key: desc.StartKey.AsRawKey(), EndKey: desc.EndKey.AsRawKey(),
}
rangeFeedErrC <- ds.RangeFeed(rangeFeedCtx, span, ts1, false /* withDiff */, rangeFeedCh)
}()
}
// Wait for a RangeFeed checkpoint on each RangeFeed after the RangeFeed
// initial scan time (which is the timestamp passed in the request) to make
// sure everything is set up. We intentionally don't care about the spans in
// the checkpoints, just verifying that something has made it past the
// initial scan and is running.
waitForCheckpoint := func(ts hlc.Timestamp) {
t.Helper()
for _, rangeFeedCh := range rangeFeedChs {
checkpointed := false
for !checkpointed {
select {
case event := <-rangeFeedCh:
if c := event.Checkpoint; c != nil && ts.Less(c.ResolvedTS) {
checkpointed = true
}
case err := <-rangeFeedErrC:
t.Fatal(err)
}
}
}
}
waitForCheckpoint(ts1)
// Start a transaction and write an intent on the range. This intent would
// prevent the rangefeed's resolved timestamp from advancing. To get
// around this, the rangefeed periodically pushes all intents on its range
// to higher timestamps.
tx1, err := db.BeginTx(ctx, nil)
require.NoError(t, err)
_, err = tx1.ExecContext(ctx, "INSERT INTO cttest.kv VALUES (1, 'test')")
require.NoError(t, err)
// Read the current transaction timestamp. This prevents the txn from committing
// if it ever gets pushed.
var ts2Str string
require.NoError(t, tx1.QueryRowContext(ctx, "SELECT cluster_logical_timestamp()").Scan(&ts2Str))
ts2, err := sql.ParseHLC(ts2Str)
require.NoError(t, err)
// Wait for the RangeFeed checkpoint on each RangeFeed to exceed this timestamp.
// For this to be possible, it must push the transaction's timestamp forward.
waitForCheckpoint(ts2)
// The txn should not be able to commit since its commit timestamp was pushed
// and it has already observed its original timestamp.
require.Regexp(t, "TransactionRetryError: retry txn", tx1.Commit())
// Make sure the RangeFeed hasn't errored yet.
select {
case err := <-rangeFeedErrC:
t.Fatal(err)
default:
}
// Now cancel it and wait for it to shut down.
rangeFeedCancel()
}
// TestReplicaRangefeedNudgeSlowClosedTimestamp tests that rangefeed detects
// that its closed timestamp updates have stalled and requests new information
// from its Range's leaseholder. This is a regression test for #35142.
func TestReplicaRangefeedNudgeSlowClosedTimestamp(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
tc, db, desc, repls := setupClusterForClosedTimestampTesting(ctx, t, testingTargetDuration, testingCloseFraction, aggressiveResolvedTimestampClusterArgs)
defer tc.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `SET CLUSTER SETTING kv.rangefeed.enabled = true`)
// While we're here, drop the target duration. This was set to
// testingTargetDuration above, but this is higher than it needs to be now
// that cluster and schema setup is complete.
sqlDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '10ms'`)
// Make sure all the nodes have gotten the rangefeed enabled setting from
// gossip, so that they will immediately be able to accept RangeFeeds. The
// target_duration one is just to speed up the test, we don't care if it has
// propagated everywhere yet.
testutils.SucceedsSoon(t, func() error {
for i := 0; i < tc.NumServers(); i++ {
var enabled bool
if err := tc.ServerConn(i).QueryRow(
`SHOW CLUSTER SETTING kv.rangefeed.enabled`,
).Scan(&enabled); err != nil {
return err
}
if !enabled {
return errors.Errorf(`waiting for rangefeed to be enabled on node %d`, i)
}
}
return nil
})
ts1 := tc.Server(0).Clock().Now()
rangeFeedCtx, rangeFeedCancel := context.WithCancel(ctx)
defer rangeFeedCancel()
rangeFeedChs := make([]chan *roachpb.RangeFeedEvent, len(repls))
rangeFeedErrC := make(chan error, len(repls))
for i := range repls {
ds := tc.Server(i).DistSenderI().(*kvcoord.DistSender)
rangeFeedCh := make(chan *roachpb.RangeFeedEvent)
rangeFeedChs[i] = rangeFeedCh
go func() {
span := roachpb.Span{
Key: desc.StartKey.AsRawKey(), EndKey: desc.EndKey.AsRawKey(),
}
rangeFeedErrC <- ds.RangeFeed(rangeFeedCtx, span, ts1, false /* withDiff */, rangeFeedCh)
}()
}
// Wait for a RangeFeed checkpoint on each RangeFeed after the RangeFeed
// initial scan time (which is the timestamp passed in the request) to make
// sure everything is set up. We intentionally don't care about the spans in
// the checkpoints, just verifying that something has made it past the
// initial scan and is running.
waitForCheckpoint := func(ts hlc.Timestamp) {
t.Helper()
for _, rangeFeedCh := range rangeFeedChs {
checkpointed := false
for !checkpointed {
select {
case event := <-rangeFeedCh:
if c := event.Checkpoint; c != nil && ts.Less(c.ResolvedTS) {
checkpointed = true
}
case err := <-rangeFeedErrC:
t.Fatal(err)
}
}
}
}
waitForCheckpoint(ts1)
// Clear the closed timestamp storage on each server. This simulates the case
// where a closed timestamp message is lost or a node restarts. To recover,
// the servers will need to request an update from the leaseholder.
for i := 0; i < tc.NumServers(); i++ {
stores := tc.Server(i).GetStores().(*kvserver.Stores)
err := stores.VisitStores(func(s *kvserver.Store) error {
s.ClearClosedTimestampStorage()
return nil
})
require.NoError(t, err)
}
// Wait for another RangeFeed checkpoint after the store was cleared. Without
// RangeFeed nudging closed timestamps, this doesn't happen on its own. Again,
// we intentionally don't care about the spans in the checkpoints, just
// verifying that something has made it past the cleared time.
ts2 := tc.Server(0).Clock().Now()
waitForCheckpoint(ts2)
// Make sure the RangeFeed hasn't errored yet.
select {
case err := <-rangeFeedErrC:
t.Fatal(err)
default:
}
// Now cancel it and wait for it to shut down.
rangeFeedCancel()
}
| pkg/kv/kvserver/replica_rangefeed_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.0003958043525926769,
0.00017567384929861873,
0.00016092912119347602,
0.00016991273150779307,
0.000029457260097842664
] |
{
"id": 4,
"code_window": [
"\n",
"package log\n",
"\n",
"import \"github.com/cockroachdb/cockroach/pkg/cli/exit\"\n",
"\n",
"// Type of a stderr copy sink.\n",
"type stderrSink struct {\n",
"\t// the --no-color flag. When set it disables escapes code on the\n",
"\t// stderr copy.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"import (\n",
"\t\"github.com/cockroachdb/cockroach/pkg/cli/exit\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/syncutil\"\n",
")\n"
],
"file_path": "pkg/util/log/stderr_sink.go",
"type": "replace",
"edit_start_line_idx": 12
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package log
import "github.com/cockroachdb/cockroach/pkg/cli/exit"
// Type of a stderr copy sink.
type stderrSink struct {
// the --no-color flag. When set it disables escape codes on the
// stderr copy.
noColor bool
}
// active implements the logSink interface.
func (l *stderrSink) active() bool { return true }
// attachHints implements the logSink interface.
func (l *stderrSink) attachHints(stacks []byte) []byte {
return stacks
}
// output implements the logSink interface.
func (l *stderrSink) output(_ bool, b []byte) error {
_, err := OrigStderr.Write(b)
return err
}
// exitCode implements the logSink interface.
func (l *stderrSink) exitCode() exit.Code {
return exit.LoggingStderrUnavailable()
}
// emergencyOutput implements the logSink interface.
func (l *stderrSink) emergencyOutput(b []byte) {
_, _ = OrigStderr.Write(b)
}
| pkg/util/log/stderr_sink.go | 1 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.9984028935432434,
0.7981403470039368,
0.00017209585348609835,
0.9974006414413452,
0.39898473024368286
] |
{
"id": 4,
"code_window": [
"\n",
"package log\n",
"\n",
"import \"github.com/cockroachdb/cockroach/pkg/cli/exit\"\n",
"\n",
"// Type of a stderr copy sink.\n",
"type stderrSink struct {\n",
"\t// the --no-color flag. When set it disables escapes code on the\n",
"\t// stderr copy.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"import (\n",
"\t\"github.com/cockroachdb/cockroach/pkg/cli/exit\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/syncutil\"\n",
")\n"
],
"file_path": "pkg/util/log/stderr_sink.go",
"type": "replace",
"edit_start_line_idx": 12
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package props
import (
"testing"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/stretchr/testify/require"
)
var bothIndeterminate = JoinMultiplicity{
LeftMultiplicity: MultiplicityIndeterminateVal,
RightMultiplicity: MultiplicityIndeterminateVal,
}
var bothNoDup = JoinMultiplicity{
LeftMultiplicity: MultiplicityNotDuplicatedVal,
RightMultiplicity: MultiplicityNotDuplicatedVal,
}
var bothPreserved = JoinMultiplicity{
LeftMultiplicity: MultiplicityPreservedVal,
RightMultiplicity: MultiplicityPreservedVal,
}
var bothNoDupBothPreserved = JoinMultiplicity{
LeftMultiplicity: MultiplicityNotDuplicatedVal | MultiplicityPreservedVal,
RightMultiplicity: MultiplicityNotDuplicatedVal | MultiplicityPreservedVal,
}
var leftIndeterminateRightPreserved = JoinMultiplicity{
LeftMultiplicity: MultiplicityIndeterminateVal,
RightMultiplicity: MultiplicityPreservedVal,
}
var leftIndeterminateRightNoDup = JoinMultiplicity{
LeftMultiplicity: MultiplicityIndeterminateVal,
RightMultiplicity: MultiplicityNotDuplicatedVal,
}
var rightIndeterminateLeftPreserved = JoinMultiplicity{
LeftMultiplicity: MultiplicityPreservedVal,
RightMultiplicity: MultiplicityIndeterminateVal,
}
var rightIndeterminateLeftNoDup = JoinMultiplicity{
LeftMultiplicity: MultiplicityNotDuplicatedVal,
RightMultiplicity: MultiplicityIndeterminateVal,
}
var bothNoDupLeftPreserved = JoinMultiplicity{
LeftMultiplicity: MultiplicityNotDuplicatedVal | MultiplicityPreservedVal,
RightMultiplicity: MultiplicityNotDuplicatedVal,
}
var bothPreservedLeftNoDup = JoinMultiplicity{
LeftMultiplicity: MultiplicityPreservedVal | MultiplicityNotDuplicatedVal,
RightMultiplicity: MultiplicityPreservedVal,
}
var bothNoDupRightPreserved = JoinMultiplicity{
LeftMultiplicity: MultiplicityNotDuplicatedVal,
RightMultiplicity: MultiplicityNotDuplicatedVal | MultiplicityPreservedVal,
}
var bothPreservedRightNoDup = JoinMultiplicity{
LeftMultiplicity: MultiplicityPreservedVal,
RightMultiplicity: MultiplicityPreservedVal | MultiplicityNotDuplicatedVal,
}
func TestJoinMultiplicity_JoinFiltersDoNotDuplicateLeftRows(t *testing.T) {
require.Equal(t, false, bothIndeterminate.JoinFiltersDoNotDuplicateLeftRows())
require.Equal(t, true, bothNoDup.JoinFiltersDoNotDuplicateLeftRows())
require.Equal(t, false, bothPreserved.JoinFiltersDoNotDuplicateLeftRows())
require.Equal(t, true, bothNoDupBothPreserved.JoinFiltersDoNotDuplicateLeftRows())
require.Equal(t, false, leftIndeterminateRightPreserved.JoinFiltersDoNotDuplicateLeftRows())
require.Equal(t, false, leftIndeterminateRightNoDup.JoinFiltersDoNotDuplicateLeftRows())
require.Equal(t, false, rightIndeterminateLeftPreserved.JoinFiltersDoNotDuplicateLeftRows())
require.Equal(t, true, rightIndeterminateLeftNoDup.JoinFiltersDoNotDuplicateLeftRows())
require.Equal(t, true, bothNoDupLeftPreserved.JoinFiltersDoNotDuplicateLeftRows())
require.Equal(t, true, bothPreservedLeftNoDup.JoinFiltersDoNotDuplicateLeftRows())
require.Equal(t, true, bothNoDupRightPreserved.JoinFiltersDoNotDuplicateLeftRows())
require.Equal(t, false, bothPreservedRightNoDup.JoinFiltersDoNotDuplicateLeftRows())
}
func TestJoinMultiplicity_JoinFiltersDoNotDuplicateRightRows(t *testing.T) {
require.Equal(t, false, bothIndeterminate.JoinFiltersDoNotDuplicateRightRows())
require.Equal(t, true, bothNoDup.JoinFiltersDoNotDuplicateRightRows())
require.Equal(t, false, bothPreserved.JoinFiltersDoNotDuplicateRightRows())
require.Equal(t, true, bothNoDupBothPreserved.JoinFiltersDoNotDuplicateRightRows())
require.Equal(t, false, leftIndeterminateRightPreserved.JoinFiltersDoNotDuplicateRightRows())
require.Equal(t, true, leftIndeterminateRightNoDup.JoinFiltersDoNotDuplicateRightRows())
require.Equal(t, false, rightIndeterminateLeftPreserved.JoinFiltersDoNotDuplicateRightRows())
require.Equal(t, false, rightIndeterminateLeftNoDup.JoinFiltersDoNotDuplicateRightRows())
require.Equal(t, true, bothNoDupLeftPreserved.JoinFiltersDoNotDuplicateRightRows())
require.Equal(t, false, bothPreservedLeftNoDup.JoinFiltersDoNotDuplicateRightRows())
require.Equal(t, true, bothNoDupRightPreserved.JoinFiltersDoNotDuplicateRightRows())
require.Equal(t, true, bothPreservedRightNoDup.JoinFiltersDoNotDuplicateRightRows())
}
func TestJoinMultiplicity_JoinFiltersMatchAllLeftRows(t *testing.T) {
require.Equal(t, false, bothIndeterminate.JoinFiltersMatchAllLeftRows())
require.Equal(t, false, bothNoDup.JoinFiltersMatchAllLeftRows())
require.Equal(t, true, bothPreserved.JoinFiltersMatchAllLeftRows())
require.Equal(t, true, bothNoDupBothPreserved.JoinFiltersMatchAllLeftRows())
require.Equal(t, false, leftIndeterminateRightPreserved.JoinFiltersMatchAllLeftRows())
require.Equal(t, false, leftIndeterminateRightNoDup.JoinFiltersMatchAllLeftRows())
require.Equal(t, true, rightIndeterminateLeftPreserved.JoinFiltersMatchAllLeftRows())
require.Equal(t, false, rightIndeterminateLeftNoDup.JoinFiltersMatchAllLeftRows())
require.Equal(t, true, bothNoDupLeftPreserved.JoinFiltersMatchAllLeftRows())
require.Equal(t, true, bothPreservedLeftNoDup.JoinFiltersMatchAllLeftRows())
require.Equal(t, false, bothNoDupRightPreserved.JoinFiltersMatchAllLeftRows())
require.Equal(t, true, bothPreservedRightNoDup.JoinFiltersMatchAllLeftRows())
}
func TestJoinMultiplicity_JoinFiltersMatchAllRightRows(t *testing.T) {
require.Equal(t, false, bothIndeterminate.JoinFiltersMatchAllRightRows())
require.Equal(t, false, bothNoDup.JoinFiltersMatchAllRightRows())
require.Equal(t, true, bothPreserved.JoinFiltersMatchAllRightRows())
require.Equal(t, true, bothNoDupBothPreserved.JoinFiltersMatchAllRightRows())
require.Equal(t, true, leftIndeterminateRightPreserved.JoinFiltersMatchAllRightRows())
require.Equal(t, false, leftIndeterminateRightNoDup.JoinFiltersMatchAllRightRows())
require.Equal(t, false, rightIndeterminateLeftPreserved.JoinFiltersMatchAllRightRows())
require.Equal(t, false, rightIndeterminateLeftNoDup.JoinFiltersMatchAllRightRows())
require.Equal(t, false, bothNoDupLeftPreserved.JoinFiltersMatchAllRightRows())
require.Equal(t, true, bothPreservedLeftNoDup.JoinFiltersMatchAllRightRows())
require.Equal(t, true, bothNoDupRightPreserved.JoinFiltersMatchAllRightRows())
require.Equal(t, true, bothPreservedRightNoDup.JoinFiltersMatchAllRightRows())
}
func TestJoinMultiplicity_JoinDoesNotDuplicateLeftRows(t *testing.T) {
require.Equal(t, false, bothIndeterminate.JoinDoesNotDuplicateLeftRows(opt.InnerJoinOp))
require.Equal(t, false, bothIndeterminate.JoinDoesNotDuplicateLeftRows(opt.LeftJoinOp))
require.Equal(t, false, bothIndeterminate.JoinDoesNotDuplicateLeftRows(opt.FullJoinOp))
require.Equal(t, true, bothIndeterminate.JoinDoesNotDuplicateLeftRows(opt.SemiJoinOp))
require.Equal(
t, true, rightIndeterminateLeftNoDup.JoinDoesNotDuplicateLeftRows(opt.InnerJoinOp))
require.Equal(
t, true, rightIndeterminateLeftNoDup.JoinDoesNotDuplicateLeftRows(opt.LeftJoinOp))
require.Equal(
t, true, rightIndeterminateLeftNoDup.JoinDoesNotDuplicateLeftRows(opt.FullJoinOp))
require.Equal(
t, true, rightIndeterminateLeftNoDup.JoinDoesNotDuplicateLeftRows(opt.SemiJoinOp))
}
func TestJoinMultiplicity_JoinDoesNotDuplicateRightRows(t *testing.T) {
require.Equal(t, false, bothIndeterminate.JoinDoesNotDuplicateRightRows(opt.InnerJoinOp))
require.Equal(t, false, bothIndeterminate.JoinDoesNotDuplicateRightRows(opt.LeftJoinOp))
require.Equal(t, false, bothIndeterminate.JoinDoesNotDuplicateRightRows(opt.FullJoinOp))
require.Equal(
t, true, leftIndeterminateRightNoDup.JoinDoesNotDuplicateRightRows(opt.InnerJoinOp))
require.Equal(
t, true, leftIndeterminateRightNoDup.JoinDoesNotDuplicateRightRows(opt.LeftJoinOp))
require.Equal(
t, true, leftIndeterminateRightNoDup.JoinDoesNotDuplicateRightRows(opt.FullJoinOp))
}
func TestJoinMultiplicity_JoinPreservesLeftRows(t *testing.T) {
require.Equal(t, false, bothIndeterminate.JoinPreservesLeftRows(opt.InnerJoinOp))
require.Equal(t, true, bothIndeterminate.JoinPreservesLeftRows(opt.LeftJoinOp))
require.Equal(t, true, bothIndeterminate.JoinPreservesLeftRows(opt.FullJoinOp))
require.Equal(t, false, bothIndeterminate.JoinPreservesLeftRows(opt.SemiJoinOp))
require.Equal(
t, true, rightIndeterminateLeftPreserved.JoinPreservesLeftRows(opt.InnerJoinOp))
require.Equal(
t, true, rightIndeterminateLeftPreserved.JoinPreservesLeftRows(opt.LeftJoinOp))
require.Equal(
t, true, rightIndeterminateLeftPreserved.JoinPreservesLeftRows(opt.FullJoinOp))
require.Equal(
t, true, rightIndeterminateLeftPreserved.JoinPreservesLeftRows(opt.SemiJoinOp))
}
func TestJoinMultiplicity_JoinPreservesRightRows(t *testing.T) {
require.Equal(t, false, bothIndeterminate.JoinPreservesRightRows(opt.InnerJoinOp))
require.Equal(t, false, bothIndeterminate.JoinPreservesRightRows(opt.LeftJoinOp))
require.Equal(t, true, bothIndeterminate.JoinPreservesRightRows(opt.FullJoinOp))
require.Equal(
t, true, leftIndeterminateRightPreserved.JoinPreservesRightRows(opt.InnerJoinOp))
require.Equal(
t, true, leftIndeterminateRightPreserved.JoinPreservesRightRows(opt.LeftJoinOp))
require.Equal(
t, true, leftIndeterminateRightPreserved.JoinPreservesRightRows(opt.FullJoinOp))
}
| pkg/sql/opt/props/multiplicity_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.0019771058578044176,
0.00026031132438220084,
0.00016747154586482793,
0.00016989896539598703,
0.00039386513526551425
] |
{
"id": 4,
"code_window": [
"\n",
"package log\n",
"\n",
"import \"github.com/cockroachdb/cockroach/pkg/cli/exit\"\n",
"\n",
"// Type of a stderr copy sink.\n",
"type stderrSink struct {\n",
"\t// the --no-color flag. When set it disables escapes code on the\n",
"\t// stderr copy.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"import (\n",
"\t\"github.com/cockroachdb/cockroach/pkg/cli/exit\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/syncutil\"\n",
")\n"
],
"file_path": "pkg/util/log/stderr_sink.go",
"type": "replace",
"edit_start_line_idx": 12
} | load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "colencoding",
srcs = [
"key_encoding.go",
"value_encoding.go",
],
importpath = "github.com/cockroachdb/cockroach/pkg/sql/colencoding",
visibility = ["//visibility:public"],
deps = [
"//pkg/col/coldata",
"//pkg/roachpb",
"//pkg/sql/catalog",
"//pkg/sql/catalog/descpb",
"//pkg/sql/rowenc",
"//pkg/sql/sem/tree",
"//pkg/sql/types",
"//pkg/util",
"//pkg/util/duration",
"//pkg/util/encoding",
"//pkg/util/log",
"//pkg/util/uuid",
"//vendor/github.com/cockroachdb/apd/v2:apd",
"//vendor/github.com/cockroachdb/errors",
],
)
go_test(
name = "colencoding_test",
srcs = ["value_encoding_test.go"],
embed = [":colencoding"],
deps = [
"//pkg/col/coldata",
"//pkg/col/coldataext",
"//pkg/sql/catalog/descpb",
"//pkg/sql/rowenc",
"//pkg/sql/sem/tree",
"//pkg/sql/types",
"//pkg/util/encoding",
"//pkg/util/randutil",
],
)
| pkg/sql/colencoding/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.0006557550514116883,
0.0003627913538366556,
0.00016847306687850505,
0.00028985340031795204,
0.00018734352488536388
] |
{
"id": 4,
"code_window": [
"\n",
"package log\n",
"\n",
"import \"github.com/cockroachdb/cockroach/pkg/cli/exit\"\n",
"\n",
"// Type of a stderr copy sink.\n",
"type stderrSink struct {\n",
"\t// the --no-color flag. When set it disables escapes code on the\n",
"\t// stderr copy.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"import (\n",
"\t\"github.com/cockroachdb/cockroach/pkg/cli/exit\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/util/syncutil\"\n",
")\n"
],
"file_path": "pkg/util/log/stderr_sink.go",
"type": "replace",
"edit_start_line_idx": 12
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colbuilder
import (
"context"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
"github.com/cockroachdb/cockroach/pkg/sql/colexec"
"github.com/cockroachdb/cockroach/pkg/sql/colexecbase"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/require"
)
// TestNewColOperatorExpectedTypeSchema ensures that NewColOperator call
// creates such an operator chain that its output type schema is exactly as the
// processor spec expects.
func TestNewColOperatorExpectedTypeSchema(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
defer s.Stopper().Stop(ctx)
// We will set up the following chain:
//
// ColBatchScan -> a binary projection operator -> a materializer
//
// such that the scan operator reads INT2 type but is expected to output
// INT4 column, then the projection operator performs a binary operation
// and returns an INT8 column.
//
// The crux of the test is an artificial setup of the table reader spec
// that forces the planning of a cast operator on top of the scan - if that
// cast is not planned, the binary projection operator will panic because
// it expects an Int32 vector whereas an Int16 vector is provided.
const numRows = 10
sqlutils.CreateTable(
t, sqlDB, "t",
"k INT2 PRIMARY KEY",
numRows,
sqlutils.ToRowFn(sqlutils.RowIdxFn),
)
st := cluster.MakeTestingClusterSettings()
evalCtx := tree.MakeTestingEvalContext(st)
defer evalCtx.Stop(ctx)
txn := kv.NewTxn(ctx, s.DB(), s.NodeID())
flowCtx := &execinfra.FlowCtx{
EvalCtx: &evalCtx,
Cfg: &execinfra.ServerConfig{
Settings: st,
},
Txn: txn,
NodeID: evalCtx.NodeID,
}
streamingMemAcc := evalCtx.Mon.MakeBoundAccount()
defer streamingMemAcc.Close(ctx)
desc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t")
tr := execinfrapb.TableReaderSpec{
Table: *desc.TableDesc(),
Spans: make([]execinfrapb.TableReaderSpan, 1),
NeededColumns: []uint32{0},
}
var err error
tr.Spans[0].Span.Key, err = rowenc.TestingMakePrimaryIndexKey(desc, 0)
if err != nil {
t.Fatal(err)
}
tr.Spans[0].Span.EndKey, err = rowenc.TestingMakePrimaryIndexKey(desc, numRows+1)
if err != nil {
t.Fatal(err)
}
args := &colexec.NewColOperatorArgs{
Spec: &execinfrapb.ProcessorSpec{
Core: execinfrapb.ProcessorCoreUnion{TableReader: &tr},
ResultTypes: []*types.T{types.Int4},
},
StreamingMemAccount: &streamingMemAcc,
}
r, err := NewColOperator(ctx, flowCtx, args)
require.NoError(t, err)
args = &colexec.NewColOperatorArgs{
Spec: &execinfrapb.ProcessorSpec{
Input: []execinfrapb.InputSyncSpec{{ColumnTypes: []*types.T{types.Int4}}},
Core: execinfrapb.ProcessorCoreUnion{Noop: &execinfrapb.NoopCoreSpec{}},
Post: execinfrapb.PostProcessSpec{RenderExprs: []execinfrapb.Expression{{Expr: "@1 - 1"}}},
ResultTypes: []*types.T{types.Int},
},
Inputs: []colexecbase.Operator{r.Op},
StreamingMemAccount: &streamingMemAcc,
}
r, err = NewColOperator(ctx, flowCtx, args)
require.NoError(t, err)
m, err := colexec.NewMaterializer(
flowCtx,
0, /* processorID */
r.Op,
[]*types.T{types.Int},
nil, /* output */
nil, /* metadataSourcesQueue */
nil, /* toClose */
nil, /* execStatsForTrace */
nil, /* cancelFlow */
)
require.NoError(t, err)
m.Start(ctx)
var rowIdx int
for {
row, meta := m.Next()
require.Nil(t, meta)
if row == nil {
break
}
require.Equal(t, 1, len(row))
expected := tree.DInt(rowIdx)
require.True(t, row[0].Datum.Compare(&evalCtx, &expected) == 0)
rowIdx++
}
require.Equal(t, numRows, rowIdx)
}
| pkg/sql/colexec/colbuilder/execplan_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.004401656799018383,
0.0005218301084823906,
0.00016649473400320858,
0.000169955994351767,
0.001057399669662118
] |
{
"id": 5,
"code_window": [
"type stderrSink struct {\n",
"\t// the --no-color flag. When set it disables escapes code on the\n",
"\t// stderr copy.\n",
"\tnoColor bool\n",
"}\n",
"\n",
"// activeAtSeverity implements the logSink interface.\n",
"func (l *stderrSink) active() bool { return true }\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnoColor syncutil.AtomicBool\n"
],
"file_path": "pkg/util/log/stderr_sink.go",
"type": "replace",
"edit_start_line_idx": 18
} | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package log
import (
"context"
"fmt"
"math"
"strings"
"github.com/cockroachdb/cockroach/pkg/util/log/channel"
"github.com/cockroachdb/cockroach/pkg/util/log/logconfig"
"github.com/cockroachdb/cockroach/pkg/util/log/logflags"
"github.com/cockroachdb/cockroach/pkg/util/log/logpb"
"github.com/cockroachdb/cockroach/pkg/util/log/severity"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/errors"
)
type config struct {
// showLogs reflects the use of -show-logs on the command line and is
// used for testing.
showLogs bool
// syncWrites can be set asynchronously to force all file output to
// synchronize to disk. This is set via SetSync() and used e.g. in
// start.go upon encountering errors.
syncWrites syncutil.AtomicBool
}
var debugLog *loggerT
func init() {
logflags.InitFlags(
&logging.showLogs,
&logging.vmoduleConfig.mu.vmodule,
)
// package is imported but not further initialized.
defaultConfig := logconfig.DefaultConfig()
if err := defaultConfig.Validate(nil /* no default directory */); err != nil {
panic(err)
}
// Default stderrThreshold to log everything to the process'
// external stderr (OrigStderr).
defaultConfig.Sinks.Stderr.Filter = severity.INFO
// We only register it for the DEV channels. No other
// channels get a configuration, whereby every channel
// ends up sharing the DEV logger (debugLog).
defaultConfig.Sinks.Stderr.Channels.Channels = []logpb.Channel{channel.DEV}
// We also don't capture internal writes to fd2 by default:
// let the writes go to the external stderr.
defaultConfig.CaptureFd2.Enable = false
// Since we are letting writes go to the external stderr,
// we cannot keep redaction markers there.
*defaultConfig.Sinks.Stderr.Redactable = false
// Remove all sinks other than stderr.
defaultConfig.Sinks.FileGroups = nil
if _, err := ApplyConfig(defaultConfig); err != nil {
panic(err)
}
// Reset the "active' flag so that the main commands can reset the
// configuration.
logging.mu.active = false
}
// IsActive returns true iff the main logger already has some events
// logged, or some secondary logger was created with configuration
// taken from the main logger.
//
// This is used to assert that configuration is performed
// before logging has been used for the first time.
func IsActive() (active bool, firstUse string) {
logging.mu.Lock()
defer logging.mu.Unlock()
return logging.mu.active, logging.mu.firstUseStack
}
// ApplyConfig applies the given configuration.
//
// The returned cleanup fn can be invoked by the caller to close
// asynchronous processes.
// NB: This is only useful in tests: for a long-running server process the
// cleanup function should likely not be called, to ensure that the
// file used to capture internal fd2 writes remains open up until the
// process entirely terminates. This ensures that any Go runtime
// assertion failures on the way to termination can be properly
// captured.
func ApplyConfig(config logconfig.Config) (cleanupFn func(), err error) {
// Sanity check.
if active, firstUse := IsActive(); active {
panic(errors.Newf("logging already active; first use:\n%s", firstUse))
}
// Our own cancellable context to stop the secondary loggers below.
//
// Note: we don't want to take a cancellable context from the
// caller, because in the usual case we don't want to stop the
// logger when the remainder of the process stops. See the
// discussion on cancel at the top of the function.
secLoggersCtx, secLoggersCancel := context.WithCancel(context.Background())
// secLoggers collects the secondary loggers derived by the configuration.
var secLoggers []*loggerT
// sinkInfos collects the sinkInfos derived by the configuration.
var sinkInfos []*sinkInfo
// fd2CaptureCleanupFn is the cleanup function for the fd2 capture,
// which is populated if fd2 capture is enabled, below.
fd2CaptureCleanupFn := func() {}
// cleanupFn is the returned cleanup function, whose purpose
// is to tear down the work we are doing here.
cleanupFn = func() {
// Reset the logging channels to default.
si := logging.stderrSinkInfoTemplate
logging.setChannelLoggers(make(map[Channel]*loggerT), &si)
fd2CaptureCleanupFn()
secLoggersCancel()
for _, l := range secLoggers {
allLoggers.del(l)
}
for _, l := range sinkInfos {
allSinkInfos.del(l)
}
}
// If capture of internal fd2 writes is enabled, set it up here.
if config.CaptureFd2.Enable {
if logging.testingFd2CaptureLogger != nil {
cleanupFn()
return nil, errors.New("fd2 capture already set up. Maybe use TestLogScope?")
}
// We use a secondary logger, even though no logging *event* will ever
// be logged to it, for the convenience of getting a standard log
// file header at the beginning of the file (which will contain
// a timestamp, command-line arguments, etc.).
secLogger := &loggerT{}
allLoggers.put(secLogger)
secLoggers = append(secLoggers, secLogger)
// A pseudo file sink. Again, for convenience, so we don't need
// to implement separate file management.
bt, bf := true, false
mf := logconfig.ByteSize(math.MaxInt64)
f := logconfig.DefaultFileFormat
fakeConfig := logconfig.FileConfig{
CommonSinkConfig: logconfig.CommonSinkConfig{
Filter: severity.INFO,
Criticality: &bt,
Format: &f,
Redact: &bf,
// Be careful about stripping the redaction markers from log
// entries. The captured fd2 writes are inherently unsafe, so
// we don't want the header entry to give a mistaken
// impression to the entry parser.
Redactable: &bf,
},
Dir: config.CaptureFd2.Dir,
MaxGroupSize: config.CaptureFd2.MaxGroupSize,
MaxFileSize: &mf,
SyncWrites: &bt,
}
fileSinkInfo, fileSink, err := newFileSinkInfo("stderr", fakeConfig)
if err != nil {
cleanupFn()
return nil, err
}
sinkInfos = append(sinkInfos, fileSinkInfo)
allSinkInfos.put(fileSinkInfo)
if fileSink.logFilesCombinedMaxSize > 0 {
// Do a first round of GC to clear up previously accumulated files.
fileSink.gcOldFiles()
// Start the GC process. This ensures that old capture files get
// erased as new files get created.
go fileSink.gcDaemon(secLoggersCtx)
}
// Connect the sink to the logger.
secLogger.sinkInfos = []*sinkInfo{fileSinkInfo}
// Force a log entry. This does two things: it forces the creation
// of a file and it also introduces a timestamp marker.
entry := MakeEntry(secLoggersCtx, severity.INFO, channel.DEV, 0, false,
"stderr capture started")
secLogger.outputLogEntry(entry)
// Now tell this logger to capture internal stderr writes.
if err := fileSink.takeOverInternalStderr(secLogger); err != nil {
// Oof, it turns out we can't use this logger after all. Give up
// on everything we did.
cleanupFn()
return nil, err
}
// Now inform the other functions using stderrLog that we
// have a new logger for it.
logging.testingFd2CaptureLogger = secLogger
fd2CaptureCleanupFn = func() {
// Relinquish the stderr redirect.
if err := secLogger.getFileSink().relinquishInternalStderr(); err != nil {
// This should not fail. If it does, some caller messed up by
// switching over stderr redirection to a different logger
// without our involvement. That's invalid API usage.
panic(err)
}
// Restore the apparent stderr logger used by Shout() and tests.
logging.testingFd2CaptureLogger = nil
// Note: the remainder of the code in cleanupFn() will remove
// the logger and close it. No need to also do it here.
}
}
// Apply the stderr sink configuration.
logging.stderrSink.noColor = config.Sinks.Stderr.NoColor
if err := logging.stderrSinkInfoTemplate.applyConfig(config.Sinks.Stderr.CommonSinkConfig); err != nil {
cleanupFn()
return nil, err
}
// Create the per-channel loggers.
chans := make(map[Channel]*loggerT, len(logpb.Channel_name))
for chi := range logpb.Channel_name {
ch := Channel(chi)
chans[ch] = &loggerT{}
if ch == channel.DEV {
debugLog = chans[ch]
}
}
// Make a copy of the template so that any subsequent config
// changes don't race with logging operations.
stderrSinkInfo := logging.stderrSinkInfoTemplate
// Connect the stderr channels.
for _, ch := range config.Sinks.Stderr.Channels.Channels {
// Note: we connect stderr even if the severity is NONE
// so that tests can raise the severity after configuration.
l := chans[ch]
l.sinkInfos = append(l.sinkInfos, &stderrSinkInfo)
}
// Create the file sinks.
for prefix, fc := range config.Sinks.FileGroups {
if fc.Filter == severity.NONE || fc.Dir == nil {
continue
}
if prefix == "default" {
prefix = ""
}
fileSinkInfo, _, err := newFileSinkInfo(prefix, *fc)
if err != nil {
cleanupFn()
return nil, err
}
sinkInfos = append(sinkInfos, fileSinkInfo)
allSinkInfos.put(fileSinkInfo)
// Connect the channels for this sink.
for _, ch := range fc.Channels.Channels {
l := chans[ch]
l.sinkInfos = append(l.sinkInfos, fileSinkInfo)
}
}
logging.setChannelLoggers(chans, &stderrSinkInfo)
setActive()
return cleanupFn, nil
}
// newFileSinkInfo creates a new fileSink and its accompanying sinkInfo
// from the provided configuration.
func newFileSinkInfo(fileNamePrefix string, c logconfig.FileConfig) (*sinkInfo, *fileSink, error) {
info := &sinkInfo{}
if err := info.applyConfig(c.CommonSinkConfig); err != nil {
return nil, nil, err
}
fileSink := newFileSink(
*c.Dir,
fileNamePrefix,
*c.SyncWrites,
int64(*c.MaxFileSize),
int64(*c.MaxGroupSize),
info.getStartLines)
info.sink = fileSink
return info, fileSink, nil
}
// applyConfig applies a common sink configuration to a sinkInfo.
func (l *sinkInfo) applyConfig(c logconfig.CommonSinkConfig) error {
l.threshold = c.Filter
l.redact = *c.Redact
l.redactable = *c.Redactable
l.editor = getEditor(SelectEditMode(*c.Redact, *c.Redactable))
l.criticality = *c.Criticality
f, ok := formatters[*c.Format]
if !ok {
return errors.Newf("unknown format: %q", *c.Format)
}
l.formatter = f
return nil
}
// describeAppliedConfig reports a sinkInfo's configuration as a
// CommonSinkConfig. Note that the returned config object
// holds into the sinkInfo parameters by reference and thus should
// not be reused if the configuration can change asynchronously.
func (l *sinkInfo) describeAppliedConfig() (c logconfig.CommonSinkConfig) {
c.Filter = l.threshold
c.Redact = &l.redact
c.Redactable = &l.redactable
c.Criticality = &l.criticality
f := l.formatter.formatterName()
c.Format = &f
return c
}
// TestingResetActive clears the active bit. This is for use in tests
// that use stderr redirection alongside other tests that use
// logging.
func TestingResetActive() {
logging.mu.Lock()
defer logging.mu.Unlock()
logging.mu.active = false
}
// DescribeAppliedConfig describes the current setup as effected by
// ApplyConfig(). This is useful in tests and also to check
// when something may be wrong with the logging configuration.
func DescribeAppliedConfig() string {
var config logconfig.Config
// Describe the fd2 capture, if installed.
if logging.testingFd2CaptureLogger != nil {
config.CaptureFd2.Enable = true
fs := logging.testingFd2CaptureLogger.sinkInfos[0].sink.(*fileSink)
fs.mu.Lock()
dir := fs.mu.logDir
fs.mu.Unlock()
config.CaptureFd2.Dir = &dir
m := logconfig.ByteSize(fs.logFilesCombinedMaxSize)
config.CaptureFd2.MaxGroupSize = &m
}
// Describe the stderr sink.
config.Sinks.Stderr.NoColor = logging.stderrSink.noColor
config.Sinks.Stderr.CommonSinkConfig = logging.stderrSinkInfoTemplate.describeAppliedConfig()
describeConnections := func(l *loggerT, ch Channel,
target *sinkInfo, list *logconfig.ChannelList) {
for _, s := range l.sinkInfos {
if s == target {
list.Channels = append(list.Channels, ch)
}
}
list.Sort()
}
// Describe the connections to the stderr sink.
logging.rmu.RLock()
chans := logging.rmu.channels
stderrSinkInfo := logging.rmu.currentStderrSinkInfo
logging.rmu.RUnlock()
for ch, logger := range chans {
describeConnections(logger, ch,
stderrSinkInfo, &config.Sinks.Stderr.Channels)
}
// Describe the file sinks.
config.Sinks.FileGroups = make(map[string]*logconfig.FileConfig)
_ = allSinkInfos.iter(func(l *sinkInfo) error {
if cl := logging.testingFd2CaptureLogger; cl != nil && cl.sinkInfos[0] == l {
// Not a real sink. Omit.
return nil
}
fileSink, ok := l.sink.(*fileSink)
if !ok {
return nil
}
fc := &logconfig.FileConfig{}
fc.CommonSinkConfig = l.describeAppliedConfig()
mf := logconfig.ByteSize(fileSink.logFileMaxSize)
fc.MaxFileSize = &mf
mg := logconfig.ByteSize(fileSink.logFilesCombinedMaxSize)
fc.MaxGroupSize = &mg
fileSink.mu.Lock()
dir := fileSink.mu.logDir
fileSink.mu.Unlock()
fc.Dir = &dir
fc.SyncWrites = &fileSink.syncWrites
// Describe the connections to this file sink.
for ch, logger := range chans {
describeConnections(logger, ch, l, &fc.Channels)
}
prefix := strings.TrimPrefix(fileSink.prefix, program)
if prefix == "" {
prefix = "default"
} else {
prefix = strings.TrimPrefix(prefix, "-")
}
if prev, ok := config.Sinks.FileGroups[prefix]; ok {
fmt.Fprintf(OrigStderr,
"warning: multiple file loggers with prefix %q, previous: %+v\n",
prefix, prev)
}
config.Sinks.FileGroups[prefix] = fc
return nil
})
// Note: we cannot return 'config' directly, because this captures
// certain variables from the loggers by reference and thus could be
// invalidated by concurrent uses of ApplyConfig().
return config.String()
}
| pkg/util/log/flags.go | 1 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.978821873664856,
0.0960698351264,
0.00015735611668787897,
0.0010961834341287613,
0.26332327723503113
] |
{
"id": 5,
"code_window": [
"type stderrSink struct {\n",
"\t// the --no-color flag. When set it disables escapes code on the\n",
"\t// stderr copy.\n",
"\tnoColor bool\n",
"}\n",
"\n",
"// activeAtSeverity implements the logSink interface.\n",
"func (l *stderrSink) active() bool { return true }\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnoColor syncutil.AtomicBool\n"
],
"file_path": "pkg/util/log/stderr_sink.go",
"type": "replace",
"edit_start_line_idx": 18
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package faker
import (
"fmt"
"strconv"
"golang.org/x/exp/rand"
)
type addressFaker struct {
streetAddress *weightedEntries
streetSuffix *weightedEntries
name nameFaker
}
// StreetAddress returns a random en_US street address.
func (f *addressFaker) StreetAddress(rng *rand.Rand) string {
return f.streetAddress.Rand(rng).(func(rng *rand.Rand) string)(rng)
}
func (f *addressFaker) buildingNumber(rng *rand.Rand) string {
return strconv.Itoa(randInt(rng, 1000, 99999))
}
func (f *addressFaker) streetName(rng *rand.Rand) string {
return fmt.Sprintf(`%s %s`, f.firstOrLastName(rng), f.streetSuffix.Rand(rng))
}
func (f *addressFaker) firstOrLastName(rng *rand.Rand) string {
switch rng.Intn(3) {
case 0:
return f.name.firstNameFemale.Rand(rng).(string)
case 1:
return f.name.firstNameMale.Rand(rng).(string)
case 2:
return f.name.lastName.Rand(rng).(string)
}
panic(`unreachable`)
}
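// secondaryAddress returns a random secondary address unit, such as an apartment
// or suite number.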
func secondaryAddress(rng *rand.Rand) string {
switch rng.Intn(2) {
case 0:
return fmt.Sprintf(`Apt. %d`, rng.Intn(100))
case 1:
return fmt.Sprintf(`Suite %d`, rng.Intn(100))
}
panic(`unreachable`)
}
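// newAddressFaker constructs an addressFaker that combines building numbers,
// street names, and optional secondary addresses.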
func newAddressFaker(name nameFaker) addressFaker {
f := addressFaker{name: name}
f.streetSuffix = streetSuffix()
f.streetAddress = makeWeightedEntries(
func(rng *rand.Rand) string {
return fmt.Sprintf(`%s %s`, f.buildingNumber(rng), f.streetName(rng))
}, 0.5,
func(rng *rand.Rand) string {
return fmt.Sprintf(`%s %s %s`,
f.buildingNumber(rng), f.streetName(rng), secondaryAddress(rng))
}, 0.5,
)
return f
}
| pkg/workload/faker/address.go | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.00017923804989550263,
0.00017242724425159395,
0.0001660576235735789,
0.00017208285862579942,
0.000005345879799278919
] |
{
"id": 5,
"code_window": [
"type stderrSink struct {\n",
"\t// the --no-color flag. When set it disables escapes code on the\n",
"\t// stderr copy.\n",
"\tnoColor bool\n",
"}\n",
"\n",
"// activeAtSeverity implements the logSink interface.\n",
"func (l *stderrSink) active() bool { return true }\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnoColor syncutil.AtomicBool\n"
],
"file_path": "pkg/util/log/stderr_sink.go",
"type": "replace",
"edit_start_line_idx": 18
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sqltelemetry
import (
"context"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/log/logcrash"
"github.com/cockroachdb/errors"
)
// RecordError processes a SQL error. This includes both incrementing
// telemetry counters, and sending a sentry report for internal
// (assertion) errors.
func RecordError(ctx context.Context, err error, sv *settings.Values) {
// In any case, record the counters.
telemetry.RecordError(err)
code := pgerror.GetPGCode(err)
switch {
case code == pgcode.Uncategorized:
// For compatibility with 19.1 telemetry, keep track of the number
// of occurrences of errors without code in telemetry. Over time,
// we'll want this count to go down (i.e. more errors becoming
// qualified with a code).
//
// TODO(knz): figure out if this telemetry is still useful.
telemetry.Inc(UncategorizedErrorCounter)
case code == pgcode.Internal || errors.HasAssertionFailure(err):
// This is an assertion failure / crash.
//
// Note: not all assertion failures end up with code "internal".
// For example, an assertion failure "underneath" a schema change
// failure during a COMMIT for a multi-stmt txn will mask the
// internal code and replace it with
// TransactionCommittedWithSchemaChangeFailure.
//
// Conversely, not all errors with code "internal" are assertion
// failures, but we still want to log/register them.
// We want to log the internal error regardless of whether a
// report is sent to sentry below.
log.Errorf(ctx, "encountered internal error:\n%+v", err)
if logcrash.ShouldSendReport(sv) {
event, extraDetails := errors.BuildSentryReport(err)
logcrash.SendReport(ctx, logcrash.ReportTypeError, event, extraDetails)
}
}
}
| pkg/sql/sqltelemetry/report.go | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.0004892052384093404,
0.00021619464678224176,
0.00016781890008132905,
0.00016987044364213943,
0.00011151550279464573
] |
{
"id": 5,
"code_window": [
"type stderrSink struct {\n",
"\t// the --no-color flag. When set it disables escapes code on the\n",
"\t// stderr copy.\n",
"\tnoColor bool\n",
"}\n",
"\n",
"// activeAtSeverity implements the logSink interface.\n",
"func (l *stderrSink) active() bool { return true }\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnoColor syncutil.AtomicBool\n"
],
"file_path": "pkg/util/log/stderr_sink.go",
"type": "replace",
"edit_start_line_idx": 18
} | ["\u0060\u012a\u12AB"] | pkg/util/json/testdata/raw/string_1_2_3_bytes_UTF-8_sequences.json | 0 | https://github.com/cockroachdb/cockroach/commit/40f01b9e83cf28097a345df3eb564d3521884157 | [
0.0001736129925120622,
0.0001736129925120622,
0.0001736129925120622,
0.0001736129925120622,
0
] |
{
"id": 0,
"code_window": [
"\n",
"import (\n",
"\t\"context\"\n",
"\t\"runtime\"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t\"os\"\n"
],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "add",
"edit_start_line_idx": 14
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tests
import (
"context"
gosql "database/sql"
"fmt"
"math/rand"
"path/filepath"
"runtime"
"time"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/clusterupgrade"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/mixedversion"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/roachprod/install"
"github.com/cockroachdb/cockroach/pkg/roachprod/logger"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/version"
"github.com/stretchr/testify/require"
)
type versionFeatureTest struct {
name string
statement string
}
// Feature tests that are invoked in mixed-version state during the
// upgrade test. A gotcha is that these feature tests are also
// invoked when the cluster is in the middle of upgrading (i.e. a
// state where the cluster version has already been bumped, but not
// all nodes are aware yet). This should be considered a feature of this
// test, and feature tests that flake because of it need to be fixed.
var versionUpgradeTestFeatures = []versionFeatureTest{
// NB: the next four tests are ancient and supported since v2.0.
{
name: "ObjectAccess",
statement: `
-- We should be able to successfully select from objects created in ancient
-- versions of CRDB using their FQNs. Prevents bugs such as #43141, where
-- databases created before a migration were inaccessible after the
-- migration.
--
-- NB: the data has been baked into the fixtures. Originally created via:
-- create database persistent_db
-- create table persistent_db.persistent_table(a int)"))
-- on CRDB v1.0
select * from persistent_db.persistent_table;
show tables from persistent_db;
`,
},
{
name: "JSONB",
statement: `
CREATE DATABASE IF NOT EXISTS test;
CREATE TABLE test.t (j JSONB);
DROP TABLE test.t;
`,
},
{
name: "Sequences",
statement: `
CREATE DATABASE IF NOT EXISTS test;
CREATE SEQUENCE test.test_sequence;
DROP SEQUENCE test.test_sequence;
`,
},
{
name: "Computed Columns",
statement: `
CREATE DATABASE IF NOT EXISTS test;
CREATE TABLE test.t (x INT AS (3) STORED);
DROP TABLE test.t;
`,
},
{
name: "Split and Merge Ranges",
statement: `
CREATE DATABASE IF NOT EXISTS splitmerge;
CREATE TABLE splitmerge.t (k INT PRIMARY KEY);
ALTER TABLE splitmerge.t SPLIT AT VALUES (1), (2), (3);
ALTER TABLE splitmerge.t UNSPLIT AT VALUES (1), (2), (3);
DROP TABLE splitmerge.t;
`,
},
}
func runVersionUpgrade(ctx context.Context, t test.Test, c cluster.Cluster) {
if c.IsLocal() && runtime.GOARCH == "arm64" {
t.Skip("Skip under ARM64. See https://github.com/cockroachdb/cockroach/issues/89268")
}
c.Put(ctx, t.DeprecatedWorkload(), "./workload", c.All())
mvt := mixedversion.NewTest(ctx, t, t.L(), c, c.All())
mvt.OnStartup("setup schema changer workload", func(ctx context.Context, l *logger.Logger, r *rand.Rand, helper *mixedversion.Helper) error {
// Execute the workload init.
return c.RunE(ctx, c.All(), "./workload init schemachange")
})
mvt.InMixedVersion("run backup", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error {
// Verify that backups can be created in various configurations. This is
// important to test because changes in system tables might cause backups to
// fail in mixed-version clusters.
dest := fmt.Sprintf("nodelocal://1/%d", timeutil.Now().UnixNano())
return h.Exec(rng, `BACKUP TO $1`, dest)
})
mvt.InMixedVersion(
"test features",
func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error {
for _, featureTest := range versionUpgradeTestFeatures {
l.Printf("running feature test %q", featureTest.name)
if err := h.Exec(rng, featureTest.statement); err != nil {
l.Printf("%q: ERROR (%s)", featureTest.name, err)
return err
}
l.Printf("%q: OK", featureTest.name)
}
return nil
},
)
mvt.InMixedVersion(
"test schema change step",
func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error {
l.Printf("running schema workload step")
runCmd := roachtestutil.NewCommand("./workload run schemachange").Flag("verbose", 1).Flag("max-ops", 10).Flag("concurrency", 2).Arg("{pgurl:1-%d}", len(c.All()))
randomNode := h.RandomNode(rng, c.All())
return c.RunE(ctx, option.NodeListOption{randomNode}, runCmd.String())
},
)
mvt.AfterUpgradeFinalized(
"check if GC TTL is pinned",
func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error {
// TODO(irfansharif): This can be removed when the predecessor version
// in this test is v23.1, where the default is 4h. This test was only to
// make sure that existing clusters that upgrade to 23.1 retained their
// existing GC TTL.
l.Printf("checking if GC TTL is pinned to 24h")
var ttlSeconds int
query := `
SELECT
(crdb_internal.pb_to_json('cockroach.config.zonepb.ZoneConfig', raw_config_protobuf)->'gc'->'ttlSeconds')::INT
FROM crdb_internal.zones
WHERE target = 'RANGE default'
LIMIT 1
`
if err := h.QueryRow(rng, query).Scan(&ttlSeconds); err != nil {
return fmt.Errorf("error querying GC TTL: %w", err)
}
expectedTTL := 24 * 60 * 60 // NB: 24h is what's used in the fixture
if ttlSeconds != expectedTTL {
return fmt.Errorf("unexpected GC TTL: actual (%d) != expected (%d)", ttlSeconds, expectedTTL)
}
return nil
},
)
mvt.Run()
}
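// run executes the configured version steps in order and closes any cached
// connections when it finishes.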
func (u *versionUpgradeTest) run(ctx context.Context, t test.Test) {
defer func() {
for _, db := range u.conns {
_ = db.Close()
}
}()
for i, step := range u.steps {
if step != nil {
t.Status(fmt.Sprintf("versionUpgradeTest: starting step %d", i+1))
step(ctx, t, u)
}
}
}
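// versionUpgradeTest bundles a cluster with an ordered list of version steps and
// caches SQL connections across steps.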
type versionUpgradeTest struct {
goOS string
c cluster.Cluster
steps []versionStep
// Cache conns because opening one takes hundreds of ms, and we do it quite
// a lot.
conns []*gosql.DB
}
func newVersionUpgradeTest(c cluster.Cluster, steps ...versionStep) *versionUpgradeTest {
return &versionUpgradeTest{
goOS: ifLocal(c, runtime.GOOS, "linux"),
c: c,
steps: steps,
}
}
// Return a cached conn to the given node. Don't call .Close(); the test harness
// will do it.
func (u *versionUpgradeTest) conn(ctx context.Context, t test.Test, i int) *gosql.DB {
if u.conns == nil {
for _, i := range u.c.All() {
u.conns = append(u.conns, u.c.Conn(ctx, t.L(), i))
}
}
db := u.conns[i-1]
// Run a trivial query to shake out errors that can occur when the server has
// restarted in the meantime.
_ = db.PingContext(ctx)
return db
}
// uploadVersion is a thin wrapper around
// `clusterupgrade.UploadVersion` that calls t.Fatal if that call
// returns an error
func uploadVersion(
ctx context.Context,
t test.Test,
c cluster.Cluster,
nodes option.NodeListOption,
newVersion string,
) string {
path, err := clusterupgrade.UploadVersion(ctx, t, t.L(), c, nodes, newVersion)
if err != nil {
t.Fatal(err)
}
return path
}
// upgradeNodes is a thin wrapper around
// `clusterupgrade.RestartNodesWithNewBinary` that calls t.Fatal if
// that call returns an error.
func upgradeNodes(
ctx context.Context,
t test.Test,
c cluster.Cluster,
nodes option.NodeListOption,
startOpts option.StartOpts,
newVersion string,
) {
if err := clusterupgrade.RestartNodesWithNewBinary(
ctx, t, t.L(), c, nodes, startOpts, newVersion,
); err != nil {
t.Fatal(err)
}
}
func (u *versionUpgradeTest) binaryVersion(
ctx context.Context, t test.Test, i int,
) roachpb.Version {
db := u.conn(ctx, t, i)
v, err := clusterupgrade.BinaryVersion(db)
if err != nil {
t.Fatal(err)
}
return v
}
// versionStep is an isolated version migration on a running cluster.
type versionStep func(ctx context.Context, t test.Test, u *versionUpgradeTest)
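// uploadAndStartFromCheckpointFixture installs the store fixtures for version v,
// uploads the matching cockroach binary, and starts the given nodes from those stores.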
func uploadAndStartFromCheckpointFixture(nodes option.NodeListOption, v string) versionStep {
return func(ctx context.Context, t test.Test, u *versionUpgradeTest) {
if err := clusterupgrade.InstallFixtures(ctx, t.L(), u.c, nodes, v); err != nil {
t.Fatal(err)
}
binary := uploadVersion(ctx, t, u.c, nodes, v)
startOpts := option.DefaultStartOpts()
// NB: can't start sequentially since cluster already bootstrapped.
startOpts.RoachprodOpts.Sequential = false
clusterupgrade.StartWithBinary(ctx, t.L(), u.c, nodes, binary, startOpts)
}
}
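// uploadAndStart uploads the cockroach binary for version v and starts the given
// nodes with it, without installing any fixtures.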
func uploadAndStart(nodes option.NodeListOption, v string) versionStep {
return func(ctx context.Context, t test.Test, u *versionUpgradeTest) {
binary := uploadVersion(ctx, t, u.c, nodes, v)
startOpts := option.DefaultStartOpts()
// NB: can't start sequentially since cluster already bootstrapped.
startOpts.RoachprodOpts.Sequential = false
clusterupgrade.StartWithBinary(ctx, t.L(), u.c, nodes, binary, startOpts)
}
}
// binaryUpgradeStep rolling-restarts the given nodes into the new binary
// version. Note that this does *not* wait for the cluster version to upgrade.
// Use a waitForUpgradeStep() for that.
func binaryUpgradeStep(nodes option.NodeListOption, newVersion string) versionStep {
return func(ctx context.Context, t test.Test, u *versionUpgradeTest) {
if err := clusterupgrade.RestartNodesWithNewBinary(
ctx, t, t.L(), u.c, nodes, option.DefaultStartOpts(), newVersion,
); err != nil {
t.Fatal(err)
}
}
}
func preventAutoUpgradeStep(node int) versionStep {
return func(ctx context.Context, t test.Test, u *versionUpgradeTest) {
db := u.conn(ctx, t, node)
_, err := db.ExecContext(ctx, `SET CLUSTER SETTING cluster.preserve_downgrade_option = $1`, u.binaryVersion(ctx, t, node).String())
if err != nil {
t.Fatal(err)
}
}
}
func allowAutoUpgradeStep(node int) versionStep {
return func(ctx context.Context, t test.Test, u *versionUpgradeTest) {
db := u.conn(ctx, t, node)
_, err := db.ExecContext(ctx, `RESET CLUSTER SETTING cluster.preserve_downgrade_option`)
if err != nil {
t.Fatal(err)
}
}
}
// NB: this is intentionally kept separate from binaryUpgradeStep because we run
// feature tests between the steps, and we want to expose them (at least
// heuristically) to the real-world situation in which some nodes have already
// learned of a cluster version bump (from Gossip) while others haven't. This
// situation tends to exhibit unexpected behavior.
func waitForUpgradeStep(nodes option.NodeListOption) versionStep {
return func(ctx context.Context, t test.Test, u *versionUpgradeTest) {
dbFunc := func(node int) *gosql.DB { return u.conn(ctx, t, node) }
if err := clusterupgrade.WaitForClusterUpgrade(ctx, t.L(), nodes, dbFunc); err != nil {
t.Fatal(err)
}
}
}
// makeVersionFixtureAndFatal creates fixtures from which we can test
// mixed-version clusters (i.e. version X mixing with X-1). The fixtures date
// back all the way to v1.0; when development begins on version X, we make a
// fixture for version X-1 by starting the version X-2 cluster from
// the X-2 fixtures, upgrading it to version X-1, and copying the resulting store
// directories to the log directories (which are part of the artifacts). The
// test will then fail on purpose when it's done with instructions on where to
// move the files.
func makeVersionFixtureAndFatal(
ctx context.Context, t test.Test, c cluster.Cluster, makeFixtureVersion string,
) {
var useLocalBinary bool
if makeFixtureVersion == "" {
c.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings(), c.Node(1))
require.NoError(t, c.Conn(ctx, t.L(), 1).QueryRowContext(
ctx,
`select regexp_extract(value, '^v([0-9]+\.[0-9]+\.[0-9]+)') from crdb_internal.node_build_info where field = 'Version';`,
).Scan(&makeFixtureVersion))
c.Wipe(ctx, c.Node(1))
useLocalBinary = true
}
predecessorVersion, err := version.PredecessorVersion(*version.MustParse("v" + makeFixtureVersion))
if err != nil {
t.Fatal(err)
}
t.L().Printf("making fixture for %s (starting at %s)", makeFixtureVersion, predecessorVersion)
if useLocalBinary {
// Make steps below use the main cockroach binary (in particular, don't try
// to download the released version for makeFixtureVersion which may not yet
// exist)
makeFixtureVersion = ""
}
newVersionUpgradeTest(c,
// Start the cluster from a fixture. That fixture's cluster version may
// be at the predecessor version (though in practice it's fully up to
// date, if it was created via the checkpointer above), so add a
// waitForUpgradeStep to make sure we're upgraded all the way before
// moving on.
//
// See the comment on createCheckpoints for details on fixtures.
uploadAndStartFromCheckpointFixture(c.All(), predecessorVersion),
waitForUpgradeStep(c.All()),
// NB: at this point, cluster and binary version equal predecessorVersion,
// and auto-upgrades are on.
binaryUpgradeStep(c.All(), makeFixtureVersion),
waitForUpgradeStep(c.All()),
func(ctx context.Context, t test.Test, u *versionUpgradeTest) {
// If we're taking checkpoints, momentarily stop the cluster (we
// need to do that to get the checkpoints to reflect a
// consistent cluster state). The binary at this point will be
// the new one, but the cluster version was not explicitly
// bumped, though auto-update may have taken place already.
// For example, if newVersion is 2.1, the cluster version in
// the store directories may be 2.0 on some stores and 2.1 on
// the others (though if any are on 2.1, then that's what's
// stored in system.settings).
// This means that when we restart from that version, we're
// going to want to use the binary mentioned in the checkpoint,
// or at least one compatible with the *predecessor* of the
// checkpoint version. For example, for checkpoint-2.1, the
// cluster version might be 2.0, so we can only use the 2.0 or
// 2.1 binary, but not the 19.1 binary (as 19.1 and 2.0 are not
// compatible).
name := clusterupgrade.CheckpointName(u.binaryVersion(ctx, t, 1).String())
u.c.Stop(ctx, t.L(), option.DefaultStopOpts(), c.All())
binaryPath := clusterupgrade.BinaryPathFromVersion(makeFixtureVersion)
c.Run(ctx, c.All(), binaryPath, "debug", "pebble", "db", "checkpoint",
"{store-dir}", "{store-dir}/"+name)
// The `cluster-bootstrapped` marker can already be found within
// store-dir, but the rocksdb checkpoint step above does not pick it
// up as it isn't recognized by RocksDB. We copy the marker
// manually, it's necessary for roachprod created clusters. See
// #54761.
c.Run(ctx, c.Node(1), "cp", "{store-dir}/cluster-bootstrapped", "{store-dir}/"+name)
// Similar to the above - newer versions require the min version file to open a store.
c.Run(ctx, c.Node(1), "cp", fmt.Sprintf("{store-dir}/%s", storage.MinVersionFilename), "{store-dir}/"+name)
c.Run(ctx, c.All(), "tar", "-C", "{store-dir}/"+name, "-czf", "{log-dir}/"+name+".tgz", ".")
t.Fatalf(`successfully created checkpoints; failing test on purpose.
Invoke the following to move the archives to the right place and commit the
result:
for i in 1 2 3 4; do
mkdir -p pkg/cmd/roachtest/fixtures/${i} && \
mv artifacts/generate-fixtures/run_1/logs/${i}.unredacted/checkpoint-*.tgz \
pkg/cmd/roachtest/fixtures/${i}/
done
`)
}).run(ctx, t)
}
// importTPCCStep runs a TPCC import on the first crdbNode (monitoring them all for
// crashes during the import). If oldV is non-empty, this runs the import using the specified
// version (for example "19.2.1", as provided by PredecessorVersion()) using the location
// used by c.Stage(). An empty oldV uses the main cockroach binary.
func importTPCCStep(
oldV string, headroomWarehouses int, crdbNodes option.NodeListOption,
) versionStep {
return func(ctx context.Context, t test.Test, u *versionUpgradeTest) {
// We need to use the predecessor binary to load into the
// predecessor cluster to avoid random breakage. For example, you
// can't use 21.1 to import into 20.2 due to some flag changes.
//
// TODO(tbg): also import a large dataset (for example 2TB bank)
// that will provide cold data that may need to be migrated.
var cmd string
if oldV == "" {
cmd = tpccImportCmd(headroomWarehouses)
} else {
cmd = tpccImportCmdWithCockroachBinary(filepath.Join("v"+oldV, "cockroach"), headroomWarehouses, "--checks=false")
}
// Use a monitor so that we fail cleanly if the cluster crashes
// during import.
m := u.c.NewMonitor(ctx, crdbNodes)
m.Go(func(ctx context.Context) error {
return u.c.RunE(ctx, u.c.Node(crdbNodes[0]), cmd)
})
m.Wait()
}
}
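// importLargeBankStep imports a large bank dataset using the binary for oldV (or the
// main cockroach binary when oldV is empty), failing cleanly if the cluster crashes.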
func importLargeBankStep(oldV string, rows int, crdbNodes option.NodeListOption) versionStep {
return func(ctx context.Context, t test.Test, u *versionUpgradeTest) {
// Use the predecessor binary to load into the predecessor
// cluster to avoid random breakage due to flag changes, etc.
binary := "./cockroach"
if oldV != "" {
binary = filepath.Join("v"+oldV, "cockroach")
}
// Use a monitor so that we fail cleanly if the cluster crashes
// during import.
m := u.c.NewMonitor(ctx, crdbNodes)
m.Go(func(ctx context.Context) error {
return u.c.RunE(ctx, u.c.Node(crdbNodes[0]), binary, "workload", "fixtures", "import", "bank",
"--payload-bytes=10240", "--rows="+fmt.Sprint(rows), "--seed=4", "--db=bigbank")
})
m.Wait()
}
}
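// sleepStep returns a versionStep that simply pauses for the given duration.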
func sleepStep(d time.Duration) versionStep {
return func(ctx context.Context, t test.Test, u *versionUpgradeTest) {
time.Sleep(d)
}
}
| pkg/cmd/roachtest/tests/versionupgrade.go | 1 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.5817038416862488,
0.014725202694535255,
0.0001668058685027063,
0.0002993546659126878,
0.08158999681472778
] |
{
"id": 0,
"code_window": [
"\n",
"import (\n",
"\t\"context\"\n",
"\t\"runtime\"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t\"os\"\n"
],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "add",
"edit_start_line_idx": 14
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rttanalysis
import (
"encoding/csv"
"flag"
"os"
"os/exec"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"testing"
"github.com/cockroachdb/cockroach/pkg/testutils/datapathutils"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/quotapool"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/system"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
type benchmarkResult struct {
name string
result int
}
type benchmarkExpectation struct {
name string
	// min and max are the expected number of KV round-trips that should be performed
// in this benchmark.
min, max int
}
const expectationsFilename = "benchmark_expectations"
var expectationsHeader = []string{"exp", "benchmark"}
var (
	rewriteFlag = flag.Bool("rewrite", false,
		"if set, rewrite the benchmark expectations file")
rewriteIterations = flag.Int("rewrite-iterations", 50,
"if re-writing, the number of times to execute each benchmark to "+
"determine the range of possible values")
allowOffByOne = flag.Bool("allow-off-by-one", true,
"if set, expectations that are not a range get a ±1 tolerance")
)
// RunBenchmarkExpectationTests runs tests to validate or rewrite the contents
// of the benchmark expectations file.
func runBenchmarkExpectationTests(t *testing.T, r *Registry) {
if util.IsMetamorphicBuild() {
execTestSubprocess(t)
return
}
// Only create the scope after we've checked if we need to exec the subprocess.
scope := log.Scope(t)
defer scope.Close(t)
defer func() {
if t.Failed() {
t.Log("see the --rewrite flag to re-run the benchmarks and adjust the expectations")
}
}()
var results resultSet
var wg sync.WaitGroup
concurrency := ((system.NumCPU() - 1) / r.numNodes) + 1 // arbitrary
limiter := quotapool.NewIntPool("rttanalysis", uint64(concurrency))
isRewrite := *rewriteFlag
for b, cases := range r.r {
wg.Add(1)
go func(b string, cases []RoundTripBenchTestCase) {
defer wg.Done()
t.Run(b, func(t *testing.T) {
runs := 1
if isRewrite {
runs = *rewriteIterations
}
runRoundTripBenchmarkTest(t, scope, &results, cases, r.cc, runs, limiter)
})
}(b, cases)
}
wg.Wait()
if isRewrite {
writeExpectationsFile(t,
mergeExpectations(
readExpectationsFile(t),
resultsToExpectations(t, results.toSlice()),
))
} else {
checkResults(t, &results, readExpectationsFile(t))
}
}
func checkResults(t *testing.T, results *resultSet, expectations benchmarkExpectations) {
results.iterate(func(r benchmarkResult) {
exp, ok := expectations.find(r.name)
if !ok {
t.Logf("no expectation for benchmark %s, got %d", r.name, r.result)
return
}
if !exp.matches(r.result) {
t.Errorf("fail: expected %s to perform KV lookups in [%d, %d], got %d",
r.name, exp.min, exp.max, r.result)
} else {
t.Logf("success: expected %s to perform KV lookups in [%d, %d], got %d",
r.name, exp.min, exp.max, r.result)
}
})
}
func mergeExpectations(existing, new benchmarkExpectations) (merged benchmarkExpectations) {
sort.Sort(existing)
sort.Sort(new)
pop := func(be *benchmarkExpectations) (ret benchmarkExpectation) {
ret = (*be)[0]
*be = (*be)[1:]
return ret
}
for len(existing) > 0 && len(new) > 0 {
switch {
case existing[0].name < new[0].name:
merged = append(merged, pop(&existing))
case existing[0].name > new[0].name:
merged = append(merged, pop(&new))
default:
pop(&existing) // discard the existing value if they are equal
merged = append(merged, pop(&new))
}
}
// Only one of existing or new will be non-empty.
merged = append(append(merged, new...), existing...)
return merged
}
// execTestSubprocess execs the testing binary with all the same flags in order
// to run it without metamorphic testing enabled. Metamorphic testing messes
// with the benchmark results. It's particularly important to do this as we
// always run with metamorphic testing enabled in CI.
func execTestSubprocess(t *testing.T) {
var args []string
flag.CommandLine.Visit(func(f *flag.Flag) {
vs := f.Value.String()
switch f.Name {
case "test.run":
// Only run the current outermost test in the subprocess.
prefix := "^" + regexp.QuoteMeta(t.Name()) + "$"
if idx := strings.Index(vs, "/"); idx >= 0 {
vs = prefix + vs[idx:]
} else {
vs = prefix
}
case "test.bench":
// Omit the benchmark flags, we'll add a flag below to disable
// benchmarks. Consider the below command. We don't want to
// run the benchmarks again in this subprocess. We only want
// to run exactly this one test.
//
// go test --run Expectations --bench .
//
return
}
args = append(args, "--"+f.Name+"="+vs)
})
args = append(args, "--test.bench=^$") // disable benchmarks
args = append(args, flag.CommandLine.Args()...)
cmd := exec.Command(os.Args[0], args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, util.DisableMetamorphicEnvVar+"=t")
t.Log(cmd.Args)
if err := cmd.Run(); err != nil {
t.FailNow()
}
}
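// resultSet accumulates benchmark results from concurrently running benchmarks
// behind a mutex.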
type resultSet struct {
mu struct {
syncutil.Mutex
results []benchmarkResult
}
}
func (s *resultSet) add(result benchmarkResult) {
s.mu.Lock()
defer s.mu.Unlock()
s.mu.results = append(s.mu.results, result)
}
func (s *resultSet) iterate(f func(res benchmarkResult)) {
s.mu.Lock()
defer s.mu.Unlock()
for _, res := range s.mu.results {
f(res)
}
}
func (s *resultSet) toSlice() (res []benchmarkResult) {
s.iterate(func(result benchmarkResult) {
res = append(res, result)
})
return res
}
func resultsToExpectations(t *testing.T, results []benchmarkResult) benchmarkExpectations {
sort.Slice(results, func(i, j int) bool {
return results[i].name < results[j].name
})
var res benchmarkExpectations
var cur benchmarkExpectation
for _, result := range results {
if result.name != cur.name {
if cur != (benchmarkExpectation{}) {
res = append(res, cur)
cur = benchmarkExpectation{}
}
cur = benchmarkExpectation{
name: result.name,
min: result.result,
max: result.result,
}
}
if result.result < cur.min {
cur.min = result.result
}
if result.result > cur.max {
cur.max = result.result
}
}
if cur != (benchmarkExpectation{}) {
res = append(res, cur)
}
// Verify there aren't any duplicates.
for i := 1; i < len(res); i++ {
if res[i-1].name == res[i].name {
t.Fatalf("duplicate expectations for Name %s", res[i].name)
}
}
return res
}
func writeExpectationsFile(t *testing.T, expectations benchmarkExpectations) {
f, err := os.Create(datapathutils.TestDataPath(t, expectationsFilename))
require.NoError(t, err)
defer func() { require.NoError(t, f.Close()) }()
w := csv.NewWriter(f)
w.Comma = ','
require.NoError(t, w.Write(expectationsHeader))
for _, exp := range expectations {
require.NoError(t, w.Write([]string{exp.String(), exp.name}))
}
w.Flush()
require.NoError(t, w.Error())
}
func readExpectationsFile(t testing.TB) benchmarkExpectations {
f, err := os.Open(datapathutils.TestDataPath(t, expectationsFilename))
require.NoError(t, err)
defer func() { _ = f.Close() }()
r := csv.NewReader(f)
r.Comma = ','
records, err := r.ReadAll()
require.NoError(t, err)
require.GreaterOrEqual(t, len(records), 1, "must have at least a header")
require.Equal(t, expectationsHeader, records[0])
records = records[1:] // strip header
ret := make(benchmarkExpectations, len(records))
parseExp := func(expStr string) (min, max int, err error) {
split := strings.Split(expStr, "-")
if len(split) > 2 {
return 0, 0, errors.Errorf("expected <min>-<max>, got %q", expStr)
}
min, err = strconv.Atoi(split[0])
if err != nil {
return 0, 0, err
}
if len(split) == 1 {
max = min
return min, max, err
}
max, err = strconv.Atoi(split[1])
return min, max, err
}
for i, r := range records {
min, max, err := parseExp(r[0])
require.NoErrorf(t, err, "line %d", i+1)
ret[i] = benchmarkExpectation{min: min, max: max, name: r[1]}
}
sort.Sort(ret)
return ret
}
func (b benchmarkExpectations) find(name string) (benchmarkExpectation, bool) {
idx := sort.Search(len(b), func(i int) bool {
return b[i].name >= name
})
if idx < len(b) && b[idx].name == name {
return b[idx], true
}
return benchmarkExpectation{}, false
}
func (e benchmarkExpectation) matches(roundTrips int) bool {
// Does the value fall in the range?
if e.min <= roundTrips && roundTrips <= e.max {
return true
}
// If the expectation isn't a range, it gets a leeway of one because we got
// tired of small indeterminism.
if (e.min == e.max) && *allowOffByOne && (roundTrips == e.min-1 || roundTrips == e.min+1) {
return true
}
return false
}
func (e benchmarkExpectation) String() string {
expStr := strconv.Itoa(e.min)
if e.min != e.max {
expStr += "-"
expStr += strconv.Itoa(e.max)
}
return expStr
}
type benchmarkExpectations []benchmarkExpectation
var _ sort.Interface = (benchmarkExpectations)(nil)
func (b benchmarkExpectations) Len() int { return len(b) }
func (b benchmarkExpectations) Less(i, j int) bool { return b[i].name < b[j].name }
func (b benchmarkExpectations) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
| pkg/bench/rttanalysis/validate_benchmark_data.go | 0 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.0007318792049773037,
0.00021065969485789537,
0.0001665815943852067,
0.0001741014129947871,
0.0000986949453363195
] |
{
"id": 0,
"code_window": [
"\n",
"import (\n",
"\t\"context\"\n",
"\t\"runtime\"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t\"os\"\n"
],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "add",
"edit_start_line_idx": 14
} | # Comparisons.
eval
0 = 1
----
false
eval
0 != 1
----
true
eval
0 < 1
----
true
eval
0 <= 1
----
true
eval
0 > 1
----
false
eval
1 > -1
----
true
eval
1 <= -1
----
false
eval
-1 >= 1
----
false
eval
-1 < 1
----
true
eval
0 >= 1
----
false
eval
true = false
----
false
eval
true != false
----
true
eval
true < false
----
false
eval
true <= false
----
false
eval
true > false
----
true
eval
true >= false
----
true
eval
'a' = 'b'
----
false
eval
'a' != 'b'
----
true
eval
'a' < 'b'
----
true
eval
'a' <= 'b'
----
true
eval
'a' > 'b'
----
false
eval
'a' >= 'b'
----
false
eval
'a' >= 'b'
----
false
eval
'10' > '2'
----
false
eval
1.1 = 1.2
----
false
eval
1.1 = 1.1
----
true
eval
1.1 != 1.2
----
true
eval
1.1 < 1.2
----
true
eval
1.1 <= 1.2
----
true
eval
1.1 > 1.2
----
false
eval
1.1 >= 1.2
----
false
eval
1.1::float = 1.2::float
----
false
eval
1.1::float = 1.1::float
----
true
eval
1.1::float != 1.2::float
----
true
eval
1.1::float < 1.2::float
----
true
eval
1.1::float <= 1.2::float
----
true
eval
1.1::float > 1.2::float
----
false
eval
1.1::float >= 1.2::float
----
false
eval
'2015-10-01'::date = '2015-10-02'::date
----
false
eval
'2015-10-01'::date = '2015-10-01'::date
----
true
eval
'2016-07-19 +0:0:0'::date = '2016-07-19'::date
----
true
eval
'2016-7-19 +0:0:0'::date = '2016-07-19'::date
----
true
eval
'2015-10-01'::date != '2015-10-02'::date
----
true
eval
'2015-10-01'::date < '2015-10-02'::date
----
true
eval
'2015-10-01'::date <= '2015-10-02'::date
----
true
eval
'2015-10-01'::date > '2015-10-02'::date
----
false
eval
'2015-10-01'::date >= '2015-10-02'::date
----
false
eval
'12:00:00'::time = '12:00:01'::time
----
false
eval
'12:00:00'::time = '12:00:00'::time
----
true
eval
'12:00:00.000000'::time = '12:00:00'::time
----
true
eval
'12:00:00'::time != '12:00:01'::time
----
true
eval
'12:00:00'::time < '12:00:01'::time
----
true
eval
'12:00:00'::time <= '12:00:01'::time
----
true
eval
'12:00:00'::time > '12:00:01'::time
----
false
eval
'12:00:00'::time >= '12:00:01'::time
----
false
eval
'12:00:00'::time = '12:00:00+0'::timetz
----
true
eval
'12:00:00'::time != '12:00:00+0'::timetz
----
false
eval
'12:00:00'::time < '11:00:00-1'::timetz
----
true
eval
'12:00:00'::time >= '11:00:00-1'::timetz
----
false
eval
'12:00:00'::time > '13:00:00+1'::timetz
----
true
eval
'12:00:00'::time <= '13:00:00+1'::timetz
----
false
eval
'12:00:00-07'::timetz = '12:00:00-08'::timetz
----
false
eval
'12:00:00+09'::timetz = '12:00:00+09'::timetz
----
true
eval
'12:00:00+01'::timetz != '12:00:00-01'::timetz
----
true
eval
'12:00:00+01'::timetz < '12:00:00-01'::timetz
----
true
eval
'12:00:00+01'::timetz <= '12:00:00-01'::timetz
----
true
eval
'12:00:00+10'::timetz > '12:00:00+09'::timetz
----
false
eval
'12:00:00+10'::timetz >= '12:00:00+09'::timetz
----
false
eval
'2015-10-01'::timestamp = '2015-10-02'::timestamp
----
false
eval
'2015-10-01'::timestamp != '2015-10-02'::timestamp
----
true
eval
'2015-10-01'::timestamp < '2015-10-02'::timestamp
----
true
eval
'2015-10-01'::timestamp <= '2015-10-02'::timestamp
----
true
eval
'2015-10-01'::timestamp > '2015-10-02'::timestamp
----
false
eval
'2015-10-01'::timestamp >= '2015-10-02'::timestamp
----
false
eval
'2015-10-01 -01:00'::timestamptz = '2015-10-01 01:00:00'::timestamp
----
true
eval
'12h2m1s23ms'::interval = '12h2m1s24ms'::interval
----
false
eval
'12h2m1s23ms'::interval != '12h2m1s24ms'::interval
----
true
eval
'12h2m1s23ms'::interval < '12h2m1s24ms'::interval
----
true
eval
'12h2m1s23ms'::interval <= '12h2m1s24ms'::interval
----
true
eval
'12h2m1s23ms'::interval > '12h2m1s24ms'::interval
----
false
eval
'12h2m1s23ms'::interval >= '12h2m1s24ms'::interval
----
false
eval
'P1Y2M10DT2H30M'::interval = 'P1Y2M10DT2H31M'::interval
----
false
eval
'P1Y2M10DT2H30M'::interval != 'P1Y2M10DT2H31M'::interval
----
true
eval
'P1Y2M10DT2H29M'::interval < 'P1Y2M10DT2H30M'::interval
----
true
eval
'P1Y2M10DT2H29M'::interval <= 'P1Y2M10DT2H30M'::interval
----
true
eval
'P1Y2M10DT2H29M'::interval > 'P1Y2M10DT2H30M'::interval
----
false
eval
'P1Y2M10DT2H29M'::interval >= 'P1Y2M10DT2H30M'::interval
----
false
eval
'1-2 10 2:30'::interval = 'P1Y2M10DT2H31M'::interval
----
false
eval
'1-2 10 2:30'::interval != 'P1Y2M10DT2H31M'::interval
----
true
eval
'1-2 10 2:29'::interval < 'P1Y2M10DT2H30M'::interval
----
true
eval
'1-2 10 2:29'::interval <= 'P1Y2M10DT2H30M'::interval
----
true
eval
'1-2 10 2:29'::interval > 'P1Y2M10DT2H30M'::interval
----
false
eval
'1-2 10 2:29'::interval >= 'P1Y2M10DT2H30M'::interval
----
false
eval
'1 year 2 months 3 days 4 hours 5 minutes 6 seconds'::interval = '1 year 2 months 3 days 4 hours 5 minutes 7 seconds'::interval
----
false
eval
'1 year 2 months 3 days 4 hours 5 minutes 6 seconds'::interval != '1 year 2 months 3 days 4 hours 5 minutes 7 seconds'::interval
----
true
eval
'1 year 2 months 3 days 4 hours 5 minutes 6 seconds'::interval < '1 year 2 months 3 days 4 hours 5 minutes 7 seconds'::interval
----
true
eval
'1 year 2 months 3 days 4 hours 5 minutes 6 seconds'::interval <= '1 year 2 months 3 days 4 hours 5 minutes 7 seconds'::interval
----
true
eval
'1 year 2 months 3 days 4 hours 5 minutes 6 seconds'::interval > '1 year 2 months 3 days 4 hours 5 minutes 7 seconds'::interval
----
false
eval
'1 year 2 months 3 days 4 hours 5 minutes 6 seconds'::interval >= '1 year 2 months 3 days 4 hours 5 minutes 7 seconds'::interval
----
false
eval
'5 minutes 6 seconds'::interval = '5 minutes 6 seconds'::interval
----
true
eval
'PT2H30M'::interval = 'PT2H30M'::interval
----
true
# Comparisons against NULL result in NULL.
eval
0 = NULL
----
NULL
eval
0 < NULL
----
NULL
eval
0 <= NULL
----
NULL
eval
NULL = 0
----
NULL
eval
NULL < 0
----
NULL
eval
NULL <= 0
----
NULL
eval
0.1 = NULL
----
NULL
eval
0.1 < NULL
----
NULL
eval
0.1 <= NULL
----
NULL
eval
NULL = 0.1
----
NULL
eval
NULL < 0.1
----
NULL
eval
NULL <= 0.1
----
NULL
eval
0.1::float = NULL
----
NULL
eval
0.1::float < NULL
----
NULL
eval
0.1::float <= NULL
----
NULL
eval
NULL = 0.1::float
----
NULL
eval
NULL < 0.1::float
----
NULL
eval
NULL <= 0.1::float
----
NULL
eval
true = NULL
----
NULL
eval
true < NULL
----
NULL
eval
true <= NULL
----
NULL
eval
NULL = true
----
NULL
eval
NULL < true
----
NULL
eval
NULL <= true
----
NULL
eval
'a' = NULL
----
NULL
eval
'a' < NULL
----
NULL
eval
'a' <= NULL
----
NULL
eval
NULL = 'a'
----
NULL
eval
NULL < 'a'
----
NULL
eval
NULL <= 'a'
----
NULL
eval
'2015-10-01'::date = NULL
----
NULL
eval
'2015-10-01'::date < NULL
----
NULL
eval
'2015-10-01'::date <= NULL
----
NULL
eval
NULL = '2015-10-01'::date
----
NULL
eval
NULL < '2015-10-01'::date
----
NULL
eval
NULL <= '2015-10-01'::date
----
NULL
eval
'12:00:00'::time = NULL
----
NULL
eval
'12:00:00'::time < NULL
----
NULL
eval
'12:00:00'::time <= NULL
----
NULL
eval
NULL = '12:00:00'::time
----
NULL
eval
NULL < '12:00:00'::time
----
NULL
eval
NULL <= '12:00:00'::time
----
NULL
eval
'12:00:00+01'::timetz = NULL
----
NULL
eval
'12:00:00+01'::timetz < NULL
----
NULL
eval
'12:00:00+01'::timetz <= NULL
----
NULL
eval
NULL = '12:00:00+01'::timetz
----
NULL
eval
NULL < '12:00:00+01'::timetz
----
NULL
eval
NULL <= '12:00:00+01'::timetz
----
NULL
eval
'2015-10-01'::timestamp = NULL
----
NULL
eval
'2015-10-01'::timestamp < NULL
----
NULL
eval
'2015-10-01'::timestamp <= NULL
----
NULL
eval
NULL = '2015-10-01'::timestamp
----
NULL
eval
NULL < '2015-10-01'::timestamp
----
NULL
eval
NULL <= '2015-10-01'::timestamp
----
NULL
eval
'2015-10-01'::timestamptz = NULL
----
NULL
eval
'2015-10-01'::timestamptz < NULL
----
NULL
eval
'2015-10-01'::timestamptz <= NULL
----
NULL
eval
NULL = '2015-10-01'::timestamptz
----
NULL
eval
NULL < '2015-10-01'::timestamptz
----
NULL
eval
NULL <= '2015-10-01'::timestamptz
----
NULL
eval
'1-2 10 2:30'::interval = NULL
----
NULL
eval
'1-2 10 2:30'::interval < NULL
----
NULL
eval
'1-2 10 2:30'::interval <= NULL
----
NULL
eval
NULL = '1-2 10 2:30'::interval
----
NULL
eval
NULL < '1-2 10 2:30'::interval
----
NULL
eval
NULL <= '1-2 10 2:30'::interval
----
NULL
eval
NULL = NULL
----
NULL
eval
NULL < NULL
----
NULL
eval
NULL <= NULL
----
NULL
eval
NULL < ARRAY[1]
----
NULL
eval
NULL <= ARRAY[1]
----
NULL
eval
ARRAY[1] < NULL
----
NULL
eval
ARRAY[1] <= NULL
----
NULL
eval
ARRAY[1] < ARRAY[2]
----
true
eval
ARRAY[2] < ARRAY[2, 3]
----
true
eval
ARRAY[2] <= ARRAY[2]
----
true
eval
ARRAY[]:::INT[] < ARRAY[1]
----
true
eval
ARRAY[1, 2, 3] < ARRAY[1, 2]
----
false
eval
ARRAY[1, 2, 3] < ARRAY[1, 1]
----
false
eval
ARRAY[2, 2, 2, 4] < ARRAY[2, 2, 2, 5]
----
true
eval
ARRAY[1, 2, 3] = ARRAY[1, 2, 3]
----
true
eval
ARRAY[NULL]:::INT[] <= ARRAY[NULL]:::INT[]
----
true
eval
ARRAY[NULL, 1] >= ARRAY[NULL, 1]
----
true
eval
ARRAY[NULL, 1, NULL] > ARRAY[NULL, 2, NULL]
----
false
# This differs from postgres -- crdb nulls come first.
eval
ARRAY[NULL, NULL]:::INT[] < ARRAY[NULL, 1]
----
true
| pkg/sql/sem/eval/testdata/eval/comparison | 0 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.00019127913401462138,
0.0001718494459055364,
0.00016411376418545842,
0.00017146565369330347,
0.000004165476184425643
] |
{
"id": 0,
"code_window": [
"\n",
"import (\n",
"\t\"context\"\n",
"\t\"runtime\"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t\"os\"\n"
],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "add",
"edit_start_line_idx": 14
} | // Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package spanconfig
import (
"bytes"
"fmt"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/errors"
)
// SystemTarget specifies the target of a system span configuration.
type SystemTarget struct {
// sourceTenantID is the ID of the tenant that specified the system span
// configuration.
sourceTenantID roachpb.TenantID
	// targetTenantID is the ID of the tenant to whose keyspace the associated system
// span configuration applies. This field can only be set in conjunction with
// the type being SystemTargetTypeSpecificTenantKeyspace; it must be left
// unset for all other system target types.
//
// Secondary tenants are only allowed to target their own keyspace. The host
// tenant may use this field to target a specific secondary tenant.
targetTenantID roachpb.TenantID
// systemTargetType indicates the type of the system target. targetTenantID
// can only be set if the system target is specific.
systemTargetType systemTargetType
}
// systemTargetType indicates the type of SystemTarget.
type systemTargetType int
const (
_ systemTargetType = iota
// SystemTargetTypeSpecificTenantKeyspace indicates that the system target is
// targeting a specific tenant's keyspace.
SystemTargetTypeSpecificTenantKeyspace
// SystemTargetTypeEntireKeyspace indicates that the system target is
// targeting the entire keyspace. Only the host tenant is allowed to do so.
SystemTargetTypeEntireKeyspace
// SystemTargetTypeAllTenantKeyspaceTargetsSet represents a system target that
// encompasses all system targets that have been set by the source tenant over
	// specific tenants' keyspaces.
//
// This is a read-only system target type as it may translate to more than one
// system targets that may have been persisted. This target type is useful in
// fetching all system span configurations a tenant may have set on tenant
// keyspaces without knowing the tenant ID of all other tenants in the system.
// This is only ever significant for the host tenant as it can set system span
	// configurations that target other tenants' keyspaces.
SystemTargetTypeAllTenantKeyspaceTargetsSet
)
// MakeTenantKeyspaceTarget constructs, validates, and returns a new
// SystemTarget that targets the keyspace of the target tenant.
func MakeTenantKeyspaceTarget(
sourceTenantID roachpb.TenantID, targetTenantID roachpb.TenantID,
) (SystemTarget, error) {
t := SystemTarget{
sourceTenantID: sourceTenantID,
targetTenantID: targetTenantID,
systemTargetType: SystemTargetTypeSpecificTenantKeyspace,
}
return t, t.validate()
}
// makeSystemTargetFromProto constructs a SystemTarget from a
// roachpb.SystemSpanConfigTarget and validates it.
func makeSystemTargetFromProto(proto *roachpb.SystemSpanConfigTarget) (SystemTarget, error) {
var t SystemTarget
switch {
case proto.IsSpecificTenantKeyspaceTarget():
t = SystemTarget{
sourceTenantID: proto.SourceTenantID,
targetTenantID: proto.Type.GetSpecificTenantKeyspace().TenantID,
systemTargetType: SystemTargetTypeSpecificTenantKeyspace,
}
case proto.IsEntireKeyspaceTarget():
t = SystemTarget{
sourceTenantID: proto.SourceTenantID,
targetTenantID: roachpb.TenantID{},
systemTargetType: SystemTargetTypeEntireKeyspace,
}
case proto.IsAllTenantKeyspaceTargetsSetTarget():
t = SystemTarget{
sourceTenantID: proto.SourceTenantID,
targetTenantID: roachpb.TenantID{},
systemTargetType: SystemTargetTypeAllTenantKeyspaceTargetsSet,
}
default:
return SystemTarget{}, errors.AssertionFailedf("unknown system target type")
}
return t, t.validate()
}
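// toProto converts the SystemTarget to its roachpb.SystemSpanConfigTarget
// wire representation.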
func (st SystemTarget) toProto() *roachpb.SystemSpanConfigTarget {
var systemTargetType *roachpb.SystemSpanConfigTarget_Type
switch st.systemTargetType {
case SystemTargetTypeEntireKeyspace:
systemTargetType = roachpb.NewEntireKeyspaceTargetType()
case SystemTargetTypeAllTenantKeyspaceTargetsSet:
systemTargetType = roachpb.NewAllTenantKeyspaceTargetsSetTargetType()
case SystemTargetTypeSpecificTenantKeyspace:
systemTargetType = roachpb.NewSpecificTenantKeyspaceTargetType(st.targetTenantID)
default:
panic("unknown system target type")
}
return &roachpb.SystemSpanConfigTarget{
SourceTenantID: st.sourceTenantID,
Type: systemTargetType,
}
}
// MakeEntireKeyspaceTarget returns a new system target that targets the entire
// keyspace. Only the host tenant is allowed to target the entire keyspace.
func MakeEntireKeyspaceTarget() SystemTarget {
return SystemTarget{
sourceTenantID: roachpb.SystemTenantID,
systemTargetType: SystemTargetTypeEntireKeyspace,
}
}
// MakeAllTenantKeyspaceTargetsSet returns a new SystemTarget that
// represents all system span configurations installed by the given tenant ID
// on specific tenants' keyspaces (including its own and those of other tenants).
func MakeAllTenantKeyspaceTargetsSet(sourceID roachpb.TenantID) SystemTarget {
return SystemTarget{
sourceTenantID: sourceID,
systemTargetType: SystemTargetTypeAllTenantKeyspaceTargetsSet,
}
}
// targetsEntireKeyspace returns true if the target applies to all ranges in the
// system (including those belonging to secondary tenants).
func (st SystemTarget) targetsEntireKeyspace() bool {
return st.systemTargetType == SystemTargetTypeEntireKeyspace
}
// keyspaceTargeted returns the keyspan the system target applies to.
func (st SystemTarget) keyspaceTargeted() roachpb.Span {
switch st.systemTargetType {
case SystemTargetTypeEntireKeyspace:
return keys.EverythingSpan
case SystemTargetTypeSpecificTenantKeyspace:
// If the system tenant's keyspace is being targeted then this means
// everything from the start of the keyspace to where all non-system tenant
// keys begin.
if st.targetTenantID == roachpb.SystemTenantID {
return roachpb.Span{
Key: keys.MinKey,
EndKey: keys.TenantTableDataMin,
}
}
k := keys.MakeTenantPrefix(st.targetTenantID)
return roachpb.Span{
Key: k,
EndKey: k.PrefixEnd(),
}
case SystemTargetTypeAllTenantKeyspaceTargetsSet:
// AllTenantKeyspaceTarget encapsulates other target; by itself, it doesn't
// target a single contiguous keyspace.
panic("not applicable")
default:
panic("unknown target type")
}
}
// IsReadOnly returns true if the system target is read-only. Read only targets
// should not be persisted.
func (st SystemTarget) IsReadOnly() bool {
return st.systemTargetType == SystemTargetTypeAllTenantKeyspaceTargetsSet
}
// encode returns an encoded span associated with the receiver which is suitable
// for interaction with system.span_configurations table.
func (st SystemTarget) encode() roachpb.Span {
var k roachpb.Key
switch st.systemTargetType {
case SystemTargetTypeEntireKeyspace:
k = keys.SystemSpanConfigEntireKeyspace
case SystemTargetTypeSpecificTenantKeyspace:
if st.sourceTenantID == roachpb.SystemTenantID {
k = encoding.EncodeUvarintAscending(
keys.SystemSpanConfigHostOnTenantKeyspace, st.targetTenantID.ToUint64(),
)
} else {
k = encoding.EncodeUvarintAscending(
keys.SystemSpanConfigSecondaryTenantOnEntireKeyspace, st.sourceTenantID.ToUint64(),
)
}
case SystemTargetTypeAllTenantKeyspaceTargetsSet:
if st.sourceTenantID == roachpb.SystemTenantID {
k = keys.SystemSpanConfigHostOnTenantKeyspace
} else {
k = encoding.EncodeUvarintAscending(
keys.SystemSpanConfigSecondaryTenantOnEntireKeyspace, st.sourceTenantID.ToUint64(),
)
}
}
return roachpb.Span{Key: k, EndKey: k.PrefixEnd()}
}
// validate ensures that the receiver is well-formed.
func (st SystemTarget) validate() error {
switch st.systemTargetType {
case SystemTargetTypeAllTenantKeyspaceTargetsSet:
if st.targetTenantID.IsSet() {
return errors.AssertionFailedf(
"targetTenantID must be unset when targeting everything installed on tenants",
)
}
case SystemTargetTypeEntireKeyspace:
if st.sourceTenantID != roachpb.SystemTenantID {
return errors.AssertionFailedf("only the host tenant is allowed to target the entire keyspace")
}
if st.targetTenantID.IsSet() {
return errors.AssertionFailedf("malformed system target for entire keyspace; targetTenantID set")
}
case SystemTargetTypeSpecificTenantKeyspace:
if !st.targetTenantID.IsSet() {
return errors.AssertionFailedf(
"malformed system target for specific tenant keyspace; targetTenantID unset",
)
}
if st.sourceTenantID != roachpb.SystemTenantID && st.sourceTenantID != st.targetTenantID {
return errors.AssertionFailedf(
"secondary tenant %s cannot target another tenant with ID %s",
st.sourceTenantID,
st.targetTenantID,
)
}
default:
return errors.AssertionFailedf("invalid system target type")
}
return nil
}
// IsEmpty returns true if the receiver is empty.
func (st SystemTarget) IsEmpty() bool {
return !st.sourceTenantID.IsSet() && !st.targetTenantID.IsSet() &&
st.systemTargetType == 0 // unset
}
// less returns true if the receiver is considered less than the supplied
// target. The semantics are defined as follows:
// - read only targets come first, ordered by tenant ID.
// - targets that target the entire keyspace come next.
// - targets that target a specific tenant's keyspace come last, sorted by
// source tenant ID; target tenant ID is used as a tiebreaker if two targets
// have the same source.
func (st SystemTarget) less(ot SystemTarget) bool {
if st.IsReadOnly() && ot.IsReadOnly() {
return st.sourceTenantID.ToUint64() < ot.sourceTenantID.ToUint64()
}
if st.IsReadOnly() {
return true
} else if ot.IsReadOnly() {
return false
}
if st.targetsEntireKeyspace() {
return true
} else if ot.targetsEntireKeyspace() {
return false
}
if st.sourceTenantID.ToUint64() == ot.sourceTenantID.ToUint64() {
return st.targetTenantID.ToUint64() < ot.targetTenantID.ToUint64()
}
return st.sourceTenantID.ToUint64() < ot.sourceTenantID.ToUint64()
}
// equal returns true iff the receiver is equal to the supplied system target.
func (st SystemTarget) equal(ot SystemTarget) bool {
return st.sourceTenantID.Equal(ot.sourceTenantID) &&
st.targetTenantID.Equal(ot.targetTenantID) &&
st.systemTargetType == ot.systemTargetType
}
// String returns a pretty printed version of a system target.
func (st SystemTarget) String() string {
switch st.systemTargetType {
case SystemTargetTypeEntireKeyspace:
return "{entire-keyspace}"
case SystemTargetTypeAllTenantKeyspaceTargetsSet:
return fmt.Sprintf("{source=%d, all-tenant-keyspace-targets-set}", st.sourceTenantID)
case SystemTargetTypeSpecificTenantKeyspace:
return fmt.Sprintf(
"{source=%d,target=%d}",
st.sourceTenantID.ToUint64(),
st.targetTenantID.ToUint64(),
)
default:
panic("unreachable")
}
}
// decodeSystemTarget converts the given span into a SystemTarget. An error is
// returned if the supplied span does not conform to a system target's encoding.
func decodeSystemTarget(span roachpb.Span) (SystemTarget, error) {
// Validate the end key is well-formed.
if !span.EndKey.Equal(span.Key.PrefixEnd()) {
return SystemTarget{}, errors.AssertionFailedf("invalid end key in span %s", span)
}
switch {
case bytes.Equal(span.Key, keys.SystemSpanConfigEntireKeyspace):
return MakeEntireKeyspaceTarget(), nil
case bytes.HasPrefix(span.Key, keys.SystemSpanConfigHostOnTenantKeyspace):
// System span config was applied by the host tenant over a secondary
// tenant's entire keyspace.
tenIDBytes := span.Key[len(keys.SystemSpanConfigHostOnTenantKeyspace):]
_, tenIDRaw, err := encoding.DecodeUvarintAscending(tenIDBytes)
if err != nil {
return SystemTarget{}, err
}
tenID := roachpb.MustMakeTenantID(tenIDRaw)
return MakeTenantKeyspaceTarget(roachpb.SystemTenantID, tenID)
case bytes.HasPrefix(span.Key, keys.SystemSpanConfigSecondaryTenantOnEntireKeyspace):
// System span config was applied by a secondary tenant over its entire
// keyspace.
tenIDBytes := span.Key[len(keys.SystemSpanConfigSecondaryTenantOnEntireKeyspace):]
_, tenIDRaw, err := encoding.DecodeUvarintAscending(tenIDBytes)
if err != nil {
return SystemTarget{}, err
}
tenID := roachpb.MustMakeTenantID(tenIDRaw)
return MakeTenantKeyspaceTarget(tenID, tenID)
default:
return SystemTarget{},
errors.AssertionFailedf("span %s did not conform to SystemTarget encoding", span)
}
}
// spanStartKeyConformsToSystemTargetEncoding returns true if the given span's
// start key conforms to the key encoding of system span configurations.
func spanStartKeyConformsToSystemTargetEncoding(span roachpb.Span) bool {
return bytes.Equal(span.Key, keys.SystemSpanConfigEntireKeyspace) ||
bytes.HasPrefix(span.Key, keys.SystemSpanConfigHostOnTenantKeyspace) ||
bytes.HasPrefix(span.Key, keys.SystemSpanConfigSecondaryTenantOnEntireKeyspace)
}
| pkg/spanconfig/systemtarget.go | 0 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.000656583986710757,
0.0002253744751214981,
0.00016585411503911018,
0.0001780776074156165,
0.00010052844299934804
] |
{
"id": 1,
"code_window": [
"\t\"runtime\"\n",
"\t\"strings\"\n",
"\t\"time\"\n",
"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster\"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "replace",
"edit_start_line_idx": 15
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tests
import (
"context"
"runtime"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test"
)
func registerFixtures(r registry.Registry) {
// Run this test to create a new fixture for the version upgrade test. This
// is necessary after every release. For example, the day `master` becomes
// the 20.2 release, this test will fail because it is missing a fixture for
// 20.1; run the test (on 20.1). Check it in (instructions will be logged
// below) and off we go.
//
// The version to create/update the fixture for. Must be released (i.e.
// can download it from the homepage); if that is not the case use the
// empty string which uses the local cockroach binary. Make sure that
// this binary then has the correct version. For example, to make a
// "v20.2" fixture, you will need a binary that has "v20.2" in the
// output of `./cockroach version`, and this process will end up
// creating fixtures that have "v20.2" in them. This would be part
// of tagging the master branch as v21.1 in the process of going
// through the major release for v20.2. The version is passed in as
// FIXTURE_VERSION environment variable.
//
// In the common case, one should populate this with the version (instead of
// using the empty string) as this is the most straightforward and least
// error-prone way to generate the fixtures.
//
// Please note that you do *NOT* need to update the fixtures in a patch
// release. This only happens as part of preparing the master branch for the
// next release. The release team runbooks, at time of writing, reflect
// this.
//
// Example invocation:
// roachtest --local run generate-fixtures --debug --cockroach ./cockroach \
// --build-tag v22.1.0-beta.3 tag:fixtures
runFixtures := func(
ctx context.Context,
t test.Test,
c cluster.Cluster,
) {
if c.IsLocal() && runtime.GOARCH == "arm64" {
t.Skip("Skip under ARM64. See https://github.com/cockroachdb/cockroach/issues/89268")
}
fixtureVersion := strings.TrimPrefix(t.BuildVersion().String(), "v")
makeVersionFixtureAndFatal(ctx, t, c, fixtureVersion)
}
spec := registry.TestSpec{
Name: "generate-fixtures",
Timeout: 30 * time.Minute,
Tags: registry.Tags("fixtures"),
Owner: registry.OwnerDevInf,
Cluster: r.MakeClusterSpec(4),
Run: runFixtures,
}
r.Add(spec)
}
| pkg/cmd/roachtest/tests/fixtures.go | 1 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.24886246025562286,
0.03160512447357178,
0.00016891604172997177,
0.0003541782789397985,
0.08211811631917953
] |
{
"id": 1,
"code_window": [
"\t\"runtime\"\n",
"\t\"strings\"\n",
"\t\"time\"\n",
"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster\"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "replace",
"edit_start_line_idx": 15
} | # LogicTest: local
statement ok
CREATE TABLE abcd (
a INT PRIMARY KEY,
b INT,
c INT,
d INT,
INDEX b (b),
INDEX cd (c,d),
UNIQUE INDEX bcd (b,c,d)
)
# No hint
query T
EXPLAIN SELECT * FROM abcd WHERE a >= 20 AND a <= 30
----
distribution: local
vectorized: true
·
• scan
missing stats
table: abcd@abcd_pkey
spans: [/20 - /30]
# No hint, reverse scan.
query T
EXPLAIN SELECT * FROM abcd WHERE a >= 20 AND a <= 30 ORDER BY a DESC
----
distribution: local
vectorized: true
·
• revscan
missing stats
table: abcd@abcd_pkey
spans: [/20 - /30]
# Force primary
query T
EXPLAIN SELECT * FROM abcd@abcd_pkey WHERE a >= 20 AND a <= 30
----
distribution: local
vectorized: true
·
• scan
missing stats
table: abcd@abcd_pkey
spans: [/20 - /30]
# Force primary, reverse scan.
query T
EXPLAIN SELECT * FROM abcd@{FORCE_INDEX=abcd_pkey,DESC} WHERE a >= 20 AND a <= 30
----
distribution: local
vectorized: true
·
• revscan
missing stats
table: abcd@abcd_pkey
spans: [/20 - /30]
# Force primary, allow reverse scan.
query T
EXPLAIN SELECT * FROM abcd@abcd_pkey WHERE a >= 20 AND a <= 30 ORDER BY a DESC
----
distribution: local
vectorized: true
·
• revscan
missing stats
table: abcd@abcd_pkey
spans: [/20 - /30]
# Force primary, forward scan.
query T
EXPLAIN SELECT * FROM abcd@{FORCE_INDEX=abcd_pkey,ASC} WHERE a >= 20 AND a <= 30 ORDER BY a DESC
----
distribution: local
vectorized: true
·
• sort
│ order: -a
│
└── • scan
missing stats
table: abcd@abcd_pkey
spans: [/20 - /30]
# Force index b
query T
EXPLAIN SELECT * FROM abcd@b WHERE a >= 20 AND a <= 30
----
distribution: local
vectorized: true
·
• filter
│ filter: (a >= 20) AND (a <= 30)
│
└── • index join
│ table: abcd@abcd_pkey
│
└── • scan
missing stats
table: abcd@b
spans: FULL SCAN
# Force index b, reverse scan.
query T
EXPLAIN SELECT * FROM abcd@{FORCE_INDEX=b,DESC} WHERE a >= 20 AND a <= 30
----
distribution: local
vectorized: true
·
• filter
│ filter: (a >= 20) AND (a <= 30)
│
└── • index join
│ table: abcd@abcd_pkey
│
└── • revscan
missing stats
table: abcd@b
spans: FULL SCAN
# Force index b, allowing reverse scan.
query T
EXPLAIN SELECT * FROM abcd@b ORDER BY b DESC LIMIT 5
----
distribution: local
vectorized: true
·
• index join
│ table: abcd@abcd_pkey
│
└── • revscan
missing stats
table: abcd@b
spans: LIMITED SCAN
limit: 5
# Force index b, reverse scan.
query T
EXPLAIN SELECT * FROM abcd@{FORCE_INDEX=b,DESC} ORDER BY b DESC LIMIT 5
----
distribution: local
vectorized: true
·
• index join
│ table: abcd@abcd_pkey
│
└── • revscan
missing stats
table: abcd@b
spans: LIMITED SCAN
limit: 5
# Force index b, forward scan.
query T
EXPLAIN SELECT * FROM abcd@{FORCE_INDEX=b,ASC} ORDER BY b DESC LIMIT 5
----
distribution: local
vectorized: true
·
• index join
│ table: abcd@abcd_pkey
│
└── • top-k
│ order: -b
│ k: 5
│
└── • scan
missing stats
table: abcd@b
spans: FULL SCAN
# Force index cd
query T
EXPLAIN SELECT * FROM abcd@cd WHERE a >= 20 AND a <= 30
----
distribution: local
vectorized: true
·
• filter
│ filter: (a >= 20) AND (a <= 30)
│
└── • index join
│ table: abcd@abcd_pkey
│
└── • scan
missing stats
table: abcd@cd
spans: FULL SCAN
# Force index bcd
query T
EXPLAIN SELECT * FROM abcd@bcd WHERE a >= 20 AND a <= 30
----
distribution: local
vectorized: true
·
• filter
│ filter: (a >= 20) AND (a <= 30)
│
└── • scan
missing stats
table: abcd@bcd
spans: FULL SCAN
# Force index b (covering)
query T
EXPLAIN SELECT b FROM abcd@b WHERE a >= 20 AND a <= 30
----
distribution: local
vectorized: true
·
• filter
│ filter: (a >= 20) AND (a <= 30)
│
└── • scan
missing stats
table: abcd@b
spans: FULL SCAN
# Force index b (non-covering due to WHERE clause)
query T
EXPLAIN SELECT b FROM abcd@b WHERE c >= 20 AND c <= 30
----
distribution: local
vectorized: true
·
• filter
│ filter: (c >= 20) AND (c <= 30)
│
└── • index join
│ table: abcd@abcd_pkey
│
└── • scan
missing stats
table: abcd@b
spans: FULL SCAN
# No hint, should be using index cd
query T
EXPLAIN SELECT c, d FROM abcd WHERE c >= 20 AND c < 40
----
distribution: local
vectorized: true
·
• scan
missing stats
table: abcd@cd
spans: [/20 - /39]
# Force primary index
query T
EXPLAIN SELECT c, d FROM abcd@abcd_pkey WHERE c >= 20 AND c < 40
----
distribution: local
vectorized: true
·
• filter
│ filter: (c >= 20) AND (c < 40)
│
└── • scan
missing stats
table: abcd@abcd_pkey
spans: FULL SCAN
# Force index b
query T
EXPLAIN SELECT c, d FROM abcd@b WHERE c >= 20 AND c < 40
----
distribution: local
vectorized: true
·
• filter
│ filter: (c >= 20) AND (c < 40)
│
└── • index join
│ table: abcd@abcd_pkey
│
└── • scan
missing stats
table: abcd@b
spans: FULL SCAN
query T
EXPLAIN SELECT * FROM abcd@{FORCE_INDEX=b} WHERE a >= 20 AND a <= 30
----
distribution: local
vectorized: true
·
• filter
│ filter: (a >= 20) AND (a <= 30)
│
└── • index join
│ table: abcd@abcd_pkey
│
└── • scan
missing stats
table: abcd@b
spans: FULL SCAN
query T
EXPLAIN SELECT b, c, d FROM abcd WHERE c = 10
----
distribution: local
vectorized: true
·
• index join
│ table: abcd@abcd_pkey
│
└── • scan
missing stats
table: abcd@cd
spans: [/10 - /10]
query T
EXPLAIN SELECT b, c, d FROM abcd@{NO_INDEX_JOIN} WHERE c = 10
----
distribution: local
vectorized: true
·
• filter
│ filter: c = 10
│
└── • scan
missing stats
table: abcd@abcd_pkey
spans: FULL SCAN
query T
EXPLAIN SELECT b, c, d FROM abcd@{FORCE_INDEX=bcd} WHERE c = 10
----
distribution: local
vectorized: true
·
• filter
│ filter: c = 10
│
└── • scan
missing stats
table: abcd@bcd
spans: FULL SCAN
query T
EXPLAIN SELECT b, c, d FROM abcd@{FORCE_INDEX=abcd_pkey} WHERE c = 10
----
distribution: local
vectorized: true
·
• filter
│ filter: c = 10
│
└── • scan
missing stats
table: abcd@abcd_pkey
spans: FULL SCAN
| pkg/sql/opt/exec/execbuilder/testdata/select_index_flags | 0 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.0001762797764968127,
0.0001712529337964952,
0.00016458625032100827,
0.00017190641665365547,
0.0000032577245292486623
] |
{
"id": 1,
"code_window": [
"\t\"runtime\"\n",
"\t\"strings\"\n",
"\t\"time\"\n",
"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster\"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "replace",
"edit_start_line_idx": 15
} | post
----
----
searchIssue repo:"cockroach" user:"cockroachdb" is:issue is:open in:title label:"C-test-failure" sort:created-desc "storage: TestReplicateQueueRebalance failed" label:branch-release-0.1 -label:X-noreuse: [github.Issue{Number:30, Title:"storage: TestReplicateQueueRebalance failed", Labels:[github.Label{URL:"fake", Name:"C-test-failure"} github.Label{URL:"fake", Name:"O-robot"} github.Label{URL:"fake", Name:"release-0.1"}]} github.Issue{Number:32, Title:"storage: TestReplicateQueueRebalance-similar failed", Labels:[github.Label{URL:"fake", Name:"C-test-failure"} github.Label{URL:"fake", Name:"O-robot"} github.Label{URL:"fake", Name:"release-0.1"}]}]
searchIssue repo:"cockroach" user:"cockroachdb" is:issue is:open in:title label:"C-test-failure" sort:created-desc "storage: TestReplicateQueueRebalance failed" -label:branch-release-0.1: []
createComment owner=cockroachdb repo=cockroach issue=30:
storage.TestReplicateQueueRebalance [failed](https://teamcity.example.com/buildConfiguration/nightly123/8008135?buildTab=log) on release-0.1 @ [abcd123](https://github.com/cockroachdb/cockroach/commits/abcd123):
```
<autogenerated>:12: storage/replicate_queue_test.go:103, condition failed to evaluate within 45s: not balanced: [10 1 10 1 8]
```
<p>Parameters: <code>GOFLAGS=race</code>
, <code>ROACHTEST_cloud=gce</code>
, <code>TAGS=deadlock</code>
</p>
<details><summary>Help</summary>
<p>
See also: [How To Investigate a Go Test Failure \(internal\)](https://cockroachlabs.atlassian.net/l/c/HgfXfJgM)
</p>
</details>
<sub>
[This test on roachdash](https://roachdash.crdb.dev/?filter=status:open%20t:.*TestReplicateQueueRebalance.*&sort=title+created&display=lastcommented+project) | [Improve this report!](https://github.com/cockroachdb/cockroach/tree/master/pkg/cmd/internal/issues)
</sub>
Rendered: https://github.com/cockroachdb/cockroach/issues/new?body=storage.TestReplicateQueueRebalance+%5Bfailed%5D%28https%3A%2F%2Fteamcity.example.com%2FbuildConfiguration%2Fnightly123%2F8008135%3FbuildTab%3Dlog%29+on+release-0.1+%40+%5Babcd123%5D%28https%3A%2F%2Fgithub.com%2Fcockroachdb%2Fcockroach%2Fcommits%2Fabcd123%29%3A%0A%0A%0A%60%60%60%0A%09%3Cautogenerated%3E%3A12%3A+storage%2Freplicate_queue_test.go%3A103%2C+condition+failed+to+evaluate+within+45s%3A+not+balanced%3A+%5B10+1+10+1+8%5D%0A%60%60%60%0A%3Cp%3EParameters%3A+%3Ccode%3EGOFLAGS%3Drace%3C%2Fcode%3E%0A%2C+%3Ccode%3EROACHTEST_cloud%3Dgce%3C%2Fcode%3E%0A%2C+%3Ccode%3ETAGS%3Ddeadlock%3C%2Fcode%3E%0A%3C%2Fp%3E%0A%3Cdetails%3E%3Csummary%3EHelp%3C%2Fsummary%3E%0A%3Cp%3E%0A%0ASee+also%3A+%5BHow+To+Investigate+a+Go+Test+Failure+%5C%28internal%5C%29%5D%28https%3A%2F%2Fcockroachlabs.atlassian.net%2Fl%2Fc%2FHgfXfJgM%29%0A%3C%2Fp%3E%0A%3C%2Fdetails%3E%0A%3Csub%3E%0A%0A%5BThis+test+on+roachdash%5D%28https%3A%2F%2Froachdash.crdb.dev%2F%3Ffilter%3Dstatus%3Aopen%2520t%3A.%2ATestReplicateQueueRebalance.%2A%26sort%3Dtitle%2Bcreated%26display%3Dlastcommented%2Bproject%29+%7C+%5BImprove+this+report%21%5D%28https%3A%2F%2Fgithub.com%2Fcockroachdb%2Fcockroach%2Ftree%2Fmaster%2Fpkg%2Fcmd%2Finternal%2Fissues%29%0A%3C%2Fsub%3E%0A&title=%3Ccomment%3E
----
----
| pkg/cmd/internal/issues/testdata/post/failure-matching-issue.txt | 0 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.0015856503741815686,
0.000720735639333725,
0.000171233230503276,
0.0005630294908769429,
0.0005893182824365795
] |
{
"id": 1,
"code_window": [
"\t\"runtime\"\n",
"\t\"strings\"\n",
"\t\"time\"\n",
"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster\"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "replace",
"edit_start_line_idx": 15
} | // Copyright 2016 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
syntax = "proto3";
package cockroach.ccl.backupccl;
option go_package = "github.com/cockroachdb/cockroach/pkg/ccl/backupccl/backuppb";
import "build/info.proto";
import "cloud/cloudpb/external_storage.proto";
import "roachpb/data.proto";
import "roachpb/metadata.proto";
import "sql/stats/table_statistic.proto";
import "sql/catalog/descpb/structured.proto";
import "multitenant/mtinfopb/info.proto";
import "util/hlc/timestamp.proto";
import "gogoproto/gogo.proto";
enum MVCCFilter {
Latest = 0;
All = 1;
}
// BackupManifest represents a consistent snapshot of ranges.
//
// Each range snapshot includes a path to data that is a diff of the data in
// that key range between a start and end timestamp. The end timestamp of all
// ranges in a backup is the same, but the start may vary (to allow individual
// tables to be backed up on different schedules).
message BackupManifest {
// BackupManifest_File represents a diff for a key range between two
// timestamps. Note that many BackupManifest_File spans can get written to a
// single SST.
message File {
roachpb.Span span = 1 [(gogoproto.nullable) = false];
string path = 2;
reserved 3;
reserved 4;
reserved 5;
roachpb.RowCount entry_counts = 6 [(gogoproto.nullable) = false];
// StartTime 0 is sometimes legitimately used, so it is only meaningful if
// EndTime is non-zero, otherwise both just inherit from containing backup.
util.hlc.Timestamp start_time = 7 [(gogoproto.nullable) = false];
util.hlc.Timestamp end_time = 8 [(gogoproto.nullable) = false];
string locality_kv = 9 [(gogoproto.customname) = "LocalityKV"];
}
message DescriptorRevision {
util.hlc.Timestamp time = 1 [(gogoproto.nullable) = false];
uint32 ID = 2 [(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
sql.sqlbase.Descriptor desc = 3;
}
message Progress {
repeated File files = 1 [(gogoproto.nullable) = false];
util.hlc.Timestamp rev_start_time = 2 [(gogoproto.nullable) = false];
int32 completed_spans = 3;
}
util.hlc.Timestamp start_time = 1 [(gogoproto.nullable) = false];
util.hlc.Timestamp end_time = 2 [(gogoproto.nullable) = false];
MVCCFilter mvcc_filter = 13 [(gogoproto.customname) = "MVCCFilter"];
// Even if StartTime is zero, we only get revisions since gc threshold, so
// do not allow AS OF SYSTEM TIME before revision_start_time.
util.hlc.Timestamp revision_start_time = 17 [(gogoproto.nullable) = false];
// Spans contains the spans requested for backup. The keyranges covered by
// `files` may be a subset of this if there were ranges with no changes since
// the last backup. For all tables in the backup descriptor, these spans must
// completely cover each table's span. For example, if a table with ID 51 were
// being backed up, then the span `/Table/5{1-2}` must be completely covered.
repeated roachpb.Span spans = 3 [(gogoproto.nullable) = false];
// IntroducedSpans are a subset of spans, set only when creating incremental
// backups that cover spans not included in a previous backup. Spans contained
// here are covered in the interval (0, startTime], which, in conjunction with
// the coverage from (startTime, endTime] implied for all spans in Spans,
// results in coverage from [0, endTime] for these spans.
//
// The first set of spans in this field are new spans that did not
// exist in the previous backup (a new index, for example), while the remaining
// spans are re-introduced spans, which need to be backed up again from (0,
// startTime] because a non-mvcc operation may have occurred on this span. See
// the getReintroducedSpans() for more information.
repeated roachpb.Span introduced_spans = 15 [(gogoproto.nullable) = false];
repeated DescriptorRevision descriptor_changes = 16 [(gogoproto.nullable) = false];
repeated File files = 4 [(gogoproto.nullable) = false];
repeated sql.sqlbase.Descriptor descriptors = 5 [(gogoproto.nullable) = false];
repeated cockroach.multitenant.TenantInfoWithUsage tenants = 26 [(gogoproto.nullable) = false];
// This field is deprecated; it is only retained to allow restoring older
// backups.
repeated cockroach.multitenant.ProtoInfo tenants_deprecated = 24 [(gogoproto.nullable) = false];
// databases in descriptors that have all tables also in descriptors.
repeated uint32 complete_dbs = 14 [
(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"];
reserved 6;
roachpb.RowCount entry_counts = 12 [(gogoproto.nullable) = false];
cloud.cloudpb.ExternalStorage dir = 7 [(gogoproto.nullable) = false];
uint32 format_version = 8;
bytes cluster_id = 9 [(gogoproto.nullable) = false, (gogoproto.customname) = "ClusterID",
(gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/uuid.UUID"];
// node_id and build_info of the gateway node (which writes the descriptor).
int32 node_id = 10 [(gogoproto.customname) = "NodeID",
(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"];
build.Info build_info = 11 [(gogoproto.nullable) = false];
roachpb.Version cluster_version = 25 [(gogoproto.nullable) = false];
bytes id = 18 [(gogoproto.nullable) = false,
(gogoproto.customname) = "ID",
(gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/uuid.UUID"];
repeated string partition_descriptor_filenames = 19;
repeated string locality_kvs = 20 [(gogoproto.customname) = "LocalityKVs"];
// This field is used by backups in 19.2 and 20.1 where a backup manifest stores all the table
// statistics in the field, the later versions all write the statistics to a separate file
// indicated in the table_statistic_files field.
repeated sql.stats.TableStatisticProto deprecated_statistics = 21;
map<uint32, string> statistics_filenames = 23 [
(gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"
];
int32 descriptor_coverage = 22 [
(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/tree.DescriptorCoverage"];
// HasExternalManifestSSTs is set to true if the backup manifest has its
// `Files`, `Descriptors`, and DescriptorChanges fields nil'ed out and written
// as a supporting SST file instead.
//
// TODO(adityamaru): Delete when backwards compatibility with 22.2 is dropped
// since all backups in 23.1+ will write slim manifests.
bool has_external_manifest_ssts = 27 [(gogoproto.customname) = "HasExternalManifestSSTs"];
// NEXT ID: 28
}
message BackupPartitionDescriptor{
string locality_kv = 1 [(gogoproto.customname) = "LocalityKV"];
repeated BackupManifest.File files = 2 [(gogoproto.nullable) = false];
bytes backup_id = 3 [(gogoproto.nullable) = false,
(gogoproto.customname) = "BackupID",
(gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/uuid.UUID"];
}
// In 20.2 and later, the Statistics object is stored separately from the backup manifest.
// StatsTables is a struct containing an array of sql.stats.TableStatisticProto object so
// that it can be easily marshaled into or unmarshaled from a file.
message StatsTable {
repeated sql.stats.TableStatisticProto statistics = 1;
}
// ScheduledBackupExecutionArgs is the arguments to the scheduled backup executor.
message ScheduledBackupExecutionArgs {
enum BackupType {
FULL = 0;
INCREMENTAL = 1;
}
BackupType backup_type = 1;
string backup_statement = 2;
int64 unpause_on_success = 3;
bool updates_last_backup_metric = 4;
// If the schedule is one of the two that were created when setting up a
// full+incremental schedule backup, then DependentScheduleID will be set to
// the schedule ID of the other "dependent" schedule.
// i.e. the full schedule will have the inc schedules ID and vice versa.
// A value of 0 indicates that there is no dependent schedule.
int64 dependent_schedule_id = 6 [(gogoproto.customname) = "DependentScheduleID"];
// ChainProtectedTimestampRecords indicates that chaining of protected
// timestamp records is enabled for this schedule. The chaining scheme works
// as described in `schedule_pts_chaining.go`.
bool chain_protected_timestamp_records = 7;
bytes protected_timestamp_record = 8 [
(gogoproto.customname) = "ProtectedTimestampRecord",
(gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/uuid.UUID"
];
reserved 5;
}
// RestoreProgress is the information that the RestoreData processor sends back
// to the restore coordinator to update the job progress.
message RestoreProgress {
roachpb.RowCount summary = 1 [(gogoproto.nullable) = false];
int64 progressIdx = 2;
roachpb.Span dataSpan = 3 [(gogoproto.nullable) = false];
}
message BackupProcessorPlanningTraceEvent {
map<int32, int64> node_to_num_spans = 1 [(gogoproto.nullable) = false];
int64 total_num_spans = 2;
}
message BackupProgressTraceEvent {
int64 total_num_files = 1;
roachpb.RowCount total_entry_counts = 2 [(gogoproto.nullable) = false];
util.hlc.Timestamp revision_start_time = 3 [(gogoproto.nullable) = false];
}
// ExportStats is a message containing information about each
// Export{Request,Response}.
message ExportStats {
// NumFiles is the number of SST files produced by the ExportRequest.
int64 num_files = 1;
// DataSize is the byte size of all the SST files produced by the
// ExportRequest.
int64 data_size = 2;
// Duration is the total time taken to send an ExportRequest, receive an
// ExportResponse and push the response on a channel.
int64 duration = 3 [(gogoproto.casttype) = "time.Duration"];
// StartTime is the timestamp at which the ExportRequest was sent.
util.hlc.Timestamp start_time = 4 [(gogoproto.nullable) = false];
// EndTime is the timestamp at which an ExportResponse was received and pushed
// onto a channel.
util.hlc.Timestamp end_time = 5 [(gogoproto.nullable) = false];
}
| pkg/ccl/backupccl/backuppb/backup.proto | 0 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.006515631452202797,
0.0008350739371962845,
0.00016610750753898174,
0.0001820793841034174,
0.0014553653309121728
] |
{
"id": 2,
"code_window": [
"\t// the 20.2 release, this test will fail because it is missing a fixture for\n",
"\t// 20.1; run the test (on 20.1). Check it in (instructions will be logged\n",
"\t// below) and off we go.\n",
"\t//\n",
"\t// The version to create/update the fixture for. Must be released (i.e.\n",
"\t// can download it from the homepage); if that is not the case use the\n",
"\t// empty string which uses the local cockroach binary. Make sure that\n",
"\t// this binary then has the correct version. For example, to make a\n",
"\t// \"v20.2\" fixture, you will need a binary that has \"v20.2\" in the\n",
"\t// output of `./cockroach version`, and this process will end up\n",
"\t// creating fixtures that have \"v20.2\" in them. This would be part\n",
"\t// of tagging the master branch as v21.1 in the process of going\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// The version to create/update the fixture for must be released\n",
"\t// (i.e. can download it from the homepage). For example, to make a\n"
],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "replace",
"edit_start_line_idx": 30
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tests
import (
"context"
"runtime"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test"
)
func registerFixtures(r registry.Registry) {
// Run this test to create a new fixture for the version upgrade test. This
// is necessary after every release. For example, the day `master` becomes
// the 20.2 release, this test will fail because it is missing a fixture for
// 20.1; run the test (on 20.1). Check it in (instructions will be logged
// below) and off we go.
//
// The version to create/update the fixture for. Must be released (i.e.
// can download it from the homepage); if that is not the case use the
// empty string which uses the local cockroach binary. Make sure that
// this binary then has the correct version. For example, to make a
// "v20.2" fixture, you will need a binary that has "v20.2" in the
// output of `./cockroach version`, and this process will end up
// creating fixtures that have "v20.2" in them. This would be part
// of tagging the master branch as v21.1 in the process of going
// through the major release for v20.2. The version is passed in as
// FIXTURE_VERSION environment variable.
//
// In the common case, one should populate this with the version (instead of
// using the empty string) as this is the most straightforward and least
// error-prone way to generate the fixtures.
//
// Please note that you do *NOT* need to update the fixtures in a patch
// release. This only happens as part of preparing the master branch for the
// next release. The release team runbooks, at time of writing, reflect
// this.
//
// Example invocation:
// roachtest --local run generate-fixtures --debug --cockroach ./cockroach \
// --build-tag v22.1.0-beta.3 tag:fixtures
runFixtures := func(
ctx context.Context,
t test.Test,
c cluster.Cluster,
) {
if c.IsLocal() && runtime.GOARCH == "arm64" {
t.Skip("Skip under ARM64. See https://github.com/cockroachdb/cockroach/issues/89268")
}
fixtureVersion := strings.TrimPrefix(t.BuildVersion().String(), "v")
makeVersionFixtureAndFatal(ctx, t, c, fixtureVersion)
}
spec := registry.TestSpec{
Name: "generate-fixtures",
Timeout: 30 * time.Minute,
Tags: registry.Tags("fixtures"),
Owner: registry.OwnerDevInf,
Cluster: r.MakeClusterSpec(4),
Run: runFixtures,
}
r.Add(spec)
}
| pkg/cmd/roachtest/tests/fixtures.go | 1 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.9846428632736206,
0.17895294725894928,
0.00016520479402970523,
0.00029556037043221295,
0.3374958038330078
] |
{
"id": 2,
"code_window": [
"\t// the 20.2 release, this test will fail because it is missing a fixture for\n",
"\t// 20.1; run the test (on 20.1). Check it in (instructions will be logged\n",
"\t// below) and off we go.\n",
"\t//\n",
"\t// The version to create/update the fixture for. Must be released (i.e.\n",
"\t// can download it from the homepage); if that is not the case use the\n",
"\t// empty string which uses the local cockroach binary. Make sure that\n",
"\t// this binary then has the correct version. For example, to make a\n",
"\t// \"v20.2\" fixture, you will need a binary that has \"v20.2\" in the\n",
"\t// output of `./cockroach version`, and this process will end up\n",
"\t// creating fixtures that have \"v20.2\" in them. This would be part\n",
"\t// of tagging the master branch as v21.1 in the process of going\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// The version to create/update the fixture for must be released\n",
"\t// (i.e. can download it from the homepage). For example, to make a\n"
],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "replace",
"edit_start_line_idx": 30
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package norm
import (
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
)
// ConcatLeftDeepAnds concatenates any left-deep And expressions in the right
// expression with any left-deep And expressions in the left expression. The
// result is a combined left-deep And expression. Note that NormalizeNestedAnds
// has already guaranteed that both inputs will already be left-deep.
func (c *CustomFuncs) ConcatLeftDeepAnds(left, right opt.ScalarExpr) opt.ScalarExpr {
if and, ok := right.(*memo.AndExpr); ok {
return c.f.ConstructAnd(c.ConcatLeftDeepAnds(left, and.Left), and.Right)
}
return c.f.ConstructAnd(left, right)
}
// NegateComparison negates a comparison op like:
//
// a.x = 5
//
// to:
//
// a.x <> 5
func (c *CustomFuncs) NegateComparison(
cmp opt.Operator, left, right opt.ScalarExpr,
) opt.ScalarExpr {
negate := opt.NegateOpMap[cmp]
return c.f.DynamicConstruct(negate, left, right).(opt.ScalarExpr)
}
// CanNegateComparison returns whether the given comparison op can be negated.
func (c *CustomFuncs) CanNegateComparison(cmp opt.Operator) bool {
_, ok := opt.NegateOpMap[cmp]
return ok
}
// FindRedundantConjunct takes the left and right operands of an Or operator as
// input. It examines each conjunct from the left expression and determines
// whether it appears as a conjunct in the right expression. If so, it returns
// the matching conjunct. Otherwise, it returns ok=false. For example:
//
// A OR A => A
// B OR A => nil
// A OR (A AND B) => A
// (A AND B) OR (A AND C) => A
// (A AND B AND C) OR (A AND (D OR E)) => A
//
// Once a redundant conjunct has been found, it is extracted via a call to the
// ExtractRedundantConjunct function. Redundant conjuncts are extracted from
// multiple nested Or operators by repeated application of these functions.
func (c *CustomFuncs) FindRedundantConjunct(
left, right opt.ScalarExpr,
) (_ opt.ScalarExpr, ok bool) {
// Recurse over each conjunct from the left expression and determine whether
// it's redundant.
for {
// Assume a left-deep And expression tree normalized by NormalizeNestedAnds.
if and, ok := left.(*memo.AndExpr); ok {
if c.isConjunct(and.Right, right) {
return and.Right, true
}
left = and.Left
} else {
if c.isConjunct(left, right) {
return left, true
}
return nil, false
}
}
}
// isConjunct returns true if the candidate expression is a conjunct within the
// given conjunction. The conjunction is assumed to be left-deep (normalized by
// the NormalizeNestedAnds rule).
func (c *CustomFuncs) isConjunct(candidate, conjunction opt.ScalarExpr) bool {
for {
if and, ok := conjunction.(*memo.AndExpr); ok {
if and.Right == candidate {
return true
}
conjunction = and.Left
} else {
return conjunction == candidate
}
}
}
// ExtractRedundantConjunct extracts a redundant conjunct from an Or expression,
// and returns an And of the conjunct with the remaining Or expression (a
// logically equivalent expression). For example:
//
// (A AND B) OR (A AND C) => A AND (B OR C)
//
// If extracting the conjunct from one of the OR conditions would result in an
// empty condition, the conjunct itself is returned (a logically equivalent
// expression). For example:
//
// A OR (A AND B) => A
//
// These transformations are useful for finding a conjunct that can be pushed
// down in the query tree. For example, if the redundant conjunct A is fully
// bound by one side of a join, it can be pushed through the join, even if B and
// C cannot.
func (c *CustomFuncs) ExtractRedundantConjunct(
conjunct, left, right opt.ScalarExpr,
) opt.ScalarExpr {
if conjunct == left || conjunct == right {
return conjunct
}
return c.f.ConstructAnd(
conjunct,
c.f.ConstructOr(
c.extractConjunct(conjunct, left.(*memo.AndExpr)),
c.extractConjunct(conjunct, right.(*memo.AndExpr)),
),
)
}
// extractConjunct traverses the And subtree looking for the given conjunct,
// which must be present. Once it's located, it's removed from the tree, and
// the remaining expression is returned.
func (c *CustomFuncs) extractConjunct(conjunct opt.ScalarExpr, and *memo.AndExpr) opt.ScalarExpr {
if and.Right == conjunct {
return and.Left
}
if and.Left == conjunct {
return and.Right
}
return c.f.ConstructAnd(c.extractConjunct(conjunct, and.Left.(*memo.AndExpr)), and.Right)
}
| pkg/sql/opt/norm/bool_funcs.go | 0 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.00017538649262860417,
0.00017117374227382243,
0.00016147557471413165,
0.0001716838014544919,
0.000003672045977509697
] |
{
"id": 2,
"code_window": [
"\t// the 20.2 release, this test will fail because it is missing a fixture for\n",
"\t// 20.1; run the test (on 20.1). Check it in (instructions will be logged\n",
"\t// below) and off we go.\n",
"\t//\n",
"\t// The version to create/update the fixture for. Must be released (i.e.\n",
"\t// can download it from the homepage); if that is not the case use the\n",
"\t// empty string which uses the local cockroach binary. Make sure that\n",
"\t// this binary then has the correct version. For example, to make a\n",
"\t// \"v20.2\" fixture, you will need a binary that has \"v20.2\" in the\n",
"\t// output of `./cockroach version`, and this process will end up\n",
"\t// creating fixtures that have \"v20.2\" in them. This would be part\n",
"\t// of tagging the master branch as v21.1 in the process of going\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// The version to create/update the fixture for must be released\n",
"\t// (i.e. can download it from the homepage). For example, to make a\n"
],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "replace",
"edit_start_line_idx": 30
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tests
var rustPostgresBlocklist = blocklist{
"binary_copy.read_basic": "No COPY TO support - https://github.com/cockroachdb/cockroach/issues/85571",
"binary_copy.read_big_rows": "default int size (int4 vs int8) mismatch",
"binary_copy.read_many_rows": "No COPY TO support - https://github.com/cockroachdb/cockroach/issues/85571",
"binary_copy.write_basic": "COPY FROM not supported in extended protocol",
"binary_copy.write_big_rows": "COPY FROM not supported in extended protocol",
"binary_copy.write_many_rows": "COPY FROM not supported in extended protocol",
"composites.defaults": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27792",
"composites.extra_field": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27792",
"composites.missing_field": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27792",
"composites.name_overrides": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27796",
"composites.raw_ident_field": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27792",
"composites.wrong_name": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27792",
"composites.wrong_type": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27796",
"domains.defaults": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27796",
"domains.domain_in_composite": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27796",
"domains.name_overrides": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27796",
"domains.wrong_name": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27796",
"domains.wrong_type": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27796",
"enums.defaults": "experimental feature - https://github.com/cockroachdb/cockroach/issues/46260",
"enums.extra_variant": "experimental feature - https://github.com/cockroachdb/cockroach/issues/46260",
"enums.missing_variant": "experimental feature - https://github.com/cockroachdb/cockroach/issues/46260",
"enums.name_overrides": "experimental feature - https://github.com/cockroachdb/cockroach/issues/46260",
"enums.wrong_name": "experimental feature - https://github.com/cockroachdb/cockroach/issues/46260",
"runtime.multiple_hosts_multiple_ports": "default int size (int4 vs int8) mismatch",
"runtime.multiple_hosts_one_port": "default int size (int4 vs int8) mismatch",
"runtime.target_session_attrs_ok": "default int size (int4 vs int8) mismatch",
"runtime.tcp": "default int size (int4 vs int8) mismatch",
"test.binary_copy_in": "COPY FROM not supported in extended protocol",
"test.binary_copy_out": "No COPY TO support - https://github.com/cockroachdb/cockroach/issues/85571",
"test.copy_in": "COPY FROM not supported in extended protocol",
"test.copy_in_abort": "COPY FROM not supported in extended protocol",
"test.copy_out": "No COPY TO support - https://github.com/cockroachdb/cockroach/issues/85571",
"test.nested_transactions": "default int size (int4 vs int8) mismatch",
"test.notice_callback": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/17511",
"test.notifications_blocking_iter": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/41522",
"test.notifications_iter": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/41522",
"test.notifications_timeout_iter": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/41522",
"test.portal": "default int size (int4 vs int8) mismatch",
"test.prefer": "password authentication failed",
"test.prepare": "default int size (int4 vs int8) mismatch",
"test.require": "server does not support TLS",
"test.require_channel_binding_ok": "password authentication failed",
"test.runtime": "server does not support TLS",
"test.savepoints": "default int size (int4 vs int8) mismatch",
"test.scram_user": "server does not support TLS",
"test.transaction_commit": "unknown function: txid_current()",
"transparent.round_trip": "default int size (int4 vs int8) mismatch",
"types.composite": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27792",
"types.domain": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27796",
"types.enum_": "experimental feature - https://github.com/cockroachdb/cockroach/issues/46260",
"types.lquery": "unsupported datatype - https://github.com/cockroachdb/cockroach/issues/44657",
"types.lquery_any": "unsupported datatype - https://github.com/cockroachdb/cockroach/issues/44657",
"types.ltree": "unsupported datatype - https://github.com/cockroachdb/cockroach/issues/44657",
"types.ltree_any": "unsupported datatype - https://github.com/cockroachdb/cockroach/issues/44657",
"types.ltxtquery": "unsupported datatype - https://github.com/cockroachdb/cockroach/issues/44657",
"types.ltxtquery_any": "unsupported datatype - https://github.com/cockroachdb/cockroach/issues/44657",
"types.test_array_vec_params": "default int size (int4 vs int8) mismatch",
"types.test_citext_params": "unsupported citext type alias - https://github.com/cockroachdb/cockroach/issues/22463",
"types.test_hstore_params": "unsupported datatype - https://github.com/cockroachdb/cockroach/issues/41284",
"types.test_i16_params": "default int size (int4 vs int8) mismatch",
"types.test_i32_params": "default int size (int4 vs int8) mismatch",
"types.test_lsn_params": "unsupported datatype - https://github.com/cockroachdb/cockroach/issues/54516",
"types.test_pg_database_datname": "default database name mismatch",
"types.test_slice": "default int size (int4 vs int8) mismatch",
"types.test_slice_range": "unsupported feature - https://github.com/cockroachdb/cockroach/issues/27791",
}
var rustPostgresIgnoreList = blocklist{
"runtime.unix_socket": "unknown",
}
| pkg/cmd/roachtest/tests/rust_postgres_blocklist.go | 0 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.00017640336591284722,
0.00016710198542568833,
0.00016456852608826011,
0.0001658634573686868,
0.0000034624754334799945
] |
{
"id": 2,
"code_window": [
"\t// the 20.2 release, this test will fail because it is missing a fixture for\n",
"\t// 20.1; run the test (on 20.1). Check it in (instructions will be logged\n",
"\t// below) and off we go.\n",
"\t//\n",
"\t// The version to create/update the fixture for. Must be released (i.e.\n",
"\t// can download it from the homepage); if that is not the case use the\n",
"\t// empty string which uses the local cockroach binary. Make sure that\n",
"\t// this binary then has the correct version. For example, to make a\n",
"\t// \"v20.2\" fixture, you will need a binary that has \"v20.2\" in the\n",
"\t// output of `./cockroach version`, and this process will end up\n",
"\t// creating fixtures that have \"v20.2\" in them. This would be part\n",
"\t// of tagging the master branch as v21.1 in the process of going\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// The version to create/update the fixture for must be released\n",
"\t// (i.e. can download it from the homepage). For example, to make a\n"
],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "replace",
"edit_start_line_idx": 30
} | diff -urN a/cmd/protoc-gen-doc/BUILD.bazel b/cmd/protoc-gen-doc/BUILD.bazel
--- a/cmd/protoc-gen-doc/BUILD.bazel 1969-12-31 19:00:00.000000000 -0500
+++ b/cmd/protoc-gen-doc/BUILD.bazel 2000-01-01 00:00:00.000000000 -0000
@@ -11,7 +11,6 @@
deps = [
"//:protoc-gen-doc",
"//extensions/google_api_http",
- "//extensions/lyft_validate",
"//extensions/validator_field",
"@com_github_pseudomuto_protokit//:protokit",
],
diff -urN a/cmd/protoc-gen-doc/main.go b/cmd/protoc-gen-doc/main.go
--- a/cmd/protoc-gen-doc/main.go 1969-12-31 19:00:00.000000000 -0500
+++ b/cmd/protoc-gen-doc/main.go 2000-01-01 00:00:00.000000000 -0000
@@ -21,7 +21,6 @@
gendoc "github.com/pseudomuto/protoc-gen-doc"
_ "github.com/pseudomuto/protoc-gen-doc/extensions/google_api_http" // imported for side effects
- _ "github.com/pseudomuto/protoc-gen-doc/extensions/lyft_validate" // imported for side effects
_ "github.com/pseudomuto/protoc-gen-doc/extensions/validator_field" // imported for side effects
)
| build/patches/com_github_pseudomuto_protoc_gen_doc.patch | 0 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.00017374464368913323,
0.00017038594523910433,
0.00016491881979163736,
0.00017249440134037286,
0.000003899397597706411
] |
{
"id": 3,
"code_window": [
"\t// \"v20.2\" fixture, you will need a binary that has \"v20.2\" in the\n",
"\t// output of `./cockroach version`, and this process will end up\n",
"\t// creating fixtures that have \"v20.2\" in them. This would be part\n",
"\t// of tagging the master branch as v21.1 in the process of going\n",
"\t// through the major release for v20.2. The version is passed in as\n",
"\t// FIXTURE_VERSION environment variable.\n",
"\t//\n",
"\t// In the common case, one should populate this with the version (instead of\n",
"\t// using the empty string) as this is the most straightforward and least\n",
"\t// error-prone way to generate the fixtures.\n",
"\t//\n",
"\t// Please note that you do *NOT* need to update the fixtures in a patch\n",
"\t// release. This only happens as part of preparing the master branch for the\n",
"\t// next release. The release team runbooks, at time of writing, reflect\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// FIXTURE_VERSION environment variable. The contents of this\n",
"\t// environment variable must match a released cockroach binary.\n"
],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tests
import (
"context"
"runtime"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test"
)
func registerFixtures(r registry.Registry) {
// Run this test to create a new fixture for the version upgrade test. This
// is necessary after every release. For example, the day `master` becomes
// the 20.2 release, this test will fail because it is missing a fixture for
// 20.1; run the test (on 20.1). Check it in (instructions will be logged
// below) and off we go.
//
// The version to create/update the fixture for. Must be released (i.e.
// can download it from the homepage); if that is not the case use the
// empty string which uses the local cockroach binary. Make sure that
// this binary then has the correct version. For example, to make a
// "v20.2" fixture, you will need a binary that has "v20.2" in the
// output of `./cockroach version`, and this process will end up
// creating fixtures that have "v20.2" in them. This would be part
// of tagging the master branch as v21.1 in the process of going
// through the major release for v20.2. The version is passed in as
// FIXTURE_VERSION environment variable.
//
// In the common case, one should populate this with the version (instead of
// using the empty string) as this is the most straightforward and least
// error-prone way to generate the fixtures.
//
// Please note that you do *NOT* need to update the fixtures in a patch
// release. This only happens as part of preparing the master branch for the
// next release. The release team runbooks, at time of writing, reflect
// this.
//
// Example invocation:
// roachtest --local run generate-fixtures --debug --cockroach ./cockroach \
// --build-tag v22.1.0-beta.3 tag:fixtures
runFixtures := func(
ctx context.Context,
t test.Test,
c cluster.Cluster,
) {
if c.IsLocal() && runtime.GOARCH == "arm64" {
t.Skip("Skip under ARM64. See https://github.com/cockroachdb/cockroach/issues/89268")
}
fixtureVersion := strings.TrimPrefix(t.BuildVersion().String(), "v")
makeVersionFixtureAndFatal(ctx, t, c, fixtureVersion)
}
spec := registry.TestSpec{
Name: "generate-fixtures",
Timeout: 30 * time.Minute,
Tags: registry.Tags("fixtures"),
Owner: registry.OwnerDevInf,
Cluster: r.MakeClusterSpec(4),
Run: runFixtures,
}
r.Add(spec)
}
| pkg/cmd/roachtest/tests/fixtures.go | 1 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.9922569990158081,
0.2378157079219818,
0.00015904013707768172,
0.00023906909336801618,
0.4118303954601288
] |
{
"id": 3,
"code_window": [
"\t// \"v20.2\" fixture, you will need a binary that has \"v20.2\" in the\n",
"\t// output of `./cockroach version`, and this process will end up\n",
"\t// creating fixtures that have \"v20.2\" in them. This would be part\n",
"\t// of tagging the master branch as v21.1 in the process of going\n",
"\t// through the major release for v20.2. The version is passed in as\n",
"\t// FIXTURE_VERSION environment variable.\n",
"\t//\n",
"\t// In the common case, one should populate this with the version (instead of\n",
"\t// using the empty string) as this is the most straightforward and least\n",
"\t// error-prone way to generate the fixtures.\n",
"\t//\n",
"\t// Please note that you do *NOT* need to update the fixtures in a patch\n",
"\t// release. This only happens as part of preparing the master branch for the\n",
"\t// next release. The release team runbooks, at time of writing, reflect\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// FIXTURE_VERSION environment variable. The contents of this\n",
"\t// environment variable must match a released cockroach binary.\n"
],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "replace",
"edit_start_line_idx": 39
} | [128 0 0 1 144 0 0 4 240 157 132 158] | pkg/util/json/testdata/encoded/string_surrogates_U+1D11E_MUSICAL_SYMBOL_G_CLEF.json.bytes | 0 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.0001695127139100805,
0.0001695127139100805,
0.0001695127139100805,
0.0001695127139100805,
0
] |
{
"id": 3,
"code_window": [
"\t// \"v20.2\" fixture, you will need a binary that has \"v20.2\" in the\n",
"\t// output of `./cockroach version`, and this process will end up\n",
"\t// creating fixtures that have \"v20.2\" in them. This would be part\n",
"\t// of tagging the master branch as v21.1 in the process of going\n",
"\t// through the major release for v20.2. The version is passed in as\n",
"\t// FIXTURE_VERSION environment variable.\n",
"\t//\n",
"\t// In the common case, one should populate this with the version (instead of\n",
"\t// using the empty string) as this is the most straightforward and least\n",
"\t// error-prone way to generate the fixtures.\n",
"\t//\n",
"\t// Please note that you do *NOT* need to update the fixtures in a patch\n",
"\t// release. This only happens as part of preparing the master branch for the\n",
"\t// next release. The release team runbooks, at time of writing, reflect\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// FIXTURE_VERSION environment variable. The contents of this\n",
"\t// environment variable must match a released cockroach binary.\n"
],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package streampb
// StreamID is the ID of a replication stream.
type StreamID int64
// SafeValue implements the redact.SafeValue interface.
func (j StreamID) SafeValue() {}
// InvalidStreamID is the zero value for StreamID corresponding to no stream.
const InvalidStreamID StreamID = 0
| pkg/repstream/streampb/streamid.go | 0 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.00017415813636034727,
0.00016911688726395369,
0.00016307503392454237,
0.00017011749150697142,
0.0000045796432459610514
] |
{
"id": 3,
"code_window": [
"\t// \"v20.2\" fixture, you will need a binary that has \"v20.2\" in the\n",
"\t// output of `./cockroach version`, and this process will end up\n",
"\t// creating fixtures that have \"v20.2\" in them. This would be part\n",
"\t// of tagging the master branch as v21.1 in the process of going\n",
"\t// through the major release for v20.2. The version is passed in as\n",
"\t// FIXTURE_VERSION environment variable.\n",
"\t//\n",
"\t// In the common case, one should populate this with the version (instead of\n",
"\t// using the empty string) as this is the most straightforward and least\n",
"\t// error-prone way to generate the fixtures.\n",
"\t//\n",
"\t// Please note that you do *NOT* need to update the fixtures in a patch\n",
"\t// release. This only happens as part of preparing the master branch for the\n",
"\t// next release. The release team runbooks, at time of writing, reflect\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// FIXTURE_VERSION environment variable. The contents of this\n",
"\t// environment variable must match a released cockroach binary.\n"
],
"file_path": "pkg/cmd/roachtest/tests/fixtures.go",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package colexec

import (
	"context"
	"fmt"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/col/coldata"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
	"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecbase"
	"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils"
	"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
	"github.com/cockroachdb/cockroach/pkg/sql/randgen"
	"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/randutil"
)

func TestCaseOp(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	st := cluster.MakeTestingClusterSettings()
	evalCtx := eval.MakeTestingEvalContext(st)
	defer evalCtx.Stop(ctx)
	flowCtx := &execinfra.FlowCtx{
		EvalCtx: &evalCtx,
		Mon: evalCtx.TestingMon,
		Cfg: &execinfra.ServerConfig{
			Settings: st,
		},
	}

	for _, tc := range []struct {
		tuples colexectestutils.Tuples
		renderExpr string
		expected colexectestutils.Tuples
		inputTypes []*types.T
	}{
		{
			// Basic test.
			tuples: colexectestutils.Tuples{{1}, {2}, {nil}, {3}},
			renderExpr: "CASE WHEN @1 = 2 THEN 1 ELSE 42 END",
			expected: colexectestutils.Tuples{{42}, {1}, {42}, {42}},
			inputTypes: []*types.T{types.Int},
		},
		{
			// Test "reordered when's."
			tuples: colexectestutils.Tuples{{1, 1}, {2, 0}, {nil, nil}, {3, 3}},
			renderExpr: "CASE WHEN @1 + @2 > 3 THEN 0 WHEN @1 = 2 THEN 1 ELSE 2 END",
			expected: colexectestutils.Tuples{{2}, {1}, {2}, {0}},
			inputTypes: []*types.T{types.Int, types.Int},
		},
		{
			// Test the short-circuiting behavior.
			tuples: colexectestutils.Tuples{{1, 2}, {2, 0}, {nil, nil}, {3, 3}},
			renderExpr: "CASE WHEN @1 = 2 THEN 0::FLOAT WHEN @1 / @2 = 1 THEN 1::FLOAT END",
			expected: colexectestutils.Tuples{{nil}, {0.0}, {nil}, {1.0}},
			inputTypes: []*types.T{types.Int, types.Int},
		},
		{
			// Test when only the ELSE arm matches.
			//
			// Note that all input values are NULLs so that the "all nulls
			// injection" subtest is skipped.
			tuples: colexectestutils.Tuples{{nil}, {nil}, {nil}, {nil}},
			renderExpr: "CASE WHEN @1 = 42 THEN 1 WHEN @1 IS NOT NULL THEN 2 ELSE 42 END",
			expected: colexectestutils.Tuples{{42}, {42}, {42}, {42}},
			inputTypes: []*types.T{types.Int},
		},
	} {
		colexectestutils.RunTests(t, testAllocator, []colexectestutils.Tuples{tc.tuples}, tc.expected, colexectestutils.OrderedVerifier, func(inputs []colexecop.Operator) (colexecop.Operator, error) {
			caseOp, err := colexectestutils.CreateTestProjectingOperator(
				ctx, flowCtx, inputs[0], tc.inputTypes, tc.renderExpr, testMemAcc,
			)
			if err != nil {
				return nil, err
			}
			// We will project out the input columns in order to have test
			// cases be less verbose.
			return colexecbase.NewSimpleProjectOp(caseOp, len(tc.inputTypes)+1, []uint32{uint32(len(tc.inputTypes))}), nil
		})
	}
}

func TestCaseOpRandomized(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	st := cluster.MakeTestingClusterSettings()
	evalCtx := eval.MakeTestingEvalContext(st)
	defer evalCtx.Stop(ctx)
	flowCtx := &execinfra.FlowCtx{
		EvalCtx: &evalCtx,
		Mon: evalCtx.TestingMon,
		Cfg: &execinfra.ServerConfig{
			Settings: st,
		},
	}

	rng, _ := randutil.NewTestRand()
	numWhenArms := 1 + rng.Intn(5)
	hasElseArm := rng.Float64() < 0.5
	outputType := getRandomTypeFavorNative(rng)
	// Construct such a CASE expression that the first column from the input is
	// used as the "partitioning" column (used by WHEN arms for matching), the
	// following numWhenArms columns are the projections for the corresponding
	// WHEN "partitions", and then optionally we have an ELSE projection.
	caseExpr := "CASE @1"
	for i := 0; i < numWhenArms; i++ {
		caseExpr += fmt.Sprintf(" WHEN %d THEN @%d", i, i+2)
	}
	if hasElseArm {
		caseExpr += fmt.Sprintf(" ELSE @%d", numWhenArms+2)
	}
	caseExpr += " END"

	numInputCols := 1 + numWhenArms
	if hasElseArm {
		numInputCols++
	}
	numInputRows := 1 + rng.Intn(coldata.BatchSize()) + coldata.BatchSize()*rng.Intn(5)
	inputRows := make(rowenc.EncDatumRows, numInputRows)
	// We always have an extra partition, regardless of whether we use an ELSE
	// projection or not (if we don't, the ELSE arm will project all NULLs).
	numPartitions := numWhenArms + 1
	// We will populate the expected output at the same time as we're generating
	// the input data set. Note that all input columns will be projected out, so
	// we memorize only the output column of the CASE expression.
	expectedOutput := make([]rowenc.EncDatum, numInputRows)
	for i := range inputRows {
		inputRow := make(rowenc.EncDatumRow, numInputCols)
		partitionIdx := rng.Intn(numPartitions)
		inputRow[0].Datum = tree.NewDInt(tree.DInt(partitionIdx))
		for j := 1; j < numInputCols; j++ {
			inputRow[j] = rowenc.DatumToEncDatum(outputType, randgen.RandDatum(rng, outputType, true /* nullOk */))
		}
		inputRows[i] = inputRow
		if !hasElseArm && partitionIdx == numWhenArms {
			expectedOutput[i] = rowenc.DatumToEncDatum(outputType, tree.DNull)
		} else {
			expectedOutput[i] = inputRow[partitionIdx+1]
		}
	}

	inputTypes := make([]*types.T, numInputCols)
	inputTypes[0] = types.Int
	for i := 1; i < numInputCols; i++ {
		inputTypes[i] = outputType
	}
	assertProjOpAgainstRowByRow(
		t, flowCtx, &evalCtx, caseExpr, inputTypes, inputRows, expectedOutput, outputType,
	)
}
| pkg/sql/colexec/case_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/979f53cfd88093fd1cde76cc2b2d017e930ef298 | [
0.00017744845536071807,
0.0001710444048512727,
0.00016051337297540158,
0.00017346101230941713,
0.000004853855443798238
] |