Dataset schema:

  hunk              dict
  file              string (lengths 0 to 11.8M)
  file_path         string (lengths 2 to 234)
  label             int64 (0 or 1)
  commit_url        string (lengths 74 to 103)
  dependency_score  sequence of 5 floats
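The rows below pair each hunk with candidate files and a 0/1 label. As a minimal sketch of iterating rows with this schema, assuming the dump comes from a Hugging Face-style dataset (the dataset path and split name are hypothetical placeholders, not given by this dump):

```python
# Minimal sketch: iterate rows with the schema above.
# Assumes a Hugging Face-style dataset; "user/hunk-dependency-data" and the
# "train" split are hypothetical placeholders, not confirmed by this dump.
from datasets import load_dataset

ds = load_dataset("user/hunk-dependency-data", split="train")

for row in ds:
    hunk = row["hunk"]                # dict: id, code_window, labels, after_edit, file_path, type, edit_start_line_idx
    candidate = row["file_path"]      # path of the candidate file
    label = row["label"]              # int64: 0 or 1
    scores = row["dependency_score"]  # list of 5 floats
    print(candidate, label, max(scores))
```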
{ "id": 3, "code_window": [ "\tconfig.GenericConfig.LegacyAPIGroupPrefixes = sets.NewString(\"/api\")\n", "\tconfig.GenericConfig.RequestContextMapper = genericapirequest.NewRequestContextMapper()\n", "\tconfig.GenericConfig.LoopbackClientConfig = &restclient.Config{APIPath: \"/api\", ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}\n", "\tconfig.ExtraConfig.EnableCoreControllers = false\n", "\tconfig.ExtraConfig.KubeletClientConfig = kubeletclient.KubeletClientConfig{Port: 10250}\n", "\tconfig.ExtraConfig.ProxyTransport = utilnet.SetTransportDefaults(&http.Transport{\n", "\t\tDial: func(network, addr string) (net.Conn, error) { return nil, nil },\n", "\t\tTLSClientConfig: &tls.Config{},\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/master/master_test.go", "type": "replace", "edit_start_line_idx": 110 }
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package framework import ( "net" "net/http" "net/http/httptest" "path" "time" "github.com/go-openapi/spec" "github.com/golang/glog" "github.com/pborman/uuid" apps "k8s.io/api/apps/v1beta1" autoscaling "k8s.io/api/autoscaling/v1" certificates "k8s.io/api/certificates/v1beta1" "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" rbac "k8s.io/api/rbac/v1alpha1" storage "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" authauthenticator "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/authenticatorfactory" authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/authorization/authorizerfactory" authorizerunion "k8s.io/apiserver/pkg/authorization/union" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/options" serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/apis/batch" policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1" "k8s.io/kubernetes/pkg/generated/openapi" kubeletclient "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/master" "k8s.io/kubernetes/pkg/version" ) // Config is a struct of configuration directives for NewMasterComponents. type Config struct { // If nil, a default is used, partially filled configs will not get populated. 
MasterConfig *master.Config StartReplicationManager bool // Client throttling qps QPS float32 // Client burst qps, also burst replicas allowed in rc manager Burst int // TODO: Add configs for endpoints controller, scheduler etc } // alwaysAllow always allows an action type alwaysAllow struct{} func (alwaysAllow) Authorize(requestAttributes authorizer.Attributes) (authorizer.Decision, string, error) { return authorizer.DecisionAllow, "always allow", nil } // alwaysEmpty simulates "no authentication" for old tests func alwaysEmpty(req *http.Request) (user.Info, bool, error) { return &user.DefaultInfo{ Name: "", }, true, nil } // MasterReceiver can be used to provide the master to a custom incoming server function type MasterReceiver interface { SetMaster(m *master.Master) } // MasterHolder implements type MasterHolder struct { Initialized chan struct{} M *master.Master } func (h *MasterHolder) SetMaster(m *master.Master) { h.M = m close(h.Initialized) } // startMasterOrDie starts a kubernetes master and an httpserver to handle api requests func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) { var m *master.Master var s *httptest.Server if incomingServer != nil { s = incomingServer } else { s = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { m.GenericAPIServer.Handler.ServeHTTP(w, req) })) } stopCh := make(chan struct{}) closeFn := func() { close(stopCh) s.Close() } if masterConfig == nil { masterConfig = NewMasterConfig() masterConfig.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, legacyscheme.Scheme) masterConfig.GenericConfig.OpenAPIConfig.Info = &spec.Info{ InfoProps: spec.InfoProps{ Title: "Kubernetes", Version: "unversioned", }, } masterConfig.GenericConfig.OpenAPIConfig.DefaultResponse = &spec.Response{ ResponseProps: spec.ResponseProps{ Description: "Default Response.", }, } masterConfig.GenericConfig.OpenAPIConfig.GetDefinitions = openapi.GetOpenAPIDefinitions masterConfig.GenericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig() } // set the loopback client config if masterConfig.GenericConfig.LoopbackClientConfig == nil { masterConfig.GenericConfig.LoopbackClientConfig = &restclient.Config{QPS: 50, Burst: 100, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}} } masterConfig.GenericConfig.LoopbackClientConfig.Host = s.URL privilegedLoopbackToken := uuid.NewRandom().String() // wrap any available authorizer tokens := make(map[string]*user.DefaultInfo) tokens[privilegedLoopbackToken] = &user.DefaultInfo{ Name: user.APIServerUser, UID: uuid.NewRandom().String(), Groups: []string{user.SystemPrivilegedGroup}, } tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens) if masterConfig.GenericConfig.Authentication.Authenticator == nil { masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty)) } else { masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authentication.Authenticator) } if masterConfig.GenericConfig.Authorization.Authorizer != nil { tokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup) masterConfig.GenericConfig.Authorization.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorization.Authorizer) } else { 
masterConfig.GenericConfig.Authorization.Authorizer = alwaysAllow{} } masterConfig.GenericConfig.LoopbackClientConfig.BearerToken = privilegedLoopbackToken clientset, err := clientset.NewForConfig(masterConfig.GenericConfig.LoopbackClientConfig) if err != nil { glog.Fatal(err) } sharedInformers := informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout) m, err = masterConfig.Complete(sharedInformers).New(genericapiserver.EmptyDelegate) if err != nil { closeFn() glog.Fatalf("error in bringing up the master: %v", err) } if masterReceiver != nil { masterReceiver.SetMaster(m) } // TODO have this start method actually use the normal start sequence for the API server // this method never actually calls the `Run` method for the API server // fire the post hooks ourselves m.GenericAPIServer.PrepareRun() m.GenericAPIServer.RunPostStartHooks(stopCh) cfg := *masterConfig.GenericConfig.LoopbackClientConfig cfg.ContentConfig.GroupVersion = &schema.GroupVersion{} privilegedClient, err := restclient.RESTClientFor(&cfg) if err != nil { closeFn() glog.Fatal(err) } err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { result := privilegedClient.Get().AbsPath("/healthz").Do() status := 0 result.StatusCode(&status) if status == 200 { return true, nil } return false, nil }) if err != nil { closeFn() glog.Fatal(err) } return m, s, closeFn } // Returns the master config appropriate for most integration tests. func NewIntegrationTestMasterConfig() *master.Config { masterConfig := NewMasterConfig() masterConfig.ExtraConfig.EnableCoreControllers = true masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4") masterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource() return masterConfig } // Returns a basic master config. func NewMasterConfig() *master.Config { // This causes the integration tests to exercise the etcd // prefix code, so please don't change without ensuring // sufficient coverage in other ways. 
etcdOptions := options.NewEtcdOptions(storagebackend.NewDefaultConfig(uuid.New(), nil)) etcdOptions.StorageConfig.ServerList = []string{GetEtcdURL()} info, _ := runtime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON) ns := NewSingleContentTypeSerializer(legacyscheme.Scheme, info) resourceEncoding := serverstorage.NewDefaultResourceEncodingConfig(legacyscheme.Registry) // FIXME (soltysh): this GroupVersionResource override should be configurable // we need to set both for the whole group and for cronjobs, separately resourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal}) resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: batch.GroupName, Resource: "cronjobs"}, schema.GroupVersion{Group: batch.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal}) // we also need to set both for the storage group and for volumeattachments, separately resourceEncoding.SetVersionEncoding(storage.GroupName, *testapi.Storage.GroupVersion(), schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal}) resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: storage.GroupName, Resource: "volumeattachments"}, schema.GroupVersion{Group: storage.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal}) storageFactory := serverstorage.NewDefaultStorageFactory(etcdOptions.StorageConfig, runtime.ContentTypeJSON, ns, resourceEncoding, master.DefaultAPIResourceConfigSource(), nil) storageFactory.SetSerializer( schema.GroupResource{Group: v1.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: autoscaling.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: batch.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: apps.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: extensions.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: policy.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: rbac.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: certificates.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: storage.GroupName, Resource: serverstorage.AllResources}, "", ns) genericConfig := genericapiserver.NewConfig(legacyscheme.Codecs) kubeVersion := version.Get() genericConfig.Version = &kubeVersion genericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer() err := etcdOptions.ApplyWithStorageFactoryTo(storageFactory, genericConfig) if err != nil { panic(err) } return &master.Config{ GenericConfig: genericConfig, ExtraConfig: master.ExtraConfig{ APIResourceConfigSource: master.DefaultAPIResourceConfigSource(), StorageFactory: storageFactory, EnableCoreControllers: true, KubeletClientConfig: kubeletclient.KubeletClientConfig{Port: 10250}, APIServerServicePort: 443, MasterCount: 1, }, } } // CloseFunc can be called to cleanup the master type CloseFunc func() func 
RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server, CloseFunc) { if masterConfig == nil { masterConfig = NewMasterConfig() masterConfig.GenericConfig.EnableProfiling = true } return startMasterOrDie(masterConfig, nil, nil) } func RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) { return startMasterOrDie(masterConfig, s, masterReceiver) } // SharedEtcd creates a storage config for a shared etcd instance, with a unique prefix. func SharedEtcd() *storagebackend.Config { cfg := storagebackend.NewDefaultConfig(path.Join(uuid.New(), "registry"), nil) cfg.ServerList = []string{GetEtcdURL()} return cfg }
test/integration/framework/master_utils.go
1
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.008444512262940407, 0.001034544431604445, 0.00016273702203761786, 0.00017370174464304, 0.0019219942623749375 ]
{ "id": 3, "code_window": [ "\tconfig.GenericConfig.LegacyAPIGroupPrefixes = sets.NewString(\"/api\")\n", "\tconfig.GenericConfig.RequestContextMapper = genericapirequest.NewRequestContextMapper()\n", "\tconfig.GenericConfig.LoopbackClientConfig = &restclient.Config{APIPath: \"/api\", ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}\n", "\tconfig.ExtraConfig.EnableCoreControllers = false\n", "\tconfig.ExtraConfig.KubeletClientConfig = kubeletclient.KubeletClientConfig{Port: 10250}\n", "\tconfig.ExtraConfig.ProxyTransport = utilnet.SetTransportDefaults(&http.Transport{\n", "\t\tDial: func(network, addr string) (net.Conn, error) { return nil, nil },\n", "\t\tTLSClientConfig: &tls.Config{},\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/master/master_test.go", "type": "replace", "edit_start_line_idx": 110 }
/* Package routers enables management and retrieval of Routers from the OpenStack Networking service. Example to List Routers listOpts := routers.ListOpts{} allPages, err := routers.List(networkClient, listOpts).AllPages() if err != nil { panic(err) } allRouters, err := routers.ExtractRouters(allPages) if err != nil { panic(err) } for _, router := range allRoutes { fmt.Printf("%+v\n", router) } Example to Create a Router iTrue := true gwi := routers.GatewayInfo{ NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b", } createOpts := routers.CreateOpts{ Name: "router_1", AdminStateUp: &iTrue, GatewayInfo: &gwi, } router, err := routers.Create(networkClient, createOpts).Extract() if err != nil { panic(err) } Example to Update a Router routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" routes := []routers.Route{{ DestinationCIDR: "40.0.1.0/24", NextHop: "10.1.0.10", }} updateOpts := routers.UpdateOpts{ Name: "new_name", Routes: routes, } router, err := routers.Update(networkClient, routerID, updateOpts).Extract() if err != nil { panic(err) } Example to Remove all Routes from a Router routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" routes := []routers.Route{} updateOpts := routers.UpdateOpts{ Routes: routes, } router, err := routers.Update(networkClient, routerID, updateOpts).Extract() if err != nil { panic(err) } Example to Delete a Router routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" err := routers.Delete(networkClient, routerID).ExtractErr() if err != nil { panic(err) } Example to Add an Interface to a Router routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" intOpts := routers.AddInterfaceOpts{ SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1", } interface, err := routers.AddInterface(networkClient, routerID, intOpts).Extract() if err != nil { panic(err) } Example to Remove an Interface from a Router routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" intOpts := routers.RemoveInterfaceOpts{ SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1", } interface, err := routers.RemoveInterface(networkClient, routerID, intOpts).Extract() if err != nil { panic(err) } */ package routers
vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/doc.go
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.00017132876382675022, 0.00016577524365857244, 0.00016005150973796844, 0.0001654543448239565, 0.0000031820868571230676 ]
{ "id": 3, "code_window": [ "\tconfig.GenericConfig.LegacyAPIGroupPrefixes = sets.NewString(\"/api\")\n", "\tconfig.GenericConfig.RequestContextMapper = genericapirequest.NewRequestContextMapper()\n", "\tconfig.GenericConfig.LoopbackClientConfig = &restclient.Config{APIPath: \"/api\", ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}\n", "\tconfig.ExtraConfig.EnableCoreControllers = false\n", "\tconfig.ExtraConfig.KubeletClientConfig = kubeletclient.KubeletClientConfig{Port: 10250}\n", "\tconfig.ExtraConfig.ProxyTransport = utilnet.SetTransportDefaults(&http.Transport{\n", "\t\tDial: func(network, addr string) (net.Conn, error) { return nil, nil },\n", "\t\tTLSClientConfig: &tls.Config{},\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/master/master_test.go", "type": "replace", "edit_start_line_idx": 110 }
/* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1beta1 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more // information on the implementation: https://github.com/emicklei/go-restful/pull/215 // // TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PartialObjectMetadata = map[string]string{ "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", } func (PartialObjectMetadata) SwaggerDoc() map[string]string { return map_PartialObjectMetadata } var map_PartialObjectMetadataList = map[string]string{ "": "PartialObjectMetadataList contains a list of objects containing only their metadata", "items": "items contains each of the included items.", } func (PartialObjectMetadataList) SwaggerDoc() map[string]string { return map_PartialObjectMetadataList } var map_Table = map[string]string{ "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", "rows": "rows is the list of items in the table.", } func (Table) SwaggerDoc() map[string]string { return map_Table } var map_TableColumnDefinition = map[string]string{ "": "TableColumnDefinition contains information about a column returned in the Table.", "name": "name is a human readable name for the column.", "type": "type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", "format": "format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", "description": "description is a human readable description of this column.", "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. 
Columns that may be omitted in limited space scenarios should be given a higher priority.", } func (TableColumnDefinition) SwaggerDoc() map[string]string { return map_TableColumnDefinition } var map_TableOptions = map[string]string{ "": "TableOptions are used when a Table is requested by the caller.", "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", } func (TableOptions) SwaggerDoc() map[string]string { return map_TableOptions } var map_TableRow = map[string]string{ "": "TableRow is an individual row in a table.", "cells": "cells will be as wide as headers and may contain strings, numbers, booleans, simple maps, or lists, or null. See the type field of the column definition for a more detailed description.", "conditions": "conditions describe additional status of a row that are relevant for a human user.", "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing.", } func (TableRow) SwaggerDoc() map[string]string { return map_TableRow } var map_TableRowCondition = map[string]string{ "": "TableRowCondition allows a row to be marked with additional information.", "type": "Type of row condition.", "status": "Status of the condition, one of True, False, Unknown.", "reason": "(brief) machine readable reason for the condition's last transition.", "message": "Human readable message indicating details about last transition.", } func (TableRowCondition) SwaggerDoc() map[string]string { return map_TableRowCondition } // AUTO-GENERATED FUNCTIONS END HERE
staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.00017730925173964351, 0.00017022575775627047, 0.0001623182906769216, 0.00016936536121647805, 0.000004459117008082103 ]
{ "id": 3, "code_window": [ "\tconfig.GenericConfig.LegacyAPIGroupPrefixes = sets.NewString(\"/api\")\n", "\tconfig.GenericConfig.RequestContextMapper = genericapirequest.NewRequestContextMapper()\n", "\tconfig.GenericConfig.LoopbackClientConfig = &restclient.Config{APIPath: \"/api\", ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}\n", "\tconfig.ExtraConfig.EnableCoreControllers = false\n", "\tconfig.ExtraConfig.KubeletClientConfig = kubeletclient.KubeletClientConfig{Port: 10250}\n", "\tconfig.ExtraConfig.ProxyTransport = utilnet.SetTransportDefaults(&http.Transport{\n", "\t\tDial: func(network, addr string) (net.Conn, error) { return nil, nil },\n", "\t\tTLSClientConfig: &tls.Config{},\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/master/master_test.go", "type": "replace", "edit_start_line_idx": 110 }
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1 import ( batchv1 "k8s.io/api/batch/v1" "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "batch" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } var ( localSchemeBuilder = &batchv1.SchemeBuilder AddToScheme = localSchemeBuilder.AddToScheme ) func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs) }
pkg/apis/batch/v1/register.go
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.00017864191613625735, 0.0001711744407657534, 0.0001632699859328568, 0.00017267820658162236, 0.000006586808467545779 ]
{ "id": 4, "code_window": [ "\t\"k8s.io/kubernetes/test/integration/framework\"\n", ")\n", "\n", "func setup(t *testing.T, groupVersions ...schema.GroupVersion) (*httptest.Server, clientset.Interface, framework.CloseFunc) {\n", "\tmasterConfig := framework.NewIntegrationTestMasterConfig()\n", "\tmasterConfig.ExtraConfig.EnableCoreControllers = false\n", "\tif len(groupVersions) > 0 {\n", "\t\tresourceConfig := master.DefaultAPIResourceConfigSource()\n", "\t\tresourceConfig.EnableVersions(groupVersions...)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/apiserver/apiserver_test.go", "type": "replace", "edit_start_line_idx": 48 }
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package framework import ( "net" "net/http" "net/http/httptest" "path" "time" "github.com/go-openapi/spec" "github.com/golang/glog" "github.com/pborman/uuid" apps "k8s.io/api/apps/v1beta1" autoscaling "k8s.io/api/autoscaling/v1" certificates "k8s.io/api/certificates/v1beta1" "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" rbac "k8s.io/api/rbac/v1alpha1" storage "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" authauthenticator "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/authenticatorfactory" authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/authorization/authorizerfactory" authorizerunion "k8s.io/apiserver/pkg/authorization/union" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/options" serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/apis/batch" policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1" "k8s.io/kubernetes/pkg/generated/openapi" kubeletclient "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/master" "k8s.io/kubernetes/pkg/version" ) // Config is a struct of configuration directives for NewMasterComponents. type Config struct { // If nil, a default is used, partially filled configs will not get populated. 
MasterConfig *master.Config StartReplicationManager bool // Client throttling qps QPS float32 // Client burst qps, also burst replicas allowed in rc manager Burst int // TODO: Add configs for endpoints controller, scheduler etc } // alwaysAllow always allows an action type alwaysAllow struct{} func (alwaysAllow) Authorize(requestAttributes authorizer.Attributes) (authorizer.Decision, string, error) { return authorizer.DecisionAllow, "always allow", nil } // alwaysEmpty simulates "no authentication" for old tests func alwaysEmpty(req *http.Request) (user.Info, bool, error) { return &user.DefaultInfo{ Name: "", }, true, nil } // MasterReceiver can be used to provide the master to a custom incoming server function type MasterReceiver interface { SetMaster(m *master.Master) } // MasterHolder implements type MasterHolder struct { Initialized chan struct{} M *master.Master } func (h *MasterHolder) SetMaster(m *master.Master) { h.M = m close(h.Initialized) } // startMasterOrDie starts a kubernetes master and an httpserver to handle api requests func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) { var m *master.Master var s *httptest.Server if incomingServer != nil { s = incomingServer } else { s = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { m.GenericAPIServer.Handler.ServeHTTP(w, req) })) } stopCh := make(chan struct{}) closeFn := func() { close(stopCh) s.Close() } if masterConfig == nil { masterConfig = NewMasterConfig() masterConfig.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, legacyscheme.Scheme) masterConfig.GenericConfig.OpenAPIConfig.Info = &spec.Info{ InfoProps: spec.InfoProps{ Title: "Kubernetes", Version: "unversioned", }, } masterConfig.GenericConfig.OpenAPIConfig.DefaultResponse = &spec.Response{ ResponseProps: spec.ResponseProps{ Description: "Default Response.", }, } masterConfig.GenericConfig.OpenAPIConfig.GetDefinitions = openapi.GetOpenAPIDefinitions masterConfig.GenericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig() } // set the loopback client config if masterConfig.GenericConfig.LoopbackClientConfig == nil { masterConfig.GenericConfig.LoopbackClientConfig = &restclient.Config{QPS: 50, Burst: 100, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}} } masterConfig.GenericConfig.LoopbackClientConfig.Host = s.URL privilegedLoopbackToken := uuid.NewRandom().String() // wrap any available authorizer tokens := make(map[string]*user.DefaultInfo) tokens[privilegedLoopbackToken] = &user.DefaultInfo{ Name: user.APIServerUser, UID: uuid.NewRandom().String(), Groups: []string{user.SystemPrivilegedGroup}, } tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens) if masterConfig.GenericConfig.Authentication.Authenticator == nil { masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty)) } else { masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authentication.Authenticator) } if masterConfig.GenericConfig.Authorization.Authorizer != nil { tokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup) masterConfig.GenericConfig.Authorization.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorization.Authorizer) } else { 
masterConfig.GenericConfig.Authorization.Authorizer = alwaysAllow{} } masterConfig.GenericConfig.LoopbackClientConfig.BearerToken = privilegedLoopbackToken clientset, err := clientset.NewForConfig(masterConfig.GenericConfig.LoopbackClientConfig) if err != nil { glog.Fatal(err) } sharedInformers := informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout) m, err = masterConfig.Complete(sharedInformers).New(genericapiserver.EmptyDelegate) if err != nil { closeFn() glog.Fatalf("error in bringing up the master: %v", err) } if masterReceiver != nil { masterReceiver.SetMaster(m) } // TODO have this start method actually use the normal start sequence for the API server // this method never actually calls the `Run` method for the API server // fire the post hooks ourselves m.GenericAPIServer.PrepareRun() m.GenericAPIServer.RunPostStartHooks(stopCh) cfg := *masterConfig.GenericConfig.LoopbackClientConfig cfg.ContentConfig.GroupVersion = &schema.GroupVersion{} privilegedClient, err := restclient.RESTClientFor(&cfg) if err != nil { closeFn() glog.Fatal(err) } err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { result := privilegedClient.Get().AbsPath("/healthz").Do() status := 0 result.StatusCode(&status) if status == 200 { return true, nil } return false, nil }) if err != nil { closeFn() glog.Fatal(err) } return m, s, closeFn } // Returns the master config appropriate for most integration tests. func NewIntegrationTestMasterConfig() *master.Config { masterConfig := NewMasterConfig() masterConfig.ExtraConfig.EnableCoreControllers = true masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4") masterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource() return masterConfig } // Returns a basic master config. func NewMasterConfig() *master.Config { // This causes the integration tests to exercise the etcd // prefix code, so please don't change without ensuring // sufficient coverage in other ways. 
etcdOptions := options.NewEtcdOptions(storagebackend.NewDefaultConfig(uuid.New(), nil)) etcdOptions.StorageConfig.ServerList = []string{GetEtcdURL()} info, _ := runtime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON) ns := NewSingleContentTypeSerializer(legacyscheme.Scheme, info) resourceEncoding := serverstorage.NewDefaultResourceEncodingConfig(legacyscheme.Registry) // FIXME (soltysh): this GroupVersionResource override should be configurable // we need to set both for the whole group and for cronjobs, separately resourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal}) resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: batch.GroupName, Resource: "cronjobs"}, schema.GroupVersion{Group: batch.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal}) // we also need to set both for the storage group and for volumeattachments, separately resourceEncoding.SetVersionEncoding(storage.GroupName, *testapi.Storage.GroupVersion(), schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal}) resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: storage.GroupName, Resource: "volumeattachments"}, schema.GroupVersion{Group: storage.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal}) storageFactory := serverstorage.NewDefaultStorageFactory(etcdOptions.StorageConfig, runtime.ContentTypeJSON, ns, resourceEncoding, master.DefaultAPIResourceConfigSource(), nil) storageFactory.SetSerializer( schema.GroupResource{Group: v1.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: autoscaling.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: batch.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: apps.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: extensions.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: policy.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: rbac.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: certificates.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: storage.GroupName, Resource: serverstorage.AllResources}, "", ns) genericConfig := genericapiserver.NewConfig(legacyscheme.Codecs) kubeVersion := version.Get() genericConfig.Version = &kubeVersion genericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer() err := etcdOptions.ApplyWithStorageFactoryTo(storageFactory, genericConfig) if err != nil { panic(err) } return &master.Config{ GenericConfig: genericConfig, ExtraConfig: master.ExtraConfig{ APIResourceConfigSource: master.DefaultAPIResourceConfigSource(), StorageFactory: storageFactory, EnableCoreControllers: true, KubeletClientConfig: kubeletclient.KubeletClientConfig{Port: 10250}, APIServerServicePort: 443, MasterCount: 1, }, } } // CloseFunc can be called to cleanup the master type CloseFunc func() func 
RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server, CloseFunc) { if masterConfig == nil { masterConfig = NewMasterConfig() masterConfig.GenericConfig.EnableProfiling = true } return startMasterOrDie(masterConfig, nil, nil) } func RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) { return startMasterOrDie(masterConfig, s, masterReceiver) } // SharedEtcd creates a storage config for a shared etcd instance, with a unique prefix. func SharedEtcd() *storagebackend.Config { cfg := storagebackend.NewDefaultConfig(path.Join(uuid.New(), "registry"), nil) cfg.ServerList = []string{GetEtcdURL()} return cfg }
test/integration/framework/master_utils.go
1
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.9979473948478699, 0.286044716835022, 0.00016520684584975243, 0.00028717477107420564, 0.4372846186161041 ]
{ "id": 4, "code_window": [ "\t\"k8s.io/kubernetes/test/integration/framework\"\n", ")\n", "\n", "func setup(t *testing.T, groupVersions ...schema.GroupVersion) (*httptest.Server, clientset.Interface, framework.CloseFunc) {\n", "\tmasterConfig := framework.NewIntegrationTestMasterConfig()\n", "\tmasterConfig.ExtraConfig.EnableCoreControllers = false\n", "\tif len(groupVersions) > 0 {\n", "\t\tresourceConfig := master.DefaultAPIResourceConfigSource()\n", "\t\tresourceConfig.EnableVersions(groupVersions...)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/apiserver/apiserver_test.go", "type": "replace", "edit_start_line_idx": 48 }
/* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by client-gen. DO NOT EDIT. package fake import ( v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" ) // FakePodDisruptionBudgets implements PodDisruptionBudgetInterface type FakePodDisruptionBudgets struct { Fake *FakePolicyV1beta1 ns string } var poddisruptionbudgetsResource = schema.GroupVersionResource{Group: "policy", Version: "v1beta1", Resource: "poddisruptionbudgets"} var poddisruptionbudgetsKind = schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodDisruptionBudget"} // Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. func (c *FakePodDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{}) if obj == nil { return nil, err } return obj.(*v1beta1.PodDisruptionBudget), err } // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. func (c *FakePodDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), &v1beta1.PodDisruptionBudgetList{}) if obj == nil { return nil, err } label, _, _ := testing.ExtractFromListOptions(opts) if label == nil { label = labels.Everything() } list := &v1beta1.PodDisruptionBudgetList{} for _, item := range obj.(*v1beta1.PodDisruptionBudgetList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } } return list, err } // Watch returns a watch.Interface that watches the requested podDisruptionBudgets. func (c *FakePodDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(poddisruptionbudgetsResource, c.ns, opts)) } // Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. func (c *FakePodDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) if obj == nil { return nil, err } return obj.(*v1beta1.PodDisruptionBudget), err } // Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. 
func (c *FakePodDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) if obj == nil { return nil, err } return obj.(*v1beta1.PodDisruptionBudget), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *FakePodDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) if obj == nil { return nil, err } return obj.(*v1beta1.PodDisruptionBudget), err } // Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. func (c *FakePodDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{}) return err } // DeleteCollection deletes a collection of objects. func (c *FakePodDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.PodDisruptionBudgetList{}) return err } // Patch applies the patch and returns the patched podDisruptionBudget. func (c *FakePodDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, data, subresources...), &v1beta1.PodDisruptionBudget{}) if obj == nil { return nil, err } return obj.(*v1beta1.PodDisruptionBudget), err }
staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.0001753458782332018, 0.0001683592126937583, 0.0001653023500693962, 0.00016761390725150704, 0.0000026495390557101928 ]
{ "id": 4, "code_window": [ "\t\"k8s.io/kubernetes/test/integration/framework\"\n", ")\n", "\n", "func setup(t *testing.T, groupVersions ...schema.GroupVersion) (*httptest.Server, clientset.Interface, framework.CloseFunc) {\n", "\tmasterConfig := framework.NewIntegrationTestMasterConfig()\n", "\tmasterConfig.ExtraConfig.EnableCoreControllers = false\n", "\tif len(groupVersions) > 0 {\n", "\t\tresourceConfig := master.DefaultAPIResourceConfigSource()\n", "\t\tresourceConfig.EnableVersions(groupVersions...)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/apiserver/apiserver_test.go", "type": "replace", "edit_start_line_idx": 48 }
# Logs Generator ## Overview Logs generator is a tool to create predictable load on the logs delivery system. Is generates random lines with predictable format and predictable average length. Each line can be later uniquely identified to ensure logs delivery. ## Usage Tool is parametrized with the total number of number that should be generated and the duration of the generation process. For example, if you want to create a throughput of 100 lines per second for a minute, you set total number of lines to 6000 and duration to 1 minute. Parameters are passed through environment variables. There are no defaults, you should always set up container parameters. Total number of line is parametrized through env variable `LOGS_GENERATOR_LINES_TOTAL` and duration in go format is parametrized through env variable `LOGS_GENERATOR_DURATION`. Inside the container all log lines are written to the stdout. Each line is on average 100 bytes long and follows this pattern: ``` 2000-12-31T12:59:59Z <id> <method> /api/v1/namespaces/<namespace>/endpoints/<random_string> <random_number> ``` Where `<id>` refers to the number from 0 to `total_lines - 1`, which is unique for each line in a given run of the container. ## Image Image is located in the public repository of Google Container Registry under the name ``` k8s.gcr.io/logs-generator:v0.1.1 ``` ## Examples ``` docker run -i \ -e "LOGS_GENERATOR_LINES_TOTAL=10" \ -e "LOGS_GENERATOR_DURATION=1s" \ k8s.gcr.io/logs-generator:v0.1.1 ``` ``` kubectl run logs-generator \ --generator=run-pod/v1 \ --image=k8s.gcr.io/logs-generator:v0.1.1 \ --restart=Never \ --env "LOGS_GENERATOR_LINES_TOTAL=1000" \ --env "LOGS_GENERATOR_DURATION=1m" ``` [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/test/images/logs-generator/README.md?pixel)]()
test/images/logs-generator/README.md
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.00021272296726237983, 0.00017419828509446234, 0.00016558960487600416, 0.00016680339467711747, 0.0000172443178598769 ]
{ "id": 4, "code_window": [ "\t\"k8s.io/kubernetes/test/integration/framework\"\n", ")\n", "\n", "func setup(t *testing.T, groupVersions ...schema.GroupVersion) (*httptest.Server, clientset.Interface, framework.CloseFunc) {\n", "\tmasterConfig := framework.NewIntegrationTestMasterConfig()\n", "\tmasterConfig.ExtraConfig.EnableCoreControllers = false\n", "\tif len(groupVersions) > 0 {\n", "\t\tresourceConfig := master.DefaultAPIResourceConfigSource()\n", "\t\tresourceConfig.EnableVersions(groupVersions...)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/apiserver/apiserver_test.go", "type": "replace", "edit_start_line_idx": 48 }
# Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe *.test *.prof
vendor/github.com/exponent-io/jsonpath/.gitignore
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.00017283338820561767, 0.00016909633995965123, 0.0001644514995859936, 0.00017000413208734244, 0.0000034815782328223577 ]
{ "id": 5, "code_window": [ "\n", "// Returns the master config appropriate for most integration tests.\n", "func NewIntegrationTestMasterConfig() *master.Config {\n", "\tmasterConfig := NewMasterConfig()\n", "\tmasterConfig.ExtraConfig.EnableCoreControllers = true\n", "\tmasterConfig.GenericConfig.PublicAddress = net.ParseIP(\"192.168.10.4\")\n", "\tmasterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()\n", "\treturn masterConfig\n", "}\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/framework/master_utils.go", "type": "replace", "edit_start_line_idx": 221 }
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package framework import ( "net" "net/http" "net/http/httptest" "path" "time" "github.com/go-openapi/spec" "github.com/golang/glog" "github.com/pborman/uuid" apps "k8s.io/api/apps/v1beta1" autoscaling "k8s.io/api/autoscaling/v1" certificates "k8s.io/api/certificates/v1beta1" "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" rbac "k8s.io/api/rbac/v1alpha1" storage "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" authauthenticator "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/authenticatorfactory" authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/authorization/authorizerfactory" authorizerunion "k8s.io/apiserver/pkg/authorization/union" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/options" serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/apis/batch" policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1" "k8s.io/kubernetes/pkg/generated/openapi" kubeletclient "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/master" "k8s.io/kubernetes/pkg/version" ) // Config is a struct of configuration directives for NewMasterComponents. type Config struct { // If nil, a default is used, partially filled configs will not get populated. 
MasterConfig *master.Config StartReplicationManager bool // Client throttling qps QPS float32 // Client burst qps, also burst replicas allowed in rc manager Burst int // TODO: Add configs for endpoints controller, scheduler etc } // alwaysAllow always allows an action type alwaysAllow struct{} func (alwaysAllow) Authorize(requestAttributes authorizer.Attributes) (authorizer.Decision, string, error) { return authorizer.DecisionAllow, "always allow", nil } // alwaysEmpty simulates "no authentication" for old tests func alwaysEmpty(req *http.Request) (user.Info, bool, error) { return &user.DefaultInfo{ Name: "", }, true, nil } // MasterReceiver can be used to provide the master to a custom incoming server function type MasterReceiver interface { SetMaster(m *master.Master) } // MasterHolder implements type MasterHolder struct { Initialized chan struct{} M *master.Master } func (h *MasterHolder) SetMaster(m *master.Master) { h.M = m close(h.Initialized) } // startMasterOrDie starts a kubernetes master and an httpserver to handle api requests func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) { var m *master.Master var s *httptest.Server if incomingServer != nil { s = incomingServer } else { s = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { m.GenericAPIServer.Handler.ServeHTTP(w, req) })) } stopCh := make(chan struct{}) closeFn := func() { close(stopCh) s.Close() } if masterConfig == nil { masterConfig = NewMasterConfig() masterConfig.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, legacyscheme.Scheme) masterConfig.GenericConfig.OpenAPIConfig.Info = &spec.Info{ InfoProps: spec.InfoProps{ Title: "Kubernetes", Version: "unversioned", }, } masterConfig.GenericConfig.OpenAPIConfig.DefaultResponse = &spec.Response{ ResponseProps: spec.ResponseProps{ Description: "Default Response.", }, } masterConfig.GenericConfig.OpenAPIConfig.GetDefinitions = openapi.GetOpenAPIDefinitions masterConfig.GenericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig() } // set the loopback client config if masterConfig.GenericConfig.LoopbackClientConfig == nil { masterConfig.GenericConfig.LoopbackClientConfig = &restclient.Config{QPS: 50, Burst: 100, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}} } masterConfig.GenericConfig.LoopbackClientConfig.Host = s.URL privilegedLoopbackToken := uuid.NewRandom().String() // wrap any available authorizer tokens := make(map[string]*user.DefaultInfo) tokens[privilegedLoopbackToken] = &user.DefaultInfo{ Name: user.APIServerUser, UID: uuid.NewRandom().String(), Groups: []string{user.SystemPrivilegedGroup}, } tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens) if masterConfig.GenericConfig.Authentication.Authenticator == nil { masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty)) } else { masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authentication.Authenticator) } if masterConfig.GenericConfig.Authorization.Authorizer != nil { tokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup) masterConfig.GenericConfig.Authorization.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorization.Authorizer) } else { 
masterConfig.GenericConfig.Authorization.Authorizer = alwaysAllow{} } masterConfig.GenericConfig.LoopbackClientConfig.BearerToken = privilegedLoopbackToken clientset, err := clientset.NewForConfig(masterConfig.GenericConfig.LoopbackClientConfig) if err != nil { glog.Fatal(err) } sharedInformers := informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout) m, err = masterConfig.Complete(sharedInformers).New(genericapiserver.EmptyDelegate) if err != nil { closeFn() glog.Fatalf("error in bringing up the master: %v", err) } if masterReceiver != nil { masterReceiver.SetMaster(m) } // TODO have this start method actually use the normal start sequence for the API server // this method never actually calls the `Run` method for the API server // fire the post hooks ourselves m.GenericAPIServer.PrepareRun() m.GenericAPIServer.RunPostStartHooks(stopCh) cfg := *masterConfig.GenericConfig.LoopbackClientConfig cfg.ContentConfig.GroupVersion = &schema.GroupVersion{} privilegedClient, err := restclient.RESTClientFor(&cfg) if err != nil { closeFn() glog.Fatal(err) } err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { result := privilegedClient.Get().AbsPath("/healthz").Do() status := 0 result.StatusCode(&status) if status == 200 { return true, nil } return false, nil }) if err != nil { closeFn() glog.Fatal(err) } return m, s, closeFn } // Returns the master config appropriate for most integration tests. func NewIntegrationTestMasterConfig() *master.Config { masterConfig := NewMasterConfig() masterConfig.ExtraConfig.EnableCoreControllers = true masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4") masterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource() return masterConfig } // Returns a basic master config. func NewMasterConfig() *master.Config { // This causes the integration tests to exercise the etcd // prefix code, so please don't change without ensuring // sufficient coverage in other ways. 
etcdOptions := options.NewEtcdOptions(storagebackend.NewDefaultConfig(uuid.New(), nil)) etcdOptions.StorageConfig.ServerList = []string{GetEtcdURL()} info, _ := runtime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON) ns := NewSingleContentTypeSerializer(legacyscheme.Scheme, info) resourceEncoding := serverstorage.NewDefaultResourceEncodingConfig(legacyscheme.Registry) // FIXME (soltysh): this GroupVersionResource override should be configurable // we need to set both for the whole group and for cronjobs, separately resourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal}) resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: batch.GroupName, Resource: "cronjobs"}, schema.GroupVersion{Group: batch.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal}) // we also need to set both for the storage group and for volumeattachments, separately resourceEncoding.SetVersionEncoding(storage.GroupName, *testapi.Storage.GroupVersion(), schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal}) resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: storage.GroupName, Resource: "volumeattachments"}, schema.GroupVersion{Group: storage.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal}) storageFactory := serverstorage.NewDefaultStorageFactory(etcdOptions.StorageConfig, runtime.ContentTypeJSON, ns, resourceEncoding, master.DefaultAPIResourceConfigSource(), nil) storageFactory.SetSerializer( schema.GroupResource{Group: v1.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: autoscaling.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: batch.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: apps.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: extensions.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: policy.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: rbac.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: certificates.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: storage.GroupName, Resource: serverstorage.AllResources}, "", ns) genericConfig := genericapiserver.NewConfig(legacyscheme.Codecs) kubeVersion := version.Get() genericConfig.Version = &kubeVersion genericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer() err := etcdOptions.ApplyWithStorageFactoryTo(storageFactory, genericConfig) if err != nil { panic(err) } return &master.Config{ GenericConfig: genericConfig, ExtraConfig: master.ExtraConfig{ APIResourceConfigSource: master.DefaultAPIResourceConfigSource(), StorageFactory: storageFactory, EnableCoreControllers: true, KubeletClientConfig: kubeletclient.KubeletClientConfig{Port: 10250}, APIServerServicePort: 443, MasterCount: 1, }, } } // CloseFunc can be called to cleanup the master type CloseFunc func() func 
RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server, CloseFunc) { if masterConfig == nil { masterConfig = NewMasterConfig() masterConfig.GenericConfig.EnableProfiling = true } return startMasterOrDie(masterConfig, nil, nil) } func RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) { return startMasterOrDie(masterConfig, s, masterReceiver) } // SharedEtcd creates a storage config for a shared etcd instance, with a unique prefix. func SharedEtcd() *storagebackend.Config { cfg := storagebackend.NewDefaultConfig(path.Join(uuid.New(), "registry"), nil) cfg.ServerList = []string{GetEtcdURL()} return cfg }
test/integration/framework/master_utils.go
1
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.9896204471588135, 0.17812268435955048, 0.0001667821779847145, 0.0003118442837148905, 0.35722729563713074 ]
{ "id": 5, "code_window": [ "\n", "// Returns the master config appropriate for most integration tests.\n", "func NewIntegrationTestMasterConfig() *master.Config {\n", "\tmasterConfig := NewMasterConfig()\n", "\tmasterConfig.ExtraConfig.EnableCoreControllers = true\n", "\tmasterConfig.GenericConfig.PublicAddress = net.ParseIP(\"192.168.10.4\")\n", "\tmasterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()\n", "\treturn masterConfig\n", "}\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/framework/master_utils.go", "type": "replace", "edit_start_line_idx": 221 }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package validation import ( unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" api "k8s.io/kubernetes/pkg/apis/core" apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/apis/networking" ) // ValidateNetworkPolicyName can be used to check whether the given networkpolicy // name is valid. func ValidateNetworkPolicyName(name string, prefix bool) []string { return apivalidation.NameIsDNSSubdomain(name, prefix) } // ValidateNetworkPolicyPort validates a NetworkPolicyPort func ValidateNetworkPolicyPort(port *networking.NetworkPolicyPort, portPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if port.Protocol != nil && *port.Protocol != api.ProtocolTCP && *port.Protocol != api.ProtocolUDP { allErrs = append(allErrs, field.NotSupported(portPath.Child("protocol"), *port.Protocol, []string{string(api.ProtocolTCP), string(api.ProtocolUDP)})) } if port.Port != nil { if port.Port.Type == intstr.Int { for _, msg := range validation.IsValidPortNum(int(port.Port.IntVal)) { allErrs = append(allErrs, field.Invalid(portPath.Child("port"), port.Port.IntVal, msg)) } } else { for _, msg := range validation.IsValidPortName(port.Port.StrVal) { allErrs = append(allErrs, field.Invalid(portPath.Child("port"), port.Port.StrVal, msg)) } } } return allErrs } // ValidateNetworkPolicyPeer validates a NetworkPolicyPeer func ValidateNetworkPolicyPeer(peer *networking.NetworkPolicyPeer, peerPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} numPeers := 0 if peer.PodSelector != nil { numPeers++ allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(peer.PodSelector, peerPath.Child("podSelector"))...) } if peer.NamespaceSelector != nil { numPeers++ allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(peer.NamespaceSelector, peerPath.Child("namespaceSelector"))...) } if peer.IPBlock != nil { numPeers++ allErrs = append(allErrs, ValidateIPBlock(peer.IPBlock, peerPath.Child("ipBlock"))...) } if numPeers == 0 { allErrs = append(allErrs, field.Required(peerPath, "must specify a peer")) } else if numPeers > 1 && peer.IPBlock != nil { allErrs = append(allErrs, field.Forbidden(peerPath, "may not specify both ipBlock and another peer")) } return allErrs } // ValidateNetworkPolicySpec tests if required fields in the networkpolicy spec are set. func ValidateNetworkPolicySpec(spec *networking.NetworkPolicySpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(&spec.PodSelector, fldPath.Child("podSelector"))...) // Validate ingress rules. 
for i, ingress := range spec.Ingress { ingressPath := fldPath.Child("ingress").Index(i) for i, port := range ingress.Ports { portPath := ingressPath.Child("ports").Index(i) allErrs = append(allErrs, ValidateNetworkPolicyPort(&port, portPath)...) } for i, from := range ingress.From { fromPath := ingressPath.Child("from").Index(i) allErrs = append(allErrs, ValidateNetworkPolicyPeer(&from, fromPath)...) } } // Validate egress rules for i, egress := range spec.Egress { egressPath := fldPath.Child("egress").Index(i) for i, port := range egress.Ports { portPath := egressPath.Child("ports").Index(i) allErrs = append(allErrs, ValidateNetworkPolicyPort(&port, portPath)...) } for i, to := range egress.To { toPath := egressPath.Child("to").Index(i) allErrs = append(allErrs, ValidateNetworkPolicyPeer(&to, toPath)...) } } // Validate PolicyTypes allowed := sets.NewString(string(networking.PolicyTypeIngress), string(networking.PolicyTypeEgress)) if len(spec.PolicyTypes) > len(allowed) { allErrs = append(allErrs, field.Invalid(fldPath.Child("policyTypes"), &spec.PolicyTypes, "may not specify more than two policyTypes")) return allErrs } for i, pType := range spec.PolicyTypes { policyPath := fldPath.Child("policyTypes").Index(i) for _, p := range spec.PolicyTypes { if !allowed.Has(string(p)) { allErrs = append(allErrs, field.NotSupported(policyPath, pType, []string{string(networking.PolicyTypeIngress), string(networking.PolicyTypeEgress)})) } } } return allErrs } // ValidateNetworkPolicy validates a networkpolicy. func ValidateNetworkPolicy(np *networking.NetworkPolicy) field.ErrorList { allErrs := apivalidation.ValidateObjectMeta(&np.ObjectMeta, true, ValidateNetworkPolicyName, field.NewPath("metadata")) allErrs = append(allErrs, ValidateNetworkPolicySpec(&np.Spec, field.NewPath("spec"))...) return allErrs } // ValidateNetworkPolicyUpdate tests if an update to a NetworkPolicy is valid. func ValidateNetworkPolicyUpdate(update, old *networking.NetworkPolicy) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) allErrs = append(allErrs, ValidateNetworkPolicySpec(&update.Spec, field.NewPath("spec"))...) return allErrs } // ValidateIPBlock validates a cidr and the except fields of an IpBlock NetworkPolicyPeer func ValidateIPBlock(ipb *networking.IPBlock, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(ipb.CIDR) == 0 || ipb.CIDR == "" { allErrs = append(allErrs, field.Required(fldPath.Child("cidr"), "")) return allErrs } cidrIPNet, err := apivalidation.ValidateCIDR(ipb.CIDR) if err != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("cidr"), ipb.CIDR, "not a valid CIDR")) return allErrs } exceptCIDR := ipb.Except for i, exceptIP := range exceptCIDR { exceptPath := fldPath.Child("except").Index(i) exceptCIDR, err := apivalidation.ValidateCIDR(exceptIP) if err != nil { allErrs = append(allErrs, field.Invalid(exceptPath, exceptIP, "not a valid CIDR")) return allErrs } if !cidrIPNet.Contains(exceptCIDR.IP) { allErrs = append(allErrs, field.Invalid(exceptPath, exceptCIDR.IP, "not within CIDR range")) } } return allErrs }
pkg/apis/networking/validation/validation.go
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.0005724803195334971, 0.000208878394914791, 0.0001648614415898919, 0.00017567633767612278, 0.00010451889829710126 ]
{ "id": 5, "code_window": [ "\n", "// Returns the master config appropriate for most integration tests.\n", "func NewIntegrationTestMasterConfig() *master.Config {\n", "\tmasterConfig := NewMasterConfig()\n", "\tmasterConfig.ExtraConfig.EnableCoreControllers = true\n", "\tmasterConfig.GenericConfig.PublicAddress = net.ParseIP(\"192.168.10.4\")\n", "\tmasterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()\n", "\treturn masterConfig\n", "}\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/framework/master_utils.go", "type": "replace", "edit_start_line_idx": 221 }
approvers:
- caesarxuchao
- deads2k
- krousey
- lavalamp
- smarterclayton
- sttts
- liggitt
reviewers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- yujuhong
- derekwaynecarr
- caesarxuchao
- vishh
- mikedanese
- liggitt
- nikhiljindal
- gmarek
- erictune
- davidopp
- pmorie
- sttts
- dchen1107
- saad-ali
- zmerlynn
- luxas
- janetkuo
- justinsb
- roberthbailey
- ncdc
- tallclair
- yifan-gu
- eparis
- mwielgus
- timothysc
- feiskyer
- jlowdermilk
- soltysh
- piosz
- jsafrane
staging/src/k8s.io/client-go/OWNERS
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.0001768410438671708, 0.0001756980491336435, 0.00017520147957839072, 0.00017541510169394314, 5.979906632092025e-7 ]
{ "id": 5, "code_window": [ "\n", "// Returns the master config appropriate for most integration tests.\n", "func NewIntegrationTestMasterConfig() *master.Config {\n", "\tmasterConfig := NewMasterConfig()\n", "\tmasterConfig.ExtraConfig.EnableCoreControllers = true\n", "\tmasterConfig.GenericConfig.PublicAddress = net.ParseIP(\"192.168.10.4\")\n", "\tmasterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()\n", "\treturn masterConfig\n", "}\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/framework/master_utils.go", "type": "replace", "edit_start_line_idx": 221 }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package vsphere import ( "fmt" "sync" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "k8s.io/api/core/v1" storageV1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/utils" ) /* Induce stress to create volumes in parallel with multiple threads based on user configurable values for number of threads and iterations per thread. The following actions will be performed as part of this test. 1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capalibilies.) 2. READ VCP_STRESS_INSTANCES, VCP_STRESS_ITERATIONS, VSPHERE_SPBM_POLICY_NAME and VSPHERE_DATASTORE from System Environment. 3. Launch goroutine for volume lifecycle operations. 4. Each instance of routine iterates for n times, where n is read from system env - VCP_STRESS_ITERATIONS 5. Each iteration creates 1 PVC, 1 POD using the provisioned PV, Verify disk is attached to the node, Verify pod can access the volume, delete the pod and finally delete the PVC. */ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() { f := framework.NewDefaultFramework("vcp-stress") var ( client clientset.Interface namespace string instances int iterations int policyName string datastoreName string err error scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4} ) BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") client = f.ClientSet namespace = f.Namespace.Name nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") // if VCP_STRESS_INSTANCES = 12 and VCP_STRESS_ITERATIONS is 10. 12 threads will run in parallel for 10 times. // Resulting 120 Volumes and POD Creation. Volumes will be provisioned with each different types of Storage Class, // Each iteration creates PVC, verify PV is provisioned, then creates a pod, verify volume is attached to the node, and then delete the pod and delete pvc. 
instances = GetAndExpectIntEnvVar(VCPStressInstances) Expect(instances <= volumesPerNode*len(nodeList.Items)).To(BeTrue(), fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items))) Expect(instances > len(scNames)).To(BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes") iterations = GetAndExpectIntEnvVar(VCPStressIterations) Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_STRESS_ITERATIONS") Expect(iterations > 0).To(BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0") policyName = GetAndExpectStringEnvVar(SPBMPolicyName) datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) }) It("vsphere stress tests", func() { scArrays := make([]*storageV1.StorageClass, len(scNames)) for index, scname := range scNames { // Create vSphere Storage Class By(fmt.Sprintf("Creating Storage Class : %v", scname)) var sc *storageV1.StorageClass var err error switch scname { case storageclass1: sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil)) case storageclass2: var scVSanParameters map[string]string scVSanParameters = make(map[string]string) scVSanParameters[Policy_HostFailuresToTolerate] = "1" sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters)) case storageclass3: var scSPBMPolicyParameters map[string]string scSPBMPolicyParameters = make(map[string]string) scSPBMPolicyParameters[SpbmStoragePolicy] = policyName sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters)) case storageclass4: var scWithDSParameters map[string]string scWithDSParameters = make(map[string]string) scWithDSParameters[Datastore] = datastoreName scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters) sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec) } Expect(sc).NotTo(BeNil()) Expect(err).NotTo(HaveOccurred()) defer client.StorageV1().StorageClasses().Delete(scname, nil) scArrays[index] = sc } var wg sync.WaitGroup wg.Add(instances) for instanceCount := 0; instanceCount < instances; instanceCount++ { instanceId := fmt.Sprintf("Thread:%v", instanceCount+1) go PerformVolumeLifeCycleInParallel(f, client, namespace, instanceId, scArrays[instanceCount%len(scArrays)], iterations, &wg) } wg.Wait() }) }) // goroutine to perform volume lifecycle operations in parallel func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storageV1.StorageClass, iterations int, wg *sync.WaitGroup) { defer wg.Done() defer GinkgoRecover() for iterationCount := 0; iterationCount < iterations; iterationCount++ { logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1) By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "1Gi", sc)) Expect(err).NotTo(HaveOccurred()) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name)) persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("%v Creating Pod 
using the claim: %v", logPrefix, pvclaim.Name)) // Create pod to attach Volume to Node pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name)) Expect(f.WaitForPodRunningSlow(pod.Name)).NotTo(HaveOccurred()) // Get the copy of the Pod to know the assigned node name. pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) Expect(isVolumeAttached).To(BeTrue()) Expect(verifyDiskAttachedError).NotTo(HaveOccurred()) By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name)) verifyVSphereVolumesAccessible(client, pod, persistentvolumes) By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name)) err = framework.DeletePodWithWait(f, client, pod) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) err = waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name)) Expect(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)).NotTo(HaveOccurred()) } }
test/e2e/storage/vsphere/vsphere_stress.go
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.00017997770919464529, 0.00017505483992863446, 0.00016530926222912967, 0.0001761142339091748, 0.000003924139491573442 ]
{ "id": 6, "code_window": [ "\treturn &master.Config{\n", "\t\tGenericConfig: genericConfig,\n", "\t\tExtraConfig: master.ExtraConfig{\n", "\t\t\tAPIResourceConfigSource: master.DefaultAPIResourceConfigSource(),\n", "\t\t\tStorageFactory: storageFactory,\n", "\t\t\tEnableCoreControllers: true,\n", "\t\t\tKubeletClientConfig: kubeletclient.KubeletClientConfig{Port: 10250},\n", "\t\t\tAPIServerServicePort: 443,\n", "\t\t\tMasterCount: 1,\n", "\t\t},\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/framework/master_utils.go", "type": "replace", "edit_start_line_idx": 300 }
test/integration/framework/master_utils.go
1
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.10397279262542725, 0.0063207149505615234, 0.0001615063811186701, 0.00020992272766306996, 0.021092934533953667 ]
{ "id": 6, "code_window": [ "\treturn &master.Config{\n", "\t\tGenericConfig: genericConfig,\n", "\t\tExtraConfig: master.ExtraConfig{\n", "\t\t\tAPIResourceConfigSource: master.DefaultAPIResourceConfigSource(),\n", "\t\t\tStorageFactory: storageFactory,\n", "\t\t\tEnableCoreControllers: true,\n", "\t\t\tKubeletClientConfig: kubeletclient.KubeletClientConfig{Port: 10250},\n", "\t\t\tAPIServerServicePort: 443,\n", "\t\t\tMasterCount: 1,\n", "\t\t},\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/framework/master_utils.go", "type": "replace", "edit_start_line_idx": 300 }
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package git_repo import ( "fmt" "io/ioutil" "path" "strings" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) // This is the primary entrypoint for volume plugins. func ProbeVolumePlugins() []volume.VolumePlugin { return []volume.VolumePlugin{&gitRepoPlugin{nil}} } type gitRepoPlugin struct { host volume.VolumeHost } var _ volume.VolumePlugin = &gitRepoPlugin{} func wrappedVolumeSpec() volume.Spec { return volume.Spec{ Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}}, } } const ( gitRepoPluginName = "kubernetes.io/git-repo" ) func (plugin *gitRepoPlugin) Init(host volume.VolumeHost) error { plugin.host = host return nil } func (plugin *gitRepoPlugin) GetPluginName() string { return gitRepoPluginName } func (plugin *gitRepoPlugin) GetVolumeName(spec *volume.Spec) (string, error) { volumeSource, _ := getVolumeSource(spec) if volumeSource == nil { return "", fmt.Errorf("Spec does not reference a Git repo volume type") } return fmt.Sprintf( "%v:%v:%v", volumeSource.Repository, volumeSource.Revision, volumeSource.Directory), nil } func (plugin *gitRepoPlugin) CanSupport(spec *volume.Spec) bool { return spec.Volume != nil && spec.Volume.GitRepo != nil } func (plugin *gitRepoPlugin) RequiresRemount() bool { return false } func (plugin *gitRepoPlugin) SupportsMountOption() bool { return false } func (plugin *gitRepoPlugin) SupportsBulkVolumeVerification() bool { return false } func (plugin *gitRepoPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) { return &gitRepoVolumeMounter{ gitRepoVolume: &gitRepoVolume{ volName: spec.Name(), podUID: pod.UID, plugin: plugin, }, pod: *pod, source: spec.Volume.GitRepo.Repository, revision: spec.Volume.GitRepo.Revision, target: spec.Volume.GitRepo.Directory, mounter: plugin.host.GetMounter(plugin.GetPluginName()), exec: plugin.host.GetExec(plugin.GetPluginName()), opts: opts, }, nil } func (plugin *gitRepoPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { return &gitRepoVolumeUnmounter{ &gitRepoVolume{ volName: volName, podUID: podUID, plugin: plugin, }, }, nil } func (plugin *gitRepoPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { gitVolume := &v1.Volume{ Name: volumeName, VolumeSource: v1.VolumeSource{ GitRepo: &v1.GitRepoVolumeSource{}, }, } return volume.NewSpecFromVolume(gitVolume), nil } // gitRepo volumes are directories which are pre-filled from a git repository. // These do not persist beyond the lifetime of a pod. 
type gitRepoVolume struct { volName string podUID types.UID plugin *gitRepoPlugin volume.MetricsNil } var _ volume.Volume = &gitRepoVolume{} func (gr *gitRepoVolume) GetPath() string { name := gitRepoPluginName return gr.plugin.host.GetPodVolumeDir(gr.podUID, utilstrings.EscapeQualifiedNameForDisk(name), gr.volName) } // gitRepoVolumeMounter builds git repo volumes. type gitRepoVolumeMounter struct { *gitRepoVolume pod v1.Pod source string revision string target string mounter mount.Interface exec mount.Exec opts volume.VolumeOptions } var _ volume.Mounter = &gitRepoVolumeMounter{} func (b *gitRepoVolumeMounter) GetAttributes() volume.Attributes { return volume.Attributes{ ReadOnly: false, Managed: true, SupportsSELinux: true, // xattr change should be okay, TODO: double check } } // Checks prior to mount operations to verify that the required components (binaries, etc.) // to mount the volume are available on the underlying node. // If not, it returns an error func (b *gitRepoVolumeMounter) CanMount() error { return nil } // SetUp creates new directory and clones a git repo. func (b *gitRepoVolumeMounter) SetUp(fsGroup *int64) error { return b.SetUpAt(b.GetPath(), fsGroup) } // SetUpAt creates new directory and clones a git repo. func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if volumeutil.IsReady(b.getMetaDir()) { return nil } // Wrap EmptyDir, let it do the setup. wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, b.opts) if err != nil { return err } if err := wrapped.SetUpAt(dir, fsGroup); err != nil { return err } args := []string{"clone", b.source} if len(b.target) != 0 { args = append(args, b.target) } if output, err := b.execGit(args, dir); err != nil { return fmt.Errorf("failed to exec 'git %s': %s: %v", strings.Join(args, " "), output, err) } files, err := ioutil.ReadDir(dir) if err != nil { return err } if len(b.revision) == 0 { // Done! volumeutil.SetReady(b.getMetaDir()) return nil } var subdir string switch { case b.target == ".": // if target dir is '.', use the current dir subdir = path.Join(dir) case len(files) == 1: // if target is not '.', use the generated folder subdir = path.Join(dir, files[0].Name()) default: // if target is not '.', but generated many files, it's wrong return fmt.Errorf("unexpected directory contents: %v", files) } if output, err := b.execGit([]string{"checkout", b.revision}, subdir); err != nil { return fmt.Errorf("failed to exec 'git checkout %s': %s: %v", b.revision, output, err) } if output, err := b.execGit([]string{"reset", "--hard"}, subdir); err != nil { return fmt.Errorf("failed to exec 'git reset --hard': %s: %v", output, err) } volume.SetVolumeOwnership(b, fsGroup) volumeutil.SetReady(b.getMetaDir()) return nil } func (b *gitRepoVolumeMounter) getMetaDir() string { return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(gitRepoPluginName)), b.volName) } func (b *gitRepoVolumeMounter) execGit(args []string, dir string) ([]byte, error) { // run git -C <dir> <args> fullArgs := append([]string{"-C", dir}, args...) return b.exec.Run("git", fullArgs...) } // gitRepoVolumeUnmounter cleans git repo volumes. type gitRepoVolumeUnmounter struct { *gitRepoVolume } var _ volume.Unmounter = &gitRepoVolumeUnmounter{} // TearDown simply deletes everything in the directory. func (c *gitRepoVolumeUnmounter) TearDown() error { return c.TearDownAt(c.GetPath()) } // TearDownAt simply deletes everything in the directory. 
func (c *gitRepoVolumeUnmounter) TearDownAt(dir string) error { return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID) } func getVolumeSource(spec *volume.Spec) (*v1.GitRepoVolumeSource, bool) { var readOnly bool var volumeSource *v1.GitRepoVolumeSource if spec.Volume != nil && spec.Volume.GitRepo != nil { volumeSource = spec.Volume.GitRepo readOnly = spec.ReadOnly } return volumeSource, readOnly }
pkg/volume/git_repo/git_repo.go
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.0001776248391252011, 0.00017080926045309752, 0.00016432703705504537, 0.00017079588724300265, 0.0000028307024422247196 ]
{ "id": 6, "code_window": [ "\treturn &master.Config{\n", "\t\tGenericConfig: genericConfig,\n", "\t\tExtraConfig: master.ExtraConfig{\n", "\t\t\tAPIResourceConfigSource: master.DefaultAPIResourceConfigSource(),\n", "\t\t\tStorageFactory: storageFactory,\n", "\t\t\tEnableCoreControllers: true,\n", "\t\t\tKubeletClientConfig: kubeletclient.KubeletClientConfig{Port: 10250},\n", "\t\t\tAPIServerServicePort: 443,\n", "\t\t\tMasterCount: 1,\n", "\t\t},\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/framework/master_utils.go", "type": "replace", "edit_start_line_idx": 300 }
package netlink import ( "fmt" "io/ioutil" "strconv" "strings" "syscall" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" ) // NOTE function is here because it uses other linux functions func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem { var limit uint32 = 1000 var lossCorr, delayCorr, duplicateCorr uint32 var reorderProb, reorderCorr uint32 var corruptProb, corruptCorr uint32 latency := nattrs.Latency loss := Percentage2u32(nattrs.Loss) gap := nattrs.Gap duplicate := Percentage2u32(nattrs.Duplicate) jitter := nattrs.Jitter // Correlation if latency > 0 && jitter > 0 { delayCorr = Percentage2u32(nattrs.DelayCorr) } if loss > 0 { lossCorr = Percentage2u32(nattrs.LossCorr) } if duplicate > 0 { duplicateCorr = Percentage2u32(nattrs.DuplicateCorr) } // FIXME should validate values(like loss/duplicate are percentages...) latency = time2Tick(latency) if nattrs.Limit != 0 { limit = nattrs.Limit } // Jitter is only value if latency is > 0 if latency > 0 { jitter = time2Tick(jitter) } reorderProb = Percentage2u32(nattrs.ReorderProb) reorderCorr = Percentage2u32(nattrs.ReorderCorr) if reorderProb > 0 { // ERROR if lantency == 0 if gap == 0 { gap = 1 } } corruptProb = Percentage2u32(nattrs.CorruptProb) corruptCorr = Percentage2u32(nattrs.CorruptCorr) return &Netem{ QdiscAttrs: attrs, Latency: latency, DelayCorr: delayCorr, Limit: limit, Loss: loss, LossCorr: lossCorr, Gap: gap, Duplicate: duplicate, DuplicateCorr: duplicateCorr, Jitter: jitter, ReorderProb: reorderProb, ReorderCorr: reorderCorr, CorruptProb: corruptProb, CorruptCorr: corruptCorr, } } // QdiscDel will delete a qdisc from the system. // Equivalent to: `tc qdisc del $qdisc` func QdiscDel(qdisc Qdisc) error { return pkgHandle.QdiscDel(qdisc) } // QdiscDel will delete a qdisc from the system. // Equivalent to: `tc qdisc del $qdisc` func (h *Handle) QdiscDel(qdisc Qdisc) error { return h.qdiscModify(unix.RTM_DELQDISC, 0, qdisc) } // QdiscChange will change a qdisc in place // Equivalent to: `tc qdisc change $qdisc` // The parent and handle MUST NOT be changed. func QdiscChange(qdisc Qdisc) error { return pkgHandle.QdiscChange(qdisc) } // QdiscChange will change a qdisc in place // Equivalent to: `tc qdisc change $qdisc` // The parent and handle MUST NOT be changed. func (h *Handle) QdiscChange(qdisc Qdisc) error { return h.qdiscModify(unix.RTM_NEWQDISC, 0, qdisc) } // QdiscReplace will replace a qdisc to the system. // Equivalent to: `tc qdisc replace $qdisc` // The handle MUST change. func QdiscReplace(qdisc Qdisc) error { return pkgHandle.QdiscReplace(qdisc) } // QdiscReplace will replace a qdisc to the system. // Equivalent to: `tc qdisc replace $qdisc` // The handle MUST change. func (h *Handle) QdiscReplace(qdisc Qdisc) error { return h.qdiscModify( unix.RTM_NEWQDISC, unix.NLM_F_CREATE|unix.NLM_F_REPLACE, qdisc) } // QdiscAdd will add a qdisc to the system. // Equivalent to: `tc qdisc add $qdisc` func QdiscAdd(qdisc Qdisc) error { return pkgHandle.QdiscAdd(qdisc) } // QdiscAdd will add a qdisc to the system. 
// Equivalent to: `tc qdisc add $qdisc` func (h *Handle) QdiscAdd(qdisc Qdisc) error { return h.qdiscModify( unix.RTM_NEWQDISC, unix.NLM_F_CREATE|unix.NLM_F_EXCL, qdisc) } func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error { req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK) base := qdisc.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, Ifindex: int32(base.LinkIndex), Handle: base.Handle, Parent: base.Parent, } req.AddData(msg) // When deleting don't bother building the rest of the netlink payload if cmd != unix.RTM_DELQDISC { if err := qdiscPayload(req, qdisc); err != nil { return err } } _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(qdisc.Type()))) options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) switch qdisc := qdisc.(type) { case *Prio: tcmap := nl.TcPrioMap{ Bands: int32(qdisc.Bands), Priomap: qdisc.PriorityMap, } options = nl.NewRtAttr(nl.TCA_OPTIONS, tcmap.Serialize()) case *Tbf: opt := nl.TcTbfQopt{} opt.Rate.Rate = uint32(qdisc.Rate) opt.Peakrate.Rate = uint32(qdisc.Peakrate) opt.Limit = qdisc.Limit opt.Buffer = qdisc.Buffer nl.NewRtAttrChild(options, nl.TCA_TBF_PARMS, opt.Serialize()) if qdisc.Rate >= uint64(1<<32) { nl.NewRtAttrChild(options, nl.TCA_TBF_RATE64, nl.Uint64Attr(qdisc.Rate)) } if qdisc.Peakrate >= uint64(1<<32) { nl.NewRtAttrChild(options, nl.TCA_TBF_PRATE64, nl.Uint64Attr(qdisc.Peakrate)) } if qdisc.Peakrate > 0 { nl.NewRtAttrChild(options, nl.TCA_TBF_PBURST, nl.Uint32Attr(qdisc.Minburst)) } case *Htb: opt := nl.TcHtbGlob{} opt.Version = qdisc.Version opt.Rate2Quantum = qdisc.Rate2Quantum opt.Defcls = qdisc.Defcls // TODO: Handle Debug properly. For now default to 0 opt.Debug = qdisc.Debug opt.DirectPkts = qdisc.DirectPkts nl.NewRtAttrChild(options, nl.TCA_HTB_INIT, opt.Serialize()) // nl.NewRtAttrChild(options, nl.TCA_HTB_DIRECT_QLEN, opt.Serialize()) case *Netem: opt := nl.TcNetemQopt{} opt.Latency = qdisc.Latency opt.Limit = qdisc.Limit opt.Loss = qdisc.Loss opt.Gap = qdisc.Gap opt.Duplicate = qdisc.Duplicate opt.Jitter = qdisc.Jitter options = nl.NewRtAttr(nl.TCA_OPTIONS, opt.Serialize()) // Correlation corr := nl.TcNetemCorr{} corr.DelayCorr = qdisc.DelayCorr corr.LossCorr = qdisc.LossCorr corr.DupCorr = qdisc.DuplicateCorr if corr.DelayCorr > 0 || corr.LossCorr > 0 || corr.DupCorr > 0 { nl.NewRtAttrChild(options, nl.TCA_NETEM_CORR, corr.Serialize()) } // Corruption corruption := nl.TcNetemCorrupt{} corruption.Probability = qdisc.CorruptProb corruption.Correlation = qdisc.CorruptCorr if corruption.Probability > 0 { nl.NewRtAttrChild(options, nl.TCA_NETEM_CORRUPT, corruption.Serialize()) } // Reorder reorder := nl.TcNetemReorder{} reorder.Probability = qdisc.ReorderProb reorder.Correlation = qdisc.ReorderCorr if reorder.Probability > 0 { nl.NewRtAttrChild(options, nl.TCA_NETEM_REORDER, reorder.Serialize()) } case *Ingress: // ingress filters must use the proper handle if qdisc.Attrs().Parent != HANDLE_INGRESS { return fmt.Errorf("Ingress filters must set Parent to HANDLE_INGRESS") } } req.AddData(options) return nil } // QdiscList gets a list of qdiscs in the system. // Equivalent to: `tc qdisc show`. // The list can be filtered by link. func QdiscList(link Link) ([]Qdisc, error) { return pkgHandle.QdiscList(link) } // QdiscList gets a list of qdiscs in the system. // Equivalent to: `tc qdisc show`. // The list can be filtered by link. 
func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { req := h.newNetlinkRequest(unix.RTM_GETQDISC, unix.NLM_F_DUMP) index := int32(0) if link != nil { base := link.Attrs() h.ensureIndex(base) index = int32(base.Index) } msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, Ifindex: index, } req.AddData(msg) msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWQDISC) if err != nil { return nil, err } var res []Qdisc for _, m := range msgs { msg := nl.DeserializeTcMsg(m) attrs, err := nl.ParseRouteAttr(m[msg.Len():]) if err != nil { return nil, err } // skip qdiscs from other interfaces if link != nil && msg.Ifindex != index { continue } base := QdiscAttrs{ LinkIndex: int(msg.Ifindex), Handle: msg.Handle, Parent: msg.Parent, Refcnt: msg.Info, } var qdisc Qdisc qdiscType := "" for _, attr := range attrs { switch attr.Attr.Type { case nl.TCA_KIND: qdiscType = string(attr.Value[:len(attr.Value)-1]) switch qdiscType { case "pfifo_fast": qdisc = &PfifoFast{} case "prio": qdisc = &Prio{} case "tbf": qdisc = &Tbf{} case "ingress": qdisc = &Ingress{} case "htb": qdisc = &Htb{} case "netem": qdisc = &Netem{} default: qdisc = &GenericQdisc{QdiscType: qdiscType} } case nl.TCA_OPTIONS: switch qdiscType { case "pfifo_fast": // pfifo returns TcPrioMap directly without wrapping it in rtattr if err := parsePfifoFastData(qdisc, attr.Value); err != nil { return nil, err } case "prio": // prio returns TcPrioMap directly without wrapping it in rtattr if err := parsePrioData(qdisc, attr.Value); err != nil { return nil, err } case "tbf": data, err := nl.ParseRouteAttr(attr.Value) if err != nil { return nil, err } if err := parseTbfData(qdisc, data); err != nil { return nil, err } case "htb": data, err := nl.ParseRouteAttr(attr.Value) if err != nil { return nil, err } if err := parseHtbData(qdisc, data); err != nil { return nil, err } case "netem": if err := parseNetemData(qdisc, attr.Value); err != nil { return nil, err } // no options for ingress } } } *qdisc.Attrs() = base res = append(res, qdisc) } return res, nil } func parsePfifoFastData(qdisc Qdisc, value []byte) error { pfifo := qdisc.(*PfifoFast) tcmap := nl.DeserializeTcPrioMap(value) pfifo.PriorityMap = tcmap.Priomap pfifo.Bands = uint8(tcmap.Bands) return nil } func parsePrioData(qdisc Qdisc, value []byte) error { prio := qdisc.(*Prio) tcmap := nl.DeserializeTcPrioMap(value) prio.PriorityMap = tcmap.Priomap prio.Bands = uint8(tcmap.Bands) return nil } func parseHtbData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { native = nl.NativeEndian() htb := qdisc.(*Htb) for _, datum := range data { switch datum.Attr.Type { case nl.TCA_HTB_INIT: opt := nl.DeserializeTcHtbGlob(datum.Value) htb.Version = opt.Version htb.Rate2Quantum = opt.Rate2Quantum htb.Defcls = opt.Defcls htb.Debug = opt.Debug htb.DirectPkts = opt.DirectPkts case nl.TCA_HTB_DIRECT_QLEN: // TODO //htb.DirectQlen = native.uint32(datum.Value) } } return nil } func parseNetemData(qdisc Qdisc, value []byte) error { netem := qdisc.(*Netem) opt := nl.DeserializeTcNetemQopt(value) netem.Latency = opt.Latency netem.Limit = opt.Limit netem.Loss = opt.Loss netem.Gap = opt.Gap netem.Duplicate = opt.Duplicate netem.Jitter = opt.Jitter data, err := nl.ParseRouteAttr(value[nl.SizeofTcNetemQopt:]) if err != nil { return err } for _, datum := range data { switch datum.Attr.Type { case nl.TCA_NETEM_CORR: opt := nl.DeserializeTcNetemCorr(datum.Value) netem.DelayCorr = opt.DelayCorr netem.LossCorr = opt.LossCorr netem.DuplicateCorr = opt.DupCorr case nl.TCA_NETEM_CORRUPT: opt := 
nl.DeserializeTcNetemCorrupt(datum.Value) netem.CorruptProb = opt.Probability netem.CorruptCorr = opt.Correlation case nl.TCA_NETEM_REORDER: opt := nl.DeserializeTcNetemReorder(datum.Value) netem.ReorderProb = opt.Probability netem.ReorderCorr = opt.Correlation } } return nil } func parseTbfData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { native = nl.NativeEndian() tbf := qdisc.(*Tbf) for _, datum := range data { switch datum.Attr.Type { case nl.TCA_TBF_PARMS: opt := nl.DeserializeTcTbfQopt(datum.Value) tbf.Rate = uint64(opt.Rate.Rate) tbf.Peakrate = uint64(opt.Peakrate.Rate) tbf.Limit = opt.Limit tbf.Buffer = opt.Buffer case nl.TCA_TBF_RATE64: tbf.Rate = native.Uint64(datum.Value[0:8]) case nl.TCA_TBF_PRATE64: tbf.Peakrate = native.Uint64(datum.Value[0:8]) case nl.TCA_TBF_PBURST: tbf.Minburst = native.Uint32(datum.Value[0:4]) } } return nil } const ( TIME_UNITS_PER_SEC = 1000000 ) var ( tickInUsec float64 clockFactor float64 hz float64 ) func initClock() { data, err := ioutil.ReadFile("/proc/net/psched") if err != nil { return } parts := strings.Split(strings.TrimSpace(string(data)), " ") if len(parts) < 3 { return } var vals [3]uint64 for i := range vals { val, err := strconv.ParseUint(parts[i], 16, 32) if err != nil { return } vals[i] = val } // compatibility if vals[2] == 1000000000 { vals[0] = vals[1] } clockFactor = float64(vals[2]) / TIME_UNITS_PER_SEC tickInUsec = float64(vals[0]) / float64(vals[1]) * clockFactor hz = float64(vals[0]) } func TickInUsec() float64 { if tickInUsec == 0.0 { initClock() } return tickInUsec } func ClockFactor() float64 { if clockFactor == 0.0 { initClock() } return clockFactor } func Hz() float64 { if hz == 0.0 { initClock() } return hz } func time2Tick(time uint32) uint32 { return uint32(float64(time) * TickInUsec()) } func tick2Time(tick uint32) uint32 { return uint32(float64(tick) / TickInUsec()) } func time2Ktime(time uint32) uint32 { return uint32(float64(time) * ClockFactor()) } func ktime2Time(ktime uint32) uint32 { return uint32(float64(ktime) / ClockFactor()) } func burst(rate uint64, buffer uint32) uint32 { return uint32(float64(rate) * float64(tick2Time(buffer)) / TIME_UNITS_PER_SEC) } func latency(rate uint64, limit, buffer uint32) float64 { return TIME_UNITS_PER_SEC*(float64(limit)/float64(rate)) - float64(tick2Time(buffer)) } func Xmittime(rate uint64, size uint32) float64 { return TickInUsec() * TIME_UNITS_PER_SEC * (float64(size) / float64(rate)) }
vendor/github.com/vishvananda/netlink/qdisc_linux.go
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.00017690985987428576, 0.00017126895545516163, 0.00016482920909766108, 0.000171230873093009, 0.0000030012720344529953 ]
{ "id": 6, "code_window": [ "\treturn &master.Config{\n", "\t\tGenericConfig: genericConfig,\n", "\t\tExtraConfig: master.ExtraConfig{\n", "\t\t\tAPIResourceConfigSource: master.DefaultAPIResourceConfigSource(),\n", "\t\t\tStorageFactory: storageFactory,\n", "\t\t\tEnableCoreControllers: true,\n", "\t\t\tKubeletClientConfig: kubeletclient.KubeletClientConfig{Port: 10250},\n", "\t\t\tAPIServerServicePort: 443,\n", "\t\t\tMasterCount: 1,\n", "\t\t},\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/framework/master_utils.go", "type": "replace", "edit_start_line_idx": 300 }
package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", "go_test", ) go_library( name = "go_default_library", srcs = ["reconciler.go"], importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler", deps = [ "//pkg/features:go_default_library", "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/util/file:go_default_library", "//pkg/util/goroutinemap/exponentialbackoff:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/nestedpendingoperations:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", "//pkg/volume/util/types:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", ], ) go_test( name = "go_default_test", srcs = ["reconciler_test.go"], embed = [":go_default_library"], deps = [ "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", ], ) filegroup( name = "package-srcs", srcs = glob(["**"]), tags = ["automanaged"], visibility = ["//visibility:private"], ) filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], )
pkg/kubelet/volumemanager/reconciler/BUILD
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.00017406784172635525, 0.00017074891366064548, 0.0001661195419728756, 0.00017121146083809435, 0.0000022939109385333722 ]
{ "id": 7, "code_window": [ "\t\t\tEnableMetrics: true,\n", "\t\t},\n", "\t\tExtraConfig: master.ExtraConfig{\n", "\t\t\tEnableCoreControllers: false,\n", "\t\t\tEnableLogsSupport: false,\n", "\t\t},\n", "\t}\n", "\t_ = &master.Master{\n", "\t\tGenericAPIServer: &genericapiserver.GenericAPIServer{},\n", "\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tEnableLogsSupport: false,\n" ], "file_path": "test/integration/openshift/openshift_test.go", "type": "replace", "edit_start_line_idx": 33 }
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package master import ( "fmt" "net" "net/http" "reflect" "strconv" "time" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" authenticationv1 "k8s.io/api/authentication/v1" authenticationv1beta1 "k8s.io/api/authentication/v1beta1" authorizationapiv1 "k8s.io/api/authorization/v1" authorizationapiv1beta1 "k8s.io/api/authorization/v1beta1" autoscalingapiv1 "k8s.io/api/autoscaling/v1" autoscalingapiv2beta1 "k8s.io/api/autoscaling/v2beta1" batchapiv1 "k8s.io/api/batch/v1" batchapiv1beta1 "k8s.io/api/batch/v1beta1" certificatesapiv1beta1 "k8s.io/api/certificates/v1beta1" apiv1 "k8s.io/api/core/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsapiv1beta1 "k8s.io/api/extensions/v1beta1" networkingapiv1 "k8s.io/api/networking/v1" policyapiv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" storageapiv1 "k8s.io/api/storage/v1" storageapiv1beta1 "k8s.io/api/storage/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apiserver/pkg/endpoints/discovery" "k8s.io/apiserver/pkg/registry/generic" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz" serverstorage "k8s.io/apiserver/pkg/server/storage" storagefactory "k8s.io/apiserver/pkg/storage/storagebackend/factory" "k8s.io/client-go/informers" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" api "k8s.io/kubernetes/pkg/apis/core" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options" kubeletclient "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/master/reconcilers" "k8s.io/kubernetes/pkg/master/tunneler" "k8s.io/kubernetes/pkg/registry/core/endpoint" endpointsstorage "k8s.io/kubernetes/pkg/registry/core/endpoint/storage" "k8s.io/kubernetes/pkg/routes" "k8s.io/kubernetes/pkg/serviceaccount" nodeutil "k8s.io/kubernetes/pkg/util/node" "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" // RESTStorage installers admissionregistrationrest "k8s.io/kubernetes/pkg/registry/admissionregistration/rest" appsrest "k8s.io/kubernetes/pkg/registry/apps/rest" authenticationrest "k8s.io/kubernetes/pkg/registry/authentication/rest" authorizationrest "k8s.io/kubernetes/pkg/registry/authorization/rest" autoscalingrest "k8s.io/kubernetes/pkg/registry/autoscaling/rest" batchrest "k8s.io/kubernetes/pkg/registry/batch/rest" certificatesrest "k8s.io/kubernetes/pkg/registry/certificates/rest" corerest "k8s.io/kubernetes/pkg/registry/core/rest" eventsrest "k8s.io/kubernetes/pkg/registry/events/rest" extensionsrest "k8s.io/kubernetes/pkg/registry/extensions/rest" networkingrest "k8s.io/kubernetes/pkg/registry/networking/rest" policyrest "k8s.io/kubernetes/pkg/registry/policy/rest" rbacrest 
"k8s.io/kubernetes/pkg/registry/rbac/rest" schedulingrest "k8s.io/kubernetes/pkg/registry/scheduling/rest" settingsrest "k8s.io/kubernetes/pkg/registry/settings/rest" storagerest "k8s.io/kubernetes/pkg/registry/storage/rest" ) const ( // DefaultEndpointReconcilerInterval is the default amount of time for how often the endpoints for // the kubernetes Service are reconciled. DefaultEndpointReconcilerInterval = 10 * time.Second // DefaultEndpointReconcilerTTL is the default TTL timeout for the storage layer DefaultEndpointReconcilerTTL = 15 * time.Second ) type ExtraConfig struct { ClientCARegistrationHook ClientCARegistrationHook APIResourceConfigSource serverstorage.APIResourceConfigSource StorageFactory serverstorage.StorageFactory EnableCoreControllers bool EndpointReconcilerConfig EndpointReconcilerConfig EventTTL time.Duration KubeletClientConfig kubeletclient.KubeletClientConfig // Used to start and monitor tunneling Tunneler tunneler.Tunneler EnableLogsSupport bool ProxyTransport http.RoundTripper // Values to build the IP addresses used by discovery // The range of IPs to be assigned to services with type=ClusterIP or greater ServiceIPRange net.IPNet // The IP address for the GenericAPIServer service (must be inside ServiceIPRange) APIServerServiceIP net.IP // Port for the apiserver service. APIServerServicePort int // TODO, we can probably group service related items into a substruct to make it easier to configure // the API server items and `Extra*` fields likely fit nicely together. // The range of ports to be assigned to services with type=NodePort or greater ServiceNodePortRange utilnet.PortRange // Additional ports to be exposed on the GenericAPIServer service // extraServicePorts is injectable in the event that more ports // (other than the default 443/tcp) are exposed on the GenericAPIServer // and those ports need to be load balanced by the GenericAPIServer // service because this pkg is linked by out-of-tree projects // like openshift which want to use the GenericAPIServer but also do // more stuff. ExtraServicePorts []api.ServicePort // Additional ports to be exposed on the GenericAPIServer endpoints // Port names should align with ports defined in ExtraServicePorts ExtraEndpointPorts []api.EndpointPort // If non-zero, the "kubernetes" services uses this port as NodePort. KubernetesServiceNodePort int // Number of masters running; all masters must be started with the // same value for this field. (Numbers > 1 currently untested.) MasterCount int // MasterEndpointReconcileTTL sets the time to live in seconds of an // endpoint record recorded by each master. The endpoints are checked at an // interval that is 2/3 of this value and this value defaults to 15s if // unset. In very large clusters, this value may be increased to reduce the // possibility that the master endpoint record expires (due to other load // on the etcd server) and causes masters to drop in and out of the // kubernetes service record. It is not recommended to set this value below // 15s. MasterEndpointReconcileTTL time.Duration // Selects which reconciler to use EndpointReconcilerType reconcilers.Type ServiceAccountIssuer serviceaccount.TokenGenerator ServiceAccountAPIAudiences []string } type Config struct { GenericConfig *genericapiserver.Config ExtraConfig ExtraConfig } type completedConfig struct { GenericConfig genericapiserver.CompletedConfig ExtraConfig *ExtraConfig } type CompletedConfig struct { // Embed a private pointer that cannot be instantiated outside of this package. 
*completedConfig } // EndpointReconcilerConfig holds the endpoint reconciler and endpoint reconciliation interval to be // used by the master. type EndpointReconcilerConfig struct { Reconciler reconcilers.EndpointReconciler Interval time.Duration } // Master contains state for a Kubernetes cluster master/api server. type Master struct { GenericAPIServer *genericapiserver.GenericAPIServer ClientCARegistrationHook ClientCARegistrationHook } func (c *Config) createMasterCountReconciler() reconcilers.EndpointReconciler { endpointClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig) return reconcilers.NewMasterCountEndpointReconciler(c.ExtraConfig.MasterCount, endpointClient) } func (c *Config) createNoneReconciler() reconcilers.EndpointReconciler { return reconcilers.NewNoneEndpointReconciler() } func (c *Config) createLeaseReconciler() reconcilers.EndpointReconciler { ttl := c.ExtraConfig.MasterEndpointReconcileTTL config, err := c.ExtraConfig.StorageFactory.NewConfig(api.Resource("apiServerIPInfo")) if err != nil { glog.Fatalf("Error determining service IP ranges: %v", err) } leaseStorage, _, err := storagefactory.Create(*config) if err != nil { glog.Fatalf("Error creating storage factory: %v", err) } endpointConfig, err := c.ExtraConfig.StorageFactory.NewConfig(api.Resource("endpoints")) if err != nil { glog.Fatalf("Error getting storage config: %v", err) } endpointsStorage := endpointsstorage.NewREST(generic.RESTOptions{ StorageConfig: endpointConfig, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 0, ResourcePrefix: c.ExtraConfig.StorageFactory.ResourcePrefix(api.Resource("endpoints")), }) endpointRegistry := endpoint.NewRegistry(endpointsStorage) masterLeases := reconcilers.NewLeases(leaseStorage, "/masterleases/", ttl) return reconcilers.NewLeaseEndpointReconciler(endpointRegistry, masterLeases) } func (c *Config) createEndpointReconciler() reconcilers.EndpointReconciler { glog.Infof("Using reconciler: %v", c.ExtraConfig.EndpointReconcilerType) switch c.ExtraConfig.EndpointReconcilerType { // there are numerous test dependencies that depend on a default controller case "", reconcilers.MasterCountReconcilerType: return c.createMasterCountReconciler() case reconcilers.LeaseEndpointReconcilerType: return c.createLeaseReconciler() case reconcilers.NoneEndpointReconcilerType: return c.createNoneReconciler() default: glog.Fatalf("Reconciler not implemented: %v", c.ExtraConfig.EndpointReconcilerType) } return nil } // Complete fills in any fields not set that are required to have valid data. It's mutating the receiver. 
func (cfg *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig { c := completedConfig{ cfg.GenericConfig.Complete(informers), &cfg.ExtraConfig, } serviceIPRange, apiServerServiceIP, err := DefaultServiceIPRange(c.ExtraConfig.ServiceIPRange) if err != nil { glog.Fatalf("Error determining service IP ranges: %v", err) } if c.ExtraConfig.ServiceIPRange.IP == nil { c.ExtraConfig.ServiceIPRange = serviceIPRange } if c.ExtraConfig.APIServerServiceIP == nil { c.ExtraConfig.APIServerServiceIP = apiServerServiceIP } discoveryAddresses := discovery.DefaultAddresses{DefaultAddress: c.GenericConfig.ExternalAddress} discoveryAddresses.CIDRRules = append(discoveryAddresses.CIDRRules, discovery.CIDRRule{IPRange: c.ExtraConfig.ServiceIPRange, Address: net.JoinHostPort(c.ExtraConfig.APIServerServiceIP.String(), strconv.Itoa(c.ExtraConfig.APIServerServicePort))}) c.GenericConfig.DiscoveryAddresses = discoveryAddresses if c.ExtraConfig.ServiceNodePortRange.Size == 0 { // TODO: Currently no way to specify an empty range (do we need to allow this?) // We should probably allow this for clouds that don't require NodePort to do load-balancing (GCE) // but then that breaks the strict nestedness of ServiceType. // Review post-v1 c.ExtraConfig.ServiceNodePortRange = kubeoptions.DefaultServiceNodePortRange glog.Infof("Node port range unspecified. Defaulting to %v.", c.ExtraConfig.ServiceNodePortRange) } if c.ExtraConfig.EndpointReconcilerConfig.Interval == 0 { c.ExtraConfig.EndpointReconcilerConfig.Interval = DefaultEndpointReconcilerInterval } if c.ExtraConfig.MasterEndpointReconcileTTL == 0 { c.ExtraConfig.MasterEndpointReconcileTTL = DefaultEndpointReconcilerTTL } if c.ExtraConfig.EndpointReconcilerConfig.Reconciler == nil { c.ExtraConfig.EndpointReconcilerConfig.Reconciler = cfg.createEndpointReconciler() } return CompletedConfig{&c} } // New returns a new instance of Master from the given config. // Certain config fields will be set to a default value if unset. // Certain config fields must be specified, including: // KubeletClientConfig func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) (*Master, error) { if reflect.DeepEqual(c.ExtraConfig.KubeletClientConfig, kubeletclient.KubeletClientConfig{}) { return nil, fmt.Errorf("Master.New() called with empty config.KubeletClientConfig") } s, err := c.GenericConfig.New("kube-apiserver", delegationTarget) if err != nil { return nil, err } if c.ExtraConfig.EnableLogsSupport { routes.Logs{}.Install(s.Handler.GoRestfulContainer) } m := &Master{ GenericAPIServer: s, } // install legacy rest storage if c.ExtraConfig.APIResourceConfigSource.VersionEnabled(apiv1.SchemeGroupVersion) { legacyRESTStorageProvider := corerest.LegacyRESTStorageProvider{ StorageFactory: c.ExtraConfig.StorageFactory, ProxyTransport: c.ExtraConfig.ProxyTransport, KubeletClientConfig: c.ExtraConfig.KubeletClientConfig, EventTTL: c.ExtraConfig.EventTTL, ServiceIPRange: c.ExtraConfig.ServiceIPRange, ServiceNodePortRange: c.ExtraConfig.ServiceNodePortRange, LoopbackClientConfig: c.GenericConfig.LoopbackClientConfig, ServiceAccountIssuer: c.ExtraConfig.ServiceAccountIssuer, ServiceAccountAPIAudiences: c.ExtraConfig.ServiceAccountAPIAudiences, } m.InstallLegacyAPI(&c, c.GenericConfig.RESTOptionsGetter, legacyRESTStorageProvider) } // The order here is preserved in discovery. // If resources with identical names exist in more than one of these groups (e.g. 
"deployments.apps"" and "deployments.extensions"), // the order of this list determines which group an unqualified resource name (e.g. "deployments") should prefer. // This priority order is used for local discovery, but it ends up aggregated in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go // with specific priorities. // TODO: describe the priority all the way down in the RESTStorageProviders and plumb it back through the various discovery // handlers that we have. restStorageProviders := []RESTStorageProvider{ authenticationrest.RESTStorageProvider{Authenticator: c.GenericConfig.Authentication.Authenticator}, authorizationrest.RESTStorageProvider{Authorizer: c.GenericConfig.Authorization.Authorizer, RuleResolver: c.GenericConfig.RuleResolver}, autoscalingrest.RESTStorageProvider{}, batchrest.RESTStorageProvider{}, certificatesrest.RESTStorageProvider{}, extensionsrest.RESTStorageProvider{}, networkingrest.RESTStorageProvider{}, policyrest.RESTStorageProvider{}, rbacrest.RESTStorageProvider{Authorizer: c.GenericConfig.Authorization.Authorizer}, schedulingrest.RESTStorageProvider{}, settingsrest.RESTStorageProvider{}, storagerest.RESTStorageProvider{}, // keep apps after extensions so legacy clients resolve the extensions versions of shared resource names. // See https://github.com/kubernetes/kubernetes/issues/42392 appsrest.RESTStorageProvider{}, admissionregistrationrest.RESTStorageProvider{}, eventsrest.RESTStorageProvider{TTL: c.ExtraConfig.EventTTL}, } m.InstallAPIs(c.ExtraConfig.APIResourceConfigSource, c.GenericConfig.RESTOptionsGetter, restStorageProviders...) if c.ExtraConfig.Tunneler != nil { m.installTunneler(c.ExtraConfig.Tunneler, corev1client.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig).Nodes()) } m.GenericAPIServer.AddPostStartHookOrDie("ca-registration", c.ExtraConfig.ClientCARegistrationHook.PostStartHook) return m, nil } func (m *Master) InstallLegacyAPI(c *completedConfig, restOptionsGetter generic.RESTOptionsGetter, legacyRESTStorageProvider corerest.LegacyRESTStorageProvider) { legacyRESTStorage, apiGroupInfo, err := legacyRESTStorageProvider.NewLegacyRESTStorage(restOptionsGetter) if err != nil { glog.Fatalf("Error building core storage: %v", err) } if c.ExtraConfig.EnableCoreControllers { controllerName := "bootstrap-controller" coreClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig) bootstrapController := c.NewBootstrapController(legacyRESTStorage, coreClient, coreClient, coreClient) m.GenericAPIServer.AddPostStartHookOrDie(controllerName, bootstrapController.PostStartHook) m.GenericAPIServer.AddPreShutdownHookOrDie(controllerName, bootstrapController.PreShutdownHook) } if err := m.GenericAPIServer.InstallLegacyAPIGroup(genericapiserver.DefaultLegacyAPIPrefix, &apiGroupInfo); err != nil { glog.Fatalf("Error in registering group versions: %v", err) } } func (m *Master) installTunneler(nodeTunneler tunneler.Tunneler, nodeClient corev1client.NodeInterface) { nodeTunneler.Run(nodeAddressProvider{nodeClient}.externalAddresses) m.GenericAPIServer.AddHealthzChecks(healthz.NamedCheck("SSH Tunnel Check", tunneler.TunnelSyncHealthChecker(nodeTunneler))) prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Name: "apiserver_proxy_tunnel_sync_latency_secs", Help: "The time since the last successful synchronization of the SSH tunnels for proxy requests.", }, func() float64 { return float64(nodeTunneler.SecondsSinceSync()) }) } // RESTStorageProvider is a factory type for REST storage. 
type RESTStorageProvider interface { GroupName() string NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) } // InstallAPIs will install the APIs for the restStorageProviders if they are enabled. func (m *Master) InstallAPIs(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter, restStorageProviders ...RESTStorageProvider) { apiGroupsInfo := []genericapiserver.APIGroupInfo{} for _, restStorageBuilder := range restStorageProviders { groupName := restStorageBuilder.GroupName() if !apiResourceConfigSource.AnyVersionForGroupEnabled(groupName) { glog.V(1).Infof("Skipping disabled API group %q.", groupName) continue } apiGroupInfo, enabled := restStorageBuilder.NewRESTStorage(apiResourceConfigSource, restOptionsGetter) if !enabled { glog.Warningf("Problem initializing API group %q, skipping.", groupName) continue } glog.V(1).Infof("Enabling API group %q.", groupName) if postHookProvider, ok := restStorageBuilder.(genericapiserver.PostStartHookProvider); ok { name, hook, err := postHookProvider.PostStartHook() if err != nil { glog.Fatalf("Error building PostStartHook: %v", err) } m.GenericAPIServer.AddPostStartHookOrDie(name, hook) } apiGroupsInfo = append(apiGroupsInfo, apiGroupInfo) } for i := range apiGroupsInfo { if err := m.GenericAPIServer.InstallAPIGroup(&apiGroupsInfo[i]); err != nil { glog.Fatalf("Error in registering group versions: %v", err) } } } type nodeAddressProvider struct { nodeClient corev1client.NodeInterface } func (n nodeAddressProvider) externalAddresses() ([]string, error) { preferredAddressTypes := []apiv1.NodeAddressType{ apiv1.NodeExternalIP, } nodes, err := n.nodeClient.List(metav1.ListOptions{}) if err != nil { return nil, err } addrs := []string{} for ix := range nodes.Items { node := &nodes.Items[ix] addr, err := nodeutil.GetPreferredNodeAddress(node, preferredAddressTypes) if err != nil { return nil, err } addrs = append(addrs, addr) } return addrs, nil } func DefaultAPIResourceConfigSource() *serverstorage.ResourceConfig { ret := serverstorage.NewResourceConfig() // NOTE: GroupVersions listed here will be enabled by default. Don't put alpha versions in the list. ret.EnableVersions( apiv1.SchemeGroupVersion, extensionsapiv1beta1.SchemeGroupVersion, batchapiv1.SchemeGroupVersion, batchapiv1beta1.SchemeGroupVersion, authenticationv1.SchemeGroupVersion, authenticationv1beta1.SchemeGroupVersion, autoscalingapiv1.SchemeGroupVersion, autoscalingapiv2beta1.SchemeGroupVersion, appsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion, appsv1.SchemeGroupVersion, policyapiv1beta1.SchemeGroupVersion, rbacv1.SchemeGroupVersion, rbacv1beta1.SchemeGroupVersion, storageapiv1.SchemeGroupVersion, storageapiv1beta1.SchemeGroupVersion, certificatesapiv1beta1.SchemeGroupVersion, authorizationapiv1.SchemeGroupVersion, authorizationapiv1beta1.SchemeGroupVersion, networkingapiv1.SchemeGroupVersion, eventsv1beta1.SchemeGroupVersion, admissionregistrationv1beta1.SchemeGroupVersion, ) return ret }
pkg/master/master.go
1
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.42708879709243774, 0.009478282183408737, 0.00016161358507815748, 0.00018629040278028697, 0.060290880501270294 ]
{ "id": 7, "code_window": [ "\t\t\tEnableMetrics: true,\n", "\t\t},\n", "\t\tExtraConfig: master.ExtraConfig{\n", "\t\t\tEnableCoreControllers: false,\n", "\t\t\tEnableLogsSupport: false,\n", "\t\t},\n", "\t}\n", "\t_ = &master.Master{\n", "\t\tGenericAPIServer: &genericapiserver.GenericAPIServer{},\n", "\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tEnableLogsSupport: false,\n" ], "file_path": "test/integration/openshift/openshift_test.go", "type": "replace", "edit_start_line_idx": 33 }
/* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by client-gen. DO NOT EDIT. package fake import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" core "k8s.io/kubernetes/pkg/apis/core" ) // FakeEvents implements EventInterface type FakeEvents struct { Fake *FakeCore ns string } var eventsResource = schema.GroupVersionResource{Group: "", Version: "", Resource: "events"} var eventsKind = schema.GroupVersionKind{Group: "", Version: "", Kind: "Event"} // Get takes name of the event, and returns the corresponding event object, and an error if there is any. func (c *FakeEvents) Get(name string, options v1.GetOptions) (result *core.Event, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(eventsResource, c.ns, name), &core.Event{}) if obj == nil { return nil, err } return obj.(*core.Event), err } // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *FakeEvents) List(opts v1.ListOptions) (result *core.EventList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &core.EventList{}) if obj == nil { return nil, err } label, _, _ := testing.ExtractFromListOptions(opts) if label == nil { label = labels.Everything() } list := &core.EventList{} for _, item := range obj.(*core.EventList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } } return list, err } // Watch returns a watch.Interface that watches the requested events. func (c *FakeEvents) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. func (c *FakeEvents) Create(event *core.Event) (result *core.Event, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &core.Event{}) if obj == nil { return nil, err } return obj.(*core.Event), err } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. func (c *FakeEvents) Update(event *core.Event) (result *core.Event, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &core.Event{}) if obj == nil { return nil, err } return obj.(*core.Event), err } // Delete takes name of the event and deletes it. Returns an error if one occurs. func (c *FakeEvents) Delete(name string, options *v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(eventsResource, c.ns, name), &core.Event{}) return err } // DeleteCollection deletes a collection of objects. 
func (c *FakeEvents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &core.EventList{}) return err } // Patch applies the patch and returns the patched event. func (c *FakeEvents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Event, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, data, subresources...), &core.Event{}) if obj == nil { return nil, err } return obj.(*core.Event), err }
pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event.go
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.001427952665835619, 0.0003605033562052995, 0.00016508495900779963, 0.00017298769671469927, 0.0003780311672016978 ]
{ "id": 7, "code_window": [ "\t\t\tEnableMetrics: true,\n", "\t\t},\n", "\t\tExtraConfig: master.ExtraConfig{\n", "\t\t\tEnableCoreControllers: false,\n", "\t\t\tEnableLogsSupport: false,\n", "\t\t},\n", "\t}\n", "\t_ = &master.Master{\n", "\t\tGenericAPIServer: &genericapiserver.GenericAPIServer{},\n", "\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tEnableLogsSupport: false,\n" ], "file_path": "test/integration/openshift/openshift_test.go", "type": "replace", "edit_start_line_idx": 33 }
-----BEGIN CERTIFICATE----- MIIDEjCCAfqgAwIBAgIBATANBgkqhkiG9w0BAQsFADAeMRwwGgYDVQQDDBN0ZXN0 LmNvbUAxNDk2MzMwNjYwMCAXDTcwMDEwMTAwMDAwMFoYDzIxMTcwNTA4MTUyNDIw WjAeMRwwGgYDVQQDDBN0ZXN0LmNvbUAxNDk2MzMwNjYwMIIBIjANBgkqhkiG9w0B AQEFAAOCAQ8AMIIBCgKCAQEAwLNxhHFPJFGnDoOnt/ELutNthoJNYLwus52+GLD1 VYDDBA7766rzAxK8wGF9vGbXZX7L1uW3VJyJJLzZR1bBTdeOWXpyscz+33+jETbn Eg2Dp7KbdFAFw66B90vxLbHKbQtH63VtNg9lh+d0K4QI6SGFlI/Mv9VWawpKk1P5 X1cgl1EgR5e4kIgQsrkO+MRc0SLZG/s9MvThrHVlZLWPjRaiqk1GDxvBjfcBoPzZ 0jOHhWLJGWZcwXZ5brqPcqn+YMceXQlxrjxJvyq02DEWjtfimu7qoZ3+fgQy4rJ0 GzPaDvwnkwvJQ2iN59mcybfg6AoblCOt1ypIqouMrI/J3QIDAQABo1kwVzAOBgNV HQ8BAf8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUwAwEB /zAfBgNVHREEGDAWggh0ZXN0LmNvbYIKKi50ZXN0LmNvbTANBgkqhkiG9w0BAQsF AAOCAQEAGQ+K0dnTaX5Ry9PLA6M8yrMhq5gnX2RfyIMXCuFfMqjdju4BkTR+6zm2 El8Z6glQ6QsKYHR4XhlhHFOP+gGyaTrsDSV2qCgphJmtn6QWOSfEmBRNNCTpMXfS 5Ek/2dXItMjmnMdDOqcLofQyIIQFE9VpLyaFN0n1w9k6EscwSxsMiBVFwOhHpBex BPJnrKBDWNVHjgocUI3YzN2TzzRxVxB/xc5+Sl/jnpguad+q/wjFgpr9p2a4yAS7 W5bXcA1S4iSp8uKVv0JM/cfFlF094ft88A/SIt8Sn8BmeOGQtSk/sf5mFbr7TRqE oDuKNM5AIM/fClQdlbKo7xpcJCiRkQ== -----END CERTIFICATE-----
staging/src/k8s.io/apiserver/pkg/server/options/testdata/localhost__127.0.0.1/test.com_star.test.com_/cert
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.0003286466235294938, 0.00024922945885919034, 0.0001698123087408021, 0.00024922945885919034, 0.00007941715739434585 ]
{ "id": 7, "code_window": [ "\t\t\tEnableMetrics: true,\n", "\t\t},\n", "\t\tExtraConfig: master.ExtraConfig{\n", "\t\t\tEnableCoreControllers: false,\n", "\t\t\tEnableLogsSupport: false,\n", "\t\t},\n", "\t}\n", "\t_ = &master.Master{\n", "\t\tGenericAPIServer: &genericapiserver.GenericAPIServer{},\n", "\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tEnableLogsSupport: false,\n" ], "file_path": "test/integration/openshift/openshift_test.go", "type": "replace", "edit_start_line_idx": 33 }
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kubeadm import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName is the group name use in this package const GroupName = "kubeadm.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} var ( // SchemeBuilder points to a list of functions added to Scheme. SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) // AddToScheme applies all the stored functions to the scheme. AddToScheme = SchemeBuilder.AddToScheme ) // Kind takes an unqualified kind and returns a Group qualified GroupKind func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &MasterConfiguration{}, &NodeConfiguration{}, ) return nil }
cmd/kubeadm/app/apis/kubeadm/register.go
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.00018004911544267088, 0.00017354411829728633, 0.00016681732085999101, 0.00017472502076998353, 0.00000476333389087813 ]
{ "id": 8, "code_window": [ "\t\t}\n", "\t\tkubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport)\n", "\t\tif err != nil {\n", "\t\t\tt.Fatal(err)\n", "\t\t}\n", "\t\tkubeAPIServerConfig.ExtraConfig.EnableCoreControllers = false\n", "\t\tkubeClientConfigValue.Store(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig)\n", "\n", "\t\tkubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/tls/ciphers_test.go", "type": "replace", "edit_start_line_idx": 72 }
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package framework import ( "net" "net/http" "net/http/httptest" "path" "time" "github.com/go-openapi/spec" "github.com/golang/glog" "github.com/pborman/uuid" apps "k8s.io/api/apps/v1beta1" autoscaling "k8s.io/api/autoscaling/v1" certificates "k8s.io/api/certificates/v1beta1" "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" rbac "k8s.io/api/rbac/v1alpha1" storage "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" authauthenticator "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/authenticatorfactory" authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/authorization/authorizerfactory" authorizerunion "k8s.io/apiserver/pkg/authorization/union" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/options" serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/apis/batch" policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1" "k8s.io/kubernetes/pkg/generated/openapi" kubeletclient "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/master" "k8s.io/kubernetes/pkg/version" ) // Config is a struct of configuration directives for NewMasterComponents. type Config struct { // If nil, a default is used, partially filled configs will not get populated. 
MasterConfig *master.Config StartReplicationManager bool // Client throttling qps QPS float32 // Client burst qps, also burst replicas allowed in rc manager Burst int // TODO: Add configs for endpoints controller, scheduler etc } // alwaysAllow always allows an action type alwaysAllow struct{} func (alwaysAllow) Authorize(requestAttributes authorizer.Attributes) (authorizer.Decision, string, error) { return authorizer.DecisionAllow, "always allow", nil } // alwaysEmpty simulates "no authentication" for old tests func alwaysEmpty(req *http.Request) (user.Info, bool, error) { return &user.DefaultInfo{ Name: "", }, true, nil } // MasterReceiver can be used to provide the master to a custom incoming server function type MasterReceiver interface { SetMaster(m *master.Master) } // MasterHolder implements type MasterHolder struct { Initialized chan struct{} M *master.Master } func (h *MasterHolder) SetMaster(m *master.Master) { h.M = m close(h.Initialized) } // startMasterOrDie starts a kubernetes master and an httpserver to handle api requests func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) { var m *master.Master var s *httptest.Server if incomingServer != nil { s = incomingServer } else { s = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { m.GenericAPIServer.Handler.ServeHTTP(w, req) })) } stopCh := make(chan struct{}) closeFn := func() { close(stopCh) s.Close() } if masterConfig == nil { masterConfig = NewMasterConfig() masterConfig.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, legacyscheme.Scheme) masterConfig.GenericConfig.OpenAPIConfig.Info = &spec.Info{ InfoProps: spec.InfoProps{ Title: "Kubernetes", Version: "unversioned", }, } masterConfig.GenericConfig.OpenAPIConfig.DefaultResponse = &spec.Response{ ResponseProps: spec.ResponseProps{ Description: "Default Response.", }, } masterConfig.GenericConfig.OpenAPIConfig.GetDefinitions = openapi.GetOpenAPIDefinitions masterConfig.GenericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig() } // set the loopback client config if masterConfig.GenericConfig.LoopbackClientConfig == nil { masterConfig.GenericConfig.LoopbackClientConfig = &restclient.Config{QPS: 50, Burst: 100, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}} } masterConfig.GenericConfig.LoopbackClientConfig.Host = s.URL privilegedLoopbackToken := uuid.NewRandom().String() // wrap any available authorizer tokens := make(map[string]*user.DefaultInfo) tokens[privilegedLoopbackToken] = &user.DefaultInfo{ Name: user.APIServerUser, UID: uuid.NewRandom().String(), Groups: []string{user.SystemPrivilegedGroup}, } tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens) if masterConfig.GenericConfig.Authentication.Authenticator == nil { masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty)) } else { masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authentication.Authenticator) } if masterConfig.GenericConfig.Authorization.Authorizer != nil { tokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup) masterConfig.GenericConfig.Authorization.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorization.Authorizer) } else { 
masterConfig.GenericConfig.Authorization.Authorizer = alwaysAllow{} } masterConfig.GenericConfig.LoopbackClientConfig.BearerToken = privilegedLoopbackToken clientset, err := clientset.NewForConfig(masterConfig.GenericConfig.LoopbackClientConfig) if err != nil { glog.Fatal(err) } sharedInformers := informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout) m, err = masterConfig.Complete(sharedInformers).New(genericapiserver.EmptyDelegate) if err != nil { closeFn() glog.Fatalf("error in bringing up the master: %v", err) } if masterReceiver != nil { masterReceiver.SetMaster(m) } // TODO have this start method actually use the normal start sequence for the API server // this method never actually calls the `Run` method for the API server // fire the post hooks ourselves m.GenericAPIServer.PrepareRun() m.GenericAPIServer.RunPostStartHooks(stopCh) cfg := *masterConfig.GenericConfig.LoopbackClientConfig cfg.ContentConfig.GroupVersion = &schema.GroupVersion{} privilegedClient, err := restclient.RESTClientFor(&cfg) if err != nil { closeFn() glog.Fatal(err) } err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { result := privilegedClient.Get().AbsPath("/healthz").Do() status := 0 result.StatusCode(&status) if status == 200 { return true, nil } return false, nil }) if err != nil { closeFn() glog.Fatal(err) } return m, s, closeFn } // Returns the master config appropriate for most integration tests. func NewIntegrationTestMasterConfig() *master.Config { masterConfig := NewMasterConfig() masterConfig.ExtraConfig.EnableCoreControllers = true masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4") masterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource() return masterConfig } // Returns a basic master config. func NewMasterConfig() *master.Config { // This causes the integration tests to exercise the etcd // prefix code, so please don't change without ensuring // sufficient coverage in other ways. 
etcdOptions := options.NewEtcdOptions(storagebackend.NewDefaultConfig(uuid.New(), nil)) etcdOptions.StorageConfig.ServerList = []string{GetEtcdURL()} info, _ := runtime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON) ns := NewSingleContentTypeSerializer(legacyscheme.Scheme, info) resourceEncoding := serverstorage.NewDefaultResourceEncodingConfig(legacyscheme.Registry) // FIXME (soltysh): this GroupVersionResource override should be configurable // we need to set both for the whole group and for cronjobs, separately resourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal}) resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: batch.GroupName, Resource: "cronjobs"}, schema.GroupVersion{Group: batch.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal}) // we also need to set both for the storage group and for volumeattachments, separately resourceEncoding.SetVersionEncoding(storage.GroupName, *testapi.Storage.GroupVersion(), schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal}) resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: storage.GroupName, Resource: "volumeattachments"}, schema.GroupVersion{Group: storage.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal}) storageFactory := serverstorage.NewDefaultStorageFactory(etcdOptions.StorageConfig, runtime.ContentTypeJSON, ns, resourceEncoding, master.DefaultAPIResourceConfigSource(), nil) storageFactory.SetSerializer( schema.GroupResource{Group: v1.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: autoscaling.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: batch.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: apps.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: extensions.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: policy.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: rbac.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: certificates.GroupName, Resource: serverstorage.AllResources}, "", ns) storageFactory.SetSerializer( schema.GroupResource{Group: storage.GroupName, Resource: serverstorage.AllResources}, "", ns) genericConfig := genericapiserver.NewConfig(legacyscheme.Codecs) kubeVersion := version.Get() genericConfig.Version = &kubeVersion genericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer() err := etcdOptions.ApplyWithStorageFactoryTo(storageFactory, genericConfig) if err != nil { panic(err) } return &master.Config{ GenericConfig: genericConfig, ExtraConfig: master.ExtraConfig{ APIResourceConfigSource: master.DefaultAPIResourceConfigSource(), StorageFactory: storageFactory, EnableCoreControllers: true, KubeletClientConfig: kubeletclient.KubeletClientConfig{Port: 10250}, APIServerServicePort: 443, MasterCount: 1, }, } } // CloseFunc can be called to cleanup the master type CloseFunc func() func 
RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server, CloseFunc) { if masterConfig == nil { masterConfig = NewMasterConfig() masterConfig.GenericConfig.EnableProfiling = true } return startMasterOrDie(masterConfig, nil, nil) } func RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) { return startMasterOrDie(masterConfig, s, masterReceiver) } // SharedEtcd creates a storage config for a shared etcd instance, with a unique prefix. func SharedEtcd() *storagebackend.Config { cfg := storagebackend.NewDefaultConfig(path.Join(uuid.New(), "registry"), nil) cfg.ServerList = []string{GetEtcdURL()} return cfg }
test/integration/framework/master_utils.go
1
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.008532260544598103, 0.0008313844446092844, 0.0001612612250028178, 0.00017275760183110833, 0.0017410187283530831 ]
{ "id": 8, "code_window": [ "\t\t}\n", "\t\tkubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport)\n", "\t\tif err != nil {\n", "\t\t\tt.Fatal(err)\n", "\t\t}\n", "\t\tkubeAPIServerConfig.ExtraConfig.EnableCoreControllers = false\n", "\t\tkubeClientConfigValue.Store(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig)\n", "\n", "\t\tkubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/tls/ciphers_test.go", "type": "replace", "edit_start_line_idx": 72 }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package discovery import ( "net" "net/http" "reflect" "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilnet "k8s.io/apimachinery/pkg/util/net" ) func TestGetServerAddressByClientCIDRs(t *testing.T) { publicAddressCIDRMap := []metav1.ServerAddressByClientCIDR{ { ClientCIDR: "0.0.0.0/0", ServerAddress: "ExternalAddress", }, } internalAddressCIDRMap := []metav1.ServerAddressByClientCIDR{ publicAddressCIDRMap[0], { ClientCIDR: "10.0.0.0/24", ServerAddress: "serviceIP", }, } internalIP := "10.0.0.1" publicIP := "1.1.1.1" testCases := []struct { Request http.Request ExpectedMap []metav1.ServerAddressByClientCIDR }{ { Request: http.Request{}, ExpectedMap: publicAddressCIDRMap, }, { Request: http.Request{ Header: map[string][]string{ "X-Real-Ip": {internalIP}, }, }, ExpectedMap: internalAddressCIDRMap, }, { Request: http.Request{ Header: map[string][]string{ "X-Real-Ip": {publicIP}, }, }, ExpectedMap: publicAddressCIDRMap, }, { Request: http.Request{ Header: map[string][]string{ "X-Forwarded-For": {internalIP}, }, }, ExpectedMap: internalAddressCIDRMap, }, { Request: http.Request{ Header: map[string][]string{ "X-Forwarded-For": {publicIP}, }, }, ExpectedMap: publicAddressCIDRMap, }, { Request: http.Request{ RemoteAddr: internalIP, }, ExpectedMap: internalAddressCIDRMap, }, { Request: http.Request{ RemoteAddr: publicIP, }, ExpectedMap: publicAddressCIDRMap, }, { Request: http.Request{ RemoteAddr: "invalidIP", }, ExpectedMap: publicAddressCIDRMap, }, } _, ipRange, _ := net.ParseCIDR("10.0.0.0/24") discoveryAddresses := DefaultAddresses{DefaultAddress: "ExternalAddress"} discoveryAddresses.CIDRRules = append(discoveryAddresses.CIDRRules, CIDRRule{IPRange: *ipRange, Address: "serviceIP"}) for i, test := range testCases { if a, e := discoveryAddresses.ServerAddressByClientCIDRs(utilnet.GetClientIP(&test.Request)), test.ExpectedMap; reflect.DeepEqual(e, a) != true { t.Fatalf("test case %d failed. expected: %v, actual: %v", i+1, e, a) } } }
staging/src/k8s.io/apiserver/pkg/endpoints/discovery/addresses_test.go
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.000784276460763067, 0.00022230752801988274, 0.00016437484009657055, 0.00017205455515068024, 0.0001694897364359349 ]
{ "id": 8, "code_window": [ "\t\t}\n", "\t\tkubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport)\n", "\t\tif err != nil {\n", "\t\t\tt.Fatal(err)\n", "\t\t}\n", "\t\tkubeAPIServerConfig.ExtraConfig.EnableCoreControllers = false\n", "\t\tkubeClientConfigValue.Store(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig)\n", "\n", "\t\tkubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/tls/ciphers_test.go", "type": "replace", "edit_start_line_idx": 72 }
/* Copyright 2013 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package lru implements an LRU cache. package lru import "container/list" // Cache is an LRU cache. It is not safe for concurrent access. type Cache struct { // MaxEntries is the maximum number of cache entries before // an item is evicted. Zero means no limit. MaxEntries int // OnEvicted optionally specificies a callback function to be // executed when an entry is purged from the cache. OnEvicted func(key Key, value interface{}) ll *list.List cache map[interface{}]*list.Element } // A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators type Key interface{} type entry struct { key Key value interface{} } // New creates a new Cache. // If maxEntries is zero, the cache has no limit and it's assumed // that eviction is done by the caller. func New(maxEntries int) *Cache { return &Cache{ MaxEntries: maxEntries, ll: list.New(), cache: make(map[interface{}]*list.Element), } } // Add adds a value to the cache. func (c *Cache) Add(key Key, value interface{}) { if c.cache == nil { c.cache = make(map[interface{}]*list.Element) c.ll = list.New() } if ee, ok := c.cache[key]; ok { c.ll.MoveToFront(ee) ee.Value.(*entry).value = value return } ele := c.ll.PushFront(&entry{key, value}) c.cache[key] = ele if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { c.RemoveOldest() } } // Get looks up a key's value from the cache. func (c *Cache) Get(key Key) (value interface{}, ok bool) { if c.cache == nil { return } if ele, hit := c.cache[key]; hit { c.ll.MoveToFront(ele) return ele.Value.(*entry).value, true } return } // Remove removes the provided key from the cache. func (c *Cache) Remove(key Key) { if c.cache == nil { return } if ele, hit := c.cache[key]; hit { c.removeElement(ele) } } // RemoveOldest removes the oldest item from the cache. func (c *Cache) RemoveOldest() { if c.cache == nil { return } ele := c.ll.Back() if ele != nil { c.removeElement(ele) } } func (c *Cache) removeElement(e *list.Element) { c.ll.Remove(e) kv := e.Value.(*entry) delete(c.cache, kv.key) if c.OnEvicted != nil { c.OnEvicted(kv.key, kv.value) } } // Len returns the number of items in the cache. func (c *Cache) Len() int { if c.cache == nil { return 0 } return c.ll.Len() }
vendor/github.com/golang/groupcache/lru/lru.go
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.00017812738951761276, 0.00017099585966207087, 0.0001619631511857733, 0.00017170148203149438, 0.000004418966454977635 ]
{ "id": 8, "code_window": [ "\t\t}\n", "\t\tkubeAPIServerConfig, sharedInformers, versionedInformers, _, _, err := app.CreateKubeAPIServerConfig(kubeAPIServerOptions, tunneler, proxyTransport)\n", "\t\tif err != nil {\n", "\t\t\tt.Fatal(err)\n", "\t\t}\n", "\t\tkubeAPIServerConfig.ExtraConfig.EnableCoreControllers = false\n", "\t\tkubeClientConfigValue.Store(kubeAPIServerConfig.GenericConfig.LoopbackClientConfig)\n", "\n", "\t\tkubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.EmptyDelegate, sharedInformers, versionedInformers)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "test/integration/tls/ciphers_test.go", "type": "replace", "edit_start_line_idx": 72 }
/* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */
staging/src/k8s.io/apiextensions-apiserver/hack/boilerplate.go.txt
0
https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b
[ 0.00017776664753910154, 0.00017761735944077373, 0.0001774680713424459, 0.00017761735944077373, 1.4928809832781553e-7 ]
{ "id": 0, "code_window": [ " configMap:\n", " name: kube-dns\n", " optional: true\n", " containers:\n", " - name: kubedns\n", " image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ " nodeSelector:\n", " kubernetes.io/os: linux\n" ], "file_path": "cluster/addons/dns/kube-dns/kube-dns.yaml.base", "type": "add", "edit_start_line_idx": 100 }
# Copyright 2016 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml # in sync with this file. # Warning: This is a file generated from the base underscore template file: kube-dns.yaml.base apiVersion: v1 kind: Service metadata: name: kube-dns namespace: kube-system labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "KubeDNS" spec: selector: k8s-app: kube-dns clusterIP: $DNS_SERVER_IP ports: - name: dns port: 53 protocol: UDP - name: dns-tcp port: 53 protocol: TCP --- apiVersion: v1 kind: ServiceAccount metadata: name: kube-dns namespace: kube-system labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile --- apiVersion: v1 kind: ConfigMap metadata: name: kube-dns namespace: kube-system labels: addonmanager.kubernetes.io/mode: EnsureExists --- apiVersion: apps/v1 kind: Deployment metadata: name: kube-dns namespace: kube-system labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile spec: # replicas: not specified here: # 1. In order to make Addon Manager do not reconcile this replicas parameter. # 2. Default is 1. # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on. strategy: rollingUpdate: maxSurge: 10% maxUnavailable: 0 selector: matchLabels: k8s-app: kube-dns template: metadata: labels: k8s-app: kube-dns annotations: seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' prometheus.io/port: "10054" prometheus.io/scrape: "true" spec: priorityClassName: system-cluster-critical securityContext: supplementalGroups: [ 65534 ] fsGroup: 65534 tolerations: - key: "CriticalAddonsOnly" operator: "Exists" volumes: - name: kube-dns-config configMap: name: kube-dns optional: true containers: - name: kubedns image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in # guaranteed class. Currently, this container falls into the # "burstable" category so the kubelet doesn't backoff from restarting it. limits: memory: $DNS_MEMORY_LIMIT requests: cpu: 100m memory: 70Mi livenessProbe: httpGet: path: /healthcheck/kubedns port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 readinessProbe: httpGet: path: /readiness port: 8081 scheme: HTTP # we poll on pod startup for the Kubernetes master service and # only setup the /readiness HTTP server once that's available. initialDelaySeconds: 3 timeoutSeconds: 5 args: - --domain=$DNS_DOMAIN. 
- --dns-port=10053 - --config-dir=/kube-dns-config - --v=2 env: - name: PROMETHEUS_PORT value: "10055" ports: - containerPort: 10053 name: dns-local protocol: UDP - containerPort: 10053 name: dns-tcp-local protocol: TCP - containerPort: 10055 name: metrics protocol: TCP volumeMounts: - name: kube-dns-config mountPath: /kube-dns-config securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true runAsUser: 1001 runAsGroup: 1001 - name: dnsmasq image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13 livenessProbe: httpGet: path: /healthcheck/dnsmasq port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 args: - -v=2 - -logtostderr - -configDir=/etc/k8s/dns/dnsmasq-nanny - -restartDnsmasq=true - -- - -k - --cache-size=1000 - --no-negcache - --dns-loop-detect - --log-facility=- - --server=/$DNS_DOMAIN/127.0.0.1#10053 - --server=/in-addr.arpa/127.0.0.1#10053 - --server=/ip6.arpa/127.0.0.1#10053 ports: - containerPort: 53 name: dns protocol: UDP - containerPort: 53 name: dns-tcp protocol: TCP # see: https://github.com/kubernetes/kubernetes/issues/29055 for details resources: requests: cpu: 150m memory: 20Mi volumeMounts: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny securityContext: capabilities: drop: - all add: - NET_BIND_SERVICE - SETGID - name: sidecar image: k8s.gcr.io/k8s-dns-sidecar:1.14.13 livenessProbe: httpGet: path: /metrics port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 args: - --v=2 - --logtostderr - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.$DNS_DOMAIN,5,SRV - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.$DNS_DOMAIN,5,SRV ports: - containerPort: 10054 name: metrics protocol: TCP resources: requests: memory: 20Mi cpu: 10m securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true runAsUser: 1001 runAsGroup: 1001 dnsPolicy: Default # Don't use cluster DNS. serviceAccountName: kube-dns
cluster/addons/dns/kube-dns/kube-dns.yaml.sed
1
https://github.com/kubernetes/kubernetes/commit/845c223d130287eb40897d8bfbe8076008823c64
[ 0.009149391204118729, 0.0023819587659090757, 0.00016194370982702821, 0.0008175366674549878, 0.0029749898239970207 ]
{ "id": 0, "code_window": [ " configMap:\n", " name: kube-dns\n", " optional: true\n", " containers:\n", " - name: kubedns\n", " image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ " nodeSelector:\n", " kubernetes.io/os: linux\n" ], "file_path": "cluster/addons/dns/kube-dns/kube-dns.yaml.base", "type": "add", "edit_start_line_idx": 100 }
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "sync" "time" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/klog" ) // ExpirationCache implements the store interface // 1. All entries are automatically time stamped on insert // a. The key is computed based off the original item/keyFunc // b. The value inserted under that key is the timestamped item // 2. Expiration happens lazily on read based on the expiration policy // a. No item can be inserted into the store while we're expiring // *any* item in the cache. // 3. Time-stamps are stripped off unexpired entries before return // Note that the ExpirationCache is inherently slower than a normal // threadSafeStore because it takes a write lock every time it checks if // an item has expired. type ExpirationCache struct { cacheStorage ThreadSafeStore keyFunc KeyFunc clock clock.Clock expirationPolicy ExpirationPolicy // expirationLock is a write lock used to guarantee that we don't clobber // newly inserted objects because of a stale expiration timestamp comparison expirationLock sync.Mutex } // ExpirationPolicy dictates when an object expires. Currently only abstracted out // so unittests don't rely on the system clock. type ExpirationPolicy interface { IsExpired(obj *TimestampedEntry) bool } // TTLPolicy implements a ttl based ExpirationPolicy. type TTLPolicy struct { // >0: Expire entries with an age > ttl // <=0: Don't expire any entry TTL time.Duration // Clock used to calculate ttl expiration Clock clock.Clock } // IsExpired returns true if the given object is older than the ttl, or it can't // determine its age. func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool { return p.TTL > 0 && p.Clock.Since(obj.Timestamp) > p.TTL } // TimestampedEntry is the only type allowed in a ExpirationCache. // Keep in mind that it is not safe to share timestamps between computers. // Behavior may be inconsistent if you get a timestamp from the API Server and // use it on the client machine as part of your ExpirationCache. type TimestampedEntry struct { Obj interface{} Timestamp time.Time key string } // getTimestampedEntry returns the TimestampedEntry stored under the given key. func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) { item, _ := c.cacheStorage.Get(key) if tsEntry, ok := item.(*TimestampedEntry); ok { return tsEntry, true } return nil, false } // getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't // already expired. It holds a write lock across deletion. func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { // Prevent all inserts from the time we deem an item as "expired" to when we // delete it, so an un-expired item doesn't sneak in under the same key, just // before the Delete. 
c.expirationLock.Lock() defer c.expirationLock.Unlock() timestampedItem, exists := c.getTimestampedEntry(key) if !exists { return nil, false } if c.expirationPolicy.IsExpired(timestampedItem) { klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj) c.cacheStorage.Delete(key) return nil, false } return timestampedItem.Obj, true } // GetByKey returns the item stored under the key, or sets exists=false. func (c *ExpirationCache) GetByKey(key string) (interface{}, bool, error) { obj, exists := c.getOrExpire(key) return obj, exists, nil } // Get returns unexpired items. It purges the cache of expired items in the // process. func (c *ExpirationCache) Get(obj interface{}) (interface{}, bool, error) { key, err := c.keyFunc(obj) if err != nil { return nil, false, KeyError{obj, err} } obj, exists := c.getOrExpire(key) return obj, exists, nil } // List retrieves a list of unexpired items. It purges the cache of expired // items in the process. func (c *ExpirationCache) List() []interface{} { items := c.cacheStorage.List() list := make([]interface{}, 0, len(items)) for _, item := range items { key := item.(*TimestampedEntry).key if obj, exists := c.getOrExpire(key); exists { list = append(list, obj) } } return list } // ListKeys returns a list of all keys in the expiration cache. func (c *ExpirationCache) ListKeys() []string { return c.cacheStorage.ListKeys() } // Add timestamps an item and inserts it into the cache, overwriting entries // that might exist under the same key. func (c *ExpirationCache) Add(obj interface{}) error { key, err := c.keyFunc(obj) if err != nil { return KeyError{obj, err} } c.expirationLock.Lock() defer c.expirationLock.Unlock() c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now(), key}) return nil } // Update has not been implemented yet for lack of a use case, so this method // simply calls `Add`. This effectively refreshes the timestamp. func (c *ExpirationCache) Update(obj interface{}) error { return c.Add(obj) } // Delete removes an item from the cache. func (c *ExpirationCache) Delete(obj interface{}) error { key, err := c.keyFunc(obj) if err != nil { return KeyError{obj, err} } c.expirationLock.Lock() defer c.expirationLock.Unlock() c.cacheStorage.Delete(key) return nil } // Replace will convert all items in the given list to TimestampedEntries // before attempting the replace operation. The replace operation will // delete the contents of the ExpirationCache `c`. func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error { items := make(map[string]interface{}, len(list)) ts := c.clock.Now() for _, item := range list { key, err := c.keyFunc(item) if err != nil { return KeyError{item, err} } items[key] = &TimestampedEntry{item, ts, key} } c.expirationLock.Lock() defer c.expirationLock.Unlock() c.cacheStorage.Replace(items, resourceVersion) return nil } // Resync will touch all objects to put them into the processing queue func (c *ExpirationCache) Resync() error { return c.cacheStorage.Resync() } // NewTTLStore creates and returns a ExpirationCache with a TTLPolicy func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store { return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}}) } // NewExpirationStore creates and returns a ExpirationCache for a given policy func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store { return &ExpirationCache{ cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), keyFunc: keyFunc, clock: clock.RealClock{}, expirationPolicy: expirationPolicy, } }
staging/src/k8s.io/client-go/tools/cache/expiration_cache.go
0
https://github.com/kubernetes/kubernetes/commit/845c223d130287eb40897d8bfbe8076008823c64
[ 0.0013416940346360207, 0.00022757398255635053, 0.00016298580158036202, 0.00016832676192279905, 0.00024406224838458002 ]
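The ExpirationCache above pairs a ThreadSafeStore with a lazily evaluated ExpirationPolicy. A minimal usage sketch of the exported NewTTLStore constructor, assuming the standard k8s.io/client-go/tools/cache import path (the key function and the "pod-a" value are invented for illustration):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// keyFunc maps a stored object to its cache key; here the object is its own key.
	keyFunc := func(obj interface{}) (string, error) { return obj.(string), nil }

	// Entries older than 30 seconds are expired lazily on the next read.
	store := cache.NewTTLStore(keyFunc, 30*time.Second)

	_ = store.Add("pod-a")

	if obj, exists, err := store.GetByKey("pod-a"); err == nil && exists {
		fmt.Println("still cached:", obj)
	}
}

Since Update simply delegates to Add, re-adding an object refreshes its timestamp and keeps it alive past the original TTL.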
{ "id": 0, "code_window": [ " configMap:\n", " name: kube-dns\n", " optional: true\n", " containers:\n", " - name: kubedns\n", " image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ " nodeSelector:\n", " kubernetes.io/os: linux\n" ], "file_path": "cluster/addons/dns/kube-dns/kube-dns.yaml.base", "type": "add", "edit_start_line_idx": 100 }
package reflect2 import ( "reflect" "unsafe" ) type UnsafePtrType struct { unsafeType } func newUnsafePtrType(cfg *frozenConfig, type1 reflect.Type) *UnsafePtrType { return &UnsafePtrType{ unsafeType: *newUnsafeType(cfg, type1), } } func (type2 *UnsafePtrType) IsNil(obj interface{}) bool { if obj == nil { return true } objEFace := unpackEFace(obj) assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) return type2.UnsafeIsNil(objEFace.data) } func (type2 *UnsafePtrType) UnsafeIsNil(ptr unsafe.Pointer) bool { if ptr == nil { return true } return *(*unsafe.Pointer)(ptr) == nil } func (type2 *UnsafePtrType) LikePtr() bool { return true } func (type2 *UnsafePtrType) Indirect(obj interface{}) interface{} { objEFace := unpackEFace(obj) assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) return type2.UnsafeIndirect(objEFace.data) } func (type2 *UnsafePtrType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr)) }
vendor/github.com/modern-go/reflect2/unsafe_ptr.go
0
https://github.com/kubernetes/kubernetes/commit/845c223d130287eb40897d8bfbe8076008823c64
[ 0.0002486825396772474, 0.00019999875803478062, 0.00016408623196184635, 0.000183442301931791, 0.00003635910616139881 ]
{ "id": 0, "code_window": [ " configMap:\n", " name: kube-dns\n", " optional: true\n", " containers:\n", " - name: kubedns\n", " image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ " nodeSelector:\n", " kubernetes.io/os: linux\n" ], "file_path": "cluster/addons/dns/kube-dns/kube-dns.yaml.base", "type": "add", "edit_start_line_idx": 100 }
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package core import ( "testing" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" api "k8s.io/kubernetes/pkg/apis/core" quota "k8s.io/kubernetes/pkg/quota/v1" "k8s.io/kubernetes/pkg/quota/v1/generic" ) func testVolumeClaim(name string, namespace string, spec api.PersistentVolumeClaimSpec) *api.PersistentVolumeClaim { return &api.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, Spec: spec, } } func TestPersistentVolumeClaimEvaluatorUsage(t *testing.T) { classGold := "gold" validClaim := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{ Selector: &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { Key: "key2", Operator: "Exists", }, }, }, AccessModes: []api.PersistentVolumeAccessMode{ api.ReadWriteOnce, api.ReadOnlyMany, }, Resources: api.ResourceRequirements{ Requests: api.ResourceList{ api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"), }, }, }) validClaimByStorageClass := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{ Selector: &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { Key: "key2", Operator: "Exists", }, }, }, AccessModes: []api.PersistentVolumeAccessMode{ api.ReadWriteOnce, api.ReadOnlyMany, }, Resources: api.ResourceRequirements{ Requests: api.ResourceList{ api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"), }, }, StorageClassName: &classGold, }) evaluator := NewPersistentVolumeClaimEvaluator(nil) testCases := map[string]struct { pvc *api.PersistentVolumeClaim usage corev1.ResourceList }{ "pvc-usage": { pvc: validClaim, usage: corev1.ResourceList{ corev1.ResourceRequestsStorage: resource.MustParse("10Gi"), corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "persistentvolumeclaims"}): resource.MustParse("1"), }, }, "pvc-usage-by-class": { pvc: validClaimByStorageClass, usage: corev1.ResourceList{ corev1.ResourceRequestsStorage: resource.MustParse("10Gi"), corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"), V1ResourceByStorageClass(classGold, corev1.ResourceRequestsStorage): resource.MustParse("10Gi"), V1ResourceByStorageClass(classGold, corev1.ResourcePersistentVolumeClaims): resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "persistentvolumeclaims"}): resource.MustParse("1"), }, }, } for testName, testCase := range testCases { actual, err := evaluator.Usage(testCase.pvc) if err != nil { t.Errorf("%s unexpected error: %v", testName, err) } if !quota.Equals(testCase.usage, actual) { t.Errorf("%s expected: %v, actual: %v", testName, testCase.usage, actual) } } }
pkg/quota/v1/evaluator/core/persistent_volume_claims_test.go
0
https://github.com/kubernetes/kubernetes/commit/845c223d130287eb40897d8bfbe8076008823c64
[ 0.00017620700236875564, 0.00017118232790380716, 0.00016541333752684295, 0.0001706683251541108, 0.0000033862518193927826 ]
{ "id": 1, "code_window": [ " configMap:\n", " name: kube-dns\n", " optional: true\n", " containers:\n", " - name: kubedns\n", " image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ " nodeSelector:\n", " kubernetes.io/os: linux\n" ], "file_path": "cluster/addons/dns/kube-dns/kube-dns.yaml.in", "type": "add", "edit_start_line_idx": 100 }
# Copyright 2016 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml # in sync with this file. # Warning: This is a file generated from the base underscore template file: kube-dns.yaml.base apiVersion: v1 kind: Service metadata: name: kube-dns namespace: kube-system labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "KubeDNS" spec: selector: k8s-app: kube-dns clusterIP: {{ pillar['dns_server'] }} ports: - name: dns port: 53 protocol: UDP - name: dns-tcp port: 53 protocol: TCP --- apiVersion: v1 kind: ServiceAccount metadata: name: kube-dns namespace: kube-system labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile --- apiVersion: v1 kind: ConfigMap metadata: name: kube-dns namespace: kube-system labels: addonmanager.kubernetes.io/mode: EnsureExists --- apiVersion: apps/v1 kind: Deployment metadata: name: kube-dns namespace: kube-system labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile spec: # replicas: not specified here: # 1. In order to make Addon Manager do not reconcile this replicas parameter. # 2. Default is 1. # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on. strategy: rollingUpdate: maxSurge: 10% maxUnavailable: 0 selector: matchLabels: k8s-app: kube-dns template: metadata: labels: k8s-app: kube-dns annotations: seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' prometheus.io/port: "10054" prometheus.io/scrape: "true" spec: priorityClassName: system-cluster-critical securityContext: supplementalGroups: [ 65534 ] fsGroup: 65534 tolerations: - key: "CriticalAddonsOnly" operator: "Exists" volumes: - name: kube-dns-config configMap: name: kube-dns optional: true containers: - name: kubedns image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in # guaranteed class. Currently, this container falls into the # "burstable" category so the kubelet doesn't backoff from restarting it. limits: memory: {{ pillar['dns_memory_limit'] }} requests: cpu: 100m memory: 70Mi livenessProbe: httpGet: path: /healthcheck/kubedns port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 readinessProbe: httpGet: path: /readiness port: 8081 scheme: HTTP # we poll on pod startup for the Kubernetes master service and # only setup the /readiness HTTP server once that's available. initialDelaySeconds: 3 timeoutSeconds: 5 args: - --domain={{ pillar['dns_domain'] }}. 
- --dns-port=10053 - --config-dir=/kube-dns-config - --v=2 env: - name: PROMETHEUS_PORT value: "10055" ports: - containerPort: 10053 name: dns-local protocol: UDP - containerPort: 10053 name: dns-tcp-local protocol: TCP - containerPort: 10055 name: metrics protocol: TCP volumeMounts: - name: kube-dns-config mountPath: /kube-dns-config securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true runAsUser: 1001 runAsGroup: 1001 - name: dnsmasq image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13 livenessProbe: httpGet: path: /healthcheck/dnsmasq port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 args: - -v=2 - -logtostderr - -configDir=/etc/k8s/dns/dnsmasq-nanny - -restartDnsmasq=true - -- - -k - --cache-size=1000 - --no-negcache - --dns-loop-detect - --log-facility=- - --server=/{{ pillar['dns_domain'] }}/127.0.0.1#10053 - --server=/in-addr.arpa/127.0.0.1#10053 - --server=/ip6.arpa/127.0.0.1#10053 ports: - containerPort: 53 name: dns protocol: UDP - containerPort: 53 name: dns-tcp protocol: TCP # see: https://github.com/kubernetes/kubernetes/issues/29055 for details resources: requests: cpu: 150m memory: 20Mi volumeMounts: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny securityContext: capabilities: drop: - all add: - NET_BIND_SERVICE - SETGID - name: sidecar image: k8s.gcr.io/k8s-dns-sidecar:1.14.13 livenessProbe: httpGet: path: /metrics port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 args: - --v=2 - --logtostderr - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ pillar['dns_domain'] }},5,SRV - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ pillar['dns_domain'] }},5,SRV ports: - containerPort: 10054 name: metrics protocol: TCP resources: requests: memory: 20Mi cpu: 10m securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true runAsUser: 1001 runAsGroup: 1001 dnsPolicy: Default # Don't use cluster DNS. serviceAccountName: kube-dns
cluster/addons/dns/kube-dns/kube-dns.yaml.in
1
https://github.com/kubernetes/kubernetes/commit/845c223d130287eb40897d8bfbe8076008823c64
[ 0.009149391204118729, 0.0022854118142277002, 0.00016194370982702821, 0.0006953375996090472, 0.0029955534264445305 ]
{ "id": 1, "code_window": [ " configMap:\n", " name: kube-dns\n", " optional: true\n", " containers:\n", " - name: kubedns\n", " image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ " nodeSelector:\n", " kubernetes.io/os: linux\n" ], "file_path": "cluster/addons/dns/kube-dns/kube-dns.yaml.in", "type": "add", "edit_start_line_idx": 100 }
/* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by client-gen. DO NOT EDIT. package v1 import ( rest "k8s.io/client-go/rest" ) // SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. // A group's client should implement this interface. type SelfSubjectAccessReviewsGetter interface { SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface } // SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. type SelfSubjectAccessReviewInterface interface { SelfSubjectAccessReviewExpansion } // selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface type selfSubjectAccessReviews struct { client rest.Interface } // newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessReviews { return &selfSubjectAccessReviews{ client: c.RESTClient(), } }
staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go
0
https://github.com/kubernetes/kubernetes/commit/845c223d130287eb40897d8bfbe8076008823c64
[ 0.0020220407750457525, 0.0005542772123590112, 0.0001668609183980152, 0.00017631775699555874, 0.0007342547760345042 ]
{ "id": 1, "code_window": [ " configMap:\n", " name: kube-dns\n", " optional: true\n", " containers:\n", " - name: kubedns\n", " image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ " nodeSelector:\n", " kubernetes.io/os: linux\n" ], "file_path": "cluster/addons/dns/kube-dns/kube-dns.yaml.in", "type": "add", "edit_start_line_idx": 100 }
// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package ct holds core types and utilities for Certificate Transparency. package ct import ( "crypto/sha256" "encoding/base64" "encoding/json" "fmt" "github.com/google/certificate-transparency-go/tls" "github.com/google/certificate-transparency-go/x509" ) /////////////////////////////////////////////////////////////////////////////// // The following structures represent those outlined in RFC6962; any section // numbers mentioned refer to that RFC. /////////////////////////////////////////////////////////////////////////////// // LogEntryType represents the LogEntryType enum from section 3.1: // enum { x509_entry(0), precert_entry(1), (65535) } LogEntryType; type LogEntryType tls.Enum // tls:"maxval:65535" // LogEntryType constants from section 3.1. const ( X509LogEntryType LogEntryType = 0 PrecertLogEntryType LogEntryType = 1 XJSONLogEntryType LogEntryType = 0x8000 // Experimental. Don't rely on this! ) func (e LogEntryType) String() string { switch e { case X509LogEntryType: return "X509LogEntryType" case PrecertLogEntryType: return "PrecertLogEntryType" case XJSONLogEntryType: return "XJSONLogEntryType" default: return fmt.Sprintf("UnknownEntryType(%d)", e) } } // RFC6962 section 2.1 requires a prefix byte on hash inputs for second preimage resistance. const ( TreeLeafPrefix = byte(0x00) TreeNodePrefix = byte(0x01) ) // MerkleLeafType represents the MerkleLeafType enum from section 3.4: // enum { timestamped_entry(0), (255) } MerkleLeafType; type MerkleLeafType tls.Enum // tls:"maxval:255" // TimestampedEntryLeafType is the only defined MerkleLeafType constant from section 3.4. const TimestampedEntryLeafType MerkleLeafType = 0 // Entry type for an SCT func (m MerkleLeafType) String() string { switch m { case TimestampedEntryLeafType: return "TimestampedEntryLeafType" default: return fmt.Sprintf("UnknownLeafType(%d)", m) } } // Version represents the Version enum from section 3.2: // enum { v1(0), (255) } Version; type Version tls.Enum // tls:"maxval:255" // CT Version constants from section 3.2. const ( V1 Version = 0 ) func (v Version) String() string { switch v { case V1: return "V1" default: return fmt.Sprintf("UnknownVersion(%d)", v) } } // SignatureType differentiates STH signatures from SCT signatures, see section 3.2. // enum { certificate_timestamp(0), tree_hash(1), (255) } SignatureType; type SignatureType tls.Enum // tls:"maxval:255" // SignatureType constants from section 3.2. const ( CertificateTimestampSignatureType SignatureType = 0 TreeHashSignatureType SignatureType = 1 ) func (st SignatureType) String() string { switch st { case CertificateTimestampSignatureType: return "CertificateTimestamp" case TreeHashSignatureType: return "TreeHash" default: return fmt.Sprintf("UnknownSignatureType(%d)", st) } } // ASN1Cert type for holding the raw DER bytes of an ASN.1 Certificate // (section 3.1). 
type ASN1Cert struct { Data []byte `tls:"minlen:1,maxlen:16777215"` } // LogID holds the hash of the Log's public key (section 3.2). // TODO(pphaneuf): Users should be migrated to the one in the logid package. type LogID struct { KeyID [sha256.Size]byte } // PreCert represents a Precertificate (section 3.2). type PreCert struct { IssuerKeyHash [sha256.Size]byte TBSCertificate []byte `tls:"minlen:1,maxlen:16777215"` // DER-encoded TBSCertificate } // CTExtensions is a representation of the raw bytes of any CtExtension // structure (see section 3.2). // nolint: golint type CTExtensions []byte // tls:"minlen:0,maxlen:65535"` // MerkleTreeNode represents an internal node in the CT tree. type MerkleTreeNode []byte // ConsistencyProof represents a CT consistency proof (see sections 2.1.2 and // 4.4). type ConsistencyProof []MerkleTreeNode // AuditPath represents a CT inclusion proof (see sections 2.1.1 and 4.5). type AuditPath []MerkleTreeNode // LeafInput represents a serialized MerkleTreeLeaf structure. type LeafInput []byte // DigitallySigned is a local alias for tls.DigitallySigned so that we can // attach a MarshalJSON method. type DigitallySigned tls.DigitallySigned // FromBase64String populates the DigitallySigned structure from the base64 data passed in. // Returns an error if the base64 data is invalid. func (d *DigitallySigned) FromBase64String(b64 string) error { raw, err := base64.StdEncoding.DecodeString(b64) if err != nil { return fmt.Errorf("failed to unbase64 DigitallySigned: %v", err) } var ds tls.DigitallySigned if rest, err := tls.Unmarshal(raw, &ds); err != nil { return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err) } else if len(rest) > 0 { return fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)) } *d = DigitallySigned(ds) return nil } // Base64String returns the base64 representation of the DigitallySigned struct. func (d DigitallySigned) Base64String() (string, error) { b, err := tls.Marshal(d) if err != nil { return "", err } return base64.StdEncoding.EncodeToString(b), nil } // MarshalJSON implements the json.Marshaller interface. func (d DigitallySigned) MarshalJSON() ([]byte, error) { b64, err := d.Base64String() if err != nil { return []byte{}, err } return []byte(`"` + b64 + `"`), nil } // UnmarshalJSON implements the json.Unmarshaler interface. func (d *DigitallySigned) UnmarshalJSON(b []byte) error { var content string if err := json.Unmarshal(b, &content); err != nil { return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err) } return d.FromBase64String(content) } // RawLogEntry represents the (TLS-parsed) contents of an entry in a CT log. type RawLogEntry struct { // Index is a position of the entry in the log. Index int64 // Leaf is a parsed Merkle leaf hash input. Leaf MerkleTreeLeaf // Cert is: // - A certificate if Leaf.TimestampedEntry.EntryType is X509LogEntryType. // - A precertificate if Leaf.TimestampedEntry.EntryType is // PrecertLogEntryType, in the form of a DER-encoded Certificate as // originally added (which includes the poison extension and a signature // generated over the pre-cert by the pre-cert issuer). // - Empty otherwise. Cert ASN1Cert // Chain is the issuing certificate chain starting with the issuer of Cert, // or an empty slice if Cert is empty. Chain []ASN1Cert } // LogEntry represents the (parsed) contents of an entry in a CT log. 
This is described // in section 3.1, but note that this structure does *not* match the TLS structure // defined there (the TLS structure is never used directly in RFC6962). type LogEntry struct { Index int64 Leaf MerkleTreeLeaf // Exactly one of the following three fields should be non-empty. X509Cert *x509.Certificate // Parsed X.509 certificate Precert *Precertificate // Extracted precertificate JSONData []byte // Chain holds the issuing certificate chain, starting with the // issuer of the leaf certificate / pre-certificate. Chain []ASN1Cert } // PrecertChainEntry holds an precertificate together with a validation chain // for it; see section 3.1. type PrecertChainEntry struct { PreCertificate ASN1Cert `tls:"minlen:1,maxlen:16777215"` CertificateChain []ASN1Cert `tls:"minlen:0,maxlen:16777215"` } // CertificateChain holds a chain of certificates, as returned as extra data // for get-entries (section 4.6). type CertificateChain struct { Entries []ASN1Cert `tls:"minlen:0,maxlen:16777215"` } // JSONDataEntry holds arbitrary data. type JSONDataEntry struct { Data []byte `tls:"minlen:0,maxlen:1677215"` } // SHA256Hash represents the output from the SHA256 hash function. type SHA256Hash [sha256.Size]byte // FromBase64String populates the SHA256 struct with the contents of the base64 data passed in. func (s *SHA256Hash) FromBase64String(b64 string) error { bs, err := base64.StdEncoding.DecodeString(b64) if err != nil { return fmt.Errorf("failed to unbase64 LogID: %v", err) } if len(bs) != sha256.Size { return fmt.Errorf("invalid SHA256 length, expected 32 but got %d", len(bs)) } copy(s[:], bs) return nil } // Base64String returns the base64 representation of this SHA256Hash. func (s SHA256Hash) Base64String() string { return base64.StdEncoding.EncodeToString(s[:]) } // MarshalJSON implements the json.Marshaller interface for SHA256Hash. func (s SHA256Hash) MarshalJSON() ([]byte, error) { return []byte(`"` + s.Base64String() + `"`), nil } // UnmarshalJSON implements the json.Unmarshaller interface. func (s *SHA256Hash) UnmarshalJSON(b []byte) error { var content string if err := json.Unmarshal(b, &content); err != nil { return fmt.Errorf("failed to unmarshal SHA256Hash: %v", err) } return s.FromBase64String(content) } // SignedTreeHead represents the structure returned by the get-sth CT method // after base64 decoding; see sections 3.5 and 4.3. type SignedTreeHead struct { Version Version `json:"sth_version"` // The version of the protocol to which the STH conforms TreeSize uint64 `json:"tree_size"` // The number of entries in the new tree Timestamp uint64 `json:"timestamp"` // The time at which the STH was created SHA256RootHash SHA256Hash `json:"sha256_root_hash"` // The root hash of the log's Merkle tree TreeHeadSignature DigitallySigned `json:"tree_head_signature"` // Log's signature over a TLS-encoded TreeHeadSignature LogID SHA256Hash `json:"log_id"` // The SHA256 hash of the log's public key } // TreeHeadSignature holds the data over which the signature in an STH is // generated; see section 3.5 type TreeHeadSignature struct { Version Version `tls:"maxval:255"` SignatureType SignatureType `tls:"maxval:255"` // == TreeHashSignatureType Timestamp uint64 TreeSize uint64 SHA256RootHash SHA256Hash } // SignedCertificateTimestamp represents the structure returned by the // add-chain and add-pre-chain methods after base64 decoding; see sections // 3.2, 4.1 and 4.2. 
type SignedCertificateTimestamp struct { SCTVersion Version `tls:"maxval:255"` LogID LogID Timestamp uint64 Extensions CTExtensions `tls:"minlen:0,maxlen:65535"` Signature DigitallySigned // Signature over TLS-encoded CertificateTimestamp } // CertificateTimestamp is the collection of data that the signature in an // SCT is over; see section 3.2. type CertificateTimestamp struct { SCTVersion Version `tls:"maxval:255"` SignatureType SignatureType `tls:"maxval:255"` Timestamp uint64 EntryType LogEntryType `tls:"maxval:65535"` X509Entry *ASN1Cert `tls:"selector:EntryType,val:0"` PrecertEntry *PreCert `tls:"selector:EntryType,val:1"` JSONEntry *JSONDataEntry `tls:"selector:EntryType,val:32768"` Extensions CTExtensions `tls:"minlen:0,maxlen:65535"` } func (s SignedCertificateTimestamp) String() string { return fmt.Sprintf("{Version:%d LogId:%s Timestamp:%d Extensions:'%s' Signature:%v}", s.SCTVersion, base64.StdEncoding.EncodeToString(s.LogID.KeyID[:]), s.Timestamp, s.Extensions, s.Signature) } // TimestampedEntry is part of the MerkleTreeLeaf structure; see section 3.4. type TimestampedEntry struct { Timestamp uint64 EntryType LogEntryType `tls:"maxval:65535"` X509Entry *ASN1Cert `tls:"selector:EntryType,val:0"` PrecertEntry *PreCert `tls:"selector:EntryType,val:1"` JSONEntry *JSONDataEntry `tls:"selector:EntryType,val:32768"` Extensions CTExtensions `tls:"minlen:0,maxlen:65535"` } // MerkleTreeLeaf represents the deserialized structure of the hash input for the // leaves of a log's Merkle tree; see section 3.4. type MerkleTreeLeaf struct { Version Version `tls:"maxval:255"` LeafType MerkleLeafType `tls:"maxval:255"` TimestampedEntry *TimestampedEntry `tls:"selector:LeafType,val:0"` } // Precertificate represents the parsed CT Precertificate structure. type Precertificate struct { // DER-encoded pre-certificate as originally added, which includes a // poison extension and a signature generated over the pre-cert by // the pre-cert issuer (which might differ from the issuer of the final // cert, see RFC6962 s3.1). Submitted ASN1Cert // SHA256 hash of the issuing key IssuerKeyHash [sha256.Size]byte // Parsed TBSCertificate structure, held in an x509.Certificate for convenience. TBSCertificate *x509.Certificate } // X509Certificate returns the X.509 Certificate contained within the // MerkleTreeLeaf. func (m *MerkleTreeLeaf) X509Certificate() (*x509.Certificate, error) { if m.TimestampedEntry.EntryType != X509LogEntryType { return nil, fmt.Errorf("cannot call X509Certificate on a MerkleTreeLeaf that is not an X509 entry") } return x509.ParseCertificate(m.TimestampedEntry.X509Entry.Data) } // Precertificate returns the X.509 Precertificate contained within the MerkleTreeLeaf. // // The returned precertificate is embedded in an x509.Certificate, but is in the // form stored internally in the log rather than the original submitted form // (i.e. it does not include the poison extension and any changes to reflect the // final certificate's issuer have been made; see x509.BuildPrecertTBS). func (m *MerkleTreeLeaf) Precertificate() (*x509.Certificate, error) { if m.TimestampedEntry.EntryType != PrecertLogEntryType { return nil, fmt.Errorf("cannot call Precertificate on a MerkleTreeLeaf that is not a precert entry") } return x509.ParseTBSCertificate(m.TimestampedEntry.PrecertEntry.TBSCertificate) } // APIEndpoint is a string that represents one of the Certificate Transparency // Log API endpoints. type APIEndpoint string // Certificate Transparency Log API endpoints; see section 4. 
// WARNING: Should match the URI paths without the "/ct/v1/" prefix. If // changing these constants, may need to change those too. const ( AddChainStr APIEndpoint = "add-chain" AddPreChainStr APIEndpoint = "add-pre-chain" GetSTHStr APIEndpoint = "get-sth" GetEntriesStr APIEndpoint = "get-entries" GetProofByHashStr APIEndpoint = "get-proof-by-hash" GetSTHConsistencyStr APIEndpoint = "get-sth-consistency" GetRootsStr APIEndpoint = "get-roots" GetEntryAndProofStr APIEndpoint = "get-entry-and-proof" ) // URI paths for Log requests; see section 4. // WARNING: Should match the API endpoints, with the "/ct/v1/" prefix. If // changing these constants, may need to change those too. const ( AddChainPath = "/ct/v1/add-chain" AddPreChainPath = "/ct/v1/add-pre-chain" GetSTHPath = "/ct/v1/get-sth" GetEntriesPath = "/ct/v1/get-entries" GetProofByHashPath = "/ct/v1/get-proof-by-hash" GetSTHConsistencyPath = "/ct/v1/get-sth-consistency" GetRootsPath = "/ct/v1/get-roots" GetEntryAndProofPath = "/ct/v1/get-entry-and-proof" AddJSONPath = "/ct/v1/add-json" // Experimental addition ) // AddChainRequest represents the JSON request body sent to the add-chain and // add-pre-chain POST methods from sections 4.1 and 4.2. type AddChainRequest struct { Chain [][]byte `json:"chain"` } // AddChainResponse represents the JSON response to the add-chain and // add-pre-chain POST methods. // An SCT represents a Log's promise to integrate a [pre-]certificate into the // log within a defined period of time. type AddChainResponse struct { SCTVersion Version `json:"sct_version"` // SCT structure version ID []byte `json:"id"` // Log ID Timestamp uint64 `json:"timestamp"` // Timestamp of issuance Extensions string `json:"extensions"` // Holder for any CT extensions Signature []byte `json:"signature"` // Log signature for this SCT } // AddJSONRequest represents the JSON request body sent to the add-json POST method. // The corresponding response re-uses AddChainResponse. // This is an experimental addition not covered by RFC6962. type AddJSONRequest struct { Data interface{} `json:"data"` } // GetSTHResponse respresents the JSON response to the get-sth GET method from section 4.3. type GetSTHResponse struct { TreeSize uint64 `json:"tree_size"` // Number of certs in the current tree Timestamp uint64 `json:"timestamp"` // Time that the tree was created SHA256RootHash []byte `json:"sha256_root_hash"` // Root hash of the tree TreeHeadSignature []byte `json:"tree_head_signature"` // Log signature for this STH } // ToSignedTreeHead creates a SignedTreeHead from the GetSTHResponse. func (r *GetSTHResponse) ToSignedTreeHead() (*SignedTreeHead, error) { sth := SignedTreeHead{ TreeSize: r.TreeSize, Timestamp: r.Timestamp, } if len(r.SHA256RootHash) != sha256.Size { return nil, fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(r.SHA256RootHash)) } copy(sth.SHA256RootHash[:], r.SHA256RootHash) var ds DigitallySigned if rest, err := tls.Unmarshal(r.TreeHeadSignature, &ds); err != nil { return nil, fmt.Errorf("tls.Unmarshal(): %s", err) } else if len(rest) > 0 { return nil, fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)) } sth.TreeHeadSignature = ds return &sth, nil } // GetSTHConsistencyResponse represents the JSON response to the get-sth-consistency // GET method from section 4.4. (The corresponding GET request has parameters 'first' and // 'second'.) 
type GetSTHConsistencyResponse struct { Consistency [][]byte `json:"consistency"` } // GetProofByHashResponse represents the JSON response to the get-proof-by-hash GET // method from section 4.5. (The corresponding GET request has parameters 'hash' // and 'tree_size'.) type GetProofByHashResponse struct { LeafIndex int64 `json:"leaf_index"` // The 0-based index of the end entity corresponding to the "hash" parameter. AuditPath [][]byte `json:"audit_path"` // An array of base64-encoded Merkle Tree nodes proving the inclusion of the chosen certificate. } // LeafEntry represents a leaf in the Log's Merkle tree, as returned by the get-entries // GET method from section 4.6. type LeafEntry struct { // LeafInput is a TLS-encoded MerkleTreeLeaf LeafInput []byte `json:"leaf_input"` // ExtraData holds (unsigned) extra data, normally the cert validation chain. ExtraData []byte `json:"extra_data"` } // GetEntriesResponse respresents the JSON response to the get-entries GET method // from section 4.6. type GetEntriesResponse struct { Entries []LeafEntry `json:"entries"` // the list of returned entries } // GetRootsResponse represents the JSON response to the get-roots GET method from section 4.7. type GetRootsResponse struct { Certificates []string `json:"certificates"` } // GetEntryAndProofResponse represents the JSON response to the get-entry-and-proof // GET method from section 4.8. (The corresponding GET request has parameters 'leaf_index' // and 'tree_size'.) type GetEntryAndProofResponse struct { LeafInput []byte `json:"leaf_input"` // the entry itself ExtraData []byte `json:"extra_data"` // any chain provided when the entry was added to the log AuditPath [][]byte `json:"audit_path"` // the corresponding proof }
vendor/github.com/google/certificate-transparency-go/types.go
0
https://github.com/kubernetes/kubernetes/commit/845c223d130287eb40897d8bfbe8076008823c64
[ 0.006614706479012966, 0.000500124238897115, 0.00016159463848453015, 0.00017638015560805798, 0.0010671995114535093 ]
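The SHA256Hash and DigitallySigned aliases above carry their own base64 helpers. A small round-trip sketch, assuming the upstream github.com/google/certificate-transparency-go import path (the "example leaf" input is chosen purely for illustration):

package main

import (
	"crypto/sha256"
	"fmt"

	ct "github.com/google/certificate-transparency-go"
)

func main() {
	// sha256.Sum256 returns a [32]byte, which converts directly to the CT alias type.
	h := ct.SHA256Hash(sha256.Sum256([]byte("example leaf")))

	// Base64String and FromBase64String are inverses for well-formed input.
	encoded := h.Base64String()

	var decoded ct.SHA256Hash
	if err := decoded.FromBase64String(encoded); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(decoded == h) // prints: true
}

FromBase64String rejects anything that does not decode to exactly 32 bytes, which is why GetSTHResponse.ToSignedTreeHead length-checks the root hash before copying it.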
{ "id": 1, "code_window": [ " configMap:\n", " name: kube-dns\n", " optional: true\n", " containers:\n", " - name: kubedns\n", " image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ " nodeSelector:\n", " kubernetes.io/os: linux\n" ], "file_path": "cluster/addons/dns/kube-dns/kube-dns.yaml.in", "type": "add", "edit_start_line_idx": 100 }
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !linux

package netutil

import (
	"fmt"
	"runtime"
)

// GetDefaultHost fetches a resolvable name that corresponds
// to the machine's default routable interface.
func GetDefaultHost() (string, error) {
	return "", fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
}

// GetDefaultInterfaces fetches the device name of the default routable interface.
func GetDefaultInterfaces() (map[string]uint8, error) {
	return nil, fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
}
vendor/github.com/coreos/etcd/pkg/netutil/routes.go
0
https://github.com/kubernetes/kubernetes/commit/845c223d130287eb40897d8bfbe8076008823c64
[ 0.0006502845790237188, 0.00029776449082419276, 0.00017442170064896345, 0.00018317582726012915, 0.00020363992371130735 ]
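The routes.go stub above is the non-Linux fallback, so both functions unconditionally return an error. A short caller sketch, assuming the vendored github.com/coreos/etcd/pkg/netutil import path:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/netutil"
)

func main() {
	// On builds without the Linux implementation the error branch is always taken.
	host, err := netutil.GetDefaultHost()
	if err != nil {
		fmt.Println("no default host:", err)
		return
	}
	fmt.Println("default host:", host)
}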
{ "id": 2, "code_window": [ " - name: kube-dns-config\n", " configMap:\n", " name: kube-dns\n", " optional: true\n", " containers:\n", " - name: kubedns\n", " image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13\n", " resources:\n", " # TODO: Set memory limits when we've profiled the container for large\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " nodeSelector:\n", " kubernetes.io/os: linux\n" ], "file_path": "cluster/addons/dns/kube-dns/kube-dns.yaml.sed", "type": "add", "edit_start_line_idx": 100 }
# Copyright 2016 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml # in sync with this file. # Warning: This is a file generated from the base underscore template file: kube-dns.yaml.base apiVersion: v1 kind: Service metadata: name: kube-dns namespace: kube-system labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "KubeDNS" spec: selector: k8s-app: kube-dns clusterIP: {{ pillar['dns_server'] }} ports: - name: dns port: 53 protocol: UDP - name: dns-tcp port: 53 protocol: TCP --- apiVersion: v1 kind: ServiceAccount metadata: name: kube-dns namespace: kube-system labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile --- apiVersion: v1 kind: ConfigMap metadata: name: kube-dns namespace: kube-system labels: addonmanager.kubernetes.io/mode: EnsureExists --- apiVersion: apps/v1 kind: Deployment metadata: name: kube-dns namespace: kube-system labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile spec: # replicas: not specified here: # 1. In order to make Addon Manager do not reconcile this replicas parameter. # 2. Default is 1. # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on. strategy: rollingUpdate: maxSurge: 10% maxUnavailable: 0 selector: matchLabels: k8s-app: kube-dns template: metadata: labels: k8s-app: kube-dns annotations: seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' prometheus.io/port: "10054" prometheus.io/scrape: "true" spec: priorityClassName: system-cluster-critical securityContext: supplementalGroups: [ 65534 ] fsGroup: 65534 tolerations: - key: "CriticalAddonsOnly" operator: "Exists" volumes: - name: kube-dns-config configMap: name: kube-dns optional: true containers: - name: kubedns image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in # guaranteed class. Currently, this container falls into the # "burstable" category so the kubelet doesn't backoff from restarting it. limits: memory: {{ pillar['dns_memory_limit'] }} requests: cpu: 100m memory: 70Mi livenessProbe: httpGet: path: /healthcheck/kubedns port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 readinessProbe: httpGet: path: /readiness port: 8081 scheme: HTTP # we poll on pod startup for the Kubernetes master service and # only setup the /readiness HTTP server once that's available. initialDelaySeconds: 3 timeoutSeconds: 5 args: - --domain={{ pillar['dns_domain'] }}. 
- --dns-port=10053 - --config-dir=/kube-dns-config - --v=2 env: - name: PROMETHEUS_PORT value: "10055" ports: - containerPort: 10053 name: dns-local protocol: UDP - containerPort: 10053 name: dns-tcp-local protocol: TCP - containerPort: 10055 name: metrics protocol: TCP volumeMounts: - name: kube-dns-config mountPath: /kube-dns-config securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true runAsUser: 1001 runAsGroup: 1001 - name: dnsmasq image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13 livenessProbe: httpGet: path: /healthcheck/dnsmasq port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 args: - -v=2 - -logtostderr - -configDir=/etc/k8s/dns/dnsmasq-nanny - -restartDnsmasq=true - -- - -k - --cache-size=1000 - --no-negcache - --dns-loop-detect - --log-facility=- - --server=/{{ pillar['dns_domain'] }}/127.0.0.1#10053 - --server=/in-addr.arpa/127.0.0.1#10053 - --server=/ip6.arpa/127.0.0.1#10053 ports: - containerPort: 53 name: dns protocol: UDP - containerPort: 53 name: dns-tcp protocol: TCP # see: https://github.com/kubernetes/kubernetes/issues/29055 for details resources: requests: cpu: 150m memory: 20Mi volumeMounts: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny securityContext: capabilities: drop: - all add: - NET_BIND_SERVICE - SETGID - name: sidecar image: k8s.gcr.io/k8s-dns-sidecar:1.14.13 livenessProbe: httpGet: path: /metrics port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 args: - --v=2 - --logtostderr - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ pillar['dns_domain'] }},5,SRV - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ pillar['dns_domain'] }},5,SRV ports: - containerPort: 10054 name: metrics protocol: TCP resources: requests: memory: 20Mi cpu: 10m securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true runAsUser: 1001 runAsGroup: 1001 dnsPolicy: Default # Don't use cluster DNS. serviceAccountName: kube-dns
cluster/addons/dns/kube-dns/kube-dns.yaml.in
1
https://github.com/kubernetes/kubernetes/commit/845c223d130287eb40897d8bfbe8076008823c64
[ 0.10789649188518524, 0.005461502820253372, 0.00016399800369981676, 0.00024084470351226628, 0.021432960405945778 ]
{ "id": 2, "code_window": [ " - name: kube-dns-config\n", " configMap:\n", " name: kube-dns\n", " optional: true\n", " containers:\n", " - name: kubedns\n", " image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13\n", " resources:\n", " # TODO: Set memory limits when we've profiled the container for large\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " nodeSelector:\n", " kubernetes.io/os: linux\n" ], "file_path": "cluster/addons/dns/kube-dns/kube-dns.yaml.sed", "type": "add", "edit_start_line_idx": 100 }
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package configmap

import v1 "k8s.io/api/core/v1"

// fakeManager implements the Manager interface for testing purposes; it
// performs no operations against the apiserver.
type fakeManager struct {
}

// NewFakeManager creates an empty/fake ConfigMap manager.
func NewFakeManager() Manager {
	return &fakeManager{}
}

func (s *fakeManager) GetConfigMap(namespace, name string) (*v1.ConfigMap, error) {
	return nil, nil
}

func (s *fakeManager) RegisterPod(pod *v1.Pod) {
}

func (s *fakeManager) UnregisterPod(pod *v1.Pod) {
}
pkg/kubelet/configmap/fake_manager.go
0
https://github.com/kubernetes/kubernetes/commit/845c223d130287eb40897d8bfbe8076008823c64
[ 0.0008168758940882981, 0.0003632557054515928, 0.00017424783436581492, 0.00023094951757229865, 0.00026569110923446715 ]
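Every method of the fake ConfigMap manager above is a no-op, so callers in tests must tolerate nil results. A minimal sketch, assuming the in-tree k8s.io/kubernetes/pkg/kubelet/configmap import path (namespace and name are chosen only for illustration):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/configmap"
)

func main() {
	m := configmap.NewFakeManager()

	// GetConfigMap always returns (nil, nil) from the fake implementation.
	cm, err := m.GetConfigMap("kube-system", "kube-dns")
	fmt.Println(cm, err) // prints: <nil> <nil>

	// RegisterPod and UnregisterPod are also no-ops, so nil pods are safe in tests.
	m.RegisterPod(nil)
	m.UnregisterPod(nil)
}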
{ "id": 2, "code_window": [ " - name: kube-dns-config\n", " configMap:\n", " name: kube-dns\n", " optional: true\n", " containers:\n", " - name: kubedns\n", " image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13\n", " resources:\n", " # TODO: Set memory limits when we've profiled the container for large\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " nodeSelector:\n", " kubernetes.io/os: linux\n" ], "file_path": "cluster/addons/dns/kube-dns/kube-dns.yaml.sed", "type": "add", "edit_start_line_idx": 100 }
# certdb usage Using a database enables additional functionality for existing commands when a db config is provided: - `sign` and `gencert` add a certificate to the certdb after signing it - `serve` enables database functionality for the sign and revoke endpoints A database is required for the following: - `revoke` marks certificates revoked in the database with an optional reason - `ocsprefresh` refreshes the table of cached OCSP responses - `ocspdump` outputs cached OCSP responses in a concatenated base64-encoded format ## Setup/Migration This directory stores [goose](https://bitbucket.org/liamstask/goose/) db migration scripts for various DB backends. Currently supported: - MySQL in mysql - PostgreSQL in pg - SQLite in sqlite ### Get goose go get bitbucket.org/liamstask/goose/cmd/goose ### Use goose to start and terminate a MySQL DB To start a MySQL using goose: goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql up To tear down a MySQL DB using goose goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql down Note: the administration of MySQL DB is not included. We assume the databases being connected to are already created and access control is properly handled. ### Use goose to start and terminate a PostgreSQL DB To start a PostgreSQL using goose: goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg up To tear down a PostgreSQL DB using goose goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg down Note: the administration of PostgreSQL DB is not included. We assume the databases being connected to are already created and access control is properly handled. ### Use goose to start and terminate a SQLite DB To start a SQLite DB using goose: goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite up To tear down a SQLite DB using goose goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite down ## CFSSL Configuration Several cfssl commands take a -db-config flag. Create a file with a JSON dictionary: {"driver":"sqlite3","data_source":"certs.db"} or {"driver":"postgres","data_source":"postgres://user:password@host/db"} or {"driver":"mysql","data_source":"user:password@tcp(hostname:3306)/db?parseTime=true"}
vendor/github.com/cloudflare/cfssl/certdb/README.md
0
https://github.com/kubernetes/kubernetes/commit/845c223d130287eb40897d8bfbe8076008823c64
[ 0.00017039605882018805, 0.0001634002837818116, 0.00016077391046565026, 0.00016256669186986983, 0.000002881127102227765 ]
{ "id": 2, "code_window": [ " - name: kube-dns-config\n", " configMap:\n", " name: kube-dns\n", " optional: true\n", " containers:\n", " - name: kubedns\n", " image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13\n", " resources:\n", " # TODO: Set memory limits when we've profiled the container for large\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " nodeSelector:\n", " kubernetes.io/os: linux\n" ], "file_path": "cluster/addons/dns/kube-dns/kube-dns.yaml.sed", "type": "add", "edit_start_line_idx": 100 }
package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", ) go_library( name = "go_default_library", srcs = [ "customresourcedefinition.go", "interface.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion", importpath = "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion", deps = [ "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces:go_default_library", "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", ], ) filegroup( name = "package-srcs", srcs = glob(["**"]), tags = ["automanaged"], visibility = ["//visibility:private"], ) filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], )
staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/BUILD
0
https://github.com/kubernetes/kubernetes/commit/845c223d130287eb40897d8bfbe8076008823c64
[ 0.00017348455730825663, 0.00016774964751675725, 0.0001629996404517442, 0.00016725721070542932, 0.000003969251338276081 ]
{ "id": 0, "code_window": [ "func (s *ParallelUnorderedSynchronizer) Init(ctx context.Context) {\n", "\tif !s.InitHelper.Init(ctx) {\n", "\t\treturn\n", "\t}\n", "\tfor _, input := range s.inputs {\n", "\t\tinput.Root.Init(s.Ctx)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "\tfor i, input := range s.inputs {\n" ], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "replace", "edit_start_line_idx": 154 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colrpc import ( "bytes" "context" "fmt" "io" "sync/atomic" "time" "github.com/cockroachdb/cockroach/pkg/col/colserde" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/cockroachdb/logtags" ) // flowStreamClient is a utility interface used to mock out the RPC layer. type flowStreamClient interface { Send(*execinfrapb.ProducerMessage) error Recv() (*execinfrapb.ConsumerSignal, error) CloseSend() error } // Outbox is used to push data from local flows to a remote endpoint. Run may // be called with the necessary information to establish a connection to a // given remote endpoint. type Outbox struct { colexecop.OneInputNode typs []*types.T converter *colserde.ArrowBatchConverter serializer *colserde.RecordBatchSerializer // draining is an atomic that represents whether the Outbox is draining. draining uint32 metadataSources colexecop.MetadataSources // closers is a slice of Closers that need to be Closed on termination. closers colexecop.Closers scratch struct { buf *bytes.Buffer msg *execinfrapb.ProducerMessage } span *tracing.Span // getStats, when non-nil, returns all of the execution statistics of the // operators that are in the same tree as this Outbox. The stats will be // added into the span as Structured payload and returned to the gateway as // execinfrapb.ProducerMetadata. getStats func() []*execinfrapb.ComponentStats // A copy of Run's caller ctx, with no StreamID tag. // Used to pass a clean context to the input.Next. runnerCtx context.Context } // NewOutbox creates a new Outbox. // - getStats, when non-nil, returns all of the execution statistics of the // operators that are in the same tree as this Outbox. func NewOutbox( allocator *colmem.Allocator, input colexecop.Operator, typs []*types.T, getStats func() []*execinfrapb.ComponentStats, metadataSources []colexecop.MetadataSource, toClose []colexecop.Closer, ) (*Outbox, error) { c, err := colserde.NewArrowBatchConverter(typs) if err != nil { return nil, err } s, err := colserde.NewRecordBatchSerializer(typs) if err != nil { return nil, err } o := &Outbox{ // Add a deselector as selection vectors are not serialized (nor should they // be). 
OneInputNode: colexecop.NewOneInputNode(colexecutils.NewDeselectorOp(allocator, input, typs)), typs: typs, converter: c, serializer: s, getStats: getStats, metadataSources: metadataSources, closers: toClose, } o.scratch.buf = &bytes.Buffer{} o.scratch.msg = &execinfrapb.ProducerMessage{} return o, nil } func (o *Outbox) close(ctx context.Context) { o.closers.CloseAndLogOnErr(ctx, "outbox") } // Run starts an outbox by connecting to the provided node and pushing // coldata.Batches over the stream after sending a header with the provided flow // and stream ID. Note that an extra goroutine is spawned so that Recv may be // called concurrently wrt the Send goroutine to listen for drain signals. // If an io.EOF is received while sending, the outbox will cancel all components // from the same tree as the outbox. // If non-io.EOF is received while sending, the outbox will call flowCtxCancel // to shutdown all parts of the flow on this node. // If an error is encountered that cannot be sent over the stream, the error // will be logged but not returned. // There are several ways the bidirectional FlowStream RPC may terminate. // 1) Execution is finished. In this case, the upstream operator signals // termination by returning a zero-length batch. The Outbox will drain its // metadata sources, send the metadata, and then call CloseSend on the // stream. The Outbox will wait until its Recv goroutine receives a non-nil // error to not leak resources. // 2) A cancellation happened. This can come from the provided context or the // remote reader. Refer to tests for expected behavior. // 3) A drain signal was received from the server (consumer). In this case, the // Outbox goes through the same steps as 1). func (o *Outbox) Run( ctx context.Context, dialer execinfra.Dialer, nodeID roachpb.NodeID, flowID execinfrapb.FlowID, streamID execinfrapb.StreamID, flowCtxCancel context.CancelFunc, connectionTimeout time.Duration, ) { // Derive a child context so that we can cancel all components rooted in // this outbox. var outboxCtxCancel context.CancelFunc ctx, outboxCtxCancel = context.WithCancel(ctx) // Calling outboxCtxCancel is not strictly necessary, but we do it just to // be safe. defer outboxCtxCancel() ctx, o.span = execinfra.ProcessorSpan(ctx, "outbox") if o.span != nil { defer o.span.Finish() } o.runnerCtx = ctx ctx = logtags.AddTag(ctx, "streamID", streamID) log.VEventf(ctx, 2, "Outbox Dialing %s", nodeID) var stream execinfrapb.DistSQL_FlowStreamClient if err := func() error { conn, err := execinfra.GetConnForOutbox(ctx, dialer, nodeID, connectionTimeout) if err != nil { log.Warningf( ctx, "Outbox Dial connection error, distributed query will fail: %+v", err, ) return err } client := execinfrapb.NewDistSQLClient(conn) stream, err = client.FlowStream(ctx) if err != nil { log.Warningf( ctx, "Outbox FlowStream connection error, distributed query will fail: %+v", err, ) return err } log.VEvent(ctx, 2, "Outbox sending header") // Send header message to establish the remote server (consumer). if err := stream.Send( &execinfrapb.ProducerMessage{Header: &execinfrapb.ProducerHeader{FlowID: flowID, StreamID: streamID}}, ); err != nil { log.Warningf( ctx, "Outbox Send header error, distributed query will fail: %+v", err, ) return err } return nil }(); err != nil { // error during stream set up. 
o.close(ctx) return } log.VEvent(ctx, 2, "Outbox starting normal operation") o.runWithStream(ctx, stream, flowCtxCancel, outboxCtxCancel) log.VEvent(ctx, 2, "Outbox exiting") } // handleStreamErr is a utility method used to handle an error when calling // a method on a flowStreamClient. If err is an io.EOF, outboxCtxCancel is // called, for all other error flowCtxCancel is. The given error is logged with // the associated opName. func (o *Outbox) handleStreamErr( ctx context.Context, opName string, err error, flowCtxCancel, outboxCtxCancel context.CancelFunc, ) { if err == io.EOF { if log.V(1) { log.Infof(ctx, "Outbox calling outboxCtxCancel after %s EOF", opName) } outboxCtxCancel() } else { log.Warningf(ctx, "Outbox calling flowCtxCancel after %s connection error: %+v", opName, err) flowCtxCancel() } } func (o *Outbox) moveToDraining(ctx context.Context, reason string) { if atomic.CompareAndSwapUint32(&o.draining, 0, 1) { log.VEventf(ctx, 2, "Outbox moved to draining (%s)", reason) } } // sendBatches reads from the Outbox's input in a loop and sends the // coldata.Batches over the stream. A boolean is returned, indicating whether // execution completed gracefully (either received a zero-length batch or a // drain signal) as well as an error which is non-nil if an error was // encountered AND the error should be sent over the stream as metadata. The for // loop continues iterating until one of the following conditions becomes true: // 1) A zero-length batch is received from the input. This indicates graceful // termination. true, nil is returned. // 2) Outbox.draining is observed to be true. This is also considered graceful // termination. true, nil is returned. // 3) An error unrelated to the stream occurs (e.g. while deserializing a // coldata.Batch). false, err is returned. This err should be sent over the // stream as metadata. // 4) An error related to the stream occurs. In this case, the error is logged // but not returned, as there is no way to propagate this error anywhere // meaningful. false, nil is returned. // NOTE: if non-io.EOF error is encountered (indicating ungraceful shutdown // of the stream), flowCtxCancel will be called. If an io.EOF is encountered // (indicating a graceful shutdown initiated by the remote Inbox), // outboxCtxCancel will be called. func (o *Outbox) sendBatches( ctx context.Context, stream flowStreamClient, flowCtxCancel, outboxCtxCancel context.CancelFunc, ) (terminatedGracefully bool, errToSend error) { if o.runnerCtx == nil { // In the non-testing path, runnerCtx has been set in Run() method; // however, the tests might use runWithStream() directly in which case // runnerCtx will remain unset, so we have this check. o.runnerCtx = ctx } errToSend = colexecerror.CatchVectorizedRuntimeError(func() { o.Input.Init(o.runnerCtx) for { if atomic.LoadUint32(&o.draining) == 1 { terminatedGracefully = true return } batch := o.Input.Next() n := batch.Length() if n == 0 { terminatedGracefully = true return } o.scratch.buf.Reset() d, err := o.converter.BatchToArrow(batch) if err != nil { colexecerror.InternalError(errors.Wrap(err, "Outbox BatchToArrow data serialization error")) } if _, _, err := o.serializer.Serialize(o.scratch.buf, d, n); err != nil { colexecerror.InternalError(errors.Wrap(err, "Outbox Serialize data error")) } o.scratch.msg.Data.RawBytes = o.scratch.buf.Bytes() // o.scratch.msg can be reused as soon as Send returns since it returns as // soon as the message is written to the control buffer. 
The message is // marshaled (bytes are copied) before writing. if err := stream.Send(o.scratch.msg); err != nil { o.handleStreamErr(ctx, "Send (batches)", err, flowCtxCancel, outboxCtxCancel) return } } }) return terminatedGracefully, errToSend } // sendMetadata drains the Outbox.metadataSources and sends the metadata over // the given stream, returning the Send error, if any. sendMetadata also sends // errToSend as metadata if non-nil. func (o *Outbox) sendMetadata(ctx context.Context, stream flowStreamClient, errToSend error) error { msg := &execinfrapb.ProducerMessage{} if errToSend != nil { log.VEventf(ctx, 1, "Outbox sending an error as metadata: %v", errToSend) msg.Data.Metadata = append( msg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, execinfrapb.ProducerMetadata{Err: errToSend}), ) } if o.span != nil && o.getStats != nil { for _, s := range o.getStats() { o.span.RecordStructured(s) } } if trace := execinfra.GetTraceData(ctx); trace != nil { msg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.RemoteProducerMetadata{ Value: &execinfrapb.RemoteProducerMetadata_TraceData_{ TraceData: &execinfrapb.RemoteProducerMetadata_TraceData{ CollectedSpans: trace, }, }, }) } for _, meta := range o.metadataSources.DrainMeta() { msg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, meta)) } if len(msg.Data.Metadata) == 0 { return nil } return stream.Send(msg) } // runWithStream should be called after sending the ProducerHeader on the // stream. It implements the behavior described in Run. func (o *Outbox) runWithStream( ctx context.Context, stream flowStreamClient, flowCtxCancel, outboxCtxCancel context.CancelFunc, ) { if flowCtxCancel == nil { // The flowCtxCancel might be nil in some tests, but we'll make it a // noop for convenience. flowCtxCancel = func() {} } waitCh := make(chan struct{}) go func() { // This goroutine's job is to listen continually on the stream from the // consumer for errors or drain requests, while the remainder of this // function concurrently is producing data and sending it over the // network. This goroutine will tear down the flow if non-io.EOF error // is received - without it, a producer goroutine might spin doing work // forever after a connection is closed, since it wouldn't notice a // closed connection until it tried to Send over that connection. for { msg, err := stream.Recv() if err != nil { if err != io.EOF { log.Warningf(ctx, "Outbox calling flowCtxCancel after Recv connection error: %+v", err) flowCtxCancel() } break } switch { case msg.Handshake != nil: log.VEventf(ctx, 2, "Outbox received handshake: %v", msg.Handshake) case msg.DrainRequest != nil: o.moveToDraining(ctx, "consumer requested draining" /* reason */) } } close(waitCh) }() terminatedGracefully, errToSend := o.sendBatches(ctx, stream, flowCtxCancel, outboxCtxCancel) if terminatedGracefully || errToSend != nil { reason := "terminated gracefully" if errToSend != nil { reason = fmt.Sprintf("encountered error when sending batches: %v", errToSend) } o.moveToDraining(ctx, reason) if err := o.sendMetadata(ctx, stream, errToSend); err != nil { o.handleStreamErr(ctx, "Send (metadata)", err, flowCtxCancel, outboxCtxCancel) } else { // Close the stream. Note that if this block isn't reached, the stream // is unusable. // The receiver goroutine will read from the stream until any error // is returned (most likely an io.EOF). 
if err := stream.CloseSend(); err != nil { o.handleStreamErr(ctx, "CloseSend", err, flowCtxCancel, outboxCtxCancel) } } } o.close(ctx) <-waitCh }
pkg/sql/colflow/colrpc/outbox.go
1
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.9895054697990417, 0.047415729612112045, 0.00016333024541381747, 0.00016939712804742157, 0.2053627073764801 ]
{ "id": 0, "code_window": [ "func (s *ParallelUnorderedSynchronizer) Init(ctx context.Context) {\n", "\tif !s.InitHelper.Init(ctx) {\n", "\t\treturn\n", "\t}\n", "\tfor _, input := range s.inputs {\n", "\t\tinput.Root.Init(s.Ctx)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "\tfor i, input := range s.inputs {\n" ], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "replace", "edit_start_line_idx": 154 }
# LogicTest: multiregion-9node-3region-3azs

statement error pgcode XXC01 creating multi-region databases requires a CCL binary
CREATE DATABASE region_test_db PRIMARY REGION "ap-southeast-2" SURVIVE ZONE FAILURE

statement error pgcode XXC01 creating multi-region databases requires a CCL binary
ALTER DATABASE test PRIMARY REGION "ap-southeast-2"
pkg/sql/logictest/testdata/logic_test/multi_region
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00016398532898165286, 0.00016398532898165286, 0.00016398532898165286, 0.00016398532898165286, 0 ]
{ "id": 0, "code_window": [ "func (s *ParallelUnorderedSynchronizer) Init(ctx context.Context) {\n", "\tif !s.InitHelper.Init(ctx) {\n", "\t\treturn\n", "\t}\n", "\tfor _, input := range s.inputs {\n", "\t\tinput.Root.Init(s.Ctx)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "\tfor i, input := range s.inputs {\n" ], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "replace", "edit_start_line_idx": 154 }
// Copyright 2015 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package tests_test import ( "bytes" "context" "testing" "time" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) // getRangeKeys returns the end keys of all ranges. func getRangeKeys(db *kv.DB) ([]roachpb.Key, error) { rows, err := db.Scan(context.Background(), keys.Meta2Prefix, keys.MetaMax, 0) if err != nil { return nil, err } ret := make([]roachpb.Key, len(rows)) for i := 0; i < len(rows); i++ { ret[i] = bytes.TrimPrefix(rows[i].Key, keys.Meta2Prefix) } return ret, nil } func getNumRanges(db *kv.DB) (int, error) { rows, err := getRangeKeys(db) if err != nil { return 0, err } return len(rows), nil } func rangesMatchSplits(ranges []roachpb.Key, splits []roachpb.RKey) bool { if len(ranges) != len(splits) { return false } for i := 0; i < len(ranges); i++ { if !splits[i].Equal(ranges[i]) { return false } } return true } // TestSplitOnTableBoundaries verifies that ranges get split // as new tables get created. func TestSplitOnTableBoundaries(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) params, _ := tests.CreateTestServerParams() // We want fast scan. params.ScanInterval = time.Millisecond params.ScanMinIdleTime = time.Millisecond params.ScanMaxIdleTime = time.Millisecond s, sqlDB, kvDB := serverutils.StartServer(t, params) defer s.Stopper().Stop(context.Background()) expectedInitialRanges, err := server.ExpectedInitialRangeCount(kvDB, &s.(*server.TestServer).Cfg.DefaultZoneConfig, &s.(*server.TestServer).Cfg.DefaultSystemZoneConfig) if err != nil { t.Fatal(err) } if _, err := sqlDB.Exec(`CREATE DATABASE test`); err != nil { t.Fatal(err) } // We split up to the largest allocated descriptor ID, if it's a table. // Ensure that no split happens if a database is created. testutils.SucceedsSoon(t, func() error { num, err := getNumRanges(kvDB) if err != nil { return err } if e := expectedInitialRanges; num != e { return errors.Errorf("expected %d splits, found %d", e, num) } return nil }) // Verify the actual splits. objectID := uint32(keys.MinUserDescID) splits := []roachpb.RKey{roachpb.RKeyMax} ranges, err := getRangeKeys(kvDB) if err != nil { t.Fatal(err) } if a, e := ranges[expectedInitialRanges-1:], splits; !rangesMatchSplits(a, e) { t.Fatalf("Found ranges: %v\nexpected: %v", a, e) } // Let's create a table. if _, err := sqlDB.Exec(`CREATE TABLE test.test (k INT PRIMARY KEY, v INT)`); err != nil { t.Fatal(err) } testutils.SucceedsSoon(t, func() error { num, err := getNumRanges(kvDB) if err != nil { return err } if e := expectedInitialRanges + 1; num != e { return errors.Errorf("expected %d splits, found %d", e, num) } return nil }) // Verify the actual splits. 
splits = []roachpb.RKey{roachpb.RKey(keys.SystemSQLCodec.TablePrefix(objectID + 3)), roachpb.RKeyMax} ranges, err = getRangeKeys(kvDB) if err != nil { t.Fatal(err) } if a, e := ranges[expectedInitialRanges-1:], splits; !rangesMatchSplits(a, e) { t.Fatalf("Found ranges: %v\nexpected: %v", a, e) } }
pkg/sql/tests/split_test.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.023584330454468727, 0.0021598287858068943, 0.00016739220882300287, 0.0001719412102829665, 0.005983728915452957 ]
{ "id": 0, "code_window": [ "func (s *ParallelUnorderedSynchronizer) Init(ctx context.Context) {\n", "\tif !s.InitHelper.Init(ctx) {\n", "\t\treturn\n", "\t}\n", "\tfor _, input := range s.inputs {\n", "\t\tinput.Root.Init(s.Ctx)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "\tfor i, input := range s.inputs {\n" ], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "replace", "edit_start_line_idx": 154 }
* 5105ca93f3c4cde8865f840f810f4d2b0c4e7e62 Merge pull request #200 from foo/bar
|\
| * 0f4c2b0500c7fcc27e95b488bed6bb748f389cbb merge pr canary
|/
* 38b0488f9b16ea35deeb82d45efde8633a4f40bf Merge #2
|\
| * 97eb9fa9a4b1fbc48444bd8000b9ac8b7cfb9b87 feature B - commit 2
| * d456d947b78edb7dbc096a9693f73bde14c41d5d feature B - commit 1
|/
* 4279dfba7939245629a2d5f5faf54663efd2e005 Merge pull request #100 from foo/bar
|\
| * d168a8892abcbdfa8104f20e25b83ddcd7d19d7b merge pr canary
|/
* 2ae4772fb68d385ee81f3ca46596c2637f24bd65 Merge #1
|\
| * 9b5c8396188688e9a43cc00fcf2b9736f0398171 feature A
|/
* dacbef8585d93a7059dccbd5a52de9fb14edf54b initial
scripts/release-notes/test6.graph.ref.txt
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0001648909819778055, 0.00016470573609694839, 0.0001645205047680065, 0.00016470573609694839, 1.8523860489949584e-7 ]
{ "id": 1, "code_window": [ "\t\tinput.Root.Init(s.Ctx)\n", "\t}\n", "}\n", "\n" ], "labels": [ "add", "keep", "keep", "keep" ], "after_edit": [ "\t\ts.nextBatch[i] = func(inputOp colexecop.Operator, inputIdx int) func() {\n", "\t\t\treturn func() {\n", "\t\t\t\ts.batches[inputIdx] = inputOp.Next()\n", "\t\t\t}\n", "\t\t}(input.Root, i)\n" ], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "add", "edit_start_line_idx": 156 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colexec import ( "context" "fmt" "sync" "sync/atomic" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" ) // unorderedSynchronizerMsg is a light wrapper over a coldata.Batch or metadata // sent over a channel so that the main goroutine can know which input this // message originated from. // Note that either a batch or metadata must be sent, but not both. type unorderedSynchronizerMsg struct { inputIdx int b coldata.Batch meta []execinfrapb.ProducerMetadata } var _ colexecop.Operator = &ParallelUnorderedSynchronizer{} var _ execinfra.OpNode = &ParallelUnorderedSynchronizer{} type parallelUnorderedSynchronizerState int const ( // parallelUnorderedSynchronizerStateUninitialized is the state the // ParallelUnorderedSynchronizer is in when not yet initialized. parallelUnorderedSynchronizerStateUninitialized = iota // parallelUnorderedSynchronizerStateRunning is the state the // ParallelUnorderedSynchronizer is in when all input goroutines have been // spawned and are returning batches. parallelUnorderedSynchronizerStateRunning // parallelUnorderedSynchronizerStateDraining is the state the // ParallelUnorderedSynchronizer is in when a drain has been requested through // DrainMeta. All input goroutines will call DrainMeta on its input and exit. parallelUnorderedSynchronizerStateDraining // parallelUnorderedSyncrhonizerStateDone is the state the // ParallelUnorderedSynchronizer is in when draining has completed. parallelUnorderedSynchronizerStateDone ) // ParallelUnorderedSynchronizer is an Operator that combines multiple Operator streams // into one. type ParallelUnorderedSynchronizer struct { colexecop.InitHelper inputs []colexecargs.OpWithMetaInfo // readNextBatch is a slice of channels, where each channel corresponds to the // input at the same index in inputs. It is used as a barrier for input // goroutines to wait on until the Next goroutine signals that it is safe to // retrieve the next batch. This is done so that inputs that are running // asynchronously do not overwrite batches returned previously, given that // batches must be safe for reuse until the next call to Next. readNextBatch []chan struct{} // numFinishedInputs is incremented atomically whenever one of the provided // inputs exits from a goroutine (gracefully or otherwise). numFinishedInputs uint32 // lastReadInputIdx is the index of the input whose batch we last returned. // Used so that on the next call to Next, we can resume the input. lastReadInputIdx int // batches are the last batches read from the corresponding input. batches []coldata.Batch // nextBatch is a slice of functions each of which obtains a next batch from // the corresponding to it input. nextBatch []func() state int32 // externalWaitGroup refers to the WaitGroup passed in externally. 
Since the // ParallelUnorderedSynchronizer spawns goroutines, this allows callers to // wait for the completion of these goroutines. externalWaitGroup *sync.WaitGroup // internalWaitGroup refers to the WaitGroup internally managed by the // ParallelUnorderedSynchronizer. This will only ever be incremented by the // ParallelUnorderedSynchronizer and decremented by the input goroutines. This // allows the ParallelUnorderedSynchronizer to wait only on internal // goroutines. internalWaitGroup *sync.WaitGroup batchCh chan *unorderedSynchronizerMsg errCh chan error // bufferedMeta is the metadata buffered during a // ParallelUnorderedSynchronizer run. bufferedMeta []execinfrapb.ProducerMetadata } // ChildCount implements the execinfra.OpNode interface. func (s *ParallelUnorderedSynchronizer) ChildCount(verbose bool) int { return len(s.inputs) } // Child implements the execinfra.OpNode interface. func (s *ParallelUnorderedSynchronizer) Child(nth int, verbose bool) execinfra.OpNode { return s.inputs[nth].Root } // NewParallelUnorderedSynchronizer creates a new ParallelUnorderedSynchronizer. // On the first call to Next, len(inputs) goroutines are spawned to read each // input asynchronously (to not be limited by a slow input). These will // increment the passed-in WaitGroup and decrement when done. It is also // guaranteed that these spawned goroutines will have completed on any error or // zero-length batch received from Next. func NewParallelUnorderedSynchronizer( inputs []colexecargs.OpWithMetaInfo, wg *sync.WaitGroup, ) *ParallelUnorderedSynchronizer { readNextBatch := make([]chan struct{}, len(inputs)) for i := range readNextBatch { // Buffer readNextBatch chans to allow for non-blocking writes. There will // only be one message on the channel at a time. readNextBatch[i] = make(chan struct{}, 1) } return &ParallelUnorderedSynchronizer{ inputs: inputs, readNextBatch: readNextBatch, batches: make([]coldata.Batch, len(inputs)), nextBatch: make([]func(), len(inputs)), externalWaitGroup: wg, internalWaitGroup: &sync.WaitGroup{}, // batchCh is a buffered channel in order to offer non-blocking writes to // input goroutines. During normal operation, this channel will have at most // len(inputs) messages. However, during DrainMeta, inputs might need to // push an extra metadata message without blocking, hence the need to double // the size of this channel. batchCh: make(chan *unorderedSynchronizerMsg, len(inputs)*2), // errCh is buffered so that writers do not block. If errCh is full, the // input goroutines will not push an error and exit immediately, given that // the Next goroutine will read an error and panic anyway. errCh: make(chan error, 1), } } // Init is part of the colexecop.Operator interface. func (s *ParallelUnorderedSynchronizer) Init(ctx context.Context) { if !s.InitHelper.Init(ctx) { return } for _, input := range s.inputs { input.Root.Init(s.Ctx) } } func (s *ParallelUnorderedSynchronizer) getState() parallelUnorderedSynchronizerState { return parallelUnorderedSynchronizerState(atomic.LoadInt32(&s.state)) } func (s *ParallelUnorderedSynchronizer) setState(state parallelUnorderedSynchronizerState) { atomic.SwapInt32(&s.state, int32(state)) } // init starts one goroutine per input to read from each input asynchronously // and push to batchCh. Canceling the context (passed in Init() above) results // in all goroutines terminating, otherwise they keep on pushing batches until a // zero-length batch is encountered. Once all inputs terminate, s.batchCh is // closed. 
If an error occurs, the goroutines will make a non-blocking best // effort to push that error on s.errCh, resulting in the first error pushed to // be observed by the Next goroutine. Inputs are asynchronous so that the // synchronizer is minimally affected by slow inputs. func (s *ParallelUnorderedSynchronizer) init() { for i, input := range s.inputs { s.nextBatch[i] = func(input colexecargs.OpWithMetaInfo, inputIdx int) func() { return func() { s.batches[inputIdx] = input.Root.Next() } }(input, i) s.externalWaitGroup.Add(1) s.internalWaitGroup.Add(1) // TODO(asubiotto): Most inputs are Inboxes, and these have handler // goroutines just sitting around waiting for cancellation. I wonder if we // could reuse those goroutines to push batches to batchCh directly. go func(ctx context.Context, input colexecargs.OpWithMetaInfo, inputIdx int) { var span *tracing.Span ctx, span = execinfra.ProcessorSpan(ctx, fmt.Sprintf("parallel unordered sync input %d", inputIdx)) defer func() { if span != nil { span.Finish() } if int(atomic.AddUint32(&s.numFinishedInputs, 1)) == len(s.inputs) { close(s.batchCh) } // We need to close all of the closers of this input before we // notify the wait groups. input.ToClose.CloseAndLogOnErr(ctx, "parallel unordered synchronizer input") s.internalWaitGroup.Done() s.externalWaitGroup.Done() }() sendErr := func(err error) { select { // Non-blocking write to errCh, if an error is present the main // goroutine will use that and cancel all inputs. case s.errCh <- err: default: } } msg := &unorderedSynchronizerMsg{ inputIdx: inputIdx, } for { state := s.getState() switch state { case parallelUnorderedSynchronizerStateRunning: if err := colexecerror.CatchVectorizedRuntimeError(s.nextBatch[inputIdx]); err != nil { sendErr(err) return } msg.b = s.batches[inputIdx] if s.batches[inputIdx].Length() != 0 { // Send the batch. break } // In case of a zero-length batch, proceed to drain the input. fallthrough case parallelUnorderedSynchronizerStateDraining: // Create a new message for metadata. The previous message cannot be // overwritten since it might still be in the channel. msg = &unorderedSynchronizerMsg{ inputIdx: inputIdx, } if span != nil { for _, s := range input.StatsCollectors { span.RecordStructured(s.GetStats()) } if meta := execinfra.GetTraceDataAsMetadata(span); meta != nil { msg.meta = append(msg.meta, *meta) } } if input.MetadataSources != nil { msg.meta = append(msg.meta, input.MetadataSources.DrainMeta()...) } if msg.meta == nil { // Initialize msg.meta to be non-nil, which is a signal that // metadata has been drained. msg.meta = make([]execinfrapb.ProducerMetadata, 0) } default: sendErr(errors.AssertionFailedf("unhandled state in ParallelUnorderedSynchronizer input goroutine: %d", state)) return } // Check msg.meta before sending over the channel since the channel is // the synchronization primitive of meta. sentMeta := false if msg.meta != nil { sentMeta = true } select { case <-ctx.Done(): sendErr(ctx.Err()) return case s.batchCh <- msg: } if sentMeta { // The input has been drained and this input has pushed the metadata // over the channel, exit. return } // Wait until Next goroutine tells us we are good to go. select { case <-s.readNextBatch[inputIdx]: case <-ctx.Done(): sendErr(ctx.Err()) return } } }(s.Ctx, input, i) } } // Next is part of the colexecop.Operator interface. 
func (s *ParallelUnorderedSynchronizer) Next() coldata.Batch { for { state := s.getState() switch state { case parallelUnorderedSynchronizerStateDone: return coldata.ZeroBatch case parallelUnorderedSynchronizerStateUninitialized: s.setState(parallelUnorderedSynchronizerStateRunning) s.init() case parallelUnorderedSynchronizerStateRunning: // Signal the input whose batch we returned in the last call to Next that it // is safe to retrieve the next batch. Since Next has been called, we can // reuse memory instead of making safe copies of batches returned. s.notifyInputToReadNextBatch(s.lastReadInputIdx) default: colexecerror.InternalError(errors.AssertionFailedf("unhandled state in ParallelUnorderedSynchronizer Next goroutine: %d", state)) } select { case err := <-s.errCh: if err != nil { // If we got an error from one of our inputs, propagate this error // through a panic. The caller should then proceed to call DrainMeta, // which will take care of closing any inputs. colexecerror.InternalError(err) } case msg := <-s.batchCh: if msg == nil { // All inputs have exited, double check that this is indeed the case. s.internalWaitGroup.Wait() // Check if this was a graceful termination or not. select { case err := <-s.errCh: if err != nil { colexecerror.InternalError(err) } default: } s.setState(parallelUnorderedSynchronizerStateDone) return coldata.ZeroBatch } s.lastReadInputIdx = msg.inputIdx if msg.meta != nil { s.bufferedMeta = append(s.bufferedMeta, msg.meta...) continue } return msg.b } } } // notifyInputToReadNextBatch is a non-blocking send to notify the given input // that it may proceed to read the next batch from the input. Refer to the // comment of the readNextBatch field in ParallelUnorderedSynchronizer for more // information. func (s *ParallelUnorderedSynchronizer) notifyInputToReadNextBatch(inputIdx int) { select { // This write is non-blocking because if the channel is full, it must be the // case that there is a pending message for the input to proceed. case s.readNextBatch[inputIdx] <- struct{}{}: default: } } // DrainMeta is part of the colexecop.MetadataSource interface. func (s *ParallelUnorderedSynchronizer) DrainMeta() []execinfrapb.ProducerMetadata { prevState := s.getState() s.setState(parallelUnorderedSynchronizerStateDraining) if prevState == parallelUnorderedSynchronizerStateUninitialized { s.init() } // Non-blocking drain of batchCh. This is important mostly because of the // following edge case: all n inputs have pushed batches to the batchCh, so // there are currently n messages. Next notifies the last read input to // retrieve the next batch but encounters an error. There are now n+1 messages // in batchCh. Notifying all these inputs to read the next batch would result // in 2n+1 messages on batchCh, which would cause a deadlock since this // goroutine blocks on the wait group, but an input will block on writing to // batchCh. This is a best effort, but note that for this scenario to occur, // there *must* be at least one message in batchCh (the message belonging to // the input that was notified). for batchChDrained := false; !batchChDrained; { select { case msg := <-s.batchCh: if msg == nil { batchChDrained = true } else if msg.meta != nil { s.bufferedMeta = append(s.bufferedMeta, msg.meta...) } default: batchChDrained = true } } // Unblock any goroutines currently waiting to be told to read the next batch. // This will force all inputs to observe the new draining state. for _, ch := range s.readNextBatch { close(ch) } // Wait for all inputs to exit. 
s.internalWaitGroup.Wait() // Drain the batchCh, this reads the metadata that was pushed. for msg := <-s.batchCh; msg != nil; msg = <-s.batchCh { if msg.meta != nil { s.bufferedMeta = append(s.bufferedMeta, msg.meta...) } } // Buffer any errors that may have happened without blocking on the channel. for exitLoop := false; !exitLoop; { select { case err := <-s.errCh: s.bufferedMeta = append(s.bufferedMeta, execinfrapb.ProducerMetadata{Err: err}) default: exitLoop = true } } // Done. s.setState(parallelUnorderedSynchronizerStateDone) return s.bufferedMeta }
pkg/sql/colexec/parallel_unordered_synchronizer.go
1
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.15644153952598572, 0.005461149848997593, 0.0001636749366298318, 0.00027961982414126396, 0.023955946788191795 ]
{ "id": 1, "code_window": [ "\t\tinput.Root.Init(s.Ctx)\n", "\t}\n", "}\n", "\n" ], "labels": [ "add", "keep", "keep", "keep" ], "after_edit": [ "\t\ts.nextBatch[i] = func(inputOp colexecop.Operator, inputIdx int) func() {\n", "\t\t\treturn func() {\n", "\t\t\t\ts.batches[inputIdx] = inputOp.Next()\n", "\t\t\t}\n", "\t\t}(input.Root, i)\n" ], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "add", "edit_start_line_idx": 156 }
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "colcontainer", srcs = [ "diskqueue.go", "partitionedqueue.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/sql/colcontainer", visibility = ["//visibility:public"], deps = [ "//pkg/col/coldata", "//pkg/col/colserde", "//pkg/sql/colexecerror", "//pkg/sql/execinfra", "//pkg/sql/types", "//pkg/storage/fs", "//pkg/util/mon", "//pkg/util/syncutil", "//pkg/util/uuid", "@com_github_cockroachdb_errors//:errors", "@com_github_golang_snappy//:snappy", "@com_github_marusama_semaphore//:semaphore", ], ) go_test( name = "colcontainer_test", size = "small", srcs = [ "diskqueue_test.go", "main_test.go", "partitionedqueue_test.go", ], deps = [ ":colcontainer", "//pkg/col/coldata", "//pkg/col/coldataext", "//pkg/col/coldatatestutils", "//pkg/settings/cluster", "//pkg/sql/colexecop", "//pkg/sql/colmem", "//pkg/sql/execinfra", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/types", "//pkg/storage/fs", "//pkg/testutils/colcontainerutils", "//pkg/testutils/skip", "//pkg/util/humanizeutil", "//pkg/util/leaktest", "//pkg/util/log", "//pkg/util/mon", "//pkg/util/randutil", "@com_github_marusama_semaphore//:semaphore", "@com_github_stretchr_testify//require", ], )
pkg/sql/colcontainer/BUILD.bazel
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00017023138934746385, 0.0001680359710007906, 0.00016591961320955306, 0.00016807584324851632, 0.0000013978506103740074 ]
{ "id": 1, "code_window": [ "\t\tinput.Root.Init(s.Ctx)\n", "\t}\n", "}\n", "\n" ], "labels": [ "add", "keep", "keep", "keep" ], "after_edit": [ "\t\ts.nextBatch[i] = func(inputOp colexecop.Operator, inputIdx int) func() {\n", "\t\t\treturn func() {\n", "\t\t\t\ts.batches[inputIdx] = inputOp.Next()\n", "\t\t\t}\n", "\t\t}(input.Root, i)\n" ], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "add", "edit_start_line_idx": 156 }
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package sqltelemetry

import "github.com/cockroachdb/cockroach/pkg/server/telemetry"

// CreateReassignOwnedByCounter returns a counter to increment for the REASSIGN OWNED BY command.
func CreateReassignOwnedByCounter() telemetry.Counter {
	return telemetry.GetCounter("sql.reassign_owned_by")
}
pkg/sql/sqltelemetry/reassign_owned_by.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00017470067541580647, 0.0001710512296995148, 0.00016740178398322314, 0.0001710512296995148, 0.000003649445716291666 ]
{ "id": 1, "code_window": [ "\t\tinput.Root.Init(s.Ctx)\n", "\t}\n", "}\n", "\n" ], "labels": [ "add", "keep", "keep", "keep" ], "after_edit": [ "\t\ts.nextBatch[i] = func(inputOp colexecop.Operator, inputIdx int) func() {\n", "\t\t\treturn func() {\n", "\t\t\t\ts.batches[inputIdx] = inputOp.Next()\n", "\t\t\t}\n", "\t\t}(input.Root, i)\n" ], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "add", "edit_start_line_idx": 156 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colexec import ( "context" "math" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" ) // NewSorter returns a new sort operator, which sorts its input on the columns // given in orderingCols. The inputTypes must correspond 1-1 with the columns // in the input operator. func NewSorter( allocator *colmem.Allocator, input colexecop.Operator, inputTypes []*types.T, orderingCols []execinfrapb.Ordering_Column, ) (colexecop.Operator, error) { return newSorter(allocator, newAllSpooler(allocator, input, inputTypes), inputTypes, orderingCols) } func newSorter( allocator *colmem.Allocator, input spooler, inputTypes []*types.T, orderingCols []execinfrapb.Ordering_Column, ) (colexecop.ResettableOperator, error) { partitioners := make([]partitioner, len(orderingCols)-1) var err error for i, ord := range orderingCols { if !isSorterSupported(inputTypes[ord.ColIdx], ord.Direction) { return nil, errors.Errorf("sorter for type: %s and direction: %s not supported", inputTypes[ord.ColIdx], ord.Direction) } if i < len(orderingCols)-1 { partitioners[i], err = newPartitioner(inputTypes[ord.ColIdx]) if err != nil { return nil, err } } } return &sortOp{ allocator: allocator, input: input, inputTypes: inputTypes, sorters: make([]colSorter, len(orderingCols)), partitioners: partitioners, orderingCols: orderingCols, state: sortSpooling, }, nil } // spooler is a column vector operator that spools the data from its input. type spooler interface { execinfra.OpNode // init initializes this spooler and will be called once at the setup time. init(context.Context) // spool performs the actual spooling. spool() // getValues returns ith Vec of the already spooled data. getValues(i int) coldata.Vec // getNumTuples returns the number of spooled tuples. getNumTuples() int // getPartitionsCol returns a partitions column vector in which every true // value indicates a start of a different partition (i.e. "chunk") within // spooled tuples. It should return nil if all the tuples belong to the same // partition. getPartitionsCol() []bool // getWindowedBatch returns a batch that is a "window" into all Vecs of the // already spooled data, with tuples in range [startIdx, endIdx). This batch // is not allowed to be modified and is only safe to use until the next call // to this method. // TODO(yuzefovich): one idea we might want to implement at some point is // adding a wrapper on top of a coldata.Batch that is coldata.ImmutableBatch // that returns coldata.ImmutableVecs to enforce immutability. getWindowedBatch(startIdx, endIdx int) coldata.Batch } // allSpooler is the spooler that spools all tuples from the input. It is used // by the general sorter over the whole input. 
type allSpooler struct { colexecop.OneInputNode colexecop.NonExplainable allocator *colmem.Allocator // inputTypes contains the types of all of the columns from the input. inputTypes []*types.T // bufferedTuples stores all the values from the input after spooling. Each // Vec in this batch is the entire column from the input. bufferedTuples *colexecutils.AppendOnlyBufferedBatch // spooled indicates whether spool() has already been called. spooled bool windowedBatch coldata.Batch } var _ spooler = &allSpooler{} var _ colexecop.Resetter = &allSpooler{} func newAllSpooler( allocator *colmem.Allocator, input colexecop.Operator, inputTypes []*types.T, ) spooler { return &allSpooler{ OneInputNode: colexecop.NewOneInputNode(input), allocator: allocator, inputTypes: inputTypes, } } func (p *allSpooler) init(ctx context.Context) { p.Input.Init(ctx) p.bufferedTuples = colexecutils.NewAppendOnlyBufferedBatch(p.allocator, p.inputTypes, nil /* colsToStore */) p.windowedBatch = p.allocator.NewMemBatchWithFixedCapacity(p.inputTypes, 0 /* size */) } func (p *allSpooler) spool() { if p.spooled { colexecerror.InternalError(errors.AssertionFailedf("spool() is called for the second time")) } p.spooled = true for batch := p.Input.Next(); batch.Length() != 0; batch = p.Input.Next() { p.allocator.PerformOperation(p.bufferedTuples.ColVecs(), func() { p.bufferedTuples.AppendTuples(batch, 0 /* startIdx */, batch.Length()) }) } } func (p *allSpooler) getValues(i int) coldata.Vec { if !p.spooled { colexecerror.InternalError(errors.AssertionFailedf("getValues() is called before spool()")) } return p.bufferedTuples.ColVec(i) } func (p *allSpooler) getNumTuples() int { return p.bufferedTuples.Length() } func (p *allSpooler) getPartitionsCol() []bool { if !p.spooled { colexecerror.InternalError(errors.AssertionFailedf("getPartitionsCol() is called before spool()")) } return nil } func (p *allSpooler) getWindowedBatch(startIdx, endIdx int) coldata.Batch { // We don't need to worry about selection vectors here because if these were // present on the original input batches, they have been removed when we were // buffering up tuples. for i := range p.inputTypes { window := p.bufferedTuples.ColVec(i).Window(startIdx, endIdx) p.windowedBatch.ReplaceCol(window, i) } p.windowedBatch.SetSelection(false) p.windowedBatch.SetLength(endIdx - startIdx) return p.windowedBatch } func (p *allSpooler) Reset(ctx context.Context) { if r, ok := p.Input.(colexecop.Resetter); ok { r.Reset(ctx) } p.spooled = false p.bufferedTuples.ResetInternalBatch() } type sortOp struct { colexecop.InitHelper allocator *colmem.Allocator input spooler // inputTypes contains the types of all of the columns from input. inputTypes []*types.T // orderingCols is the ordered list of column orderings that the sorter should // sort on. orderingCols []execinfrapb.Ordering_Column // sorters contains one colSorter per sort column. The instantiation of // sorters occurs within the sort method rather than during construction // of the sortOp so that we can correctly choose a sorter based on // whether the input has nulls or not. sorters []colSorter // partitioners contains one partitioner per sort column except for the last, // which doesn't need to be partitioned. partitioners []partitioner // order maintains the order of tuples in the batch, after sorting. The value // at index i in order is the ordinal value of the tuple in the input that // belongs at index i. For example, if the input column to sort was // [c,b,a,d], the order vector after sorting would be [2,1,0,3]. 
order []int // emitted is the number of tuples emitted so far. emitted int // state is the current state of the sort. state sortState output coldata.Batch exported int } var _ colexecop.BufferingInMemoryOperator = &sortOp{} var _ colexecop.Resetter = &sortOp{} // colSorter is a single-column sorter, specialized on a particular type. type colSorter interface { // init prepares this sorter, given a particular Vec and an order vector, // which must be the same size as the input Vec and will be permuted with // the same swaps as the column. init(ctx context.Context, col coldata.Vec, order []int) // sort globally sorts this sorter's column. sort() // sortPartitions sorts this sorter's column once for every partition in the // partition slice. sortPartitions(partitions []int) } func (p *sortOp) Init(ctx context.Context) { if !p.InitHelper.Init(ctx) { return } p.input.init(p.Ctx) } // sortState represents the state of the sort operator. type sortState int const ( // sortSpooling is the initial state of the operator, where it spools its // input. sortSpooling sortState = iota // sortSorting is the second state of the operator, where it actually sorts // all the spooled data. sortSorting // sortEmitting is the third state of the operator, indicating that each call // to Next will return another batch of the sorted data. sortEmitting // sortDone is the final state of the operator, where it always returns a // zero batch. sortDone ) func (p *sortOp) Next() coldata.Batch { for { switch p.state { case sortSpooling: p.input.spool() p.state = sortSorting case sortSorting: p.sort() p.state = sortEmitting case sortEmitting: toEmit := p.input.getNumTuples() - p.emitted if toEmit == 0 { p.state = sortDone continue } if toEmit > coldata.BatchSize() { toEmit = coldata.BatchSize() } // For now, we don't enforce any footprint-based memory limit. // TODO(yuzefovich): refactor this. const maxBatchMemSize = math.MaxInt64 p.output, _ = p.allocator.ResetMaybeReallocate(p.inputTypes, p.output, toEmit, maxBatchMemSize) newEmitted := p.emitted + toEmit for j := 0; j < len(p.inputTypes); j++ { // At this point, we have already fully sorted the input. It is ok to do // this Copy outside of the allocator - the work has been done, but // theoretically it is possible to hit the limit here (mainly with // variable-sized types like Bytes). Nonetheless, for performance reasons // it would be sad to fallback to disk at this point. p.output.ColVec(j).Copy( coldata.CopySliceArgs{ SliceArgs: coldata.SliceArgs{ Sel: p.order, Src: p.input.getValues(j), SrcStartIdx: p.emitted, SrcEndIdx: newEmitted, }, }, ) } p.output.SetLength(toEmit) p.emitted = newEmitted return p.output case sortDone: return coldata.ZeroBatch default: colexecerror.InternalError(errors.AssertionFailedf("invalid sort state %v", p.state)) // This code is unreachable, but the compiler cannot infer that. return nil } } } // sort sorts the spooled tuples, so it must be called after spool() has been // performed. func (p *sortOp) sort() { spooledTuples := p.input.getNumTuples() if spooledTuples == 0 { // There is nothing to sort. return } // Allocate p.order and p.workingSpace if it hasn't been allocated yet or the // underlying memory is insufficient. if p.order == nil || cap(p.order) < spooledTuples { p.order = make([]int, spooledTuples) } p.order = p.order[:spooledTuples] // Initialize the order vector to the ordinal positions within the input set. 
for i := 0; i < len(p.order); i++ { p.order[i] = i } for i := range p.orderingCols { inputVec := p.input.getValues(int(p.orderingCols[i].ColIdx)) p.sorters[i] = newSingleSorter(p.inputTypes[p.orderingCols[i].ColIdx], p.orderingCols[i].Direction, inputVec.MaybeHasNulls()) p.sorters[i].init(p.Ctx, inputVec, p.order) } // Now, sort each column in turn. sorters := p.sorters partitionsCol := p.input.getPartitionsCol() omitNextPartitioning := false offset := 0 if partitionsCol == nil { // All spooled tuples belong to the same partition, so the first column // doesn't need special treatment - we just globally sort it. p.sorters[0].sort() if len(p.sorters) == 1 { // We're done sorting. Transition to emitting. return } sorters = sorters[1:] partitionsCol = make([]bool, spooledTuples) } else { // There are at least two partitions already, so the first column needs the // same special treatment as all others. The general sequence is as // follows: global sort -> partition -> sort partitions -> partition -> // -> sort partitions -> partition -> sort partitions -> ..., but in this // case, global sort doesn't make sense and partitioning has already been // done, so we want to skip the first partitioning step and sort partitions // right away. Also, in order to account for not performed global sort, we // introduce an offset of 1 for partitioners. omitNextPartitioning = true offset = 1 } // The rest of the columns need p sorts, one per partition in the previous // column. For example, in a two column sort: // // 1 b // 2 b // 1 a // 2 a // // We'll first sort the first column: // // 1 b // 1 a // 2 b // 2 a // // Then, for each group in the sorted, first column, we sort the second column: // // 1 a // 1 b // 2 a // 2 b partitions := make([]int, 0, 16) for i, sorter := range sorters { if !omitNextPartitioning { // We partition the previous column by running an ordered distinct operation // on it, ORing the results together with each subsequent column. This // produces a distinct vector (a boolean vector that has true in each // position that is different from the last position). p.partitioners[i-offset].partitionWithOrder(p.input.getValues(int(p.orderingCols[i-offset].ColIdx)), p.order, partitionsCol, spooledTuples) } else { omitNextPartitioning = false } // Convert the distinct vector into a selection vector - a vector of indices // that were true in the distinct vector. partitions = boolVecToSel64(partitionsCol, partitions[:0]) // For each partition (set of tuples that are identical in all of the sort // columns we've seen so far), sort based on the new column. sorter.sortPartitions(partitions) } } func (p *sortOp) Reset(ctx context.Context) { if r, ok := p.input.(colexecop.Resetter); ok { r.Reset(ctx) } p.emitted = 0 p.exported = 0 p.state = sortSpooling } func (p *sortOp) ChildCount(verbose bool) int { return 1 } func (p *sortOp) Child(nth int, verbose bool) execinfra.OpNode { if nth == 0 { return p.input } colexecerror.InternalError(errors.AssertionFailedf("invalid index %d", nth)) // This code is unreachable, but the compiler cannot infer that. return nil } func (p *sortOp) ExportBuffered(colexecop.Operator) coldata.Batch { if p.exported == p.input.getNumTuples() { return coldata.ZeroBatch } newExported := p.exported + coldata.BatchSize() if newExported > p.input.getNumTuples() { newExported = p.input.getNumTuples() } b := p.input.getWindowedBatch(p.exported, newExported) p.exported = newExported return b }
pkg/sql/colexec/sort.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0054537574760615826, 0.0004828627861570567, 0.00016315079119522125, 0.0001800838072085753, 0.0008950546034611762 ]
{ "id": 2, "code_window": [ "// effort to push that error on s.errCh, resulting in the first error pushed to\n", "// be observed by the Next goroutine. Inputs are asynchronous so that the\n", "// synchronizer is minimally affected by slow inputs.\n", "func (s *ParallelUnorderedSynchronizer) init() {\n", "\tfor i, input := range s.inputs {\n", "\t\ts.nextBatch[i] = func(input colexecargs.OpWithMetaInfo, inputIdx int) func() {\n", "\t\t\treturn func() {\n", "\t\t\t\ts.batches[inputIdx] = input.Root.Next()\n", "\t\t\t}\n", "\t\t}(input, i)\n", "\t\ts.externalWaitGroup.Add(1)\n", "\t\ts.internalWaitGroup.Add(1)\n", "\t\t// TODO(asubiotto): Most inputs are Inboxes, and these have handler\n", "\t\t// goroutines just sitting around waiting for cancellation. I wonder if we\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "replace", "edit_start_line_idx": 177 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colrpc import ( "bytes" "context" "fmt" "io" "sync/atomic" "time" "github.com/cockroachdb/cockroach/pkg/col/colserde" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/cockroachdb/logtags" ) // flowStreamClient is a utility interface used to mock out the RPC layer. type flowStreamClient interface { Send(*execinfrapb.ProducerMessage) error Recv() (*execinfrapb.ConsumerSignal, error) CloseSend() error } // Outbox is used to push data from local flows to a remote endpoint. Run may // be called with the necessary information to establish a connection to a // given remote endpoint. type Outbox struct { colexecop.OneInputNode typs []*types.T converter *colserde.ArrowBatchConverter serializer *colserde.RecordBatchSerializer // draining is an atomic that represents whether the Outbox is draining. draining uint32 metadataSources colexecop.MetadataSources // closers is a slice of Closers that need to be Closed on termination. closers colexecop.Closers scratch struct { buf *bytes.Buffer msg *execinfrapb.ProducerMessage } span *tracing.Span // getStats, when non-nil, returns all of the execution statistics of the // operators that are in the same tree as this Outbox. The stats will be // added into the span as Structured payload and returned to the gateway as // execinfrapb.ProducerMetadata. getStats func() []*execinfrapb.ComponentStats // A copy of Run's caller ctx, with no StreamID tag. // Used to pass a clean context to the input.Next. runnerCtx context.Context } // NewOutbox creates a new Outbox. // - getStats, when non-nil, returns all of the execution statistics of the // operators that are in the same tree as this Outbox. func NewOutbox( allocator *colmem.Allocator, input colexecop.Operator, typs []*types.T, getStats func() []*execinfrapb.ComponentStats, metadataSources []colexecop.MetadataSource, toClose []colexecop.Closer, ) (*Outbox, error) { c, err := colserde.NewArrowBatchConverter(typs) if err != nil { return nil, err } s, err := colserde.NewRecordBatchSerializer(typs) if err != nil { return nil, err } o := &Outbox{ // Add a deselector as selection vectors are not serialized (nor should they // be). 
OneInputNode: colexecop.NewOneInputNode(colexecutils.NewDeselectorOp(allocator, input, typs)), typs: typs, converter: c, serializer: s, getStats: getStats, metadataSources: metadataSources, closers: toClose, } o.scratch.buf = &bytes.Buffer{} o.scratch.msg = &execinfrapb.ProducerMessage{} return o, nil } func (o *Outbox) close(ctx context.Context) { o.closers.CloseAndLogOnErr(ctx, "outbox") } // Run starts an outbox by connecting to the provided node and pushing // coldata.Batches over the stream after sending a header with the provided flow // and stream ID. Note that an extra goroutine is spawned so that Recv may be // called concurrently wrt the Send goroutine to listen for drain signals. // If an io.EOF is received while sending, the outbox will cancel all components // from the same tree as the outbox. // If non-io.EOF is received while sending, the outbox will call flowCtxCancel // to shutdown all parts of the flow on this node. // If an error is encountered that cannot be sent over the stream, the error // will be logged but not returned. // There are several ways the bidirectional FlowStream RPC may terminate. // 1) Execution is finished. In this case, the upstream operator signals // termination by returning a zero-length batch. The Outbox will drain its // metadata sources, send the metadata, and then call CloseSend on the // stream. The Outbox will wait until its Recv goroutine receives a non-nil // error to not leak resources. // 2) A cancellation happened. This can come from the provided context or the // remote reader. Refer to tests for expected behavior. // 3) A drain signal was received from the server (consumer). In this case, the // Outbox goes through the same steps as 1). func (o *Outbox) Run( ctx context.Context, dialer execinfra.Dialer, nodeID roachpb.NodeID, flowID execinfrapb.FlowID, streamID execinfrapb.StreamID, flowCtxCancel context.CancelFunc, connectionTimeout time.Duration, ) { // Derive a child context so that we can cancel all components rooted in // this outbox. var outboxCtxCancel context.CancelFunc ctx, outboxCtxCancel = context.WithCancel(ctx) // Calling outboxCtxCancel is not strictly necessary, but we do it just to // be safe. defer outboxCtxCancel() ctx, o.span = execinfra.ProcessorSpan(ctx, "outbox") if o.span != nil { defer o.span.Finish() } o.runnerCtx = ctx ctx = logtags.AddTag(ctx, "streamID", streamID) log.VEventf(ctx, 2, "Outbox Dialing %s", nodeID) var stream execinfrapb.DistSQL_FlowStreamClient if err := func() error { conn, err := execinfra.GetConnForOutbox(ctx, dialer, nodeID, connectionTimeout) if err != nil { log.Warningf( ctx, "Outbox Dial connection error, distributed query will fail: %+v", err, ) return err } client := execinfrapb.NewDistSQLClient(conn) stream, err = client.FlowStream(ctx) if err != nil { log.Warningf( ctx, "Outbox FlowStream connection error, distributed query will fail: %+v", err, ) return err } log.VEvent(ctx, 2, "Outbox sending header") // Send header message to establish the remote server (consumer). if err := stream.Send( &execinfrapb.ProducerMessage{Header: &execinfrapb.ProducerHeader{FlowID: flowID, StreamID: streamID}}, ); err != nil { log.Warningf( ctx, "Outbox Send header error, distributed query will fail: %+v", err, ) return err } return nil }(); err != nil { // error during stream set up. 
o.close(ctx) return } log.VEvent(ctx, 2, "Outbox starting normal operation") o.runWithStream(ctx, stream, flowCtxCancel, outboxCtxCancel) log.VEvent(ctx, 2, "Outbox exiting") } // handleStreamErr is a utility method used to handle an error when calling // a method on a flowStreamClient. If err is an io.EOF, outboxCtxCancel is // called, for all other error flowCtxCancel is. The given error is logged with // the associated opName. func (o *Outbox) handleStreamErr( ctx context.Context, opName string, err error, flowCtxCancel, outboxCtxCancel context.CancelFunc, ) { if err == io.EOF { if log.V(1) { log.Infof(ctx, "Outbox calling outboxCtxCancel after %s EOF", opName) } outboxCtxCancel() } else { log.Warningf(ctx, "Outbox calling flowCtxCancel after %s connection error: %+v", opName, err) flowCtxCancel() } } func (o *Outbox) moveToDraining(ctx context.Context, reason string) { if atomic.CompareAndSwapUint32(&o.draining, 0, 1) { log.VEventf(ctx, 2, "Outbox moved to draining (%s)", reason) } } // sendBatches reads from the Outbox's input in a loop and sends the // coldata.Batches over the stream. A boolean is returned, indicating whether // execution completed gracefully (either received a zero-length batch or a // drain signal) as well as an error which is non-nil if an error was // encountered AND the error should be sent over the stream as metadata. The for // loop continues iterating until one of the following conditions becomes true: // 1) A zero-length batch is received from the input. This indicates graceful // termination. true, nil is returned. // 2) Outbox.draining is observed to be true. This is also considered graceful // termination. true, nil is returned. // 3) An error unrelated to the stream occurs (e.g. while deserializing a // coldata.Batch). false, err is returned. This err should be sent over the // stream as metadata. // 4) An error related to the stream occurs. In this case, the error is logged // but not returned, as there is no way to propagate this error anywhere // meaningful. false, nil is returned. // NOTE: if non-io.EOF error is encountered (indicating ungraceful shutdown // of the stream), flowCtxCancel will be called. If an io.EOF is encountered // (indicating a graceful shutdown initiated by the remote Inbox), // outboxCtxCancel will be called. func (o *Outbox) sendBatches( ctx context.Context, stream flowStreamClient, flowCtxCancel, outboxCtxCancel context.CancelFunc, ) (terminatedGracefully bool, errToSend error) { if o.runnerCtx == nil { // In the non-testing path, runnerCtx has been set in Run() method; // however, the tests might use runWithStream() directly in which case // runnerCtx will remain unset, so we have this check. o.runnerCtx = ctx } errToSend = colexecerror.CatchVectorizedRuntimeError(func() { o.Input.Init(o.runnerCtx) for { if atomic.LoadUint32(&o.draining) == 1 { terminatedGracefully = true return } batch := o.Input.Next() n := batch.Length() if n == 0 { terminatedGracefully = true return } o.scratch.buf.Reset() d, err := o.converter.BatchToArrow(batch) if err != nil { colexecerror.InternalError(errors.Wrap(err, "Outbox BatchToArrow data serialization error")) } if _, _, err := o.serializer.Serialize(o.scratch.buf, d, n); err != nil { colexecerror.InternalError(errors.Wrap(err, "Outbox Serialize data error")) } o.scratch.msg.Data.RawBytes = o.scratch.buf.Bytes() // o.scratch.msg can be reused as soon as Send returns since it returns as // soon as the message is written to the control buffer. 
The message is // marshaled (bytes are copied) before writing. if err := stream.Send(o.scratch.msg); err != nil { o.handleStreamErr(ctx, "Send (batches)", err, flowCtxCancel, outboxCtxCancel) return } } }) return terminatedGracefully, errToSend } // sendMetadata drains the Outbox.metadataSources and sends the metadata over // the given stream, returning the Send error, if any. sendMetadata also sends // errToSend as metadata if non-nil. func (o *Outbox) sendMetadata(ctx context.Context, stream flowStreamClient, errToSend error) error { msg := &execinfrapb.ProducerMessage{} if errToSend != nil { log.VEventf(ctx, 1, "Outbox sending an error as metadata: %v", errToSend) msg.Data.Metadata = append( msg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, execinfrapb.ProducerMetadata{Err: errToSend}), ) } if o.span != nil && o.getStats != nil { for _, s := range o.getStats() { o.span.RecordStructured(s) } } if trace := execinfra.GetTraceData(ctx); trace != nil { msg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.RemoteProducerMetadata{ Value: &execinfrapb.RemoteProducerMetadata_TraceData_{ TraceData: &execinfrapb.RemoteProducerMetadata_TraceData{ CollectedSpans: trace, }, }, }) } for _, meta := range o.metadataSources.DrainMeta() { msg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, meta)) } if len(msg.Data.Metadata) == 0 { return nil } return stream.Send(msg) } // runWithStream should be called after sending the ProducerHeader on the // stream. It implements the behavior described in Run. func (o *Outbox) runWithStream( ctx context.Context, stream flowStreamClient, flowCtxCancel, outboxCtxCancel context.CancelFunc, ) { if flowCtxCancel == nil { // The flowCtxCancel might be nil in some tests, but we'll make it a // noop for convenience. flowCtxCancel = func() {} } waitCh := make(chan struct{}) go func() { // This goroutine's job is to listen continually on the stream from the // consumer for errors or drain requests, while the remainder of this // function concurrently is producing data and sending it over the // network. This goroutine will tear down the flow if non-io.EOF error // is received - without it, a producer goroutine might spin doing work // forever after a connection is closed, since it wouldn't notice a // closed connection until it tried to Send over that connection. for { msg, err := stream.Recv() if err != nil { if err != io.EOF { log.Warningf(ctx, "Outbox calling flowCtxCancel after Recv connection error: %+v", err) flowCtxCancel() } break } switch { case msg.Handshake != nil: log.VEventf(ctx, 2, "Outbox received handshake: %v", msg.Handshake) case msg.DrainRequest != nil: o.moveToDraining(ctx, "consumer requested draining" /* reason */) } } close(waitCh) }() terminatedGracefully, errToSend := o.sendBatches(ctx, stream, flowCtxCancel, outboxCtxCancel) if terminatedGracefully || errToSend != nil { reason := "terminated gracefully" if errToSend != nil { reason = fmt.Sprintf("encountered error when sending batches: %v", errToSend) } o.moveToDraining(ctx, reason) if err := o.sendMetadata(ctx, stream, errToSend); err != nil { o.handleStreamErr(ctx, "Send (metadata)", err, flowCtxCancel, outboxCtxCancel) } else { // Close the stream. Note that if this block isn't reached, the stream // is unusable. // The receiver goroutine will read from the stream until any error // is returned (most likely an io.EOF). 
if err := stream.CloseSend(); err != nil { o.handleStreamErr(ctx, "CloseSend", err, flowCtxCancel, outboxCtxCancel) } } } o.close(ctx) <-waitCh }
pkg/sql/colflow/colrpc/outbox.go
1
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.05928751081228256, 0.0017656439449638128, 0.0001588581217220053, 0.00016717458493076265, 0.009220036678016186 ]
{ "id": 2, "code_window": [ "// effort to push that error on s.errCh, resulting in the first error pushed to\n", "// be observed by the Next goroutine. Inputs are asynchronous so that the\n", "// synchronizer is minimally affected by slow inputs.\n", "func (s *ParallelUnorderedSynchronizer) init() {\n", "\tfor i, input := range s.inputs {\n", "\t\ts.nextBatch[i] = func(input colexecargs.OpWithMetaInfo, inputIdx int) func() {\n", "\t\t\treturn func() {\n", "\t\t\t\ts.batches[inputIdx] = input.Root.Next()\n", "\t\t\t}\n", "\t\t}(input, i)\n", "\t\ts.externalWaitGroup.Add(1)\n", "\t\ts.internalWaitGroup.Add(1)\n", "\t\t// TODO(asubiotto): Most inputs are Inboxes, and these have handler\n", "\t\t// goroutines just sitting around waiting for cancellation. I wonder if we\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "replace", "edit_start_line_idx": 177 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package sql import ( "context" "fmt" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/errors" ) type cancelSessionsNode struct { rows planNode ifExists bool } func (n *cancelSessionsNode) startExec(runParams) error { return nil } func (n *cancelSessionsNode) Next(params runParams) (bool, error) { // TODO(knz): instead of performing the cancels sequentially, // accumulate all the query IDs and then send batches to each of the // nodes. if ok, err := n.rows.Next(params); err != nil || !ok { return ok, err } datum := n.rows.Values()[0] if datum == tree.DNull { return true, nil } sessionIDString, ok := tree.AsDString(datum) if !ok { return false, errors.AssertionFailedf("%q: expected *DString, found %T", datum, datum) } sessionID, err := StringToClusterWideID(string(sessionIDString)) if err != nil { return false, pgerror.Wrapf(err, pgcode.Syntax, "invalid session ID %s", datum) } // Get the lowest 32 bits of the session ID. nodeID := sessionID.GetNodeID() request := &serverpb.CancelSessionRequest{ NodeId: fmt.Sprintf("%d", nodeID), SessionID: sessionID.GetBytes(), Username: params.SessionData().User().Normalized(), } response, err := params.extendedEvalCtx.SQLStatusServer.CancelSession(params.ctx, request) if err != nil { return false, err } if !response.Canceled && !n.ifExists { return false, errors.Newf("could not cancel session %s: %s", sessionID, response.Error) } return true, nil } func (*cancelSessionsNode) Values() tree.Datums { return nil } func (n *cancelSessionsNode) Close(ctx context.Context) { n.rows.Close(ctx) }
pkg/sql/cancel_sessions.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00090789858950302, 0.00025114152231253684, 0.0001609965111128986, 0.00016971080913208425, 0.0002322585933143273 ]
{ "id": 2, "code_window": [ "// effort to push that error on s.errCh, resulting in the first error pushed to\n", "// be observed by the Next goroutine. Inputs are asynchronous so that the\n", "// synchronizer is minimally affected by slow inputs.\n", "func (s *ParallelUnorderedSynchronizer) init() {\n", "\tfor i, input := range s.inputs {\n", "\t\ts.nextBatch[i] = func(input colexecargs.OpWithMetaInfo, inputIdx int) func() {\n", "\t\t\treturn func() {\n", "\t\t\t\ts.batches[inputIdx] = input.Root.Next()\n", "\t\t\t}\n", "\t\t}(input, i)\n", "\t\ts.externalWaitGroup.Add(1)\n", "\t\ts.internalWaitGroup.Add(1)\n", "\t\t// TODO(asubiotto): Most inputs are Inboxes, and these have handler\n", "\t\t// goroutines just sitting around waiting for cancellation. I wonder if we\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "replace", "edit_start_line_idx": 177 }
// Copyright 2016 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package pgerror_test import ( "fmt" "regexp" "testing" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" ) func TestPGError(t *testing.T) { const msg = "err" var code = pgcode.MakeCode("abc") checkErr := func(pErr *pgerror.Error, errMsg string) { if pgcode.MakeCode(pErr.Code) != code { t.Fatalf("got: %q\nwant: %q", pErr.Code, code) } if pErr.Message != errMsg { t.Fatalf("got: %q\nwant: %q", pErr.Message, errMsg) } const want = `errors_test.go` match, err := regexp.MatchString(want, pErr.Source.File) if err != nil { t.Fatal(err) } if !match { t.Fatalf("got: %q\nwant: %q", pErr.Source.File, want) } } // Test NewError. pErr := pgerror.Flatten(pgerror.New(code, msg)) checkErr(pErr, msg) pErr = pgerror.Flatten(pgerror.New(code, "bad%format")) checkErr(pErr, "bad%format") // Test NewErrorf. const prefix = "prefix" pErr = pgerror.Flatten(pgerror.Newf(code, "%s: %s", prefix, msg)) expected := fmt.Sprintf("%s: %s", prefix, msg) checkErr(pErr, expected) } func TestIsSQLRetryableError(t *testing.T) { errAmbiguous := &roachpb.AmbiguousResultError{} if !pgerror.IsSQLRetryableError(roachpb.NewError(errAmbiguous).GoError()) { t.Fatalf("%s should be a SQLRetryableError", errAmbiguous) } }
pkg/sql/pgwire/pgerror/errors_test.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0001795271091395989, 0.00017204340838361531, 0.0001668168551987037, 0.00017041913815774024, 0.000004967907443642616 ]
{ "id": 2, "code_window": [ "// effort to push that error on s.errCh, resulting in the first error pushed to\n", "// be observed by the Next goroutine. Inputs are asynchronous so that the\n", "// synchronizer is minimally affected by slow inputs.\n", "func (s *ParallelUnorderedSynchronizer) init() {\n", "\tfor i, input := range s.inputs {\n", "\t\ts.nextBatch[i] = func(input colexecargs.OpWithMetaInfo, inputIdx int) func() {\n", "\t\t\treturn func() {\n", "\t\t\t\ts.batches[inputIdx] = input.Root.Next()\n", "\t\t\t}\n", "\t\t}(input, i)\n", "\t\ts.externalWaitGroup.Add(1)\n", "\t\ts.internalWaitGroup.Add(1)\n", "\t\t// TODO(asubiotto): Most inputs are Inboxes, and these have handler\n", "\t\t// goroutines just sitting around waiting for cancellation. I wonder if we\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "replace", "edit_start_line_idx": 177 }
["\uDBFF\uDFFE"]
pkg/util/json/testdata/raw/string_unicode_U+10FFFE_nonchar.json
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0001691762008704245, 0.0001691762008704245, 0.0001691762008704245, 0.0001691762008704245, 0 ]
{ "id": 3, "code_window": [ "\t\t\t\t// goroutine will use that and cancel all inputs.\n", "\t\t\t\tcase s.errCh <- err:\n", "\t\t\t\tdefault:\n", "\t\t\t\t}\n", "\t\t\t}\n", "\t\t\tmsg := &unorderedSynchronizerMsg{\n", "\t\t\t\tinputIdx: inputIdx,\n", "\t\t\t}\n", "\t\t\tfor {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tif s.nextBatch[inputIdx] == nil {\n", "\t\t\t\t// The initialization of this input wasn't successful, so it is\n", "\t\t\t\t// invalid to call Next or DrainMeta on it. Exit early.\n", "\t\t\t\treturn\n", "\t\t\t}\n" ], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "add", "edit_start_line_idx": 211 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colflow import ( "context" "sync" "sync/atomic" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colcontainer" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexechash" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/marusama/semaphore" ) // routerOutput is an interface implemented by router outputs. It exists for // easier test mocking of outputs. type routerOutput interface { execinfra.OpNode // initWithHashRouter passes a reference to the HashRouter that will be // pushing batches to this output. initWithHashRouter(*HashRouter) // addBatch adds the elements specified by the selection vector from batch // to the output. It returns whether or not the output changed its state to // blocked (see implementations). addBatch(context.Context, coldata.Batch) bool // cancel tells the output to stop producing batches. Optionally forwards an // error if not nil. cancel(context.Context, error) // forwardErr forwards an error to the output. The output should call // colexecerror.ExpectedError with this error on the next call to Next. // Calling forwardErr multiple times will result in the most recent error // overwriting the previous error. forwardErr(error) // resetForTests resets the routerOutput for a benchmark or test run. resetForTests(context.Context) } // getDefaultRouterOutputBlockedThreshold returns the number of unread values // buffered by the routerOutputOp after which the output is considered blocked. // It is a function rather than a variable so that in tests we could modify // coldata.BatchSize() (if it were a variable, then its value would be // evaluated before we set the desired batch size). func getDefaultRouterOutputBlockedThreshold() int { return coldata.BatchSize() * 2 } type routerOutputOpState int const ( // routerOutputOpRunning is the state in which routerOutputOp operates // normally. The router output transitions into routerOutputDoneAdding when // a zero-length batch was added or routerOutputOpDraining when it // encounters an error or the drain is requested. routerOutputOpRunning routerOutputOpState = iota // routerOutputDoneAdding is the state in which a zero-length was batch was // added to routerOutputOp and no more batches will be added. The router // output transitions to routerOutputOpDraining when the output is canceled // (either closed or the drain is requested). routerOutputDoneAdding // routerOutputOpDraining is the state in which routerOutputOp always // returns zero-length batches on calls to Next. 
routerOutputOpDraining ) // drainCoordinator is an interface that the HashRouter implements to coordinate // cancellation of all of its outputs in the case of an error and draining in // the case of graceful termination. // WARNING: No locks should be held when calling these methods, as the // HashRouter might call routerOutput methods (e.g. cancel) that attempt to // reacquire locks. type drainCoordinator interface { // encounteredError should be called when a routerOutput encounters an error. // This terminates execution. No locks should be held when calling this // method, since cancellation could occur. encounteredError(context.Context) // drainMeta should be called exactly once when the routerOutput moves to // draining. drainMeta() []execinfrapb.ProducerMetadata } type routerOutputOp struct { colexecop.InitHelper // input is a reference to our router. input execinfra.OpNode // drainCoordinator is a reference to the HashRouter to be able to notify it // if the output encounters an error or transitions to a draining state. drainCoordinator drainCoordinator types []*types.T // unblockedEventsChan is signaled when a routerOutput changes state from // blocked to unblocked. unblockedEventsChan chan<- struct{} mu struct { syncutil.Mutex state routerOutputOpState // forwardedErr is an error that was forwarded by the HashRouter. If set, // any subsequent calls to Next will return this error. forwardedErr error cond *sync.Cond // data is a SpillingQueue, a circular buffer backed by a disk queue. data *colexecutils.SpillingQueue numUnread int blocked bool } testingKnobs routerOutputOpTestingKnobs } func (o *routerOutputOp) ChildCount(verbose bool) int { return 1 } func (o *routerOutputOp) Child(nth int, verbose bool) execinfra.OpNode { if nth == 0 { return o.input } colexecerror.InternalError(errors.AssertionFailedf("invalid index %d", nth)) // This code is unreachable, but the compiler cannot infer that. return nil } var _ colexecop.Operator = &routerOutputOp{} type routerOutputOpTestingKnobs struct { // blockedThreshold is the number of buffered values above which we consider // a router output to be blocked. It defaults to // defaultRouterOutputBlockedThreshold but can be modified by tests to test // edge cases. blockedThreshold int // addBatchTestInducedErrorCb is called after any function call that could // produce an error if that error is nil. If the callback returns an error, // the router output overwrites the nil error with the returned error. // It is guaranteed that this callback will be called at least once during // normal execution. addBatchTestInducedErrorCb func() error // nextTestInducedErrorCb is called after any function call that could // produce an error if that error is nil. If the callback returns an error, // the router output overwrites the nil error with the returned error. // It is guaranteed that this callback will be called at least once during // normal execution. nextTestInducedErrorCb func() error } // routerOutputOpArgs are the arguments to newRouterOutputOp. All fields apart // from the testing knobs are optional. type routerOutputOpArgs struct { // All fields are required unless marked optional. types []*types.T // unlimitedAllocator should not have a memory limit. Pass in a soft // memoryLimit that will be respected instead. unlimitedAllocator *colmem.Allocator // memoryLimit acts as a soft limit to allow the router output to use disk // when it is exceeded. 
memoryLimit int64 diskAcc *mon.BoundAccount cfg colcontainer.DiskQueueCfg fdSemaphore semaphore.Semaphore // unblockedEventsChan must be a buffered channel. unblockedEventsChan chan<- struct{} testingKnobs routerOutputOpTestingKnobs } // newRouterOutputOp creates a new router output. func newRouterOutputOp(args routerOutputOpArgs) *routerOutputOp { if args.testingKnobs.blockedThreshold == 0 { args.testingKnobs.blockedThreshold = getDefaultRouterOutputBlockedThreshold() } o := &routerOutputOp{ types: args.types, unblockedEventsChan: args.unblockedEventsChan, testingKnobs: args.testingKnobs, } o.mu.cond = sync.NewCond(&o.mu) o.mu.data = colexecutils.NewSpillingQueue( &colexecutils.NewSpillingQueueArgs{ UnlimitedAllocator: args.unlimitedAllocator, Types: args.types, MemoryLimit: args.memoryLimit, DiskQueueCfg: args.cfg, FDSemaphore: args.fdSemaphore, DiskAcc: args.diskAcc, }, ) return o } func (o *routerOutputOp) Init(ctx context.Context) { o.InitHelper.Init(ctx) } // nextErrorLocked is a helper method that handles an error encountered in Next. func (o *routerOutputOp) nextErrorLocked(err error) { o.mu.state = routerOutputOpDraining o.maybeUnblockLocked() // Unlock the mutex, since the HashRouter will cancel all outputs. o.mu.Unlock() o.drainCoordinator.encounteredError(o.Ctx) o.mu.Lock() colexecerror.InternalError(err) } // Next returns the next coldata.Batch from the routerOutputOp. Note that Next // is designed for only one concurrent caller and will block until data is // ready. func (o *routerOutputOp) Next() coldata.Batch { o.mu.Lock() defer o.mu.Unlock() for o.mu.forwardedErr == nil && o.mu.state == routerOutputOpRunning && o.mu.data.Empty() { // Wait until there is data to read or the output is canceled. o.mu.cond.Wait() } if o.mu.forwardedErr != nil { colexecerror.ExpectedError(o.mu.forwardedErr) } if o.mu.state == routerOutputOpDraining { return coldata.ZeroBatch } b, err := o.mu.data.Dequeue(o.Ctx) if err == nil && o.testingKnobs.nextTestInducedErrorCb != nil { err = o.testingKnobs.nextTestInducedErrorCb() } if err != nil { o.nextErrorLocked(err) } o.mu.numUnread -= b.Length() if o.mu.numUnread <= o.testingKnobs.blockedThreshold { o.maybeUnblockLocked() } if b.Length() == 0 { if o.testingKnobs.nextTestInducedErrorCb != nil { if err := o.testingKnobs.nextTestInducedErrorCb(); err != nil { o.nextErrorLocked(err) } } // This is the last batch. closeLocked will set done to protect against // further calls to Next since this is allowed by the interface as well as // cleaning up and releasing possible disk infrastructure. o.closeLocked(o.Ctx) } return b } func (o *routerOutputOp) DrainMeta() []execinfrapb.ProducerMetadata { o.mu.Lock() o.mu.state = routerOutputOpDraining o.maybeUnblockLocked() o.mu.Unlock() return o.drainCoordinator.drainMeta() } func (o *routerOutputOp) initWithHashRouter(r *HashRouter) { o.input = r o.drainCoordinator = r } func (o *routerOutputOp) closeLocked(ctx context.Context) { o.mu.state = routerOutputOpDraining if err := o.mu.data.Close(ctx); err != nil { // This log message is Info instead of Warning because the flow will also // attempt to clean up the parent directory, so this failure might not have // any effect. log.Infof(ctx, "error closing vectorized hash router output, files may be left over: %s", err) } } // cancel wakes up a reader in Next if there is one and results in the output // returning zero length batches for every Next call after cancel. Note that // all accumulated data that hasn't been read will not be returned. 
func (o *routerOutputOp) cancel(ctx context.Context, err error) { o.mu.Lock() defer o.mu.Unlock() o.closeLocked(ctx) o.forwardErrLocked(err) // Some goroutine might be waiting on the condition variable, so wake it up. // Note that read goroutines check o.mu.done, so won't wait on the condition // variable after we unlock the mutex. o.mu.cond.Signal() } func (o *routerOutputOp) forwardErrLocked(err error) { if err != nil { o.mu.forwardedErr = err } } func (o *routerOutputOp) forwardErr(err error) { o.mu.Lock() defer o.mu.Unlock() o.forwardErrLocked(err) o.mu.cond.Signal() } // addBatch copies the batch (according to its selection vector) into an // internal buffer. Zero-length batch should be passed-in to indicate that no // more batches will be added. // TODO(asubiotto): We should explore pipelining addBatch if disk-spilling // performance becomes a concern. The main router goroutine will be writing to // disk as the code is written, meaning that we impact the performance of // writing rows to a fast output if we have to write to disk for a single // slow output. func (o *routerOutputOp) addBatch(ctx context.Context, batch coldata.Batch) bool { o.mu.Lock() defer o.mu.Unlock() switch o.mu.state { case routerOutputDoneAdding: colexecerror.InternalError(errors.AssertionFailedf("a batch was added to routerOutput in DoneAdding state")) case routerOutputOpDraining: // This output is draining, discard any data. return false } o.mu.numUnread += batch.Length() o.mu.data.Enqueue(ctx, batch) if o.testingKnobs.addBatchTestInducedErrorCb != nil { if err := o.testingKnobs.addBatchTestInducedErrorCb(); err != nil { colexecerror.InternalError(err) } } if batch.Length() == 0 { o.mu.state = routerOutputDoneAdding o.mu.cond.Signal() return false } stateChanged := false if o.mu.numUnread > o.testingKnobs.blockedThreshold && !o.mu.blocked { // The output is now blocked. o.mu.blocked = true stateChanged = true } o.mu.cond.Signal() return stateChanged } // maybeUnblockLocked unblocks the router output if it is in a blocked state. If the // output was previously in a blocked state, an event will be sent on // routerOutputOp.unblockedEventsChan. func (o *routerOutputOp) maybeUnblockLocked() { if o.mu.blocked { o.mu.blocked = false o.unblockedEventsChan <- struct{}{} } } // resetForTests resets the routerOutputOp for a test or benchmark run. func (o *routerOutputOp) resetForTests(ctx context.Context) { o.mu.Lock() defer o.mu.Unlock() o.mu.state = routerOutputOpRunning o.mu.forwardedErr = nil o.mu.data.Reset(ctx) o.mu.numUnread = 0 o.mu.blocked = false } // hashRouterDrainState is a state that specifically describes the hashRouter's // state in the draining process. This differs from its "general" state. For // example, a hash router can have drained and exited the Run method but still // be in hashRouterDrainStateRunning until somebody calls drainMeta. type hashRouterDrainState int const ( // hashRouterDrainStateRunning is the state that a hashRouter is in when // running normally (i.e. pulling and pushing batches). hashRouterDrainStateRunning = iota // hashRouterDrainStateRequested is the state that a hashRouter is in when // either all outputs have called drainMeta or an error was encountered by one // of the outputs. hashRouterDrainStateRequested // hashRouterDrainStateCompleted is the state that a hashRouter is in when // draining has completed. hashRouterDrainStateCompleted ) // HashRouter hashes values according to provided hash columns and computes a // destination for each row. 
These destinations are exposed as Operators // returned by the constructor. type HashRouter struct { colexecop.OneInputNode // inputMetaInfo contains all of the meta components that the hash router // is responsible for. Root field is exactly the same as OneInputNode.Input. inputMetaInfo colexecargs.OpWithMetaInfo // hashCols is a slice of indices of the columns used for hashing. hashCols []uint32 // One output for each stream. outputs []routerOutput // unblockedEventsChan is a channel shared between the HashRouter and its // outputs. outputs send events on this channel when they are unblocked by a // read. unblockedEventsChan <-chan struct{} numBlockedOutputs int bufferedMeta []execinfrapb.ProducerMetadata // atomics is shared state between the Run goroutine and any routerOutput // goroutines that call drainMeta. atomics struct { // drainState is the state the hashRouter is in. The Run goroutine should // only ever read these states, never set them. drainState int32 numDrainedOutputs int32 } // waitForMetadata is a channel that the last output to drain will read from // to pass on any metadata buffered through the Run goroutine. waitForMetadata chan []execinfrapb.ProducerMetadata // tupleDistributor is used to decide to which output a particular tuple // should be routed. tupleDistributor *colexechash.TupleHashDistributor } // NewHashRouter creates a new hash router that consumes coldata.Batches from // input and hashes each row according to hashCols to one of the outputs // returned as Operators. // The number of allocators provided will determine the number of outputs // returned. Note that each allocator must be unlimited, memory will be limited // by comparing memory use in the allocator with the memoryLimit argument. Each // Operator must have an independent allocator (this means that each allocator // should be linked to an independent mem account) as Operator.Next will usually // be called concurrently between different outputs. Similarly, each output // needs to have a separate disk account. func NewHashRouter( unlimitedAllocators []*colmem.Allocator, input colexecargs.OpWithMetaInfo, types []*types.T, hashCols []uint32, memoryLimit int64, diskQueueCfg colcontainer.DiskQueueCfg, fdSemaphore semaphore.Semaphore, diskAccounts []*mon.BoundAccount, ) (*HashRouter, []colexecop.DrainableOperator) { if diskQueueCfg.CacheMode != colcontainer.DiskQueueCacheModeDefault { colexecerror.InternalError(errors.Errorf("hash router instantiated with incompatible disk queue cache mode: %d", diskQueueCfg.CacheMode)) } outputs := make([]routerOutput, len(unlimitedAllocators)) outputsAsOps := make([]colexecop.DrainableOperator, len(unlimitedAllocators)) // unblockEventsChan is buffered to 2*numOutputs as we don't want the outputs // writing to it to block. // Unblock events only happen after a corresponding block event. Since these // are state changes and are done under lock (including the output sending // on the channel, which is why we want the channel to be buffered in the // first place), every time the HashRouter blocks an output, it *must* read // all unblock events preceding it since these *must* be on the channel. 
unblockEventsChan := make(chan struct{}, 2*len(unlimitedAllocators)) memoryLimitPerOutput := memoryLimit / int64(len(unlimitedAllocators)) for i := range unlimitedAllocators { op := newRouterOutputOp( routerOutputOpArgs{ types: types, unlimitedAllocator: unlimitedAllocators[i], memoryLimit: memoryLimitPerOutput, diskAcc: diskAccounts[i], cfg: diskQueueCfg, fdSemaphore: fdSemaphore, unblockedEventsChan: unblockEventsChan, }, ) outputs[i] = op outputsAsOps[i] = op } return newHashRouterWithOutputs(input, hashCols, unblockEventsChan, outputs), outputsAsOps } func newHashRouterWithOutputs( input colexecargs.OpWithMetaInfo, hashCols []uint32, unblockEventsChan <-chan struct{}, outputs []routerOutput, ) *HashRouter { r := &HashRouter{ OneInputNode: colexecop.NewOneInputNode(input.Root), inputMetaInfo: input, hashCols: hashCols, outputs: outputs, unblockedEventsChan: unblockEventsChan, // waitForMetadata is a buffered channel to avoid blocking if nobody will // read the metadata. waitForMetadata: make(chan []execinfrapb.ProducerMetadata, 1), tupleDistributor: colexechash.NewTupleHashDistributor(colexechash.DefaultInitHashValue, len(outputs)), } for i := range outputs { outputs[i].initWithHashRouter(r) } return r } // cancelOutputs cancels all outputs and forwards the given error to all of // them if non-nil. The only case where the error is not forwarded is if no // output could be canceled due to an error. In this case each output will // forward the error returned during cancellation. func (r *HashRouter) cancelOutputs(ctx context.Context, errToForward error) { for _, o := range r.outputs { if err := colexecerror.CatchVectorizedRuntimeError(func() { o.cancel(ctx, errToForward) }); err != nil { // If there was an error canceling this output, this error can be // forwarded to whoever is calling Next. o.forwardErr(err) } } } func (r *HashRouter) setDrainState(drainState hashRouterDrainState) { atomic.StoreInt32(&r.atomics.drainState, int32(drainState)) } func (r *HashRouter) getDrainState() hashRouterDrainState { return hashRouterDrainState(atomic.LoadInt32(&r.atomics.drainState)) } // Run runs the HashRouter. Batches are read from the input and pushed to an // output calculated by hashing columns. Cancel the given context to terminate // early. func (r *HashRouter) Run(ctx context.Context) { var span *tracing.Span ctx, span = execinfra.ProcessorSpan(ctx, "hash router") if span != nil { defer span.Finish() } // Since HashRouter runs in a separate goroutine, we want to be safe and // make sure that we catch errors in all code paths, so we wrap the whole // method with a catcher. Note that we also have "internal" catchers as // well for more fine-grained control of error propagation. if err := colexecerror.CatchVectorizedRuntimeError(func() { r.Input.Init(ctx) var done bool processNextBatch := func() { done = r.processNextBatch(ctx) } for { if r.getDrainState() != hashRouterDrainStateRunning { break } // Check for cancellation. select { case <-ctx.Done(): r.cancelOutputs(ctx, ctx.Err()) return default: } // Read all the routerOutput state changes that have happened since the // last iteration. for moreToRead := true; moreToRead; { select { case <-r.unblockedEventsChan: r.numBlockedOutputs-- default: // No more routerOutput state changes to read without blocking. moreToRead = false } } if r.numBlockedOutputs == len(r.outputs) { // All outputs are blocked, wait until at least one output is unblocked. 
select { case <-r.unblockedEventsChan: r.numBlockedOutputs-- case <-ctx.Done(): r.cancelOutputs(ctx, ctx.Err()) return } } if err := colexecerror.CatchVectorizedRuntimeError(processNextBatch); err != nil { r.cancelOutputs(ctx, err) return } if done { // The input was done and we have notified the routerOutputs that there // is no more data. return } } }); err != nil { r.cancelOutputs(ctx, err) } if span != nil { for _, s := range r.inputMetaInfo.StatsCollectors { span.RecordStructured(s.GetStats()) } if meta := execinfra.GetTraceDataAsMetadata(span); meta != nil { r.bufferedMeta = append(r.bufferedMeta, *meta) } } r.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...) // Non-blocking send of metadata so that one of the outputs can return it // in DrainMeta. r.waitForMetadata <- r.bufferedMeta close(r.waitForMetadata) r.inputMetaInfo.ToClose.CloseAndLogOnErr(ctx, "hash router") } // processNextBatch reads the next batch from its input, hashes it and adds // each column to its corresponding output, returning whether the input is // done. func (r *HashRouter) processNextBatch(ctx context.Context) bool { b := r.Input.Next() n := b.Length() if n == 0 { // Done. Push an empty batch to outputs to tell them the data is done as // well. for _, o := range r.outputs { o.addBatch(ctx, b) } return true } // It is ok that we call Init() on every batch since all calls except for // the first one are noops. r.tupleDistributor.Init(ctx) selections := r.tupleDistributor.Distribute(b, r.hashCols) for i, o := range r.outputs { if len(selections[i]) > 0 { b.SetSelection(true) copy(b.Selection(), selections[i]) b.SetLength(len(selections[i])) if o.addBatch(ctx, b) { // This batch blocked the output. r.numBlockedOutputs++ } } } return false } // resetForTests resets the HashRouter for a test or benchmark run. func (r *HashRouter) resetForTests(ctx context.Context) { if i, ok := r.Input.(colexecop.Resetter); ok { i.Reset(ctx) } r.setDrainState(hashRouterDrainStateRunning) r.waitForMetadata = make(chan []execinfrapb.ProducerMetadata, 1) r.atomics.numDrainedOutputs = 0 r.bufferedMeta = nil r.numBlockedOutputs = 0 for moreToRead := true; moreToRead; { select { case <-r.unblockedEventsChan: default: moreToRead = false } } for _, o := range r.outputs { o.resetForTests(ctx) } } func (r *HashRouter) encounteredError(ctx context.Context) { // Once one output returns an error the hash router needs to stop running // and drain its input. r.setDrainState(hashRouterDrainStateRequested) // cancel all outputs. The Run goroutine will eventually realize that the // HashRouter is done and exit without draining. r.cancelOutputs(ctx, nil /* errToForward */) } func (r *HashRouter) drainMeta() []execinfrapb.ProducerMetadata { if int(atomic.AddInt32(&r.atomics.numDrainedOutputs, 1)) != len(r.outputs) { return nil } // All outputs have been drained, return any buffered metadata to the last // output to call drainMeta. r.setDrainState(hashRouterDrainStateRequested) meta := <-r.waitForMetadata r.setDrainState(hashRouterDrainStateCompleted) return meta }
pkg/sql/colflow/routers.go
1
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.020410217344760895, 0.001278144191019237, 0.0001615205401321873, 0.000190232036402449, 0.002889275085180998 ]
{ "id": 3, "code_window": [ "\t\t\t\t// goroutine will use that and cancel all inputs.\n", "\t\t\t\tcase s.errCh <- err:\n", "\t\t\t\tdefault:\n", "\t\t\t\t}\n", "\t\t\t}\n", "\t\t\tmsg := &unorderedSynchronizerMsg{\n", "\t\t\t\tinputIdx: inputIdx,\n", "\t\t\t}\n", "\t\t\tfor {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tif s.nextBatch[inputIdx] == nil {\n", "\t\t\t\t// The initialization of this input wasn't successful, so it is\n", "\t\t\t\t// invalid to call Next or DrainMeta on it. Exit early.\n", "\t\t\t\treturn\n", "\t\t\t}\n" ], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "add", "edit_start_line_idx": 211 }
#!/bin/sh set -eux . common.sh t=test7 relnotescript=${1:?} rewrite=${2:-} test_init ( cd $t init_repo git checkout -b release-branch git checkout -b feature1 make_change "feature A Release note (bug fix): feature A " make_change "feature B Release note (bug fix): feature B " tag_pr 1 git checkout master merge_pr feature1 1 "PR title 1" git checkout release-branch -b backport make_change "backport A Release note (bug fix): feature A " tag_pr 2 git checkout release-branch merge_pr backport 2 "PR title 2" ) test_end --exclude-from initial --exclude-until release-branch
scripts/release-notes/test7.sh
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00017521099653095007, 0.00017313829448539764, 0.00017031906463671476, 0.00017476048378739506, 0.000002181878926421632 ]
{ "id": 3, "code_window": [ "\t\t\t\t// goroutine will use that and cancel all inputs.\n", "\t\t\t\tcase s.errCh <- err:\n", "\t\t\t\tdefault:\n", "\t\t\t\t}\n", "\t\t\t}\n", "\t\t\tmsg := &unorderedSynchronizerMsg{\n", "\t\t\t\tinputIdx: inputIdx,\n", "\t\t\t}\n", "\t\t\tfor {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tif s.nextBatch[inputIdx] == nil {\n", "\t\t\t\t// The initialization of this input wasn't successful, so it is\n", "\t\t\t\t// invalid to call Next or DrainMeta on it. Exit early.\n", "\t\t\t\treturn\n", "\t\t\t}\n" ], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "add", "edit_start_line_idx": 211 }
// Copyright 2020 The Cockroach Authors. // // Licensed as a CockroachDB Enterprise file under the Cockroach Community // License (the "License"); you may not use this file except in compliance with // the License. You may obtain a copy of the License at // // https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt package streamingest import ( "os" "testing" _ "github.com/cockroachdb/cockroach/pkg/ccl/storageccl" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/security/securitytest" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/randutil" ) func TestMain(m *testing.M) { defer utilccl.TestingEnableEnterprise()() security.SetAssetLoader(securitytest.EmbeddedAssets) randutil.SeedForTests() serverutils.InitTestServerFactory(server.TestServerFactory) serverutils.InitTestClusterFactory(testcluster.TestClusterFactory) os.Exit(m.Run()) } //go:generate ../../../util/leaktest/add-leaktest.sh *_test.go
pkg/ccl/streamingccl/streamingest/main_test.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00017815660976339132, 0.0001720134896459058, 0.00016394969134125859, 0.0001729738141875714, 0.0000051203369366703555 ]
{ "id": 3, "code_window": [ "\t\t\t\t// goroutine will use that and cancel all inputs.\n", "\t\t\t\tcase s.errCh <- err:\n", "\t\t\t\tdefault:\n", "\t\t\t\t}\n", "\t\t\t}\n", "\t\t\tmsg := &unorderedSynchronizerMsg{\n", "\t\t\t\tinputIdx: inputIdx,\n", "\t\t\t}\n", "\t\t\tfor {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tif s.nextBatch[inputIdx] == nil {\n", "\t\t\t\t// The initialization of this input wasn't successful, so it is\n", "\t\t\t\t// invalid to call Next or DrainMeta on it. Exit early.\n", "\t\t\t\treturn\n", "\t\t\t}\n" ], "file_path": "pkg/sql/colexec/parallel_unordered_synchronizer.go", "type": "add", "edit_start_line_idx": 211 }
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package kvserver import ( "testing" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/stretchr/testify/require" ) func TestCalcRangeCounterIsLiveMap(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) leaseStatus := kvserverpb.LeaseStatus{ Lease: roachpb.Lease{ Replica: roachpb.ReplicaDescriptor{ NodeID: 10, StoreID: 11, }, }, State: kvserverpb.LeaseState_VALID, } // Regression test for a bug, see: // https://github.com/cockroachdb/cockroach/pull/39936#pullrequestreview-359059629 threeVotersAndSingleNonVoter := roachpb.NewRangeDescriptor(123, roachpb.RKeyMin, roachpb.RKeyMax, roachpb.MakeReplicaSet([]roachpb.ReplicaDescriptor{ {NodeID: 10, StoreID: 11, ReplicaID: 12, Type: roachpb.ReplicaTypeVoterFull()}, {NodeID: 100, StoreID: 110, ReplicaID: 120, Type: roachpb.ReplicaTypeVoterFull()}, {NodeID: 1000, StoreID: 1100, ReplicaID: 1200, Type: roachpb.ReplicaTypeVoterFull()}, {NodeID: 2000, StoreID: 2100, ReplicaID: 2200, Type: roachpb.ReplicaTypeNonVoter()}, })) oneVoterAndThreeNonVoters := roachpb.NewRangeDescriptor(123, roachpb.RKeyMin, roachpb.RKeyMax, roachpb.MakeReplicaSet([]roachpb.ReplicaDescriptor{ {NodeID: 10, StoreID: 11, ReplicaID: 12, Type: roachpb.ReplicaTypeVoterFull()}, {NodeID: 100, StoreID: 110, ReplicaID: 120, Type: roachpb.ReplicaTypeNonVoter()}, {NodeID: 1000, StoreID: 1100, ReplicaID: 1200, Type: roachpb.ReplicaTypeNonVoter()}, {NodeID: 2000, StoreID: 2100, ReplicaID: 2200, Type: roachpb.ReplicaTypeNonVoter()}, })) { ctr, down, under, over := calcRangeCounter(1100, threeVotersAndSingleNonVoter, leaseStatus, liveness.IsLiveMap{ 1000: liveness.IsLiveMapEntry{IsLive: true}, // by NodeID }, 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */) require.True(t, ctr) require.True(t, down) require.True(t, under) require.False(t, over) } { ctr, down, under, over := calcRangeCounter(1000, threeVotersAndSingleNonVoter, leaseStatus, liveness.IsLiveMap{ 1000: liveness.IsLiveMapEntry{IsLive: false}, }, 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */) // Does not confuse a non-live entry for a live one. In other words, // does not think that the liveness map has only entries for live nodes. 
require.False(t, ctr) require.False(t, down) require.False(t, under) require.False(t, over) } { ctr, down, under, over := calcRangeCounter(11, threeVotersAndSingleNonVoter, leaseStatus, liveness.IsLiveMap{ 10: liveness.IsLiveMapEntry{IsLive: true}, 100: liveness.IsLiveMapEntry{IsLive: true}, 1000: liveness.IsLiveMapEntry{IsLive: true}, 2000: liveness.IsLiveMapEntry{IsLive: true}, }, 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */) require.True(t, ctr) require.False(t, down) require.False(t, under) require.False(t, over) } { // Single non-voter dead ctr, down, under, over := calcRangeCounter(11, oneVoterAndThreeNonVoters, leaseStatus, liveness.IsLiveMap{ 10: liveness.IsLiveMapEntry{IsLive: true}, 100: liveness.IsLiveMapEntry{IsLive: true}, 1000: liveness.IsLiveMapEntry{IsLive: false}, 2000: liveness.IsLiveMapEntry{IsLive: true}, }, 1 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */) require.True(t, ctr) require.False(t, down) require.True(t, under) require.False(t, over) } { // All non-voters are dead, but range is not unavailable ctr, down, under, over := calcRangeCounter(11, oneVoterAndThreeNonVoters, leaseStatus, liveness.IsLiveMap{ 10: liveness.IsLiveMapEntry{IsLive: true}, 100: liveness.IsLiveMapEntry{IsLive: false}, 1000: liveness.IsLiveMapEntry{IsLive: false}, 2000: liveness.IsLiveMapEntry{IsLive: false}, }, 1 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */) require.True(t, ctr) require.False(t, down) require.True(t, under) require.False(t, over) } { // More non-voters than needed ctr, down, under, over := calcRangeCounter(11, oneVoterAndThreeNonVoters, leaseStatus, liveness.IsLiveMap{ 10: liveness.IsLiveMapEntry{IsLive: true}, 100: liveness.IsLiveMapEntry{IsLive: true}, 1000: liveness.IsLiveMapEntry{IsLive: true}, 2000: liveness.IsLiveMapEntry{IsLive: true}, }, 1 /* numVoters */, 3 /* numReplicas */, 4 /* clusterNodes */) require.True(t, ctr) require.False(t, down) require.False(t, under) require.True(t, over) } } func TestCalcRangeCounterLeaseHolder(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) rangeDesc := roachpb.NewRangeDescriptor(123, roachpb.RKeyMin, roachpb.RKeyMax, roachpb.MakeReplicaSet([]roachpb.ReplicaDescriptor{ {NodeID: 1, StoreID: 10, ReplicaID: 100, Type: roachpb.ReplicaTypeVoterFull()}, {NodeID: 2, StoreID: 20, ReplicaID: 200, Type: roachpb.ReplicaTypeNonVoter()}, {NodeID: 3, StoreID: 30, ReplicaID: 300, Type: roachpb.ReplicaTypeVoterFull()}, {NodeID: 4, StoreID: 40, ReplicaID: 400, Type: roachpb.ReplicaTypeVoterFull()}, })) leaseStatus := kvserverpb.LeaseStatus{ Lease: roachpb.Lease{ Replica: roachpb.ReplicaDescriptor{ NodeID: 3, StoreID: 30, ReplicaID: 300, }, }, State: kvserverpb.LeaseState_VALID, } leaseStatusInvalid := kvserverpb.LeaseStatus{ Lease: roachpb.Lease{ Replica: roachpb.ReplicaDescriptor{ NodeID: 3, StoreID: 30, ReplicaID: 300, }, }, State: kvserverpb.LeaseState_ERROR, } testcases := []struct { desc string storeID roachpb.StoreID leaseStatus kvserverpb.LeaseStatus liveNodes []roachpb.NodeID expectCounter bool }{ { desc: "leaseholder is counter", storeID: 30, leaseStatus: leaseStatus, liveNodes: []roachpb.NodeID{1, 2, 3, 4}, expectCounter: true, }, { desc: "invalid leaseholder is counter", storeID: 30, leaseStatus: leaseStatusInvalid, liveNodes: []roachpb.NodeID{1, 2, 3, 4}, expectCounter: true, }, { desc: "non-leaseholder is not counter", storeID: 10, leaseStatus: leaseStatus, liveNodes: []roachpb.NodeID{1, 2, 3, 4}, expectCounter: false, }, { desc: "non-leaseholder not counter with invalid 
lease", storeID: 10, leaseStatus: leaseStatusInvalid, liveNodes: []roachpb.NodeID{1, 2, 3, 4}, expectCounter: false, }, { desc: "unavailable leaseholder is not counter", storeID: 30, leaseStatus: leaseStatus, liveNodes: []roachpb.NodeID{1, 2, 4}, expectCounter: false, }, { desc: "first is counter with unavailable leaseholder", storeID: 10, leaseStatus: leaseStatus, liveNodes: []roachpb.NodeID{1, 2, 4}, expectCounter: true, }, { desc: "other is not counter with unavailable leaseholder", storeID: 20, leaseStatus: leaseStatus, liveNodes: []roachpb.NodeID{1, 2, 4}, expectCounter: false, }, { desc: "non-voter can be counter", storeID: 20, leaseStatus: leaseStatus, liveNodes: []roachpb.NodeID{2, 4}, expectCounter: true, }, } for _, tc := range testcases { t.Run(tc.desc, func(t *testing.T) { livenessMap := liveness.IsLiveMap{} for _, nodeID := range tc.liveNodes { livenessMap[nodeID] = liveness.IsLiveMapEntry{IsLive: true} } ctr, _, _, _ := calcRangeCounter(tc.storeID, rangeDesc, tc.leaseStatus, livenessMap, 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */) require.Equal(t, tc.expectCounter, ctr) }) } }
pkg/kv/kvserver/replica_metrics_test.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00017842435045167804, 0.00017295405268669128, 0.00016293219232466072, 0.0001730730291455984, 0.0000035178420603187988 ]
{ "id": 4, "code_window": [ "}\n", "\n", "// MetadataSource is an interface implemented by processors and columnar\n", "// operators that can produce metadata.\n", "type MetadataSource interface {\n", "\t// DrainMeta returns all the metadata produced by the processor or operator.\n", "\t// It will be called exactly once, usually, when the processor or operator\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "// TODO(yuzefovich): remove this interface in favor of DrainableOperator and\n", "// clarify that calling DrainMeta on an uninitialized operator is illegal.\n" ], "file_path": "pkg/sql/colexecop/operator.go", "type": "add", "edit_start_line_idx": 363 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colexec import ( "context" "fmt" "sync" "sync/atomic" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" ) // unorderedSynchronizerMsg is a light wrapper over a coldata.Batch or metadata // sent over a channel so that the main goroutine can know which input this // message originated from. // Note that either a batch or metadata must be sent, but not both. type unorderedSynchronizerMsg struct { inputIdx int b coldata.Batch meta []execinfrapb.ProducerMetadata } var _ colexecop.Operator = &ParallelUnorderedSynchronizer{} var _ execinfra.OpNode = &ParallelUnorderedSynchronizer{} type parallelUnorderedSynchronizerState int const ( // parallelUnorderedSynchronizerStateUninitialized is the state the // ParallelUnorderedSynchronizer is in when not yet initialized. parallelUnorderedSynchronizerStateUninitialized = iota // parallelUnorderedSynchronizerStateRunning is the state the // ParallelUnorderedSynchronizer is in when all input goroutines have been // spawned and are returning batches. parallelUnorderedSynchronizerStateRunning // parallelUnorderedSynchronizerStateDraining is the state the // ParallelUnorderedSynchronizer is in when a drain has been requested through // DrainMeta. All input goroutines will call DrainMeta on its input and exit. parallelUnorderedSynchronizerStateDraining // parallelUnorderedSyncrhonizerStateDone is the state the // ParallelUnorderedSynchronizer is in when draining has completed. parallelUnorderedSynchronizerStateDone ) // ParallelUnorderedSynchronizer is an Operator that combines multiple Operator streams // into one. type ParallelUnorderedSynchronizer struct { colexecop.InitHelper inputs []colexecargs.OpWithMetaInfo // readNextBatch is a slice of channels, where each channel corresponds to the // input at the same index in inputs. It is used as a barrier for input // goroutines to wait on until the Next goroutine signals that it is safe to // retrieve the next batch. This is done so that inputs that are running // asynchronously do not overwrite batches returned previously, given that // batches must be safe for reuse until the next call to Next. readNextBatch []chan struct{} // numFinishedInputs is incremented atomically whenever one of the provided // inputs exits from a goroutine (gracefully or otherwise). numFinishedInputs uint32 // lastReadInputIdx is the index of the input whose batch we last returned. // Used so that on the next call to Next, we can resume the input. lastReadInputIdx int // batches are the last batches read from the corresponding input. batches []coldata.Batch // nextBatch is a slice of functions each of which obtains a next batch from // the corresponding to it input. nextBatch []func() state int32 // externalWaitGroup refers to the WaitGroup passed in externally. 
Since the // ParallelUnorderedSynchronizer spawns goroutines, this allows callers to // wait for the completion of these goroutines. externalWaitGroup *sync.WaitGroup // internalWaitGroup refers to the WaitGroup internally managed by the // ParallelUnorderedSynchronizer. This will only ever be incremented by the // ParallelUnorderedSynchronizer and decremented by the input goroutines. This // allows the ParallelUnorderedSynchronizer to wait only on internal // goroutines. internalWaitGroup *sync.WaitGroup batchCh chan *unorderedSynchronizerMsg errCh chan error // bufferedMeta is the metadata buffered during a // ParallelUnorderedSynchronizer run. bufferedMeta []execinfrapb.ProducerMetadata } // ChildCount implements the execinfra.OpNode interface. func (s *ParallelUnorderedSynchronizer) ChildCount(verbose bool) int { return len(s.inputs) } // Child implements the execinfra.OpNode interface. func (s *ParallelUnorderedSynchronizer) Child(nth int, verbose bool) execinfra.OpNode { return s.inputs[nth].Root } // NewParallelUnorderedSynchronizer creates a new ParallelUnorderedSynchronizer. // On the first call to Next, len(inputs) goroutines are spawned to read each // input asynchronously (to not be limited by a slow input). These will // increment the passed-in WaitGroup and decrement when done. It is also // guaranteed that these spawned goroutines will have completed on any error or // zero-length batch received from Next. func NewParallelUnorderedSynchronizer( inputs []colexecargs.OpWithMetaInfo, wg *sync.WaitGroup, ) *ParallelUnorderedSynchronizer { readNextBatch := make([]chan struct{}, len(inputs)) for i := range readNextBatch { // Buffer readNextBatch chans to allow for non-blocking writes. There will // only be one message on the channel at a time. readNextBatch[i] = make(chan struct{}, 1) } return &ParallelUnorderedSynchronizer{ inputs: inputs, readNextBatch: readNextBatch, batches: make([]coldata.Batch, len(inputs)), nextBatch: make([]func(), len(inputs)), externalWaitGroup: wg, internalWaitGroup: &sync.WaitGroup{}, // batchCh is a buffered channel in order to offer non-blocking writes to // input goroutines. During normal operation, this channel will have at most // len(inputs) messages. However, during DrainMeta, inputs might need to // push an extra metadata message without blocking, hence the need to double // the size of this channel. batchCh: make(chan *unorderedSynchronizerMsg, len(inputs)*2), // errCh is buffered so that writers do not block. If errCh is full, the // input goroutines will not push an error and exit immediately, given that // the Next goroutine will read an error and panic anyway. errCh: make(chan error, 1), } } // Init is part of the colexecop.Operator interface. func (s *ParallelUnorderedSynchronizer) Init(ctx context.Context) { if !s.InitHelper.Init(ctx) { return } for _, input := range s.inputs { input.Root.Init(s.Ctx) } } func (s *ParallelUnorderedSynchronizer) getState() parallelUnorderedSynchronizerState { return parallelUnorderedSynchronizerState(atomic.LoadInt32(&s.state)) } func (s *ParallelUnorderedSynchronizer) setState(state parallelUnorderedSynchronizerState) { atomic.SwapInt32(&s.state, int32(state)) } // init starts one goroutine per input to read from each input asynchronously // and push to batchCh. Canceling the context (passed in Init() above) results // in all goroutines terminating, otherwise they keep on pushing batches until a // zero-length batch is encountered. Once all inputs terminate, s.batchCh is // closed. 
If an error occurs, the goroutines will make a non-blocking best // effort to push that error on s.errCh, resulting in the first error pushed to // be observed by the Next goroutine. Inputs are asynchronous so that the // synchronizer is minimally affected by slow inputs. func (s *ParallelUnorderedSynchronizer) init() { for i, input := range s.inputs { s.nextBatch[i] = func(input colexecargs.OpWithMetaInfo, inputIdx int) func() { return func() { s.batches[inputIdx] = input.Root.Next() } }(input, i) s.externalWaitGroup.Add(1) s.internalWaitGroup.Add(1) // TODO(asubiotto): Most inputs are Inboxes, and these have handler // goroutines just sitting around waiting for cancellation. I wonder if we // could reuse those goroutines to push batches to batchCh directly. go func(ctx context.Context, input colexecargs.OpWithMetaInfo, inputIdx int) { var span *tracing.Span ctx, span = execinfra.ProcessorSpan(ctx, fmt.Sprintf("parallel unordered sync input %d", inputIdx)) defer func() { if span != nil { span.Finish() } if int(atomic.AddUint32(&s.numFinishedInputs, 1)) == len(s.inputs) { close(s.batchCh) } // We need to close all of the closers of this input before we // notify the wait groups. input.ToClose.CloseAndLogOnErr(ctx, "parallel unordered synchronizer input") s.internalWaitGroup.Done() s.externalWaitGroup.Done() }() sendErr := func(err error) { select { // Non-blocking write to errCh, if an error is present the main // goroutine will use that and cancel all inputs. case s.errCh <- err: default: } } msg := &unorderedSynchronizerMsg{ inputIdx: inputIdx, } for { state := s.getState() switch state { case parallelUnorderedSynchronizerStateRunning: if err := colexecerror.CatchVectorizedRuntimeError(s.nextBatch[inputIdx]); err != nil { sendErr(err) return } msg.b = s.batches[inputIdx] if s.batches[inputIdx].Length() != 0 { // Send the batch. break } // In case of a zero-length batch, proceed to drain the input. fallthrough case parallelUnorderedSynchronizerStateDraining: // Create a new message for metadata. The previous message cannot be // overwritten since it might still be in the channel. msg = &unorderedSynchronizerMsg{ inputIdx: inputIdx, } if span != nil { for _, s := range input.StatsCollectors { span.RecordStructured(s.GetStats()) } if meta := execinfra.GetTraceDataAsMetadata(span); meta != nil { msg.meta = append(msg.meta, *meta) } } if input.MetadataSources != nil { msg.meta = append(msg.meta, input.MetadataSources.DrainMeta()...) } if msg.meta == nil { // Initialize msg.meta to be non-nil, which is a signal that // metadata has been drained. msg.meta = make([]execinfrapb.ProducerMetadata, 0) } default: sendErr(errors.AssertionFailedf("unhandled state in ParallelUnorderedSynchronizer input goroutine: %d", state)) return } // Check msg.meta before sending over the channel since the channel is // the synchronization primitive of meta. sentMeta := false if msg.meta != nil { sentMeta = true } select { case <-ctx.Done(): sendErr(ctx.Err()) return case s.batchCh <- msg: } if sentMeta { // The input has been drained and this input has pushed the metadata // over the channel, exit. return } // Wait until Next goroutine tells us we are good to go. select { case <-s.readNextBatch[inputIdx]: case <-ctx.Done(): sendErr(ctx.Err()) return } } }(s.Ctx, input, i) } } // Next is part of the colexecop.Operator interface. 
func (s *ParallelUnorderedSynchronizer) Next() coldata.Batch { for { state := s.getState() switch state { case parallelUnorderedSynchronizerStateDone: return coldata.ZeroBatch case parallelUnorderedSynchronizerStateUninitialized: s.setState(parallelUnorderedSynchronizerStateRunning) s.init() case parallelUnorderedSynchronizerStateRunning: // Signal the input whose batch we returned in the last call to Next that it // is safe to retrieve the next batch. Since Next has been called, we can // reuse memory instead of making safe copies of batches returned. s.notifyInputToReadNextBatch(s.lastReadInputIdx) default: colexecerror.InternalError(errors.AssertionFailedf("unhandled state in ParallelUnorderedSynchronizer Next goroutine: %d", state)) } select { case err := <-s.errCh: if err != nil { // If we got an error from one of our inputs, propagate this error // through a panic. The caller should then proceed to call DrainMeta, // which will take care of closing any inputs. colexecerror.InternalError(err) } case msg := <-s.batchCh: if msg == nil { // All inputs have exited, double check that this is indeed the case. s.internalWaitGroup.Wait() // Check if this was a graceful termination or not. select { case err := <-s.errCh: if err != nil { colexecerror.InternalError(err) } default: } s.setState(parallelUnorderedSynchronizerStateDone) return coldata.ZeroBatch } s.lastReadInputIdx = msg.inputIdx if msg.meta != nil { s.bufferedMeta = append(s.bufferedMeta, msg.meta...) continue } return msg.b } } } // notifyInputToReadNextBatch is a non-blocking send to notify the given input // that it may proceed to read the next batch from the input. Refer to the // comment of the readNextBatch field in ParallelUnorderedSynchronizer for more // information. func (s *ParallelUnorderedSynchronizer) notifyInputToReadNextBatch(inputIdx int) { select { // This write is non-blocking because if the channel is full, it must be the // case that there is a pending message for the input to proceed. case s.readNextBatch[inputIdx] <- struct{}{}: default: } } // DrainMeta is part of the colexecop.MetadataSource interface. func (s *ParallelUnorderedSynchronizer) DrainMeta() []execinfrapb.ProducerMetadata { prevState := s.getState() s.setState(parallelUnorderedSynchronizerStateDraining) if prevState == parallelUnorderedSynchronizerStateUninitialized { s.init() } // Non-blocking drain of batchCh. This is important mostly because of the // following edge case: all n inputs have pushed batches to the batchCh, so // there are currently n messages. Next notifies the last read input to // retrieve the next batch but encounters an error. There are now n+1 messages // in batchCh. Notifying all these inputs to read the next batch would result // in 2n+1 messages on batchCh, which would cause a deadlock since this // goroutine blocks on the wait group, but an input will block on writing to // batchCh. This is a best effort, but note that for this scenario to occur, // there *must* be at least one message in batchCh (the message belonging to // the input that was notified). for batchChDrained := false; !batchChDrained; { select { case msg := <-s.batchCh: if msg == nil { batchChDrained = true } else if msg.meta != nil { s.bufferedMeta = append(s.bufferedMeta, msg.meta...) } default: batchChDrained = true } } // Unblock any goroutines currently waiting to be told to read the next batch. // This will force all inputs to observe the new draining state. for _, ch := range s.readNextBatch { close(ch) } // Wait for all inputs to exit. 
s.internalWaitGroup.Wait() // Drain the batchCh, this reads the metadata that was pushed. for msg := <-s.batchCh; msg != nil; msg = <-s.batchCh { if msg.meta != nil { s.bufferedMeta = append(s.bufferedMeta, msg.meta...) } } // Buffer any errors that may have happened without blocking on the channel. for exitLoop := false; !exitLoop; { select { case err := <-s.errCh: s.bufferedMeta = append(s.bufferedMeta, execinfrapb.ProducerMetadata{Err: err}) default: exitLoop = true } } // Done. s.setState(parallelUnorderedSynchronizerStateDone) return s.bufferedMeta }
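// Illustrative sketch (not part of the source file above): one way a caller
// might drive the ParallelUnorderedSynchronizer end to end, based only on the
// exported pieces shown above (NewParallelUnorderedSynchronizer, Init, Next,
// DrainMeta) and assuming this lived in package colexec next to that file so
// its imports (context, sync, colexecargs, execinfrapb) are available. Error
// handling around colexecerror panics is omitted; this is not the actual
// usage in the flow setup code.
func consumeSynchronizerSketch(
	ctx context.Context, inputs []colexecargs.OpWithMetaInfo,
) []execinfrapb.ProducerMetadata {
	var wg sync.WaitGroup
	s := NewParallelUnorderedSynchronizer(inputs, &wg)
	s.Init(ctx)
	// Keep pulling batches until a zero-length batch indicates that all inputs
	// are exhausted. Each returned batch is only valid until the next call.
	for b := s.Next(); b.Length() != 0; b = s.Next() {
		_ = b // process the batch here
	}
	// Collect the buffered metadata, then wait for the input goroutines to exit.
	meta := s.DrainMeta()
	wg.Wait()
	return meta
}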
pkg/sql/colexec/parallel_unordered_synchronizer.go
1
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.9973099231719971, 0.09524806588888168, 0.0001670731435297057, 0.00035890884464606643, 0.27301713824272156 ]
{ "id": 4, "code_window": [ "}\n", "\n", "// MetadataSource is an interface implemented by processors and columnar\n", "// operators that can produce metadata.\n", "type MetadataSource interface {\n", "\t// DrainMeta returns all the metadata produced by the processor or operator.\n", "\t// It will be called exactly once, usually, when the processor or operator\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "// TODO(yuzefovich): remove this interface in favor of DrainableOperator and\n", "// clarify that calling DrainMeta on an uninitialized operator is illegal.\n" ], "file_path": "pkg/sql/colexecop/operator.go", "type": "add", "edit_start_line_idx": 363 }
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: sql/execinfrapb/processors_table_stats.proto // Beware! This package name must not be changed, even though it doesn't match // the Go package name, because it defines the Protobuf message names which // can't be changed without breaking backward compatibility. package execinfrapb import ( encoding_binary "encoding/binary" fmt "fmt" github_com_cockroachdb_cockroach_pkg_jobs_jobspb "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" descpb "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type SketchType int32 const ( // This is the github.com/axiomhq/hyperloglog binary format (as of commit // 730eea1) for a sketch with precision 14. Values are encoded using their key // encoding, except integers which are encoded in 8 bytes (little-endian). SketchType_HLL_PLUS_PLUS_V1 SketchType = 0 ) var SketchType_name = map[int32]string{ 0: "HLL_PLUS_PLUS_V1", } var SketchType_value = map[string]int32{ "HLL_PLUS_PLUS_V1": 0, } func (x SketchType) Enum() *SketchType { p := new(SketchType) *p = x return p } func (x SketchType) String() string { return proto.EnumName(SketchType_name, int32(x)) } func (x *SketchType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(SketchType_value, data, "SketchType") if err != nil { return err } *x = SketchType(value) return nil } func (SketchType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_d08bfa18785ff29a, []int{0} } // SketchSpec contains the specification for a generated statistic. type SketchSpec struct { SketchType SketchType `protobuf:"varint,1,opt,name=sketch_type,json=sketchType,enum=cockroach.sql.distsqlrun.SketchType" json:"sketch_type"` // Each value is an index identifying a column in the input stream. // TODO(radu): currently only one column is supported. Columns []uint32 `protobuf:"varint,2,rep,name=columns" json:"columns,omitempty"` // If set, we generate a histogram for the first column in the sketch. GenerateHistogram bool `protobuf:"varint,3,opt,name=generate_histogram,json=generateHistogram" json:"generate_histogram"` // Controls the maximum number of buckets in the histogram. // Only used by the SampleAggregator. HistogramMaxBuckets uint32 `protobuf:"varint,4,opt,name=histogram_max_buckets,json=histogramMaxBuckets" json:"histogram_max_buckets"` // Only used by the SampleAggregator. StatName string `protobuf:"bytes,5,opt,name=stat_name,json=statName" json:"stat_name"` // Index is needed by some types (for example the geo types) when generating // inverted index entries, since it may contain configuration. 
Index *descpb.IndexDescriptor `protobuf:"bytes,6,opt,name=index" json:"index,omitempty"` } func (m *SketchSpec) Reset() { *m = SketchSpec{} } func (m *SketchSpec) String() string { return proto.CompactTextString(m) } func (*SketchSpec) ProtoMessage() {} func (*SketchSpec) Descriptor() ([]byte, []int) { return fileDescriptor_d08bfa18785ff29a, []int{0} } func (m *SketchSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SketchSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *SketchSpec) XXX_Merge(src proto.Message) { xxx_messageInfo_SketchSpec.Merge(m, src) } func (m *SketchSpec) XXX_Size() int { return m.Size() } func (m *SketchSpec) XXX_DiscardUnknown() { xxx_messageInfo_SketchSpec.DiscardUnknown(m) } var xxx_messageInfo_SketchSpec proto.InternalMessageInfo // SamplerSpec is the specification of a "sampler" processor which // returns a sample (random subset) of the input columns and computes // cardinality estimation sketches on sets of columns. // // The sampler is configured with a sample size and sets of columns // for the sketches. It produces one row with global statistics, one // row with sketch information for each sketch plus at most // sample_size sampled rows. // // For each column with an inverted index, a sketch and sample reservoir are // created. Each of these produces one sketch row and at most sample_size // sampled rows from the inverted index keys. // // The following method is used to do reservoir sampling: we generate a // "rank" for each row, which is just a random, uniformly distributed // 64-bit value. The rows with the smallest <sample_size> ranks are selected. // This method is chosen because it allows to combine sample sets very easily. // // The internal schema of the processor is formed of three column groups: // 1. sampled row columns: // - columns that map 1-1 to the columns in the input (same // schema as the input). Note that columns unused in a histogram are // set to NULL. // - an INT column with the "rank" of the row; this is a random value // associated with the row (necessary for combining sample sets). // 2. sketch columns: // - an INT column indicating the sketch index // (0 to len(sketches) - 1). // - an INT column indicating the number of rows processed // - an INT column indicating the number of rows with NULL values // on all columns of the sketch. // - a BYTES column with the binary sketch data (format // dependent on the sketch type). // 3. inverted columns: // - an INT column identifying the column index for this inverted sample // - a BYTE column of the inverted index key. // // There are four row types produced: // 1. sample rows, using column group #1. // 2. sketch rows, using column group #2. // 3. inverted sample rows, using column group #3 and the rank column from #1. // 4. inverted sketch rows, using column group #2 and first column from #3. // // Rows have NULLs on either all the sampled row columns or on all the // sketch columns. 
type SamplerSpec struct { Sketches []SketchSpec `protobuf:"bytes,1,rep,name=sketches" json:"sketches"` InvertedSketches []SketchSpec `protobuf:"bytes,4,rep,name=inverted_sketches,json=invertedSketches" json:"inverted_sketches"` SampleSize uint32 `protobuf:"varint,2,opt,name=sample_size,json=sampleSize" json:"sample_size"` // Setting this value enables throttling; this is the fraction of time that // the sampler processors will be idle when the recent CPU usage is high. The // throttling is adaptive so the actual idle fraction will depend on CPU // usage; this value is a ceiling. // // Currently, this field is set only for automatic statistics based on the // value of the cluster setting // sql.stats.automatic_collection.max_fraction_idle. MaxFractionIdle float64 `protobuf:"fixed64,3,opt,name=max_fraction_idle,json=maxFractionIdle" json:"max_fraction_idle"` } func (m *SamplerSpec) Reset() { *m = SamplerSpec{} } func (m *SamplerSpec) String() string { return proto.CompactTextString(m) } func (*SamplerSpec) ProtoMessage() {} func (*SamplerSpec) Descriptor() ([]byte, []int) { return fileDescriptor_d08bfa18785ff29a, []int{1} } func (m *SamplerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SamplerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *SamplerSpec) XXX_Merge(src proto.Message) { xxx_messageInfo_SamplerSpec.Merge(m, src) } func (m *SamplerSpec) XXX_Size() int { return m.Size() } func (m *SamplerSpec) XXX_DiscardUnknown() { xxx_messageInfo_SamplerSpec.DiscardUnknown(m) } var xxx_messageInfo_SamplerSpec proto.InternalMessageInfo // SampleAggregatorSpec is the specification of a processor that aggregates the // results from multiple sampler processors and writes out the statistics to // system.table_statistics. // // The input schema it expects matches the output schema of a sampler spec (see // the comment for SamplerSpec for all the details): // 1. sampled row columns: // - sampled columns // - row rank // 2. sketch columns: // - sketch index // - number of rows processed // - number of rows encountered with NULL values on all columns of the sketch // - binary sketch data // 3. inverted columns: // - column index for inverted sample // - sample column type SampleAggregatorSpec struct { Sketches []SketchSpec `protobuf:"bytes,1,rep,name=sketches" json:"sketches"` InvertedSketches []SketchSpec `protobuf:"bytes,8,rep,name=inverted_sketches,json=invertedSketches" json:"inverted_sketches"` // The processor merges reservoir sample sets into a single // sample set of this size. This must match the sample size // used for each Sampler. SampleSize uint32 `protobuf:"varint,2,opt,name=sample_size,json=sampleSize" json:"sample_size"` // The i-th value indicates the ColumnID of the i-th sampled row column. // These are necessary for writing out the statistic data. SampledColumnIDs []github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.ColumnID `protobuf:"varint,3,rep,name=sampled_column_ids,json=sampledColumnIds,casttype=github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID" json:"sampled_column_ids,omitempty"` TableID github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.ID `protobuf:"varint,4,opt,name=table_id,json=tableId,casttype=github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID" json:"table_id"` // JobID is the id of the CREATE STATISTICS job. 
JobID github_com_cockroachdb_cockroach_pkg_jobs_jobspb.JobID `protobuf:"varint,6,opt,name=job_id,json=jobId,casttype=github.com/cockroachdb/cockroach/pkg/jobs/jobspb.JobID" json:"job_id"` // The total number of rows expected in the table based on previous runs of // CREATE STATISTICS. Used for progress reporting. If rows expected is 0, // reported progress is 0 until the very end. RowsExpected uint64 `protobuf:"varint,7,opt,name=rows_expected,json=rowsExpected" json:"rows_expected"` } func (m *SampleAggregatorSpec) Reset() { *m = SampleAggregatorSpec{} } func (m *SampleAggregatorSpec) String() string { return proto.CompactTextString(m) } func (*SampleAggregatorSpec) ProtoMessage() {} func (*SampleAggregatorSpec) Descriptor() ([]byte, []int) { return fileDescriptor_d08bfa18785ff29a, []int{2} } func (m *SampleAggregatorSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SampleAggregatorSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } func (m *SampleAggregatorSpec) XXX_Merge(src proto.Message) { xxx_messageInfo_SampleAggregatorSpec.Merge(m, src) } func (m *SampleAggregatorSpec) XXX_Size() int { return m.Size() } func (m *SampleAggregatorSpec) XXX_DiscardUnknown() { xxx_messageInfo_SampleAggregatorSpec.DiscardUnknown(m) } var xxx_messageInfo_SampleAggregatorSpec proto.InternalMessageInfo func init() { proto.RegisterEnum("cockroach.sql.distsqlrun.SketchType", SketchType_name, SketchType_value) proto.RegisterType((*SketchSpec)(nil), "cockroach.sql.distsqlrun.SketchSpec") proto.RegisterType((*SamplerSpec)(nil), "cockroach.sql.distsqlrun.SamplerSpec") proto.RegisterType((*SampleAggregatorSpec)(nil), "cockroach.sql.distsqlrun.SampleAggregatorSpec") } func init() { proto.RegisterFile("sql/execinfrapb/processors_table_stats.proto", fileDescriptor_d08bfa18785ff29a) } var fileDescriptor_d08bfa18785ff29a = []byte{ // 703 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0x5b, 0x6f, 0xd3, 0x48, 0x14, 0x8e, 0x73, 0x69, 0xd2, 0xc9, 0x76, 0x37, 0xf5, 0x76, 0x25, 0xab, 0x0f, 0x8e, 0x37, 0x7b, 0x91, 0x77, 0xb5, 0x6b, 0xef, 0x16, 0x09, 0x21, 0x9e, 0x20, 0x84, 0xd2, 0x94, 0x16, 0xa1, 0xa4, 0x5c, 0xc4, 0x03, 0xd6, 0x78, 0xe6, 0xd4, 0x71, 0x63, 0x7b, 0x9c, 0x99, 0x09, 0xa4, 0xfd, 0x01, 0xf0, 0xca, 0xcf, 0x2a, 0x6f, 0x7d, 0xec, 0x53, 0x04, 0xe9, 0xbf, 0xe8, 0x13, 0xf2, 0x25, 0xa1, 0x80, 0x10, 0x97, 0x87, 0xbe, 0x58, 0x33, 0xe7, 0x3b, 0xdf, 0x77, 0xce, 0x7c, 0x9e, 0x39, 0xe8, 0x1f, 0x31, 0x0a, 0x6c, 0x98, 0x00, 0xf1, 0xa3, 0x7d, 0x8e, 0x63, 0xd7, 0x8e, 0x39, 0x23, 0x20, 0x04, 0xe3, 0xc2, 0x91, 0xd8, 0x0d, 0xc0, 0x11, 0x12, 0x4b, 0x61, 0xc5, 0x9c, 0x49, 0xa6, 0x6a, 0x84, 0x91, 0x21, 0x67, 0x98, 0x0c, 0x2c, 0x31, 0x0a, 0x2c, 0xea, 0x0b, 0x29, 0x46, 0x01, 0x1f, 0x47, 0xeb, 0xbf, 0x25, 0x3a, 0x04, 0x4b, 0x1c, 0x30, 0xcf, 0xa6, 0x20, 0x48, 0xec, 0xda, 0x42, 0xf2, 0x31, 0x91, 0x63, 0x0e, 0x34, 0xa3, 0xaf, 0xaf, 0x79, 0xcc, 0x63, 0xe9, 0xd2, 0x4e, 0x56, 0x59, 0xb4, 0xf5, 0xba, 0x88, 0x50, 0x7f, 0x08, 0x92, 0x0c, 0xfa, 0x31, 0x10, 0xf5, 0x2e, 0xaa, 0x8b, 0x74, 0xe7, 0xc8, 0xc3, 0x18, 0x34, 0xc5, 0x50, 0xcc, 0x1f, 0x37, 0x7e, 0xb7, 0x3e, 0x57, 0xd9, 0xca, 0xa8, 0x7b, 0x87, 0x31, 0xb4, 0xcb, 0xc7, 0xd3, 0x66, 0xa1, 0x87, 0xc4, 0x22, 0xa2, 0x6a, 0xa8, 0x4a, 0x58, 0x30, 0x0e, 0x23, 0xa1, 0x15, 0x8d, 0x92, 0xb9, 0xd2, 0x9b, 0x6f, 0xd5, 0x2b, 0x48, 0xf5, 0x20, 0x02, 0x8e, 0x25, 0x38, 0x03, 0x5f, 0x48, 0xe6, 0x71, 0x1c, 0x6a, 0x25, 
0x43, 0x31, 0x6b, 0xb9, 0xce, 0xea, 0x1c, 0xdf, 0x9a, 0xc3, 0xea, 0x35, 0xf4, 0xcb, 0x22, 0xd7, 0x09, 0xf1, 0xc4, 0x71, 0xc7, 0x64, 0x08, 0x52, 0x68, 0x65, 0x43, 0x31, 0x57, 0x72, 0xde, 0xcf, 0x8b, 0x94, 0x5d, 0x3c, 0x69, 0x67, 0x09, 0xea, 0xaf, 0x68, 0x39, 0x31, 0xd2, 0x89, 0x70, 0x08, 0x5a, 0xc5, 0x50, 0xcc, 0xe5, 0x3c, 0xbb, 0x96, 0x84, 0xef, 0xe1, 0x10, 0xd4, 0x36, 0xaa, 0xf8, 0x11, 0x85, 0x89, 0xb6, 0x64, 0x28, 0x66, 0x7d, 0xe3, 0xcf, 0x8f, 0x8e, 0x2c, 0x46, 0x81, 0x8b, 0x05, 0x58, 0xdd, 0x24, 0xa7, 0x03, 0x82, 0x70, 0x3f, 0x96, 0x8c, 0xa7, 0x32, 0x4a, 0x2f, 0xa3, 0xb6, 0x5e, 0x16, 0x51, 0xbd, 0x8f, 0xc3, 0x38, 0x00, 0x9e, 0x9a, 0xb9, 0x89, 0x6a, 0x99, 0x1b, 0x20, 0x34, 0xc5, 0x28, 0x99, 0xf5, 0x2f, 0x3b, 0x99, 0xf0, 0x16, 0xbd, 0xe5, 0x5c, 0xf5, 0x11, 0x5a, 0xf5, 0xa3, 0x67, 0xc0, 0x25, 0x50, 0x67, 0x21, 0x58, 0xfe, 0x66, 0xc1, 0xc6, 0x5c, 0xa4, 0x3f, 0x17, 0xfe, 0x03, 0xd5, 0x45, 0xda, 0xaf, 0x23, 0xfc, 0x23, 0xd0, 0x8a, 0x17, 0x7c, 0x44, 0x19, 0xd0, 0xf7, 0x8f, 0x40, 0xfd, 0x0f, 0xad, 0x26, 0x76, 0xef, 0x73, 0x4c, 0xa4, 0xcf, 0x22, 0xc7, 0xa7, 0x01, 0xa4, 0x3f, 0x4b, 0xc9, 0x93, 0x7f, 0x0a, 0xf1, 0x64, 0x33, 0x47, 0xbb, 0x34, 0x80, 0xd6, 0xac, 0x8c, 0xd6, 0x32, 0x27, 0x6e, 0x7a, 0x1e, 0x07, 0x0f, 0x4b, 0x76, 0x09, 0x96, 0xd4, 0x2e, 0xcf, 0x92, 0x17, 0x0a, 0x52, 0xb3, 0x2d, 0x75, 0xb2, 0x4b, 0xed, 0xf8, 0x54, 0x68, 0xa5, 0xe4, 0x9a, 0xb7, 0x1f, 0xcf, 0xa6, 0xcd, 0x46, 0x76, 0x7c, 0x7a, 0x2b, 0x05, 0xbb, 0x1d, 0x71, 0x3e, 0x6d, 0xde, 0xf0, 0x7c, 0x39, 0x18, 0xbb, 0x16, 0x61, 0xa1, 0xbd, 0xe8, 0x91, 0xba, 0xef, 0xd7, 0x76, 0x3c, 0xf4, 0xec, 0x4f, 0xdf, 0xb2, 0x35, 0x17, 0xe9, 0x35, 0xc4, 0x07, 0xaa, 0x54, 0xa8, 0x03, 0x54, 0xcb, 0x26, 0x85, 0x4f, 0xf3, 0x77, 0xb0, 0x9b, 0x34, 0x3b, 0x9b, 0x36, 0xab, 0x7b, 0x49, 0xbc, 0xdb, 0x39, 0x9f, 0x36, 0xaf, 0x7f, 0x6f, 0xe1, 0x6e, 0xa7, 0x57, 0x4d, 0xe5, 0xbb, 0x54, 0x7d, 0x8a, 0x96, 0x0e, 0x98, 0x9b, 0xd4, 0x49, 0x9e, 0x48, 0xa9, 0x7d, 0x27, 0xaf, 0x53, 0xd9, 0x66, 0x6e, 0x5a, 0xe5, 0xea, 0x57, 0x55, 0x39, 0x60, 0xae, 0x48, 0x3f, 0xb1, 0x6b, 0xa5, 0xcc, 0x5e, 0xe5, 0x80, 0xb9, 0x5d, 0xaa, 0xfe, 0x85, 0x56, 0x38, 0x7b, 0x2e, 0x1c, 0x98, 0xc4, 0x40, 0x24, 0x50, 0xad, 0x6a, 0x28, 0x66, 0x39, 0xf7, 0xfe, 0x87, 0x04, 0xba, 0x9d, 0x23, 0xdb, 0xe5, 0x5a, 0xa5, 0xb1, 0xf4, 0x77, 0x6b, 0x3e, 0xb9, 0xd2, 0x61, 0xb3, 0x86, 0x1a, 0x5b, 0x3b, 0x3b, 0xce, 0xfd, 0x9d, 0x07, 0xfd, 0xec, 0xf3, 0xf0, 0xff, 0x46, 0xa1, 0xfd, 0xef, 0xf1, 0x5b, 0xbd, 0x70, 0x3c, 0xd3, 0x95, 0x93, 0x99, 0xae, 0x9c, 0xce, 0x74, 0xe5, 0xcd, 0x4c, 0x57, 0x5e, 0x9d, 0xe9, 0x85, 0x93, 0x33, 0xbd, 0x70, 0x7a, 0xa6, 0x17, 0x9e, 0xd4, 0x2f, 0xcc, 0xdf, 0x77, 0x01, 0x00, 0x00, 0xff, 0xff, 0x93, 0x2b, 0x59, 0x42, 0x91, 0x05, 0x00, 0x00, } func (m *SketchSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SketchSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SketchSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.Index != nil { { size, err := m.Index.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintProcessorsTableStats(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x32 } i -= len(m.StatName) copy(dAtA[i:], m.StatName) i = encodeVarintProcessorsTableStats(dAtA, i, uint64(len(m.StatName))) i-- dAtA[i] = 0x2a i = encodeVarintProcessorsTableStats(dAtA, i, uint64(m.HistogramMaxBuckets)) i-- dAtA[i] = 0x20 i-- 
if m.GenerateHistogram { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x18 if len(m.Columns) > 0 { for iNdEx := len(m.Columns) - 1; iNdEx >= 0; iNdEx-- { i = encodeVarintProcessorsTableStats(dAtA, i, uint64(m.Columns[iNdEx])) i-- dAtA[i] = 0x10 } } i = encodeVarintProcessorsTableStats(dAtA, i, uint64(m.SketchType)) i-- dAtA[i] = 0x8 return len(dAtA) - i, nil } func (m *SamplerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SamplerSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SamplerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.InvertedSketches) > 0 { for iNdEx := len(m.InvertedSketches) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.InvertedSketches[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintProcessorsTableStats(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 } } i -= 8 encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.MaxFractionIdle)))) i-- dAtA[i] = 0x19 i = encodeVarintProcessorsTableStats(dAtA, i, uint64(m.SampleSize)) i-- dAtA[i] = 0x10 if len(m.Sketches) > 0 { for iNdEx := len(m.Sketches) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sketches[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintProcessorsTableStats(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } func (m *SampleAggregatorSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SampleAggregatorSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SampleAggregatorSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.InvertedSketches) > 0 { for iNdEx := len(m.InvertedSketches) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.InvertedSketches[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintProcessorsTableStats(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x42 } } i = encodeVarintProcessorsTableStats(dAtA, i, uint64(m.RowsExpected)) i-- dAtA[i] = 0x38 i = encodeVarintProcessorsTableStats(dAtA, i, uint64(m.JobID)) i-- dAtA[i] = 0x30 i = encodeVarintProcessorsTableStats(dAtA, i, uint64(m.TableID)) i-- dAtA[i] = 0x20 if len(m.SampledColumnIDs) > 0 { for iNdEx := len(m.SampledColumnIDs) - 1; iNdEx >= 0; iNdEx-- { i = encodeVarintProcessorsTableStats(dAtA, i, uint64(m.SampledColumnIDs[iNdEx])) i-- dAtA[i] = 0x18 } } i = encodeVarintProcessorsTableStats(dAtA, i, uint64(m.SampleSize)) i-- dAtA[i] = 0x10 if len(m.Sketches) > 0 { for iNdEx := len(m.Sketches) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sketches[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintProcessorsTableStats(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } func encodeVarintProcessorsTableStats(dAtA []byte, offset int, v uint64) int { offset -= sovProcessorsTableStats(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func (m *SketchSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l n += 1 + 
sovProcessorsTableStats(uint64(m.SketchType)) if len(m.Columns) > 0 { for _, e := range m.Columns { n += 1 + sovProcessorsTableStats(uint64(e)) } } n += 2 n += 1 + sovProcessorsTableStats(uint64(m.HistogramMaxBuckets)) l = len(m.StatName) n += 1 + l + sovProcessorsTableStats(uint64(l)) if m.Index != nil { l = m.Index.Size() n += 1 + l + sovProcessorsTableStats(uint64(l)) } return n } func (m *SamplerSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l if len(m.Sketches) > 0 { for _, e := range m.Sketches { l = e.Size() n += 1 + l + sovProcessorsTableStats(uint64(l)) } } n += 1 + sovProcessorsTableStats(uint64(m.SampleSize)) n += 9 if len(m.InvertedSketches) > 0 { for _, e := range m.InvertedSketches { l = e.Size() n += 1 + l + sovProcessorsTableStats(uint64(l)) } } return n } func (m *SampleAggregatorSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l if len(m.Sketches) > 0 { for _, e := range m.Sketches { l = e.Size() n += 1 + l + sovProcessorsTableStats(uint64(l)) } } n += 1 + sovProcessorsTableStats(uint64(m.SampleSize)) if len(m.SampledColumnIDs) > 0 { for _, e := range m.SampledColumnIDs { n += 1 + sovProcessorsTableStats(uint64(e)) } } n += 1 + sovProcessorsTableStats(uint64(m.TableID)) n += 1 + sovProcessorsTableStats(uint64(m.JobID)) n += 1 + sovProcessorsTableStats(uint64(m.RowsExpected)) if len(m.InvertedSketches) > 0 { for _, e := range m.InvertedSketches { l = e.Size() n += 1 + l + sovProcessorsTableStats(uint64(l)) } } return n } func sovProcessorsTableStats(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozProcessorsTableStats(x uint64) (n int) { return sovProcessorsTableStats(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *SketchSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: SketchSpec: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SketchSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field SketchType", wireType) } m.SketchType = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.SketchType |= SketchType(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType == 0 { var v uint32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= uint32(b&0x7F) << shift if b < 0x80 { break } } m.Columns = append(m.Columns, v) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ packedLen |= int(b&0x7F) << shift if b < 0x80 { break } } if packedLen < 0 { return ErrInvalidLengthProcessorsTableStats } postIndex := iNdEx + packedLen if postIndex < 0 { return ErrInvalidLengthProcessorsTableStats } if postIndex > l { return io.ErrUnexpectedEOF } var elementCount int var count int for _, integer := range dAtA[iNdEx:postIndex] { if 
integer < 128 { count++ } } elementCount = count if elementCount != 0 && len(m.Columns) == 0 { m.Columns = make([]uint32, 0, elementCount) } for iNdEx < postIndex { var v uint32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= uint32(b&0x7F) << shift if b < 0x80 { break } } m.Columns = append(m.Columns, v) } } else { return fmt.Errorf("proto: wrong wireType = %d for field Columns", wireType) } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field GenerateHistogram", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.GenerateHistogram = bool(v != 0) case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field HistogramMaxBuckets", wireType) } m.HistogramMaxBuckets = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.HistogramMaxBuckets |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StatName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthProcessorsTableStats } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthProcessorsTableStats } if postIndex > l { return io.ErrUnexpectedEOF } m.StatName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthProcessorsTableStats } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthProcessorsTableStats } if postIndex > l { return io.ErrUnexpectedEOF } if m.Index == nil { m.Index = &descpb.IndexDescriptor{} } if err := m.Index.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipProcessorsTableStats(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProcessorsTableStats } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *SamplerSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: SamplerSpec: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SamplerSpec: illegal tag %d (wire 
type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sketches", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthProcessorsTableStats } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthProcessorsTableStats } if postIndex > l { return io.ErrUnexpectedEOF } m.Sketches = append(m.Sketches, SketchSpec{}) if err := m.Sketches[len(m.Sketches)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field SampleSize", wireType) } m.SampleSize = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.SampleSize |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 1 { return fmt.Errorf("proto: wrong wireType = %d for field MaxFractionIdle", wireType) } var v uint64 if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 m.MaxFractionIdle = float64(math.Float64frombits(v)) case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field InvertedSketches", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthProcessorsTableStats } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthProcessorsTableStats } if postIndex > l { return io.ErrUnexpectedEOF } m.InvertedSketches = append(m.InvertedSketches, SketchSpec{}) if err := m.InvertedSketches[len(m.InvertedSketches)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipProcessorsTableStats(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProcessorsTableStats } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *SampleAggregatorSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: SampleAggregatorSpec: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SampleAggregatorSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sketches", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return 
ErrInvalidLengthProcessorsTableStats } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthProcessorsTableStats } if postIndex > l { return io.ErrUnexpectedEOF } m.Sketches = append(m.Sketches, SketchSpec{}) if err := m.Sketches[len(m.Sketches)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field SampleSize", wireType) } m.SampleSize = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.SampleSize |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType == 0 { var v github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.ColumnID for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.ColumnID(b&0x7F) << shift if b < 0x80 { break } } m.SampledColumnIDs = append(m.SampledColumnIDs, v) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ packedLen |= int(b&0x7F) << shift if b < 0x80 { break } } if packedLen < 0 { return ErrInvalidLengthProcessorsTableStats } postIndex := iNdEx + packedLen if postIndex < 0 { return ErrInvalidLengthProcessorsTableStats } if postIndex > l { return io.ErrUnexpectedEOF } var elementCount int var count int for _, integer := range dAtA[iNdEx:postIndex] { if integer < 128 { count++ } } elementCount = count if elementCount != 0 && len(m.SampledColumnIDs) == 0 { m.SampledColumnIDs = make([]github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.ColumnID, 0, elementCount) } for iNdEx < postIndex { var v github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.ColumnID for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.ColumnID(b&0x7F) << shift if b < 0x80 { break } } m.SampledColumnIDs = append(m.SampledColumnIDs, v) } } else { return fmt.Errorf("proto: wrong wireType = %d for field SampledColumnIDs", wireType) } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field TableID", wireType) } m.TableID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.TableID |= github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.ID(b&0x7F) << shift if b < 0x80 { break } } case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field JobID", wireType) } m.JobID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.JobID |= github_com_cockroachdb_cockroach_pkg_jobs_jobspb.JobID(b&0x7F) << shift if b < 0x80 { break } } case 7: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RowsExpected", wireType) } m.RowsExpected = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.RowsExpected |= uint64(b&0x7F) 
<< shift if b < 0x80 { break } } case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field InvertedSketches", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthProcessorsTableStats } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthProcessorsTableStats } if postIndex > l { return io.ErrUnexpectedEOF } m.InvertedSketches = append(m.InvertedSketches, SketchSpec{}) if err := m.InvertedSketches[len(m.InvertedSketches)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipProcessorsTableStats(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthProcessorsTableStats } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipProcessorsTableStats(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } case 1: iNdEx += 8 case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowProcessorsTableStats } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthProcessorsTableStats } iNdEx += length case 3: depth++ case 4: if depth == 0 { return 0, ErrUnexpectedEndOfGroupProcessorsTableStats } depth-- case 5: iNdEx += 4 default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } if iNdEx < 0 { return 0, ErrInvalidLengthProcessorsTableStats } if depth == 0 { return iNdEx, nil } } return 0, io.ErrUnexpectedEOF } var ( ErrInvalidLengthProcessorsTableStats = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowProcessorsTableStats = fmt.Errorf("proto: integer overflow") ErrUnexpectedEndOfGroupProcessorsTableStats = fmt.Errorf("proto: unexpected end of group") )
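// Illustrative sketch (not part of the generated file above): the rank-based
// reservoir sampling scheme described in the SamplerSpec comment above: every
// row gets a uniformly random 64-bit rank and only the rows with the smallest
// sample_size ranks are kept, which makes merging two sample sets as simple as
// concatenating them and again keeping the smallest ranks. All names below are
// hypothetical simplifications (rows are plain strings, no sketches/columns).
package main

import (
	"fmt"
	"math/rand"
)

type rankedRow struct {
	rank uint64
	row  string
}

type reservoir struct {
	capacity int
	rows     []rankedRow
}

// consider keeps rr only if the reservoir is not full or rr's rank is smaller
// than the current largest rank (linear scan for clarity, not performance).
func (r *reservoir) consider(rr rankedRow) {
	if len(r.rows) < r.capacity {
		r.rows = append(r.rows, rr)
		return
	}
	maxIdx := 0
	for i := range r.rows {
		if r.rows[i].rank > r.rows[maxIdx].rank {
			maxIdx = i
		}
	}
	if rr.rank < r.rows[maxIdx].rank {
		r.rows[maxIdx] = rr
	}
}

// add samples a new row by assigning it a uniformly random 64-bit rank.
func (r *reservoir) add(row string) {
	r.consider(rankedRow{rank: rand.Uint64(), row: row})
}

// merge folds another reservoir in, reusing the already-assigned ranks; this
// is why the rank-based scheme combines sample sets so easily.
func (r *reservoir) merge(other *reservoir) {
	for _, rr := range other.rows {
		r.consider(rr)
	}
}

func main() {
	a, b := &reservoir{capacity: 2}, &reservoir{capacity: 2}
	for _, row := range []string{"r1", "r2", "r3"} {
		a.add(row)
	}
	for _, row := range []string{"r4", "r5"} {
		b.add(row)
	}
	a.merge(b)
	fmt.Println(a.rows)
}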
pkg/sql/execinfrapb/processors_table_stats.pb.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.015565041452646255, 0.00042308698175475, 0.00016518414486199617, 0.0002024280111072585, 0.0014128502225503325 ]
{ "id": 4, "code_window": [ "}\n", "\n", "// MetadataSource is an interface implemented by processors and columnar\n", "// operators that can produce metadata.\n", "type MetadataSource interface {\n", "\t// DrainMeta returns all the metadata produced by the processor or operator.\n", "\t// It will be called exactly once, usually, when the processor or operator\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "// TODO(yuzefovich): remove this interface in favor of DrainableOperator and\n", "// clarify that calling DrainMeta on an uninitialized operator is illegal.\n" ], "file_path": "pkg/sql/colexecop/operator.go", "type": "add", "edit_start_line_idx": 363 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. import _ from "lodash"; import React from "react"; import { connect } from "react-redux"; import { createSelector } from "reselect"; import { RouteComponentProps, withRouter } from "react-router-dom"; import { refreshLiveness, refreshNodes } from "src/redux/apiReducers"; import { hoverOff as hoverOffAction, hoverOn as hoverOnAction, hoverStateSelector, HoverState, } from "src/redux/hover"; import { NodesSummary, nodesSummarySelector } from "src/redux/nodes"; import { AdminUIState } from "src/redux/state"; import { nodeIDAttr } from "src/util/constants"; import { GraphDashboardProps, storeIDsForNode, } from "src/views/cluster/containers/nodeGraphs/dashboards/dashboardUtils"; import TimeScaleDropdown from "src/views/cluster/containers/timescale"; import Dropdown, { DropdownOption } from "src/views/shared/components/dropdown"; import { PageConfig, PageConfigItem, } from "src/views/shared/components/pageconfig"; import { MetricsDataProvider } from "src/views/shared/containers/metricDataProvider"; import messagesDashboard from "./messages"; import { getMatchParamByName } from "src/util/query"; interface NodeGraphsOwnProps { refreshNodes: typeof refreshNodes; refreshLiveness: typeof refreshLiveness; hoverOn: typeof hoverOnAction; hoverOff: typeof hoverOffAction; nodesQueryValid: boolean; livenessQueryValid: boolean; nodesSummary: NodesSummary; hoverState: HoverState; } type RaftMessagesProps = NodeGraphsOwnProps & RouteComponentProps; export class RaftMessages extends React.Component<RaftMessagesProps> { /** * Selector to compute node dropdown options from the current node summary * collection. */ private nodeDropdownOptions = createSelector( (summary: NodesSummary) => summary.nodeStatuses, (summary: NodesSummary) => summary.nodeDisplayNameByID, (nodeStatuses, nodeDisplayNameByID): DropdownOption[] => { const base = [{ value: "", label: "Cluster" }]; return base.concat( _.map(nodeStatuses, (ns) => { return { value: ns.desc.node_id.toString(), label: nodeDisplayNameByID[ns.desc.node_id], }; }), ); }, ); refresh(props = this.props) { if (!props.nodesQueryValid) { props.refreshNodes(); } if (!props.livenessQueryValid) { props.refreshLiveness(); } } setClusterPath(nodeID: string) { const push = this.props.history.push; if (!_.isString(nodeID) || nodeID === "") { push("/raft/messages/all/"); } else { push(`/raft/messages/node/${nodeID}`); } } nodeChange = (selected: DropdownOption) => { this.setClusterPath(selected.value); }; componentDidMount() { this.refresh(); } componentDidUpdate(props: RaftMessagesProps) { this.refresh(props); } render() { const { match, nodesSummary, hoverState, hoverOn, hoverOff } = this.props; const selectedNode = getMatchParamByName(match, nodeIDAttr) || ""; const nodeSources = selectedNode !== "" ? [selectedNode] : null; // When "all" is the selected source, some graphs display a line for every // node in the cluster using the nodeIDs collection. However, if a specific // node is already selected, these per-node graphs should only display data // only for the selected node. const nodeIDs = nodeSources ? 
nodeSources : nodesSummary.nodeIDs; // If a single node is selected, we need to restrict the set of stores // queried for per-store metrics (only stores that belong to that node will // be queried). const storeSources = nodeSources ? storeIDsForNode(nodesSummary, nodeSources[0]) : null; // tooltipSelection is a string used in tooltips to reference the currently // selected nodes. This is a prepositional phrase, currently either "across // all nodes" or "on node X". const tooltipSelection = nodeSources && nodeSources.length === 1 ? `on node ${nodeSources[0]}` : "across all nodes"; const dashboardProps: GraphDashboardProps = { nodeIDs, nodesSummary, nodeSources, storeSources, tooltipSelection, }; // Generate graphs for the current dashboard, wrapping each one in a // MetricsDataProvider with a unique key. const graphs = messagesDashboard(dashboardProps); const graphComponents = _.map(graphs, (graph, idx) => { const key = `nodes.raftMessages.${idx}`; return ( <div key={key}> <MetricsDataProvider id={key}> {React.cloneElement(graph, { hoverOn, hoverOff, hoverState })} </MetricsDataProvider> </div> ); }); return ( <div> <PageConfig> <PageConfigItem> <Dropdown title="Graph" options={this.nodeDropdownOptions(this.props.nodesSummary)} selected={selectedNode} onChange={this.nodeChange} /> </PageConfigItem> <PageConfigItem> <TimeScaleDropdown /> </PageConfigItem> </PageConfig> <div className="section l-columns"> <div className="chart-group l-columns__left">{graphComponents}</div> </div> </div> ); } } const mapStateToProps = (state: AdminUIState) => ({ // RootState contains declaration for whole state nodesSummary: nodesSummarySelector(state), nodesQueryValid: state.cachedData.nodes.valid, livenessQueryValid: state.cachedData.nodes.valid, hoverState: hoverStateSelector(state), }); const mapDispatchToProps = { refreshNodes, refreshLiveness, hoverOn: hoverOnAction, hoverOff: hoverOffAction, }; export default withRouter( connect(mapStateToProps, mapDispatchToProps)(RaftMessages), );
pkg/ui/src/views/devtools/containers/raftMessages/index.tsx
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.005613155197352171, 0.0005795431789010763, 0.00016709353076294065, 0.00017198018031194806, 0.001257929252460599 ]
{ "id": 4, "code_window": [ "}\n", "\n", "// MetadataSource is an interface implemented by processors and columnar\n", "// operators that can produce metadata.\n", "type MetadataSource interface {\n", "\t// DrainMeta returns all the metadata produced by the processor or operator.\n", "\t// It will be called exactly once, usually, when the processor or operator\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "// TODO(yuzefovich): remove this interface in favor of DrainableOperator and\n", "// clarify that calling DrainMeta on an uninitialized operator is illegal.\n" ], "file_path": "pkg/sql/colexecop/operator.go", "type": "add", "edit_start_line_idx": 363 }
exec-ddl CREATE TABLE ab (a INT, b STRING) ---- build CANCEL JOBS SELECT 1 ---- control-jobs (CANCEL) └── project ├── columns: "?column?":1!null ├── values │ └── () └── projections └── 1 [as="?column?":1] build RESUME JOBS VALUES (1), (2), (3) ---- control-jobs (RESUME) └── values ├── columns: column1:1!null ├── (1,) ├── (2,) └── (3,) build PAUSE JOBS SELECT a FROM ab ORDER BY b ---- control-jobs (PAUSE) └── sort ├── columns: a:1 [hidden: b:2] ├── ordering: +2 └── project ├── columns: a:1 b:2 └── scan ab └── columns: a:1 b:2 rowid:3!null crdb_internal_mvcc_timestamp:4 build PAUSE JOB 1 ---- control-jobs (PAUSE) └── values ├── columns: column1:1!null └── (1,) build PAUSE JOBS SELECT 1.1 ---- error (42601): PAUSE JOBS data column 1 (job_id) must be of type int, not type decimal build CANCEL JOBS SELECT 1, 1 ---- error (42601): too many columns in CANCEL JOBS data build CANCEL SESSION 'foo' ---- cancel-sessions └── values ├── columns: column1:1!null └── ('foo',) build CANCEL SESSIONS VALUES ('foo'), ('bar') ---- cancel-sessions └── values ├── columns: column1:1!null ├── ('foo',) └── ('bar',) build CANCEL SESSIONS SELECT b FROM ab ORDER BY a ---- cancel-sessions └── sort ├── columns: b:2 [hidden: a:1] ├── ordering: +1 └── project ├── columns: a:1 b:2 └── scan ab └── columns: a:1 b:2 rowid:3!null crdb_internal_mvcc_timestamp:4 build CANCEL SESSION 1 ---- error (42601): CANCEL SESSIONS data column 1 (session_id) must be of type string, not type int build CANCEL SESSIONS VALUES (1, 2) ---- error (42601): too many columns in CANCEL SESSIONS data build CANCEL QUERY 'foo' ---- cancel-queries └── values ├── columns: column1:1!null └── ('foo',) build CANCEL QUERIES VALUES ('foo'), ('bar') ---- cancel-queries └── values ├── columns: column1:1!null ├── ('foo',) └── ('bar',) build CANCEL QUERIES SELECT b FROM ab ORDER BY a ---- cancel-queries └── sort ├── columns: b:2 [hidden: a:1] ├── ordering: +1 └── project ├── columns: a:1 b:2 └── scan ab └── columns: a:1 b:2 rowid:3!null crdb_internal_mvcc_timestamp:4 build CANCEL QUERY 1 ---- error (42601): CANCEL QUERIES data column 1 (query_id) must be of type string, not type int build CANCEL QUERIES VALUES (1, 2) ---- error (42601): too many columns in CANCEL QUERIES data build EXPORT INTO CSV 'nodelocal://0/foo' FROM SELECT * FROM ab ---- export ├── columns: filename:5 rows:6 bytes:7 ├── format: CSV ├── project │ ├── columns: a:1 b:2 │ └── scan ab │ └── columns: a:1 b:2 rowid:3!null crdb_internal_mvcc_timestamp:4 └── 'nodelocal://0/foo' build EXPORT INTO CSV 'nodelocal://0/foo' WITH 'foo', 'bar'='baz' FROM SELECT * FROM ab ---- export ├── columns: filename:5 rows:6 bytes:7 ├── format: CSV ├── project │ ├── columns: a:1 b:2 │ └── scan ab │ └── columns: a:1 b:2 rowid:3!null crdb_internal_mvcc_timestamp:4 ├── 'nodelocal://0/foo' └── k-v-options ├── k-v-options-item foo │ └── CAST(NULL AS STRING) └── k-v-options-item bar └── 'baz' build EXPORT INTO CSV 'nodelocal://0/foo' WITH 'foo' = $1 FROM SELECT * FROM ab ---- export ├── columns: filename:5 rows:6 bytes:7 ├── format: CSV ├── project │ ├── columns: a:1 b:2 │ └── scan ab │ └── columns: a:1 b:2 rowid:3!null crdb_internal_mvcc_timestamp:4 ├── 'nodelocal://0/foo' └── k-v-options └── k-v-options-item foo └── $1 build CREATE STATISTICS foo FROM ab ---- create-statistics └── CREATE STATISTICS foo FROM ab build ANALYZE ab ---- create-statistics └── CREATE STATISTICS "" FROM ab
pkg/sql/opt/optbuilder/testdata/misc_statements
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.011270822025835514, 0.0009253211319446564, 0.00016661686822772026, 0.00017040986858773977, 0.002488283906131983 ]
{ "id": 5, "code_window": [ "// Outbox is used to push data from local flows to a remote endpoint. Run may\n", "// be called with the necessary information to establish a connection to a\n", "// given remote endpoint.\n", "type Outbox struct {\n", "\tcolexecop.OneInputNode\n", "\n", "\ttyps []*types.T\n", "\n", "\tconverter *colserde.ArrowBatchConverter\n", "\tserializer *colserde.RecordBatchSerializer\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tinputInitialized bool\n" ], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "add", "edit_start_line_idx": 47 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colflow import ( "context" "sync" "sync/atomic" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colcontainer" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexechash" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/marusama/semaphore" ) // routerOutput is an interface implemented by router outputs. It exists for // easier test mocking of outputs. type routerOutput interface { execinfra.OpNode // initWithHashRouter passes a reference to the HashRouter that will be // pushing batches to this output. initWithHashRouter(*HashRouter) // addBatch adds the elements specified by the selection vector from batch // to the output. It returns whether or not the output changed its state to // blocked (see implementations). addBatch(context.Context, coldata.Batch) bool // cancel tells the output to stop producing batches. Optionally forwards an // error if not nil. cancel(context.Context, error) // forwardErr forwards an error to the output. The output should call // colexecerror.ExpectedError with this error on the next call to Next. // Calling forwardErr multiple times will result in the most recent error // overwriting the previous error. forwardErr(error) // resetForTests resets the routerOutput for a benchmark or test run. resetForTests(context.Context) } // getDefaultRouterOutputBlockedThreshold returns the number of unread values // buffered by the routerOutputOp after which the output is considered blocked. // It is a function rather than a variable so that in tests we could modify // coldata.BatchSize() (if it were a variable, then its value would be // evaluated before we set the desired batch size). func getDefaultRouterOutputBlockedThreshold() int { return coldata.BatchSize() * 2 } type routerOutputOpState int const ( // routerOutputOpRunning is the state in which routerOutputOp operates // normally. The router output transitions into routerOutputDoneAdding when // a zero-length batch was added or routerOutputOpDraining when it // encounters an error or the drain is requested. routerOutputOpRunning routerOutputOpState = iota // routerOutputDoneAdding is the state in which a zero-length was batch was // added to routerOutputOp and no more batches will be added. The router // output transitions to routerOutputOpDraining when the output is canceled // (either closed or the drain is requested). routerOutputDoneAdding // routerOutputOpDraining is the state in which routerOutputOp always // returns zero-length batches on calls to Next. 
routerOutputOpDraining ) // drainCoordinator is an interface that the HashRouter implements to coordinate // cancellation of all of its outputs in the case of an error and draining in // the case of graceful termination. // WARNING: No locks should be held when calling these methods, as the // HashRouter might call routerOutput methods (e.g. cancel) that attempt to // reacquire locks. type drainCoordinator interface { // encounteredError should be called when a routerOutput encounters an error. // This terminates execution. No locks should be held when calling this // method, since cancellation could occur. encounteredError(context.Context) // drainMeta should be called exactly once when the routerOutput moves to // draining. drainMeta() []execinfrapb.ProducerMetadata } type routerOutputOp struct { colexecop.InitHelper // input is a reference to our router. input execinfra.OpNode // drainCoordinator is a reference to the HashRouter to be able to notify it // if the output encounters an error or transitions to a draining state. drainCoordinator drainCoordinator types []*types.T // unblockedEventsChan is signaled when a routerOutput changes state from // blocked to unblocked. unblockedEventsChan chan<- struct{} mu struct { syncutil.Mutex state routerOutputOpState // forwardedErr is an error that was forwarded by the HashRouter. If set, // any subsequent calls to Next will return this error. forwardedErr error cond *sync.Cond // data is a SpillingQueue, a circular buffer backed by a disk queue. data *colexecutils.SpillingQueue numUnread int blocked bool } testingKnobs routerOutputOpTestingKnobs } func (o *routerOutputOp) ChildCount(verbose bool) int { return 1 } func (o *routerOutputOp) Child(nth int, verbose bool) execinfra.OpNode { if nth == 0 { return o.input } colexecerror.InternalError(errors.AssertionFailedf("invalid index %d", nth)) // This code is unreachable, but the compiler cannot infer that. return nil } var _ colexecop.Operator = &routerOutputOp{} type routerOutputOpTestingKnobs struct { // blockedThreshold is the number of buffered values above which we consider // a router output to be blocked. It defaults to // defaultRouterOutputBlockedThreshold but can be modified by tests to test // edge cases. blockedThreshold int // addBatchTestInducedErrorCb is called after any function call that could // produce an error if that error is nil. If the callback returns an error, // the router output overwrites the nil error with the returned error. // It is guaranteed that this callback will be called at least once during // normal execution. addBatchTestInducedErrorCb func() error // nextTestInducedErrorCb is called after any function call that could // produce an error if that error is nil. If the callback returns an error, // the router output overwrites the nil error with the returned error. // It is guaranteed that this callback will be called at least once during // normal execution. nextTestInducedErrorCb func() error } // routerOutputOpArgs are the arguments to newRouterOutputOp. All fields apart // from the testing knobs are optional. type routerOutputOpArgs struct { // All fields are required unless marked optional. types []*types.T // unlimitedAllocator should not have a memory limit. Pass in a soft // memoryLimit that will be respected instead. unlimitedAllocator *colmem.Allocator // memoryLimit acts as a soft limit to allow the router output to use disk // when it is exceeded. 
memoryLimit int64 diskAcc *mon.BoundAccount cfg colcontainer.DiskQueueCfg fdSemaphore semaphore.Semaphore // unblockedEventsChan must be a buffered channel. unblockedEventsChan chan<- struct{} testingKnobs routerOutputOpTestingKnobs } // newRouterOutputOp creates a new router output. func newRouterOutputOp(args routerOutputOpArgs) *routerOutputOp { if args.testingKnobs.blockedThreshold == 0 { args.testingKnobs.blockedThreshold = getDefaultRouterOutputBlockedThreshold() } o := &routerOutputOp{ types: args.types, unblockedEventsChan: args.unblockedEventsChan, testingKnobs: args.testingKnobs, } o.mu.cond = sync.NewCond(&o.mu) o.mu.data = colexecutils.NewSpillingQueue( &colexecutils.NewSpillingQueueArgs{ UnlimitedAllocator: args.unlimitedAllocator, Types: args.types, MemoryLimit: args.memoryLimit, DiskQueueCfg: args.cfg, FDSemaphore: args.fdSemaphore, DiskAcc: args.diskAcc, }, ) return o } func (o *routerOutputOp) Init(ctx context.Context) { o.InitHelper.Init(ctx) } // nextErrorLocked is a helper method that handles an error encountered in Next. func (o *routerOutputOp) nextErrorLocked(err error) { o.mu.state = routerOutputOpDraining o.maybeUnblockLocked() // Unlock the mutex, since the HashRouter will cancel all outputs. o.mu.Unlock() o.drainCoordinator.encounteredError(o.Ctx) o.mu.Lock() colexecerror.InternalError(err) } // Next returns the next coldata.Batch from the routerOutputOp. Note that Next // is designed for only one concurrent caller and will block until data is // ready. func (o *routerOutputOp) Next() coldata.Batch { o.mu.Lock() defer o.mu.Unlock() for o.mu.forwardedErr == nil && o.mu.state == routerOutputOpRunning && o.mu.data.Empty() { // Wait until there is data to read or the output is canceled. o.mu.cond.Wait() } if o.mu.forwardedErr != nil { colexecerror.ExpectedError(o.mu.forwardedErr) } if o.mu.state == routerOutputOpDraining { return coldata.ZeroBatch } b, err := o.mu.data.Dequeue(o.Ctx) if err == nil && o.testingKnobs.nextTestInducedErrorCb != nil { err = o.testingKnobs.nextTestInducedErrorCb() } if err != nil { o.nextErrorLocked(err) } o.mu.numUnread -= b.Length() if o.mu.numUnread <= o.testingKnobs.blockedThreshold { o.maybeUnblockLocked() } if b.Length() == 0 { if o.testingKnobs.nextTestInducedErrorCb != nil { if err := o.testingKnobs.nextTestInducedErrorCb(); err != nil { o.nextErrorLocked(err) } } // This is the last batch. closeLocked will set done to protect against // further calls to Next since this is allowed by the interface as well as // cleaning up and releasing possible disk infrastructure. o.closeLocked(o.Ctx) } return b } func (o *routerOutputOp) DrainMeta() []execinfrapb.ProducerMetadata { o.mu.Lock() o.mu.state = routerOutputOpDraining o.maybeUnblockLocked() o.mu.Unlock() return o.drainCoordinator.drainMeta() } func (o *routerOutputOp) initWithHashRouter(r *HashRouter) { o.input = r o.drainCoordinator = r } func (o *routerOutputOp) closeLocked(ctx context.Context) { o.mu.state = routerOutputOpDraining if err := o.mu.data.Close(ctx); err != nil { // This log message is Info instead of Warning because the flow will also // attempt to clean up the parent directory, so this failure might not have // any effect. log.Infof(ctx, "error closing vectorized hash router output, files may be left over: %s", err) } } // cancel wakes up a reader in Next if there is one and results in the output // returning zero length batches for every Next call after cancel. Note that // all accumulated data that hasn't been read will not be returned. 
func (o *routerOutputOp) cancel(ctx context.Context, err error) { o.mu.Lock() defer o.mu.Unlock() o.closeLocked(ctx) o.forwardErrLocked(err) // Some goroutine might be waiting on the condition variable, so wake it up. // Note that read goroutines check o.mu.done, so won't wait on the condition // variable after we unlock the mutex. o.mu.cond.Signal() } func (o *routerOutputOp) forwardErrLocked(err error) { if err != nil { o.mu.forwardedErr = err } } func (o *routerOutputOp) forwardErr(err error) { o.mu.Lock() defer o.mu.Unlock() o.forwardErrLocked(err) o.mu.cond.Signal() } // addBatch copies the batch (according to its selection vector) into an // internal buffer. Zero-length batch should be passed-in to indicate that no // more batches will be added. // TODO(asubiotto): We should explore pipelining addBatch if disk-spilling // performance becomes a concern. The main router goroutine will be writing to // disk as the code is written, meaning that we impact the performance of // writing rows to a fast output if we have to write to disk for a single // slow output. func (o *routerOutputOp) addBatch(ctx context.Context, batch coldata.Batch) bool { o.mu.Lock() defer o.mu.Unlock() switch o.mu.state { case routerOutputDoneAdding: colexecerror.InternalError(errors.AssertionFailedf("a batch was added to routerOutput in DoneAdding state")) case routerOutputOpDraining: // This output is draining, discard any data. return false } o.mu.numUnread += batch.Length() o.mu.data.Enqueue(ctx, batch) if o.testingKnobs.addBatchTestInducedErrorCb != nil { if err := o.testingKnobs.addBatchTestInducedErrorCb(); err != nil { colexecerror.InternalError(err) } } if batch.Length() == 0 { o.mu.state = routerOutputDoneAdding o.mu.cond.Signal() return false } stateChanged := false if o.mu.numUnread > o.testingKnobs.blockedThreshold && !o.mu.blocked { // The output is now blocked. o.mu.blocked = true stateChanged = true } o.mu.cond.Signal() return stateChanged } // maybeUnblockLocked unblocks the router output if it is in a blocked state. If the // output was previously in a blocked state, an event will be sent on // routerOutputOp.unblockedEventsChan. func (o *routerOutputOp) maybeUnblockLocked() { if o.mu.blocked { o.mu.blocked = false o.unblockedEventsChan <- struct{}{} } } // resetForTests resets the routerOutputOp for a test or benchmark run. func (o *routerOutputOp) resetForTests(ctx context.Context) { o.mu.Lock() defer o.mu.Unlock() o.mu.state = routerOutputOpRunning o.mu.forwardedErr = nil o.mu.data.Reset(ctx) o.mu.numUnread = 0 o.mu.blocked = false } // hashRouterDrainState is a state that specifically describes the hashRouter's // state in the draining process. This differs from its "general" state. For // example, a hash router can have drained and exited the Run method but still // be in hashRouterDrainStateRunning until somebody calls drainMeta. type hashRouterDrainState int const ( // hashRouterDrainStateRunning is the state that a hashRouter is in when // running normally (i.e. pulling and pushing batches). hashRouterDrainStateRunning = iota // hashRouterDrainStateRequested is the state that a hashRouter is in when // either all outputs have called drainMeta or an error was encountered by one // of the outputs. hashRouterDrainStateRequested // hashRouterDrainStateCompleted is the state that a hashRouter is in when // draining has completed. hashRouterDrainStateCompleted ) // HashRouter hashes values according to provided hash columns and computes a // destination for each row. 
These destinations are exposed as Operators // returned by the constructor. type HashRouter struct { colexecop.OneInputNode // inputMetaInfo contains all of the meta components that the hash router // is responsible for. Root field is exactly the same as OneInputNode.Input. inputMetaInfo colexecargs.OpWithMetaInfo // hashCols is a slice of indices of the columns used for hashing. hashCols []uint32 // One output for each stream. outputs []routerOutput // unblockedEventsChan is a channel shared between the HashRouter and its // outputs. outputs send events on this channel when they are unblocked by a // read. unblockedEventsChan <-chan struct{} numBlockedOutputs int bufferedMeta []execinfrapb.ProducerMetadata // atomics is shared state between the Run goroutine and any routerOutput // goroutines that call drainMeta. atomics struct { // drainState is the state the hashRouter is in. The Run goroutine should // only ever read these states, never set them. drainState int32 numDrainedOutputs int32 } // waitForMetadata is a channel that the last output to drain will read from // to pass on any metadata buffered through the Run goroutine. waitForMetadata chan []execinfrapb.ProducerMetadata // tupleDistributor is used to decide to which output a particular tuple // should be routed. tupleDistributor *colexechash.TupleHashDistributor } // NewHashRouter creates a new hash router that consumes coldata.Batches from // input and hashes each row according to hashCols to one of the outputs // returned as Operators. // The number of allocators provided will determine the number of outputs // returned. Note that each allocator must be unlimited, memory will be limited // by comparing memory use in the allocator with the memoryLimit argument. Each // Operator must have an independent allocator (this means that each allocator // should be linked to an independent mem account) as Operator.Next will usually // be called concurrently between different outputs. Similarly, each output // needs to have a separate disk account. func NewHashRouter( unlimitedAllocators []*colmem.Allocator, input colexecargs.OpWithMetaInfo, types []*types.T, hashCols []uint32, memoryLimit int64, diskQueueCfg colcontainer.DiskQueueCfg, fdSemaphore semaphore.Semaphore, diskAccounts []*mon.BoundAccount, ) (*HashRouter, []colexecop.DrainableOperator) { if diskQueueCfg.CacheMode != colcontainer.DiskQueueCacheModeDefault { colexecerror.InternalError(errors.Errorf("hash router instantiated with incompatible disk queue cache mode: %d", diskQueueCfg.CacheMode)) } outputs := make([]routerOutput, len(unlimitedAllocators)) outputsAsOps := make([]colexecop.DrainableOperator, len(unlimitedAllocators)) // unblockEventsChan is buffered to 2*numOutputs as we don't want the outputs // writing to it to block. // Unblock events only happen after a corresponding block event. Since these // are state changes and are done under lock (including the output sending // on the channel, which is why we want the channel to be buffered in the // first place), every time the HashRouter blocks an output, it *must* read // all unblock events preceding it since these *must* be on the channel. 
unblockEventsChan := make(chan struct{}, 2*len(unlimitedAllocators)) memoryLimitPerOutput := memoryLimit / int64(len(unlimitedAllocators)) for i := range unlimitedAllocators { op := newRouterOutputOp( routerOutputOpArgs{ types: types, unlimitedAllocator: unlimitedAllocators[i], memoryLimit: memoryLimitPerOutput, diskAcc: diskAccounts[i], cfg: diskQueueCfg, fdSemaphore: fdSemaphore, unblockedEventsChan: unblockEventsChan, }, ) outputs[i] = op outputsAsOps[i] = op } return newHashRouterWithOutputs(input, hashCols, unblockEventsChan, outputs), outputsAsOps } func newHashRouterWithOutputs( input colexecargs.OpWithMetaInfo, hashCols []uint32, unblockEventsChan <-chan struct{}, outputs []routerOutput, ) *HashRouter { r := &HashRouter{ OneInputNode: colexecop.NewOneInputNode(input.Root), inputMetaInfo: input, hashCols: hashCols, outputs: outputs, unblockedEventsChan: unblockEventsChan, // waitForMetadata is a buffered channel to avoid blocking if nobody will // read the metadata. waitForMetadata: make(chan []execinfrapb.ProducerMetadata, 1), tupleDistributor: colexechash.NewTupleHashDistributor(colexechash.DefaultInitHashValue, len(outputs)), } for i := range outputs { outputs[i].initWithHashRouter(r) } return r } // cancelOutputs cancels all outputs and forwards the given error to all of // them if non-nil. The only case where the error is not forwarded is if no // output could be canceled due to an error. In this case each output will // forward the error returned during cancellation. func (r *HashRouter) cancelOutputs(ctx context.Context, errToForward error) { for _, o := range r.outputs { if err := colexecerror.CatchVectorizedRuntimeError(func() { o.cancel(ctx, errToForward) }); err != nil { // If there was an error canceling this output, this error can be // forwarded to whoever is calling Next. o.forwardErr(err) } } } func (r *HashRouter) setDrainState(drainState hashRouterDrainState) { atomic.StoreInt32(&r.atomics.drainState, int32(drainState)) } func (r *HashRouter) getDrainState() hashRouterDrainState { return hashRouterDrainState(atomic.LoadInt32(&r.atomics.drainState)) } // Run runs the HashRouter. Batches are read from the input and pushed to an // output calculated by hashing columns. Cancel the given context to terminate // early. func (r *HashRouter) Run(ctx context.Context) { var span *tracing.Span ctx, span = execinfra.ProcessorSpan(ctx, "hash router") if span != nil { defer span.Finish() } // Since HashRouter runs in a separate goroutine, we want to be safe and // make sure that we catch errors in all code paths, so we wrap the whole // method with a catcher. Note that we also have "internal" catchers as // well for more fine-grained control of error propagation. if err := colexecerror.CatchVectorizedRuntimeError(func() { r.Input.Init(ctx) var done bool processNextBatch := func() { done = r.processNextBatch(ctx) } for { if r.getDrainState() != hashRouterDrainStateRunning { break } // Check for cancellation. select { case <-ctx.Done(): r.cancelOutputs(ctx, ctx.Err()) return default: } // Read all the routerOutput state changes that have happened since the // last iteration. for moreToRead := true; moreToRead; { select { case <-r.unblockedEventsChan: r.numBlockedOutputs-- default: // No more routerOutput state changes to read without blocking. moreToRead = false } } if r.numBlockedOutputs == len(r.outputs) { // All outputs are blocked, wait until at least one output is unblocked. 
select { case <-r.unblockedEventsChan: r.numBlockedOutputs-- case <-ctx.Done(): r.cancelOutputs(ctx, ctx.Err()) return } } if err := colexecerror.CatchVectorizedRuntimeError(processNextBatch); err != nil { r.cancelOutputs(ctx, err) return } if done { // The input was done and we have notified the routerOutputs that there // is no more data. return } } }); err != nil { r.cancelOutputs(ctx, err) } if span != nil { for _, s := range r.inputMetaInfo.StatsCollectors { span.RecordStructured(s.GetStats()) } if meta := execinfra.GetTraceDataAsMetadata(span); meta != nil { r.bufferedMeta = append(r.bufferedMeta, *meta) } } r.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...) // Non-blocking send of metadata so that one of the outputs can return it // in DrainMeta. r.waitForMetadata <- r.bufferedMeta close(r.waitForMetadata) r.inputMetaInfo.ToClose.CloseAndLogOnErr(ctx, "hash router") } // processNextBatch reads the next batch from its input, hashes it and adds // each column to its corresponding output, returning whether the input is // done. func (r *HashRouter) processNextBatch(ctx context.Context) bool { b := r.Input.Next() n := b.Length() if n == 0 { // Done. Push an empty batch to outputs to tell them the data is done as // well. for _, o := range r.outputs { o.addBatch(ctx, b) } return true } // It is ok that we call Init() on every batch since all calls except for // the first one are noops. r.tupleDistributor.Init(ctx) selections := r.tupleDistributor.Distribute(b, r.hashCols) for i, o := range r.outputs { if len(selections[i]) > 0 { b.SetSelection(true) copy(b.Selection(), selections[i]) b.SetLength(len(selections[i])) if o.addBatch(ctx, b) { // This batch blocked the output. r.numBlockedOutputs++ } } } return false } // resetForTests resets the HashRouter for a test or benchmark run. func (r *HashRouter) resetForTests(ctx context.Context) { if i, ok := r.Input.(colexecop.Resetter); ok { i.Reset(ctx) } r.setDrainState(hashRouterDrainStateRunning) r.waitForMetadata = make(chan []execinfrapb.ProducerMetadata, 1) r.atomics.numDrainedOutputs = 0 r.bufferedMeta = nil r.numBlockedOutputs = 0 for moreToRead := true; moreToRead; { select { case <-r.unblockedEventsChan: default: moreToRead = false } } for _, o := range r.outputs { o.resetForTests(ctx) } } func (r *HashRouter) encounteredError(ctx context.Context) { // Once one output returns an error the hash router needs to stop running // and drain its input. r.setDrainState(hashRouterDrainStateRequested) // cancel all outputs. The Run goroutine will eventually realize that the // HashRouter is done and exit without draining. r.cancelOutputs(ctx, nil /* errToForward */) } func (r *HashRouter) drainMeta() []execinfrapb.ProducerMetadata { if int(atomic.AddInt32(&r.atomics.numDrainedOutputs, 1)) != len(r.outputs) { return nil } // All outputs have been drained, return any buffered metadata to the last // output to call drainMeta. r.setDrainState(hashRouterDrainStateRequested) meta := <-r.waitForMetadata r.setDrainState(hashRouterDrainStateCompleted) return meta }
pkg/sql/colflow/routers.go
1
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.06864051520824432, 0.0016481211641803384, 0.00016389810480177402, 0.0002682702033780515, 0.008081478998064995 ]
{ "id": 5, "code_window": [ "// Outbox is used to push data from local flows to a remote endpoint. Run may\n", "// be called with the necessary information to establish a connection to a\n", "// given remote endpoint.\n", "type Outbox struct {\n", "\tcolexecop.OneInputNode\n", "\n", "\ttyps []*types.T\n", "\n", "\tconverter *colserde.ArrowBatchConverter\n", "\tserializer *colserde.RecordBatchSerializer\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tinputInitialized bool\n" ], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "add", "edit_start_line_idx": 47 }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package diagnostics_test import ( "context" gosql "database/sql" "fmt" "testing" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/ccl/kvccl/kvtenantccl" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/diagnostics" "github.com/cockroachdb/cockroach/pkg/server/diagnostics/diagnosticspb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/diagutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/util/cloudinfo" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/system" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) // Dummy import to pull in kvtenantccl. This allows us to start tenants. var _ = kvtenantccl.Connector{} const elemName = "somestring" func TestTenantReport(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) rt := startReporterTest(t) defer rt.Close() tenantArgs := base.TestTenantArgs{ TenantID: roachpb.MakeTenantID(security.EmbeddedTenantIDs()[0]), AllowSettingClusterSettings: true, TestingKnobs: rt.testingKnobs, } tenant, tenantDB := serverutils.StartTenant(t, rt.server, tenantArgs) reporter := tenant.DiagnosticsReporter().(*diagnostics.Reporter) ctx := context.Background() setupCluster(t, tenantDB) // Clear the SQL stat pool before getting diagnostics. rt.server.SQLServer().(*sql.Server).ResetSQLStats(ctx) reporter.ReportDiagnostics(ctx) require.Equal(t, 1, rt.diagServer.NumRequests()) last := rt.diagServer.LastRequestData() require.Equal(t, rt.server.ClusterID().String(), last.UUID) require.Equal(t, tenantArgs.TenantID.String(), last.TenantID) require.Equal(t, "", last.NodeID) require.Equal(t, tenant.SQLInstanceID().String(), last.SQLInstanceID) require.Equal(t, "true", last.Internal) // Verify environment. verifyEnvironment(t, "", roachpb.Locality{}, &last.Env) // Verify SQL info. require.Equal(t, tenant.SQLInstanceID(), last.SQL.SQLInstanceID) // Verify FeatureUsage. require.NotZero(t, len(last.FeatureUsage)) // Call PeriodicallyReportDiagnostics and ensure it sends out a report. reporter.PeriodicallyReportDiagnostics(ctx, rt.server.Stopper()) testutils.SucceedsSoon(t, func() error { if rt.diagServer.NumRequests() != 2 { return errors.Errorf("did not receive a diagnostics report") } return nil }) } // TestServerReport checks nodes, stores, localities, and zone configs. // Telemetry metrics are checked in datadriven tests (see sql.TestTelemetry). 
func TestServerReport(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) rt := startReporterTest(t) defer rt.Close() ctx := context.Background() setupCluster(t, rt.serverDB) for _, cmd := range []struct { resource string config string }{ {"TABLE system.rangelog", fmt.Sprintf(`constraints: [+zone=%[1]s, +%[1]s]`, elemName)}, {"TABLE system.rangelog", `{gc: {ttlseconds: 1}}`}, {"DATABASE system", `num_replicas: 5`}, {"DATABASE system", fmt.Sprintf(`constraints: {"+zone=%[1]s,+%[1]s": 2, +%[1]s: 1}`, elemName)}, {"DATABASE system", fmt.Sprintf(`experimental_lease_preferences: [[+zone=%[1]s,+%[1]s], [+%[1]s]]`, elemName)}, } { testutils.SucceedsSoon(t, func() error { if _, err := rt.serverDB.Exec( fmt.Sprintf(`ALTER %s CONFIGURE ZONE = '%s'`, cmd.resource, cmd.config), ); err != nil { // Work around gossip asynchronicity. return errors.Errorf("error applying zone config %q to %q: %v", cmd.config, cmd.resource, err) } return nil }) } expectedUsageReports := 0 clusterSecret := sql.ClusterSecret.Get(&rt.settings.SV) testutils.SucceedsSoon(t, func() error { expectedUsageReports++ node := rt.server.MetricsRecorder().GenerateNodeStatus(ctx) // Clear the SQL stat pool before getting diagnostics. rt.server.SQLServer().(*sql.Server).ResetSQLStats(ctx) rt.server.DiagnosticsReporter().(*diagnostics.Reporter).ReportDiagnostics(ctx) keyCounts := make(map[roachpb.StoreID]int64) rangeCounts := make(map[roachpb.StoreID]int64) totalKeys := int64(0) totalRanges := int64(0) for _, store := range node.StoreStatuses { keys, ok := store.Metrics["keycount"] require.True(t, ok, "keycount not in metrics") totalKeys += int64(keys) keyCounts[store.Desc.StoreID] = int64(keys) replicas, ok := store.Metrics["replicas"] require.True(t, ok, "replicas not in metrics") totalRanges += int64(replicas) rangeCounts[store.Desc.StoreID] = int64(replicas) } require.Equal(t, expectedUsageReports, rt.diagServer.NumRequests()) last := rt.diagServer.LastRequestData() if minExpected, actual := totalKeys, last.Node.KeyCount; minExpected > actual { return errors.Errorf("expected node keys at least %v got %v", minExpected, actual) } if minExpected, actual := totalRanges, last.Node.RangeCount; minExpected > actual { return errors.Errorf("expected node ranges at least %v got %v", minExpected, actual) } if minExpected, actual := len(rt.serverArgs.StoreSpecs), len(last.Stores); minExpected > actual { return errors.Errorf("expected at least %v stores got %v", minExpected, actual) } for _, store := range last.Stores { if minExpected, actual := keyCounts[store.StoreID], store.KeyCount; minExpected > actual { return errors.Errorf("expected at least %v keys in store %v got %v", minExpected, store.StoreID, actual) } if minExpected, actual := rangeCounts[store.StoreID], store.RangeCount; minExpected > actual { return errors.Errorf("expected at least %v ranges in store %v got %v", minExpected, store.StoreID, actual) } } return nil }) last := rt.diagServer.LastRequestData() require.Equal(t, rt.server.ClusterID().String(), last.UUID) require.Equal(t, "system", last.TenantID) require.Equal(t, rt.server.NodeID().String(), last.NodeID) require.Equal(t, rt.server.NodeID().String(), last.SQLInstanceID) require.Equal(t, "true", last.Internal) // Verify environment. verifyEnvironment(t, clusterSecret, rt.serverArgs.Locality, &last.Env) // This check isn't clean, since the body is a raw proto binary and thus could // easily contain some encoded form of elemName, but *if* it ever does fail, // that is probably very interesting. 
require.NotContains(t, last.RawReportBody, elemName) // 3 + 3 = 6: set 3 initially and org is set mid-test for 3 altered settings, // plus version, reporting and secret settings are set in startup // migrations. expected, actual := 6, len(last.AlteredSettings) require.Equal(t, expected, actual, "expected %d changed settings, got %d: %v", expected, actual, last.AlteredSettings) for key, expected := range map[string]string{ "cluster.organization": "<redacted>", "diagnostics.reporting.send_crash_reports": "false", "server.time_until_store_dead": "1m30s", "version": clusterversion.TestingBinaryVersion.String(), "cluster.secret": "<redacted>", } { got, ok := last.AlteredSettings[key] require.True(t, ok, "expected report of altered setting %q", key) require.Equal(t, expected, got, "expected reported value of setting %q to be %q not %q", key, expected, got) } // Verify that we receive the four auto-populated zone configs plus the two // modified above, and that their values are as expected. for _, expectedID := range []int64{ keys.RootNamespaceID, keys.LivenessRangesID, keys.MetaRangesID, keys.RangeEventTableID, keys.SystemDatabaseID, } { _, ok := last.ZoneConfigs[expectedID] require.True(t, ok, "didn't find expected ID %d in reported ZoneConfigs: %+v", expectedID, last.ZoneConfigs) } hashedElemName := sql.HashForReporting(clusterSecret, elemName) hashedZone := sql.HashForReporting(clusterSecret, "zone") for id, zone := range last.ZoneConfigs { if id == keys.RootNamespaceID { require.Equal(t, zone, *rt.server.ExecutorConfig().(sql.ExecutorConfig).DefaultZoneConfig) } if id == keys.RangeEventTableID { require.Equal(t, int32(1), zone.GC.TTLSeconds) constraints := []zonepb.ConstraintsConjunction{ { Constraints: []zonepb.Constraint{ {Key: hashedZone, Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}, {Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}, }, }, } require.Equal(t, zone.Constraints, constraints) } if id == keys.SystemDatabaseID { constraints := []zonepb.ConstraintsConjunction{ { NumReplicas: 1, Constraints: []zonepb.Constraint{{Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}}, }, { NumReplicas: 2, Constraints: []zonepb.Constraint{ {Key: hashedZone, Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}, {Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}, }, }, } require.Equal(t, constraints, zone.Constraints) prefs := []zonepb.LeasePreference{ { Constraints: []zonepb.Constraint{ {Key: hashedZone, Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}, {Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}, }, }, { Constraints: []zonepb.Constraint{{Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}}, }, } require.Equal(t, prefs, zone.LeasePreferences) } } } func TestUsageQuantization(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) defer cloudinfo.Disable()() skip.UnderRace(t, "takes >1min under race") r := diagutils.NewServer() defer r.Close() st := cluster.MakeTestingClusterSettings() ctx := context.Background() url := r.URL() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{ Settings: st, Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ DiagnosticsTestingKnobs: diagnostics.TestingKnobs{ OverrideReportingURL: &url, }, }, }, }) defer s.Stopper().Stop(ctx) ts := s.(*server.TestServer) // Disable periodic reporting so it doesn't interfere with the test. 
if _, err := db.Exec(`SET CLUSTER SETTING diagnostics.reporting.enabled = false`); err != nil { t.Fatal(err) } if _, err := db.Exec(`SET application_name = 'test'`); err != nil { t.Fatal(err) } // Issue some queries against the test app name. for i := 0; i < 8; i++ { _, err := db.Exec(`SELECT 1`) require.NoError(t, err) } // Between 10 and 100 queries is quantized to 10. for i := 0; i < 30; i++ { _, err := db.Exec(`SELECT 1,2`) require.NoError(t, err) } // Between 100 and 10000 gets quantized to 100. for i := 0; i < 200; i++ { _, err := db.Exec(`SELECT 1,2,3`) require.NoError(t, err) } // Above 10000 gets quantized to 10000. for i := 0; i < 10010; i++ { _, err := db.Exec(`SHOW application_name`) require.NoError(t, err) } // Flush the SQL stat pool. ts.SQLServer().(*sql.Server).ResetSQLStats(ctx) // Collect a round of statistics. ts.DiagnosticsReporter().(*diagnostics.Reporter).ReportDiagnostics(ctx) // The stats "hide" the application name by hashing it. To find the // test app name, we need to hash the ref string too prior to the // comparison. clusterSecret := sql.ClusterSecret.Get(&st.SV) hashedAppName := sql.HashForReporting(clusterSecret, "test") require.NotEqual(t, sql.FailedHashedValue, hashedAppName, "expected hashedAppName to not be 'unknown'") testData := []struct { query string expectedCount int64 }{ {`SELECT _`, 8}, {`SELECT _, _`, 10}, {`SELECT _, _, _`, 100}, {`SHOW application_name`, 10000}, } last := r.LastRequestData() for _, test := range testData { found := false for _, s := range last.SqlStats { if s.Key.App == hashedAppName && s.Key.Query == test.query { require.Equal(t, test.expectedCount, s.Stats.Count, "quantization incorrect for query %q", test.query) found = true break } } if !found { t.Errorf("query %q missing from stats", test.query) } } } type reporterTest struct { cloudEnable func() settings *cluster.Settings diagServer *diagutils.Server testingKnobs base.TestingKnobs serverArgs base.TestServerArgs server serverutils.TestServerInterface serverDB *gosql.DB } func (t *reporterTest) Close() { t.cloudEnable() t.diagServer.Close() // stopper will wait for the update/report loop to finish too. t.server.Stopper().Stop(context.Background()) } func startReporterTest(t *testing.T) *reporterTest { // Disable cloud info reporting, since it slows down tests. rt := &reporterTest{ cloudEnable: cloudinfo.Disable(), settings: cluster.MakeTestingClusterSettings(), diagServer: diagutils.NewServer(), } url := rt.diagServer.URL() rt.testingKnobs = base.TestingKnobs{ SQLLeaseManager: &lease.ManagerTestingKnobs{ // Disable SELECT called for delete orphaned leases to keep // query stats stable. DisableDeleteOrphanedLeases: true, }, Server: &server.TestingKnobs{ DiagnosticsTestingKnobs: diagnostics.TestingKnobs{ OverrideReportingURL: &url, }, }, } storeSpec := base.DefaultTestStoreSpec storeSpec.Attributes = roachpb.Attributes{Attrs: []string{elemName}} rt.serverArgs = base.TestServerArgs{ StoreSpecs: []base.StoreSpec{ storeSpec, base.DefaultTestStoreSpec, }, Settings: rt.settings, Locality: roachpb.Locality{ Tiers: []roachpb.Tier{ {Key: "region", Value: "east"}, {Key: "zone", Value: elemName}, {Key: "state", Value: "ny"}, {Key: "city", Value: "nyc"}, }, }, Knobs: rt.testingKnobs, } rt.server, rt.serverDB, _ = serverutils.StartServer(t, rt.serverArgs) // Make sure the test's generated activity is the only activity we measure. 
telemetry.GetFeatureCounts(telemetry.Raw, telemetry.ResetCounts) return rt } func setupCluster(t *testing.T, db *gosql.DB) { _, err := db.Exec(`SET CLUSTER SETTING server.time_until_store_dead = '90s'`) require.NoError(t, err) // Enable diagnostics reporting to test PeriodicallyReportDiagnostics. _, err = db.Exec(`SET CLUSTER SETTING diagnostics.reporting.enabled = true`) require.NoError(t, err) _, err = db.Exec(`SET CLUSTER SETTING diagnostics.reporting.send_crash_reports = false`) require.NoError(t, err) _, err = db.Exec(fmt.Sprintf(`CREATE DATABASE %s`, elemName)) require.NoError(t, err) // Set cluster to an internal testing cluster q := `SET CLUSTER SETTING cluster.organization = 'Cockroach Labs - Production Testing'` _, err = db.Exec(q) require.NoError(t, err) } func verifyEnvironment( t *testing.T, secret string, locality roachpb.Locality, env *diagnosticspb.Environment, ) { require.NotEqual(t, 0, env.Hardware.Mem.Total) require.NotEqual(t, 0, env.Hardware.Mem.Available) require.Equal(t, int32(system.NumCPU()), env.Hardware.Cpu.Numcpu) require.NotEqual(t, 0, env.Hardware.Cpu.Sockets) require.NotEqual(t, 0.0, env.Hardware.Cpu.Mhz) require.NotEqual(t, 0.0, env.Os.Platform) require.NotEmpty(t, env.Build.Tag) require.NotEmpty(t, env.Build.Distribution) require.NotEmpty(t, env.LicenseType) require.Equal(t, len(locality.Tiers), len(env.Locality.Tiers)) for i := range locality.Tiers { require.Equal(t, sql.HashForReporting(secret, locality.Tiers[i].Key), env.Locality.Tiers[i].Key) require.Equal(t, sql.HashForReporting(secret, locality.Tiers[i].Value), env.Locality.Tiers[i].Value) } }
pkg/server/diagnostics/reporter_test.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0002145707985619083, 0.000171010397025384, 0.0001648832840146497, 0.00016795669216662645, 0.000009594012226443738 ]
{ "id": 5, "code_window": [ "// Outbox is used to push data from local flows to a remote endpoint. Run may\n", "// be called with the necessary information to establish a connection to a\n", "// given remote endpoint.\n", "type Outbox struct {\n", "\tcolexecop.OneInputNode\n", "\n", "\ttyps []*types.T\n", "\n", "\tconverter *colserde.ArrowBatchConverter\n", "\tserializer *colserde.RecordBatchSerializer\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tinputInitialized bool\n" ], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "add", "edit_start_line_idx": 47 }
// Copyright 2018 The Cockroach Authors. // // Licensed as a CockroachDB Enterprise file under the Cockroach Community // License (the "License"); you may not use this file except in compliance with // the License. You may obtain a copy of the License at // // https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt package importccl_test import ( "bytes" "compress/gzip" "context" gosql "database/sql" "fmt" "io" "io/ioutil" "net/url" "path/filepath" "strings" "testing" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/workload/bank" "github.com/cockroachdb/cockroach/pkg/workload/workloadsql" "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" ) func setupExportableBank(t *testing.T, nodes, rows int) (*sqlutils.SQLRunner, string, func()) { ctx := context.Background() dir, cleanupDir := testutils.TempDir(t) tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: dir, UseDatabase: "test"}}, ) conn := tc.Conns[0] db := sqlutils.MakeSQLRunner(conn) db.Exec(t, "CREATE DATABASE test") wk := bank.FromRows(rows) l := workloadsql.InsertsDataLoader{BatchSize: 100, Concurrency: 3} if _, err := workloadsql.Setup(ctx, conn, wk, l); err != nil { t.Fatal(err) } config.TestingSetupZoneConfigHook(tc.Stopper()) v, err := tc.Servers[0].DB().Get(context.Background(), keys.SystemSQLCodec.DescIDSequenceKey()) if err != nil { t.Fatal(err) } last := config.SystemTenantObjectID(v.ValueInt()) zoneConfig := zonepb.DefaultZoneConfig() zoneConfig.RangeMaxBytes = proto.Int64(5000) config.TestingSetZoneConfig(last+1, zoneConfig) db.Exec(t, "ALTER TABLE bank SCATTER") db.Exec(t, "SELECT 'force a scan to repopulate range cache' FROM [SELECT count(*) FROM bank]") return db, dir, func() { tc.Stopper().Stop(ctx) cleanupDir() } } func TestExportImportBank(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) db, _, cleanup := setupExportableBank(t, 3, 100) defer cleanup() // Add some unicode to prove FmtExport works as advertised. 
db.Exec(t, "UPDATE bank SET payload = payload || '✅' WHERE id = 5") db.Exec(t, "UPDATE bank SET payload = NULL WHERE id % 2 = 0") chunkSize := 13 baseExportDir := "userfile:///t/" for _, null := range []string{"", "NULL"} { nullAs := fmt.Sprintf(", nullas = '%s'", null) nullIf := fmt.Sprintf(", nullif = '%s'", null) t.Run("null="+null, func(t *testing.T) { exportDir := filepath.Join(baseExportDir, t.Name()) var asOf string db.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&asOf) db.Exec(t, fmt.Sprintf(`EXPORT INTO CSV $1 WITH chunk_rows = $2, delimiter = '|' %s FROM SELECT * FROM bank AS OF SYSTEM TIME %s`, nullAs, asOf), exportDir, chunkSize, ) schema := bank.FromRows(1).Tables()[0].Schema exportedFiles := filepath.Join(exportDir, "*") db.Exec(t, fmt.Sprintf(`IMPORT TABLE bank2 %s CSV DATA ($1) WITH delimiter = '|'%s`, schema, nullIf), exportedFiles) db.CheckQueryResults(t, fmt.Sprintf(`SELECT * FROM bank AS OF SYSTEM TIME %s ORDER BY id`, asOf), db.QueryStr(t, `SELECT * FROM bank2 ORDER BY id`), ) db.CheckQueryResults(t, `SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE bank2`, db.QueryStr(t, `SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE bank`), ) db.Exec(t, "DROP TABLE bank2") }) } } // Tests if user does not specify nullas option and imports null data, an error is raised. func TestExportNullWithEmptyNullAs(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() dir, cleanup := testutils.TempDir(t) defer cleanup() tc := testcluster.StartTestCluster( t, 1, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: dir}}) defer tc.Stopper().Stop(ctx) conn := tc.Conns[0] db := sqlutils.MakeSQLRunner(conn) // Set up dummy accounts table with NULL value db.Exec(t, ` CREATE TABLE accounts (id INT PRIMARY KEY, balance INT); INSERT INTO accounts VALUES (1, NULL), (2, 8); `) // Case when `nullas` option is unspecified: expect error const stmtWithoutNullas = "EXPORT INTO CSV 'nodelocal://0/t' FROM SELECT * FROM accounts" db.ExpectErr(t, "NULL value encountered during EXPORT, "+ "use `WITH nullas` to specify the string representation of NULL", stmtWithoutNullas) // Case when `nullas` option is specified: operation is successful and NULLs are encoded to "None" const stmtWithNullas = `EXPORT INTO CSV 'nodelocal://0/t' WITH nullas="None" FROM SELECT * FROM accounts` db.Exec(t, stmtWithNullas) contents := readFileByGlob(t, filepath.Join(dir, "t", "export*-n1.0.csv")) require.Equal(t, "1,None\n2,8\n", string(contents)) // Verify successful IMPORT statement `WITH nullif="None"` to complete round trip const importStmt = `IMPORT TABLE accounts2(id INT PRIMARY KEY, balance INT) CSV DATA ('nodelocal://0/t/export*-n1.0.csv') WITH nullif="None"` db.Exec(t, importStmt) db.CheckQueryResults(t, "SELECT * FROM accounts2", db.QueryStr(t, "SELECT * FROM accounts"), ) } func TestMultiNodeExportStmt(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) nodes := 5 exportRows := 100 db, _, cleanup := setupExportableBank(t, nodes, exportRows*2) defer cleanup() maxTries := 10 // we might need to retry if our table didn't actually scatter enough. 
for tries := 0; tries < maxTries; tries++ { chunkSize := 13 rows := db.Query(t, `EXPORT INTO CSV 'nodelocal://0/t' WITH chunk_rows = $3 FROM SELECT * FROM bank WHERE id >= $1 and id < $2`, 10, 10+exportRows, chunkSize, ) files, totalRows, totalBytes := 0, 0, 0 nodesSeen := make(map[string]bool) for rows.Next() { filename, count, bytes := "", 0, 0 if err := rows.Scan(&filename, &count, &bytes); err != nil { t.Fatal(err) } files++ if count > chunkSize { t.Fatalf("expected no chunk larger than %d, got %d", chunkSize, count) } totalRows += count totalBytes += bytes nodesSeen[strings.SplitN(filename, ".", 2)[0]] = true } if err := rows.Err(); err != nil { t.Fatalf("unexpected error during export: %s", err.Error()) } if totalRows != exportRows { t.Fatalf("expected %d rows, got %d", exportRows, totalRows) } if expected := exportRows / chunkSize; files < expected { t.Fatalf("expected at least %d files, got %d", expected, files) } if len(nodesSeen) < 2 { // table isn't as scattered as we expected, but we can try again. if tries < maxTries { continue } t.Fatalf("expected files from %d nodes, got %d: %v", 2, len(nodesSeen), nodesSeen) } break } } func TestExportJoin(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) dir, cleanupDir := testutils.TempDir(t) defer cleanupDir() srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ExternalIODir: dir}) defer srv.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, `CREATE TABLE t AS VALUES (1, 2)`) sqlDB.Exec(t, `EXPORT INTO CSV 'nodelocal://0/join' FROM SELECT * FROM t, t as u`) } func readFileByGlob(t *testing.T, pattern string) []byte { paths, err := filepath.Glob(pattern) require.NoError(t, err) require.Equal(t, 1, len(paths)) result, err := ioutil.ReadFile(paths[0]) require.NoError(t, err) return result } func TestExportOrder(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) dir, cleanupDir := testutils.TempDir(t) defer cleanupDir() srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ExternalIODir: dir}) defer srv.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, `CREATE TABLE foo (i INT PRIMARY KEY, x INT, y INT, z INT, INDEX (y))`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 12, 3, 14), (2, 22, 2, 24), (3, 32, 1, 34)`) sqlDB.Exec(t, `EXPORT INTO CSV 'nodelocal://0/order' FROM SELECT * FROM foo ORDER BY y ASC LIMIT 2`) content := readFileByGlob(t, filepath.Join(dir, "order", "export*-n1.0.csv")) if expected, got := "3,32,1,34\n2,22,2,24\n", string(content); expected != got { t.Fatalf("expected %q, got %q", expected, got) } } func TestExportUniqueness(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) dir, cleanupDir := testutils.TempDir(t) defer cleanupDir() srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ExternalIODir: dir}) defer srv.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, `CREATE TABLE foo (i INT PRIMARY KEY, x INT, y INT, z INT, INDEX (y))`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 12, 3, 14), (2, 22, 2, 24), (3, 32, 1, 34)`) const stmt = `EXPORT INTO CSV 'nodelocal://0/' WITH chunk_rows=$1 FROM SELECT * FROM foo` sqlDB.Exec(t, stmt, 2) dir1, err := ioutil.ReadDir(dir) require.NoError(t, err) sqlDB.Exec(t, stmt, 2) dir2, err := ioutil.ReadDir(dir) require.NoError(t, err) require.Equal(t, 2*len(dir1), len(dir2), "second export did not double the number of files") } func TestExportUserDefinedTypes(t *testing.T) { defer 
leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() dir, cleanup := testutils.TempDir(t) defer cleanup() tc := testcluster.StartTestCluster( t, 1, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: dir}}) defer tc.Stopper().Stop(ctx) conn := tc.Conns[0] sqlDB := sqlutils.MakeSQLRunner(conn) // Set up some initial state for the tests. sqlDB.Exec(t, ` CREATE TYPE greeting AS ENUM ('hello', 'hi'); CREATE TABLE greeting_table (x greeting, y greeting); INSERT INTO greeting_table VALUES ('hello', 'hello'), ('hi', 'hi'); `) tests := []struct { stmt string expected string }{ { stmt: "EXPORT INTO CSV 'nodelocal://0/%s/' FROM (SELECT 'hello':::greeting, 'hi':::greeting)", expected: "hello,hi\n", }, { stmt: "EXPORT INTO CSV 'nodelocal://0/%s/' FROM TABLE greeting_table", expected: "hello,hello\nhi,hi\n", }, { stmt: "EXPORT INTO CSV 'nodelocal://0/%s/' FROM (SELECT x, y, enum_first(x) FROM greeting_table)", expected: "hello,hello,hello\nhi,hi,hello\n", }, } for i, test := range tests { path := fmt.Sprintf("test%d", i) stmt := fmt.Sprintf(test.stmt, path) sqlDB.Exec(t, stmt) // Read the dumped file. contents := readFileByGlob(t, filepath.Join(dir, path, "export*-n1.0.csv")) require.Equal(t, test.expected, string(contents)) } } func TestExportOrderCompressed(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) dir, cleanupDir := testutils.TempDir(t) defer cleanupDir() var close = func(c io.Closer) { if err := c.Close(); err != nil { t.Fatalf("failed to close stream, got error %s", err) } } srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ExternalIODir: dir}) defer srv.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, `CREATE TABLE foo (i INT PRIMARY KEY, x INT, y INT, z INT, INDEX (y))`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 12, 3, 14), (2, 22, 2, 24), (3, 32, 1, 34)`) sqlDB.Exec(t, `EXPORT INTO CSV 'nodelocal://0/order' with compression = gzip from select * from foo order by y asc limit 2`) compressed := readFileByGlob(t, filepath.Join(dir, "order", "export*-n1.0.csv.gz")) gzipReader, err := gzip.NewReader(bytes.NewReader(compressed)) defer close(gzipReader) require.NoError(t, err) content, err := ioutil.ReadAll(gzipReader) require.NoError(t, err) if expected, got := "3,32,1,34\n2,22,2,24\n", string(content); expected != got { t.Fatalf("expected %q, got %q", expected, got) } } func TestExportShow(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) dir, cleanupDir := testutils.TempDir(t) defer cleanupDir() srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ExternalIODir: dir}) defer srv.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, `EXPORT INTO CSV 'nodelocal://0/show' FROM SELECT database_name, owner FROM [SHOW DATABASES] ORDER BY database_name`) content := readFileByGlob(t, filepath.Join(dir, "show", "export*-n1.0.csv")) if expected, got := "defaultdb,"+security.RootUser+"\npostgres,"+security.RootUser+"\nsystem,"+ security.NodeUser+"\n", string(content); expected != got { t.Fatalf("expected %q, got %q", expected, got) } } // TestExportVectorized makes sure that SupportsVectorized check doesn't panic // on CSVWriter processor. 
func TestExportVectorized(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) dir, cleanupDir := testutils.TempDir(t) defer cleanupDir() srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ExternalIODir: dir}) defer srv.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, `CREATE TABLE t(a INT PRIMARY KEY)`) sqlDB.Exec(t, `EXPORT INTO CSV 'http://0.1:37957/exp_1' FROM TABLE t`) } // TestExportFeatureFlag tests the feature flag logic that allows the EXPORT // command to be toggled off via cluster settings. func TestExportFeatureFlag(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) dir, cleanupDir := testutils.TempDir(t) defer cleanupDir() srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ExternalIODir: dir}) defer srv.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) // Feature flag is off — test that EXPORT surfaces error. sqlDB.Exec(t, `SET CLUSTER SETTING feature.export.enabled = FALSE`) sqlDB.Exec(t, `CREATE TABLE feature_flags (a INT PRIMARY KEY)`) sqlDB.ExpectErr(t, `feature EXPORT was disabled by the database administrator`, `EXPORT INTO CSV 'nodelocal://0/%s/' FROM TABLE feature_flags`) // Feature flag is on — test that EXPORT does not error. sqlDB.Exec(t, `SET CLUSTER SETTING feature.export.enabled = TRUE`) sqlDB.Exec(t, `EXPORT INTO CSV 'nodelocal://0/%s/' FROM TABLE feature_flags`) } func TestExportPrivileges(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) dir, cleanupDir := testutils.TempDir(t) defer cleanupDir() srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ExternalIODir: dir}) defer srv.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, `CREATE USER testuser`) sqlDB.Exec(t, `CREATE TABLE privs (a INT)`) pgURL, cleanup := sqlutils.PGUrl(t, srv.ServingSQLAddr(), "TestExportPrivileges-testuser", url.User("testuser")) defer cleanup() startTestUser := func() *gosql.DB { testuser, err := gosql.Open("postgres", pgURL.String()) require.NoError(t, err) return testuser } testuser := startTestUser() _, err := testuser.Exec(`EXPORT INTO CSV 'nodelocal://0/privs' FROM TABLE privs`) require.True(t, testutils.IsError(err, "testuser does not have SELECT privilege")) dest := "nodelocal://0/privs_placeholder" _, err = testuser.Exec(`EXPORT INTO CSV $1 FROM TABLE privs`, dest) require.True(t, testutils.IsError(err, "testuser does not have SELECT privilege")) testuser.Close() // Grant SELECT privilege. sqlDB.Exec(t, `GRANT SELECT ON TABLE privs TO testuser`) // The above SELECT GRANT hangs if we leave the user conn open. Thus, we need // to reinitialize it here. testuser = startTestUser() defer testuser.Close() _, err = testuser.Exec(`EXPORT INTO CSV 'nodelocal://0/privs' FROM TABLE privs`) require.True(t, testutils.IsError(err, "only users with the admin role are allowed to EXPORT to the specified URI")) _, err = testuser.Exec(`EXPORT INTO CSV $1 FROM TABLE privs`, dest) require.True(t, testutils.IsError(err, "only users with the admin role are allowed to EXPORT to the specified URI")) sqlDB.Exec(t, `GRANT ADMIN TO testuser`) _, err = testuser.Exec(`EXPORT INTO CSV 'nodelocal://0/privs' FROM TABLE privs`) require.NoError(t, err) _, err = testuser.Exec(`EXPORT INTO CSV $1 FROM TABLE privs`, dest) require.NoError(t, err) }
pkg/ccl/importccl/exportcsv_test.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.000303690874716267, 0.00017928544548340142, 0.00016517155745532364, 0.0001683009322732687, 0.000025706051019369625 ]
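The export tests in the record above repeatedly call a readFileByGlob helper that is not shown in this excerpt. A minimal sketch of what such a helper might look like, assuming it is expected to match exactly one file per glob; the package name and exact behavior are assumptions, not the repository's actual helper.

package importccl_test // hypothetical placement; the real helper lives elsewhere in the test package

import (
	"io/ioutil"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
)

// readFileByGlob resolves a glob pattern to exactly one file and returns its
// contents, failing the test if the glob is invalid or ambiguous.
func readFileByGlob(t *testing.T, pattern string) []byte {
	t.Helper()
	matches, err := filepath.Glob(pattern)
	require.NoError(t, err)
	require.Len(t, matches, 1, "expected exactly one match for %q", pattern)
	contents, err := ioutil.ReadFile(matches[0])
	require.NoError(t, err)
	return contents
}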
{ "id": 5, "code_window": [ "// Outbox is used to push data from local flows to a remote endpoint. Run may\n", "// be called with the necessary information to establish a connection to a\n", "// given remote endpoint.\n", "type Outbox struct {\n", "\tcolexecop.OneInputNode\n", "\n", "\ttyps []*types.T\n", "\n", "\tconverter *colserde.ArrowBatchConverter\n", "\tserializer *colserde.RecordBatchSerializer\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tinputInitialized bool\n" ], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "add", "edit_start_line_idx": 47 }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package geomfn import ( "testing" "github.com/cockroachdb/cockroach/pkg/geo" "github.com/stretchr/testify/require" "github.com/twpayne/go-geom" ) func TestShiftLongitude(t *testing.T) { testCases := []struct { desc string input geom.T expected geom.T }{ { desc: "shift longitude for a 2D point that has longitude within the range 0-180 on the smaller end", input: geom.NewPointFlat(geom.XY, []float64{1, 60}), expected: geom.NewPointFlat(geom.XY, []float64{1, 60}), }, { desc: "shift longitude for a 2D point that has longitude within the range 0-180 on the larger end", input: geom.NewPointFlat(geom.XY, []float64{179, 23}), expected: geom.NewPointFlat(geom.XY, []float64{179, 23}), }, { desc: "shift longitude for a 2D point that has longitude within the range 0-180 at the left boundary", input: geom.NewPointFlat(geom.XY, []float64{0, 17}), expected: geom.NewPointFlat(geom.XY, []float64{0, 17}), }, { desc: "shift longitude for a 2D point that has longitude within the range 0-180 at the right boundary", input: geom.NewPointFlat(geom.XY, []float64{180, -90}).SetSRID(4326), expected: geom.NewPointFlat(geom.XY, []float64{180, -90}).SetSRID(4326), }, { desc: "shift longitude for a 2D point that has longitude less than 0", input: geom.NewPointFlat(geom.XY, []float64{-1, 60}).SetSRID(4326), expected: geom.NewPointFlat(geom.XY, []float64{359, 60}).SetSRID(4326), }, { desc: "shift longitude for a 2D point that has longitude greater than 180", input: geom.NewPointFlat(geom.XY, []float64{181, 60}).SetSRID(26918), expected: geom.NewPointFlat(geom.XY, []float64{-179, 60}).SetSRID(26918), }, { desc: "shift longitude for a line string", input: geom.NewLineStringFlat(geom.XY, []float64{0, 0, 24, 80, 190, 20, 5, 5, -5, 23}), expected: geom.NewLineStringFlat(geom.XY, []float64{0, 0, 24, 80, -170, 20, 5, 5, 355, 23}), }, { desc: "shift longitude for a polygon", input: geom.NewPolygonFlat(geom.XY, []float64{-1, -1, 35, 35, 50, 2, -1, -1, 13, 3, 11, 1, 14, 2, 13, 3}, []int{8, 16}), expected: geom.NewPolygonFlat(geom.XY, []float64{359, -1, 35, 35, 50, 2, 359, -1, 13, 3, 11, 1, 14, 2, 13, 3}, []int{8, 16}), }, { desc: "shift longitude for multiple 2D points", input: geom.NewMultiPointFlat(geom.XY, []float64{1, 1, -40, 40}), expected: geom.NewMultiPointFlat(geom.XY, []float64{1, 1, 320, 40}), }, { desc: "shift longitude for multiple line strings", input: geom.NewMultiLineStringFlat(geom.XY, []float64{3, 5, 56, 36, -2, 50, 200, 50}, []int{4, 8}), expected: geom.NewMultiLineStringFlat(geom.XY, []float64{3, 5, 56, 36, 358, 50, -160, 50}, []int{4, 8}), }, { desc: "shift longitude for multiple polygons", input: geom.NewMultiPolygonFlat(geom.XY, []float64{0, 90, 0, 0, 90, 0, 0, 90, 50, 50, 360, 23, 82, 49, 50, 50}, [][]int{{8}, {16}}), expected: geom.NewMultiPolygonFlat(geom.XY, []float64{0, 90, 0, 0, 90, 0, 0, 90, 50, 50, 0, 23, 82, 49, 50, 50}, [][]int{{8}, {16}}), }, { desc: "shift longitude for non-empty geometry collection", input: geom.NewGeometryCollection().MustPush( geom.NewPointFlat(geom.XY, []float64{-1, 25}), geom.NewPointFlat(geom.XY, []float64{0, 0}), geom.NewLineStringFlat(geom.XY, []float64{-26, 85, 26, 75}), geom.NewPolygonFlat(geom.XY, 
[]float64{5, 5, 270, 90, -118, -85, 5, 5}, []int{8}).SetSRID(4326), ), expected: geom.NewGeometryCollection().MustPush( geom.NewPointFlat(geom.XY, []float64{359, 25}), geom.NewPointFlat(geom.XY, []float64{0, 0}), geom.NewLineStringFlat(geom.XY, []float64{334, 85, 26, 75}), geom.NewPolygonFlat(geom.XY, []float64{5, 5, -90, 90, 242, -85, 5, 5}, []int{8}).SetSRID(4326), ), }, { desc: "shift longitude for empty point", input: geom.NewPointEmpty(geom.XY), expected: geom.NewPointEmpty(geom.XY), }, { desc: "shift longitude for empty line string", input: geom.NewLineString(geom.XY), expected: geom.NewLineString(geom.XY), }, { desc: "shift longitude for empty polygon", input: geom.NewPolygon(geom.XY), expected: geom.NewPolygon(geom.XY), }, { desc: "shift longitude for empty geometry collection", input: geom.NewGeometryCollection(), expected: geom.NewGeometryCollection(), }, } for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { geometry, err := geo.MakeGeometryFromGeomT(tc.input) require.NoError(t, err) got, err := ShiftLongitude(geometry) require.NoError(t, err) want, err := geo.MakeGeometryFromGeomT(tc.expected) require.NoError(t, err) require.Equal(t, want, got) require.EqualValues(t, tc.input.SRID(), got.SRID()) }) } }
pkg/geo/geomfn/shift_longitude_test.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0013669097097590566, 0.00026349248946644366, 0.00016754886019043624, 0.00016921095084398985, 0.00030716697801835835 ]
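The expected outputs in the ShiftLongitude cases above imply a simple per-longitude rule: values below 0 shift up by 360, values above 180 shift down by 360, and everything in [0, 180] is left alone. A minimal, self-contained sketch of that rule over a flat XY coordinate slice; this only illustrates what the tests encode and is not the geomfn implementation, which operates on geom.T geometries.

package main

import "fmt"

// shiftLongitudeFlat applies the rule implied by the test cases to a flat
// []float64 of XY coordinates: X values (even indices) below 0 move up by 360,
// X values above 180 move down by 360; Y values are untouched.
func shiftLongitudeFlat(flatCoords []float64) []float64 {
	out := make([]float64, len(flatCoords))
	copy(out, flatCoords)
	for i := 0; i < len(out); i += 2 {
		switch {
		case out[i] < 0:
			out[i] += 360
		case out[i] > 180:
			out[i] -= 360
		}
	}
	return out
}

func main() {
	// Matches the "-1 becomes 359" and "181 becomes -179" cases above.
	fmt.Println(shiftLongitudeFlat([]float64{-1, 60, 181, 60}))
}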
{ "id": 6, "code_window": [ "\t\t// runnerCtx will remain unset, so we have this check.\n", "\t\to.runnerCtx = ctx\n", "\t}\n", "\terrToSend = colexecerror.CatchVectorizedRuntimeError(func() {\n", "\t\to.Input.Init(o.runnerCtx)\n", "\t\tfor {\n", "\t\t\tif atomic.LoadUint32(&o.draining) == 1 {\n", "\t\t\t\tterminatedGracefully = true\n", "\t\t\t\treturn\n", "\t\t\t}\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\to.inputInitialized = true\n" ], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "add", "edit_start_line_idx": 263 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colflow import ( "context" "sync" "sync/atomic" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colcontainer" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexechash" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/marusama/semaphore" ) // routerOutput is an interface implemented by router outputs. It exists for // easier test mocking of outputs. type routerOutput interface { execinfra.OpNode // initWithHashRouter passes a reference to the HashRouter that will be // pushing batches to this output. initWithHashRouter(*HashRouter) // addBatch adds the elements specified by the selection vector from batch // to the output. It returns whether or not the output changed its state to // blocked (see implementations). addBatch(context.Context, coldata.Batch) bool // cancel tells the output to stop producing batches. Optionally forwards an // error if not nil. cancel(context.Context, error) // forwardErr forwards an error to the output. The output should call // colexecerror.ExpectedError with this error on the next call to Next. // Calling forwardErr multiple times will result in the most recent error // overwriting the previous error. forwardErr(error) // resetForTests resets the routerOutput for a benchmark or test run. resetForTests(context.Context) } // getDefaultRouterOutputBlockedThreshold returns the number of unread values // buffered by the routerOutputOp after which the output is considered blocked. // It is a function rather than a variable so that in tests we could modify // coldata.BatchSize() (if it were a variable, then its value would be // evaluated before we set the desired batch size). func getDefaultRouterOutputBlockedThreshold() int { return coldata.BatchSize() * 2 } type routerOutputOpState int const ( // routerOutputOpRunning is the state in which routerOutputOp operates // normally. The router output transitions into routerOutputDoneAdding when // a zero-length batch was added or routerOutputOpDraining when it // encounters an error or the drain is requested. routerOutputOpRunning routerOutputOpState = iota // routerOutputDoneAdding is the state in which a zero-length was batch was // added to routerOutputOp and no more batches will be added. The router // output transitions to routerOutputOpDraining when the output is canceled // (either closed or the drain is requested). routerOutputDoneAdding // routerOutputOpDraining is the state in which routerOutputOp always // returns zero-length batches on calls to Next. 
routerOutputOpDraining ) // drainCoordinator is an interface that the HashRouter implements to coordinate // cancellation of all of its outputs in the case of an error and draining in // the case of graceful termination. // WARNING: No locks should be held when calling these methods, as the // HashRouter might call routerOutput methods (e.g. cancel) that attempt to // reacquire locks. type drainCoordinator interface { // encounteredError should be called when a routerOutput encounters an error. // This terminates execution. No locks should be held when calling this // method, since cancellation could occur. encounteredError(context.Context) // drainMeta should be called exactly once when the routerOutput moves to // draining. drainMeta() []execinfrapb.ProducerMetadata } type routerOutputOp struct { colexecop.InitHelper // input is a reference to our router. input execinfra.OpNode // drainCoordinator is a reference to the HashRouter to be able to notify it // if the output encounters an error or transitions to a draining state. drainCoordinator drainCoordinator types []*types.T // unblockedEventsChan is signaled when a routerOutput changes state from // blocked to unblocked. unblockedEventsChan chan<- struct{} mu struct { syncutil.Mutex state routerOutputOpState // forwardedErr is an error that was forwarded by the HashRouter. If set, // any subsequent calls to Next will return this error. forwardedErr error cond *sync.Cond // data is a SpillingQueue, a circular buffer backed by a disk queue. data *colexecutils.SpillingQueue numUnread int blocked bool } testingKnobs routerOutputOpTestingKnobs } func (o *routerOutputOp) ChildCount(verbose bool) int { return 1 } func (o *routerOutputOp) Child(nth int, verbose bool) execinfra.OpNode { if nth == 0 { return o.input } colexecerror.InternalError(errors.AssertionFailedf("invalid index %d", nth)) // This code is unreachable, but the compiler cannot infer that. return nil } var _ colexecop.Operator = &routerOutputOp{} type routerOutputOpTestingKnobs struct { // blockedThreshold is the number of buffered values above which we consider // a router output to be blocked. It defaults to // defaultRouterOutputBlockedThreshold but can be modified by tests to test // edge cases. blockedThreshold int // addBatchTestInducedErrorCb is called after any function call that could // produce an error if that error is nil. If the callback returns an error, // the router output overwrites the nil error with the returned error. // It is guaranteed that this callback will be called at least once during // normal execution. addBatchTestInducedErrorCb func() error // nextTestInducedErrorCb is called after any function call that could // produce an error if that error is nil. If the callback returns an error, // the router output overwrites the nil error with the returned error. // It is guaranteed that this callback will be called at least once during // normal execution. nextTestInducedErrorCb func() error } // routerOutputOpArgs are the arguments to newRouterOutputOp. All fields apart // from the testing knobs are optional. type routerOutputOpArgs struct { // All fields are required unless marked optional. types []*types.T // unlimitedAllocator should not have a memory limit. Pass in a soft // memoryLimit that will be respected instead. unlimitedAllocator *colmem.Allocator // memoryLimit acts as a soft limit to allow the router output to use disk // when it is exceeded. 
memoryLimit int64 diskAcc *mon.BoundAccount cfg colcontainer.DiskQueueCfg fdSemaphore semaphore.Semaphore // unblockedEventsChan must be a buffered channel. unblockedEventsChan chan<- struct{} testingKnobs routerOutputOpTestingKnobs } // newRouterOutputOp creates a new router output. func newRouterOutputOp(args routerOutputOpArgs) *routerOutputOp { if args.testingKnobs.blockedThreshold == 0 { args.testingKnobs.blockedThreshold = getDefaultRouterOutputBlockedThreshold() } o := &routerOutputOp{ types: args.types, unblockedEventsChan: args.unblockedEventsChan, testingKnobs: args.testingKnobs, } o.mu.cond = sync.NewCond(&o.mu) o.mu.data = colexecutils.NewSpillingQueue( &colexecutils.NewSpillingQueueArgs{ UnlimitedAllocator: args.unlimitedAllocator, Types: args.types, MemoryLimit: args.memoryLimit, DiskQueueCfg: args.cfg, FDSemaphore: args.fdSemaphore, DiskAcc: args.diskAcc, }, ) return o } func (o *routerOutputOp) Init(ctx context.Context) { o.InitHelper.Init(ctx) } // nextErrorLocked is a helper method that handles an error encountered in Next. func (o *routerOutputOp) nextErrorLocked(err error) { o.mu.state = routerOutputOpDraining o.maybeUnblockLocked() // Unlock the mutex, since the HashRouter will cancel all outputs. o.mu.Unlock() o.drainCoordinator.encounteredError(o.Ctx) o.mu.Lock() colexecerror.InternalError(err) } // Next returns the next coldata.Batch from the routerOutputOp. Note that Next // is designed for only one concurrent caller and will block until data is // ready. func (o *routerOutputOp) Next() coldata.Batch { o.mu.Lock() defer o.mu.Unlock() for o.mu.forwardedErr == nil && o.mu.state == routerOutputOpRunning && o.mu.data.Empty() { // Wait until there is data to read or the output is canceled. o.mu.cond.Wait() } if o.mu.forwardedErr != nil { colexecerror.ExpectedError(o.mu.forwardedErr) } if o.mu.state == routerOutputOpDraining { return coldata.ZeroBatch } b, err := o.mu.data.Dequeue(o.Ctx) if err == nil && o.testingKnobs.nextTestInducedErrorCb != nil { err = o.testingKnobs.nextTestInducedErrorCb() } if err != nil { o.nextErrorLocked(err) } o.mu.numUnread -= b.Length() if o.mu.numUnread <= o.testingKnobs.blockedThreshold { o.maybeUnblockLocked() } if b.Length() == 0 { if o.testingKnobs.nextTestInducedErrorCb != nil { if err := o.testingKnobs.nextTestInducedErrorCb(); err != nil { o.nextErrorLocked(err) } } // This is the last batch. closeLocked will set done to protect against // further calls to Next since this is allowed by the interface as well as // cleaning up and releasing possible disk infrastructure. o.closeLocked(o.Ctx) } return b } func (o *routerOutputOp) DrainMeta() []execinfrapb.ProducerMetadata { o.mu.Lock() o.mu.state = routerOutputOpDraining o.maybeUnblockLocked() o.mu.Unlock() return o.drainCoordinator.drainMeta() } func (o *routerOutputOp) initWithHashRouter(r *HashRouter) { o.input = r o.drainCoordinator = r } func (o *routerOutputOp) closeLocked(ctx context.Context) { o.mu.state = routerOutputOpDraining if err := o.mu.data.Close(ctx); err != nil { // This log message is Info instead of Warning because the flow will also // attempt to clean up the parent directory, so this failure might not have // any effect. log.Infof(ctx, "error closing vectorized hash router output, files may be left over: %s", err) } } // cancel wakes up a reader in Next if there is one and results in the output // returning zero length batches for every Next call after cancel. Note that // all accumulated data that hasn't been read will not be returned. 
func (o *routerOutputOp) cancel(ctx context.Context, err error) { o.mu.Lock() defer o.mu.Unlock() o.closeLocked(ctx) o.forwardErrLocked(err) // Some goroutine might be waiting on the condition variable, so wake it up. // Note that read goroutines check o.mu.done, so won't wait on the condition // variable after we unlock the mutex. o.mu.cond.Signal() } func (o *routerOutputOp) forwardErrLocked(err error) { if err != nil { o.mu.forwardedErr = err } } func (o *routerOutputOp) forwardErr(err error) { o.mu.Lock() defer o.mu.Unlock() o.forwardErrLocked(err) o.mu.cond.Signal() } // addBatch copies the batch (according to its selection vector) into an // internal buffer. Zero-length batch should be passed-in to indicate that no // more batches will be added. // TODO(asubiotto): We should explore pipelining addBatch if disk-spilling // performance becomes a concern. The main router goroutine will be writing to // disk as the code is written, meaning that we impact the performance of // writing rows to a fast output if we have to write to disk for a single // slow output. func (o *routerOutputOp) addBatch(ctx context.Context, batch coldata.Batch) bool { o.mu.Lock() defer o.mu.Unlock() switch o.mu.state { case routerOutputDoneAdding: colexecerror.InternalError(errors.AssertionFailedf("a batch was added to routerOutput in DoneAdding state")) case routerOutputOpDraining: // This output is draining, discard any data. return false } o.mu.numUnread += batch.Length() o.mu.data.Enqueue(ctx, batch) if o.testingKnobs.addBatchTestInducedErrorCb != nil { if err := o.testingKnobs.addBatchTestInducedErrorCb(); err != nil { colexecerror.InternalError(err) } } if batch.Length() == 0 { o.mu.state = routerOutputDoneAdding o.mu.cond.Signal() return false } stateChanged := false if o.mu.numUnread > o.testingKnobs.blockedThreshold && !o.mu.blocked { // The output is now blocked. o.mu.blocked = true stateChanged = true } o.mu.cond.Signal() return stateChanged } // maybeUnblockLocked unblocks the router output if it is in a blocked state. If the // output was previously in a blocked state, an event will be sent on // routerOutputOp.unblockedEventsChan. func (o *routerOutputOp) maybeUnblockLocked() { if o.mu.blocked { o.mu.blocked = false o.unblockedEventsChan <- struct{}{} } } // resetForTests resets the routerOutputOp for a test or benchmark run. func (o *routerOutputOp) resetForTests(ctx context.Context) { o.mu.Lock() defer o.mu.Unlock() o.mu.state = routerOutputOpRunning o.mu.forwardedErr = nil o.mu.data.Reset(ctx) o.mu.numUnread = 0 o.mu.blocked = false } // hashRouterDrainState is a state that specifically describes the hashRouter's // state in the draining process. This differs from its "general" state. For // example, a hash router can have drained and exited the Run method but still // be in hashRouterDrainStateRunning until somebody calls drainMeta. type hashRouterDrainState int const ( // hashRouterDrainStateRunning is the state that a hashRouter is in when // running normally (i.e. pulling and pushing batches). hashRouterDrainStateRunning = iota // hashRouterDrainStateRequested is the state that a hashRouter is in when // either all outputs have called drainMeta or an error was encountered by one // of the outputs. hashRouterDrainStateRequested // hashRouterDrainStateCompleted is the state that a hashRouter is in when // draining has completed. hashRouterDrainStateCompleted ) // HashRouter hashes values according to provided hash columns and computes a // destination for each row. 
These destinations are exposed as Operators // returned by the constructor. type HashRouter struct { colexecop.OneInputNode // inputMetaInfo contains all of the meta components that the hash router // is responsible for. Root field is exactly the same as OneInputNode.Input. inputMetaInfo colexecargs.OpWithMetaInfo // hashCols is a slice of indices of the columns used for hashing. hashCols []uint32 // One output for each stream. outputs []routerOutput // unblockedEventsChan is a channel shared between the HashRouter and its // outputs. outputs send events on this channel when they are unblocked by a // read. unblockedEventsChan <-chan struct{} numBlockedOutputs int bufferedMeta []execinfrapb.ProducerMetadata // atomics is shared state between the Run goroutine and any routerOutput // goroutines that call drainMeta. atomics struct { // drainState is the state the hashRouter is in. The Run goroutine should // only ever read these states, never set them. drainState int32 numDrainedOutputs int32 } // waitForMetadata is a channel that the last output to drain will read from // to pass on any metadata buffered through the Run goroutine. waitForMetadata chan []execinfrapb.ProducerMetadata // tupleDistributor is used to decide to which output a particular tuple // should be routed. tupleDistributor *colexechash.TupleHashDistributor } // NewHashRouter creates a new hash router that consumes coldata.Batches from // input and hashes each row according to hashCols to one of the outputs // returned as Operators. // The number of allocators provided will determine the number of outputs // returned. Note that each allocator must be unlimited, memory will be limited // by comparing memory use in the allocator with the memoryLimit argument. Each // Operator must have an independent allocator (this means that each allocator // should be linked to an independent mem account) as Operator.Next will usually // be called concurrently between different outputs. Similarly, each output // needs to have a separate disk account. func NewHashRouter( unlimitedAllocators []*colmem.Allocator, input colexecargs.OpWithMetaInfo, types []*types.T, hashCols []uint32, memoryLimit int64, diskQueueCfg colcontainer.DiskQueueCfg, fdSemaphore semaphore.Semaphore, diskAccounts []*mon.BoundAccount, ) (*HashRouter, []colexecop.DrainableOperator) { if diskQueueCfg.CacheMode != colcontainer.DiskQueueCacheModeDefault { colexecerror.InternalError(errors.Errorf("hash router instantiated with incompatible disk queue cache mode: %d", diskQueueCfg.CacheMode)) } outputs := make([]routerOutput, len(unlimitedAllocators)) outputsAsOps := make([]colexecop.DrainableOperator, len(unlimitedAllocators)) // unblockEventsChan is buffered to 2*numOutputs as we don't want the outputs // writing to it to block. // Unblock events only happen after a corresponding block event. Since these // are state changes and are done under lock (including the output sending // on the channel, which is why we want the channel to be buffered in the // first place), every time the HashRouter blocks an output, it *must* read // all unblock events preceding it since these *must* be on the channel. 
unblockEventsChan := make(chan struct{}, 2*len(unlimitedAllocators)) memoryLimitPerOutput := memoryLimit / int64(len(unlimitedAllocators)) for i := range unlimitedAllocators { op := newRouterOutputOp( routerOutputOpArgs{ types: types, unlimitedAllocator: unlimitedAllocators[i], memoryLimit: memoryLimitPerOutput, diskAcc: diskAccounts[i], cfg: diskQueueCfg, fdSemaphore: fdSemaphore, unblockedEventsChan: unblockEventsChan, }, ) outputs[i] = op outputsAsOps[i] = op } return newHashRouterWithOutputs(input, hashCols, unblockEventsChan, outputs), outputsAsOps } func newHashRouterWithOutputs( input colexecargs.OpWithMetaInfo, hashCols []uint32, unblockEventsChan <-chan struct{}, outputs []routerOutput, ) *HashRouter { r := &HashRouter{ OneInputNode: colexecop.NewOneInputNode(input.Root), inputMetaInfo: input, hashCols: hashCols, outputs: outputs, unblockedEventsChan: unblockEventsChan, // waitForMetadata is a buffered channel to avoid blocking if nobody will // read the metadata. waitForMetadata: make(chan []execinfrapb.ProducerMetadata, 1), tupleDistributor: colexechash.NewTupleHashDistributor(colexechash.DefaultInitHashValue, len(outputs)), } for i := range outputs { outputs[i].initWithHashRouter(r) } return r } // cancelOutputs cancels all outputs and forwards the given error to all of // them if non-nil. The only case where the error is not forwarded is if no // output could be canceled due to an error. In this case each output will // forward the error returned during cancellation. func (r *HashRouter) cancelOutputs(ctx context.Context, errToForward error) { for _, o := range r.outputs { if err := colexecerror.CatchVectorizedRuntimeError(func() { o.cancel(ctx, errToForward) }); err != nil { // If there was an error canceling this output, this error can be // forwarded to whoever is calling Next. o.forwardErr(err) } } } func (r *HashRouter) setDrainState(drainState hashRouterDrainState) { atomic.StoreInt32(&r.atomics.drainState, int32(drainState)) } func (r *HashRouter) getDrainState() hashRouterDrainState { return hashRouterDrainState(atomic.LoadInt32(&r.atomics.drainState)) } // Run runs the HashRouter. Batches are read from the input and pushed to an // output calculated by hashing columns. Cancel the given context to terminate // early. func (r *HashRouter) Run(ctx context.Context) { var span *tracing.Span ctx, span = execinfra.ProcessorSpan(ctx, "hash router") if span != nil { defer span.Finish() } // Since HashRouter runs in a separate goroutine, we want to be safe and // make sure that we catch errors in all code paths, so we wrap the whole // method with a catcher. Note that we also have "internal" catchers as // well for more fine-grained control of error propagation. if err := colexecerror.CatchVectorizedRuntimeError(func() { r.Input.Init(ctx) var done bool processNextBatch := func() { done = r.processNextBatch(ctx) } for { if r.getDrainState() != hashRouterDrainStateRunning { break } // Check for cancellation. select { case <-ctx.Done(): r.cancelOutputs(ctx, ctx.Err()) return default: } // Read all the routerOutput state changes that have happened since the // last iteration. for moreToRead := true; moreToRead; { select { case <-r.unblockedEventsChan: r.numBlockedOutputs-- default: // No more routerOutput state changes to read without blocking. moreToRead = false } } if r.numBlockedOutputs == len(r.outputs) { // All outputs are blocked, wait until at least one output is unblocked. 
select { case <-r.unblockedEventsChan: r.numBlockedOutputs-- case <-ctx.Done(): r.cancelOutputs(ctx, ctx.Err()) return } } if err := colexecerror.CatchVectorizedRuntimeError(processNextBatch); err != nil { r.cancelOutputs(ctx, err) return } if done { // The input was done and we have notified the routerOutputs that there // is no more data. return } } }); err != nil { r.cancelOutputs(ctx, err) } if span != nil { for _, s := range r.inputMetaInfo.StatsCollectors { span.RecordStructured(s.GetStats()) } if meta := execinfra.GetTraceDataAsMetadata(span); meta != nil { r.bufferedMeta = append(r.bufferedMeta, *meta) } } r.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...) // Non-blocking send of metadata so that one of the outputs can return it // in DrainMeta. r.waitForMetadata <- r.bufferedMeta close(r.waitForMetadata) r.inputMetaInfo.ToClose.CloseAndLogOnErr(ctx, "hash router") } // processNextBatch reads the next batch from its input, hashes it and adds // each column to its corresponding output, returning whether the input is // done. func (r *HashRouter) processNextBatch(ctx context.Context) bool { b := r.Input.Next() n := b.Length() if n == 0 { // Done. Push an empty batch to outputs to tell them the data is done as // well. for _, o := range r.outputs { o.addBatch(ctx, b) } return true } // It is ok that we call Init() on every batch since all calls except for // the first one are noops. r.tupleDistributor.Init(ctx) selections := r.tupleDistributor.Distribute(b, r.hashCols) for i, o := range r.outputs { if len(selections[i]) > 0 { b.SetSelection(true) copy(b.Selection(), selections[i]) b.SetLength(len(selections[i])) if o.addBatch(ctx, b) { // This batch blocked the output. r.numBlockedOutputs++ } } } return false } // resetForTests resets the HashRouter for a test or benchmark run. func (r *HashRouter) resetForTests(ctx context.Context) { if i, ok := r.Input.(colexecop.Resetter); ok { i.Reset(ctx) } r.setDrainState(hashRouterDrainStateRunning) r.waitForMetadata = make(chan []execinfrapb.ProducerMetadata, 1) r.atomics.numDrainedOutputs = 0 r.bufferedMeta = nil r.numBlockedOutputs = 0 for moreToRead := true; moreToRead; { select { case <-r.unblockedEventsChan: default: moreToRead = false } } for _, o := range r.outputs { o.resetForTests(ctx) } } func (r *HashRouter) encounteredError(ctx context.Context) { // Once one output returns an error the hash router needs to stop running // and drain its input. r.setDrainState(hashRouterDrainStateRequested) // cancel all outputs. The Run goroutine will eventually realize that the // HashRouter is done and exit without draining. r.cancelOutputs(ctx, nil /* errToForward */) } func (r *HashRouter) drainMeta() []execinfrapb.ProducerMetadata { if int(atomic.AddInt32(&r.atomics.numDrainedOutputs, 1)) != len(r.outputs) { return nil } // All outputs have been drained, return any buffered metadata to the last // output to call drainMeta. r.setDrainState(hashRouterDrainStateRequested) meta := <-r.waitForMetadata r.setDrainState(hashRouterDrainStateCompleted) return meta }
pkg/sql/colflow/routers.go
1
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0042694322764873505, 0.0007699911948293447, 0.00016147510905284435, 0.00019216777582187206, 0.0010016181040555239 ]
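The code_window hunks repeated through these records add an inputInitialized flag to the vectorized Outbox so that stats collection and metadata draining only happen when Init on the input actually ran. A minimal sketch of that guard pattern with stand-in types; the field and method names below are illustrative, not the real colrpc types.

package main

import "fmt"

// input stands in for a vectorized operator that must be initialized before
// it may be drained for metadata.
type input struct{ initialized bool }

func (in *input) Init()               { in.initialized = true }
func (in *input) DrainMeta() []string { return []string{"trace", "stats"} }

// outbox sketches the guard the hunks introduce: remember whether Init ran,
// and only drain the input during shutdown if it did.
type outbox struct {
	in               *input
	inputInitialized bool
}

func (o *outbox) run(initFails bool) {
	if initFails {
		// Simulates Init panicking or erroring before it completes.
		return
	}
	o.in.Init()
	o.inputInitialized = true
}

func (o *outbox) drain() []string {
	if !o.inputInitialized {
		// Draining an operator whose Init never ran is unsafe, so skip it.
		return nil
	}
	return o.in.DrainMeta()
}

func main() {
	o := &outbox{in: &input{}}
	o.run(true /* initFails */)
	fmt.Println(o.drain()) // [] — nothing drained because Init never ran
}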
{ "id": 6, "code_window": [ "\t\t// runnerCtx will remain unset, so we have this check.\n", "\t\to.runnerCtx = ctx\n", "\t}\n", "\terrToSend = colexecerror.CatchVectorizedRuntimeError(func() {\n", "\t\to.Input.Init(o.runnerCtx)\n", "\t\tfor {\n", "\t\t\tif atomic.LoadUint32(&o.draining) == 1 {\n", "\t\t\t\tterminatedGracefully = true\n", "\t\t\t\treturn\n", "\t\t\t}\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\to.inputInitialized = true\n" ], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "add", "edit_start_line_idx": 263 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package ordering import ( "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/opt/props" "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) // CanProvide returns true if the given operator returns rows that can // satisfy the given required ordering. func CanProvide(expr memo.RelExpr, required *physical.OrderingChoice) bool { if required.Any() { return true } if util.CrdbTestBuild { checkRequired(expr, required) } return funcMap[expr.Op()].canProvideOrdering(expr, required) } // BuildChildRequired returns the ordering that must be required of its // given child in order to satisfy a required ordering. Can only be called if // CanProvide is true for the required ordering. func BuildChildRequired( parent memo.RelExpr, required *physical.OrderingChoice, childIdx int, ) physical.OrderingChoice { result := funcMap[parent.Op()].buildChildReqOrdering(parent, required, childIdx) if util.CrdbTestBuild && !result.Any() { checkRequired(parent.Child(childIdx).(memo.RelExpr), &result) } return result } // BuildProvided returns a specific ordering that the operator provides (and which // must be maintained on the results during distributed execution). // // The returned ordering, in conjunction with the operator's functional // dependencies, must intersect the required ordering. // // A best-effort attempt is made to make the provided orderings as simple as // possible (while still satisfying the required ordering). // // For example, if we scan an index on x,y,z with required ordering "+y opt(x)", // the provided ordering is "+x,+y". If we scan the same index with constraint // x=1, the provided ordering is "+y". // // This function assumes that the provided orderings have already been set in // the children of the expression. 
func BuildProvided(expr memo.RelExpr, required *physical.OrderingChoice) opt.Ordering { if required.Any() { return nil } provided := funcMap[expr.Op()].buildProvidedOrdering(expr, required) if util.CrdbTestBuild { checkProvided(expr, required, provided) } return provided } type funcs struct { canProvideOrdering func(expr memo.RelExpr, required *physical.OrderingChoice) bool buildChildReqOrdering func( parent memo.RelExpr, required *physical.OrderingChoice, childIdx int, ) physical.OrderingChoice buildProvidedOrdering func( expr memo.RelExpr, required *physical.OrderingChoice, ) opt.Ordering } var funcMap [opt.NumOperators]funcs func init() { for _, op := range opt.RelationalOperators { funcMap[op] = funcs{ canProvideOrdering: canNeverProvideOrdering, buildChildReqOrdering: noChildReqOrdering, buildProvidedOrdering: noProvidedOrdering, } } funcMap[opt.ScanOp] = funcs{ canProvideOrdering: scanCanProvideOrdering, buildChildReqOrdering: noChildReqOrdering, buildProvidedOrdering: scanBuildProvided, } funcMap[opt.SelectOp] = funcs{ canProvideOrdering: selectCanProvideOrdering, buildChildReqOrdering: selectBuildChildReqOrdering, buildProvidedOrdering: selectBuildProvided, } funcMap[opt.ProjectOp] = funcs{ canProvideOrdering: projectCanProvideOrdering, buildChildReqOrdering: projectBuildChildReqOrdering, buildProvidedOrdering: projectBuildProvided, } funcMap[opt.UnionOp] = funcs{ canProvideOrdering: setOpCanProvideOrdering, buildChildReqOrdering: setOpBuildChildReqOrdering, buildProvidedOrdering: setOpBuildProvided, } funcMap[opt.UnionAllOp] = funcs{ canProvideOrdering: setOpCanProvideOrdering, buildChildReqOrdering: setOpBuildChildReqOrdering, buildProvidedOrdering: setOpBuildProvided, } funcMap[opt.IntersectOp] = funcs{ canProvideOrdering: setOpCanProvideOrdering, buildChildReqOrdering: setOpBuildChildReqOrdering, buildProvidedOrdering: setOpBuildProvided, } funcMap[opt.IntersectAllOp] = funcs{ canProvideOrdering: setOpCanProvideOrdering, buildChildReqOrdering: setOpBuildChildReqOrdering, buildProvidedOrdering: setOpBuildProvided, } funcMap[opt.ExceptOp] = funcs{ canProvideOrdering: setOpCanProvideOrdering, buildChildReqOrdering: setOpBuildChildReqOrdering, buildProvidedOrdering: setOpBuildProvided, } funcMap[opt.ExceptAllOp] = funcs{ canProvideOrdering: setOpCanProvideOrdering, buildChildReqOrdering: setOpBuildChildReqOrdering, buildProvidedOrdering: setOpBuildProvided, } funcMap[opt.IndexJoinOp] = funcs{ canProvideOrdering: lookupOrIndexJoinCanProvideOrdering, buildChildReqOrdering: lookupOrIndexJoinBuildChildReqOrdering, buildProvidedOrdering: indexJoinBuildProvided, } funcMap[opt.LookupJoinOp] = funcs{ canProvideOrdering: lookupOrIndexJoinCanProvideOrdering, buildChildReqOrdering: lookupOrIndexJoinBuildChildReqOrdering, buildProvidedOrdering: lookupJoinBuildProvided, } funcMap[opt.InvertedJoinOp] = funcs{ canProvideOrdering: invertedJoinCanProvideOrdering, buildChildReqOrdering: invertedJoinBuildChildReqOrdering, buildProvidedOrdering: invertedJoinBuildProvided, } funcMap[opt.OrdinalityOp] = funcs{ canProvideOrdering: ordinalityCanProvideOrdering, buildChildReqOrdering: ordinalityBuildChildReqOrdering, buildProvidedOrdering: ordinalityBuildProvided, } funcMap[opt.MergeJoinOp] = funcs{ canProvideOrdering: mergeJoinCanProvideOrdering, buildChildReqOrdering: mergeJoinBuildChildReqOrdering, buildProvidedOrdering: mergeJoinBuildProvided, } funcMap[opt.LimitOp] = funcs{ canProvideOrdering: limitOrOffsetCanProvideOrdering, buildChildReqOrdering: limitOrOffsetBuildChildReqOrdering, 
buildProvidedOrdering: limitOrOffsetBuildProvided, } funcMap[opt.OffsetOp] = funcs{ canProvideOrdering: limitOrOffsetCanProvideOrdering, buildChildReqOrdering: limitOrOffsetBuildChildReqOrdering, buildProvidedOrdering: limitOrOffsetBuildProvided, } funcMap[opt.ScalarGroupByOp] = funcs{ // ScalarGroupBy always has exactly one result; any required ordering should // have been simplified to Any (unless normalization rules are disabled). canProvideOrdering: canNeverProvideOrdering, buildChildReqOrdering: scalarGroupByBuildChildReqOrdering, buildProvidedOrdering: noProvidedOrdering, } funcMap[opt.GroupByOp] = funcs{ canProvideOrdering: groupByCanProvideOrdering, buildChildReqOrdering: groupByBuildChildReqOrdering, buildProvidedOrdering: groupByBuildProvided, } funcMap[opt.DistinctOnOp] = funcs{ canProvideOrdering: distinctOnCanProvideOrdering, buildChildReqOrdering: distinctOnBuildChildReqOrdering, buildProvidedOrdering: distinctOnBuildProvided, } funcMap[opt.EnsureDistinctOnOp] = funcs{ canProvideOrdering: distinctOnCanProvideOrdering, buildChildReqOrdering: distinctOnBuildChildReqOrdering, buildProvidedOrdering: distinctOnBuildProvided, } funcMap[opt.UpsertDistinctOnOp] = funcs{ canProvideOrdering: distinctOnCanProvideOrdering, buildChildReqOrdering: distinctOnBuildChildReqOrdering, buildProvidedOrdering: distinctOnBuildProvided, } funcMap[opt.EnsureUpsertDistinctOnOp] = funcs{ canProvideOrdering: distinctOnCanProvideOrdering, buildChildReqOrdering: distinctOnBuildChildReqOrdering, buildProvidedOrdering: distinctOnBuildProvided, } funcMap[opt.SortOp] = funcs{ canProvideOrdering: nil, // should never get called buildChildReqOrdering: sortBuildChildReqOrdering, buildProvidedOrdering: sortBuildProvided, } funcMap[opt.InsertOp] = funcs{ canProvideOrdering: mutationCanProvideOrdering, buildChildReqOrdering: mutationBuildChildReqOrdering, buildProvidedOrdering: mutationBuildProvided, } funcMap[opt.UpdateOp] = funcs{ canProvideOrdering: mutationCanProvideOrdering, buildChildReqOrdering: mutationBuildChildReqOrdering, buildProvidedOrdering: mutationBuildProvided, } funcMap[opt.UpsertOp] = funcs{ canProvideOrdering: mutationCanProvideOrdering, buildChildReqOrdering: mutationBuildChildReqOrdering, buildProvidedOrdering: mutationBuildProvided, } funcMap[opt.DeleteOp] = funcs{ canProvideOrdering: mutationCanProvideOrdering, buildChildReqOrdering: mutationBuildChildReqOrdering, buildProvidedOrdering: mutationBuildProvided, } funcMap[opt.ExplainOp] = funcs{ canProvideOrdering: canNeverProvideOrdering, buildChildReqOrdering: explainBuildChildReqOrdering, buildProvidedOrdering: noProvidedOrdering, } funcMap[opt.AlterTableSplitOp] = funcs{ canProvideOrdering: canNeverProvideOrdering, buildChildReqOrdering: alterTableSplitBuildChildReqOrdering, buildProvidedOrdering: noProvidedOrdering, } funcMap[opt.AlterTableUnsplitOp] = funcs{ canProvideOrdering: canNeverProvideOrdering, buildChildReqOrdering: alterTableUnsplitBuildChildReqOrdering, buildProvidedOrdering: noProvidedOrdering, } funcMap[opt.AlterTableRelocateOp] = funcs{ canProvideOrdering: canNeverProvideOrdering, buildChildReqOrdering: alterTableRelocateBuildChildReqOrdering, buildProvidedOrdering: noProvidedOrdering, } funcMap[opt.ControlJobsOp] = funcs{ canProvideOrdering: canNeverProvideOrdering, buildChildReqOrdering: controlJobsBuildChildReqOrdering, buildProvidedOrdering: noProvidedOrdering, } funcMap[opt.CancelQueriesOp] = funcs{ canProvideOrdering: canNeverProvideOrdering, buildChildReqOrdering: cancelQueriesBuildChildReqOrdering, 
buildProvidedOrdering: noProvidedOrdering, } funcMap[opt.CancelSessionsOp] = funcs{ canProvideOrdering: canNeverProvideOrdering, buildChildReqOrdering: cancelSessionsBuildChildReqOrdering, buildProvidedOrdering: noProvidedOrdering, } funcMap[opt.ExportOp] = funcs{ canProvideOrdering: canNeverProvideOrdering, buildChildReqOrdering: exportBuildChildReqOrdering, buildProvidedOrdering: noProvidedOrdering, } } func canNeverProvideOrdering(expr memo.RelExpr, required *physical.OrderingChoice) bool { return false } func noChildReqOrdering( parent memo.RelExpr, required *physical.OrderingChoice, childIdx int, ) physical.OrderingChoice { return physical.OrderingChoice{} } func noProvidedOrdering(expr memo.RelExpr, required *physical.OrderingChoice) opt.Ordering { return nil } // remapProvided remaps columns in a provided ordering (according to the given // FDs) so that it only refers to columns in the given outCols set. It also // removes any columns that are redundant according to the FDs. // // Can only be called if the provided ordering can be remapped. // // Does not modify <provided> in place, but it can return the same slice. func remapProvided(provided opt.Ordering, fds *props.FuncDepSet, outCols opt.ColSet) opt.Ordering { if len(provided) == 0 { return nil } // result is nil until we determine that we need to make a copy. var result opt.Ordering // closure is the set of columns that are functionally determined by the // columns in provided[:i]. closure := fds.ComputeClosure(opt.ColSet{}) for i := range provided { col := provided[i].ID() if closure.Contains(col) { // At the level of the new operator, this column is redundant. if result == nil { result = make(opt.Ordering, i, len(provided)) copy(result, provided) } continue } if outCols.Contains(col) { if result != nil { result = append(result, provided[i]) } } else { equivCols := fds.ComputeEquivClosure(opt.MakeColSet(col)) remappedCol, ok := equivCols.Intersection(outCols).Next(0) if !ok { panic(errors.AssertionFailedf("no output column equivalent to %d", log.Safe(col))) } if result == nil { result = make(opt.Ordering, i, len(provided)) copy(result, provided) } result = append(result, opt.MakeOrderingColumn( remappedCol, provided[i].Descending(), )) } closure.Add(col) closure = fds.ComputeClosure(closure) } if result == nil { return provided } return result } // trimProvided returns the smallest prefix of <provided> that is sufficient to // satisfy <required> (in conjunction with the FDs). // // This is useful because in a distributed setting execution is configured to // maintain the provided ordering when merging results from multiple nodes, and // we don't want to make needless comparisons. func trimProvided( provided opt.Ordering, required *physical.OrderingChoice, fds *props.FuncDepSet, ) opt.Ordering { if len(provided) == 0 { return nil } // closure is the set of columns that are functionally determined by the // columns in provided[:provIdx]. closure := fds.ComputeClosure(opt.ColSet{}) provIdx := 0 for reqIdx := range required.Columns { c := &required.Columns[reqIdx] // Consume columns from the provided ordering until their closure intersects // the required group. for !closure.Intersects(c.Group) { closure.Add(provided[provIdx].ID()) closure = fds.ComputeClosure(closure) provIdx++ if provIdx == len(provided) { return provided } } } return provided[:provIdx] } // checkRequired runs sanity checks on the ordering required of an operator. 
func checkRequired(expr memo.RelExpr, required *physical.OrderingChoice) { rel := expr.Relational() // Verify that the ordering only refers to output columns. if !required.SubsetOfCols(rel.OutputCols) { panic(errors.AssertionFailedf("required ordering refers to non-output columns (op %s)", log.Safe(expr.Op()))) } // Verify that columns in a column group are equivalent. for i := range required.Columns { c := &required.Columns[i] if !c.Group.SubsetOf(rel.FuncDeps.ComputeEquivGroup(c.AnyID())) { panic(errors.AssertionFailedf( "ordering column group %s contains non-equivalent columns (op %s)", c.Group, expr.Op(), )) } } } // checkProvided runs sanity checks on a provided ordering. func checkProvided(expr memo.RelExpr, required *physical.OrderingChoice, provided opt.Ordering) { // The provided ordering must refer only to output columns. if outCols := expr.Relational().OutputCols; !provided.ColSet().SubsetOf(outCols) { panic(errors.AssertionFailedf( "provided %s must refer only to output columns %s", provided, outCols, )) } // TODO(radu): this check would be nice to have, but it is too strict. In some // cases, child expressions created during exploration (like constrained // scans) have FDs that are more restricted than what was known when the // parent expression was constructed. Related to #32320. if false { // The provided ordering must intersect the required ordering, after FDs are // applied. fds := &expr.Relational().FuncDeps r := required.Copy() r.Simplify(fds) var p physical.OrderingChoice p.FromOrdering(provided) p.Simplify(fds) if !r.Any() && (p.Any() || !p.Intersects(&r)) { panic(errors.AssertionFailedf( "provided %s does not intersect required %s (FDs: %s)", provided, required, fds, )) } } // The provided ordering should not have unnecessary columns. fds := &expr.Relational().FuncDeps if trimmed := trimProvided(provided, required, fds); len(trimmed) != len(provided) { panic(errors.AssertionFailedf( "provided %s can be trimmed to %s (FDs: %s)", log.Safe(provided), log.Safe(trimmed), log.Safe(fds), )) } }
pkg/sql/opt/ordering/ordering.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.001374606741592288, 0.00022668643214274198, 0.0001619603717699647, 0.00017268085503019392, 0.00021619598555844277 ]
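The ordering.go payload above wires per-operator behavior through an array indexed by operator, installing a safe default for every operator before overriding specific entries in init(). A minimal sketch of that dispatch-table pattern with simplified types; the operator names and function shape are illustrative only.

package main

import "fmt"

type op int

const (
	scanOp op = iota
	sortOp
	numOps
)

// funcs mirrors the per-operator table in ordering.go: each operator gets a
// set of behaviors, defaulting to "can never provide an ordering".
type funcs struct {
	canProvideOrdering func() bool
}

var funcMap [numOps]funcs

func init() {
	// Safe default for every operator, overridden below where appropriate.
	for i := range funcMap {
		funcMap[i] = funcs{canProvideOrdering: func() bool { return false }}
	}
	funcMap[scanOp] = funcs{canProvideOrdering: func() bool { return true }}
}

func main() {
	fmt.Println(funcMap[scanOp].canProvideOrdering()) // true
	fmt.Println(funcMap[sortOp].canProvideOrdering()) // false
}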
{ "id": 6, "code_window": [ "\t\t// runnerCtx will remain unset, so we have this check.\n", "\t\to.runnerCtx = ctx\n", "\t}\n", "\terrToSend = colexecerror.CatchVectorizedRuntimeError(func() {\n", "\t\to.Input.Init(o.runnerCtx)\n", "\t\tfor {\n", "\t\t\tif atomic.LoadUint32(&o.draining) == 1 {\n", "\t\t\t\tterminatedGracefully = true\n", "\t\t\t\treturn\n", "\t\t\t}\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\to.inputInitialized = true\n" ], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "add", "edit_start_line_idx": 263 }
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

import { get } from "lodash";
import { assert } from "chai";
import { createSandbox } from "sinon";
import { track } from "./trackTimeFrameChange";

const sandbox = createSandbox();

describe("trackPaginate", () => {
  const direction = "test";

  afterEach(() => {
    sandbox.reset();
  });

  it("should only call track once", () => {
    const spy = sandbox.spy();
    track(spy)(direction);
    assert.isTrue(spy.calledOnce);
  });

  it("should send the right event", () => {
    const spy = sandbox.spy();
    const expected = "Time Frame Change";
    track(spy)(direction);
    const sent = spy.getCall(0).args[0];
    const event = get(sent, "event");
    assert.isTrue(event === expected);
  });

  it("should send the correct payload", () => {
    const spy = sandbox.spy();
    track(spy)(direction);
    const sent = spy.getCall(0).args[0];
    const changeDirection = get(sent, "properties.direction");
    assert.isTrue(changeDirection === direction);
  });
});
pkg/ui/src/util/analytics/trackTimeFrameChange.spec.ts
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0001795353164197877, 0.00017327851674053818, 0.000170081781107001, 0.00017146098252851516, 0.00000338665563504037 ]
{ "id": 6, "code_window": [ "\t\t// runnerCtx will remain unset, so we have this check.\n", "\t\to.runnerCtx = ctx\n", "\t}\n", "\terrToSend = colexecerror.CatchVectorizedRuntimeError(func() {\n", "\t\to.Input.Init(o.runnerCtx)\n", "\t\tfor {\n", "\t\t\tif atomic.LoadUint32(&o.draining) == 1 {\n", "\t\t\t\tterminatedGracefully = true\n", "\t\t\t\treturn\n", "\t\t\t}\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\to.inputInitialized = true\n" ], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "add", "edit_start_line_idx": 263 }
#!/usr/bin/env bash

source "$(dirname "${0}")/teamcity-support.sh"

# mark_build marks a build with a given label specified as a parameter on
# docker. For example, calling this function on the label "qualified", on a
# v19.2.4 build would tag it as `latest-v19.2-qualified-build`.
mark_build() {
  tc_start_block "Variable Setup"
  build_label=$1

  # On no match, `grep -Eo` returns 1. `|| echo""` makes the script not error.
  release_branch="$(echo "$TC_BUILD_BRANCH" | grep -Eo "^v[0-9]+\.[0-9]+" || echo"")"

  if [[ -z "${DRY_RUN}" ]] ; then
    google_credentials=$GOOGLE_COCKROACH_CLOUD_IMAGES_CREDENTIALS
    gcr_repository="us.gcr.io/cockroach-cloud-images/cockroach"
  else
    google_credentials=$GOOGLE_COCKROACH_RELEASE_CREDENTIALS
    gcr_repository="us.gcr.io/cockroach-release/cockroach-test"
  fi
  tc_end_block "Variable Setup"

  tc_start_block "Push new docker image tag"
  if [[ -z "${release_branch}" ]] ; then
    echo "This tag/branch does not contain a valid major version. Tag/Branch=\"${TC_BUILD_BRANCH}\". Unable to tag docker image as qualified."
    exit
  fi
  log_into_gcloud
  gcloud container images add-tag "${gcr_repository}:${TC_BUILD_BRANCH}" "${gcr_repository}:latest-${release_branch}-${build_label}-build"
  tc_end_block "Push new docker image tag"
}
build/release/teamcity-mark-build.sh
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00017638015560805798, 0.00017330546688754112, 0.00016959694039542228, 0.00017362236394546926, 0.0000025975202788686147 ]
{ "id": 7, "code_window": [ "\t\t\tmsg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, execinfrapb.ProducerMetadata{Err: errToSend}),\n", "\t\t)\n", "\t}\n", "\tif o.span != nil && o.getStats != nil {\n", "\t\tfor _, s := range o.getStats() {\n", "\t\t\to.span.RecordStructured(s)\n", "\t\t}\n", "\t}\n", "\tif trace := execinfra.GetTraceData(ctx); trace != nil {\n", "\t\tmsg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.RemoteProducerMetadata{\n", "\t\t\tValue: &execinfrapb.RemoteProducerMetadata_TraceData_{\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif o.inputInitialized {\n", "\t\t// Retrieving stats and draining the metadata is only safe if the input\n", "\t\t// to the outbox was properly initialized.\n", "\t\tif o.span != nil && o.getStats != nil {\n", "\t\t\tfor _, s := range o.getStats() {\n", "\t\t\t\to.span.RecordStructured(s)\n", "\t\t\t}\n", "\t\t}\n", "\t\tfor _, meta := range o.metadataSources.DrainMeta() {\n", "\t\t\tmsg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, meta))\n" ], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "replace", "edit_start_line_idx": 309 }
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colexecop import ( "context" "time" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) // Operator is a column vector operator that produces a Batch as output. type Operator interface { // Init initializes this operator. It will be called once at operator setup // time. Second, third, etc calls should be noops. If an operator has any // input operators, it's responsible for calling Init on all of those input // operators as well. // // Canceling the provided context results in forceful termination of // execution. The operators are expected to hold onto the provided context // (and derive a new one if needed) that is then used for Next() calls. // // It might panic with an expected error, so there must be a "root" // component that will catch that panic. // TODO(yuzefovich): use the stored context for DrainMeta calls (when // applicable) too. Init(ctx context.Context) // Next returns the next Batch from this operator. Once the operator is // finished, it will return a Batch with length 0. Subsequent calls to // Next at that point will always return a Batch with length 0. // // Calling Next may invalidate the contents of the last Batch returned by // Next. // // It might panic with an expected error, so there must be a "root" // component that will catch that panic. Next() coldata.Batch execinfra.OpNode } // DrainableOperator is an operator that also implements DrainMeta. Next and // DrainMeta may not be called concurrently. type DrainableOperator interface { Operator MetadataSource } // KVReader is an operator that performs KV reads. // TODO(yuzefovich): consider changing the contract to remove the mention of // concurrency safety once stats are only retrieved from Next goroutines. type KVReader interface { // GetBytesRead returns the number of bytes read from KV by this operator. // It must be safe for concurrent use. GetBytesRead() int64 // GetRowsRead returns the number of rows read from KV by this operator. // It must be safe for concurrent use. GetRowsRead() int64 // GetCumulativeContentionTime returns the amount of time KV reads spent // contending. It must be safe for concurrent use. GetCumulativeContentionTime() time.Duration } // ZeroInputNode is an execinfra.OpNode with no inputs. type ZeroInputNode struct{} // ChildCount implements the execinfra.OpNode interface. func (ZeroInputNode) ChildCount(verbose bool) int { return 0 } // Child implements the execinfra.OpNode interface. func (ZeroInputNode) Child(nth int, verbose bool) execinfra.OpNode { colexecerror.InternalError(errors.AssertionFailedf("invalid index %d", nth)) // This code is unreachable, but the compiler cannot infer that. return nil } // NewOneInputNode returns an execinfra.OpNode with a single Operator input. func NewOneInputNode(input Operator) OneInputNode { return OneInputNode{Input: input} } // OneInputNode is an execinfra.OpNode with a single Operator input. 
type OneInputNode struct { Input Operator } // ChildCount implements the execinfra.OpNode interface. func (OneInputNode) ChildCount(verbose bool) int { return 1 } // Child implements the execinfra.OpNode interface. func (n OneInputNode) Child(nth int, verbose bool) execinfra.OpNode { if nth == 0 { return n.Input } colexecerror.InternalError(errors.AssertionFailedf("invalid index %d", nth)) // This code is unreachable, but the compiler cannot infer that. return nil } // BufferingInMemoryOperator is an Operator that buffers up intermediate tuples // in memory and knows how to export them once the memory limit has been // reached. type BufferingInMemoryOperator interface { Operator // ExportBuffered returns all the batches that have been buffered up from the // input and have not yet been processed by the operator. It needs to be // called once the memory limit has been reached in order to "dump" the // buffered tuples into a disk-backed operator. It will return a zero-length // batch once the buffer has been emptied. // // Calling ExportBuffered may invalidate the contents of the last batch // returned by ExportBuffered. ExportBuffered(input Operator) coldata.Batch } // Closer is an object that releases resources when Close is called. Note that // this interface must be implemented by all operators that could be planned on // top of other operators that do actually need to release the resources (e.g. // if we have a simple project on top of a disk-backed operator, that simple // project needs to implement this interface so that Close() call could be // propagated correctly). type Closer interface { Close(ctx context.Context) error } // Closers is a slice of Closers. type Closers []Closer // CloseAndLogOnErr closes all Closers and logs the error if the log verbosity // is 1 or higher. The given prefix is prepended to the log message. // Note: this method should *only* be used when returning an error doesn't make // sense. func (c Closers) CloseAndLogOnErr(ctx context.Context, prefix string) { prefix += ":" for _, closer := range c { if err := closer.Close(ctx); err != nil && log.V(1) { log.Infof(ctx, "%s error closing Closer: %v", prefix, err) } } } // Close closes all Closers and returns the last error (if any occurs). func (c Closers) Close(ctx context.Context) error { var lastErr error for _, closer := range c { if err := closer.Close(ctx); err != nil { lastErr = err } } return lastErr } // Resetter is an interface that operators can implement if they can be reset // either for reusing (to keep the already allocated memory) or during tests. type Resetter interface { // Reset resets the operator for reuse. Reset(ctx context.Context) } // ResettableOperator is an Operator that can be reset. type ResettableOperator interface { Operator Resetter } // FeedOperator is used to feed an Operator chain with input by manually // setting the next batch. type FeedOperator struct { ZeroInputNode NonExplainable batch coldata.Batch } // NewFeedOperator returns a new feed operator. func NewFeedOperator() *FeedOperator { return &FeedOperator{} } // Init implements the colexecop.Operator interface. func (FeedOperator) Init(context.Context) {} // Next implements the colexecop.Operator interface. func (o *FeedOperator) Next() coldata.Batch { return o.batch } // SetBatch sets the next batch to be returned on Next call. 
func (o *FeedOperator) SetBatch(batch coldata.Batch) { o.batch = batch } var _ Operator = &FeedOperator{} // NonExplainable is a marker interface which identifies an Operator that // should be omitted from the output of EXPLAIN (VEC). Note that VERBOSE // explain option will override the omitting behavior. type NonExplainable interface { // nonExplainableMarker is just a marker method. It should never be called. nonExplainableMarker() } // InitHelper is a simple struct that helps Operators implement Init() method. type InitHelper struct { // Ctx is the context passed on the first call to Init(). If it is nil, then // Init() hasn't been called yet. Ctx context.Context } // Init marks the InitHelper as initialized. If true is returned, this is the // first call to Init. func (h *InitHelper) Init(ctx context.Context) bool { if h.Ctx != nil { return false } if ctx == nil { colexecerror.InternalError(errors.AssertionFailedf("nil context is passed")) } h.Ctx = ctx return true } // MakeOneInputHelper returns a new OneInputHelper. func MakeOneInputHelper(input Operator) OneInputHelper { return OneInputHelper{ OneInputNode: NewOneInputNode(input), } } // OneInputHelper is an execinfra.OpNode which only needs to initialize its // single Operator input in Init(). type OneInputHelper struct { OneInputNode InitHelper } // Init implements the Operator interface. func (h *OneInputHelper) Init(ctx context.Context) { if !h.InitHelper.Init(ctx) { return } h.Input.Init(h.Ctx) } // CloserHelper is a simple helper that helps Operators implement Closer. If // close returns true, resources may be released, if it returns false, close has // already been called. type CloserHelper struct { closed bool } // Close marks the CloserHelper as closed. If true is returned, this is the // first call to Close. func (c *CloserHelper) Close() bool { if c.closed { return false } c.closed = true return true } // Reset resets the CloserHelper so that it can be closed again. func (c *CloserHelper) Reset() { c.closed = false } // ClosableOperator is an Operator that needs to be Close()'d. type ClosableOperator interface { Operator Closer } // MakeOneInputCloserHelper returns a new OneInputCloserHelper. func MakeOneInputCloserHelper(input Operator) OneInputCloserHelper { return OneInputCloserHelper{ OneInputNode: NewOneInputNode(input), } } // OneInputCloserHelper is an execinfra.OpNode with a single Operator input // that might need to be Close()'d. type OneInputCloserHelper struct { OneInputNode CloserHelper } var _ Closer = &OneInputCloserHelper{} // Close implements the Closer interface. func (c *OneInputCloserHelper) Close(ctx context.Context) error { if !c.CloserHelper.Close() { return nil } if closer, ok := c.Input.(Closer); ok { return closer.Close(ctx) } return nil } // MakeOneInputInitCloserHelper returns a new OneInputInitCloserHelper. func MakeOneInputInitCloserHelper(input Operator) OneInputInitCloserHelper { return OneInputInitCloserHelper{ OneInputCloserHelper: MakeOneInputCloserHelper(input), } } // OneInputInitCloserHelper is an execinfra.OpNode that only needs to initialize // its single Operator input in Init() and might need to Close() it too. type OneInputInitCloserHelper struct { InitHelper OneInputCloserHelper } // Init implements the Operator interface. 
func (h *OneInputInitCloserHelper) Init(ctx context.Context) { if !h.InitHelper.Init(ctx) { return } h.Input.Init(h.Ctx) } type noopOperator struct { OneInputInitCloserHelper NonExplainable } var _ ResettableOperator = &noopOperator{} // NewNoop returns a new noop Operator. func NewNoop(input Operator) ResettableOperator { return &noopOperator{OneInputInitCloserHelper: MakeOneInputInitCloserHelper(input)} } func (n *noopOperator) Next() coldata.Batch { return n.Input.Next() } func (n *noopOperator) Reset(ctx context.Context) { if r, ok := n.Input.(Resetter); ok { r.Reset(ctx) } } // MetadataSource is an interface implemented by processors and columnar // operators that can produce metadata. type MetadataSource interface { // DrainMeta returns all the metadata produced by the processor or operator. // It will be called exactly once, usually, when the processor or operator // has finished doing its computations. This is a signal that the output // requires no more rows to be returned. // Implementers can choose what to do on subsequent calls (if such occur). // TODO(yuzefovich): modify the contract to require returning nil on all // calls after the first one. DrainMeta() []execinfrapb.ProducerMetadata } // MetadataSources is a slice of MetadataSource. type MetadataSources []MetadataSource // DrainMeta calls DrainMeta on all MetadataSources and returns a single slice // with all the accumulated metadata. Note that this method wraps the draining // with the panic-catcher so that the callers don't have to. func (s MetadataSources) DrainMeta() []execinfrapb.ProducerMetadata { var result []execinfrapb.ProducerMetadata if err := colexecerror.CatchVectorizedRuntimeError(func() { for _, src := range s { result = append(result, src.DrainMeta()...) } }); err != nil { meta := execinfrapb.GetProducerMeta() meta.Err = err result = append(result, *meta) } return result } // VectorizedStatsCollector is the common interface implemented by several // variations of the execution statistics collectors. At the moment of writing // we have two variants: the "default" option (for all Operators) and the // "network" option (strictly for colrpc.Inboxes). type VectorizedStatsCollector interface { Operator // GetStats returns the execution statistics of a single Operator. It will // always return non-nil (but possibly empty) object. GetStats() *execinfrapb.ComponentStats }
pkg/sql/colexecop/operator.go
1
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.288283109664917, 0.0072467997670173645, 0.00015865452587604523, 0.00016758384299464524, 0.044436149299144745 ]
{ "id": 7, "code_window": [ "\t\t\tmsg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, execinfrapb.ProducerMetadata{Err: errToSend}),\n", "\t\t)\n", "\t}\n", "\tif o.span != nil && o.getStats != nil {\n", "\t\tfor _, s := range o.getStats() {\n", "\t\t\to.span.RecordStructured(s)\n", "\t\t}\n", "\t}\n", "\tif trace := execinfra.GetTraceData(ctx); trace != nil {\n", "\t\tmsg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.RemoteProducerMetadata{\n", "\t\t\tValue: &execinfrapb.RemoteProducerMetadata_TraceData_{\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif o.inputInitialized {\n", "\t\t// Retrieving stats and draining the metadata is only safe if the input\n", "\t\t// to the outbox was properly initialized.\n", "\t\tif o.span != nil && o.getStats != nil {\n", "\t\t\tfor _, s := range o.getStats() {\n", "\t\t\t\to.span.RecordStructured(s)\n", "\t\t\t}\n", "\t\t}\n", "\t\tfor _, meta := range o.metadataSources.DrainMeta() {\n", "\t\t\tmsg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, meta))\n" ], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "replace", "edit_start_line_idx": 309 }
INSERT INTO kv2(k, v) VALUES ('a', 'b'), ('c', 'd'), ('e', 'f'), ('f', 'g'), (ARRAY[NULL::INT]), (ARRAY[NULL::INT, 1]), (ARRAY[1, NULL::INT]), (ARRAY[NULL::INT, NULL::INT]), (((9 / 3) * (1 / 3))), (2.0), (2.4 + 4.6)
pkg/sql/sem/tree/testdata/pretty/6.sql
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0001615615183254704, 0.0001615615183254704, 0.0001615615183254704, 0.0001615615183254704, 0 ]
{ "id": 7, "code_window": [ "\t\t\tmsg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, execinfrapb.ProducerMetadata{Err: errToSend}),\n", "\t\t)\n", "\t}\n", "\tif o.span != nil && o.getStats != nil {\n", "\t\tfor _, s := range o.getStats() {\n", "\t\t\to.span.RecordStructured(s)\n", "\t\t}\n", "\t}\n", "\tif trace := execinfra.GetTraceData(ctx); trace != nil {\n", "\t\tmsg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.RemoteProducerMetadata{\n", "\t\t\tValue: &execinfrapb.RemoteProducerMetadata_TraceData_{\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif o.inputInitialized {\n", "\t\t// Retrieving stats and draining the metadata is only safe if the input\n", "\t\t// to the outbox was properly initialized.\n", "\t\tif o.span != nil && o.getStats != nil {\n", "\t\t\tfor _, s := range o.getStats() {\n", "\t\t\t\to.span.RecordStructured(s)\n", "\t\t\t}\n", "\t\t}\n", "\t\tfor _, meta := range o.metadataSources.DrainMeta() {\n", "\t\t\tmsg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, meta))\n" ], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "replace", "edit_start_line_idx": 309 }
exec-ddl CREATE TABLE parent (x INT, p INT PRIMARY KEY, other INT UNIQUE) ---- exec-ddl CREATE TABLE child (c INT PRIMARY KEY, p INT NOT NULL REFERENCES parent(p)) ---- build UPDATE child SET p = 4 ---- update child ├── columns: <none> ├── fetch columns: c:4 child.p:5 ├── update-mapping: │ └── p_new:7 => child.p:2 ├── input binding: &1 ├── project │ ├── columns: p_new:7!null c:4!null child.p:5!null child.crdb_internal_mvcc_timestamp:6 │ ├── scan child │ │ └── columns: c:4!null child.p:5!null child.crdb_internal_mvcc_timestamp:6 │ └── projections │ └── 4 [as=p_new:7] └── f-k-checks └── f-k-checks-item: child(p) -> parent(p) └── anti-join (hash) ├── columns: p:8!null ├── with-scan &1 │ ├── columns: p:8!null │ └── mapping: │ └── p_new:7 => p:8 ├── scan parent │ └── columns: parent.p:10!null └── filters └── p:8 = parent.p:10 build UPDATE parent SET p = p+1 ---- update parent ├── columns: <none> ├── fetch columns: x:5 parent.p:6 other:7 ├── update-mapping: │ └── p_new:9 => parent.p:2 ├── input binding: &1 ├── project │ ├── columns: p_new:9!null x:5 parent.p:6!null other:7 parent.crdb_internal_mvcc_timestamp:8 │ ├── scan parent │ │ └── columns: x:5 parent.p:6!null other:7 parent.crdb_internal_mvcc_timestamp:8 │ └── projections │ └── parent.p:6 + 1 [as=p_new:9] └── f-k-checks └── f-k-checks-item: child(p) -> parent(p) └── semi-join (hash) ├── columns: p:10!null ├── except │ ├── columns: p:10!null │ ├── left columns: p:10!null │ ├── right columns: p:11 │ ├── with-scan &1 │ │ ├── columns: p:10!null │ │ └── mapping: │ │ └── parent.p:6 => p:10 │ └── with-scan &1 │ ├── columns: p:11!null │ └── mapping: │ └── p_new:9 => p:11 ├── scan child │ └── columns: child.p:13!null └── filters └── p:10 = child.p:13 exec-ddl CREATE TABLE grandchild (g INT PRIMARY KEY, c INT NOT NULL REFERENCES child(c)) ---- build UPDATE child SET c = 4 ---- update child ├── columns: <none> ├── fetch columns: child.c:4 p:5 ├── update-mapping: │ └── c_new:7 => child.c:1 ├── input binding: &1 ├── project │ ├── columns: c_new:7!null child.c:4!null p:5!null child.crdb_internal_mvcc_timestamp:6 │ ├── scan child │ │ └── columns: child.c:4!null p:5!null child.crdb_internal_mvcc_timestamp:6 │ └── projections │ └── 4 [as=c_new:7] └── f-k-checks └── f-k-checks-item: grandchild(c) -> child(c) └── semi-join (hash) ├── columns: c:8!null ├── except │ ├── columns: c:8!null │ ├── left columns: c:8!null │ ├── right columns: c:9 │ ├── with-scan &1 │ │ ├── columns: c:8!null │ │ └── mapping: │ │ └── child.c:4 => c:8 │ └── with-scan &1 │ ├── columns: c:9!null │ └── mapping: │ └── c_new:7 => c:9 ├── scan grandchild │ └── columns: grandchild.c:11!null └── filters └── c:8 = grandchild.c:11 # This update shouldn't emit checks for c, since it's unchanged. 
build UPDATE child SET p = 4 ---- update child ├── columns: <none> ├── fetch columns: c:4 child.p:5 ├── update-mapping: │ └── p_new:7 => child.p:2 ├── input binding: &1 ├── project │ ├── columns: p_new:7!null c:4!null child.p:5!null child.crdb_internal_mvcc_timestamp:6 │ ├── scan child │ │ └── columns: c:4!null child.p:5!null child.crdb_internal_mvcc_timestamp:6 │ └── projections │ └── 4 [as=p_new:7] └── f-k-checks └── f-k-checks-item: child(p) -> parent(p) └── anti-join (hash) ├── columns: p:8!null ├── with-scan &1 │ ├── columns: p:8!null │ └── mapping: │ └── p_new:7 => p:8 ├── scan parent │ └── columns: parent.p:10!null └── filters └── p:8 = parent.p:10 build UPDATE child SET p = p ---- update child ├── columns: <none> ├── fetch columns: c:4 child.p:5 ├── update-mapping: │ └── child.p:5 => child.p:2 ├── input binding: &1 ├── scan child │ └── columns: c:4!null child.p:5!null child.crdb_internal_mvcc_timestamp:6 └── f-k-checks └── f-k-checks-item: child(p) -> parent(p) └── anti-join (hash) ├── columns: p:7!null ├── with-scan &1 │ ├── columns: p:7!null │ └── mapping: │ └── child.p:5 => p:7 ├── scan parent │ └── columns: parent.p:9!null └── filters └── p:7 = parent.p:9 build UPDATE child SET p = p+1, c = c+1 ---- update child ├── columns: <none> ├── fetch columns: child.c:4 child.p:5 ├── update-mapping: │ ├── c_new:8 => child.c:1 │ └── p_new:7 => child.p:2 ├── input binding: &1 ├── project │ ├── columns: p_new:7!null c_new:8!null child.c:4!null child.p:5!null child.crdb_internal_mvcc_timestamp:6 │ ├── scan child │ │ └── columns: child.c:4!null child.p:5!null child.crdb_internal_mvcc_timestamp:6 │ └── projections │ ├── child.p:5 + 1 [as=p_new:7] │ └── child.c:4 + 1 [as=c_new:8] └── f-k-checks ├── f-k-checks-item: child(p) -> parent(p) │ └── anti-join (hash) │ ├── columns: p:9!null │ ├── with-scan &1 │ │ ├── columns: p:9!null │ │ └── mapping: │ │ └── p_new:7 => p:9 │ ├── scan parent │ │ └── columns: parent.p:11!null │ └── filters │ └── p:9 = parent.p:11 └── f-k-checks-item: grandchild(c) -> child(c) └── semi-join (hash) ├── columns: c:14!null ├── except │ ├── columns: c:14!null │ ├── left columns: c:14!null │ ├── right columns: c:15 │ ├── with-scan &1 │ │ ├── columns: c:14!null │ │ └── mapping: │ │ └── child.c:4 => c:14 │ └── with-scan &1 │ ├── columns: c:15!null │ └── mapping: │ └── c_new:8 => c:15 ├── scan grandchild │ └── columns: grandchild.c:17!null └── filters └── c:14 = grandchild.c:17 exec-ddl CREATE TABLE child_nullable (c INT PRIMARY KEY, p INT REFERENCES parent(p)) ---- # We don't need the FK check in this case because we are only setting NULL # values. 
build UPDATE child_nullable SET p = NULL ---- update child_nullable ├── columns: <none> ├── fetch columns: c:4 p:5 ├── update-mapping: │ └── p_new:7 => p:2 └── project ├── columns: p_new:7 c:4!null p:5 crdb_internal_mvcc_timestamp:6 ├── scan child_nullable │ └── columns: c:4!null p:5 crdb_internal_mvcc_timestamp:6 └── projections └── NULL::INT8 [as=p_new:7] # Multiple grandchild tables exec-ddl CREATE TABLE grandchild2 (g INT PRIMARY KEY, c INT NOT NULL REFERENCES child(c)) ---- build UPDATE child SET p = 4 ---- update child ├── columns: <none> ├── fetch columns: c:4 child.p:5 ├── update-mapping: │ └── p_new:7 => child.p:2 ├── input binding: &1 ├── project │ ├── columns: p_new:7!null c:4!null child.p:5!null child.crdb_internal_mvcc_timestamp:6 │ ├── scan child │ │ └── columns: c:4!null child.p:5!null child.crdb_internal_mvcc_timestamp:6 │ └── projections │ └── 4 [as=p_new:7] └── f-k-checks └── f-k-checks-item: child(p) -> parent(p) └── anti-join (hash) ├── columns: p:8!null ├── with-scan &1 │ ├── columns: p:8!null │ └── mapping: │ └── p_new:7 => p:8 ├── scan parent │ └── columns: parent.p:10!null └── filters └── p:8 = parent.p:10 exec-ddl CREATE TABLE self (x INT PRIMARY KEY, y INT NOT NULL REFERENCES self(x)) ---- build UPDATE self SET y = 3 ---- update self ├── columns: <none> ├── fetch columns: x:4 self.y:5 ├── update-mapping: │ └── y_new:7 => self.y:2 ├── input binding: &1 ├── project │ ├── columns: y_new:7!null x:4!null self.y:5!null crdb_internal_mvcc_timestamp:6 │ ├── scan self │ │ └── columns: x:4!null self.y:5!null crdb_internal_mvcc_timestamp:6 │ └── projections │ └── 3 [as=y_new:7] └── f-k-checks └── f-k-checks-item: self(y) -> self(x) └── anti-join (hash) ├── columns: y:8!null ├── with-scan &1 │ ├── columns: y:8!null │ └── mapping: │ └── y_new:7 => y:8 ├── scan self │ └── columns: x:9!null └── filters └── y:8 = x:9 build UPDATE self SET x = 3 ---- update self ├── columns: <none> ├── fetch columns: self.x:4 y:5 ├── update-mapping: │ └── x_new:7 => self.x:1 ├── input binding: &1 ├── project │ ├── columns: x_new:7!null self.x:4!null y:5!null crdb_internal_mvcc_timestamp:6 │ ├── scan self │ │ └── columns: self.x:4!null y:5!null crdb_internal_mvcc_timestamp:6 │ └── projections │ └── 3 [as=x_new:7] └── f-k-checks └── f-k-checks-item: self(y) -> self(x) └── semi-join (hash) ├── columns: x:8!null ├── except │ ├── columns: x:8!null │ ├── left columns: x:8!null │ ├── right columns: x:9 │ ├── with-scan &1 │ │ ├── columns: x:8!null │ │ └── mapping: │ │ └── self.x:4 => x:8 │ └── with-scan &1 │ ├── columns: x:9!null │ └── mapping: │ └── x_new:7 => x:9 ├── scan self │ └── columns: y:11!null └── filters └── x:8 = y:11 exec-ddl CREATE TABLE parent_multicol (a INT, b INT, c INT, PRIMARY KEY (a,b,c)) ---- exec-ddl CREATE TABLE child_multicol_simple ( k INT PRIMARY KEY, a INT, b INT, c INT, CONSTRAINT fk FOREIGN KEY(a,b,c) REFERENCES parent_multicol(a,b,c) MATCH SIMPLE ) ---- # With MATCH SIMPLE, we can elide the FK check if any FK column is NULL. 
build UPDATE child_multicol_simple SET a = 1, b = NULL, c = 1 WHERE k = 1 ---- update child_multicol_simple ├── columns: <none> ├── fetch columns: k:6 a:7 b:8 c:9 ├── update-mapping: │ ├── a_new:11 => a:2 │ ├── b_new:12 => b:3 │ └── a_new:11 => c:4 └── project ├── columns: a_new:11!null b_new:12 k:6!null a:7 b:8 c:9 crdb_internal_mvcc_timestamp:10 ├── select │ ├── columns: k:6!null a:7 b:8 c:9 crdb_internal_mvcc_timestamp:10 │ ├── scan child_multicol_simple │ │ └── columns: k:6!null a:7 b:8 c:9 crdb_internal_mvcc_timestamp:10 │ └── filters │ └── k:6 = 1 └── projections ├── 1 [as=a_new:11] └── NULL::INT8 [as=b_new:12] exec-ddl CREATE TABLE child_multicol_full ( k INT PRIMARY KEY, a INT, b INT, c INT, CONSTRAINT fk FOREIGN KEY(a,b,c) REFERENCES parent_multicol(a,b,c) MATCH FULL ) ---- # With MATCH FULL, we can elide the FK check only if all FK columns are NULL. build UPDATE child_multicol_full SET a = 1, b = NULL, c = 1 WHERE k = 1 ---- update child_multicol_full ├── columns: <none> ├── fetch columns: k:6 child_multicol_full.a:7 child_multicol_full.b:8 child_multicol_full.c:9 ├── update-mapping: │ ├── a_new:11 => child_multicol_full.a:2 │ ├── b_new:12 => child_multicol_full.b:3 │ └── a_new:11 => child_multicol_full.c:4 ├── input binding: &1 ├── project │ ├── columns: a_new:11!null b_new:12 k:6!null child_multicol_full.a:7 child_multicol_full.b:8 child_multicol_full.c:9 child_multicol_full.crdb_internal_mvcc_timestamp:10 │ ├── select │ │ ├── columns: k:6!null child_multicol_full.a:7 child_multicol_full.b:8 child_multicol_full.c:9 child_multicol_full.crdb_internal_mvcc_timestamp:10 │ │ ├── scan child_multicol_full │ │ │ └── columns: k:6!null child_multicol_full.a:7 child_multicol_full.b:8 child_multicol_full.c:9 child_multicol_full.crdb_internal_mvcc_timestamp:10 │ │ └── filters │ │ └── k:6 = 1 │ └── projections │ ├── 1 [as=a_new:11] │ └── NULL::INT8 [as=b_new:12] └── f-k-checks └── f-k-checks-item: child_multicol_full(a,b,c) -> parent_multicol(a,b,c) └── anti-join (hash) ├── columns: a:13!null b:14 c:15!null ├── with-scan &1 │ ├── columns: a:13!null b:14 c:15!null │ └── mapping: │ ├── a_new:11 => a:13 │ ├── b_new:12 => b:14 │ └── a_new:11 => c:15 ├── scan parent_multicol │ └── columns: parent_multicol.a:16!null parent_multicol.b:17!null parent_multicol.c:18!null └── filters ├── a:13 = parent_multicol.a:16 ├── b:14 = parent_multicol.b:17 └── c:15 = parent_multicol.c:18 build UPDATE child_multicol_full SET a = NULL, b = NULL, c = NULL WHERE k = 1 ---- update child_multicol_full ├── columns: <none> ├── fetch columns: k:6 a:7 b:8 c:9 ├── update-mapping: │ ├── a_new:11 => a:2 │ ├── a_new:11 => b:3 │ └── a_new:11 => c:4 └── project ├── columns: a_new:11 k:6!null a:7 b:8 c:9 crdb_internal_mvcc_timestamp:10 ├── select │ ├── columns: k:6!null a:7 b:8 c:9 crdb_internal_mvcc_timestamp:10 │ ├── scan child_multicol_full │ │ └── columns: k:6!null a:7 b:8 c:9 crdb_internal_mvcc_timestamp:10 │ └── filters │ └── k:6 = 1 └── projections └── NULL::INT8 [as=a_new:11] exec-ddl CREATE TABLE two (a int, b int, primary key (a, b)) ---- exec-ddl CREATE TABLE fam ( a INT, b INT, c INT, d INT, e INT, FAMILY (a, b, c), FAMILY (d, e), FOREIGN KEY (c, d) REFERENCES two (a, b) ) ---- # Ensure that we fetch all relevant columns for a foreign key. # NOTE: when we no longer require indexes to be created for FKs, ensure that # these still scan all the relevant FK columns. 
norm UPDATE fam SET c = 3 ---- update fam ├── columns: <none> ├── fetch columns: fam.a:8 fam.b:9 fam.c:10 rowid:13 ├── update-mapping: │ └── c_new:15 => fam.c:3 ├── input binding: &1 ├── project │ ├── columns: c_new:15!null fam.a:8 fam.b:9 fam.c:10 fam.d:11 rowid:13!null │ ├── scan fam │ │ └── columns: fam.a:8 fam.b:9 fam.c:10 fam.d:11 rowid:13!null │ └── projections │ └── 3 [as=c_new:15] └── f-k-checks └── f-k-checks-item: fam(c,d) -> two(a,b) └── anti-join (hash) ├── columns: c:16!null d:17!null ├── select │ ├── columns: c:16!null d:17!null │ ├── with-scan &1 │ │ ├── columns: c:16!null d:17 │ │ └── mapping: │ │ ├── c_new:15 => c:16 │ │ └── fam.d:11 => d:17 │ └── filters │ └── d:17 IS NOT NULL ├── scan two │ └── columns: two.a:18!null two.b:19!null └── filters ├── c:16 = two.a:18 └── d:17 = two.b:19 norm UPDATE fam SET d = 3 ---- update fam ├── columns: <none> ├── fetch columns: fam.d:11 e:12 rowid:13 ├── update-mapping: │ └── d_new:15 => fam.d:4 ├── input binding: &1 ├── project │ ├── columns: d_new:15!null fam.c:10 fam.d:11 e:12 rowid:13!null │ ├── scan fam │ │ └── columns: fam.c:10 fam.d:11 e:12 rowid:13!null │ └── projections │ └── 3 [as=d_new:15] └── f-k-checks └── f-k-checks-item: fam(c,d) -> two(a,b) └── anti-join (hash) ├── columns: c:16!null d:17!null ├── select │ ├── columns: c:16!null d:17!null │ ├── with-scan &1 │ │ ├── columns: c:16 d:17!null │ │ └── mapping: │ │ ├── fam.c:10 => c:16 │ │ └── d_new:15 => d:17 │ └── filters │ └── c:16 IS NOT NULL ├── scan two │ └── columns: two.a:18!null two.b:19!null └── filters ├── c:16 = two.a:18 └── d:17 = two.b:19 # Verify that the join hint is set. build prefer-lookup-joins-for-fks UPDATE child SET p = 4 ---- update child ├── columns: <none> ├── fetch columns: c:4 child.p:5 ├── update-mapping: │ └── p_new:7 => child.p:2 ├── input binding: &1 ├── project │ ├── columns: p_new:7!null c:4!null child.p:5!null child.crdb_internal_mvcc_timestamp:6 │ ├── scan child │ │ └── columns: c:4!null child.p:5!null child.crdb_internal_mvcc_timestamp:6 │ └── projections │ └── 4 [as=p_new:7] └── f-k-checks └── f-k-checks-item: child(p) -> parent(p) └── anti-join (hash) ├── columns: p:8!null ├── flags: prefer lookup join (into right side) ├── with-scan &1 │ ├── columns: p:8!null │ └── mapping: │ └── p_new:7 => p:8 ├── scan parent │ └── columns: parent.p:10!null └── filters └── p:8 = parent.p:10
pkg/sql/opt/optbuilder/testdata/fk-checks-update
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0001799497549654916, 0.0001749440561980009, 0.00016687157039996237, 0.0001756087294779718, 0.000002856277205864899 ]
{ "id": 7, "code_window": [ "\t\t\tmsg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, execinfrapb.ProducerMetadata{Err: errToSend}),\n", "\t\t)\n", "\t}\n", "\tif o.span != nil && o.getStats != nil {\n", "\t\tfor _, s := range o.getStats() {\n", "\t\t\to.span.RecordStructured(s)\n", "\t\t}\n", "\t}\n", "\tif trace := execinfra.GetTraceData(ctx); trace != nil {\n", "\t\tmsg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.RemoteProducerMetadata{\n", "\t\t\tValue: &execinfrapb.RemoteProducerMetadata_TraceData_{\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif o.inputInitialized {\n", "\t\t// Retrieving stats and draining the metadata is only safe if the input\n", "\t\t// to the outbox was properly initialized.\n", "\t\tif o.span != nil && o.getStats != nil {\n", "\t\t\tfor _, s := range o.getStats() {\n", "\t\t\t\to.span.RecordStructured(s)\n", "\t\t\t}\n", "\t\t}\n", "\t\tfor _, meta := range o.metadataSources.DrainMeta() {\n", "\t\t\tmsg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, meta))\n" ], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "replace", "edit_start_line_idx": 309 }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package closedts import ( "time" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/hlc" ) // TargetForPolicy returns the target closed timestamp for a range with the // given policy. func TargetForPolicy( now hlc.ClockTimestamp, maxClockOffset time.Duration, lagTargetDuration time.Duration, leadTargetOverride time.Duration, sideTransportCloseInterval time.Duration, policy roachpb.RangeClosedTimestampPolicy, ) hlc.Timestamp { var res hlc.Timestamp switch policy { case roachpb.LAG_BY_CLUSTER_SETTING: // Simple calculation: lag now by desired duration. res = now.ToTimestamp().Add(-lagTargetDuration.Nanoseconds(), 0) case roachpb.LEAD_FOR_GLOBAL_READS: // The LEAD_FOR_GLOBAL_READS calculation is more complex. Instead of the // policy defining an offset from the publisher's perspective, the // policy defines a goal from the consumer's perspective - the goal // being that present time reads (with a possible uncertainty interval) // can be served from all followers. To accomplish this, we must work // backwards to establish a lead time to publish closed timestamps at. // // The calculation looks something like the following: // // # This should be sufficient for any present-time transaction, // # because its global uncertainty limit should be <= this time. // # For more, see (*Transaction).RequiredFrontier. // closed_ts_at_follower = now + max_offset // // # The sender must account for the time it takes to propagate a // # closed timestamp update to its followers. // closed_ts_at_sender = closed_ts_at_follower + propagation_time // // # Closed timestamps propagate in two ways. Both need to make it to // # followers in time. // propagation_time = max(raft_propagation_time, side_propagation_time) // // # Raft propagation takes 3 network hops to go from a leader proposing // # a write (with a closed timestamp update) to the write being applied. // # 1. leader sends MsgProp with entry // # 2. followers send MsgPropResp with vote // # 3. leader sends MsgProp with higher commit index // # // # We also add on a small bit of overhead for request evaluation, log // # sync, and state machine apply latency. // raft_propagation_time = max_network_rtt * 1.5 + raft_overhead // // # Side-transport propagation takes 1 network hop, as there is no voting. // # However, it is delayed by the full side_transport_close_interval in // # the worst-case. // side_propagation_time = max_network_rtt * 0.5 + side_transport_close_interval // // # Combine, we get the following result // closed_ts_at_sender = now + max_offset + max( // max_network_rtt * 1.5 + raft_overhead, // max_network_rtt * 0.5 + side_transport_close_interval, // ) // // By default, this leads to a closed timestamp target that leads the // senders current clock by 800ms. // // NOTE: this calculation takes into consideration maximum clock skew as // it relates to a transaction's uncertainty interval, but it does not // take into consideration "effective" clock skew as it relates to a // follower replica having a faster clock than a leaseholder and // therefore needing the leaseholder to publish even further into the // future. 
Since the effect of getting this wrong is reduced performance // (i.e. missed follower reads) and not a correctness violation (i.e. // stale reads), we can be less strict here. We also expect that even // when two nodes have skewed physical clocks, the "stability" property // of HLC propagation when nodes are communicating should reduce the // effective HLC clock skew. // TODO(nvanbenschoten): make this dynamic, based on the measured // network latencies recorded by the RPC context. This isn't trivial and // brings up a number of questions. For instance, how far into the tail // do we care about? Do we place upper and lower bounds on this value? const maxNetworkRTT = 150 * time.Millisecond // See raft_propagation_time. const raftTransportOverhead = 20 * time.Millisecond raftTransportPropTime := (maxNetworkRTT*3)/2 + raftTransportOverhead // See side_propagation_time. sideTransportPropTime := maxNetworkRTT/2 + sideTransportCloseInterval // See propagation_time. maxTransportPropTime := sideTransportPropTime if maxTransportPropTime < raftTransportPropTime { maxTransportPropTime = raftTransportPropTime } // Include a small amount of extra margin to smooth out temporary // network blips or anything else that slows down closed timestamp // propagation momentarily. const bufferTime = 25 * time.Millisecond leadTimeAtSender := maxTransportPropTime + maxClockOffset + bufferTime // Override entirely with cluster setting, if necessary. if leadTargetOverride != 0 { leadTimeAtSender = leadTargetOverride } // Mark as synthetic, because this time is in the future. res = now.ToTimestamp().Add(leadTimeAtSender.Nanoseconds(), 0).WithSynthetic(true) default: panic("unexpected RangeClosedTimestampPolicy") } // We truncate the logical part in order to save a few bytes over the network, // and also because arithmetic with logical timestamp doesn't make much sense. res.Logical = 0 return res }
pkg/kv/kvserver/closedts/policy.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00017766702512744814, 0.00016948781558312476, 0.00016445259097963572, 0.0001700306311249733, 0.000003376363565621432 ]
{ "id": 8, "code_window": [ "\t\t\t},\n", "\t\t})\n", "\t}\n", "\tfor _, meta := range o.metadataSources.DrainMeta() {\n", "\t\tmsg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, meta))\n", "\t}\n", "\tif len(msg.Data.Metadata) == 0 {\n", "\t\treturn nil\n", "\t}\n", "\treturn stream.Send(msg)\n", "}\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "replace", "edit_start_line_idx": 323 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colexec import ( "context" "fmt" "sync" "sync/atomic" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" ) // unorderedSynchronizerMsg is a light wrapper over a coldata.Batch or metadata // sent over a channel so that the main goroutine can know which input this // message originated from. // Note that either a batch or metadata must be sent, but not both. type unorderedSynchronizerMsg struct { inputIdx int b coldata.Batch meta []execinfrapb.ProducerMetadata } var _ colexecop.Operator = &ParallelUnorderedSynchronizer{} var _ execinfra.OpNode = &ParallelUnorderedSynchronizer{} type parallelUnorderedSynchronizerState int const ( // parallelUnorderedSynchronizerStateUninitialized is the state the // ParallelUnorderedSynchronizer is in when not yet initialized. parallelUnorderedSynchronizerStateUninitialized = iota // parallelUnorderedSynchronizerStateRunning is the state the // ParallelUnorderedSynchronizer is in when all input goroutines have been // spawned and are returning batches. parallelUnorderedSynchronizerStateRunning // parallelUnorderedSynchronizerStateDraining is the state the // ParallelUnorderedSynchronizer is in when a drain has been requested through // DrainMeta. All input goroutines will call DrainMeta on its input and exit. parallelUnorderedSynchronizerStateDraining // parallelUnorderedSyncrhonizerStateDone is the state the // ParallelUnorderedSynchronizer is in when draining has completed. parallelUnorderedSynchronizerStateDone ) // ParallelUnorderedSynchronizer is an Operator that combines multiple Operator streams // into one. type ParallelUnorderedSynchronizer struct { colexecop.InitHelper inputs []colexecargs.OpWithMetaInfo // readNextBatch is a slice of channels, where each channel corresponds to the // input at the same index in inputs. It is used as a barrier for input // goroutines to wait on until the Next goroutine signals that it is safe to // retrieve the next batch. This is done so that inputs that are running // asynchronously do not overwrite batches returned previously, given that // batches must be safe for reuse until the next call to Next. readNextBatch []chan struct{} // numFinishedInputs is incremented atomically whenever one of the provided // inputs exits from a goroutine (gracefully or otherwise). numFinishedInputs uint32 // lastReadInputIdx is the index of the input whose batch we last returned. // Used so that on the next call to Next, we can resume the input. lastReadInputIdx int // batches are the last batches read from the corresponding input. batches []coldata.Batch // nextBatch is a slice of functions each of which obtains a next batch from // the corresponding to it input. nextBatch []func() state int32 // externalWaitGroup refers to the WaitGroup passed in externally. 
Since the // ParallelUnorderedSynchronizer spawns goroutines, this allows callers to // wait for the completion of these goroutines. externalWaitGroup *sync.WaitGroup // internalWaitGroup refers to the WaitGroup internally managed by the // ParallelUnorderedSynchronizer. This will only ever be incremented by the // ParallelUnorderedSynchronizer and decremented by the input goroutines. This // allows the ParallelUnorderedSynchronizer to wait only on internal // goroutines. internalWaitGroup *sync.WaitGroup batchCh chan *unorderedSynchronizerMsg errCh chan error // bufferedMeta is the metadata buffered during a // ParallelUnorderedSynchronizer run. bufferedMeta []execinfrapb.ProducerMetadata } // ChildCount implements the execinfra.OpNode interface. func (s *ParallelUnorderedSynchronizer) ChildCount(verbose bool) int { return len(s.inputs) } // Child implements the execinfra.OpNode interface. func (s *ParallelUnorderedSynchronizer) Child(nth int, verbose bool) execinfra.OpNode { return s.inputs[nth].Root } // NewParallelUnorderedSynchronizer creates a new ParallelUnorderedSynchronizer. // On the first call to Next, len(inputs) goroutines are spawned to read each // input asynchronously (to not be limited by a slow input). These will // increment the passed-in WaitGroup and decrement when done. It is also // guaranteed that these spawned goroutines will have completed on any error or // zero-length batch received from Next. func NewParallelUnorderedSynchronizer( inputs []colexecargs.OpWithMetaInfo, wg *sync.WaitGroup, ) *ParallelUnorderedSynchronizer { readNextBatch := make([]chan struct{}, len(inputs)) for i := range readNextBatch { // Buffer readNextBatch chans to allow for non-blocking writes. There will // only be one message on the channel at a time. readNextBatch[i] = make(chan struct{}, 1) } return &ParallelUnorderedSynchronizer{ inputs: inputs, readNextBatch: readNextBatch, batches: make([]coldata.Batch, len(inputs)), nextBatch: make([]func(), len(inputs)), externalWaitGroup: wg, internalWaitGroup: &sync.WaitGroup{}, // batchCh is a buffered channel in order to offer non-blocking writes to // input goroutines. During normal operation, this channel will have at most // len(inputs) messages. However, during DrainMeta, inputs might need to // push an extra metadata message without blocking, hence the need to double // the size of this channel. batchCh: make(chan *unorderedSynchronizerMsg, len(inputs)*2), // errCh is buffered so that writers do not block. If errCh is full, the // input goroutines will not push an error and exit immediately, given that // the Next goroutine will read an error and panic anyway. errCh: make(chan error, 1), } } // Init is part of the colexecop.Operator interface. func (s *ParallelUnorderedSynchronizer) Init(ctx context.Context) { if !s.InitHelper.Init(ctx) { return } for _, input := range s.inputs { input.Root.Init(s.Ctx) } } func (s *ParallelUnorderedSynchronizer) getState() parallelUnorderedSynchronizerState { return parallelUnorderedSynchronizerState(atomic.LoadInt32(&s.state)) } func (s *ParallelUnorderedSynchronizer) setState(state parallelUnorderedSynchronizerState) { atomic.SwapInt32(&s.state, int32(state)) } // init starts one goroutine per input to read from each input asynchronously // and push to batchCh. Canceling the context (passed in Init() above) results // in all goroutines terminating, otherwise they keep on pushing batches until a // zero-length batch is encountered. Once all inputs terminate, s.batchCh is // closed. 
If an error occurs, the goroutines will make a non-blocking best // effort to push that error on s.errCh, resulting in the first error pushed to // be observed by the Next goroutine. Inputs are asynchronous so that the // synchronizer is minimally affected by slow inputs. func (s *ParallelUnorderedSynchronizer) init() { for i, input := range s.inputs { s.nextBatch[i] = func(input colexecargs.OpWithMetaInfo, inputIdx int) func() { return func() { s.batches[inputIdx] = input.Root.Next() } }(input, i) s.externalWaitGroup.Add(1) s.internalWaitGroup.Add(1) // TODO(asubiotto): Most inputs are Inboxes, and these have handler // goroutines just sitting around waiting for cancellation. I wonder if we // could reuse those goroutines to push batches to batchCh directly. go func(ctx context.Context, input colexecargs.OpWithMetaInfo, inputIdx int) { var span *tracing.Span ctx, span = execinfra.ProcessorSpan(ctx, fmt.Sprintf("parallel unordered sync input %d", inputIdx)) defer func() { if span != nil { span.Finish() } if int(atomic.AddUint32(&s.numFinishedInputs, 1)) == len(s.inputs) { close(s.batchCh) } // We need to close all of the closers of this input before we // notify the wait groups. input.ToClose.CloseAndLogOnErr(ctx, "parallel unordered synchronizer input") s.internalWaitGroup.Done() s.externalWaitGroup.Done() }() sendErr := func(err error) { select { // Non-blocking write to errCh, if an error is present the main // goroutine will use that and cancel all inputs. case s.errCh <- err: default: } } msg := &unorderedSynchronizerMsg{ inputIdx: inputIdx, } for { state := s.getState() switch state { case parallelUnorderedSynchronizerStateRunning: if err := colexecerror.CatchVectorizedRuntimeError(s.nextBatch[inputIdx]); err != nil { sendErr(err) return } msg.b = s.batches[inputIdx] if s.batches[inputIdx].Length() != 0 { // Send the batch. break } // In case of a zero-length batch, proceed to drain the input. fallthrough case parallelUnorderedSynchronizerStateDraining: // Create a new message for metadata. The previous message cannot be // overwritten since it might still be in the channel. msg = &unorderedSynchronizerMsg{ inputIdx: inputIdx, } if span != nil { for _, s := range input.StatsCollectors { span.RecordStructured(s.GetStats()) } if meta := execinfra.GetTraceDataAsMetadata(span); meta != nil { msg.meta = append(msg.meta, *meta) } } if input.MetadataSources != nil { msg.meta = append(msg.meta, input.MetadataSources.DrainMeta()...) } if msg.meta == nil { // Initialize msg.meta to be non-nil, which is a signal that // metadata has been drained. msg.meta = make([]execinfrapb.ProducerMetadata, 0) } default: sendErr(errors.AssertionFailedf("unhandled state in ParallelUnorderedSynchronizer input goroutine: %d", state)) return } // Check msg.meta before sending over the channel since the channel is // the synchronization primitive of meta. sentMeta := false if msg.meta != nil { sentMeta = true } select { case <-ctx.Done(): sendErr(ctx.Err()) return case s.batchCh <- msg: } if sentMeta { // The input has been drained and this input has pushed the metadata // over the channel, exit. return } // Wait until Next goroutine tells us we are good to go. select { case <-s.readNextBatch[inputIdx]: case <-ctx.Done(): sendErr(ctx.Err()) return } } }(s.Ctx, input, i) } } // Next is part of the colexecop.Operator interface. 
func (s *ParallelUnorderedSynchronizer) Next() coldata.Batch { for { state := s.getState() switch state { case parallelUnorderedSynchronizerStateDone: return coldata.ZeroBatch case parallelUnorderedSynchronizerStateUninitialized: s.setState(parallelUnorderedSynchronizerStateRunning) s.init() case parallelUnorderedSynchronizerStateRunning: // Signal the input whose batch we returned in the last call to Next that it // is safe to retrieve the next batch. Since Next has been called, we can // reuse memory instead of making safe copies of batches returned. s.notifyInputToReadNextBatch(s.lastReadInputIdx) default: colexecerror.InternalError(errors.AssertionFailedf("unhandled state in ParallelUnorderedSynchronizer Next goroutine: %d", state)) } select { case err := <-s.errCh: if err != nil { // If we got an error from one of our inputs, propagate this error // through a panic. The caller should then proceed to call DrainMeta, // which will take care of closing any inputs. colexecerror.InternalError(err) } case msg := <-s.batchCh: if msg == nil { // All inputs have exited, double check that this is indeed the case. s.internalWaitGroup.Wait() // Check if this was a graceful termination or not. select { case err := <-s.errCh: if err != nil { colexecerror.InternalError(err) } default: } s.setState(parallelUnorderedSynchronizerStateDone) return coldata.ZeroBatch } s.lastReadInputIdx = msg.inputIdx if msg.meta != nil { s.bufferedMeta = append(s.bufferedMeta, msg.meta...) continue } return msg.b } } } // notifyInputToReadNextBatch is a non-blocking send to notify the given input // that it may proceed to read the next batch from the input. Refer to the // comment of the readNextBatch field in ParallelUnorderedSynchronizer for more // information. func (s *ParallelUnorderedSynchronizer) notifyInputToReadNextBatch(inputIdx int) { select { // This write is non-blocking because if the channel is full, it must be the // case that there is a pending message for the input to proceed. case s.readNextBatch[inputIdx] <- struct{}{}: default: } } // DrainMeta is part of the colexecop.MetadataSource interface. func (s *ParallelUnorderedSynchronizer) DrainMeta() []execinfrapb.ProducerMetadata { prevState := s.getState() s.setState(parallelUnorderedSynchronizerStateDraining) if prevState == parallelUnorderedSynchronizerStateUninitialized { s.init() } // Non-blocking drain of batchCh. This is important mostly because of the // following edge case: all n inputs have pushed batches to the batchCh, so // there are currently n messages. Next notifies the last read input to // retrieve the next batch but encounters an error. There are now n+1 messages // in batchCh. Notifying all these inputs to read the next batch would result // in 2n+1 messages on batchCh, which would cause a deadlock since this // goroutine blocks on the wait group, but an input will block on writing to // batchCh. This is a best effort, but note that for this scenario to occur, // there *must* be at least one message in batchCh (the message belonging to // the input that was notified). for batchChDrained := false; !batchChDrained; { select { case msg := <-s.batchCh: if msg == nil { batchChDrained = true } else if msg.meta != nil { s.bufferedMeta = append(s.bufferedMeta, msg.meta...) } default: batchChDrained = true } } // Unblock any goroutines currently waiting to be told to read the next batch. // This will force all inputs to observe the new draining state. for _, ch := range s.readNextBatch { close(ch) } // Wait for all inputs to exit. 
s.internalWaitGroup.Wait() // Drain the batchCh, this reads the metadata that was pushed. for msg := <-s.batchCh; msg != nil; msg = <-s.batchCh { if msg.meta != nil { s.bufferedMeta = append(s.bufferedMeta, msg.meta...) } } // Buffer any errors that may have happened without blocking on the channel. for exitLoop := false; !exitLoop; { select { case err := <-s.errCh: s.bufferedMeta = append(s.bufferedMeta, execinfrapb.ProducerMetadata{Err: err}) default: exitLoop = true } } // Done. s.setState(parallelUnorderedSynchronizerStateDone) return s.bufferedMeta }
pkg/sql/colexec/parallel_unordered_synchronizer.go
1
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.9709317088127136, 0.024824559688568115, 0.00016065331874415278, 0.00020882341777905822, 0.14779648184776306 ]
{ "id": 8, "code_window": [ "\t\t\t},\n", "\t\t})\n", "\t}\n", "\tfor _, meta := range o.metadataSources.DrainMeta() {\n", "\t\tmsg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, meta))\n", "\t}\n", "\tif len(msg.Data.Metadata) == 0 {\n", "\t\treturn nil\n", "\t}\n", "\treturn stream.Send(msg)\n", "}\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "replace", "edit_start_line_idx": 323 }
// Copyright 2015 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package sql import ( "context" "fmt" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" "github.com/cockroachdb/errors" ) // Grant adds privileges to users. // Current status: // - Target: single database, table, or view. // TODO(marc): open questions: // - should we have root always allowed and not present in the permissions list? // - should we make users case-insensitive? // Privileges: GRANT on database/table/view. // Notes: postgres requires the object owner. // mysql requires the "grant option" and the same privileges, and sometimes superuser. func (p *planner) Grant(ctx context.Context, n *tree.Grant) (planNode, error) { var grantOn privilege.ObjectType switch { case n.Targets.Databases != nil: sqltelemetry.IncIAMGrantPrivilegesCounter(sqltelemetry.OnDatabase) grantOn = privilege.Database case n.Targets.Schemas != nil: sqltelemetry.IncIAMGrantPrivilegesCounter(sqltelemetry.OnSchema) grantOn = privilege.Schema case n.Targets.Types != nil: sqltelemetry.IncIAMGrantPrivilegesCounter(sqltelemetry.OnType) grantOn = privilege.Type default: sqltelemetry.IncIAMGrantPrivilegesCounter(sqltelemetry.OnTable) grantOn = privilege.Table } if err := privilege.ValidatePrivileges(n.Privileges, grantOn); err != nil { return nil, err } // TODO(solon): there are SQL identifiers (tree.Name) in n.Grantees, // but we want SQL usernames. Do we normalize or not? For reference, // REASSIGN / OWNER TO do normalize. // Related: https://github.com/cockroachdb/cockroach/issues/54696 grantees := make([]security.SQLUsername, len(n.Grantees)) for i, grantee := range n.Grantees { grantees[i] = security.MakeSQLUsernameFromPreNormalizedString(string(grantee)) } return &changePrivilegesNode{ isGrant: true, targets: n.Targets, grantees: grantees, desiredprivs: n.Privileges, changePrivilege: func(privDesc *descpb.PrivilegeDescriptor, grantee security.SQLUsername) { privDesc.Grant(grantee, n.Privileges) }, grantOn: grantOn, }, nil } // Revoke removes privileges from users. // Current status: // - Target: single database, table, or view. // TODO(marc): open questions: // - should we have root always allowed and not present in the permissions list? // - should we make users case-insensitive? // Privileges: GRANT on database/table/view. // Notes: postgres requires the object owner. // mysql requires the "grant option" and the same privileges, and sometimes superuser. 
func (p *planner) Revoke(ctx context.Context, n *tree.Revoke) (planNode, error) { var grantOn privilege.ObjectType switch { case n.Targets.Databases != nil: sqltelemetry.IncIAMRevokePrivilegesCounter(sqltelemetry.OnDatabase) grantOn = privilege.Database case n.Targets.Schemas != nil: sqltelemetry.IncIAMRevokePrivilegesCounter(sqltelemetry.OnSchema) grantOn = privilege.Schema case n.Targets.Types != nil: sqltelemetry.IncIAMRevokePrivilegesCounter(sqltelemetry.OnType) grantOn = privilege.Type default: sqltelemetry.IncIAMRevokePrivilegesCounter(sqltelemetry.OnTable) grantOn = privilege.Table } if err := privilege.ValidatePrivileges(n.Privileges, grantOn); err != nil { return nil, err } // TODO(solon): there are SQL identifiers (tree.Name) in n.Grantees, // but we want SQL usernames. Do we normalize or not? For reference, // REASSIGN / OWNER TO do normalize. // Related: https://github.com/cockroachdb/cockroach/issues/54696 grantees := make([]security.SQLUsername, len(n.Grantees)) for i, grantee := range n.Grantees { grantees[i] = security.MakeSQLUsernameFromPreNormalizedString(string(grantee)) } return &changePrivilegesNode{ isGrant: false, targets: n.Targets, grantees: grantees, desiredprivs: n.Privileges, changePrivilege: func(privDesc *descpb.PrivilegeDescriptor, grantee security.SQLUsername) { privDesc.Revoke(grantee, n.Privileges, grantOn) }, grantOn: grantOn, }, nil } type changePrivilegesNode struct { isGrant bool targets tree.TargetList grantees []security.SQLUsername desiredprivs privilege.List changePrivilege func(*descpb.PrivilegeDescriptor, security.SQLUsername) grantOn privilege.ObjectType } // ReadingOwnWrites implements the planNodeReadingOwnWrites interface. // This is because GRANT/REVOKE performs multiple KV operations on descriptors // and expects to see its own writes. func (n *changePrivilegesNode) ReadingOwnWrites() {} func (n *changePrivilegesNode) startExec(params runParams) error { ctx := params.ctx p := params.p // Check whether grantees exists users, err := p.GetAllRoles(ctx) if err != nil { return err } // We're allowed to grant/revoke privileges to/from the "public" role even though // it does not exist: add it to the list of all users and roles. users[security.PublicRoleName()] = true // isRole for i, grantee := range n.grantees { if _, ok := users[grantee]; !ok { sqlName := tree.Name(n.grantees[i].Normalized()) return errors.Errorf("user or role %s does not exist", &sqlName) } } var descriptors []catalog.Descriptor // DDL statements avoid the cache to avoid leases, and can view non-public descriptors. // TODO(vivek): check if the cache can be used. p.runWithOptions(resolveFlags{skipCache: true}, func() { descriptors, err = getDescriptorsFromTargetListForPrivilegeChange(ctx, p, n.targets) }) if err != nil { return err } // The events to log at the end. type eventEntry struct { descID descpb.ID event eventpb.EventPayload } var events []eventEntry // First, update the descriptors. We want to catch all errors before // we update them in KV below. b := p.txn.NewBatch() for _, descriptor := range descriptors { // Disallow privilege changes on system objects. For more context, see #43842. op := "REVOKE" if n.isGrant { op = "GRANT" } if descriptor.GetID() < keys.MinUserDescID { return pgerror.Newf(pgcode.InsufficientPrivilege, "cannot %s on system object", op) } if err := p.CheckPrivilege(ctx, descriptor, privilege.GRANT); err != nil { return err } // Only allow granting/revoking privileges that the requesting // user themselves have on the descriptor. 
for _, priv := range n.desiredprivs { if err := p.CheckPrivilege(ctx, descriptor, priv); err != nil { return err } } privileges := descriptor.GetPrivileges() for _, grantee := range n.grantees { n.changePrivilege(privileges, grantee) } // Validate privilege descriptors directly as the db/table level Validate // may fix up the descriptor. if err := privileges.Validate(descriptor.GetID(), n.grantOn); err != nil { return err } eventDetails := eventpb.CommonSQLPrivilegeEventDetails{} if n.isGrant { eventDetails.GrantedPrivileges = n.desiredprivs.SortedNames() } else { eventDetails.RevokedPrivileges = n.desiredprivs.SortedNames() } switch d := descriptor.(type) { case *dbdesc.Mutable: if err := p.writeDatabaseChangeToBatch(ctx, d, b); err != nil { return err } if err := p.createNonDropDatabaseChangeJob(ctx, d.ID, fmt.Sprintf("updating privileges for database %d", d.ID)); err != nil { return err } for _, grantee := range n.grantees { privs := eventDetails // copy the granted/revoked privilege list. privs.Grantee = grantee.Normalized() events = append(events, eventEntry{d.ID, &eventpb.ChangeDatabasePrivilege{ CommonSQLPrivilegeEventDetails: privs, DatabaseName: (*tree.Name)(&d.Name).String(), }}) } case *tabledesc.Mutable: // TODO (lucy): This should probably have a single consolidated job like // DROP DATABASE. if err := p.createOrUpdateSchemaChangeJob( ctx, d, fmt.Sprintf("updating privileges for table %d", d.ID), descpb.InvalidMutationID, ); err != nil { return err } if !d.Dropped() { if err := p.writeSchemaChangeToBatch(ctx, d, b); err != nil { return err } } for _, grantee := range n.grantees { privs := eventDetails // copy the granted/revoked privilege list. privs.Grantee = grantee.Normalized() events = append(events, eventEntry{d.ID, &eventpb.ChangeTablePrivilege{ CommonSQLPrivilegeEventDetails: privs, TableName: d.Name, // FIXME }}) } case *typedesc.Mutable: err := p.writeTypeSchemaChange(ctx, d, fmt.Sprintf("updating privileges for type %d", d.ID)) if err != nil { return err } for _, grantee := range n.grantees { privs := eventDetails // copy the granted/revoked privilege list. privs.Grantee = grantee.Normalized() events = append(events, eventEntry{d.ID, &eventpb.ChangeTypePrivilege{ CommonSQLPrivilegeEventDetails: privs, TypeName: d.Name, // FIXME }}) } case *schemadesc.Mutable: if err := p.writeSchemaDescChange( ctx, d, fmt.Sprintf("updating privileges for schema %d", d.ID), ); err != nil { return err } for _, grantee := range n.grantees { privs := eventDetails // copy the granted/revoked privilege list. privs.Grantee = grantee.Normalized() events = append(events, eventEntry{d.ID, &eventpb.ChangeSchemaPrivilege{ CommonSQLPrivilegeEventDetails: privs, SchemaName: d.Name, // FIXME }}) } } } // Now update the descriptors transactionally. if err := p.txn.Run(ctx, b); err != nil { return err } // Record the privilege changes in the event log. This is an // auditable log event and is recorded in the same transaction as // the table descriptor update. descIDs := make(descpb.IDs, 0, len(events)) eventPayloads := make([]eventpb.EventPayload, 0, len(events)) for _, ev := range events { descIDs = append(descIDs, ev.descID) eventPayloads = append(eventPayloads, ev.event) } if err := params.p.batchLogEvents(params.ctx, descIDs, eventPayloads...); err != nil { return err } return nil } func (*changePrivilegesNode) Next(runParams) (bool, error) { return false, nil } func (*changePrivilegesNode) Values() tree.Datums { return tree.Datums{} } func (*changePrivilegesNode) Close(context.Context) {}
pkg/sql/grant_revoke.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0012363719288259745, 0.0003442861489020288, 0.0001633319625398144, 0.000170803046785295, 0.00034366489853709936 ]
{ "id": 8, "code_window": [ "\t\t\t},\n", "\t\t})\n", "\t}\n", "\tfor _, meta := range o.metadataSources.DrainMeta() {\n", "\t\tmsg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, meta))\n", "\t}\n", "\tif len(msg.Data.Metadata) == 0 {\n", "\t\treturn nil\n", "\t}\n", "\treturn stream.Send(msg)\n", "}\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "replace", "edit_start_line_idx": 323 }
# ============================================================================= # comp.opt contains normalization rules for comparison operators. # ============================================================================= # CommuteVarInequality is similar to CommuteVar (in scalar.opt), except it # handles inequality comparison operators that need special handling to commute # operands. [CommuteVarInequality, Normalize] (Le | Lt | Ge | Gt $left:^(Variable) $right:(Variable)) => (CommuteInequality (OpName) $left $right) # CommuteConstInequality is similar to CommuteConst (in scalar.opt), except # that it handles inequality comparison operators that need special handling to # commute operands. [CommuteConstInequality, Normalize] (Le | Lt | Ge | Gt $left:(ConstValue) $right:^(ConstValue)) => (CommuteInequality (OpName) $left $right) # NormalizeCmpPlusConst builds up constant expression trees on one side of the # comparison, in cases like this: # cmp cmp # / \ / \ # [+] 2 -> a [-] # / \ / \ # a 1 2 1 # # See the NormalizeConstEqNe pattern for the definition of constant expression # tree. Also, the NormalizePlusMult pattern ensures that constant expression # trees are on the right side of the expression, so no "flipped" pattern is # necessary. Other patterns will fold new constant expressions further. # # NOTE: Ne is not part of the operator choices because it wasn't handled in # normalize.go either. We can add once we've proved it's OK to do so. [NormalizeCmpPlusConst, Normalize] (Eq | Ge | Gt | Le | Lt (Plus $leftLeft:^(ConstValue) $leftRight:(ConstValue)) $right:(ConstValue) & (CanConstructBinary Minus $right $leftRight) ) => ((OpName) $leftLeft (Minus $right $leftRight)) # NormalizeCmpMinusConst builds up constant expression trees on one side of the # comparison, in cases like this: # cmp cmp # / \ / \ # [-] 2 -> a [+] # / \ / \ # a 1 2 1 # # See the NormalizeConstEqNe pattern for the definition of constant expression # tree. Other patterns will fold new constant expressions further. [NormalizeCmpMinusConst, Normalize] (Eq | Ge | Gt | Le | Lt (Minus $leftLeft:^(ConstValue) $leftRight:(ConstValue)) $right:(ConstValue) & (CanConstructBinary Plus $right $leftRight) ) => ((OpName) $leftLeft (Plus $right $leftRight)) # NormalizeCmpConstMinus builds up constant expression trees on one side of the # comparison, in cases like this: # cmp cmp # / \ / \ # [-] 2 -> [-] a # / \ / \ # 1 a 1 2 # # See the NormalizeConstEqNe pattern for the definition of constant expression # tree. Other patterns will switch the constant to the right side and fold the # constant expression if possible. [NormalizeCmpConstMinus, Normalize] (Eq | Ge | Gt | Le | Lt (Minus $leftLeft:(ConstValue) $leftRight:^(ConstValue)) $right:(ConstValue) & (CanConstructBinary Minus $leftLeft $right) ) => ((OpName) (Minus $leftLeft $right) $leftRight) # NormalizeTupleEquality breaks up expressions like: # (a, b, c) = (x, y, z) # into # (a = x) AND (b = y) AND (c = z) # # This rule makes it easier to extract constraints from boolean expressions, # so that recognition code doesn't have to handle the tuple case separately. [NormalizeTupleEquality, Normalize] (Eq (Tuple $left:*) (Tuple $right:*)) => (NormalizeTupleEquality $left $right) # FoldNullComparisonLeft replaces the comparison operator with null if its # left input is null. 
[FoldNullComparisonLeft, Normalize] (Eq | Ne | Ge | Gt | Le | Lt | Like | NotLike | ILike | NotILike | SimilarTo | NotSimilarTo | RegMatch | NotRegMatch | RegIMatch | NotRegIMatch | Contains | Overlaps | JsonExists | JsonSomeExists | JsonAllExists $left:(Null) * ) => (Null (BoolType)) # FoldNullComparisonRight replaces the comparison operator with null if its # right input is null. [FoldNullComparisonRight, Normalize] (Eq | Ne | Ge | Gt | Le | Lt | Like | NotLike | ILike | NotILike | SimilarTo | NotSimilarTo | RegMatch | NotRegMatch | RegIMatch | NotRegIMatch | Contains | ContainedBy | Overlaps | JsonExists | JsonSomeExists | JsonAllExists * $right:(Null) ) => (Null (BoolType)) # FoldIsNull replaces NULL IS NULL with True. [FoldIsNull, Normalize] (Is (Null) (Null)) => (True) # FoldNonNullIsNull replaces x IS NULL with False where x is a non-Null constant. [FoldNonNullIsNull, Normalize] (Is $left:(IsNeverNull $left) (Null)) => (False) # FoldNullTupleIsTupleNull replaces x IS NULL with True if x is a tuple with # only constant, null elements. [FoldNullTupleIsTupleNull, Normalize] (IsTupleNull $input:(Tuple) & (HasAllNullElements $input)) => (True) # FoldNonNullTupleIsTupleNull replaces x IS NULL with False if x is a tuple # with at least one constant, non-null element. [FoldNonNullTupleIsTupleNull, Normalize] (IsTupleNull $input:(Tuple) & (HasNonNullElement $input)) => (False) # FoldIsNotNull replaces NULL IS NOT NULL with False. [FoldIsNotNull, Normalize] (IsNot (Null) (Null)) => (False) # FoldNonNullIsNotNull replaces x IS NOT NULL with True where x is a non-Null constant. [FoldNonNullIsNotNull, Normalize] (IsNot $left:(IsNeverNull $left) (Null)) => (True) # FoldNonNullTupleIsTupleNotNull replaces x IS NOT NULL with True if x is a # tuple with only constant, non-null elements. [FoldNonNullTupleIsTupleNotNull, Normalize] (IsTupleNotNull $input:(Tuple) & (HasAllNonNullElements $input)) => (True) # FoldNullTupleIsTupleNotNull replaces x IS NOT NULL with False if x is a tuple # with at least one constant, null element. [FoldNullTupleIsTupleNotNull, Normalize] (IsTupleNotNull $input:(Tuple) & (HasNullElement $input)) => (False) # CommuteNullIs moves a NULL onto the right side of an IS/IS NOT comparison. [CommuteNullIs, Normalize] (Is | IsNot $left:(Null) $right:^(Null)) => ((OpName) $right $left) # NormalizeCmpTimeZoneFunction normalizes timezone functions within # comparison operators. It only matches expressions when: # # 1. The left side of the comparison is a timezone() function. # 2. The second argument to timezone() is a variable of type TIMESTAMP. # 3. The right side of the comparison is a constant value TIMESTAMPTZ. # # Here's an example: # # timezone('America/Denver', ts) = '2020-06-01 12:35:55-07' # => # ts = timezone('America/Denver', '2020-06-01 12:35:55-07') # # This normalization is valid because the overloaded function timezone(zone, # TIMESTAMP) is the inverse of timezone(zone, TIMESTAMPTZ). [NormalizeCmpTimeZoneFunction, Normalize] (Eq | Ge | Gt | Le | Lt (Function $args:* $private:(FunctionPrivate "timezone")) $right:(ConstValue) & (IsTimestampTZ $right) & (IsTimestamp $ts:(SecondScalarListExpr $args)) & ^(IsConstValueOrGroupOfConstValues $ts) ) => ((OpName) $ts (MakeTimeZoneFunction (FirstScalarListExpr $args) $right) ) # NormalizeCmpTimeZoneFunctionTZ normalizes timezone functions within # comparison operators. It only matches expressions when: # # 1. The left side of the comparison is a timezone() function. # 2. 
The second argument to timezone() is a variable of type TIMESTAMPTZ. # 3. The right side of the comparison is a constant value TIMESTAMP. # # Here's an example: # # timezone('America/Denver', tz) = '2020-06-01 12:35:55' # => # tz = timezone('America/Denver', '2020-06-01 12:35:55') # # This normalization is possible because the overloaded function timezone(zone, # TIMESTAMPTZ) is the inverse of timezone(zone, TIMESTAMP). [NormalizeCmpTimeZoneFunctionTZ, Normalize] (Eq | Ge | Gt | Le | Lt (Function $args:* $private:(FunctionPrivate "timezone")) $right:(ConstValue) & (IsTimestamp $right) & (IsTimestampTZ $tz:(SecondScalarListExpr $args)) & ^(IsConstValueOrGroupOfConstValues $tz) ) => ((OpName) $tz (MakeTimeZoneFunction (FirstScalarListExpr $args) $right) ) # FoldEqZeroSTDistance matches an expression of the form: 'ST_Distance(a,b) = 0' # and replaces it with 'ST_Intersects(a,b)'. This replacement allows for # early-exit behavior, and may allow an inverted index scan to be generated. [FoldEqZeroSTDistance, Normalize] (Eq (Function $args:* $private:(FunctionPrivate "st_distance")) $right:(Const $value:* & (IsFloatDatum $value) & (DatumsEqual $value 0) ) ) => (MakeIntersectionFunction $args) # FoldCmpSTDistanceLeft replaces an expression of the form: # 'ST_Distance(...) <= x' with a call to ST_DWithin or ST_DWithinExclusive. This # replacement allows early-exit behavior, and may enable use of an inverted # index scan. See the MakeSTDWithin method for the specific variation on # ST_DWithin that is used to replace expressions with different comparison # operators (e.g. '<' vs '<='). [FoldCmpSTDistanceLeft, Normalize] (Ge | Gt | Le | Lt (Function $args:* $private:(FunctionPrivate "st_distance")) $right:* ) => (MakeSTDWithinLeft (OpName) $args $right) # FoldCmpSTDistanceRight mirrors FoldCmpSTDistanceLeft. [FoldCmpSTDistanceRight, Normalize] (Ge | Gt | Le | Lt $left:* (Function $args:* $private:(FunctionPrivate "st_distance")) ) => (MakeSTDWithinRight (OpName) $args $left) # FoldCmpSTMaxDistanceLeft is a variant of FoldCmpSTDistanceLeft that matches # ST_MaxDistance instead of ST_Distance. [FoldCmpSTMaxDistanceLeft, Normalize] (Ge | Gt | Le | Lt (Function $args:* $private:(FunctionPrivate "st_maxdistance") ) $right:* ) => (MakeSTDFullyWithinLeft (OpName) $args $right) # FoldCmpSTMaxDistanceRight mirrors FoldCmpSTMaxDistanceLeft. [FoldCmpSTMaxDistanceRight, Normalize] (Ge | Gt | Le | Lt $left:* (Function $args:* $private:(FunctionPrivate "st_maxdistance") ) ) => (MakeSTDFullyWithinRight (OpName) $args $left)
pkg/sql/opt/norm/rules/comp.opt
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00017345928063150495, 0.0001674388477113098, 0.00015874954988248646, 0.00016893423162400723, 0.000004210501629131613 ]
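The comparison-normalization rules in the record above (for example NormalizeCmpPlusConst) move constant arithmetic off the variable side of a comparison so later rules can fold it, turning (a + 1) = 2 into a = (2 - 1). The following is a small, self-contained Go sketch of the same rewrite on a toy expression tree; the AST types are invented for illustration and are unrelated to the optimizer's real expression representation, and the sketch folds the resulting constant eagerly for brevity.

package main

import "fmt"

type expr interface{ String() string }

type variable struct{ name string }
type constant struct{ v int }
type binary struct {
    op          string
    left, right expr
}

func (v variable) String() string { return v.name }
func (c constant) String() string { return fmt.Sprint(c.v) }
func (b binary) String() string   { return fmt.Sprintf("(%s %s %s)", b.left, b.op, b.right) }

// normalizeCmpPlusConst rewrites (x + c1) cmp c2 into x cmp (c2 - c1),
// mirroring the rule's intent on this toy AST.
func normalizeCmpPlusConst(cmp binary) binary {
    plus, ok := cmp.left.(binary)
    if !ok || plus.op != "+" {
        return cmp
    }
    c1, ok1 := plus.right.(constant)
    c2, ok2 := cmp.right.(constant)
    if !ok1 || !ok2 {
        return cmp
    }
    return binary{op: cmp.op, left: plus.left, right: constant{v: c2.v - c1.v}}
}

func main() {
    in := binary{op: "=", left: binary{op: "+", left: variable{"a"}, right: constant{1}}, right: constant{2}}
    fmt.Println(in.String(), "=>", normalizeCmpPlusConst(in).String()) // ((a + 1) = 2) => (a = 1)
}

Keeping the constant expression tree on one side is what lets a later folding rule collapse it to a single datum and makes constraint extraction straightforward.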
{ "id": 8, "code_window": [ "\t\t\t},\n", "\t\t})\n", "\t}\n", "\tfor _, meta := range o.metadataSources.DrainMeta() {\n", "\t\tmsg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, meta))\n", "\t}\n", "\tif len(msg.Data.Metadata) == 0 {\n", "\t\treturn nil\n", "\t}\n", "\treturn stream.Send(msg)\n", "}\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/colflow/colrpc/outbox.go", "type": "replace", "edit_start_line_idx": 323 }
// Copyright 2015 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. syntax = "proto3"; package cockroach.server.status.statuspb; option go_package = "statuspb"; import "roachpb/metadata.proto"; import "build/info.proto"; import "gogoproto/gogo.proto"; // StoreStatus records the most recent values of metrics for a store. message StoreStatus { // desc is the store descriptor. roachpb.StoreDescriptor desc = 1 [(gogoproto.nullable) = false]; // metrics contains the last sampled values for the node metrics. map<string, double> metrics = 2; } // NodeStatus records the most recent values of metrics for a node. // API: PUBLIC ALPHA message NodeStatus { // desc is the node descriptor. roachpb.NodeDescriptor desc = 1 [(gogoproto.nullable) = false]; // build_info describes the `cockroach` executable file. // API: PUBLIC ALPHA build.Info build_info = 2 [(gogoproto.nullable) = false]; // started_at is the unix timestamp at which the node process was // last started. // API: PUBLIC ALPHA int64 started_at = 3; // updated_at is the unix timestamp at which the node status record // was last updated. // API: PUBLIC ALPHA int64 updated_at = 4; // metrics contains the last sampled values for the node metrics. map<string, double> metrics = 5; // store_statuses provides the store status payloads for all // the stores on that node. repeated StoreStatus store_statuses = 6 [(gogoproto.nullable) = false]; // args is the list of command-line arguments used to last start the node. repeated string args = 7; // env is the list of environment variables that influenced // the node's configuration. repeated string env = 8; // latencies is a map of nodeIDs to nanoseconds which is the latency // between this node and the other node. // // NOTE: this is deprecated and is only set if the min supported // cluster version is >= VersionRPCNetworkStats. map<int32, int64> latencies = 9 [ (gogoproto.nullable) = false, (gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID" ]; message NetworkActivity { int64 incoming = 1; // in bytes int64 outgoing = 2; // in bytes int64 latency = 3; // in nanoseconds } // activity is a map of nodeIDs to network statistics from this node // to other nodes. map<int32, NetworkActivity> activity = 10 [ (gogoproto.nullable) = false, (gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID" ]; // total_system_memory is the total RAM available to the system // (or, if detected, the memory available to the cgroup this process is in) // in bytes. // API: PUBLIC ALPHA int64 total_system_memory = 11; // num_cpus is the number of logical CPUs as reported by the operating system // on the host where the `cockroach` process is running. Note that // this does not report the number of CPUs actually used by `cockroach`; // this parameter is controlled separately. // API: PUBLIC ALPHA int32 num_cpus = 12; } // A HealthAlert is an undesired condition detected by a server which should be // exposed to the operators. message HealthAlert { // store_id is zero for alerts not specific to a store (i.e. apply at the node level). int32 store_id = 1 [ // NB: trying to make this nullable does not work with the custom type. 
You need a // pointer type as the custom type, but that breaks protoc-gen-gogoroach. (gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/roachpb.StoreID", (gogoproto.customname) = "StoreID" ]; enum Category { METRICS = 0; NETWORK = 1; } Category category = 2; string description = 3; double value = 4; } // HealthCheckResult holds a number of HealthAlerts. message HealthCheckResult{ repeated HealthAlert alerts = 1 [(gogoproto.nullable) = false]; }
pkg/server/status/statuspb/status.proto
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0001776639837771654, 0.00016985803085844964, 0.00016507346299476922, 0.00016909910482354462, 0.00000329573344970413 ]
{ "id": 9, "code_window": [ "\tif span != nil {\n", "\t\tdefer span.Finish()\n", "\t}\n", "\t// Since HashRouter runs in a separate goroutine, we want to be safe and\n", "\t// make sure that we catch errors in all code paths, so we wrap the whole\n", "\t// method with a catcher. Note that we also have \"internal\" catchers as\n", "\t// well for more fine-grained control of error propagation.\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tvar inputInitialized bool\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "add", "edit_start_line_idx": 552 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colflow import ( "context" "sync" "sync/atomic" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colcontainer" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexechash" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/marusama/semaphore" ) // routerOutput is an interface implemented by router outputs. It exists for // easier test mocking of outputs. type routerOutput interface { execinfra.OpNode // initWithHashRouter passes a reference to the HashRouter that will be // pushing batches to this output. initWithHashRouter(*HashRouter) // addBatch adds the elements specified by the selection vector from batch // to the output. It returns whether or not the output changed its state to // blocked (see implementations). addBatch(context.Context, coldata.Batch) bool // cancel tells the output to stop producing batches. Optionally forwards an // error if not nil. cancel(context.Context, error) // forwardErr forwards an error to the output. The output should call // colexecerror.ExpectedError with this error on the next call to Next. // Calling forwardErr multiple times will result in the most recent error // overwriting the previous error. forwardErr(error) // resetForTests resets the routerOutput for a benchmark or test run. resetForTests(context.Context) } // getDefaultRouterOutputBlockedThreshold returns the number of unread values // buffered by the routerOutputOp after which the output is considered blocked. // It is a function rather than a variable so that in tests we could modify // coldata.BatchSize() (if it were a variable, then its value would be // evaluated before we set the desired batch size). func getDefaultRouterOutputBlockedThreshold() int { return coldata.BatchSize() * 2 } type routerOutputOpState int const ( // routerOutputOpRunning is the state in which routerOutputOp operates // normally. The router output transitions into routerOutputDoneAdding when // a zero-length batch was added or routerOutputOpDraining when it // encounters an error or the drain is requested. routerOutputOpRunning routerOutputOpState = iota // routerOutputDoneAdding is the state in which a zero-length was batch was // added to routerOutputOp and no more batches will be added. The router // output transitions to routerOutputOpDraining when the output is canceled // (either closed or the drain is requested). routerOutputDoneAdding // routerOutputOpDraining is the state in which routerOutputOp always // returns zero-length batches on calls to Next. 
routerOutputOpDraining ) // drainCoordinator is an interface that the HashRouter implements to coordinate // cancellation of all of its outputs in the case of an error and draining in // the case of graceful termination. // WARNING: No locks should be held when calling these methods, as the // HashRouter might call routerOutput methods (e.g. cancel) that attempt to // reacquire locks. type drainCoordinator interface { // encounteredError should be called when a routerOutput encounters an error. // This terminates execution. No locks should be held when calling this // method, since cancellation could occur. encounteredError(context.Context) // drainMeta should be called exactly once when the routerOutput moves to // draining. drainMeta() []execinfrapb.ProducerMetadata } type routerOutputOp struct { colexecop.InitHelper // input is a reference to our router. input execinfra.OpNode // drainCoordinator is a reference to the HashRouter to be able to notify it // if the output encounters an error or transitions to a draining state. drainCoordinator drainCoordinator types []*types.T // unblockedEventsChan is signaled when a routerOutput changes state from // blocked to unblocked. unblockedEventsChan chan<- struct{} mu struct { syncutil.Mutex state routerOutputOpState // forwardedErr is an error that was forwarded by the HashRouter. If set, // any subsequent calls to Next will return this error. forwardedErr error cond *sync.Cond // data is a SpillingQueue, a circular buffer backed by a disk queue. data *colexecutils.SpillingQueue numUnread int blocked bool } testingKnobs routerOutputOpTestingKnobs } func (o *routerOutputOp) ChildCount(verbose bool) int { return 1 } func (o *routerOutputOp) Child(nth int, verbose bool) execinfra.OpNode { if nth == 0 { return o.input } colexecerror.InternalError(errors.AssertionFailedf("invalid index %d", nth)) // This code is unreachable, but the compiler cannot infer that. return nil } var _ colexecop.Operator = &routerOutputOp{} type routerOutputOpTestingKnobs struct { // blockedThreshold is the number of buffered values above which we consider // a router output to be blocked. It defaults to // defaultRouterOutputBlockedThreshold but can be modified by tests to test // edge cases. blockedThreshold int // addBatchTestInducedErrorCb is called after any function call that could // produce an error if that error is nil. If the callback returns an error, // the router output overwrites the nil error with the returned error. // It is guaranteed that this callback will be called at least once during // normal execution. addBatchTestInducedErrorCb func() error // nextTestInducedErrorCb is called after any function call that could // produce an error if that error is nil. If the callback returns an error, // the router output overwrites the nil error with the returned error. // It is guaranteed that this callback will be called at least once during // normal execution. nextTestInducedErrorCb func() error } // routerOutputOpArgs are the arguments to newRouterOutputOp. All fields apart // from the testing knobs are optional. type routerOutputOpArgs struct { // All fields are required unless marked optional. types []*types.T // unlimitedAllocator should not have a memory limit. Pass in a soft // memoryLimit that will be respected instead. unlimitedAllocator *colmem.Allocator // memoryLimit acts as a soft limit to allow the router output to use disk // when it is exceeded. 
memoryLimit int64 diskAcc *mon.BoundAccount cfg colcontainer.DiskQueueCfg fdSemaphore semaphore.Semaphore // unblockedEventsChan must be a buffered channel. unblockedEventsChan chan<- struct{} testingKnobs routerOutputOpTestingKnobs } // newRouterOutputOp creates a new router output. func newRouterOutputOp(args routerOutputOpArgs) *routerOutputOp { if args.testingKnobs.blockedThreshold == 0 { args.testingKnobs.blockedThreshold = getDefaultRouterOutputBlockedThreshold() } o := &routerOutputOp{ types: args.types, unblockedEventsChan: args.unblockedEventsChan, testingKnobs: args.testingKnobs, } o.mu.cond = sync.NewCond(&o.mu) o.mu.data = colexecutils.NewSpillingQueue( &colexecutils.NewSpillingQueueArgs{ UnlimitedAllocator: args.unlimitedAllocator, Types: args.types, MemoryLimit: args.memoryLimit, DiskQueueCfg: args.cfg, FDSemaphore: args.fdSemaphore, DiskAcc: args.diskAcc, }, ) return o } func (o *routerOutputOp) Init(ctx context.Context) { o.InitHelper.Init(ctx) } // nextErrorLocked is a helper method that handles an error encountered in Next. func (o *routerOutputOp) nextErrorLocked(err error) { o.mu.state = routerOutputOpDraining o.maybeUnblockLocked() // Unlock the mutex, since the HashRouter will cancel all outputs. o.mu.Unlock() o.drainCoordinator.encounteredError(o.Ctx) o.mu.Lock() colexecerror.InternalError(err) } // Next returns the next coldata.Batch from the routerOutputOp. Note that Next // is designed for only one concurrent caller and will block until data is // ready. func (o *routerOutputOp) Next() coldata.Batch { o.mu.Lock() defer o.mu.Unlock() for o.mu.forwardedErr == nil && o.mu.state == routerOutputOpRunning && o.mu.data.Empty() { // Wait until there is data to read or the output is canceled. o.mu.cond.Wait() } if o.mu.forwardedErr != nil { colexecerror.ExpectedError(o.mu.forwardedErr) } if o.mu.state == routerOutputOpDraining { return coldata.ZeroBatch } b, err := o.mu.data.Dequeue(o.Ctx) if err == nil && o.testingKnobs.nextTestInducedErrorCb != nil { err = o.testingKnobs.nextTestInducedErrorCb() } if err != nil { o.nextErrorLocked(err) } o.mu.numUnread -= b.Length() if o.mu.numUnread <= o.testingKnobs.blockedThreshold { o.maybeUnblockLocked() } if b.Length() == 0 { if o.testingKnobs.nextTestInducedErrorCb != nil { if err := o.testingKnobs.nextTestInducedErrorCb(); err != nil { o.nextErrorLocked(err) } } // This is the last batch. closeLocked will set done to protect against // further calls to Next since this is allowed by the interface as well as // cleaning up and releasing possible disk infrastructure. o.closeLocked(o.Ctx) } return b } func (o *routerOutputOp) DrainMeta() []execinfrapb.ProducerMetadata { o.mu.Lock() o.mu.state = routerOutputOpDraining o.maybeUnblockLocked() o.mu.Unlock() return o.drainCoordinator.drainMeta() } func (o *routerOutputOp) initWithHashRouter(r *HashRouter) { o.input = r o.drainCoordinator = r } func (o *routerOutputOp) closeLocked(ctx context.Context) { o.mu.state = routerOutputOpDraining if err := o.mu.data.Close(ctx); err != nil { // This log message is Info instead of Warning because the flow will also // attempt to clean up the parent directory, so this failure might not have // any effect. log.Infof(ctx, "error closing vectorized hash router output, files may be left over: %s", err) } } // cancel wakes up a reader in Next if there is one and results in the output // returning zero length batches for every Next call after cancel. Note that // all accumulated data that hasn't been read will not be returned. 
func (o *routerOutputOp) cancel(ctx context.Context, err error) { o.mu.Lock() defer o.mu.Unlock() o.closeLocked(ctx) o.forwardErrLocked(err) // Some goroutine might be waiting on the condition variable, so wake it up. // Note that read goroutines check o.mu.done, so won't wait on the condition // variable after we unlock the mutex. o.mu.cond.Signal() } func (o *routerOutputOp) forwardErrLocked(err error) { if err != nil { o.mu.forwardedErr = err } } func (o *routerOutputOp) forwardErr(err error) { o.mu.Lock() defer o.mu.Unlock() o.forwardErrLocked(err) o.mu.cond.Signal() } // addBatch copies the batch (according to its selection vector) into an // internal buffer. Zero-length batch should be passed-in to indicate that no // more batches will be added. // TODO(asubiotto): We should explore pipelining addBatch if disk-spilling // performance becomes a concern. The main router goroutine will be writing to // disk as the code is written, meaning that we impact the performance of // writing rows to a fast output if we have to write to disk for a single // slow output. func (o *routerOutputOp) addBatch(ctx context.Context, batch coldata.Batch) bool { o.mu.Lock() defer o.mu.Unlock() switch o.mu.state { case routerOutputDoneAdding: colexecerror.InternalError(errors.AssertionFailedf("a batch was added to routerOutput in DoneAdding state")) case routerOutputOpDraining: // This output is draining, discard any data. return false } o.mu.numUnread += batch.Length() o.mu.data.Enqueue(ctx, batch) if o.testingKnobs.addBatchTestInducedErrorCb != nil { if err := o.testingKnobs.addBatchTestInducedErrorCb(); err != nil { colexecerror.InternalError(err) } } if batch.Length() == 0 { o.mu.state = routerOutputDoneAdding o.mu.cond.Signal() return false } stateChanged := false if o.mu.numUnread > o.testingKnobs.blockedThreshold && !o.mu.blocked { // The output is now blocked. o.mu.blocked = true stateChanged = true } o.mu.cond.Signal() return stateChanged } // maybeUnblockLocked unblocks the router output if it is in a blocked state. If the // output was previously in a blocked state, an event will be sent on // routerOutputOp.unblockedEventsChan. func (o *routerOutputOp) maybeUnblockLocked() { if o.mu.blocked { o.mu.blocked = false o.unblockedEventsChan <- struct{}{} } } // resetForTests resets the routerOutputOp for a test or benchmark run. func (o *routerOutputOp) resetForTests(ctx context.Context) { o.mu.Lock() defer o.mu.Unlock() o.mu.state = routerOutputOpRunning o.mu.forwardedErr = nil o.mu.data.Reset(ctx) o.mu.numUnread = 0 o.mu.blocked = false } // hashRouterDrainState is a state that specifically describes the hashRouter's // state in the draining process. This differs from its "general" state. For // example, a hash router can have drained and exited the Run method but still // be in hashRouterDrainStateRunning until somebody calls drainMeta. type hashRouterDrainState int const ( // hashRouterDrainStateRunning is the state that a hashRouter is in when // running normally (i.e. pulling and pushing batches). hashRouterDrainStateRunning = iota // hashRouterDrainStateRequested is the state that a hashRouter is in when // either all outputs have called drainMeta or an error was encountered by one // of the outputs. hashRouterDrainStateRequested // hashRouterDrainStateCompleted is the state that a hashRouter is in when // draining has completed. hashRouterDrainStateCompleted ) // HashRouter hashes values according to provided hash columns and computes a // destination for each row. 
These destinations are exposed as Operators // returned by the constructor. type HashRouter struct { colexecop.OneInputNode // inputMetaInfo contains all of the meta components that the hash router // is responsible for. Root field is exactly the same as OneInputNode.Input. inputMetaInfo colexecargs.OpWithMetaInfo // hashCols is a slice of indices of the columns used for hashing. hashCols []uint32 // One output for each stream. outputs []routerOutput // unblockedEventsChan is a channel shared between the HashRouter and its // outputs. outputs send events on this channel when they are unblocked by a // read. unblockedEventsChan <-chan struct{} numBlockedOutputs int bufferedMeta []execinfrapb.ProducerMetadata // atomics is shared state between the Run goroutine and any routerOutput // goroutines that call drainMeta. atomics struct { // drainState is the state the hashRouter is in. The Run goroutine should // only ever read these states, never set them. drainState int32 numDrainedOutputs int32 } // waitForMetadata is a channel that the last output to drain will read from // to pass on any metadata buffered through the Run goroutine. waitForMetadata chan []execinfrapb.ProducerMetadata // tupleDistributor is used to decide to which output a particular tuple // should be routed. tupleDistributor *colexechash.TupleHashDistributor } // NewHashRouter creates a new hash router that consumes coldata.Batches from // input and hashes each row according to hashCols to one of the outputs // returned as Operators. // The number of allocators provided will determine the number of outputs // returned. Note that each allocator must be unlimited, memory will be limited // by comparing memory use in the allocator with the memoryLimit argument. Each // Operator must have an independent allocator (this means that each allocator // should be linked to an independent mem account) as Operator.Next will usually // be called concurrently between different outputs. Similarly, each output // needs to have a separate disk account. func NewHashRouter( unlimitedAllocators []*colmem.Allocator, input colexecargs.OpWithMetaInfo, types []*types.T, hashCols []uint32, memoryLimit int64, diskQueueCfg colcontainer.DiskQueueCfg, fdSemaphore semaphore.Semaphore, diskAccounts []*mon.BoundAccount, ) (*HashRouter, []colexecop.DrainableOperator) { if diskQueueCfg.CacheMode != colcontainer.DiskQueueCacheModeDefault { colexecerror.InternalError(errors.Errorf("hash router instantiated with incompatible disk queue cache mode: %d", diskQueueCfg.CacheMode)) } outputs := make([]routerOutput, len(unlimitedAllocators)) outputsAsOps := make([]colexecop.DrainableOperator, len(unlimitedAllocators)) // unblockEventsChan is buffered to 2*numOutputs as we don't want the outputs // writing to it to block. // Unblock events only happen after a corresponding block event. Since these // are state changes and are done under lock (including the output sending // on the channel, which is why we want the channel to be buffered in the // first place), every time the HashRouter blocks an output, it *must* read // all unblock events preceding it since these *must* be on the channel. 
unblockEventsChan := make(chan struct{}, 2*len(unlimitedAllocators)) memoryLimitPerOutput := memoryLimit / int64(len(unlimitedAllocators)) for i := range unlimitedAllocators { op := newRouterOutputOp( routerOutputOpArgs{ types: types, unlimitedAllocator: unlimitedAllocators[i], memoryLimit: memoryLimitPerOutput, diskAcc: diskAccounts[i], cfg: diskQueueCfg, fdSemaphore: fdSemaphore, unblockedEventsChan: unblockEventsChan, }, ) outputs[i] = op outputsAsOps[i] = op } return newHashRouterWithOutputs(input, hashCols, unblockEventsChan, outputs), outputsAsOps } func newHashRouterWithOutputs( input colexecargs.OpWithMetaInfo, hashCols []uint32, unblockEventsChan <-chan struct{}, outputs []routerOutput, ) *HashRouter { r := &HashRouter{ OneInputNode: colexecop.NewOneInputNode(input.Root), inputMetaInfo: input, hashCols: hashCols, outputs: outputs, unblockedEventsChan: unblockEventsChan, // waitForMetadata is a buffered channel to avoid blocking if nobody will // read the metadata. waitForMetadata: make(chan []execinfrapb.ProducerMetadata, 1), tupleDistributor: colexechash.NewTupleHashDistributor(colexechash.DefaultInitHashValue, len(outputs)), } for i := range outputs { outputs[i].initWithHashRouter(r) } return r } // cancelOutputs cancels all outputs and forwards the given error to all of // them if non-nil. The only case where the error is not forwarded is if no // output could be canceled due to an error. In this case each output will // forward the error returned during cancellation. func (r *HashRouter) cancelOutputs(ctx context.Context, errToForward error) { for _, o := range r.outputs { if err := colexecerror.CatchVectorizedRuntimeError(func() { o.cancel(ctx, errToForward) }); err != nil { // If there was an error canceling this output, this error can be // forwarded to whoever is calling Next. o.forwardErr(err) } } } func (r *HashRouter) setDrainState(drainState hashRouterDrainState) { atomic.StoreInt32(&r.atomics.drainState, int32(drainState)) } func (r *HashRouter) getDrainState() hashRouterDrainState { return hashRouterDrainState(atomic.LoadInt32(&r.atomics.drainState)) } // Run runs the HashRouter. Batches are read from the input and pushed to an // output calculated by hashing columns. Cancel the given context to terminate // early. func (r *HashRouter) Run(ctx context.Context) { var span *tracing.Span ctx, span = execinfra.ProcessorSpan(ctx, "hash router") if span != nil { defer span.Finish() } // Since HashRouter runs in a separate goroutine, we want to be safe and // make sure that we catch errors in all code paths, so we wrap the whole // method with a catcher. Note that we also have "internal" catchers as // well for more fine-grained control of error propagation. if err := colexecerror.CatchVectorizedRuntimeError(func() { r.Input.Init(ctx) var done bool processNextBatch := func() { done = r.processNextBatch(ctx) } for { if r.getDrainState() != hashRouterDrainStateRunning { break } // Check for cancellation. select { case <-ctx.Done(): r.cancelOutputs(ctx, ctx.Err()) return default: } // Read all the routerOutput state changes that have happened since the // last iteration. for moreToRead := true; moreToRead; { select { case <-r.unblockedEventsChan: r.numBlockedOutputs-- default: // No more routerOutput state changes to read without blocking. moreToRead = false } } if r.numBlockedOutputs == len(r.outputs) { // All outputs are blocked, wait until at least one output is unblocked. 
select { case <-r.unblockedEventsChan: r.numBlockedOutputs-- case <-ctx.Done(): r.cancelOutputs(ctx, ctx.Err()) return } } if err := colexecerror.CatchVectorizedRuntimeError(processNextBatch); err != nil { r.cancelOutputs(ctx, err) return } if done { // The input was done and we have notified the routerOutputs that there // is no more data. return } } }); err != nil { r.cancelOutputs(ctx, err) } if span != nil { for _, s := range r.inputMetaInfo.StatsCollectors { span.RecordStructured(s.GetStats()) } if meta := execinfra.GetTraceDataAsMetadata(span); meta != nil { r.bufferedMeta = append(r.bufferedMeta, *meta) } } r.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...) // Non-blocking send of metadata so that one of the outputs can return it // in DrainMeta. r.waitForMetadata <- r.bufferedMeta close(r.waitForMetadata) r.inputMetaInfo.ToClose.CloseAndLogOnErr(ctx, "hash router") } // processNextBatch reads the next batch from its input, hashes it and adds // each column to its corresponding output, returning whether the input is // done. func (r *HashRouter) processNextBatch(ctx context.Context) bool { b := r.Input.Next() n := b.Length() if n == 0 { // Done. Push an empty batch to outputs to tell them the data is done as // well. for _, o := range r.outputs { o.addBatch(ctx, b) } return true } // It is ok that we call Init() on every batch since all calls except for // the first one are noops. r.tupleDistributor.Init(ctx) selections := r.tupleDistributor.Distribute(b, r.hashCols) for i, o := range r.outputs { if len(selections[i]) > 0 { b.SetSelection(true) copy(b.Selection(), selections[i]) b.SetLength(len(selections[i])) if o.addBatch(ctx, b) { // This batch blocked the output. r.numBlockedOutputs++ } } } return false } // resetForTests resets the HashRouter for a test or benchmark run. func (r *HashRouter) resetForTests(ctx context.Context) { if i, ok := r.Input.(colexecop.Resetter); ok { i.Reset(ctx) } r.setDrainState(hashRouterDrainStateRunning) r.waitForMetadata = make(chan []execinfrapb.ProducerMetadata, 1) r.atomics.numDrainedOutputs = 0 r.bufferedMeta = nil r.numBlockedOutputs = 0 for moreToRead := true; moreToRead; { select { case <-r.unblockedEventsChan: default: moreToRead = false } } for _, o := range r.outputs { o.resetForTests(ctx) } } func (r *HashRouter) encounteredError(ctx context.Context) { // Once one output returns an error the hash router needs to stop running // and drain its input. r.setDrainState(hashRouterDrainStateRequested) // cancel all outputs. The Run goroutine will eventually realize that the // HashRouter is done and exit without draining. r.cancelOutputs(ctx, nil /* errToForward */) } func (r *HashRouter) drainMeta() []execinfrapb.ProducerMetadata { if int(atomic.AddInt32(&r.atomics.numDrainedOutputs, 1)) != len(r.outputs) { return nil } // All outputs have been drained, return any buffered metadata to the last // output to call drainMeta. r.setDrainState(hashRouterDrainStateRequested) meta := <-r.waitForMetadata r.setDrainState(hashRouterDrainStateCompleted) return meta }
pkg/sql/colflow/routers.go
1
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.977655291557312, 0.014545955695211887, 0.0001641370909055695, 0.00020428163406904787, 0.115120068192482 ]
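The HashRouter in the record above hashes each row on the routing columns and hands it to one of N outputs, blocking when an output's buffer fills. Below is a stripped-down Go sketch of just the routing step, with a plain FNV hash standing in for the vectorized tuple distributor; all names and sample rows are illustrative only.

package main

import (
    "fmt"
    "hash/fnv"
)

// route picks the output index for a row key: hash the routing columns,
// then take the result modulo the number of outputs.
func route(key string, numOutputs int) int {
    h := fnv.New32a()
    h.Write([]byte(key))
    return int(h.Sum32()) % numOutputs
}

func main() {
    outputs := make([][]string, 3)
    for _, row := range []string{"apple", "pear", "plum", "fig"} {
        i := route(row, len(outputs))
        outputs[i] = append(outputs[i], row)
    }
    for i, out := range outputs {
        fmt.Printf("output %d: %v\n", i, out)
    }
}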
{ "id": 9, "code_window": [ "\tif span != nil {\n", "\t\tdefer span.Finish()\n", "\t}\n", "\t// Since HashRouter runs in a separate goroutine, we want to be safe and\n", "\t// make sure that we catch errors in all code paths, so we wrap the whole\n", "\t// method with a catcher. Note that we also have \"internal\" catchers as\n", "\t// well for more fine-grained control of error propagation.\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tvar inputInitialized bool\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "add", "edit_start_line_idx": 552 }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package diagnostics_test import ( "context" gosql "database/sql" "fmt" "testing" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/ccl/kvccl/kvtenantccl" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/diagnostics" "github.com/cockroachdb/cockroach/pkg/server/diagnostics/diagnosticspb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/diagutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/util/cloudinfo" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/system" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) // Dummy import to pull in kvtenantccl. This allows us to start tenants. var _ = kvtenantccl.Connector{} const elemName = "somestring" func TestTenantReport(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) rt := startReporterTest(t) defer rt.Close() tenantArgs := base.TestTenantArgs{ TenantID: roachpb.MakeTenantID(security.EmbeddedTenantIDs()[0]), AllowSettingClusterSettings: true, TestingKnobs: rt.testingKnobs, } tenant, tenantDB := serverutils.StartTenant(t, rt.server, tenantArgs) reporter := tenant.DiagnosticsReporter().(*diagnostics.Reporter) ctx := context.Background() setupCluster(t, tenantDB) // Clear the SQL stat pool before getting diagnostics. rt.server.SQLServer().(*sql.Server).ResetSQLStats(ctx) reporter.ReportDiagnostics(ctx) require.Equal(t, 1, rt.diagServer.NumRequests()) last := rt.diagServer.LastRequestData() require.Equal(t, rt.server.ClusterID().String(), last.UUID) require.Equal(t, tenantArgs.TenantID.String(), last.TenantID) require.Equal(t, "", last.NodeID) require.Equal(t, tenant.SQLInstanceID().String(), last.SQLInstanceID) require.Equal(t, "true", last.Internal) // Verify environment. verifyEnvironment(t, "", roachpb.Locality{}, &last.Env) // Verify SQL info. require.Equal(t, tenant.SQLInstanceID(), last.SQL.SQLInstanceID) // Verify FeatureUsage. require.NotZero(t, len(last.FeatureUsage)) // Call PeriodicallyReportDiagnostics and ensure it sends out a report. reporter.PeriodicallyReportDiagnostics(ctx, rt.server.Stopper()) testutils.SucceedsSoon(t, func() error { if rt.diagServer.NumRequests() != 2 { return errors.Errorf("did not receive a diagnostics report") } return nil }) } // TestServerReport checks nodes, stores, localities, and zone configs. // Telemetry metrics are checked in datadriven tests (see sql.TestTelemetry). 
func TestServerReport(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) rt := startReporterTest(t) defer rt.Close() ctx := context.Background() setupCluster(t, rt.serverDB) for _, cmd := range []struct { resource string config string }{ {"TABLE system.rangelog", fmt.Sprintf(`constraints: [+zone=%[1]s, +%[1]s]`, elemName)}, {"TABLE system.rangelog", `{gc: {ttlseconds: 1}}`}, {"DATABASE system", `num_replicas: 5`}, {"DATABASE system", fmt.Sprintf(`constraints: {"+zone=%[1]s,+%[1]s": 2, +%[1]s: 1}`, elemName)}, {"DATABASE system", fmt.Sprintf(`experimental_lease_preferences: [[+zone=%[1]s,+%[1]s], [+%[1]s]]`, elemName)}, } { testutils.SucceedsSoon(t, func() error { if _, err := rt.serverDB.Exec( fmt.Sprintf(`ALTER %s CONFIGURE ZONE = '%s'`, cmd.resource, cmd.config), ); err != nil { // Work around gossip asynchronicity. return errors.Errorf("error applying zone config %q to %q: %v", cmd.config, cmd.resource, err) } return nil }) } expectedUsageReports := 0 clusterSecret := sql.ClusterSecret.Get(&rt.settings.SV) testutils.SucceedsSoon(t, func() error { expectedUsageReports++ node := rt.server.MetricsRecorder().GenerateNodeStatus(ctx) // Clear the SQL stat pool before getting diagnostics. rt.server.SQLServer().(*sql.Server).ResetSQLStats(ctx) rt.server.DiagnosticsReporter().(*diagnostics.Reporter).ReportDiagnostics(ctx) keyCounts := make(map[roachpb.StoreID]int64) rangeCounts := make(map[roachpb.StoreID]int64) totalKeys := int64(0) totalRanges := int64(0) for _, store := range node.StoreStatuses { keys, ok := store.Metrics["keycount"] require.True(t, ok, "keycount not in metrics") totalKeys += int64(keys) keyCounts[store.Desc.StoreID] = int64(keys) replicas, ok := store.Metrics["replicas"] require.True(t, ok, "replicas not in metrics") totalRanges += int64(replicas) rangeCounts[store.Desc.StoreID] = int64(replicas) } require.Equal(t, expectedUsageReports, rt.diagServer.NumRequests()) last := rt.diagServer.LastRequestData() if minExpected, actual := totalKeys, last.Node.KeyCount; minExpected > actual { return errors.Errorf("expected node keys at least %v got %v", minExpected, actual) } if minExpected, actual := totalRanges, last.Node.RangeCount; minExpected > actual { return errors.Errorf("expected node ranges at least %v got %v", minExpected, actual) } if minExpected, actual := len(rt.serverArgs.StoreSpecs), len(last.Stores); minExpected > actual { return errors.Errorf("expected at least %v stores got %v", minExpected, actual) } for _, store := range last.Stores { if minExpected, actual := keyCounts[store.StoreID], store.KeyCount; minExpected > actual { return errors.Errorf("expected at least %v keys in store %v got %v", minExpected, store.StoreID, actual) } if minExpected, actual := rangeCounts[store.StoreID], store.RangeCount; minExpected > actual { return errors.Errorf("expected at least %v ranges in store %v got %v", minExpected, store.StoreID, actual) } } return nil }) last := rt.diagServer.LastRequestData() require.Equal(t, rt.server.ClusterID().String(), last.UUID) require.Equal(t, "system", last.TenantID) require.Equal(t, rt.server.NodeID().String(), last.NodeID) require.Equal(t, rt.server.NodeID().String(), last.SQLInstanceID) require.Equal(t, "true", last.Internal) // Verify environment. verifyEnvironment(t, clusterSecret, rt.serverArgs.Locality, &last.Env) // This check isn't clean, since the body is a raw proto binary and thus could // easily contain some encoded form of elemName, but *if* it ever does fail, // that is probably very interesting. 
require.NotContains(t, last.RawReportBody, elemName) // 3 + 3 = 6: set 3 initially and org is set mid-test for 3 altered settings, // plus version, reporting and secret settings are set in startup // migrations. expected, actual := 6, len(last.AlteredSettings) require.Equal(t, expected, actual, "expected %d changed settings, got %d: %v", expected, actual, last.AlteredSettings) for key, expected := range map[string]string{ "cluster.organization": "<redacted>", "diagnostics.reporting.send_crash_reports": "false", "server.time_until_store_dead": "1m30s", "version": clusterversion.TestingBinaryVersion.String(), "cluster.secret": "<redacted>", } { got, ok := last.AlteredSettings[key] require.True(t, ok, "expected report of altered setting %q", key) require.Equal(t, expected, got, "expected reported value of setting %q to be %q not %q", key, expected, got) } // Verify that we receive the four auto-populated zone configs plus the two // modified above, and that their values are as expected. for _, expectedID := range []int64{ keys.RootNamespaceID, keys.LivenessRangesID, keys.MetaRangesID, keys.RangeEventTableID, keys.SystemDatabaseID, } { _, ok := last.ZoneConfigs[expectedID] require.True(t, ok, "didn't find expected ID %d in reported ZoneConfigs: %+v", expectedID, last.ZoneConfigs) } hashedElemName := sql.HashForReporting(clusterSecret, elemName) hashedZone := sql.HashForReporting(clusterSecret, "zone") for id, zone := range last.ZoneConfigs { if id == keys.RootNamespaceID { require.Equal(t, zone, *rt.server.ExecutorConfig().(sql.ExecutorConfig).DefaultZoneConfig) } if id == keys.RangeEventTableID { require.Equal(t, int32(1), zone.GC.TTLSeconds) constraints := []zonepb.ConstraintsConjunction{ { Constraints: []zonepb.Constraint{ {Key: hashedZone, Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}, {Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}, }, }, } require.Equal(t, zone.Constraints, constraints) } if id == keys.SystemDatabaseID { constraints := []zonepb.ConstraintsConjunction{ { NumReplicas: 1, Constraints: []zonepb.Constraint{{Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}}, }, { NumReplicas: 2, Constraints: []zonepb.Constraint{ {Key: hashedZone, Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}, {Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}, }, }, } require.Equal(t, constraints, zone.Constraints) prefs := []zonepb.LeasePreference{ { Constraints: []zonepb.Constraint{ {Key: hashedZone, Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}, {Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}, }, }, { Constraints: []zonepb.Constraint{{Value: hashedElemName, Type: zonepb.Constraint_REQUIRED}}, }, } require.Equal(t, prefs, zone.LeasePreferences) } } } func TestUsageQuantization(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) defer cloudinfo.Disable()() skip.UnderRace(t, "takes >1min under race") r := diagutils.NewServer() defer r.Close() st := cluster.MakeTestingClusterSettings() ctx := context.Background() url := r.URL() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{ Settings: st, Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ DiagnosticsTestingKnobs: diagnostics.TestingKnobs{ OverrideReportingURL: &url, }, }, }, }) defer s.Stopper().Stop(ctx) ts := s.(*server.TestServer) // Disable periodic reporting so it doesn't interfere with the test. 
if _, err := db.Exec(`SET CLUSTER SETTING diagnostics.reporting.enabled = false`); err != nil { t.Fatal(err) } if _, err := db.Exec(`SET application_name = 'test'`); err != nil { t.Fatal(err) } // Issue some queries against the test app name. for i := 0; i < 8; i++ { _, err := db.Exec(`SELECT 1`) require.NoError(t, err) } // Between 10 and 100 queries is quantized to 10. for i := 0; i < 30; i++ { _, err := db.Exec(`SELECT 1,2`) require.NoError(t, err) } // Between 100 and 10000 gets quantized to 100. for i := 0; i < 200; i++ { _, err := db.Exec(`SELECT 1,2,3`) require.NoError(t, err) } // Above 10000 gets quantized to 10000. for i := 0; i < 10010; i++ { _, err := db.Exec(`SHOW application_name`) require.NoError(t, err) } // Flush the SQL stat pool. ts.SQLServer().(*sql.Server).ResetSQLStats(ctx) // Collect a round of statistics. ts.DiagnosticsReporter().(*diagnostics.Reporter).ReportDiagnostics(ctx) // The stats "hide" the application name by hashing it. To find the // test app name, we need to hash the ref string too prior to the // comparison. clusterSecret := sql.ClusterSecret.Get(&st.SV) hashedAppName := sql.HashForReporting(clusterSecret, "test") require.NotEqual(t, sql.FailedHashedValue, hashedAppName, "expected hashedAppName to not be 'unknown'") testData := []struct { query string expectedCount int64 }{ {`SELECT _`, 8}, {`SELECT _, _`, 10}, {`SELECT _, _, _`, 100}, {`SHOW application_name`, 10000}, } last := r.LastRequestData() for _, test := range testData { found := false for _, s := range last.SqlStats { if s.Key.App == hashedAppName && s.Key.Query == test.query { require.Equal(t, test.expectedCount, s.Stats.Count, "quantization incorrect for query %q", test.query) found = true break } } if !found { t.Errorf("query %q missing from stats", test.query) } } } type reporterTest struct { cloudEnable func() settings *cluster.Settings diagServer *diagutils.Server testingKnobs base.TestingKnobs serverArgs base.TestServerArgs server serverutils.TestServerInterface serverDB *gosql.DB } func (t *reporterTest) Close() { t.cloudEnable() t.diagServer.Close() // stopper will wait for the update/report loop to finish too. t.server.Stopper().Stop(context.Background()) } func startReporterTest(t *testing.T) *reporterTest { // Disable cloud info reporting, since it slows down tests. rt := &reporterTest{ cloudEnable: cloudinfo.Disable(), settings: cluster.MakeTestingClusterSettings(), diagServer: diagutils.NewServer(), } url := rt.diagServer.URL() rt.testingKnobs = base.TestingKnobs{ SQLLeaseManager: &lease.ManagerTestingKnobs{ // Disable SELECT called for delete orphaned leases to keep // query stats stable. DisableDeleteOrphanedLeases: true, }, Server: &server.TestingKnobs{ DiagnosticsTestingKnobs: diagnostics.TestingKnobs{ OverrideReportingURL: &url, }, }, } storeSpec := base.DefaultTestStoreSpec storeSpec.Attributes = roachpb.Attributes{Attrs: []string{elemName}} rt.serverArgs = base.TestServerArgs{ StoreSpecs: []base.StoreSpec{ storeSpec, base.DefaultTestStoreSpec, }, Settings: rt.settings, Locality: roachpb.Locality{ Tiers: []roachpb.Tier{ {Key: "region", Value: "east"}, {Key: "zone", Value: elemName}, {Key: "state", Value: "ny"}, {Key: "city", Value: "nyc"}, }, }, Knobs: rt.testingKnobs, } rt.server, rt.serverDB, _ = serverutils.StartServer(t, rt.serverArgs) // Make sure the test's generated activity is the only activity we measure. 
telemetry.GetFeatureCounts(telemetry.Raw, telemetry.ResetCounts) return rt } func setupCluster(t *testing.T, db *gosql.DB) { _, err := db.Exec(`SET CLUSTER SETTING server.time_until_store_dead = '90s'`) require.NoError(t, err) // Enable diagnostics reporting to test PeriodicallyReportDiagnostics. _, err = db.Exec(`SET CLUSTER SETTING diagnostics.reporting.enabled = true`) require.NoError(t, err) _, err = db.Exec(`SET CLUSTER SETTING diagnostics.reporting.send_crash_reports = false`) require.NoError(t, err) _, err = db.Exec(fmt.Sprintf(`CREATE DATABASE %s`, elemName)) require.NoError(t, err) // Set cluster to an internal testing cluster q := `SET CLUSTER SETTING cluster.organization = 'Cockroach Labs - Production Testing'` _, err = db.Exec(q) require.NoError(t, err) } func verifyEnvironment( t *testing.T, secret string, locality roachpb.Locality, env *diagnosticspb.Environment, ) { require.NotEqual(t, 0, env.Hardware.Mem.Total) require.NotEqual(t, 0, env.Hardware.Mem.Available) require.Equal(t, int32(system.NumCPU()), env.Hardware.Cpu.Numcpu) require.NotEqual(t, 0, env.Hardware.Cpu.Sockets) require.NotEqual(t, 0.0, env.Hardware.Cpu.Mhz) require.NotEqual(t, 0.0, env.Os.Platform) require.NotEmpty(t, env.Build.Tag) require.NotEmpty(t, env.Build.Distribution) require.NotEmpty(t, env.LicenseType) require.Equal(t, len(locality.Tiers), len(env.Locality.Tiers)) for i := range locality.Tiers { require.Equal(t, sql.HashForReporting(secret, locality.Tiers[i].Key), env.Locality.Tiers[i].Key) require.Equal(t, sql.HashForReporting(secret, locality.Tiers[i].Value), env.Locality.Tiers[i].Value) } }
pkg/server/diagnostics/reporter_test.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00018447752518113703, 0.00016966935072559863, 0.00016094156308099627, 0.00016930050333030522, 0.0000036305982575868256 ]
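TestUsageQuantization in the record above asserts that reported query counts are rounded down to coarse buckets (exact below 10, then 10, 100, and 10000) so telemetry only reveals an order of magnitude. Here is a tiny Go sketch of a quantizer matching the buckets the test expects; the real implementation lives in the SQL stats code and may differ in detail.

package main

import "fmt"

// quantize rounds a count down to the telemetry buckets asserted in the
// test: exact below 10, then 10, 100, and 10000.
func quantize(n int64) int64 {
    switch {
    case n < 10:
        return n
    case n < 100:
        return 10
    case n < 10000:
        return 100
    default:
        return 10000
    }
}

func main() {
    for _, n := range []int64{8, 30, 200, 10010} {
        fmt.Printf("%d -> %d\n", n, quantize(n))
    }
}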
{ "id": 9, "code_window": [ "\tif span != nil {\n", "\t\tdefer span.Finish()\n", "\t}\n", "\t// Since HashRouter runs in a separate goroutine, we want to be safe and\n", "\t// make sure that we catch errors in all code paths, so we wrap the whole\n", "\t// method with a catcher. Note that we also have \"internal\" catchers as\n", "\t// well for more fine-grained control of error propagation.\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tvar inputInitialized bool\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "add", "edit_start_line_idx": 552 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package main func registerTests(r *testRegistry) { // Helpful shell pipeline to generate the list below: // // grep -h -E 'func register[^(]+\(.*testRegistry\) {' pkg/cmd/roachtest/*.go | grep -E -o 'register[^(]+' | grep -E -v '^register(Tests|Benchmarks)$' | grep -v '^\w*Bench$' | sort -f | awk '{printf "\t%s(r)\n", $0}' registerAcceptance(r) registerActiveRecord(r) registerAllocator(r) registerAlterPK(r) registerAutoUpgrade(r) registerBackup(r) registerBackupNodeShutdown(r) registerCancel(r) registerCDC(r) registerClearRange(r) registerClockJumpTests(r) registerClockMonotonicTests(r) registerConnectionLatencyTest(r) registerCopy(r) registerDecommission(r) registerDiskFull(r) registerDiskStalledDetection(r) registerDjango(r) registerDrop(r) registerElectionAfterRestart(r) registerEncryption(r) registerEngineSwitch(r) registerFlowable(r) registerFollowerReads(r) registerGopg(r) registerGossip(r) registerGORM(r) registerHibernate(r, hibernateOpts) registerHibernate(r, hibernateSpatialOpts) registerHotSpotSplits(r) registerImportDecommissioned(r) registerImportMixedVersion(r) registerImportTPCC(r) registerImportTPCH(r) registerImportNodeShutdown(r) registerInconsistency(r) registerIndexes(r) registerInterleaved(r) registerJepsen(r) registerJobsMixedVersions(r) registerKV(r) registerKVContention(r) registerKVQuiescenceDead(r) registerKVGracefulDraining(r) registerKVScalability(r) registerKVSplits(r) registerKVRangeLookups(r) registerLargeRange(r) registerLedger(r) registerLibPQ(r) registerLiquibase(r) registerNamespaceUpgradeMigration(r) registerNetwork(r) registerPebble(r) registerPgjdbc(r) registerPgx(r) registerNodeJSPostgres(r) registerPsycopg(r) registerQueue(r) registerQuitAllNodes(r) registerQuitTransfersLeases(r) registerRebalanceLoad(r) registerReplicaGC(r) registerRestart(r) registerRestoreNodeShutdown(r) registerRestore(r) registerRoachmart(r) registerScaleData(r) registerSchemaChangeBulkIngest(r) registerSchemaChangeDatabaseVersionUpgrade(r) registerSchemaChangeDuringKV(r) registerSchemaChangeIndexTPCC100(r) registerSchemaChangeIndexTPCC1000(r) registerSchemaChangeDuringTPCC1000(r) registerSchemaChangeInvertedIndex(r) registerSchemaChangeMixedVersions(r) registerSchemaChangeRandomLoad(r) registerScrubAllChecksTPCC(r) registerScrubIndexOnlyTPCC(r) registerSecondaryIndexesMultiVersionCluster(r) registerSequelize(r) registerSequenceUpgrade(r) registerSQLAlchemy(r) registerSQLSmith(r) registerSyncTest(r) registerSysbench(r) registerTPCC(r) registerTPCDSVec(r) registerTPCE(r) registerTPCHVec(r) registerKVBench(r) registerTypeORM(r) registerLoadSplits(r) registerVersion(r) registerYCSB(r) registerTPCHBench(r) registerOverload(r) } func registerBenchmarks(r *testRegistry) { // Helpful shell pipeline to generate the list below: // // grep -h -E 'func register[^(]+\(.*registry\) {' *.go | grep -E -o 'register[^(]+' | grep -v '^registerTests$' | grep '^\w*Bench$' | sort | awk '{printf "\t%s(r)\n", $0}' registerIndexesBench(r) registerTPCCBench(r) registerKVBench(r) registerTPCHBench(r) }
pkg/cmd/roachtest/registry.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00017837552877608687, 0.00017378470511175692, 0.0001718849380267784, 0.00017376137839164585, 0.0000016587407571933 ]
{ "id": 9, "code_window": [ "\tif span != nil {\n", "\t\tdefer span.Finish()\n", "\t}\n", "\t// Since HashRouter runs in a separate goroutine, we want to be safe and\n", "\t// make sure that we catch errors in all code paths, so we wrap the whole\n", "\t// method with a catcher. Note that we also have \"internal\" catchers as\n", "\t// well for more fine-grained control of error propagation.\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tvar inputInitialized bool\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "add", "edit_start_line_idx": 552 }
// Code generated by MockGen. DO NOT EDIT. // Source: cert.go // Package certmgr is a generated GoMock package. package certmgr import ( context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" ) // MockCert is a mock of Cert interface. type MockCert struct { ctrl *gomock.Controller recorder *MockCertMockRecorder } // MockCertMockRecorder is the mock recorder for MockCert. type MockCertMockRecorder struct { mock *MockCert } // NewMockCert creates a new mock instance. func NewMockCert(ctrl *gomock.Controller) *MockCert { mock := &MockCert{ctrl: ctrl} mock.recorder = &MockCertMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockCert) EXPECT() *MockCertMockRecorder { return m.recorder } // ClearErr mocks base method. func (m *MockCert) ClearErr() { m.ctrl.T.Helper() m.ctrl.Call(m, "ClearErr") } // ClearErr indicates an expected call of ClearErr. func (mr *MockCertMockRecorder) ClearErr() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClearErr", reflect.TypeOf((*MockCert)(nil).ClearErr)) } // Err mocks base method. func (m *MockCert) Err() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Err") ret0, _ := ret[0].(error) return ret0 } // Err indicates an expected call of Err. func (mr *MockCertMockRecorder) Err() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockCert)(nil).Err)) } // Reload mocks base method. func (m *MockCert) Reload(ctx context.Context) { m.ctrl.T.Helper() m.ctrl.Call(m, "Reload", ctx) } // Reload indicates an expected call of Reload. func (mr *MockCertMockRecorder) Reload(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reload", reflect.TypeOf((*MockCert)(nil).Reload), ctx) }
pkg/security/certmgr/mocks_generated.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0002226705546490848, 0.00017901984392665327, 0.00016933048027567565, 0.00017242484318558127, 0.000016865236830199137 ]
{ "id": 10, "code_window": [ "\t// make sure that we catch errors in all code paths, so we wrap the whole\n", "\t// method with a catcher. Note that we also have \"internal\" catchers as\n", "\t// well for more fine-grained control of error propagation.\n", "\tif err := colexecerror.CatchVectorizedRuntimeError(func() {\n", "\t\tr.Input.Init(ctx)\n", "\t\tvar done bool\n", "\t\tprocessNextBatch := func() {\n", "\t\t\tdone = r.processNextBatch(ctx)\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\tinputInitialized = true\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "add", "edit_start_line_idx": 558 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colflow import ( "context" "sync" "sync/atomic" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colcontainer" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexechash" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/marusama/semaphore" ) // routerOutput is an interface implemented by router outputs. It exists for // easier test mocking of outputs. type routerOutput interface { execinfra.OpNode // initWithHashRouter passes a reference to the HashRouter that will be // pushing batches to this output. initWithHashRouter(*HashRouter) // addBatch adds the elements specified by the selection vector from batch // to the output. It returns whether or not the output changed its state to // blocked (see implementations). addBatch(context.Context, coldata.Batch) bool // cancel tells the output to stop producing batches. Optionally forwards an // error if not nil. cancel(context.Context, error) // forwardErr forwards an error to the output. The output should call // colexecerror.ExpectedError with this error on the next call to Next. // Calling forwardErr multiple times will result in the most recent error // overwriting the previous error. forwardErr(error) // resetForTests resets the routerOutput for a benchmark or test run. resetForTests(context.Context) } // getDefaultRouterOutputBlockedThreshold returns the number of unread values // buffered by the routerOutputOp after which the output is considered blocked. // It is a function rather than a variable so that in tests we could modify // coldata.BatchSize() (if it were a variable, then its value would be // evaluated before we set the desired batch size). func getDefaultRouterOutputBlockedThreshold() int { return coldata.BatchSize() * 2 } type routerOutputOpState int const ( // routerOutputOpRunning is the state in which routerOutputOp operates // normally. The router output transitions into routerOutputDoneAdding when // a zero-length batch was added or routerOutputOpDraining when it // encounters an error or the drain is requested. routerOutputOpRunning routerOutputOpState = iota // routerOutputDoneAdding is the state in which a zero-length was batch was // added to routerOutputOp and no more batches will be added. The router // output transitions to routerOutputOpDraining when the output is canceled // (either closed or the drain is requested). routerOutputDoneAdding // routerOutputOpDraining is the state in which routerOutputOp always // returns zero-length batches on calls to Next. 
routerOutputOpDraining ) // drainCoordinator is an interface that the HashRouter implements to coordinate // cancellation of all of its outputs in the case of an error and draining in // the case of graceful termination. // WARNING: No locks should be held when calling these methods, as the // HashRouter might call routerOutput methods (e.g. cancel) that attempt to // reacquire locks. type drainCoordinator interface { // encounteredError should be called when a routerOutput encounters an error. // This terminates execution. No locks should be held when calling this // method, since cancellation could occur. encounteredError(context.Context) // drainMeta should be called exactly once when the routerOutput moves to // draining. drainMeta() []execinfrapb.ProducerMetadata } type routerOutputOp struct { colexecop.InitHelper // input is a reference to our router. input execinfra.OpNode // drainCoordinator is a reference to the HashRouter to be able to notify it // if the output encounters an error or transitions to a draining state. drainCoordinator drainCoordinator types []*types.T // unblockedEventsChan is signaled when a routerOutput changes state from // blocked to unblocked. unblockedEventsChan chan<- struct{} mu struct { syncutil.Mutex state routerOutputOpState // forwardedErr is an error that was forwarded by the HashRouter. If set, // any subsequent calls to Next will return this error. forwardedErr error cond *sync.Cond // data is a SpillingQueue, a circular buffer backed by a disk queue. data *colexecutils.SpillingQueue numUnread int blocked bool } testingKnobs routerOutputOpTestingKnobs } func (o *routerOutputOp) ChildCount(verbose bool) int { return 1 } func (o *routerOutputOp) Child(nth int, verbose bool) execinfra.OpNode { if nth == 0 { return o.input } colexecerror.InternalError(errors.AssertionFailedf("invalid index %d", nth)) // This code is unreachable, but the compiler cannot infer that. return nil } var _ colexecop.Operator = &routerOutputOp{} type routerOutputOpTestingKnobs struct { // blockedThreshold is the number of buffered values above which we consider // a router output to be blocked. It defaults to // defaultRouterOutputBlockedThreshold but can be modified by tests to test // edge cases. blockedThreshold int // addBatchTestInducedErrorCb is called after any function call that could // produce an error if that error is nil. If the callback returns an error, // the router output overwrites the nil error with the returned error. // It is guaranteed that this callback will be called at least once during // normal execution. addBatchTestInducedErrorCb func() error // nextTestInducedErrorCb is called after any function call that could // produce an error if that error is nil. If the callback returns an error, // the router output overwrites the nil error with the returned error. // It is guaranteed that this callback will be called at least once during // normal execution. nextTestInducedErrorCb func() error } // routerOutputOpArgs are the arguments to newRouterOutputOp. All fields apart // from the testing knobs are optional. type routerOutputOpArgs struct { // All fields are required unless marked optional. types []*types.T // unlimitedAllocator should not have a memory limit. Pass in a soft // memoryLimit that will be respected instead. unlimitedAllocator *colmem.Allocator // memoryLimit acts as a soft limit to allow the router output to use disk // when it is exceeded. 
memoryLimit int64 diskAcc *mon.BoundAccount cfg colcontainer.DiskQueueCfg fdSemaphore semaphore.Semaphore // unblockedEventsChan must be a buffered channel. unblockedEventsChan chan<- struct{} testingKnobs routerOutputOpTestingKnobs } // newRouterOutputOp creates a new router output. func newRouterOutputOp(args routerOutputOpArgs) *routerOutputOp { if args.testingKnobs.blockedThreshold == 0 { args.testingKnobs.blockedThreshold = getDefaultRouterOutputBlockedThreshold() } o := &routerOutputOp{ types: args.types, unblockedEventsChan: args.unblockedEventsChan, testingKnobs: args.testingKnobs, } o.mu.cond = sync.NewCond(&o.mu) o.mu.data = colexecutils.NewSpillingQueue( &colexecutils.NewSpillingQueueArgs{ UnlimitedAllocator: args.unlimitedAllocator, Types: args.types, MemoryLimit: args.memoryLimit, DiskQueueCfg: args.cfg, FDSemaphore: args.fdSemaphore, DiskAcc: args.diskAcc, }, ) return o } func (o *routerOutputOp) Init(ctx context.Context) { o.InitHelper.Init(ctx) } // nextErrorLocked is a helper method that handles an error encountered in Next. func (o *routerOutputOp) nextErrorLocked(err error) { o.mu.state = routerOutputOpDraining o.maybeUnblockLocked() // Unlock the mutex, since the HashRouter will cancel all outputs. o.mu.Unlock() o.drainCoordinator.encounteredError(o.Ctx) o.mu.Lock() colexecerror.InternalError(err) } // Next returns the next coldata.Batch from the routerOutputOp. Note that Next // is designed for only one concurrent caller and will block until data is // ready. func (o *routerOutputOp) Next() coldata.Batch { o.mu.Lock() defer o.mu.Unlock() for o.mu.forwardedErr == nil && o.mu.state == routerOutputOpRunning && o.mu.data.Empty() { // Wait until there is data to read or the output is canceled. o.mu.cond.Wait() } if o.mu.forwardedErr != nil { colexecerror.ExpectedError(o.mu.forwardedErr) } if o.mu.state == routerOutputOpDraining { return coldata.ZeroBatch } b, err := o.mu.data.Dequeue(o.Ctx) if err == nil && o.testingKnobs.nextTestInducedErrorCb != nil { err = o.testingKnobs.nextTestInducedErrorCb() } if err != nil { o.nextErrorLocked(err) } o.mu.numUnread -= b.Length() if o.mu.numUnread <= o.testingKnobs.blockedThreshold { o.maybeUnblockLocked() } if b.Length() == 0 { if o.testingKnobs.nextTestInducedErrorCb != nil { if err := o.testingKnobs.nextTestInducedErrorCb(); err != nil { o.nextErrorLocked(err) } } // This is the last batch. closeLocked will set done to protect against // further calls to Next since this is allowed by the interface as well as // cleaning up and releasing possible disk infrastructure. o.closeLocked(o.Ctx) } return b } func (o *routerOutputOp) DrainMeta() []execinfrapb.ProducerMetadata { o.mu.Lock() o.mu.state = routerOutputOpDraining o.maybeUnblockLocked() o.mu.Unlock() return o.drainCoordinator.drainMeta() } func (o *routerOutputOp) initWithHashRouter(r *HashRouter) { o.input = r o.drainCoordinator = r } func (o *routerOutputOp) closeLocked(ctx context.Context) { o.mu.state = routerOutputOpDraining if err := o.mu.data.Close(ctx); err != nil { // This log message is Info instead of Warning because the flow will also // attempt to clean up the parent directory, so this failure might not have // any effect. log.Infof(ctx, "error closing vectorized hash router output, files may be left over: %s", err) } } // cancel wakes up a reader in Next if there is one and results in the output // returning zero length batches for every Next call after cancel. Note that // all accumulated data that hasn't been read will not be returned. 
func (o *routerOutputOp) cancel(ctx context.Context, err error) { o.mu.Lock() defer o.mu.Unlock() o.closeLocked(ctx) o.forwardErrLocked(err) // Some goroutine might be waiting on the condition variable, so wake it up. // Note that read goroutines check o.mu.done, so won't wait on the condition // variable after we unlock the mutex. o.mu.cond.Signal() } func (o *routerOutputOp) forwardErrLocked(err error) { if err != nil { o.mu.forwardedErr = err } } func (o *routerOutputOp) forwardErr(err error) { o.mu.Lock() defer o.mu.Unlock() o.forwardErrLocked(err) o.mu.cond.Signal() } // addBatch copies the batch (according to its selection vector) into an // internal buffer. Zero-length batch should be passed-in to indicate that no // more batches will be added. // TODO(asubiotto): We should explore pipelining addBatch if disk-spilling // performance becomes a concern. The main router goroutine will be writing to // disk as the code is written, meaning that we impact the performance of // writing rows to a fast output if we have to write to disk for a single // slow output. func (o *routerOutputOp) addBatch(ctx context.Context, batch coldata.Batch) bool { o.mu.Lock() defer o.mu.Unlock() switch o.mu.state { case routerOutputDoneAdding: colexecerror.InternalError(errors.AssertionFailedf("a batch was added to routerOutput in DoneAdding state")) case routerOutputOpDraining: // This output is draining, discard any data. return false } o.mu.numUnread += batch.Length() o.mu.data.Enqueue(ctx, batch) if o.testingKnobs.addBatchTestInducedErrorCb != nil { if err := o.testingKnobs.addBatchTestInducedErrorCb(); err != nil { colexecerror.InternalError(err) } } if batch.Length() == 0 { o.mu.state = routerOutputDoneAdding o.mu.cond.Signal() return false } stateChanged := false if o.mu.numUnread > o.testingKnobs.blockedThreshold && !o.mu.blocked { // The output is now blocked. o.mu.blocked = true stateChanged = true } o.mu.cond.Signal() return stateChanged } // maybeUnblockLocked unblocks the router output if it is in a blocked state. If the // output was previously in a blocked state, an event will be sent on // routerOutputOp.unblockedEventsChan. func (o *routerOutputOp) maybeUnblockLocked() { if o.mu.blocked { o.mu.blocked = false o.unblockedEventsChan <- struct{}{} } } // resetForTests resets the routerOutputOp for a test or benchmark run. func (o *routerOutputOp) resetForTests(ctx context.Context) { o.mu.Lock() defer o.mu.Unlock() o.mu.state = routerOutputOpRunning o.mu.forwardedErr = nil o.mu.data.Reset(ctx) o.mu.numUnread = 0 o.mu.blocked = false } // hashRouterDrainState is a state that specifically describes the hashRouter's // state in the draining process. This differs from its "general" state. For // example, a hash router can have drained and exited the Run method but still // be in hashRouterDrainStateRunning until somebody calls drainMeta. type hashRouterDrainState int const ( // hashRouterDrainStateRunning is the state that a hashRouter is in when // running normally (i.e. pulling and pushing batches). hashRouterDrainStateRunning = iota // hashRouterDrainStateRequested is the state that a hashRouter is in when // either all outputs have called drainMeta or an error was encountered by one // of the outputs. hashRouterDrainStateRequested // hashRouterDrainStateCompleted is the state that a hashRouter is in when // draining has completed. hashRouterDrainStateCompleted ) // HashRouter hashes values according to provided hash columns and computes a // destination for each row. 
These destinations are exposed as Operators // returned by the constructor. type HashRouter struct { colexecop.OneInputNode // inputMetaInfo contains all of the meta components that the hash router // is responsible for. Root field is exactly the same as OneInputNode.Input. inputMetaInfo colexecargs.OpWithMetaInfo // hashCols is a slice of indices of the columns used for hashing. hashCols []uint32 // One output for each stream. outputs []routerOutput // unblockedEventsChan is a channel shared between the HashRouter and its // outputs. outputs send events on this channel when they are unblocked by a // read. unblockedEventsChan <-chan struct{} numBlockedOutputs int bufferedMeta []execinfrapb.ProducerMetadata // atomics is shared state between the Run goroutine and any routerOutput // goroutines that call drainMeta. atomics struct { // drainState is the state the hashRouter is in. The Run goroutine should // only ever read these states, never set them. drainState int32 numDrainedOutputs int32 } // waitForMetadata is a channel that the last output to drain will read from // to pass on any metadata buffered through the Run goroutine. waitForMetadata chan []execinfrapb.ProducerMetadata // tupleDistributor is used to decide to which output a particular tuple // should be routed. tupleDistributor *colexechash.TupleHashDistributor } // NewHashRouter creates a new hash router that consumes coldata.Batches from // input and hashes each row according to hashCols to one of the outputs // returned as Operators. // The number of allocators provided will determine the number of outputs // returned. Note that each allocator must be unlimited, memory will be limited // by comparing memory use in the allocator with the memoryLimit argument. Each // Operator must have an independent allocator (this means that each allocator // should be linked to an independent mem account) as Operator.Next will usually // be called concurrently between different outputs. Similarly, each output // needs to have a separate disk account. func NewHashRouter( unlimitedAllocators []*colmem.Allocator, input colexecargs.OpWithMetaInfo, types []*types.T, hashCols []uint32, memoryLimit int64, diskQueueCfg colcontainer.DiskQueueCfg, fdSemaphore semaphore.Semaphore, diskAccounts []*mon.BoundAccount, ) (*HashRouter, []colexecop.DrainableOperator) { if diskQueueCfg.CacheMode != colcontainer.DiskQueueCacheModeDefault { colexecerror.InternalError(errors.Errorf("hash router instantiated with incompatible disk queue cache mode: %d", diskQueueCfg.CacheMode)) } outputs := make([]routerOutput, len(unlimitedAllocators)) outputsAsOps := make([]colexecop.DrainableOperator, len(unlimitedAllocators)) // unblockEventsChan is buffered to 2*numOutputs as we don't want the outputs // writing to it to block. // Unblock events only happen after a corresponding block event. Since these // are state changes and are done under lock (including the output sending // on the channel, which is why we want the channel to be buffered in the // first place), every time the HashRouter blocks an output, it *must* read // all unblock events preceding it since these *must* be on the channel. 
unblockEventsChan := make(chan struct{}, 2*len(unlimitedAllocators)) memoryLimitPerOutput := memoryLimit / int64(len(unlimitedAllocators)) for i := range unlimitedAllocators { op := newRouterOutputOp( routerOutputOpArgs{ types: types, unlimitedAllocator: unlimitedAllocators[i], memoryLimit: memoryLimitPerOutput, diskAcc: diskAccounts[i], cfg: diskQueueCfg, fdSemaphore: fdSemaphore, unblockedEventsChan: unblockEventsChan, }, ) outputs[i] = op outputsAsOps[i] = op } return newHashRouterWithOutputs(input, hashCols, unblockEventsChan, outputs), outputsAsOps } func newHashRouterWithOutputs( input colexecargs.OpWithMetaInfo, hashCols []uint32, unblockEventsChan <-chan struct{}, outputs []routerOutput, ) *HashRouter { r := &HashRouter{ OneInputNode: colexecop.NewOneInputNode(input.Root), inputMetaInfo: input, hashCols: hashCols, outputs: outputs, unblockedEventsChan: unblockEventsChan, // waitForMetadata is a buffered channel to avoid blocking if nobody will // read the metadata. waitForMetadata: make(chan []execinfrapb.ProducerMetadata, 1), tupleDistributor: colexechash.NewTupleHashDistributor(colexechash.DefaultInitHashValue, len(outputs)), } for i := range outputs { outputs[i].initWithHashRouter(r) } return r } // cancelOutputs cancels all outputs and forwards the given error to all of // them if non-nil. The only case where the error is not forwarded is if no // output could be canceled due to an error. In this case each output will // forward the error returned during cancellation. func (r *HashRouter) cancelOutputs(ctx context.Context, errToForward error) { for _, o := range r.outputs { if err := colexecerror.CatchVectorizedRuntimeError(func() { o.cancel(ctx, errToForward) }); err != nil { // If there was an error canceling this output, this error can be // forwarded to whoever is calling Next. o.forwardErr(err) } } } func (r *HashRouter) setDrainState(drainState hashRouterDrainState) { atomic.StoreInt32(&r.atomics.drainState, int32(drainState)) } func (r *HashRouter) getDrainState() hashRouterDrainState { return hashRouterDrainState(atomic.LoadInt32(&r.atomics.drainState)) } // Run runs the HashRouter. Batches are read from the input and pushed to an // output calculated by hashing columns. Cancel the given context to terminate // early. func (r *HashRouter) Run(ctx context.Context) { var span *tracing.Span ctx, span = execinfra.ProcessorSpan(ctx, "hash router") if span != nil { defer span.Finish() } // Since HashRouter runs in a separate goroutine, we want to be safe and // make sure that we catch errors in all code paths, so we wrap the whole // method with a catcher. Note that we also have "internal" catchers as // well for more fine-grained control of error propagation. if err := colexecerror.CatchVectorizedRuntimeError(func() { r.Input.Init(ctx) var done bool processNextBatch := func() { done = r.processNextBatch(ctx) } for { if r.getDrainState() != hashRouterDrainStateRunning { break } // Check for cancellation. select { case <-ctx.Done(): r.cancelOutputs(ctx, ctx.Err()) return default: } // Read all the routerOutput state changes that have happened since the // last iteration. for moreToRead := true; moreToRead; { select { case <-r.unblockedEventsChan: r.numBlockedOutputs-- default: // No more routerOutput state changes to read without blocking. moreToRead = false } } if r.numBlockedOutputs == len(r.outputs) { // All outputs are blocked, wait until at least one output is unblocked. 
select { case <-r.unblockedEventsChan: r.numBlockedOutputs-- case <-ctx.Done(): r.cancelOutputs(ctx, ctx.Err()) return } } if err := colexecerror.CatchVectorizedRuntimeError(processNextBatch); err != nil { r.cancelOutputs(ctx, err) return } if done { // The input was done and we have notified the routerOutputs that there // is no more data. return } } }); err != nil { r.cancelOutputs(ctx, err) } if span != nil { for _, s := range r.inputMetaInfo.StatsCollectors { span.RecordStructured(s.GetStats()) } if meta := execinfra.GetTraceDataAsMetadata(span); meta != nil { r.bufferedMeta = append(r.bufferedMeta, *meta) } } r.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...) // Non-blocking send of metadata so that one of the outputs can return it // in DrainMeta. r.waitForMetadata <- r.bufferedMeta close(r.waitForMetadata) r.inputMetaInfo.ToClose.CloseAndLogOnErr(ctx, "hash router") } // processNextBatch reads the next batch from its input, hashes it and adds // each column to its corresponding output, returning whether the input is // done. func (r *HashRouter) processNextBatch(ctx context.Context) bool { b := r.Input.Next() n := b.Length() if n == 0 { // Done. Push an empty batch to outputs to tell them the data is done as // well. for _, o := range r.outputs { o.addBatch(ctx, b) } return true } // It is ok that we call Init() on every batch since all calls except for // the first one are noops. r.tupleDistributor.Init(ctx) selections := r.tupleDistributor.Distribute(b, r.hashCols) for i, o := range r.outputs { if len(selections[i]) > 0 { b.SetSelection(true) copy(b.Selection(), selections[i]) b.SetLength(len(selections[i])) if o.addBatch(ctx, b) { // This batch blocked the output. r.numBlockedOutputs++ } } } return false } // resetForTests resets the HashRouter for a test or benchmark run. func (r *HashRouter) resetForTests(ctx context.Context) { if i, ok := r.Input.(colexecop.Resetter); ok { i.Reset(ctx) } r.setDrainState(hashRouterDrainStateRunning) r.waitForMetadata = make(chan []execinfrapb.ProducerMetadata, 1) r.atomics.numDrainedOutputs = 0 r.bufferedMeta = nil r.numBlockedOutputs = 0 for moreToRead := true; moreToRead; { select { case <-r.unblockedEventsChan: default: moreToRead = false } } for _, o := range r.outputs { o.resetForTests(ctx) } } func (r *HashRouter) encounteredError(ctx context.Context) { // Once one output returns an error the hash router needs to stop running // and drain its input. r.setDrainState(hashRouterDrainStateRequested) // cancel all outputs. The Run goroutine will eventually realize that the // HashRouter is done and exit without draining. r.cancelOutputs(ctx, nil /* errToForward */) } func (r *HashRouter) drainMeta() []execinfrapb.ProducerMetadata { if int(atomic.AddInt32(&r.atomics.numDrainedOutputs, 1)) != len(r.outputs) { return nil } // All outputs have been drained, return any buffered metadata to the last // output to call drainMeta. r.setDrainState(hashRouterDrainStateRequested) meta := <-r.waitForMetadata r.setDrainState(hashRouterDrainStateCompleted) return meta }
pkg/sql/colflow/routers.go
1
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.9989680051803589, 0.0574248731136322, 0.00016155319462995976, 0.00030436189263127744, 0.22989162802696228 ]
{ "id": 10, "code_window": [ "\t// make sure that we catch errors in all code paths, so we wrap the whole\n", "\t// method with a catcher. Note that we also have \"internal\" catchers as\n", "\t// well for more fine-grained control of error propagation.\n", "\tif err := colexecerror.CatchVectorizedRuntimeError(func() {\n", "\t\tr.Input.Init(ctx)\n", "\t\tvar done bool\n", "\t\tprocessNextBatch := func() {\n", "\t\t\tdone = r.processNextBatch(ctx)\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\tinputInitialized = true\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "add", "edit_start_line_idx": 558 }
// Copyright 2020 The Cockroach Authors. // // Licensed as a CockroachDB Enterprise file under the Cockroach Community // License (the "License"); you may not use this file except in compliance with // the License. You may obtain a copy of the License at // // https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt package backupccl import ( "context" gosql "database/sql" "fmt" "io/ioutil" "net/url" "path" "regexp" "sort" "strconv" "testing" "time" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/jobs/jobstest" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" pbtypes "github.com/gogo/protobuf/types" "github.com/gorhill/cronexpr" "github.com/stretchr/testify/require" ) const allSchedules = 0 // testHelper starts a server, and arranges for job scheduling daemon to // use jobstest.JobSchedulerTestEnv. // This helper also arranges for the manual override of scheduling logic // via executeSchedules callback. type execSchedulesFn = func(ctx context.Context, maxSchedules int64, txn *kv.Txn) error type testHelper struct { iodir string server serverutils.TestServerInterface env *jobstest.JobSchedulerTestEnv cfg *scheduledjobs.JobExecutionConfig sqlDB *sqlutils.SQLRunner executeSchedules func() error } // newTestHelper creates and initializes appropriate state for a test, // returning testHelper as well as a cleanup function. func newTestHelper(t *testing.T) (*testHelper, func()) { dir, dirCleanupFn := testutils.TempDir(t) th := &testHelper{ env: jobstest.NewJobSchedulerTestEnv(jobstest.UseSystemTables, timeutil.Now()), iodir: dir, } knobs := &jobs.TestingKnobs{ JobSchedulerEnv: th.env, TakeOverJobsScheduling: func(fn execSchedulesFn) { th.executeSchedules = func() error { defer th.server.JobRegistry().(*jobs.Registry).TestingNudgeAdoptionQueue() return th.cfg.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { return fn(ctx, allSchedules, txn) }) } }, CaptureJobExecutionConfig: func(config *scheduledjobs.JobExecutionConfig) { th.cfg = config }, } args := base.TestServerArgs{ ExternalIODir: dir, Knobs: base.TestingKnobs{ JobsTestingKnobs: knobs, }, } s, db, _ := serverutils.StartServer(t, args) require.NotNil(t, th.cfg) th.sqlDB = sqlutils.MakeSQLRunner(db) th.server = s return th, func() { dirCleanupFn() s.Stopper().Stop(context.Background()) } } func (h *testHelper) clearSchedules(t *testing.T) { t.Helper() h.sqlDB.Exec(t, "DELETE FROM system.scheduled_jobs WHERE true") } func (h *testHelper) waitForSuccessfulScheduledJob(t *testing.T, scheduleID int64) { query := "SELECT id FROM " + h.env.SystemJobsTableName() + " WHERE status=$1 AND created_by_type=$2 AND created_by_id=$3" testutils.SucceedsSoon(t, func() error { // Force newly created job to be adopted and verify it succeeds. 
h.server.JobRegistry().(*jobs.Registry).TestingNudgeAdoptionQueue() var unused int64 return h.sqlDB.DB.QueryRowContext(context.Background(), query, jobs.StatusSucceeded, jobs.CreatedByScheduledJobs, scheduleID).Scan(&unused) }) } // createBackupSchedule executes specified "CREATE SCHEDULE FOR BACKUP" query, with // the provided arguments. Returns the list of created schedules func (h *testHelper) createBackupSchedule( t *testing.T, query string, args ...interface{}, ) ([]*jobs.ScheduledJob, error) { // Execute statement and get the list of schedule IDs created by the query. ctx := context.Background() rows, err := h.sqlDB.DB.QueryContext(ctx, query, args...) if err != nil { return nil, err } var unusedStr string var unusedTS *time.Time var schedules []*jobs.ScheduledJob for rows.Next() { var id int64 require.NoError(t, rows.Scan(&id, &unusedStr, &unusedStr, &unusedTS, &unusedStr, &unusedStr)) // Query system.scheduled_job table and load those schedules. datums, cols, err := h.cfg.InternalExecutor.QueryRowExWithCols( context.Background(), "sched-load", nil, sessiondata.InternalExecutorOverride{User: security.RootUserName()}, "SELECT * FROM system.scheduled_jobs WHERE schedule_id = $1", id, ) require.NoError(t, err) require.NotNil(t, datums) s := jobs.NewScheduledJob(h.env) require.NoError(t, s.InitFromDatums(datums, cols)) schedules = append(schedules, s) } if err := rows.Err(); err != nil { return nil, err } return schedules, nil } func getScheduledBackupStatement(t *testing.T, arg *jobspb.ExecutionArguments) string { var backup ScheduledBackupExecutionArgs require.NoError(t, pbtypes.UnmarshalAny(arg.Args, &backup)) return backup.BackupStatement } type userType bool const freeUser userType = false const enterpriseUser userType = true func (t userType) String() string { if t == freeUser { return "free user" } return "enterprise user" } // This test examines serialized representation of backup schedule arguments // when the scheduled backup statement executes. This test does not concern // itself with the actual scheduling and the execution of those backups. 
func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) th, cleanup := newTestHelper(t) defer cleanup() type expectedSchedule struct { nameRe string backupStmt string period time.Duration runsNow bool shownStmt string paused bool } testCases := []struct { name string query string queryArgs []interface{} user userType expectedSchedules []expectedSchedule errMsg string }{ { name: "full-cluster", query: "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://0/backup?AWS_SECRET_ACCESS_KEY=neverappears' RECURRING '@hourly'", user: freeUser, expectedSchedules: []expectedSchedule{ { nameRe: "BACKUP .+", backupStmt: "BACKUP INTO 'nodelocal://0/backup?AWS_SECRET_ACCESS_KEY=neverappears' WITH detached", shownStmt: "BACKUP INTO 'nodelocal://0/backup?AWS_SECRET_ACCESS_KEY=redacted' WITH detached", period: time.Hour, }, }, }, { name: "full-cluster-with-name", query: "CREATE SCHEDULE 'my-backup' FOR BACKUP INTO 'nodelocal://0/backup' RECURRING '@hourly'", user: freeUser, expectedSchedules: []expectedSchedule{ { nameRe: "my-backup", backupStmt: "BACKUP INTO 'nodelocal://0/backup' WITH detached", period: time.Hour, }, }, }, { name: "full-cluster-always", query: "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://0/backup' RECURRING '@hourly' FULL BACKUP ALWAYS", user: freeUser, expectedSchedules: []expectedSchedule{ { nameRe: "BACKUP .+", backupStmt: "BACKUP INTO 'nodelocal://0/backup' WITH detached", period: time.Hour, }, }, }, { name: "full-cluster", query: "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://0/backup' RECURRING '@hourly'", user: enterpriseUser, expectedSchedules: []expectedSchedule{ { nameRe: "BACKUP .*", backupStmt: "BACKUP INTO LATEST IN 'nodelocal://0/backup' WITH detached", period: time.Hour, paused: true, }, { nameRe: "BACKUP .+", backupStmt: "BACKUP INTO 'nodelocal://0/backup' WITH detached", period: 24 * time.Hour, runsNow: true, }, }, }, { name: "full-cluster-with-name", query: "CREATE SCHEDULE 'my-backup' FOR BACKUP INTO 'nodelocal://0/backup' RECURRING '@hourly'", user: enterpriseUser, expectedSchedules: []expectedSchedule{ { nameRe: "my-backup", backupStmt: "BACKUP INTO LATEST IN 'nodelocal://0/backup' WITH detached", period: time.Hour, paused: true, }, { nameRe: "my-backup", backupStmt: "BACKUP INTO 'nodelocal://0/backup' WITH detached", period: 24 * time.Hour, runsNow: true, }, }, }, { name: "full-cluster-always", query: "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://0/backup' RECURRING '@hourly' FULL BACKUP ALWAYS", user: enterpriseUser, expectedSchedules: []expectedSchedule{ { nameRe: "BACKUP .+", backupStmt: "BACKUP INTO 'nodelocal://0/backup' WITH detached", period: time.Hour, }, }, }, { name: "enterprise-license-required-for-incremental", query: "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://0/backup' RECURRING '@hourly' FULL BACKUP '@weekly'", user: freeUser, errMsg: "use of BACKUP INTO LATEST requires an enterprise license", }, { name: "enterprise-license-required-for-revision-history", query: "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://0/backup' WITH revision_history RECURRING '@hourly'", user: freeUser, errMsg: "use of BACKUP with revision_history requires an enterprise license", }, { name: "enterprise-license-required-for-encryption", query: "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://0/backup' WITH encryption_passphrase = 'secret' RECURRING '@hourly'", user: freeUser, errMsg: "use of BACKUP with encryption requires an enterprise license", }, { name: "full-cluster-with-name-arg", query: `CREATE SCHEDULE $1 FOR BACKUP 
INTO 'nodelocal://0/backup' WITH revision_history, detached RECURRING '@hourly'`, queryArgs: []interface{}{"my_backup_name"}, user: enterpriseUser, expectedSchedules: []expectedSchedule{ { nameRe: "my_backup_name", backupStmt: "BACKUP INTO LATEST IN 'nodelocal://0/backup' WITH revision_history, detached", period: time.Hour, paused: true, }, { nameRe: "my_backup_name", backupStmt: "BACKUP INTO 'nodelocal://0/backup' WITH revision_history, detached", period: 24 * time.Hour, runsNow: true, }, }, }, { name: "multiple-tables-with-encryption", user: enterpriseUser, query: ` CREATE SCHEDULE FOR BACKUP TABLE system.jobs, system.scheduled_jobs INTO 'nodelocal://0/backup' WITH encryption_passphrase = 'secret' RECURRING '@weekly'`, expectedSchedules: []expectedSchedule{ { nameRe: "BACKUP .*", backupStmt: "BACKUP TABLE system.jobs, system.scheduled_jobs INTO 'nodelocal://0/backup' WITH encryption_passphrase = 'secret', detached", shownStmt: "BACKUP TABLE system.jobs, system.scheduled_jobs INTO 'nodelocal://0/backup' WITH encryption_passphrase = '*****', detached", period: 7 * 24 * time.Hour, }, }, }, { name: "partitioned-backup", user: enterpriseUser, query: ` CREATE SCHEDULE FOR BACKUP DATABASE system INTO ('nodelocal://0/backup?COCKROACH_LOCALITY=x%3Dy', 'nodelocal://0/backup2?COCKROACH_LOCALITY=default') WITH revision_history RECURRING '1 2 * * *' FULL BACKUP ALWAYS WITH SCHEDULE OPTIONS first_run=$1 `, queryArgs: []interface{}{th.env.Now().Add(time.Minute)}, expectedSchedules: []expectedSchedule{ { nameRe: "BACKUP .+", backupStmt: "BACKUP DATABASE system INTO " + "('nodelocal://0/backup?COCKROACH_LOCALITY=x%3Dy', 'nodelocal://0/backup2?COCKROACH_LOCALITY=default') " + "WITH revision_history, detached", period: 24 * time.Hour, }, }, }, { name: "missing-destination-placeholder", query: `CREATE SCHEDULE FOR BACKUP TABLE t INTO $1 RECURRING '@hourly'`, errMsg: "failed to evaluate backup destination paths", }, { name: "missing-encryption-placeholder", user: enterpriseUser, query: `CREATE SCHEDULE FOR BACKUP INTO 'foo' WITH encryption_passphrase=$1 RECURRING '@hourly'`, errMsg: "failed to evaluate backup encryption_passphrase", }, } for _, tc := range testCases { t.Run(fmt.Sprintf("%s-%s", tc.name, tc.user), func(t *testing.T) { defer th.clearSchedules(t) if tc.user == freeUser { defer utilccl.TestingDisableEnterprise()() } else { defer utilccl.TestingEnableEnterprise()() } schedules, err := th.createBackupSchedule(t, tc.query, tc.queryArgs...) if len(tc.errMsg) > 0 { require.True(t, testutils.IsError(err, tc.errMsg), "expected error to match %q, found %q instead", tc.errMsg, err.Error()) return } require.NoError(t, err) require.Equal(t, len(tc.expectedSchedules), len(schedules)) shown := th.sqlDB.QueryStr(t, `SELECT id, command->'backup_statement' FROM [SHOW SCHEDULES]`) require.Equal(t, len(tc.expectedSchedules), len(shown)) shownByID := map[int64]string{} for _, i := range shown { id, err := strconv.ParseInt(i[0], 10, 64) require.NoError(t, err) shownByID[id] = i[1] } // Build a map of expected backup statement to expected schedule. 
expectedByName := make(map[string]expectedSchedule) for _, s := range tc.expectedSchedules { expectedByName[s.backupStmt] = s } for _, s := range schedules { stmt := getScheduledBackupStatement(t, s.ExecutionArgs()) expectedSchedule, ok := expectedByName[stmt] require.True(t, ok, "could not find matching name for %q", stmt) require.Regexp(t, regexp.MustCompile(expectedSchedule.nameRe), s.ScheduleLabel()) expectedShown := fmt.Sprintf("%q", expectedSchedule.backupStmt) if expectedSchedule.shownStmt != "" { expectedShown = fmt.Sprintf("%q", expectedSchedule.shownStmt) } require.Equal(t, expectedShown, shownByID[s.ScheduleID()]) frequency, err := s.Frequency() require.NoError(t, err) require.EqualValues(t, expectedSchedule.period, frequency, expectedSchedule) require.Equal(t, expectedSchedule.paused, s.IsPaused()) if expectedSchedule.runsNow { require.EqualValues(t, th.env.Now().Round(time.Microsecond), s.ScheduledRunTime()) } } }) } } func TestScheduleBackup(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) th, cleanup := newTestHelper(t) defer cleanup() th.sqlDB.Exec(t, ` CREATE DATABASE db; USE db; CREATE TABLE t1(a int); INSERT INTO t1 values (1), (10), (100); CREATE TABLE t2(b int); INSERT INTO t2 VALUES (3), (2), (1); CREATE TABLE t3(c int); INSERT INTO t3 VALUES (5), (5), (7); CREATE DATABASE other_db; USE other_db; CREATE TABLE t1(a int); INSERT INTO t1 values (-1), (10), (-100); `) // We'll be manipulating schedule time via th.env, but we can't fool actual backup // when it comes to AsOf time. So, override AsOf backup clause to be the current time. th.cfg.TestingKnobs.(*jobs.TestingKnobs).OverrideAsOfClause = func(clause *tree.AsOfClause) { expr, err := tree.MakeDTimestampTZ(th.cfg.DB.Clock().PhysicalTime(), time.Microsecond) require.NoError(t, err) clause.Expr = expr } type dbTables struct { db string tables []string } expectBackupTables := func(dbTbls ...dbTables) [][]string { sort.Slice(dbTbls, func(i, j int) bool { return dbTbls[i].db < dbTbls[j].db }) var res [][]string for _, dbt := range dbTbls { sort.Strings(dbt.tables) for _, tbl := range dbt.tables { res = append(res, []string{dbt.db, tbl}) } } return res } expectedSystemTables := make([]string, 0) for systemTableName := range GetSystemTablesToIncludeInClusterBackup() { expectedSystemTables = append(expectedSystemTables, systemTableName) } testCases := []struct { name string schedule string verifyTables [][]string }{ { name: "cluster-backup", schedule: "CREATE SCHEDULE FOR BACKUP INTO $1 RECURRING '@hourly'", verifyTables: expectBackupTables( dbTables{"db", []string{"t1", "t2", "t3"}}, dbTables{"other_db", []string{"t1"}}, dbTables{"system", expectedSystemTables}, ), }, { name: "tables-backup-with-history", schedule: "CREATE SCHEDULE FOR BACKUP db.t2, db.t3 INTO $1 WITH revision_history RECURRING '@hourly' FULL BACKUP ALWAYS", verifyTables: expectBackupTables(dbTables{"db", []string{"t2", "t3"}}), }, { name: "table-backup-in-different-dbs", schedule: "CREATE SCHEDULE FOR BACKUP db.t1, other_db.t1, db.t3 INTO $1 RECURRING '@hourly' FULL BACKUP ALWAYS", verifyTables: expectBackupTables( dbTables{"db", []string{"t1", "t3"}}, dbTables{"other_db", []string{"t1"}}, ), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { destination := "nodelocal://0/backup/" + tc.name schedules, err := th.createBackupSchedule(t, tc.schedule, destination) require.NoError(t, err) require.LessOrEqual(t, 1, len(schedules)) // Either 1 or two schedules will be created. 
// One of them (incremental) must be paused. var full, inc *jobs.ScheduledJob if len(schedules) == 1 { full = schedules[0] } else { require.Equal(t, 2, len(schedules)) full, inc = schedules[0], schedules[1] if full.IsPaused() { full, inc = inc, full // Swap: inc should be paused. } require.True(t, inc.IsPaused()) require.False(t, full.IsPaused()) // The full should list incremental as a schedule to unpause. args := &ScheduledBackupExecutionArgs{} require.NoError(t, pbtypes.UnmarshalAny(full.ExecutionArgs().Args, args)) require.EqualValues(t, inc.ScheduleID(), args.UnpauseOnSuccess) } defer func() { th.sqlDB.Exec(t, "DROP SCHEDULE $1", full.ScheduleID()) if inc != nil { th.sqlDB.Exec(t, "DROP SCHEDULE $1", inc.ScheduleID()) } }() // Force the schedule to execute. th.env.SetTime(full.NextRun().Add(time.Second)) require.NoError(t, th.executeSchedules()) // Wait for the backup complete. th.waitForSuccessfulScheduledJob(t, full.ScheduleID()) if inc != nil { // Once the full backup completes, the incremental one should no longer be paused. loadedInc, err := jobs.LoadScheduledJob( context.Background(), th.env, inc.ScheduleID(), th.cfg.InternalExecutor, nil) require.NoError(t, err) require.False(t, loadedInc.IsPaused()) } // Verify backup. latest, err := ioutil.ReadFile(path.Join(th.iodir, "backup", tc.name, latestFileName)) require.NoError(t, err) backedUp := th.sqlDB.QueryStr(t, `SELECT database_name, object_name FROM [SHOW BACKUP $1] WHERE object_type='table' ORDER BY database_name, object_name`, fmt.Sprintf("%s/%s", destination, string(latest))) require.Equal(t, tc.verifyTables, backedUp) }) } } func TestCreateBackupScheduleRequiresAdminRole(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) th, cleanup := newTestHelper(t) defer cleanup() th.sqlDB.Exec(t, `CREATE USER testuser`) pgURL, cleanupFunc := sqlutils.PGUrl( t, th.server.ServingSQLAddr(), "TestCreateSchedule-testuser", url.User("testuser"), ) defer cleanupFunc() testuser, err := gosql.Open("postgres", pgURL.String()) require.NoError(t, err) defer func() { require.NoError(t, testuser.Close()) }() _, err = testuser.Exec("CREATE SCHEDULE FOR BACKUP INTO 'somewhere' RECURRING '@daily'") require.Error(t, err) } func TestCreateBackupScheduleCollectionOverwrite(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) th, cleanup := newTestHelper(t) defer cleanup() const collectionLocation = "nodelocal://1/collection" th.sqlDB.Exec(t, `BACKUP INTO $1`, collectionLocation) // Expect that trying to normally create a scheduled backup to this location // fails. th.sqlDB.ExpectErr(t, "backups already created in", "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://1/collection' RECURRING '@daily';") // Expect that we can override this option with the ignore_existing_backups // flag. 
th.sqlDB.Exec(t, "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://1/collection' "+ "RECURRING '@daily' WITH SCHEDULE OPTIONS ignore_existing_backups;") } func TestCreateBackupScheduleInExplicitTxnRollback(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) th, cleanup := newTestHelper(t) defer cleanup() res := th.sqlDB.Query(t, "SELECT id FROM [SHOW SCHEDULES];") require.False(t, res.Next()) require.NoError(t, res.Err()) th.sqlDB.Exec(t, "BEGIN;") th.sqlDB.Exec(t, "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://1/collection' RECURRING '@daily';") th.sqlDB.Exec(t, "ROLLBACK;") res = th.sqlDB.Query(t, "SELECT id FROM [SHOW SCHEDULES];") require.False(t, res.Next()) require.NoError(t, res.Err()) } // Normally, we issue backups with AOST set to be the scheduled nextRun. // But if the schedule time is way in the past, the backup will fail. // This test verifies that scheduled backups will start working // (eventually), even after the cluster has been down for a long period. func TestScheduleBackupRecoversFromClusterDown(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) th, cleanup := newTestHelper(t) defer cleanup() th.sqlDB.Exec(t, ` CREATE DATABASE db; USE db; CREATE TABLE t(a int); INSERT INTO t values (1), (10), (100); `) loadSchedule := func(t *testing.T, id int64) *jobs.ScheduledJob { loaded, err := jobs.LoadScheduledJob( context.Background(), th.env, id, th.cfg.InternalExecutor, nil) require.NoError(t, err) return loaded } advanceNextRun := func(t *testing.T, id int64, delta time.Duration) { // Adjust next run by the specified delta (which maybe negative). s := loadSchedule(t, id) s.SetNextRun(th.env.Now().Add(delta)) require.NoError(t, s.Update(context.Background(), th.cfg.InternalExecutor, nil)) } // We'll be manipulating schedule time via th.env, but we can't fool actual backup // when it comes to AsOf time. So, override AsOf backup clause to be the current time. useRealTimeAOST := func() func() { knobs := th.cfg.TestingKnobs.(*jobs.TestingKnobs) knobs.OverrideAsOfClause = func(clause *tree.AsOfClause) { expr, err := tree.MakeDTimestampTZ(th.cfg.DB.Clock().PhysicalTime(), time.Microsecond) require.NoError(t, err) clause.Expr = expr } return func() { knobs.OverrideAsOfClause = nil } } // Create backup schedules for this test. // Returns schedule IDs for full and incremental schedules, plus a cleanup function. createSchedules := func(t *testing.T, name string) (int64, int64, func()) { schedules, err := th.createBackupSchedule(t, "CREATE SCHEDULE FOR BACKUP INTO $1 RECURRING '*/5 * * * *'", "nodelocal://0/backup/"+name) require.NoError(t, err) // We expect full & incremental schedule to be created. require.Equal(t, 2, len(schedules)) // Order schedules so that the full schedule is the first one fullID, incID := schedules[0].ScheduleID(), schedules[1].ScheduleID() if schedules[0].IsPaused() { fullID, incID = incID, fullID } // For the initial backup, we need to ensure that AOST is the current time. defer useRealTimeAOST()() // Force full backup to execute (this unpauses incremental). advanceNextRun(t, fullID, -1*time.Minute) require.NoError(t, th.executeSchedules()) th.waitForSuccessfulScheduledJob(t, fullID) // Do the same for the incremental. 
advanceNextRun(t, incID, -1*time.Minute) require.NoError(t, th.executeSchedules()) th.waitForSuccessfulScheduledJob(t, incID) return fullID, incID, func() { th.sqlDB.Exec(t, "DROP SCHEDULE $1", schedules[0].ScheduleID()) th.sqlDB.Exec(t, "DROP SCHEDULE $1", schedules[1].ScheduleID()) } } markOldAndSetSchedulesPolicy := func( t *testing.T, fullID, incID int64, onError jobspb.ScheduleDetails_ErrorHandlingBehavior, ) { for _, id := range []int64{fullID, incID} { // Pretend we were down for a year. s := loadSchedule(t, id) s.SetNextRun(s.NextRun().Add(-365 * 24 * time.Hour)) // Set onError policy to the specified value. s.SetScheduleDetails(jobspb.ScheduleDetails{ OnError: onError, }) require.NoError(t, s.Update(context.Background(), th.cfg.InternalExecutor, nil)) } } t.Run("pause", func(t *testing.T) { fullID, incID, cleanup := createSchedules(t, "pause") defer cleanup() markOldAndSetSchedulesPolicy(t, fullID, incID, jobspb.ScheduleDetails_PAUSE_SCHED) require.NoError(t, th.executeSchedules()) // AOST way in the past causes backup planning to fail. We don't need // to wait for any jobs, and the schedules should now be paused. for _, id := range []int64{fullID, incID} { require.True(t, loadSchedule(t, id).IsPaused()) } }) metrics := func() *jobs.ExecutorMetrics { ex, _, err := jobs.GetScheduledJobExecutor(tree.ScheduledBackupExecutor.InternalName()) require.NoError(t, err) require.NotNil(t, ex.Metrics()) return ex.Metrics().(*backupMetrics).ExecutorMetrics }() t.Run("retry", func(t *testing.T) { fullID, incID, cleanup := createSchedules(t, "retry") defer cleanup() markOldAndSetSchedulesPolicy(t, fullID, incID, jobspb.ScheduleDetails_RETRY_SOON) require.NoError(t, th.executeSchedules()) // AOST way in the past causes backup planning to fail. We don't need // to wait for any jobs, and the schedule nextRun should be advanced // a bit in the future. for _, id := range []int64{fullID, incID} { require.True(t, loadSchedule(t, id).NextRun().Sub(th.env.Now()) > 0) } // We expect that, eventually, both backups would succeed. defer useRealTimeAOST()() th.env.AdvanceTime(time.Hour) initialSucceeded := metrics.NumSucceeded.Count() require.NoError(t, th.executeSchedules()) testutils.SucceedsSoon(t, func() error { delta := metrics.NumSucceeded.Count() - initialSucceeded if delta == 2 { return nil } return errors.Newf("expected 2 backup to succeed, got %d", delta) }) }) t.Run("reschedule", func(t *testing.T) { fullID, incID, cleanup := createSchedules(t, "reschedule") defer cleanup() markOldAndSetSchedulesPolicy(t, fullID, incID, jobspb.ScheduleDetails_RETRY_SCHED) require.NoError(t, th.executeSchedules()) // AOST way in the past causes backup planning to fail. We don't need // to wait for any jobs, and the schedule nextRun should be advanced // to the next scheduled recurrence. for _, id := range []int64{fullID, incID} { s := loadSchedule(t, id) require.EqualValues(t, cronexpr.MustParse(s.ScheduleExpr()).Next(th.env.Now()).Round(time.Microsecond), s.NextRun()) } // We expect that, eventually, both backups would succeed. defer useRealTimeAOST()() th.env.AdvanceTime(25 * time.Hour) // Go to next day to guarantee daily triggers. initialSucceeded := metrics.NumSucceeded.Count() require.NoError(t, th.executeSchedules()) testutils.SucceedsSoon(t, func() error { delta := metrics.NumSucceeded.Count() - initialSucceeded if delta == 2 { return nil } return errors.Newf("expected 2 backup to succeed, got %d", delta) }) }) }
pkg/ccl/backupccl/create_scheduled_backup_test.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.001378592336550355, 0.00022029814135748893, 0.0001568539737490937, 0.00017155820387415588, 0.00019897386664524674 ]
{ "id": 10, "code_window": [ "\t// make sure that we catch errors in all code paths, so we wrap the whole\n", "\t// method with a catcher. Note that we also have \"internal\" catchers as\n", "\t// well for more fine-grained control of error propagation.\n", "\tif err := colexecerror.CatchVectorizedRuntimeError(func() {\n", "\t\tr.Input.Init(ctx)\n", "\t\tvar done bool\n", "\t\tprocessNextBatch := func() {\n", "\t\t\tdone = r.processNextBatch(ctx)\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\tinputInitialized = true\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "add", "edit_start_line_idx": 558 }
version: '3' services: kdc: build: ./kdc volumes: - ./kdc/start.sh:/start.sh - keytab:/keytab cockroach: image: ubuntu:xenial-20170214 depends_on: - kdc command: /cockroach/cockroach --certs-dir=/certs start-single-node --listen-addr cockroach environment: - KRB5_KTNAME=/keytab/crdb.keytab volumes: - ../../.localcluster.certs:/certs - keytab:/keytab - ../../../../cockroach-linux-2.6.32-gnu-amd64:/cockroach/cockroach python: build: ./python depends_on: - cockroach command: /start.sh environment: - PGHOST=cockroach - PGPORT=26257 - PGSSLMODE=require - PGSSLCERT=/certs/node.crt - PGSSLKEY=/certs/node.key volumes: - ./kdc/krb5.conf:/etc/krb5.conf - ./python/start.sh:/start.sh - ../../.localcluster.certs:/certs volumes: keytab:
pkg/acceptance/compose/gss/docker-compose-python.yml
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00017751901759766042, 0.00017626765475142747, 0.00017519779794383794, 0.0001761769235599786, 9.17830277558096e-7 ]
{ "id": 10, "code_window": [ "\t// make sure that we catch errors in all code paths, so we wrap the whole\n", "\t// method with a catcher. Note that we also have \"internal\" catchers as\n", "\t// well for more fine-grained control of error propagation.\n", "\tif err := colexecerror.CatchVectorizedRuntimeError(func() {\n", "\t\tr.Input.Init(ctx)\n", "\t\tvar done bool\n", "\t\tprocessNextBatch := func() {\n", "\t\t\tdone = r.processNextBatch(ctx)\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\tinputInitialized = true\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "add", "edit_start_line_idx": 558 }
# LogicTest: 5node # Tests that verify we retrieve the stats correctly. Note that we can't create # statistics if distsql mode is OFF. # Disable automatic stats to prevent flakes if auto stats run. statement ok SET CLUSTER SETTING sql.stats.automatic_collection.enabled = false statement ok CREATE TABLE uv (u INT, v INT, INDEX (u) STORING (v), INDEX (v) STORING (u)); INSERT INTO uv VALUES (1, 1), (1, 2), (1, 3), (1, 4), (2, 4), (2, 5), (2, 6), (2, 7) statement ok CREATE STATISTICS u ON u FROM uv; CREATE STATISTICS v ON v FROM uv query TTIIIB colnames SELECT statistics_name, column_names, row_count, distinct_count, null_count, histogram_id IS NOT NULL AS has_histogram FROM [SHOW STATISTICS FOR TABLE uv] ---- statistics_name column_names row_count distinct_count null_count has_histogram u {u} 8 2 0 true v {v} 8 7 0 true statement ok set enable_zigzag_join = false # Verify we scan index v which has the more selective constraint. query T retry EXPLAIN (VERBOSE) SELECT * FROM uv WHERE u = 1 AND v = 1 ---- distribution: full vectorized: true · • filter │ columns: (u, v) │ estimated row count: 1 │ filter: u = 1 │ └── • scan columns: (u, v) estimated row count: 1 (12% of the table; stats collected <hidden> ago) table: uv@uv_v_idx spans: /1-/2 # Verify that injecting different statistics changes the plan. statement ok ALTER TABLE uv INJECT STATISTICS '[ { "columns": ["u"], "created_at": "2018-01-01 1:00:00.00000+00:00", "row_count": 100, "distinct_count": 100 }, { "columns": ["v"], "created_at": "2018-01-01 1:00:00.00000+00:00", "row_count": 100, "distinct_count": 10 } ]' query T EXPLAIN (VERBOSE) SELECT * FROM uv WHERE u = 1 AND v = 1 ---- distribution: full vectorized: true · • filter │ columns: (u, v) │ estimated row count: 1 │ filter: v = 1 │ └── • scan columns: (u, v) estimated row count: 1 (1.0% of the table; stats collected <hidden> ago) table: uv@uv_u_idx spans: /1-/2 # Verify that injecting different statistics with null counts # changes the plan. 
statement ok ALTER TABLE uv INJECT STATISTICS '[ { "columns": ["u"], "created_at": "2018-01-01 1:00:00.00000+00:00", "row_count": 100, "distinct_count": 20, "null_count": 0 }, { "columns": ["v"], "created_at": "2018-01-01 1:00:00.00000+00:00", "row_count": 100, "distinct_count": 10, "null_count": 0 } ]' query T EXPLAIN (VERBOSE) SELECT * FROM uv WHERE u = 1 AND v = 1 ---- distribution: full vectorized: true · • filter │ columns: (u, v) │ estimated row count: 1 │ filter: v = 1 │ └── • scan columns: (u, v) estimated row count: 5 (5.0% of the table; stats collected <hidden> ago) table: uv@uv_u_idx spans: /1-/2 statement ok ALTER TABLE uv INJECT STATISTICS '[ { "columns": ["u"], "created_at": "2018-01-01 1:00:00.00000+00:00", "row_count": 100, "distinct_count": 20, "null_count": 0 }, { "columns": ["v"], "created_at": "2018-01-01 1:00:00.00000+00:00", "row_count": 100, "distinct_count": 10, "null_count": 90 } ]' query T EXPLAIN (VERBOSE) SELECT * FROM uv WHERE u = 1 AND v = 1 ---- distribution: full vectorized: true · • filter │ columns: (u, v) │ estimated row count: 1 │ filter: u = 1 │ └── • scan columns: (u, v) estimated row count: 1 (1.1% of the table; stats collected <hidden> ago) table: uv@uv_v_idx spans: /1-/2 statement ok ALTER TABLE uv INJECT STATISTICS '[ { "columns": ["u"], "created_at": "2018-01-01 1:00:00.00000+00:00", "row_count": 100, "distinct_count": 20, "null_count": 0, "histo_col_type":"INT4", "histo_buckets":[{ "num_eq":50, "num_range":0, "distinct_range":0, "upper_bound":"1" }, { "num_eq":20, "num_range":0, "distinct_range":0, "upper_bound":"2" }, { "num_eq":5, "num_range":8, "distinct_range":7, "upper_bound":"10" }, { "num_eq":5, "num_range":12, "distinct_range":9, "upper_bound":"20" }] }, { "columns": ["v"], "created_at": "2018-01-01 1:00:00.00000+00:00", "row_count": 100, "distinct_count": 10, "null_count": 90 }, { "columns": ["u", "v"], "created_at": "2018-01-01 1:00:00.00000+00:00", "row_count": 100, "distinct_count": 25, "null_count": 90 } ]' # Test that we respect the session settings for using histograms and # multi-column stats. 
statement ok set optimizer_use_histograms = false query T EXPLAIN (OPT, VERBOSE) SELECT * FROM uv WHERE u < 30 GROUP BY u, v ---- distinct-on ├── columns: u:1 v:2 ├── grouping columns: u:1 v:2 ├── internal-ordering: +1 ├── stats: [rows=20.0617284, distinct(1,2)=20.0617284, null(1,2)=0] ├── cost: 41.7306173 ├── key: (1,2) └── scan uv@uv_u_idx ├── columns: u:1 v:2 ├── constraint: /1/3: (/NULL - /29] ├── stats: [rows=33.3333333, distinct(1)=6.66666667, null(1)=0, distinct(1,2)=20.0617284, null(1,2)=0] ├── cost: 40.6766667 ├── ordering: +1 ├── prune: (2) └── interesting orderings: (+1) (+2) statement ok set optimizer_use_multicol_stats = false query T EXPLAIN (OPT, VERBOSE) SELECT * FROM uv WHERE u < 30 GROUP BY u, v ---- distinct-on ├── columns: u:1 v:2 ├── grouping columns: u:1 v:2 ├── internal-ordering: +1 ├── stats: [rows=33.3333333, distinct(1,2)=33.3333333, null(1,2)=0] ├── cost: 41.8633333 ├── key: (1,2) └── scan uv@uv_u_idx ├── columns: u:1 v:2 ├── constraint: /1/3: (/NULL - /29] ├── stats: [rows=33.3333333, distinct(1)=6.66666667, null(1)=0, distinct(1,2)=33.3333333, null(1,2)=0] ├── cost: 40.6766667 ├── ordering: +1 ├── prune: (2) └── interesting orderings: (+1) (+2) statement ok set optimizer_use_histograms = true query T EXPLAIN (OPT, VERBOSE) SELECT * FROM uv WHERE u < 30 GROUP BY u, v ---- distinct-on ├── columns: u:1 v:2 ├── grouping columns: u:1 v:2 ├── internal-ordering: +1 ├── stats: [rows=100, distinct(1,2)=100, null(1,2)=0] ├── cost: 117.53 ├── key: (1,2) └── scan uv@uv_u_idx ├── columns: u:1 v:2 ├── constraint: /1/3: (/NULL - /29] ├── stats: [rows=100, distinct(1)=20, null(1)=0, distinct(1,2)=100, null(1,2)=0] │ histogram(1)= 0 50 0 20 8 5 12 5 │ <--- 1 --- 2 --- 10 ---- 20 ├── cost: 114.01 ├── ordering: +1 ├── prune: (2) └── interesting orderings: (+1) (+2) statement ok set optimizer_use_multicol_stats = true query T EXPLAIN (OPT, VERBOSE) SELECT * FROM uv WHERE u < 30 GROUP BY u, v ---- distinct-on ├── columns: u:1 v:2 ├── grouping columns: u:1 v:2 ├── internal-ordering: +1 ├── stats: [rows=25, distinct(1,2)=25, null(1,2)=0] ├── cost: 116.78 ├── key: (1,2) └── scan uv@uv_u_idx ├── columns: u:1 v:2 ├── constraint: /1/3: (/NULL - /29] ├── stats: [rows=100, distinct(1)=20, null(1)=0, distinct(1,2)=25, null(1,2)=0] │ histogram(1)= 0 50 0 20 8 5 12 5 │ <--- 1 --- 2 --- 10 ---- 20 ├── cost: 114.01 ├── ordering: +1 ├── prune: (2) └── interesting orderings: (+1) (+2) # Verify basic stats for JSON are used. 
statement ok CREATE TABLE tj (j JSON) statement ok INSERT INTO tj VALUES (NULL), ('1'), ('true'), ('true'), ('{}') query T EXPLAIN (OPT, VERBOSE) SELECT DISTINCT j FROM tj WHERE j IS NULL ---- limit ├── columns: j:1 ├── cardinality: [0 - 1] ├── immutable ├── stats: [rows=1] ├── cost: 111.049999 ├── key: () ├── fd: ()-->(1) ├── select │ ├── columns: j:1 │ ├── immutable │ ├── stats: [rows=10.0000001, distinct(1)=1, null(1)=10] │ ├── cost: 111.029999 │ ├── fd: ()-->(1) │ ├── limit hint: 1.00 │ ├── scan tj │ │ ├── columns: j:1 │ │ ├── stats: [rows=1000, distinct(1)=100, null(1)=10] │ │ ├── cost: 110.009999 │ │ ├── limit hint: 100.00 │ │ └── prune: (1) │ └── filters │ └── j:1 IS NULL [outer=(1), immutable, constraints=(/1: [/NULL - /NULL]; tight), fd=()-->(1)] └── 1 statement ok CREATE STATISTICS tj FROM tj query T retry EXPLAIN (OPT, VERBOSE) SELECT DISTINCT j FROM tj WHERE j IS NULL ---- limit ├── columns: j:1 ├── cardinality: [0 - 1] ├── immutable ├── stats: [rows=1] ├── cost: 9.4 ├── key: () ├── fd: ()-->(1) ├── select │ ├── columns: j:1 │ ├── immutable │ ├── stats: [rows=1, distinct(1)=1, null(1)=1] │ ├── cost: 9.38 │ ├── fd: ()-->(1) │ ├── limit hint: 1.00 │ ├── scan tj │ │ ├── columns: j:1 │ │ ├── stats: [rows=5, distinct(1)=4, null(1)=1] │ │ ├── cost: 9.31 │ │ ├── limit hint: 5.00 │ │ └── prune: (1) │ └── filters │ └── j:1 IS NULL [outer=(1), immutable, constraints=(/1: [/NULL - /NULL]; tight), fd=()-->(1)] └── 1
pkg/sql/opt/exec/execbuilder/testdata/stats
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0001775714918039739, 0.00017272178956773132, 0.00016020341718103737, 0.0001733560930006206, 0.000004131753030378604 ]
{ "id": 11, "code_window": [ "\t}); err != nil {\n", "\t\tr.cancelOutputs(ctx, err)\n", "\t}\n", "\tif span != nil {\n", "\t\tfor _, s := range r.inputMetaInfo.StatsCollectors {\n", "\t\t\tspan.RecordStructured(s.GetStats())\n", "\t\t}\n", "\t\tif meta := execinfra.GetTraceDataAsMetadata(span); meta != nil {\n", "\t\t\tr.bufferedMeta = append(r.bufferedMeta, *meta)\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "keep" ], "after_edit": [ "\tif inputInitialized {\n", "\t\t// Retrieving stats and draining the metadata is only safe if the input\n", "\t\t// to the hash router was properly initialized.\n", "\t\tif span != nil {\n", "\t\t\tfor _, s := range r.inputMetaInfo.StatsCollectors {\n", "\t\t\t\tspan.RecordStructured(s.GetStats())\n", "\t\t\t}\n", "\t\t\tif meta := execinfra.GetTraceDataAsMetadata(span); meta != nil {\n", "\t\t\t\tr.bufferedMeta = append(r.bufferedMeta, *meta)\n", "\t\t\t}\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "replace", "edit_start_line_idx": 611 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colflow import ( "context" "sync" "sync/atomic" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colcontainer" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexechash" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/marusama/semaphore" ) // routerOutput is an interface implemented by router outputs. It exists for // easier test mocking of outputs. type routerOutput interface { execinfra.OpNode // initWithHashRouter passes a reference to the HashRouter that will be // pushing batches to this output. initWithHashRouter(*HashRouter) // addBatch adds the elements specified by the selection vector from batch // to the output. It returns whether or not the output changed its state to // blocked (see implementations). addBatch(context.Context, coldata.Batch) bool // cancel tells the output to stop producing batches. Optionally forwards an // error if not nil. cancel(context.Context, error) // forwardErr forwards an error to the output. The output should call // colexecerror.ExpectedError with this error on the next call to Next. // Calling forwardErr multiple times will result in the most recent error // overwriting the previous error. forwardErr(error) // resetForTests resets the routerOutput for a benchmark or test run. resetForTests(context.Context) } // getDefaultRouterOutputBlockedThreshold returns the number of unread values // buffered by the routerOutputOp after which the output is considered blocked. // It is a function rather than a variable so that in tests we could modify // coldata.BatchSize() (if it were a variable, then its value would be // evaluated before we set the desired batch size). func getDefaultRouterOutputBlockedThreshold() int { return coldata.BatchSize() * 2 } type routerOutputOpState int const ( // routerOutputOpRunning is the state in which routerOutputOp operates // normally. The router output transitions into routerOutputDoneAdding when // a zero-length batch was added or routerOutputOpDraining when it // encounters an error or the drain is requested. routerOutputOpRunning routerOutputOpState = iota // routerOutputDoneAdding is the state in which a zero-length was batch was // added to routerOutputOp and no more batches will be added. The router // output transitions to routerOutputOpDraining when the output is canceled // (either closed or the drain is requested). routerOutputDoneAdding // routerOutputOpDraining is the state in which routerOutputOp always // returns zero-length batches on calls to Next. 
routerOutputOpDraining ) // drainCoordinator is an interface that the HashRouter implements to coordinate // cancellation of all of its outputs in the case of an error and draining in // the case of graceful termination. // WARNING: No locks should be held when calling these methods, as the // HashRouter might call routerOutput methods (e.g. cancel) that attempt to // reacquire locks. type drainCoordinator interface { // encounteredError should be called when a routerOutput encounters an error. // This terminates execution. No locks should be held when calling this // method, since cancellation could occur. encounteredError(context.Context) // drainMeta should be called exactly once when the routerOutput moves to // draining. drainMeta() []execinfrapb.ProducerMetadata } type routerOutputOp struct { colexecop.InitHelper // input is a reference to our router. input execinfra.OpNode // drainCoordinator is a reference to the HashRouter to be able to notify it // if the output encounters an error or transitions to a draining state. drainCoordinator drainCoordinator types []*types.T // unblockedEventsChan is signaled when a routerOutput changes state from // blocked to unblocked. unblockedEventsChan chan<- struct{} mu struct { syncutil.Mutex state routerOutputOpState // forwardedErr is an error that was forwarded by the HashRouter. If set, // any subsequent calls to Next will return this error. forwardedErr error cond *sync.Cond // data is a SpillingQueue, a circular buffer backed by a disk queue. data *colexecutils.SpillingQueue numUnread int blocked bool } testingKnobs routerOutputOpTestingKnobs } func (o *routerOutputOp) ChildCount(verbose bool) int { return 1 } func (o *routerOutputOp) Child(nth int, verbose bool) execinfra.OpNode { if nth == 0 { return o.input } colexecerror.InternalError(errors.AssertionFailedf("invalid index %d", nth)) // This code is unreachable, but the compiler cannot infer that. return nil } var _ colexecop.Operator = &routerOutputOp{} type routerOutputOpTestingKnobs struct { // blockedThreshold is the number of buffered values above which we consider // a router output to be blocked. It defaults to // defaultRouterOutputBlockedThreshold but can be modified by tests to test // edge cases. blockedThreshold int // addBatchTestInducedErrorCb is called after any function call that could // produce an error if that error is nil. If the callback returns an error, // the router output overwrites the nil error with the returned error. // It is guaranteed that this callback will be called at least once during // normal execution. addBatchTestInducedErrorCb func() error // nextTestInducedErrorCb is called after any function call that could // produce an error if that error is nil. If the callback returns an error, // the router output overwrites the nil error with the returned error. // It is guaranteed that this callback will be called at least once during // normal execution. nextTestInducedErrorCb func() error } // routerOutputOpArgs are the arguments to newRouterOutputOp. All fields apart // from the testing knobs are optional. type routerOutputOpArgs struct { // All fields are required unless marked optional. types []*types.T // unlimitedAllocator should not have a memory limit. Pass in a soft // memoryLimit that will be respected instead. unlimitedAllocator *colmem.Allocator // memoryLimit acts as a soft limit to allow the router output to use disk // when it is exceeded. 
memoryLimit int64 diskAcc *mon.BoundAccount cfg colcontainer.DiskQueueCfg fdSemaphore semaphore.Semaphore // unblockedEventsChan must be a buffered channel. unblockedEventsChan chan<- struct{} testingKnobs routerOutputOpTestingKnobs } // newRouterOutputOp creates a new router output. func newRouterOutputOp(args routerOutputOpArgs) *routerOutputOp { if args.testingKnobs.blockedThreshold == 0 { args.testingKnobs.blockedThreshold = getDefaultRouterOutputBlockedThreshold() } o := &routerOutputOp{ types: args.types, unblockedEventsChan: args.unblockedEventsChan, testingKnobs: args.testingKnobs, } o.mu.cond = sync.NewCond(&o.mu) o.mu.data = colexecutils.NewSpillingQueue( &colexecutils.NewSpillingQueueArgs{ UnlimitedAllocator: args.unlimitedAllocator, Types: args.types, MemoryLimit: args.memoryLimit, DiskQueueCfg: args.cfg, FDSemaphore: args.fdSemaphore, DiskAcc: args.diskAcc, }, ) return o } func (o *routerOutputOp) Init(ctx context.Context) { o.InitHelper.Init(ctx) } // nextErrorLocked is a helper method that handles an error encountered in Next. func (o *routerOutputOp) nextErrorLocked(err error) { o.mu.state = routerOutputOpDraining o.maybeUnblockLocked() // Unlock the mutex, since the HashRouter will cancel all outputs. o.mu.Unlock() o.drainCoordinator.encounteredError(o.Ctx) o.mu.Lock() colexecerror.InternalError(err) } // Next returns the next coldata.Batch from the routerOutputOp. Note that Next // is designed for only one concurrent caller and will block until data is // ready. func (o *routerOutputOp) Next() coldata.Batch { o.mu.Lock() defer o.mu.Unlock() for o.mu.forwardedErr == nil && o.mu.state == routerOutputOpRunning && o.mu.data.Empty() { // Wait until there is data to read or the output is canceled. o.mu.cond.Wait() } if o.mu.forwardedErr != nil { colexecerror.ExpectedError(o.mu.forwardedErr) } if o.mu.state == routerOutputOpDraining { return coldata.ZeroBatch } b, err := o.mu.data.Dequeue(o.Ctx) if err == nil && o.testingKnobs.nextTestInducedErrorCb != nil { err = o.testingKnobs.nextTestInducedErrorCb() } if err != nil { o.nextErrorLocked(err) } o.mu.numUnread -= b.Length() if o.mu.numUnread <= o.testingKnobs.blockedThreshold { o.maybeUnblockLocked() } if b.Length() == 0 { if o.testingKnobs.nextTestInducedErrorCb != nil { if err := o.testingKnobs.nextTestInducedErrorCb(); err != nil { o.nextErrorLocked(err) } } // This is the last batch. closeLocked will set done to protect against // further calls to Next since this is allowed by the interface as well as // cleaning up and releasing possible disk infrastructure. o.closeLocked(o.Ctx) } return b } func (o *routerOutputOp) DrainMeta() []execinfrapb.ProducerMetadata { o.mu.Lock() o.mu.state = routerOutputOpDraining o.maybeUnblockLocked() o.mu.Unlock() return o.drainCoordinator.drainMeta() } func (o *routerOutputOp) initWithHashRouter(r *HashRouter) { o.input = r o.drainCoordinator = r } func (o *routerOutputOp) closeLocked(ctx context.Context) { o.mu.state = routerOutputOpDraining if err := o.mu.data.Close(ctx); err != nil { // This log message is Info instead of Warning because the flow will also // attempt to clean up the parent directory, so this failure might not have // any effect. log.Infof(ctx, "error closing vectorized hash router output, files may be left over: %s", err) } } // cancel wakes up a reader in Next if there is one and results in the output // returning zero length batches for every Next call after cancel. Note that // all accumulated data that hasn't been read will not be returned. 
func (o *routerOutputOp) cancel(ctx context.Context, err error) { o.mu.Lock() defer o.mu.Unlock() o.closeLocked(ctx) o.forwardErrLocked(err) // Some goroutine might be waiting on the condition variable, so wake it up. // Note that read goroutines check o.mu.done, so won't wait on the condition // variable after we unlock the mutex. o.mu.cond.Signal() } func (o *routerOutputOp) forwardErrLocked(err error) { if err != nil { o.mu.forwardedErr = err } } func (o *routerOutputOp) forwardErr(err error) { o.mu.Lock() defer o.mu.Unlock() o.forwardErrLocked(err) o.mu.cond.Signal() } // addBatch copies the batch (according to its selection vector) into an // internal buffer. Zero-length batch should be passed-in to indicate that no // more batches will be added. // TODO(asubiotto): We should explore pipelining addBatch if disk-spilling // performance becomes a concern. The main router goroutine will be writing to // disk as the code is written, meaning that we impact the performance of // writing rows to a fast output if we have to write to disk for a single // slow output. func (o *routerOutputOp) addBatch(ctx context.Context, batch coldata.Batch) bool { o.mu.Lock() defer o.mu.Unlock() switch o.mu.state { case routerOutputDoneAdding: colexecerror.InternalError(errors.AssertionFailedf("a batch was added to routerOutput in DoneAdding state")) case routerOutputOpDraining: // This output is draining, discard any data. return false } o.mu.numUnread += batch.Length() o.mu.data.Enqueue(ctx, batch) if o.testingKnobs.addBatchTestInducedErrorCb != nil { if err := o.testingKnobs.addBatchTestInducedErrorCb(); err != nil { colexecerror.InternalError(err) } } if batch.Length() == 0 { o.mu.state = routerOutputDoneAdding o.mu.cond.Signal() return false } stateChanged := false if o.mu.numUnread > o.testingKnobs.blockedThreshold && !o.mu.blocked { // The output is now blocked. o.mu.blocked = true stateChanged = true } o.mu.cond.Signal() return stateChanged } // maybeUnblockLocked unblocks the router output if it is in a blocked state. If the // output was previously in a blocked state, an event will be sent on // routerOutputOp.unblockedEventsChan. func (o *routerOutputOp) maybeUnblockLocked() { if o.mu.blocked { o.mu.blocked = false o.unblockedEventsChan <- struct{}{} } } // resetForTests resets the routerOutputOp for a test or benchmark run. func (o *routerOutputOp) resetForTests(ctx context.Context) { o.mu.Lock() defer o.mu.Unlock() o.mu.state = routerOutputOpRunning o.mu.forwardedErr = nil o.mu.data.Reset(ctx) o.mu.numUnread = 0 o.mu.blocked = false } // hashRouterDrainState is a state that specifically describes the hashRouter's // state in the draining process. This differs from its "general" state. For // example, a hash router can have drained and exited the Run method but still // be in hashRouterDrainStateRunning until somebody calls drainMeta. type hashRouterDrainState int const ( // hashRouterDrainStateRunning is the state that a hashRouter is in when // running normally (i.e. pulling and pushing batches). hashRouterDrainStateRunning = iota // hashRouterDrainStateRequested is the state that a hashRouter is in when // either all outputs have called drainMeta or an error was encountered by one // of the outputs. hashRouterDrainStateRequested // hashRouterDrainStateCompleted is the state that a hashRouter is in when // draining has completed. hashRouterDrainStateCompleted ) // HashRouter hashes values according to provided hash columns and computes a // destination for each row. 
These destinations are exposed as Operators // returned by the constructor. type HashRouter struct { colexecop.OneInputNode // inputMetaInfo contains all of the meta components that the hash router // is responsible for. Root field is exactly the same as OneInputNode.Input. inputMetaInfo colexecargs.OpWithMetaInfo // hashCols is a slice of indices of the columns used for hashing. hashCols []uint32 // One output for each stream. outputs []routerOutput // unblockedEventsChan is a channel shared between the HashRouter and its // outputs. outputs send events on this channel when they are unblocked by a // read. unblockedEventsChan <-chan struct{} numBlockedOutputs int bufferedMeta []execinfrapb.ProducerMetadata // atomics is shared state between the Run goroutine and any routerOutput // goroutines that call drainMeta. atomics struct { // drainState is the state the hashRouter is in. The Run goroutine should // only ever read these states, never set them. drainState int32 numDrainedOutputs int32 } // waitForMetadata is a channel that the last output to drain will read from // to pass on any metadata buffered through the Run goroutine. waitForMetadata chan []execinfrapb.ProducerMetadata // tupleDistributor is used to decide to which output a particular tuple // should be routed. tupleDistributor *colexechash.TupleHashDistributor } // NewHashRouter creates a new hash router that consumes coldata.Batches from // input and hashes each row according to hashCols to one of the outputs // returned as Operators. // The number of allocators provided will determine the number of outputs // returned. Note that each allocator must be unlimited, memory will be limited // by comparing memory use in the allocator with the memoryLimit argument. Each // Operator must have an independent allocator (this means that each allocator // should be linked to an independent mem account) as Operator.Next will usually // be called concurrently between different outputs. Similarly, each output // needs to have a separate disk account. func NewHashRouter( unlimitedAllocators []*colmem.Allocator, input colexecargs.OpWithMetaInfo, types []*types.T, hashCols []uint32, memoryLimit int64, diskQueueCfg colcontainer.DiskQueueCfg, fdSemaphore semaphore.Semaphore, diskAccounts []*mon.BoundAccount, ) (*HashRouter, []colexecop.DrainableOperator) { if diskQueueCfg.CacheMode != colcontainer.DiskQueueCacheModeDefault { colexecerror.InternalError(errors.Errorf("hash router instantiated with incompatible disk queue cache mode: %d", diskQueueCfg.CacheMode)) } outputs := make([]routerOutput, len(unlimitedAllocators)) outputsAsOps := make([]colexecop.DrainableOperator, len(unlimitedAllocators)) // unblockEventsChan is buffered to 2*numOutputs as we don't want the outputs // writing to it to block. // Unblock events only happen after a corresponding block event. Since these // are state changes and are done under lock (including the output sending // on the channel, which is why we want the channel to be buffered in the // first place), every time the HashRouter blocks an output, it *must* read // all unblock events preceding it since these *must* be on the channel. 
unblockEventsChan := make(chan struct{}, 2*len(unlimitedAllocators)) memoryLimitPerOutput := memoryLimit / int64(len(unlimitedAllocators)) for i := range unlimitedAllocators { op := newRouterOutputOp( routerOutputOpArgs{ types: types, unlimitedAllocator: unlimitedAllocators[i], memoryLimit: memoryLimitPerOutput, diskAcc: diskAccounts[i], cfg: diskQueueCfg, fdSemaphore: fdSemaphore, unblockedEventsChan: unblockEventsChan, }, ) outputs[i] = op outputsAsOps[i] = op } return newHashRouterWithOutputs(input, hashCols, unblockEventsChan, outputs), outputsAsOps } func newHashRouterWithOutputs( input colexecargs.OpWithMetaInfo, hashCols []uint32, unblockEventsChan <-chan struct{}, outputs []routerOutput, ) *HashRouter { r := &HashRouter{ OneInputNode: colexecop.NewOneInputNode(input.Root), inputMetaInfo: input, hashCols: hashCols, outputs: outputs, unblockedEventsChan: unblockEventsChan, // waitForMetadata is a buffered channel to avoid blocking if nobody will // read the metadata. waitForMetadata: make(chan []execinfrapb.ProducerMetadata, 1), tupleDistributor: colexechash.NewTupleHashDistributor(colexechash.DefaultInitHashValue, len(outputs)), } for i := range outputs { outputs[i].initWithHashRouter(r) } return r } // cancelOutputs cancels all outputs and forwards the given error to all of // them if non-nil. The only case where the error is not forwarded is if no // output could be canceled due to an error. In this case each output will // forward the error returned during cancellation. func (r *HashRouter) cancelOutputs(ctx context.Context, errToForward error) { for _, o := range r.outputs { if err := colexecerror.CatchVectorizedRuntimeError(func() { o.cancel(ctx, errToForward) }); err != nil { // If there was an error canceling this output, this error can be // forwarded to whoever is calling Next. o.forwardErr(err) } } } func (r *HashRouter) setDrainState(drainState hashRouterDrainState) { atomic.StoreInt32(&r.atomics.drainState, int32(drainState)) } func (r *HashRouter) getDrainState() hashRouterDrainState { return hashRouterDrainState(atomic.LoadInt32(&r.atomics.drainState)) } // Run runs the HashRouter. Batches are read from the input and pushed to an // output calculated by hashing columns. Cancel the given context to terminate // early. func (r *HashRouter) Run(ctx context.Context) { var span *tracing.Span ctx, span = execinfra.ProcessorSpan(ctx, "hash router") if span != nil { defer span.Finish() } // Since HashRouter runs in a separate goroutine, we want to be safe and // make sure that we catch errors in all code paths, so we wrap the whole // method with a catcher. Note that we also have "internal" catchers as // well for more fine-grained control of error propagation. if err := colexecerror.CatchVectorizedRuntimeError(func() { r.Input.Init(ctx) var done bool processNextBatch := func() { done = r.processNextBatch(ctx) } for { if r.getDrainState() != hashRouterDrainStateRunning { break } // Check for cancellation. select { case <-ctx.Done(): r.cancelOutputs(ctx, ctx.Err()) return default: } // Read all the routerOutput state changes that have happened since the // last iteration. for moreToRead := true; moreToRead; { select { case <-r.unblockedEventsChan: r.numBlockedOutputs-- default: // No more routerOutput state changes to read without blocking. moreToRead = false } } if r.numBlockedOutputs == len(r.outputs) { // All outputs are blocked, wait until at least one output is unblocked. 
select { case <-r.unblockedEventsChan: r.numBlockedOutputs-- case <-ctx.Done(): r.cancelOutputs(ctx, ctx.Err()) return } } if err := colexecerror.CatchVectorizedRuntimeError(processNextBatch); err != nil { r.cancelOutputs(ctx, err) return } if done { // The input was done and we have notified the routerOutputs that there // is no more data. return } } }); err != nil { r.cancelOutputs(ctx, err) } if span != nil { for _, s := range r.inputMetaInfo.StatsCollectors { span.RecordStructured(s.GetStats()) } if meta := execinfra.GetTraceDataAsMetadata(span); meta != nil { r.bufferedMeta = append(r.bufferedMeta, *meta) } } r.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...) // Non-blocking send of metadata so that one of the outputs can return it // in DrainMeta. r.waitForMetadata <- r.bufferedMeta close(r.waitForMetadata) r.inputMetaInfo.ToClose.CloseAndLogOnErr(ctx, "hash router") } // processNextBatch reads the next batch from its input, hashes it and adds // each column to its corresponding output, returning whether the input is // done. func (r *HashRouter) processNextBatch(ctx context.Context) bool { b := r.Input.Next() n := b.Length() if n == 0 { // Done. Push an empty batch to outputs to tell them the data is done as // well. for _, o := range r.outputs { o.addBatch(ctx, b) } return true } // It is ok that we call Init() on every batch since all calls except for // the first one are noops. r.tupleDistributor.Init(ctx) selections := r.tupleDistributor.Distribute(b, r.hashCols) for i, o := range r.outputs { if len(selections[i]) > 0 { b.SetSelection(true) copy(b.Selection(), selections[i]) b.SetLength(len(selections[i])) if o.addBatch(ctx, b) { // This batch blocked the output. r.numBlockedOutputs++ } } } return false } // resetForTests resets the HashRouter for a test or benchmark run. func (r *HashRouter) resetForTests(ctx context.Context) { if i, ok := r.Input.(colexecop.Resetter); ok { i.Reset(ctx) } r.setDrainState(hashRouterDrainStateRunning) r.waitForMetadata = make(chan []execinfrapb.ProducerMetadata, 1) r.atomics.numDrainedOutputs = 0 r.bufferedMeta = nil r.numBlockedOutputs = 0 for moreToRead := true; moreToRead; { select { case <-r.unblockedEventsChan: default: moreToRead = false } } for _, o := range r.outputs { o.resetForTests(ctx) } } func (r *HashRouter) encounteredError(ctx context.Context) { // Once one output returns an error the hash router needs to stop running // and drain its input. r.setDrainState(hashRouterDrainStateRequested) // cancel all outputs. The Run goroutine will eventually realize that the // HashRouter is done and exit without draining. r.cancelOutputs(ctx, nil /* errToForward */) } func (r *HashRouter) drainMeta() []execinfrapb.ProducerMetadata { if int(atomic.AddInt32(&r.atomics.numDrainedOutputs, 1)) != len(r.outputs) { return nil } // All outputs have been drained, return any buffered metadata to the last // output to call drainMeta. r.setDrainState(hashRouterDrainStateRequested) meta := <-r.waitForMetadata r.setDrainState(hashRouterDrainStateCompleted) return meta }
pkg/sql/colflow/routers.go
1
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.9977987408638, 0.022952497005462646, 0.0001602801203262061, 0.0002300846972502768, 0.13251210749149323 ]
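The two hunks above (ids 10 and 11) describe a single change to pkg/sql/colflow/routers.go: an inputInitialized flag is set right after r.Input.Init(ctx) succeeds, and the stats-recording and metadata-draining block at the end of HashRouter.Run only executes when that flag is true, since (per the hunk's own comment) retrieving stats and draining metadata is only safe if the input was properly initialized. The Go program below is a minimal, self-contained sketch of that guard pattern; metaSource and run are invented names for illustration, not the real colflow types.

package main

import "fmt"

// metaSource stands in for anything whose DrainMeta is only safe to call
// after the input has been initialized.
type metaSource struct{ name string }

func (m metaSource) DrainMeta() []string {
	return []string{m.name + ": drained"}
}

// run mirrors the shape of HashRouter.Run: initialize the input inside an
// error-catching closure, remember whether initialization happened, and only
// drain metadata when it did.
func run(initInput func() error, sources []metaSource) (buffered []string) {
	inputInitialized := false
	if err := func() error {
		if err := initInput(); err != nil {
			return err // initialization failed; the flag stays false
		}
		inputInitialized = true
		// ... the main batch-routing loop would run here ...
		return nil
	}(); err != nil {
		fmt.Println("error:", err)
	}
	if inputInitialized {
		// Draining is only safe once the input was properly initialized.
		for _, s := range sources {
			buffered = append(buffered, s.DrainMeta()...)
		}
	}
	return buffered
}

func main() {
	srcs := []metaSource{{name: "input"}}
	fmt.Println(run(func() error { return nil }, srcs))                       // [input: drained]
	fmt.Println(run(func() error { return fmt.Errorf("init failed") }, srcs)) // [] — no draining when Init never ran
}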
{ "id": 11, "code_window": [ "\t}); err != nil {\n", "\t\tr.cancelOutputs(ctx, err)\n", "\t}\n", "\tif span != nil {\n", "\t\tfor _, s := range r.inputMetaInfo.StatsCollectors {\n", "\t\t\tspan.RecordStructured(s.GetStats())\n", "\t\t}\n", "\t\tif meta := execinfra.GetTraceDataAsMetadata(span); meta != nil {\n", "\t\t\tr.bufferedMeta = append(r.bufferedMeta, *meta)\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "keep" ], "after_edit": [ "\tif inputInitialized {\n", "\t\t// Retrieving stats and draining the metadata is only safe if the input\n", "\t\t// to the hash router was properly initialized.\n", "\t\tif span != nil {\n", "\t\t\tfor _, s := range r.inputMetaInfo.StatsCollectors {\n", "\t\t\t\tspan.RecordStructured(s.GetStats())\n", "\t\t\t}\n", "\t\t\tif meta := execinfra.GetTraceDataAsMetadata(span); meta != nil {\n", "\t\t\t\tr.bufferedMeta = append(r.bufferedMeta, *meta)\n", "\t\t\t}\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "replace", "edit_start_line_idx": 611 }
statement error pq: invalid locale bad_locale: language: subtag "locale" is well-formed but unknown SELECT 'a' COLLATE bad_locale statement error pq: unsupported comparison operator: <collatedstring{en}> = <string> SELECT 'A' COLLATE en = 'a' statement error pq: unsupported comparison operator: <collatedstring{en}> = <collatedstring{de}> SELECT 'A' COLLATE en = 'a' COLLATE de statement error pq: unsupported comparison operator: \('a' COLLATE en_u_ks_level1\) IN \('A' COLLATE en_u_ks_level1, 'b' COLLATE en\): expected 'b' COLLATE en to be of type collatedstring\{en_u_ks_level1\}, found type collatedstring\{en\} SELECT ('a' COLLATE en_u_ks_level1) IN ('A' COLLATE en_u_ks_level1, 'b' COLLATE en) statement error pq: tuples \('a' COLLATE en_u_ks_level1, 'a' COLLATE en\), \('A' COLLATE en, 'B' COLLATE en\) are not comparable at index 1: unsupported comparison operator: <collatedstring\{en_u_ks_level1\}> < <collatedstring\{en\}> SELECT ('a' COLLATE en_u_ks_level1, 'a' COLLATE en) < ('A' COLLATE en, 'B' COLLATE en) query T SELECT 'A' COLLATE en ---- A query T SELECT ('A' COLLATE de) COLLATE en ---- A query T SELECT NAME 'A' COLLATE en ---- A query T SELECT (NAME 'A' COLLATE de) COLLATE en ---- A query T SELECT NULL COLLATE en ---- NULL query B SELECT 'a' COLLATE en < ('B' COLLATE de) COLLATE en ---- true query B SELECT (1, 'a' COLLATE en) < (1, 'B' COLLATE en) ---- true query B SELECT ('a' COLLATE en_u_ks_level1, 'a' COLLATE en) < ('A' COLLATE en_u_ks_level1, 'B' COLLATE en) ---- true query B SELECT 'A' COLLATE en_u_ks_level1 = 'a' COLLATE en_u_ks_level1 ---- true query B SELECT 'A' COLLATE en_u_ks_level1 <> 'a' COLLATE en_u_ks_level1 ---- false query B SELECT 'A' COLLATE en_u_ks_level1 < 'a' COLLATE en_u_ks_level1 ---- false query B SELECT 'A' COLLATE en_u_ks_level1 >= 'a' COLLATE en_u_ks_level1 ---- true query B SELECT 'A' COLLATE en_u_ks_level1 <= 'a' COLLATE en_u_ks_level1 ---- true query B SELECT 'A' COLLATE en_u_ks_level1 > 'a' COLLATE en_u_ks_level1 ---- false query B SELECT 'a' COLLATE en_u_ks_level1 = 'B' COLLATE en_u_ks_level1 ---- false query B SELECT 'a' COLLATE en_u_ks_level1 <> 'B' COLLATE en_u_ks_level1 ---- true query B SELECT 'a' COLLATE en_u_ks_level1 < 'B' COLLATE en_u_ks_level1 ---- true query B SELECT 'a' COLLATE en_u_ks_level1 >= 'B' COLLATE en_u_ks_level1 ---- false query B SELECT 'a' COLLATE en_u_ks_level1 <= 'B' COLLATE en_u_ks_level1 ---- true query B SELECT 'a' COLLATE en_u_ks_level1 > 'B' COLLATE en_u_ks_level1 ---- false query B SELECT 'B' COLLATE en_u_ks_level1 = 'A' COLLATE en_u_ks_level1 ---- false query B SELECT 'B' COLLATE en_u_ks_level1 <> 'A' COLLATE en_u_ks_level1 ---- true query B SELECT 'B' COLLATE en_u_ks_level1 < 'A' COLLATE en_u_ks_level1 ---- false query B SELECT 'B' COLLATE en_u_ks_level1 >= 'A' COLLATE en_u_ks_level1 ---- true query B SELECT 'B' COLLATE en_u_ks_level1 <= 'A' COLLATE en_u_ks_level1 ---- false query B SELECT 'B' COLLATE en_u_ks_level1 > 'A' COLLATE en_u_ks_level1 ---- true query B SELECT ('a' COLLATE en_u_ks_level1) IN ('A' COLLATE en_u_ks_level1, 'b' COLLATE en_u_ks_level1) ---- true query B SELECT ('a' COLLATE en_u_ks_level1) NOT IN ('A' COLLATE en_u_ks_level1, 'b' COLLATE en_u_ks_level1) ---- false query B SELECT ('a' COLLATE en) IN ('A' COLLATE en, 'b' COLLATE en) ---- false query B SELECT ('a' COLLATE en) NOT IN ('A' COLLATE en, 'b' COLLATE en) ---- true query B SELECT 'Fussball' COLLATE de = 'Fußball' COLLATE de ---- false query B SELECT 'Fussball' COLLATE de_u_ks_level1 = 'Fußball' COLLATE de_u_ks_level1 ---- true query B SELECT 
'ü' COLLATE da < 'x' COLLATE da ---- false query B SELECT 'ü' COLLATE de < 'x' COLLATE de ---- true statement error syntax error: invalid locale e: language: tag is not well-formed CREATE TABLE e1 ( a STRING COLLATE e ) statement error multiple COLLATE declarations for column "a" CREATE TABLE e2 ( a STRING COLLATE en COLLATE de ) statement error COLLATE declaration for non-string-typed column "a" CREATE TABLE e3 ( a INT COLLATE en ) statement ok CREATE TABLE t ( a STRING COLLATE en ) query TT SHOW CREATE TABLE t ---- t CREATE TABLE public.t ( a STRING COLLATE en NULL, rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(), CONSTRAINT "primary" PRIMARY KEY (rowid ASC), FAMILY "primary" (a, rowid) ) statement ok INSERT INTO t VALUES ('A' COLLATE en), ('B' COLLATE en), ('a' COLLATE en), ('b' COLLATE en), ('x' COLLATE en), ('ü' COLLATE en) statement error value type collatedstring{de} doesn't match type collatedstring{en} of column "a" INSERT INTO t VALUES ('X' COLLATE de) query T SELECT a FROM t ORDER BY t.a ---- a A b B ü x query T SELECT a FROM t ORDER BY t.a COLLATE da ---- a A b B x ü query T SELECT a FROM t WHERE a = 'A' COLLATE en; ---- A query T SELECT 'a' COLLATE en::STRING || 'b' ---- ab query T SELECT 'a🐛b🏠c' COLLATE en::VARCHAR(3) ---- a🐛b query B SELECT 't' COLLATE en::BOOLEAN ---- true query I SELECT '42' COLLATE en::INTEGER ---- 42 query R SELECT '42.0' COLLATE en::FLOAT ---- 42 query R SELECT '42.0' COLLATE en::DECIMAL ---- 42.0 query T SELECT 'a' COLLATE en::BYTES ---- a query T SELECT '2017-01-10 16:05:50.734049+00:00' COLLATE en::TIMESTAMP ---- 2017-01-10 16:05:50.734049 +0000 +0000 query T SELECT '2017-01-10 16:05:50.734049+00:00' COLLATE en::TIMESTAMPTZ ---- 2017-01-10 16:05:50.734049 +0000 UTC query T SELECT '40 days' COLLATE en::INTERVAL ---- 40 days statement ok CREATE TABLE foo(a STRING COLLATE en_u_ks_level2) statement ok PREPARE x AS INSERT INTO foo VALUES ($1 COLLATE en_u_ks_level2) RETURNING a query T EXECUTE x(NULL) ---- NULL query T SELECT a FROM foo ---- NULL # Regression test for #24449 statement ok INSERT INTO foo VALUES ('aBcD' COLLATE en_u_ks_level2) query T SELECT * FROM foo WHERE a = 'aBcD' COLLATE en_u_ks_level2 ---- aBcD query T SELECT * FROM foo WHERE a = 'abcd' COLLATE en_u_ks_level2 ---- aBcD # Test quoted collations. statement ok CREATE TABLE quoted_coll ( a STRING COLLATE "en", b STRING COLLATE "en_US", c STRING COLLATE "en-Us" DEFAULT ('c' COLLATE "en-Us"), d STRING COLLATE "en-u-ks-level1" DEFAULT ('d'::STRING COLLATE "en-u-ks-level1"), e STRING COLLATE "en-us" AS (a COLLATE "en-us") STORED, FAMILY "primary" (a, b, c, d, e, rowid) ) query TT SHOW CREATE TABLE quoted_coll ---- quoted_coll CREATE TABLE public.quoted_coll ( a STRING COLLATE en NULL, b STRING COLLATE en_US NULL, c STRING COLLATE en_US NULL DEFAULT 'c':::STRING COLLATE en_US, d STRING COLLATE en_u_ks_level1 NULL DEFAULT 'd':::STRING COLLATE en_u_ks_level1, e STRING COLLATE en_US NULL AS (a COLLATE en_US) STORED, rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(), CONSTRAINT "primary" PRIMARY KEY (rowid ASC), FAMILY "primary" (a, b, c, d, e, rowid) ) # Regression for #46570. statement ok CREATE TABLE t46570(c0 BOOL, c1 STRING COLLATE en); CREATE INDEX ON t46570(rowid, c1 DESC); INSERT INTO t46570(c1, rowid) VALUES('' COLLATE en, 0); UPSERT INTO t46570(rowid) VALUES (0), (1) # Test trailing spaces are truncated for char types. 
subtest regression_50015 query T SELECT t FROM ( VALUES ('hello '::CHAR(100) COLLATE en_US), ('hello t'::CHAR(100) COLLATE en_US), ('hello '::STRING::CHAR(100) COLLATE en_US), ('hello t'::STRING::CHAR(100) COLLATE en_US) ) g(t) ---- hello hello t hello hello t statement ok CREATE TABLE t50015(id int PRIMARY KEY, a char(100), b char(100) COLLATE en); INSERT INTO t50015 VALUES (1, 'hello', 'hello' COLLATE en), (2, 'hello ', 'hello ' COLLATE en), (3, repeat('hello ', 2), repeat('hello ', 2) COLLATE en) query ITITI SELECT id, a, length(a), b, length(b::string) FROM t50015 ORDER BY id ASC ---- 1 hello 5 hello 5 2 hello 5 hello 5 3 hello hello 11 hello hello 11 statement ok CREATE TABLE t54989( no_collation_str text, no_collation_str_array text[], collated_str text COLLATE en, default_collation text COLLATE "default" ) query TT SELECT a.attname AS column_name, collname AS collation FROM pg_attribute a LEFT JOIN pg_collation co ON a.attcollation = co.oid JOIN pg_class c ON a.attrelid = c.oid JOIN pg_namespace n ON c.relnamespace = n.oid WHERE c.relname = 't54989' ORDER BY column_name ---- collated_str en default_collation default no_collation_str default no_collation_str_array default rowid NULL # Regression test for collated string lowercase and hyphen/underscore equality. subtest nocase_strings statement ok CREATE TABLE nocase_strings (s STRING COLLATE "en-US-u-ks-level2"); statement ok INSERT INTO nocase_strings VALUES ('Aaa' COLLATE "en-US-u-ks-level2"), ('Bbb' COLLATE "en-US-u-ks-level2"); query T SELECT s FROM nocase_strings WHERE s = ('bbb' COLLATE "en-US-u-ks-level2") ---- Bbb query T SELECT s FROM nocase_strings WHERE s = ('bbb' COLLATE "en-us-u-ks-level2") ---- Bbb query T SELECT s FROM nocase_strings WHERE s = ('bbb' COLLATE "en_US_u_ks_level2") ---- Bbb statement ok CREATE TABLE collation_name_case (s STRING COLLATE en_us_u_ks_level2); query TT SHOW CREATE TABLE collation_name_case ---- collation_name_case CREATE TABLE public.collation_name_case ( s STRING COLLATE en_US_u_ks_level2 NULL, rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(), CONSTRAINT "primary" PRIMARY KEY (rowid ASC), FAMILY "primary" (s, rowid) ) statement error invalid locale en-US-u-ks-le"vel2: language: tag is not well-formed CREATE TABLE nocase_strings (s STRING COLLATE "en-US-u-ks-le""vel2"); statement error syntax error: invalid locale en-US-u-ks-le: language: tag is not well-formed CREATE TABLE nocase_strings (s STRING COLLATE "en-US-u-ks-le"vel2"); statement error invalid locale en-us-u-ks-l"evel2: language: tag is not well-formed SELECT s FROM nocase_strings WHERE s = ('bbb' COLLATE "en-us-u-ks-l""evel2") statement error at or near "evel2": syntax error SELECT s FROM nocase_strings WHERE s = ('bbb' COLLATE "en-us-u-ks-l"evel2") statement error DEFAULT collations are not supported SELECT 'default collate'::text collate "default" statement ok CREATE TABLE nocase_strings2 ( i INT, s STRING COLLATE "en-US-u-ks-level2" ); statement ok INSERT INTO nocase_strings2 VALUES (1, 'Aaa' COLLATE "en-US-u-ks-level2"), (2, 'Bbb' COLLATE "en-US-u-ks-level2"); query T SELECT s FROM nocase_strings2 WHERE s = ('bbb' COLLATE "en-US-u-ks-level2") ---- Bbb query T SELECT s FROM nocase_strings2 WHERE s = ('bbb' COLLATE "en-us-u-ks-level2") ---- Bbb query T SELECT s FROM nocase_strings2 WHERE s = ('bbb' COLLATE "en_US_u_ks_level2") ---- Bbb
pkg/sql/logictest/testdata/logic_test/collatedstring
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0004047775291837752, 0.00017615653632674366, 0.00016256936942227185, 0.0001730398362269625, 0.00003261891470174305 ]
{ "id": 11, "code_window": [ "\t}); err != nil {\n", "\t\tr.cancelOutputs(ctx, err)\n", "\t}\n", "\tif span != nil {\n", "\t\tfor _, s := range r.inputMetaInfo.StatsCollectors {\n", "\t\t\tspan.RecordStructured(s.GetStats())\n", "\t\t}\n", "\t\tif meta := execinfra.GetTraceDataAsMetadata(span); meta != nil {\n", "\t\t\tr.bufferedMeta = append(r.bufferedMeta, *meta)\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "keep" ], "after_edit": [ "\tif inputInitialized {\n", "\t\t// Retrieving stats and draining the metadata is only safe if the input\n", "\t\t// to the hash router was properly initialized.\n", "\t\tif span != nil {\n", "\t\t\tfor _, s := range r.inputMetaInfo.StatsCollectors {\n", "\t\t\t\tspan.RecordStructured(s.GetStats())\n", "\t\t\t}\n", "\t\t\tif meta := execinfra.GetTraceDataAsMetadata(span); meta != nil {\n", "\t\t\t\tr.bufferedMeta = append(r.bufferedMeta, *meta)\n", "\t\t\t}\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "replace", "edit_start_line_idx": 611 }
embedded.go -diff
pkg/cmd/roachprod/vm/aws/.gitattributes
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.0001768243673723191, 0.0001768243673723191, 0.0001768243673723191, 0.0001768243673723191, 0 ]
{ "id": 11, "code_window": [ "\t}); err != nil {\n", "\t\tr.cancelOutputs(ctx, err)\n", "\t}\n", "\tif span != nil {\n", "\t\tfor _, s := range r.inputMetaInfo.StatsCollectors {\n", "\t\t\tspan.RecordStructured(s.GetStats())\n", "\t\t}\n", "\t\tif meta := execinfra.GetTraceDataAsMetadata(span); meta != nil {\n", "\t\t\tr.bufferedMeta = append(r.bufferedMeta, *meta)\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "keep" ], "after_edit": [ "\tif inputInitialized {\n", "\t\t// Retrieving stats and draining the metadata is only safe if the input\n", "\t\t// to the hash router was properly initialized.\n", "\t\tif span != nil {\n", "\t\t\tfor _, s := range r.inputMetaInfo.StatsCollectors {\n", "\t\t\t\tspan.RecordStructured(s.GetStats())\n", "\t\t\t}\n", "\t\t\tif meta := execinfra.GetTraceDataAsMetadata(span); meta != nil {\n", "\t\t\t\tr.bufferedMeta = append(r.bufferedMeta, *meta)\n", "\t\t\t}\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "replace", "edit_start_line_idx": 611 }
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "partitionccl", srcs = ["partition.go"], importpath = "github.com/cockroachdb/cockroach/pkg/ccl/partitionccl", visibility = ["//visibility:public"], deps = [ "//pkg/ccl/utilccl", "//pkg/clusterversion", "//pkg/settings/cluster", "//pkg/sql", "//pkg/sql/catalog", "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/schemaexpr", "//pkg/sql/catalog/tabledesc", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/rowenc", "//pkg/sql/sem/tree", "//pkg/sql/types", "//pkg/util/encoding", "//pkg/util/errorutil/unimplemented", "@com_github_cockroachdb_errors//:errors", ], ) go_test( name = "partitionccl_test", size = "medium", srcs = [ "alter_primary_key_test.go", "drop_test.go", "main_test.go", "partition_test.go", "zone_test.go", ], embed = [":partitionccl"], deps = [ "//pkg/base", "//pkg/ccl/importccl", "//pkg/ccl/storageccl", "//pkg/ccl/testutilsccl", "//pkg/ccl/utilccl", "//pkg/config", "//pkg/config/zonepb", "//pkg/jobs", "//pkg/jobs/jobspb", "//pkg/keys", "//pkg/kv/kvserver", "//pkg/roachpb", "//pkg/security", "//pkg/security/securitytest", "//pkg/server", "//pkg/settings/cluster", "//pkg/sql", "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkv", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/tabledesc", "//pkg/sql/gcjob", "//pkg/sql/parser", "//pkg/sql/randgen", "//pkg/sql/rowenc", "//pkg/sql/sem/tree", "//pkg/sql/tests", "//pkg/sql/types", "//pkg/testutils", "//pkg/testutils/serverutils", "//pkg/testutils/skip", "//pkg/testutils/sqlutils", "//pkg/testutils/testcluster", "//pkg/util/encoding", "//pkg/util/hlc", "//pkg/util/leaktest", "//pkg/util/log", "//pkg/util/protoutil", "//pkg/util/randutil", "//pkg/util/uuid", "@com_github_cockroachdb_errors//:errors", "@com_github_gogo_protobuf//proto", "@in_gopkg_yaml_v2//:yaml_v2", ], )
pkg/ccl/partitionccl/BUILD.bazel
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00017907148867379874, 0.0001741645100992173, 0.00017048364679794759, 0.00017338104953523725, 0.0000029307805107237073 ]
{ "id": 12, "code_window": [ "\t\t}\n", "\t}\n" ], "labels": [ "add", "keep" ], "after_edit": [ "\t\tr.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...)\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "add", "edit_start_line_idx": 618 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colrpc import ( "bytes" "context" "fmt" "io" "sync/atomic" "time" "github.com/cockroachdb/cockroach/pkg/col/colserde" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/cockroachdb/logtags" ) // flowStreamClient is a utility interface used to mock out the RPC layer. type flowStreamClient interface { Send(*execinfrapb.ProducerMessage) error Recv() (*execinfrapb.ConsumerSignal, error) CloseSend() error } // Outbox is used to push data from local flows to a remote endpoint. Run may // be called with the necessary information to establish a connection to a // given remote endpoint. type Outbox struct { colexecop.OneInputNode typs []*types.T converter *colserde.ArrowBatchConverter serializer *colserde.RecordBatchSerializer // draining is an atomic that represents whether the Outbox is draining. draining uint32 metadataSources colexecop.MetadataSources // closers is a slice of Closers that need to be Closed on termination. closers colexecop.Closers scratch struct { buf *bytes.Buffer msg *execinfrapb.ProducerMessage } span *tracing.Span // getStats, when non-nil, returns all of the execution statistics of the // operators that are in the same tree as this Outbox. The stats will be // added into the span as Structured payload and returned to the gateway as // execinfrapb.ProducerMetadata. getStats func() []*execinfrapb.ComponentStats // A copy of Run's caller ctx, with no StreamID tag. // Used to pass a clean context to the input.Next. runnerCtx context.Context } // NewOutbox creates a new Outbox. // - getStats, when non-nil, returns all of the execution statistics of the // operators that are in the same tree as this Outbox. func NewOutbox( allocator *colmem.Allocator, input colexecop.Operator, typs []*types.T, getStats func() []*execinfrapb.ComponentStats, metadataSources []colexecop.MetadataSource, toClose []colexecop.Closer, ) (*Outbox, error) { c, err := colserde.NewArrowBatchConverter(typs) if err != nil { return nil, err } s, err := colserde.NewRecordBatchSerializer(typs) if err != nil { return nil, err } o := &Outbox{ // Add a deselector as selection vectors are not serialized (nor should they // be). 
OneInputNode: colexecop.NewOneInputNode(colexecutils.NewDeselectorOp(allocator, input, typs)), typs: typs, converter: c, serializer: s, getStats: getStats, metadataSources: metadataSources, closers: toClose, } o.scratch.buf = &bytes.Buffer{} o.scratch.msg = &execinfrapb.ProducerMessage{} return o, nil } func (o *Outbox) close(ctx context.Context) { o.closers.CloseAndLogOnErr(ctx, "outbox") } // Run starts an outbox by connecting to the provided node and pushing // coldata.Batches over the stream after sending a header with the provided flow // and stream ID. Note that an extra goroutine is spawned so that Recv may be // called concurrently wrt the Send goroutine to listen for drain signals. // If an io.EOF is received while sending, the outbox will cancel all components // from the same tree as the outbox. // If non-io.EOF is received while sending, the outbox will call flowCtxCancel // to shutdown all parts of the flow on this node. // If an error is encountered that cannot be sent over the stream, the error // will be logged but not returned. // There are several ways the bidirectional FlowStream RPC may terminate. // 1) Execution is finished. In this case, the upstream operator signals // termination by returning a zero-length batch. The Outbox will drain its // metadata sources, send the metadata, and then call CloseSend on the // stream. The Outbox will wait until its Recv goroutine receives a non-nil // error to not leak resources. // 2) A cancellation happened. This can come from the provided context or the // remote reader. Refer to tests for expected behavior. // 3) A drain signal was received from the server (consumer). In this case, the // Outbox goes through the same steps as 1). func (o *Outbox) Run( ctx context.Context, dialer execinfra.Dialer, nodeID roachpb.NodeID, flowID execinfrapb.FlowID, streamID execinfrapb.StreamID, flowCtxCancel context.CancelFunc, connectionTimeout time.Duration, ) { // Derive a child context so that we can cancel all components rooted in // this outbox. var outboxCtxCancel context.CancelFunc ctx, outboxCtxCancel = context.WithCancel(ctx) // Calling outboxCtxCancel is not strictly necessary, but we do it just to // be safe. defer outboxCtxCancel() ctx, o.span = execinfra.ProcessorSpan(ctx, "outbox") if o.span != nil { defer o.span.Finish() } o.runnerCtx = ctx ctx = logtags.AddTag(ctx, "streamID", streamID) log.VEventf(ctx, 2, "Outbox Dialing %s", nodeID) var stream execinfrapb.DistSQL_FlowStreamClient if err := func() error { conn, err := execinfra.GetConnForOutbox(ctx, dialer, nodeID, connectionTimeout) if err != nil { log.Warningf( ctx, "Outbox Dial connection error, distributed query will fail: %+v", err, ) return err } client := execinfrapb.NewDistSQLClient(conn) stream, err = client.FlowStream(ctx) if err != nil { log.Warningf( ctx, "Outbox FlowStream connection error, distributed query will fail: %+v", err, ) return err } log.VEvent(ctx, 2, "Outbox sending header") // Send header message to establish the remote server (consumer). if err := stream.Send( &execinfrapb.ProducerMessage{Header: &execinfrapb.ProducerHeader{FlowID: flowID, StreamID: streamID}}, ); err != nil { log.Warningf( ctx, "Outbox Send header error, distributed query will fail: %+v", err, ) return err } return nil }(); err != nil { // error during stream set up. 
o.close(ctx) return } log.VEvent(ctx, 2, "Outbox starting normal operation") o.runWithStream(ctx, stream, flowCtxCancel, outboxCtxCancel) log.VEvent(ctx, 2, "Outbox exiting") } // handleStreamErr is a utility method used to handle an error when calling // a method on a flowStreamClient. If err is an io.EOF, outboxCtxCancel is // called, for all other error flowCtxCancel is. The given error is logged with // the associated opName. func (o *Outbox) handleStreamErr( ctx context.Context, opName string, err error, flowCtxCancel, outboxCtxCancel context.CancelFunc, ) { if err == io.EOF { if log.V(1) { log.Infof(ctx, "Outbox calling outboxCtxCancel after %s EOF", opName) } outboxCtxCancel() } else { log.Warningf(ctx, "Outbox calling flowCtxCancel after %s connection error: %+v", opName, err) flowCtxCancel() } } func (o *Outbox) moveToDraining(ctx context.Context, reason string) { if atomic.CompareAndSwapUint32(&o.draining, 0, 1) { log.VEventf(ctx, 2, "Outbox moved to draining (%s)", reason) } } // sendBatches reads from the Outbox's input in a loop and sends the // coldata.Batches over the stream. A boolean is returned, indicating whether // execution completed gracefully (either received a zero-length batch or a // drain signal) as well as an error which is non-nil if an error was // encountered AND the error should be sent over the stream as metadata. The for // loop continues iterating until one of the following conditions becomes true: // 1) A zero-length batch is received from the input. This indicates graceful // termination. true, nil is returned. // 2) Outbox.draining is observed to be true. This is also considered graceful // termination. true, nil is returned. // 3) An error unrelated to the stream occurs (e.g. while deserializing a // coldata.Batch). false, err is returned. This err should be sent over the // stream as metadata. // 4) An error related to the stream occurs. In this case, the error is logged // but not returned, as there is no way to propagate this error anywhere // meaningful. false, nil is returned. // NOTE: if non-io.EOF error is encountered (indicating ungraceful shutdown // of the stream), flowCtxCancel will be called. If an io.EOF is encountered // (indicating a graceful shutdown initiated by the remote Inbox), // outboxCtxCancel will be called. func (o *Outbox) sendBatches( ctx context.Context, stream flowStreamClient, flowCtxCancel, outboxCtxCancel context.CancelFunc, ) (terminatedGracefully bool, errToSend error) { if o.runnerCtx == nil { // In the non-testing path, runnerCtx has been set in Run() method; // however, the tests might use runWithStream() directly in which case // runnerCtx will remain unset, so we have this check. o.runnerCtx = ctx } errToSend = colexecerror.CatchVectorizedRuntimeError(func() { o.Input.Init(o.runnerCtx) for { if atomic.LoadUint32(&o.draining) == 1 { terminatedGracefully = true return } batch := o.Input.Next() n := batch.Length() if n == 0 { terminatedGracefully = true return } o.scratch.buf.Reset() d, err := o.converter.BatchToArrow(batch) if err != nil { colexecerror.InternalError(errors.Wrap(err, "Outbox BatchToArrow data serialization error")) } if _, _, err := o.serializer.Serialize(o.scratch.buf, d, n); err != nil { colexecerror.InternalError(errors.Wrap(err, "Outbox Serialize data error")) } o.scratch.msg.Data.RawBytes = o.scratch.buf.Bytes() // o.scratch.msg can be reused as soon as Send returns since it returns as // soon as the message is written to the control buffer. 
The message is // marshaled (bytes are copied) before writing. if err := stream.Send(o.scratch.msg); err != nil { o.handleStreamErr(ctx, "Send (batches)", err, flowCtxCancel, outboxCtxCancel) return } } }) return terminatedGracefully, errToSend } // sendMetadata drains the Outbox.metadataSources and sends the metadata over // the given stream, returning the Send error, if any. sendMetadata also sends // errToSend as metadata if non-nil. func (o *Outbox) sendMetadata(ctx context.Context, stream flowStreamClient, errToSend error) error { msg := &execinfrapb.ProducerMessage{} if errToSend != nil { log.VEventf(ctx, 1, "Outbox sending an error as metadata: %v", errToSend) msg.Data.Metadata = append( msg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, execinfrapb.ProducerMetadata{Err: errToSend}), ) } if o.span != nil && o.getStats != nil { for _, s := range o.getStats() { o.span.RecordStructured(s) } } if trace := execinfra.GetTraceData(ctx); trace != nil { msg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.RemoteProducerMetadata{ Value: &execinfrapb.RemoteProducerMetadata_TraceData_{ TraceData: &execinfrapb.RemoteProducerMetadata_TraceData{ CollectedSpans: trace, }, }, }) } for _, meta := range o.metadataSources.DrainMeta() { msg.Data.Metadata = append(msg.Data.Metadata, execinfrapb.LocalMetaToRemoteProducerMeta(ctx, meta)) } if len(msg.Data.Metadata) == 0 { return nil } return stream.Send(msg) } // runWithStream should be called after sending the ProducerHeader on the // stream. It implements the behavior described in Run. func (o *Outbox) runWithStream( ctx context.Context, stream flowStreamClient, flowCtxCancel, outboxCtxCancel context.CancelFunc, ) { if flowCtxCancel == nil { // The flowCtxCancel might be nil in some tests, but we'll make it a // noop for convenience. flowCtxCancel = func() {} } waitCh := make(chan struct{}) go func() { // This goroutine's job is to listen continually on the stream from the // consumer for errors or drain requests, while the remainder of this // function concurrently is producing data and sending it over the // network. This goroutine will tear down the flow if non-io.EOF error // is received - without it, a producer goroutine might spin doing work // forever after a connection is closed, since it wouldn't notice a // closed connection until it tried to Send over that connection. for { msg, err := stream.Recv() if err != nil { if err != io.EOF { log.Warningf(ctx, "Outbox calling flowCtxCancel after Recv connection error: %+v", err) flowCtxCancel() } break } switch { case msg.Handshake != nil: log.VEventf(ctx, 2, "Outbox received handshake: %v", msg.Handshake) case msg.DrainRequest != nil: o.moveToDraining(ctx, "consumer requested draining" /* reason */) } } close(waitCh) }() terminatedGracefully, errToSend := o.sendBatches(ctx, stream, flowCtxCancel, outboxCtxCancel) if terminatedGracefully || errToSend != nil { reason := "terminated gracefully" if errToSend != nil { reason = fmt.Sprintf("encountered error when sending batches: %v", errToSend) } o.moveToDraining(ctx, reason) if err := o.sendMetadata(ctx, stream, errToSend); err != nil { o.handleStreamErr(ctx, "Send (metadata)", err, flowCtxCancel, outboxCtxCancel) } else { // Close the stream. Note that if this block isn't reached, the stream // is unusable. // The receiver goroutine will read from the stream until any error // is returned (most likely an io.EOF). 
if err := stream.CloseSend(); err != nil { o.handleStreamErr(ctx, "CloseSend", err, flowCtxCancel, outboxCtxCancel) } } } o.close(ctx) <-waitCh }
pkg/sql/colflow/colrpc/outbox.go
1
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.005450197029858828, 0.0003993641585111618, 0.0001654268999118358, 0.00020660727750509977, 0.000822301662992686 ]
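The error-handling convention spelled out in the outbox comments above (an io.EOF from the stream means the remote reader shut down gracefully, so only the outbox's own subtree is canceled, while any other stream error calls flowCtxCancel to tear down the whole flow on this node) can be illustrated with a small standalone sketch. The cancel functions and the printed messages below are stand-ins for the real execinfrapb/gRPC machinery, not the actual API.

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
)

// handleStreamErr mirrors the rule used by Outbox.handleStreamErr: io.EOF is a
// graceful shutdown initiated by the remote inbox, anything else is fatal to
// the flow on this node.
func handleStreamErr(op string, err error, flowCtxCancel, outboxCtxCancel context.CancelFunc) {
	if errors.Is(err, io.EOF) {
		fmt.Printf("%s: EOF, canceling only the outbox subtree\n", op)
		outboxCtxCancel()
		return
	}
	fmt.Printf("%s: %v, canceling the whole flow\n", op, err)
	flowCtxCancel()
}

func main() {
	// The outbox context is derived from the flow context, so canceling the
	// flow also cancels the outbox, but not vice versa.
	flowCtx, flowCtxCancel := context.WithCancel(context.Background())
	outboxCtx, outboxCtxCancel := context.WithCancel(flowCtx)
	defer flowCtxCancel()
	defer outboxCtxCancel()

	handleStreamErr("Send (batches)", io.EOF, flowCtxCancel, outboxCtxCancel)
	fmt.Println("outbox canceled:", outboxCtx.Err() != nil) // true
	fmt.Println("flow canceled:  ", flowCtx.Err() != nil)   // false: the rest of the flow keeps running
}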
{ "id": 12, "code_window": [ "\t\t}\n", "\t}\n" ], "labels": [ "add", "keep" ], "after_edit": [ "\t\tr.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...)\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "add", "edit_start_line_idx": 618 }
// Copyright 2016 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package flowinfra import ( "context" "fmt" "sync" "time" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" ) var errNoInboundStreamConnection = errors.New("no inbound stream connection") // SettingFlowStreamTimeout is a cluster setting that sets the default flow // stream timeout. var SettingFlowStreamTimeout = settings.RegisterDurationSetting( "sql.distsql.flow_stream_timeout", "amount of time incoming streams wait for a flow to be set up before erroring out", 10*time.Second, settings.NonNegativeDuration, ) // expectedConnectionTime is the expected time taken by a flow to connect to its // consumers. const expectedConnectionTime time.Duration = 500 * time.Millisecond // InboundStreamInfo represents the endpoint where a data stream from another // node connects to a flow. The external node initiates this process through a // FlowStream RPC, which uses (*Flow).connectInboundStream() to associate the // stream to a receiver to push rows to. type InboundStreamInfo struct { // receiver is the entity that will receive rows from another host, which is // part of a processor (normally an input synchronizer) for row-based // execution and a colrpc.Inbox for vectorized execution. // // During a FlowStream RPC, the stream is handed off to this strategy to // process. receiver InboundStreamHandler connected bool // if set, indicates that we waited too long for an inbound connection, or // we don't want this stream to connect anymore due to flow cancellation. canceled bool // finished is set if we have signaled that the stream is done transferring // rows (to the flow's wait group). finished bool // waitGroup to signal on when finished. waitGroup *sync.WaitGroup } // NewInboundStreamInfo returns a new InboundStreamInfo. func NewInboundStreamInfo( receiver InboundStreamHandler, waitGroup *sync.WaitGroup, ) *InboundStreamInfo { return &InboundStreamInfo{ receiver: receiver, waitGroup: waitGroup, } } // flowEntry is a structure associated with a (potential) flow. type flowEntry struct { // waitCh is set if one or more clients are waiting for the flow; the // channel gets closed when the flow is registered. waitCh chan struct{} // refCount is used to allow multiple clients to wait for a flow - if the // flow never shows up, the refCount is used to decide which client cleans // up the entry. refCount int flow *FlowBase // inboundStreams are streams that receive data from other hosts, through the // FlowStream API. All fields in the inboundStreamInfos are protected by the // FlowRegistry mutex (except the receiver, whose methods can be called // freely). inboundStreams map[execinfrapb.StreamID]*InboundStreamInfo // streamTimer is a timer that fires after a timeout and verifies that all // inbound streams have been connected. 
streamTimer *time.Timer } // FlowRegistry allows clients to look up flows by ID and to wait for flows to // be registered. Multiple clients can wait concurrently for the same flow. type FlowRegistry struct { syncutil.Mutex // All fields in the flowEntry's are protected by the FlowRegistry mutex, // except flow, whose methods can be called freely. flows map[execinfrapb.FlowID]*flowEntry // draining specifies whether the FlowRegistry is in drain mode. If it is, // the FlowRegistry will not accept new flows. draining bool // flowDone is signaled whenever the size of flows decreases. flowDone *sync.Cond // testingRunBeforeDrainSleep is a testing knob executed when a draining // FlowRegistry has no registered flows but must still wait for a minimum time // for any incoming flows to register. testingRunBeforeDrainSleep func() } // NewFlowRegistry creates a new FlowRegistry. // // instID is the ID of the current node. Used for debugging; pass 0 if you don't // care. func NewFlowRegistry(instID base.SQLInstanceID) *FlowRegistry { fr := &FlowRegistry{flows: make(map[execinfrapb.FlowID]*flowEntry)} fr.flowDone = sync.NewCond(fr) return fr } // getEntryLocked returns the flowEntry associated with the id. If the entry // doesn't exist, one is created and inserted into the map. // It should only be called while holding the mutex. func (fr *FlowRegistry) getEntryLocked(id execinfrapb.FlowID) *flowEntry { entry, ok := fr.flows[id] if !ok { entry = &flowEntry{} fr.flows[id] = entry } return entry } // releaseEntryLocked decreases the refCount in the entry for the given id, and // cleans up the entry if the refCount reaches 0. // It should only be called while holding the mutex. func (fr *FlowRegistry) releaseEntryLocked(id execinfrapb.FlowID) { entry := fr.flows[id] if entry.refCount > 1 { entry.refCount-- } else { if entry.refCount != 1 { panic(errors.AssertionFailedf("invalid refCount: %d", entry.refCount)) } delete(fr.flows, id) fr.flowDone.Signal() } } type flowRetryableError struct { cause error } func (e *flowRetryableError) Error() string { return fmt.Sprintf("flow retryable error: %+v", e.cause) } // IsFlowRetryableError returns true if an error represents a retryable // flow error. func IsFlowRetryableError(e error) bool { return errors.HasType(e, (*flowRetryableError)(nil)) } // RegisterFlow makes a flow accessible to ConnectInboundStream. Any concurrent // ConnectInboundStream calls that are waiting for this flow are woken up. // // It is expected that UnregisterFlow will be called at some point to remove the // flow from the registry. // // inboundStreams are all the remote streams that will be connected into this // flow. If any of them is not connected within timeout, errors are propagated. // The inboundStreams are expected to have been initialized with their // WaitGroups (the group should have been incremented). RegisterFlow takes // responsibility for calling Done() on that WaitGroup; this responsibility will // be forwarded forward by ConnectInboundStream. In case this method returns an // error, the WaitGroup will be decremented. 
func (fr *FlowRegistry) RegisterFlow( ctx context.Context, id execinfrapb.FlowID, f *FlowBase, inboundStreams map[execinfrapb.StreamID]*InboundStreamInfo, timeout time.Duration, ) (retErr error) { fr.Lock() defer fr.Unlock() defer func() { if retErr != nil { for _, stream := range inboundStreams { stream.waitGroup.Done() } } }() draining := fr.draining if f.Cfg != nil { if knobs, ok := f.Cfg.TestingKnobs.Flowinfra.(*TestingKnobs); ok && knobs != nil && knobs.FlowRegistryDraining != nil { draining = knobs.FlowRegistryDraining() } } if draining { return &flowRetryableError{cause: errors.Errorf( "could not register flowID %d because the registry is draining", id, )} } entry := fr.getEntryLocked(id) if entry.flow != nil { return errors.Errorf( "flow already registered: flowID: %s.\n"+ "Current flow: %+v\nExisting flow: %+v", f.spec.FlowID, f.spec, entry.flow.spec) } // Take a reference that will be removed by UnregisterFlow. entry.refCount++ entry.flow = f entry.inboundStreams = inboundStreams // If there are any waiters, wake them up by closing waitCh. if entry.waitCh != nil { close(entry.waitCh) } if len(inboundStreams) > 0 { // Set up a function to time out inbound streams after a while. entry.streamTimer = time.AfterFunc(timeout, func() { fr.Lock() // We're giving up waiting for these inbound streams. We will push an // error to its consumer after fr.Unlock; the error will propagate and // eventually drain all the processors. timedOutReceivers := fr.cancelPendingStreamsLocked(id) fr.Unlock() if len(timedOutReceivers) != 0 { // The span in the context might be finished by the time this runs. In // principle, we could ForkSpan() beforehand, but we don't want to // create the extra span every time. timeoutCtx := tracing.ContextWithSpan(ctx, nil) log.Errorf( timeoutCtx, "flow id:%s : %d inbound streams timed out after %s; propagated error throughout flow", id, len(timedOutReceivers), timeout, ) } for _, r := range timedOutReceivers { go func(r InboundStreamHandler) { r.Timeout(errNoInboundStreamConnection) }(r) } }) } return nil } // cancelPendingStreamsLocked cancels all of the streams that haven't been // connected yet in this flow, by setting them to finished and ending their // wait group. The method returns the list of RowReceivers corresponding to the // streams that were canceled. The caller is expected to send those // RowReceivers a cancellation message - this method can't do it because sending // those messages shouldn't happen under the flow registry's lock. func (fr *FlowRegistry) cancelPendingStreamsLocked(id execinfrapb.FlowID) []InboundStreamHandler { entry := fr.flows[id] if entry == nil || entry.flow == nil { return nil } pendingReceivers := make([]InboundStreamHandler, 0) for streamID, is := range entry.inboundStreams { // Connected, non-finished inbound streams will get an error // returned in ProcessInboundStream(). Non-connected streams // are handled below. if !is.connected && !is.finished && !is.canceled { is.canceled = true pendingReceivers = append(pendingReceivers, is.receiver) fr.finishInboundStreamLocked(id, streamID) } } return pendingReceivers } // UnregisterFlow removes a flow from the registry. Any subsequent // ConnectInboundStream calls for the flow will fail to find it and time out. 
func (fr *FlowRegistry) UnregisterFlow(id execinfrapb.FlowID) { fr.Lock() entry := fr.flows[id] if entry.streamTimer != nil { entry.streamTimer.Stop() entry.streamTimer = nil } fr.releaseEntryLocked(id) fr.Unlock() } // waitForFlowLocked waits until the flow with the given id gets registered - // up to the given timeout - and returns the flowEntry. If the timeout elapses, // returns nil. It should only be called while holding the mutex. The mutex is // temporarily unlocked if we need to wait. // It is illegal to call this if the flow is already connected. func (fr *FlowRegistry) waitForFlowLocked( ctx context.Context, id execinfrapb.FlowID, timeout time.Duration, ) *flowEntry { entry := fr.getEntryLocked(id) if entry.flow != nil { log.Fatalf(ctx, "waitForFlowLocked called for a flow that's already registered: %d", id) } // Flow not registered (at least not yet). // Set up a channel that gets closed when the flow shows up, or when the // timeout elapses. The channel might have been created already if there are // other waiters for the same id. waitCh := entry.waitCh if waitCh == nil { waitCh = make(chan struct{}) entry.waitCh = waitCh } entry.refCount++ fr.Unlock() select { case <-waitCh: case <-time.After(timeout): case <-ctx.Done(): } fr.Lock() fr.releaseEntryLocked(id) if entry.flow == nil { return nil } return entry } // Drain waits at most flowDrainWait for currently running flows to finish and // at least minFlowDrainWait for any incoming flows to be registered. If there // are still flows active after flowDrainWait, Drain waits an extra // expectedConnectionTime so that any flows that were registered at the end of // the time window have a reasonable amount of time to connect to their // consumers, thus unblocking them. // The FlowRegistry rejects any new flows once it has finished draining. // // Note that since local flows are not added to the registry, they are not // waited for. However, this is fine since there should be no local flows // running when the FlowRegistry drains as the draining logic starts with // draining all client connections to a node. // // The reporter callback, if non-nil, is called on a best effort basis // to report work that needed to be done and which may or may not have // been done by the time this call returns. See the explanation in // pkg/server/drain.go for details. func (fr *FlowRegistry) Drain( flowDrainWait time.Duration, minFlowDrainWait time.Duration, reporter func(int, redact.SafeString), ) { allFlowsDone := make(chan struct{}, 1) start := timeutil.Now() stopWaiting := false sleep := func(t time.Duration) { if fr.testingRunBeforeDrainSleep != nil { fr.testingRunBeforeDrainSleep() } time.Sleep(t) } defer func() { // At this stage, we have either hit the flowDrainWait timeout or we have no // flows left. We wait for an expectedConnectionTime longer so that we give // any flows that were registered in the // flowDrainWait - expectedConnectionTime window enough time to establish // connections to their consumers so that the consumers do not block for a // long time waiting for a connection to be established. fr.Lock() fr.draining = true if len(fr.flows) > 0 { fr.Unlock() time.Sleep(expectedConnectionTime) fr.Lock() } fr.Unlock() }() fr.Lock() if len(fr.flows) == 0 { fr.Unlock() sleep(minFlowDrainWait) fr.Lock() // No flows were registered, return. if len(fr.flows) == 0 { fr.Unlock() return } } if reporter != nil { // Report progress to the Drain RPC. 
reporter(len(fr.flows), "distSQL execution flows") } go func() { select { case <-time.After(flowDrainWait): fr.Lock() stopWaiting = true fr.flowDone.Signal() fr.Unlock() case <-allFlowsDone: } }() for !(stopWaiting || len(fr.flows) == 0) { fr.flowDone.Wait() } fr.Unlock() // If we spent less time waiting for all registered flows to finish, wait // for the minimum time for any new incoming flows and wait for these to // finish. waitTime := timeutil.Since(start) if waitTime < minFlowDrainWait { sleep(minFlowDrainWait - waitTime) fr.Lock() for !(stopWaiting || len(fr.flows) == 0) { fr.flowDone.Wait() } fr.Unlock() } allFlowsDone <- struct{}{} } // Undrain causes the FlowRegistry to start accepting flows again. func (fr *FlowRegistry) Undrain() { fr.Lock() fr.draining = false fr.Unlock() } // ConnectInboundStream finds the InboundStreamInfo for the given // <flowID,streamID> pair and marks it as connected. It waits up to timeout for // the stream to be registered with the registry. It also sends the handshake // messages to the producer of the stream. // // stream is the inbound stream. // // It returns the Flow that the stream is connecting to, the receiver that the // stream must push data to and a cleanup function that must be called to // unregister the flow from the registry after all the data has been pushed. // // The cleanup function will decrement the flow's WaitGroup, so that Flow.Wait() // is not blocked on this stream any more. // In case an error is returned, the cleanup function is nil, the Flow is not // considered connected and is not cleaned up. func (fr *FlowRegistry) ConnectInboundStream( ctx context.Context, flowID execinfrapb.FlowID, streamID execinfrapb.StreamID, stream execinfrapb.DistSQL_FlowStreamServer, timeout time.Duration, ) (_ *FlowBase, _ InboundStreamHandler, _ func(), retErr error) { fr.Lock() defer fr.Unlock() entry := fr.getEntryLocked(flowID) if entry.flow == nil { // Send the handshake message informing the producer that the consumer has // not been scheduled yet. Another handshake will be sent below once the // consumer has been connected. deadline := timeutil.Now().Add(timeout) if err := stream.Send(&execinfrapb.ConsumerSignal{ Handshake: &execinfrapb.ConsumerHandshake{ ConsumerScheduled: false, ConsumerScheduleDeadline: &deadline, Version: execinfra.Version, MinAcceptedVersion: execinfra.MinAcceptedVersion, }, }); err != nil { // TODO(andrei): We failed to send a message to the producer; we'll return // an error and leave this stream with connected == false so it times out // later. We could call finishInboundStreamLocked() now so that the flow // doesn't wait for the timeout and we could remember the error for the // consumer if the consumer comes later, but I'm not sure what the best // way to do that is. Similarly for the 2nd handshake message below, // except there we already have the consumer and we can push the error. 
return nil, nil, nil, err } entry = fr.waitForFlowLocked(ctx, flowID, timeout) if entry == nil { return nil, nil, nil, errors.Errorf("flow %s not found", flowID) } } s, ok := entry.inboundStreams[streamID] if !ok { return nil, nil, nil, errors.Errorf("flow %s: no inbound stream %d", flowID, streamID) } if s.connected { return nil, nil, nil, errors.Errorf("flow %s: inbound stream %d already connected", flowID, streamID) } if s.canceled { return nil, nil, nil, errors.Errorf("flow %s: inbound stream %d came too late", flowID, streamID) } // We now mark the stream as connected but, if an error happens later because // the handshake fails, we reset the state; we want the stream to be // considered timed out when the moment comes just as if this connection // attempt never happened. s.connected = true defer func() { if retErr != nil { s.connected = false } }() if err := stream.Send(&execinfrapb.ConsumerSignal{ Handshake: &execinfrapb.ConsumerHandshake{ ConsumerScheduled: true, Version: execinfra.Version, MinAcceptedVersion: execinfra.MinAcceptedVersion, }, }); err != nil { return nil, nil, nil, err } cleanup := func() { fr.Lock() fr.finishInboundStreamLocked(flowID, streamID) fr.Unlock() } return entry.flow, s.receiver, cleanup, nil } func (fr *FlowRegistry) finishInboundStreamLocked( fid execinfrapb.FlowID, sid execinfrapb.StreamID, ) { flowEntry := fr.getEntryLocked(fid) streamEntry := flowEntry.inboundStreams[sid] if !streamEntry.connected && !streamEntry.canceled { panic("finising inbound stream that didn't connect or time out") } if streamEntry.finished { panic("double finish") } streamEntry.finished = true streamEntry.waitGroup.Done() }
pkg/sql/flowinfra/flow_registry.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.277915894985199, 0.006700785364955664, 0.0001663214643485844, 0.00023874193720985204, 0.036610934883356094 ]
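The FlowRegistry above is essentially a rendezvous point: ConnectInboundStream may arrive before RegisterFlow, so each entry carries a waitCh that registration closes, and waiters select on that channel, a timeout, and context cancellation. The sketch below keeps only that core pattern; it omits the refCounts, handshakes, and draining logic of the real implementation, and the names and string stand-ins are illustrative, not the real API.

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

type entry struct {
	flow   string        // stand-in for *FlowBase
	waitCh chan struct{} // closed once the flow registers
}

type registry struct {
	mu    sync.Mutex
	flows map[string]*entry
}

func newRegistry() *registry { return &registry{flows: map[string]*entry{}} }

func (r *registry) getEntryLocked(id string) *entry {
	e, ok := r.flows[id]
	if !ok {
		e = &entry{waitCh: make(chan struct{})}
		r.flows[id] = e
	}
	return e
}

func (r *registry) Register(id, flow string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	e := r.getEntryLocked(id)
	e.flow = flow
	close(e.waitCh) // wake up any Connect waiters
}

func (r *registry) Connect(ctx context.Context, id string, timeout time.Duration) (string, error) {
	r.mu.Lock()
	e := r.getEntryLocked(id)
	r.mu.Unlock()
	select {
	case <-e.waitCh:
		return e.flow, nil
	case <-time.After(timeout):
		return "", errors.New("no inbound stream connection") // mirrors errNoInboundStreamConnection
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	r := newRegistry()
	go func() { time.Sleep(10 * time.Millisecond); r.Register("f1", "flow-1") }()
	f, err := r.Connect(context.Background(), "f1", time.Second)
	fmt.Println(f, err) // flow-1 <nil>
}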
{ "id": 12, "code_window": [ "\t\t}\n", "\t}\n" ], "labels": [ "add", "keep" ], "after_edit": [ "\t\tr.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...)\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "add", "edit_start_line_idx": 618 }
// Copyright 2016 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package humanizeutil import ( "flag" "fmt" "math" "sync/atomic" "time" "github.com/dustin/go-humanize" "github.com/spf13/pflag" ) // IBytes is an int64 version of go-humanize's IBytes. func IBytes(value int64) string { if value < 0 { return fmt.Sprintf("-%s", humanize.IBytes(uint64(-value))) } return humanize.IBytes(uint64(value)) } // ParseBytes is an int64 version of go-humanize's ParseBytes. func ParseBytes(s string) (int64, error) { if len(s) == 0 { return 0, fmt.Errorf("parsing \"\": invalid syntax") } var startIndex int var negative bool if s[0] == '-' { negative = true startIndex = 1 } value, err := humanize.ParseBytes(s[startIndex:]) if err != nil { return 0, err } if value > math.MaxInt64 { return 0, fmt.Errorf("too large: %s", s) } if negative { return -int64(value), nil } return int64(value), nil } // BytesValue is a struct that implements flag.Value and pflag.Value // suitable to create command-line parameters that accept sizes // specified using a format recognized by humanize. // The value is written atomically, so that it is safe to use this // struct to make a parameter configurable that is used by an // asynchronous process spawned before command-line argument handling. // This is useful e.g. for the log file settings which are used // by the asynchronous log file GC daemon. type BytesValue struct { val *int64 isSet bool } var _ flag.Value = &BytesValue{} var _ pflag.Value = &BytesValue{} // NewBytesValue creates a new pflag.Value bound to the specified // int64 variable. It also happens to be a flag.Value. func NewBytesValue(val *int64) *BytesValue { return &BytesValue{val: val} } // Set implements the flag.Value and pflag.Value interfaces. func (b *BytesValue) Set(s string) error { v, err := ParseBytes(s) if err != nil { return err } if b.val == nil { b.val = new(int64) } atomic.StoreInt64(b.val, v) b.isSet = true return nil } // Type implements the pflag.Value interface. func (b *BytesValue) Type() string { return "bytes" } // String implements the flag.Value and pflag.Value interfaces. func (b *BytesValue) String() string { // When b.val is nil, the real value of the flag will only be known after a // Resolve() call. We do not want our flag package to report an erroneous // default value for this flag. So the value we return here must cause // defaultIsZeroValue to return true: // https://github.com/spf13/pflag/blob/v1.0.5/flag.go#L724 if b.val == nil { return "<nil>" } // This uses the MiB, GiB, etc suffixes. If we use humanize.Bytes() we get // the MB, GB, etc suffixes, but the conversion is done in multiples of 1000 // vs 1024. return IBytes(atomic.LoadInt64(b.val)) } // IsSet returns true iff Set has successfully been called. func (b *BytesValue) IsSet() bool { return b.isSet } // DataRate formats the passed byte count over duration as "x MiB/s". func DataRate(bytes int64, elapsed time.Duration) string { if bytes == 0 { return "0" } if elapsed == 0 { return "inf" } return fmt.Sprintf("%0.2f MiB/s", (float64(bytes)/elapsed.Seconds())/float64(1<<20)) }
pkg/util/humanizeutil/humanize.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.005245296750217676, 0.0008984720334410667, 0.00016943174705374986, 0.0002357828343519941, 0.0013461954658851027 ]
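The point of BytesValue above is that the backing int64 is written atomically, so a long-lived goroutine started before flag parsing (such as the log file GC daemon) can keep reading the most recent value safely. A minimal, dependency-free sketch of that flag.Value pattern is below; the tiny suffix parser handles only two IEC units and stands in for humanize.ParseBytes, so the parsing details are assumptions for illustration only.

package main

import (
	"flag"
	"fmt"
	"strconv"
	"strings"
	"sync/atomic"
)

type bytesValue struct{ val *int64 }

var _ flag.Value = (*bytesValue)(nil)

// Set parses a size such as "64MiB" or "2GiB" and stores it atomically.
func (b *bytesValue) Set(s string) error {
	mult := int64(1)
	switch {
	case strings.HasSuffix(s, "MiB"):
		mult, s = 1<<20, strings.TrimSuffix(s, "MiB")
	case strings.HasSuffix(s, "GiB"):
		mult, s = 1<<30, strings.TrimSuffix(s, "GiB")
	}
	n, err := strconv.ParseInt(strings.TrimSpace(s), 10, 64)
	if err != nil {
		return err
	}
	atomic.StoreInt64(b.val, n*mult)
	return nil
}

func (b *bytesValue) String() string {
	if b.val == nil {
		return "<nil>"
	}
	return fmt.Sprintf("%d", atomic.LoadInt64(b.val))
}

func main() {
	var limit int64
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.Var(&bytesValue{val: &limit}, "max-size", "maximum size in bytes")
	_ = fs.Parse([]string{"-max-size", "2GiB"})
	fmt.Println(limit) // 2147483648
}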
{ "id": 12, "code_window": [ "\t\t}\n", "\t}\n" ], "labels": [ "add", "keep" ], "after_edit": [ "\t\tr.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...)\n" ], "file_path": "pkg/sql/colflow/routers.go", "type": "add", "edit_start_line_idx": 618 }
// Copyright 2020 The Cockroach Authors. // // Licensed as a CockroachDB Enterprise file under the Cockroach Community // License (the "License"); you may not use this file except in compliance with // the License. You may obtain a copy of the License at // // https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt package sqlproxyccl import ( "net" "time" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" ) // IdleDisconnectConnection is a wrapper around net.Conn that disconnects if // connection is idle. The idle time is only counted while the client is // waiting, blocked on Read. type IdleDisconnectConnection struct { net.Conn timeout time.Duration mu struct { syncutil.Mutex lastDeadlineSetAt time.Time } } var errNotSupported = errors.Errorf( "Not supported for IdleDisconnectConnection", ) func (c *IdleDisconnectConnection) updateDeadline() error { now := timeutil.Now() // If it has been more than 1% of the timeout duration - advance the deadline. c.mu.Lock() defer c.mu.Unlock() if now.Sub(c.mu.lastDeadlineSetAt) > c.timeout/100 { c.mu.lastDeadlineSetAt = now if err := c.Conn.SetReadDeadline(now.Add(c.timeout)); err != nil { return err } } return nil } // Read reads data from the connection with timeout. func (c *IdleDisconnectConnection) Read(b []byte) (n int, err error) { if err := c.updateDeadline(); err != nil { return 0, err } return c.Conn.Read(b) } // Write writes data to the connection and sets the read timeout. func (c *IdleDisconnectConnection) Write(b []byte) (n int, err error) { // The Write for the connection is not blocking (or can block only temporary // in case of flow control). For idle connections, the Read will be the call // that will block and stay blocked until the backend doesn't send something. // However, it is theoretically possible, that the traffic is only going in // one direction - from the proxy to the backend, in which case we will call // repeatedly Write but stay blocked on the Read. For that specific case - the // write pushes further out the read deadline so the read doesn't timeout. if err := c.updateDeadline(); err != nil { return 0, err } return c.Conn.Write(b) } // SetDeadline is unsupported as it will interfere with the reads. func (c *IdleDisconnectConnection) SetDeadline(t time.Time) error { return errNotSupported } // SetReadDeadline is unsupported as it will interfere with the reads. func (c *IdleDisconnectConnection) SetReadDeadline(t time.Time) error { return errNotSupported } // SetWriteDeadline is unsupported as it will interfere with the reads. func (c *IdleDisconnectConnection) SetWriteDeadline(t time.Time) error { return errNotSupported } // IdleDisconnectOverlay upgrades the connection to one that closes when // idle for more than timeout duration. Timeout of zero will turn off // the idle disconnect code. func IdleDisconnectOverlay(conn net.Conn, timeout time.Duration) net.Conn { if timeout != 0 { return &IdleDisconnectConnection{Conn: conn, timeout: timeout} } return conn }
pkg/ccl/sqlproxyccl/idle_disconnect_connection.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.8303633332252502, 0.0855107307434082, 0.0001719426509225741, 0.0011287233792245388, 0.24830690026283264 ]
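The idle-disconnect wrapper above works by refreshing its own read deadline on activity, so a Read that stays blocked longer than the timeout fails with a deadline error and the connection can be torn down; the deadline is only pushed out when more than 1% of the timeout has elapsed to avoid a SetReadDeadline call on every operation. The sketch below shows that mechanism with net.Pipe standing in for the proxy/backend connection; unlike the real type it is single-goroutine and drops the mutex.

package main

import (
	"fmt"
	"net"
	"time"
)

type idleConn struct {
	net.Conn
	timeout        time.Duration
	lastDeadlineAt time.Time
}

func (c *idleConn) updateDeadline() error {
	now := time.Now()
	// Only advance the deadline when a meaningful fraction of the timeout has
	// passed, so hot read loops do not pay for a deadline update every call.
	if now.Sub(c.lastDeadlineAt) > c.timeout/100 {
		c.lastDeadlineAt = now
		return c.Conn.SetReadDeadline(now.Add(c.timeout))
	}
	return nil
}

func (c *idleConn) Read(b []byte) (int, error) {
	if err := c.updateDeadline(); err != nil {
		return 0, err
	}
	return c.Conn.Read(b)
}

func main() {
	client, server := net.Pipe()
	defer client.Close()
	defer server.Close()

	wrapped := &idleConn{Conn: server, timeout: 50 * time.Millisecond}
	buf := make([]byte, 1)
	_, err := wrapped.Read(buf) // nothing is ever written: the idle timeout fires
	fmt.Println(err)            // a timeout error: the read deadline was reached
}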
{ "id": 13, "code_window": [ "\t}\n", "\tr.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...)\n", "\t// Non-blocking send of metadata so that one of the outputs can return it\n", "\t// in DrainMeta.\n", "\tr.waitForMetadata <- r.bufferedMeta\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/colflow/routers.go", "type": "replace", "edit_start_line_idx": 619 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colflow import ( "context" "sync" "sync/atomic" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colcontainer" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexechash" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/marusama/semaphore" ) // routerOutput is an interface implemented by router outputs. It exists for // easier test mocking of outputs. type routerOutput interface { execinfra.OpNode // initWithHashRouter passes a reference to the HashRouter that will be // pushing batches to this output. initWithHashRouter(*HashRouter) // addBatch adds the elements specified by the selection vector from batch // to the output. It returns whether or not the output changed its state to // blocked (see implementations). addBatch(context.Context, coldata.Batch) bool // cancel tells the output to stop producing batches. Optionally forwards an // error if not nil. cancel(context.Context, error) // forwardErr forwards an error to the output. The output should call // colexecerror.ExpectedError with this error on the next call to Next. // Calling forwardErr multiple times will result in the most recent error // overwriting the previous error. forwardErr(error) // resetForTests resets the routerOutput for a benchmark or test run. resetForTests(context.Context) } // getDefaultRouterOutputBlockedThreshold returns the number of unread values // buffered by the routerOutputOp after which the output is considered blocked. // It is a function rather than a variable so that in tests we could modify // coldata.BatchSize() (if it were a variable, then its value would be // evaluated before we set the desired batch size). func getDefaultRouterOutputBlockedThreshold() int { return coldata.BatchSize() * 2 } type routerOutputOpState int const ( // routerOutputOpRunning is the state in which routerOutputOp operates // normally. The router output transitions into routerOutputDoneAdding when // a zero-length batch was added or routerOutputOpDraining when it // encounters an error or the drain is requested. routerOutputOpRunning routerOutputOpState = iota // routerOutputDoneAdding is the state in which a zero-length was batch was // added to routerOutputOp and no more batches will be added. The router // output transitions to routerOutputOpDraining when the output is canceled // (either closed or the drain is requested). routerOutputDoneAdding // routerOutputOpDraining is the state in which routerOutputOp always // returns zero-length batches on calls to Next. 
routerOutputOpDraining ) // drainCoordinator is an interface that the HashRouter implements to coordinate // cancellation of all of its outputs in the case of an error and draining in // the case of graceful termination. // WARNING: No locks should be held when calling these methods, as the // HashRouter might call routerOutput methods (e.g. cancel) that attempt to // reacquire locks. type drainCoordinator interface { // encounteredError should be called when a routerOutput encounters an error. // This terminates execution. No locks should be held when calling this // method, since cancellation could occur. encounteredError(context.Context) // drainMeta should be called exactly once when the routerOutput moves to // draining. drainMeta() []execinfrapb.ProducerMetadata } type routerOutputOp struct { colexecop.InitHelper // input is a reference to our router. input execinfra.OpNode // drainCoordinator is a reference to the HashRouter to be able to notify it // if the output encounters an error or transitions to a draining state. drainCoordinator drainCoordinator types []*types.T // unblockedEventsChan is signaled when a routerOutput changes state from // blocked to unblocked. unblockedEventsChan chan<- struct{} mu struct { syncutil.Mutex state routerOutputOpState // forwardedErr is an error that was forwarded by the HashRouter. If set, // any subsequent calls to Next will return this error. forwardedErr error cond *sync.Cond // data is a SpillingQueue, a circular buffer backed by a disk queue. data *colexecutils.SpillingQueue numUnread int blocked bool } testingKnobs routerOutputOpTestingKnobs } func (o *routerOutputOp) ChildCount(verbose bool) int { return 1 } func (o *routerOutputOp) Child(nth int, verbose bool) execinfra.OpNode { if nth == 0 { return o.input } colexecerror.InternalError(errors.AssertionFailedf("invalid index %d", nth)) // This code is unreachable, but the compiler cannot infer that. return nil } var _ colexecop.Operator = &routerOutputOp{} type routerOutputOpTestingKnobs struct { // blockedThreshold is the number of buffered values above which we consider // a router output to be blocked. It defaults to // defaultRouterOutputBlockedThreshold but can be modified by tests to test // edge cases. blockedThreshold int // addBatchTestInducedErrorCb is called after any function call that could // produce an error if that error is nil. If the callback returns an error, // the router output overwrites the nil error with the returned error. // It is guaranteed that this callback will be called at least once during // normal execution. addBatchTestInducedErrorCb func() error // nextTestInducedErrorCb is called after any function call that could // produce an error if that error is nil. If the callback returns an error, // the router output overwrites the nil error with the returned error. // It is guaranteed that this callback will be called at least once during // normal execution. nextTestInducedErrorCb func() error } // routerOutputOpArgs are the arguments to newRouterOutputOp. All fields apart // from the testing knobs are optional. type routerOutputOpArgs struct { // All fields are required unless marked optional. types []*types.T // unlimitedAllocator should not have a memory limit. Pass in a soft // memoryLimit that will be respected instead. unlimitedAllocator *colmem.Allocator // memoryLimit acts as a soft limit to allow the router output to use disk // when it is exceeded. 
memoryLimit int64 diskAcc *mon.BoundAccount cfg colcontainer.DiskQueueCfg fdSemaphore semaphore.Semaphore // unblockedEventsChan must be a buffered channel. unblockedEventsChan chan<- struct{} testingKnobs routerOutputOpTestingKnobs } // newRouterOutputOp creates a new router output. func newRouterOutputOp(args routerOutputOpArgs) *routerOutputOp { if args.testingKnobs.blockedThreshold == 0 { args.testingKnobs.blockedThreshold = getDefaultRouterOutputBlockedThreshold() } o := &routerOutputOp{ types: args.types, unblockedEventsChan: args.unblockedEventsChan, testingKnobs: args.testingKnobs, } o.mu.cond = sync.NewCond(&o.mu) o.mu.data = colexecutils.NewSpillingQueue( &colexecutils.NewSpillingQueueArgs{ UnlimitedAllocator: args.unlimitedAllocator, Types: args.types, MemoryLimit: args.memoryLimit, DiskQueueCfg: args.cfg, FDSemaphore: args.fdSemaphore, DiskAcc: args.diskAcc, }, ) return o } func (o *routerOutputOp) Init(ctx context.Context) { o.InitHelper.Init(ctx) } // nextErrorLocked is a helper method that handles an error encountered in Next. func (o *routerOutputOp) nextErrorLocked(err error) { o.mu.state = routerOutputOpDraining o.maybeUnblockLocked() // Unlock the mutex, since the HashRouter will cancel all outputs. o.mu.Unlock() o.drainCoordinator.encounteredError(o.Ctx) o.mu.Lock() colexecerror.InternalError(err) } // Next returns the next coldata.Batch from the routerOutputOp. Note that Next // is designed for only one concurrent caller and will block until data is // ready. func (o *routerOutputOp) Next() coldata.Batch { o.mu.Lock() defer o.mu.Unlock() for o.mu.forwardedErr == nil && o.mu.state == routerOutputOpRunning && o.mu.data.Empty() { // Wait until there is data to read or the output is canceled. o.mu.cond.Wait() } if o.mu.forwardedErr != nil { colexecerror.ExpectedError(o.mu.forwardedErr) } if o.mu.state == routerOutputOpDraining { return coldata.ZeroBatch } b, err := o.mu.data.Dequeue(o.Ctx) if err == nil && o.testingKnobs.nextTestInducedErrorCb != nil { err = o.testingKnobs.nextTestInducedErrorCb() } if err != nil { o.nextErrorLocked(err) } o.mu.numUnread -= b.Length() if o.mu.numUnread <= o.testingKnobs.blockedThreshold { o.maybeUnblockLocked() } if b.Length() == 0 { if o.testingKnobs.nextTestInducedErrorCb != nil { if err := o.testingKnobs.nextTestInducedErrorCb(); err != nil { o.nextErrorLocked(err) } } // This is the last batch. closeLocked will set done to protect against // further calls to Next since this is allowed by the interface as well as // cleaning up and releasing possible disk infrastructure. o.closeLocked(o.Ctx) } return b } func (o *routerOutputOp) DrainMeta() []execinfrapb.ProducerMetadata { o.mu.Lock() o.mu.state = routerOutputOpDraining o.maybeUnblockLocked() o.mu.Unlock() return o.drainCoordinator.drainMeta() } func (o *routerOutputOp) initWithHashRouter(r *HashRouter) { o.input = r o.drainCoordinator = r } func (o *routerOutputOp) closeLocked(ctx context.Context) { o.mu.state = routerOutputOpDraining if err := o.mu.data.Close(ctx); err != nil { // This log message is Info instead of Warning because the flow will also // attempt to clean up the parent directory, so this failure might not have // any effect. log.Infof(ctx, "error closing vectorized hash router output, files may be left over: %s", err) } } // cancel wakes up a reader in Next if there is one and results in the output // returning zero length batches for every Next call after cancel. Note that // all accumulated data that hasn't been read will not be returned. 
func (o *routerOutputOp) cancel(ctx context.Context, err error) { o.mu.Lock() defer o.mu.Unlock() o.closeLocked(ctx) o.forwardErrLocked(err) // Some goroutine might be waiting on the condition variable, so wake it up. // Note that read goroutines check o.mu.done, so won't wait on the condition // variable after we unlock the mutex. o.mu.cond.Signal() } func (o *routerOutputOp) forwardErrLocked(err error) { if err != nil { o.mu.forwardedErr = err } } func (o *routerOutputOp) forwardErr(err error) { o.mu.Lock() defer o.mu.Unlock() o.forwardErrLocked(err) o.mu.cond.Signal() } // addBatch copies the batch (according to its selection vector) into an // internal buffer. Zero-length batch should be passed-in to indicate that no // more batches will be added. // TODO(asubiotto): We should explore pipelining addBatch if disk-spilling // performance becomes a concern. The main router goroutine will be writing to // disk as the code is written, meaning that we impact the performance of // writing rows to a fast output if we have to write to disk for a single // slow output. func (o *routerOutputOp) addBatch(ctx context.Context, batch coldata.Batch) bool { o.mu.Lock() defer o.mu.Unlock() switch o.mu.state { case routerOutputDoneAdding: colexecerror.InternalError(errors.AssertionFailedf("a batch was added to routerOutput in DoneAdding state")) case routerOutputOpDraining: // This output is draining, discard any data. return false } o.mu.numUnread += batch.Length() o.mu.data.Enqueue(ctx, batch) if o.testingKnobs.addBatchTestInducedErrorCb != nil { if err := o.testingKnobs.addBatchTestInducedErrorCb(); err != nil { colexecerror.InternalError(err) } } if batch.Length() == 0 { o.mu.state = routerOutputDoneAdding o.mu.cond.Signal() return false } stateChanged := false if o.mu.numUnread > o.testingKnobs.blockedThreshold && !o.mu.blocked { // The output is now blocked. o.mu.blocked = true stateChanged = true } o.mu.cond.Signal() return stateChanged } // maybeUnblockLocked unblocks the router output if it is in a blocked state. If the // output was previously in a blocked state, an event will be sent on // routerOutputOp.unblockedEventsChan. func (o *routerOutputOp) maybeUnblockLocked() { if o.mu.blocked { o.mu.blocked = false o.unblockedEventsChan <- struct{}{} } } // resetForTests resets the routerOutputOp for a test or benchmark run. func (o *routerOutputOp) resetForTests(ctx context.Context) { o.mu.Lock() defer o.mu.Unlock() o.mu.state = routerOutputOpRunning o.mu.forwardedErr = nil o.mu.data.Reset(ctx) o.mu.numUnread = 0 o.mu.blocked = false } // hashRouterDrainState is a state that specifically describes the hashRouter's // state in the draining process. This differs from its "general" state. For // example, a hash router can have drained and exited the Run method but still // be in hashRouterDrainStateRunning until somebody calls drainMeta. type hashRouterDrainState int const ( // hashRouterDrainStateRunning is the state that a hashRouter is in when // running normally (i.e. pulling and pushing batches). hashRouterDrainStateRunning = iota // hashRouterDrainStateRequested is the state that a hashRouter is in when // either all outputs have called drainMeta or an error was encountered by one // of the outputs. hashRouterDrainStateRequested // hashRouterDrainStateCompleted is the state that a hashRouter is in when // draining has completed. hashRouterDrainStateCompleted ) // HashRouter hashes values according to provided hash columns and computes a // destination for each row. 
These destinations are exposed as Operators // returned by the constructor. type HashRouter struct { colexecop.OneInputNode // inputMetaInfo contains all of the meta components that the hash router // is responsible for. Root field is exactly the same as OneInputNode.Input. inputMetaInfo colexecargs.OpWithMetaInfo // hashCols is a slice of indices of the columns used for hashing. hashCols []uint32 // One output for each stream. outputs []routerOutput // unblockedEventsChan is a channel shared between the HashRouter and its // outputs. outputs send events on this channel when they are unblocked by a // read. unblockedEventsChan <-chan struct{} numBlockedOutputs int bufferedMeta []execinfrapb.ProducerMetadata // atomics is shared state between the Run goroutine and any routerOutput // goroutines that call drainMeta. atomics struct { // drainState is the state the hashRouter is in. The Run goroutine should // only ever read these states, never set them. drainState int32 numDrainedOutputs int32 } // waitForMetadata is a channel that the last output to drain will read from // to pass on any metadata buffered through the Run goroutine. waitForMetadata chan []execinfrapb.ProducerMetadata // tupleDistributor is used to decide to which output a particular tuple // should be routed. tupleDistributor *colexechash.TupleHashDistributor } // NewHashRouter creates a new hash router that consumes coldata.Batches from // input and hashes each row according to hashCols to one of the outputs // returned as Operators. // The number of allocators provided will determine the number of outputs // returned. Note that each allocator must be unlimited, memory will be limited // by comparing memory use in the allocator with the memoryLimit argument. Each // Operator must have an independent allocator (this means that each allocator // should be linked to an independent mem account) as Operator.Next will usually // be called concurrently between different outputs. Similarly, each output // needs to have a separate disk account. func NewHashRouter( unlimitedAllocators []*colmem.Allocator, input colexecargs.OpWithMetaInfo, types []*types.T, hashCols []uint32, memoryLimit int64, diskQueueCfg colcontainer.DiskQueueCfg, fdSemaphore semaphore.Semaphore, diskAccounts []*mon.BoundAccount, ) (*HashRouter, []colexecop.DrainableOperator) { if diskQueueCfg.CacheMode != colcontainer.DiskQueueCacheModeDefault { colexecerror.InternalError(errors.Errorf("hash router instantiated with incompatible disk queue cache mode: %d", diskQueueCfg.CacheMode)) } outputs := make([]routerOutput, len(unlimitedAllocators)) outputsAsOps := make([]colexecop.DrainableOperator, len(unlimitedAllocators)) // unblockEventsChan is buffered to 2*numOutputs as we don't want the outputs // writing to it to block. // Unblock events only happen after a corresponding block event. Since these // are state changes and are done under lock (including the output sending // on the channel, which is why we want the channel to be buffered in the // first place), every time the HashRouter blocks an output, it *must* read // all unblock events preceding it since these *must* be on the channel. 
unblockEventsChan := make(chan struct{}, 2*len(unlimitedAllocators)) memoryLimitPerOutput := memoryLimit / int64(len(unlimitedAllocators)) for i := range unlimitedAllocators { op := newRouterOutputOp( routerOutputOpArgs{ types: types, unlimitedAllocator: unlimitedAllocators[i], memoryLimit: memoryLimitPerOutput, diskAcc: diskAccounts[i], cfg: diskQueueCfg, fdSemaphore: fdSemaphore, unblockedEventsChan: unblockEventsChan, }, ) outputs[i] = op outputsAsOps[i] = op } return newHashRouterWithOutputs(input, hashCols, unblockEventsChan, outputs), outputsAsOps } func newHashRouterWithOutputs( input colexecargs.OpWithMetaInfo, hashCols []uint32, unblockEventsChan <-chan struct{}, outputs []routerOutput, ) *HashRouter { r := &HashRouter{ OneInputNode: colexecop.NewOneInputNode(input.Root), inputMetaInfo: input, hashCols: hashCols, outputs: outputs, unblockedEventsChan: unblockEventsChan, // waitForMetadata is a buffered channel to avoid blocking if nobody will // read the metadata. waitForMetadata: make(chan []execinfrapb.ProducerMetadata, 1), tupleDistributor: colexechash.NewTupleHashDistributor(colexechash.DefaultInitHashValue, len(outputs)), } for i := range outputs { outputs[i].initWithHashRouter(r) } return r } // cancelOutputs cancels all outputs and forwards the given error to all of // them if non-nil. The only case where the error is not forwarded is if no // output could be canceled due to an error. In this case each output will // forward the error returned during cancellation. func (r *HashRouter) cancelOutputs(ctx context.Context, errToForward error) { for _, o := range r.outputs { if err := colexecerror.CatchVectorizedRuntimeError(func() { o.cancel(ctx, errToForward) }); err != nil { // If there was an error canceling this output, this error can be // forwarded to whoever is calling Next. o.forwardErr(err) } } } func (r *HashRouter) setDrainState(drainState hashRouterDrainState) { atomic.StoreInt32(&r.atomics.drainState, int32(drainState)) } func (r *HashRouter) getDrainState() hashRouterDrainState { return hashRouterDrainState(atomic.LoadInt32(&r.atomics.drainState)) } // Run runs the HashRouter. Batches are read from the input and pushed to an // output calculated by hashing columns. Cancel the given context to terminate // early. func (r *HashRouter) Run(ctx context.Context) { var span *tracing.Span ctx, span = execinfra.ProcessorSpan(ctx, "hash router") if span != nil { defer span.Finish() } // Since HashRouter runs in a separate goroutine, we want to be safe and // make sure that we catch errors in all code paths, so we wrap the whole // method with a catcher. Note that we also have "internal" catchers as // well for more fine-grained control of error propagation. if err := colexecerror.CatchVectorizedRuntimeError(func() { r.Input.Init(ctx) var done bool processNextBatch := func() { done = r.processNextBatch(ctx) } for { if r.getDrainState() != hashRouterDrainStateRunning { break } // Check for cancellation. select { case <-ctx.Done(): r.cancelOutputs(ctx, ctx.Err()) return default: } // Read all the routerOutput state changes that have happened since the // last iteration. for moreToRead := true; moreToRead; { select { case <-r.unblockedEventsChan: r.numBlockedOutputs-- default: // No more routerOutput state changes to read without blocking. moreToRead = false } } if r.numBlockedOutputs == len(r.outputs) { // All outputs are blocked, wait until at least one output is unblocked. 
select { case <-r.unblockedEventsChan: r.numBlockedOutputs-- case <-ctx.Done(): r.cancelOutputs(ctx, ctx.Err()) return } } if err := colexecerror.CatchVectorizedRuntimeError(processNextBatch); err != nil { r.cancelOutputs(ctx, err) return } if done { // The input was done and we have notified the routerOutputs that there // is no more data. return } } }); err != nil { r.cancelOutputs(ctx, err) } if span != nil { for _, s := range r.inputMetaInfo.StatsCollectors { span.RecordStructured(s.GetStats()) } if meta := execinfra.GetTraceDataAsMetadata(span); meta != nil { r.bufferedMeta = append(r.bufferedMeta, *meta) } } r.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...) // Non-blocking send of metadata so that one of the outputs can return it // in DrainMeta. r.waitForMetadata <- r.bufferedMeta close(r.waitForMetadata) r.inputMetaInfo.ToClose.CloseAndLogOnErr(ctx, "hash router") } // processNextBatch reads the next batch from its input, hashes it and adds // each column to its corresponding output, returning whether the input is // done. func (r *HashRouter) processNextBatch(ctx context.Context) bool { b := r.Input.Next() n := b.Length() if n == 0 { // Done. Push an empty batch to outputs to tell them the data is done as // well. for _, o := range r.outputs { o.addBatch(ctx, b) } return true } // It is ok that we call Init() on every batch since all calls except for // the first one are noops. r.tupleDistributor.Init(ctx) selections := r.tupleDistributor.Distribute(b, r.hashCols) for i, o := range r.outputs { if len(selections[i]) > 0 { b.SetSelection(true) copy(b.Selection(), selections[i]) b.SetLength(len(selections[i])) if o.addBatch(ctx, b) { // This batch blocked the output. r.numBlockedOutputs++ } } } return false } // resetForTests resets the HashRouter for a test or benchmark run. func (r *HashRouter) resetForTests(ctx context.Context) { if i, ok := r.Input.(colexecop.Resetter); ok { i.Reset(ctx) } r.setDrainState(hashRouterDrainStateRunning) r.waitForMetadata = make(chan []execinfrapb.ProducerMetadata, 1) r.atomics.numDrainedOutputs = 0 r.bufferedMeta = nil r.numBlockedOutputs = 0 for moreToRead := true; moreToRead; { select { case <-r.unblockedEventsChan: default: moreToRead = false } } for _, o := range r.outputs { o.resetForTests(ctx) } } func (r *HashRouter) encounteredError(ctx context.Context) { // Once one output returns an error the hash router needs to stop running // and drain its input. r.setDrainState(hashRouterDrainStateRequested) // cancel all outputs. The Run goroutine will eventually realize that the // HashRouter is done and exit without draining. r.cancelOutputs(ctx, nil /* errToForward */) } func (r *HashRouter) drainMeta() []execinfrapb.ProducerMetadata { if int(atomic.AddInt32(&r.atomics.numDrainedOutputs, 1)) != len(r.outputs) { return nil } // All outputs have been drained, return any buffered metadata to the last // output to call drainMeta. r.setDrainState(hashRouterDrainStateRequested) meta := <-r.waitForMetadata r.setDrainState(hashRouterDrainStateCompleted) return meta }
pkg/sql/colflow/routers.go
1
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.5445613265037537, 0.012483878992497921, 0.0001654268999118358, 0.0005327535909600556, 0.06699541211128235 ]
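The hunk records around this file target HashRouter's metadata handoff: the Run goroutine buffers metadata (including the drained MetadataSources) and does a non-blocking send into a 1-buffered channel, an atomic counter tracks how many outputs have drained, and only the last output to call drainMeta receives the slice. The standalone sketch below shows just that handoff; the string slice stands in for []execinfrapb.ProducerMetadata and the names are illustrative.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type router struct {
	numOutputs      int32
	numDrained      int32
	waitForMetadata chan []string // buffered with capacity 1
}

// run simulates the tail of HashRouter.Run: buffer metadata, then hand it off.
func (r *router) run(buffered []string) {
	r.waitForMetadata <- buffered // never blocks: capacity 1, single producer
	close(r.waitForMetadata)
}

// drainMeta returns nil for every output except the last one to drain.
func (r *router) drainMeta() []string {
	if atomic.AddInt32(&r.numDrained, 1) != r.numOutputs {
		return nil
	}
	return <-r.waitForMetadata
}

func main() {
	r := &router{numOutputs: 3, waitForMetadata: make(chan []string, 1)}
	r.run([]string{"trace", "stats"})

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			if meta := r.drainMeta(); meta != nil {
				fmt.Printf("output %d got metadata: %v\n", id, meta)
			}
		}(i)
	}
	wg.Wait()
}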
{ "id": 13, "code_window": [ "\t}\n", "\tr.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...)\n", "\t// Non-blocking send of metadata so that one of the outputs can return it\n", "\t// in DrainMeta.\n", "\tr.waitForMetadata <- r.bufferedMeta\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/colflow/routers.go", "type": "replace", "edit_start_line_idx": 619 }
-- test that \echo spills into the second level
\set echo
\i testdata/i_twolevels2.sql
-- at this point, the second level has disabled echo.
-- verify this.
SELECT 456;
pkg/cli/testdata/i_twolevels1.sql
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00017542664136271924, 0.00017542664136271924, 0.00017542664136271924, 0.00017542664136271924, 0 ]
{ "id": 13, "code_window": [ "\t}\n", "\tr.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...)\n", "\t// Non-blocking send of metadata so that one of the outputs can return it\n", "\t// in DrainMeta.\n", "\tr.waitForMetadata <- r.bufferedMeta\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/colflow/routers.go", "type": "replace", "edit_start_line_idx": 619 }
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package colexec import ( "context" "fmt" "testing" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/colcontainer" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils/colcontainerutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/randutil" "github.com/marusama/semaphore" "github.com/stretchr/testify/require" ) func TestExternalHashJoiner(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() st := cluster.MakeTestingClusterSettings() evalCtx := tree.MakeTestingEvalContext(st) defer evalCtx.Stop(ctx) flowCtx := &execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{ Settings: st, }, DiskMonitor: testDiskMonitor, } queueCfg, cleanup := colcontainerutils.NewTestingDiskQueueCfg(t, true /* inMem */) defer cleanup() var ( accounts []*mon.BoundAccount monitors []*mon.BytesMonitor ) rng, _ := randutil.NewPseudoRand() numForcedRepartitions := rng.Intn(5) // Test the case in which the default memory is used as well as the case in // which the joiner spills to disk. for _, spillForced := range []bool{false, true} { flowCtx.Cfg.TestingKnobs.ForceDiskSpill = spillForced for _, tcs := range [][]*joinTestCase{getHJTestCases(), getMJTestCases()} { for _, tc := range tcs { delegateFDAcquisitions := rng.Float64() < 0.5 log.Infof(ctx, "spillForced=%t/numRepartitions=%d/%s/delegateFDAcquisitions=%t", spillForced, numForcedRepartitions, tc.description, delegateFDAcquisitions) var semsToCheck []semaphore.Semaphore oldSkipAllNullsInjection := tc.skipAllNullsInjection if !tc.onExpr.Empty() { // When we have ON expression, there might be other operators (like // selections) on top of the external hash joiner in // diskSpiller.diskBackedOp chain. This will not allow for Close() // call to propagate to the external hash joiner, so we will skip // allNullsInjection test for now. tc.skipAllNullsInjection = true } runHashJoinTestCase(t, tc, func(sources []colexecop.Operator) (colexecop.Operator, error) { sem := colexecop.NewTestingSemaphore(externalHJMinPartitions) semsToCheck = append(semsToCheck, sem) spec := createSpecForHashJoiner(tc) // TODO(asubiotto): Pass in the testing.T of the caller to this // function and do substring matching on the test name to // conditionally explicitly call Close() on the hash joiner // (through result.ToClose) in cases where it is known the sorter // will not be drained. 
hjOp, newAccounts, newMonitors, closers, err := createDiskBackedHashJoiner( ctx, flowCtx, spec, sources, func() {}, queueCfg, numForcedRepartitions, delegateFDAcquisitions, sem, ) // Expect three closers. These are the external hash joiner, and // one external sorter for each input. // TODO(asubiotto): Explicitly Close when testing.T is passed into // this constructor and we do a substring match. require.Equal(t, 3, len(closers)) accounts = append(accounts, newAccounts...) monitors = append(monitors, newMonitors...) return hjOp, err }) for i, sem := range semsToCheck { require.Equal(t, 0, sem.GetCount(), "sem still reports open FDs at index %d", i) } tc.skipAllNullsInjection = oldSkipAllNullsInjection } } } for _, acc := range accounts { acc.Close(ctx) } for _, mon := range monitors { mon.Stop(ctx) } } // TestExternalHashJoinerFallbackToSortMergeJoin tests that the external hash // joiner falls back to using sort + merge join when repartitioning doesn't // decrease the size of the partition. We instantiate two sources that contain // the same tuple many times. func TestExternalHashJoinerFallbackToSortMergeJoin(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() st := cluster.MakeTestingClusterSettings() evalCtx := tree.MakeTestingEvalContext(st) defer evalCtx.Stop(ctx) flowCtx := &execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{ Settings: st, TestingKnobs: execinfra.TestingKnobs{ ForceDiskSpill: true, }, }, DiskMonitor: testDiskMonitor, } sourceTypes := []*types.T{types.Int} batch := testAllocator.NewMemBatchWithMaxCapacity(sourceTypes) // We don't need to set the data since zero values in the columns work. batch.SetLength(coldata.BatchSize()) nBatches := 2 leftSource := colexectestutils.NewFiniteBatchSource(testAllocator, batch, sourceTypes, nBatches) rightSource := colexectestutils.NewFiniteBatchSource(testAllocator, batch, sourceTypes, nBatches) tc := &joinTestCase{ joinType: descpb.InnerJoin, leftTypes: sourceTypes, leftOutCols: []uint32{0}, leftEqCols: []uint32{0}, rightTypes: sourceTypes, rightOutCols: []uint32{0}, rightEqCols: []uint32{0}, } tc.init() spec := createSpecForHashJoiner(tc) var spilled bool queueCfg, cleanup := colcontainerutils.NewTestingDiskQueueCfg(t, true /* inMem */) defer cleanup() sem := colexecop.NewTestingSemaphore(externalHJMinPartitions) // Ignore closers since the sorter should close itself when it is drained of // all tuples. We assert this by checking that the semaphore reports a count // of 0. hj, accounts, monitors, _, err := createDiskBackedHashJoiner( ctx, flowCtx, spec, []colexecop.Operator{leftSource, rightSource}, func() { spilled = true }, queueCfg, // Force a repartition so that the recursive repartitioning always // occurs. 1, /* numForcedRepartitions */ true /* delegateFDAcquisitions */, sem, ) defer func() { for _, acc := range accounts { acc.Close(ctx) } for _, mon := range monitors { mon.Stop(ctx) } }() require.NoError(t, err) hj.Init(ctx) // We have a full cross-product, so we should get the number of tuples // squared in the output. expectedTuplesCount := nBatches * nBatches * coldata.BatchSize() * coldata.BatchSize() actualTuplesCount := 0 for b := hj.Next(); b.Length() > 0; b = hj.Next() { actualTuplesCount += b.Length() } require.True(t, spilled) require.Equal(t, expectedTuplesCount, actualTuplesCount) require.Equal(t, 0, sem.GetCount()) } // newIntColumns returns nCols columns of types.Int with increasing values // starting at 0. 
func newIntColumns(nCols int, length int) []coldata.Vec { cols := make([]coldata.Vec, nCols) for colIdx := 0; colIdx < nCols; colIdx++ { cols[colIdx] = testAllocator.NewMemColumn(types.Int, length) col := cols[colIdx].Int64() for i := 0; i < length; i++ { col[i] = int64(i) } } return cols } func BenchmarkExternalHashJoiner(b *testing.B) { defer log.Scope(b).Close(b) ctx := context.Background() st := cluster.MakeTestingClusterSettings() evalCtx := tree.MakeTestingEvalContext(st) defer evalCtx.Stop(ctx) flowCtx := &execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{ Settings: st, }, DiskMonitor: testDiskMonitor, } nCols := 4 sourceTypes := make([]*types.T, nCols) for colIdx := 0; colIdx < nCols; colIdx++ { sourceTypes[colIdx] = types.Int } var ( memAccounts []*mon.BoundAccount memMonitors []*mon.BytesMonitor ) queueCfg, cleanup := colcontainerutils.NewTestingDiskQueueCfg(b, false /* inMem */) defer cleanup() for _, spillForced := range []bool{false, true} { flowCtx.Cfg.TestingKnobs.ForceDiskSpill = spillForced for _, nRows := range []int{1, 1 << 4, 1 << 8, 1 << 12, 1 << 16, 1 << 20} { if spillForced && nRows < coldata.BatchSize() { // Forcing spilling to disk on very small input size doesn't // provide a meaningful signal, so we skip such config. continue } cols := newIntColumns(nCols, nRows) for _, fullOuter := range []bool{false, true} { joinType := descpb.InnerJoin if fullOuter { joinType = descpb.FullOuterJoin } tc := &joinTestCase{ joinType: joinType, leftTypes: sourceTypes, leftOutCols: []uint32{0, 1}, leftEqCols: []uint32{0, 2}, rightTypes: sourceTypes, rightOutCols: []uint32{2, 3}, rightEqCols: []uint32{0, 1}, } tc.init() spec := createSpecForHashJoiner(tc) b.Run(fmt.Sprintf("spillForced=%t/rows=%d/fullOuter=%t", spillForced, nRows, fullOuter), func(b *testing.B) { b.SetBytes(int64(8 * nRows * nCols * 2)) b.ResetTimer() for i := 0; i < b.N; i++ { leftSource := colexectestutils.NewChunkingBatchSource(testAllocator, sourceTypes, cols, nRows) rightSource := colexectestutils.NewChunkingBatchSource(testAllocator, sourceTypes, cols, nRows) hj, accounts, monitors, _, err := createDiskBackedHashJoiner( ctx, flowCtx, spec, []colexecop.Operator{leftSource, rightSource}, func() {}, queueCfg, 0 /* numForcedRepartitions */, false, /* delegateFDAcquisitions */ colexecop.NewTestingSemaphore(VecMaxOpenFDsLimit), ) memAccounts = append(memAccounts, accounts...) memMonitors = append(memMonitors, monitors...) require.NoError(b, err) hj.Init(ctx) for b := hj.Next(); b.Length() > 0; b = hj.Next() { } } }) } } } for _, memAccount := range memAccounts { memAccount.Close(ctx) } for _, memMonitor := range memMonitors { memMonitor.Stop(ctx) } } // createDiskBackedHashJoiner is a helper function that instantiates a // disk-backed hash join operator. The desired memory limit must have been // already set on flowCtx. It returns an operator and an error as well as // memory monitors and memory accounts that will need to be closed once the // caller is done with the operator. 
func createDiskBackedHashJoiner( ctx context.Context, flowCtx *execinfra.FlowCtx, spec *execinfrapb.ProcessorSpec, sources []colexecop.Operator, spillingCallbackFn func(), diskQueueCfg colcontainer.DiskQueueCfg, numForcedRepartitions int, delegateFDAcquisitions bool, testingSemaphore semaphore.Semaphore, ) (colexecop.Operator, []*mon.BoundAccount, []*mon.BytesMonitor, []colexecop.Closer, error) { args := &colexecargs.NewColOperatorArgs{ Spec: spec, Inputs: colexectestutils.MakeInputs(sources), StreamingMemAccount: testMemAcc, DiskQueueCfg: diskQueueCfg, FDSemaphore: testingSemaphore, } // We will not use streaming memory account for the external hash join so // that the in-memory hash join operator could hit the memory limit set on // flowCtx. args.TestingKnobs.SpillingCallbackFn = spillingCallbackFn args.TestingKnobs.NumForcedRepartitions = numForcedRepartitions args.TestingKnobs.DelegateFDAcquisitions = delegateFDAcquisitions result, err := colexecargs.TestNewColOperator(ctx, flowCtx, args) return result.Root, result.OpAccounts, result.OpMonitors, result.ToClose, err }
pkg/sql/colexec/external_hash_joiner_test.go
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.00020269607193768024, 0.0001712034863885492, 0.00016423290071543306, 0.00017054656927939504, 0.000006623031367780641 ]
{ "id": 13, "code_window": [ "\t}\n", "\tr.bufferedMeta = append(r.bufferedMeta, r.inputMetaInfo.MetadataSources.DrainMeta()...)\n", "\t// Non-blocking send of metadata so that one of the outputs can return it\n", "\t// in DrainMeta.\n", "\tr.waitForMetadata <- r.bufferedMeta\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/colflow/routers.go", "type": "replace", "edit_start_line_idx": 619 }
-----BEGIN CERTIFICATE----- MIIDAzCCAeugAwIBAgIQWF0W4TRm+U0vQaWsGR8crDANBgkqhkiG9w0BAQsFADAr MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0y MDA3MTUxMTQ2MjdaFw0yNTA3MjAxMTQ2MjdaMCcxEjAQBgNVBAoTCUNvY2tyb2Fj aDERMA8GA1UEAxMIdGVzdHVzZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK AoIBAQDTOfgMdnPjk6+3XPj4nZIdUkKBQVurkzs48f+2bU6OL6+Ix88NSP7gDdw8 P07Oz7czfavMoCYDFKDJNh/j/zuameVk9hJ6B6NEMtjXt2+6IPfn7EwZyTK0zCoX LeuU/BmT+7DxXxUduQ51uq0WLxSDZXrK4+E9nvs8onrHxK1mPc37dzgW3A7qjUOD E3+4VFwt6yV1dJFEdCyZvoYrIZimSrX8UHzzy1vO+m2evCIE+w0e0CN7rcV0/dJV Xq/wXmD6OHXR3a/o0S44+qWBSmxDLJyAYnPTIigVLQHdb8UltMFcvXKI8bhH7PEH r7zK0Osa4BTLciGuEowS68TW/xO5AgMBAAGjJzAlMA4GA1UdDwEB/wQEAwIFoDAT BgNVHSUEDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQsFAAOCAQEAMVShvPzxuTnd tPekm3cQ0uNHLb6dUBwC4Ts0VDW60vcIcrefWiPsV9XJCg5m4L1AHUr3DUwaEHR7 h9wpbKMQaWTyEH4k62nL9vs0UgvfXroZ2Z1cin7+HdoJlt3RKRBfvgw/KY5765oW 5/IQuz0ZTe0d+Hov2WLzEPWXPXwzPPym+WSTdxbUxzcYSjCHKbBNHoCGCvirKipd pnfmrwGFgE/xtL3v2Ek+kIiZWfhoZGbsIFEg/Dz+/xeJckgqYyDGmWZDXLPmicM5 7eUCGs/Qu6lyr1qzduPbU2oiTlHwtCt2DSKdxpmoe3A95rSffv4N7Iqf8iUIEfHx zZM6df5qmw== -----END CERTIFICATE-----
pkg/security/securitytest/test_certs/client.testuser.crt
0
https://github.com/cockroachdb/cockroach/commit/dd48ea52e6fbd304f48e4995c448364bc5b71321
[ 0.005111141130328178, 0.0026419044006615877, 0.00017266784561797976, 0.0026419044006615877, 0.0024692367296665907 ]
{ "id": 0, "code_window": [ "\treturn receipts, err\n", "}\n", "\n", "// Process block will attempt to process the given block's transactions and applies them\n", "// on top of the block's parent state (given it exists) and will return wether it was\n", "// successful or not.\n", "func (sm *BlockProcessor) Process(block *types.Block) (logs state.Logs, err error) {\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "func (sm *BlockProcessor) RetryProcess(block *types.Block) (logs state.Logs, err error) {\n", "\t// Processing a blocks may never happen simultaneously\n", "\tsm.mutex.Lock()\n", "\tdefer sm.mutex.Unlock()\n", "\n", "\theader := block.Header()\n", "\tif !sm.bc.HasBlock(header.ParentHash) {\n", "\t\treturn nil, ParentError(header.ParentHash)\n", "\t}\n", "\tparent := sm.bc.GetBlock(header.ParentHash)\n", "\n", "\treturn sm.processWithParent(block, parent)\n", "}\n", "\n" ], "file_path": "core/block_processor.go", "type": "add", "edit_start_line_idx": 151 }
package core import ( "fmt" "math/big" "os" "path" "runtime" "strconv" "testing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/rlp" ) func init() { runtime.GOMAXPROCS(runtime.NumCPU()) } // Test fork of length N starting from block i func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big.Int)) { // switch databases to process the new chain db, err := ethdb.NewMemDatabase() if err != nil { t.Fatal("Failed to create db:", err) } // copy old chain up to i into new db with deterministic canonical bman2, err := newCanonical(i, db) if err != nil { t.Fatal("could not make new canonical in testFork", err) } // asert the bmans have the same block at i bi1 := bman.bc.GetBlockByNumber(uint64(i)).Hash() bi2 := bman2.bc.GetBlockByNumber(uint64(i)).Hash() if bi1 != bi2 { t.Fatal("chains do not have the same hash at height", i) } bman2.bc.SetProcessor(bman2) // extend the fork parent := bman2.bc.CurrentBlock() chainB := makeChain(bman2, parent, N, db, ForkSeed) err = bman2.bc.InsertChain(chainB) if err != nil { t.Fatal("Insert chain error for fork:", err) } tdpre := bman.bc.Td() // Test the fork's blocks on the original chain td, err := testChain(chainB, bman) if err != nil { t.Fatal("expected chainB not to give errors:", err) } // Compare difficulties f(tdpre, td) } func printChain(bc *ChainManager) { for i := bc.CurrentBlock().Number().Uint64(); i > 0; i-- { b := bc.GetBlockByNumber(uint64(i)) fmt.Printf("\t%x\n", b.Hash()) } } // process blocks against a chain func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) { td := new(big.Int) for _, block := range chainB { td2, _, err := bman.bc.processor.Process(block) if err != nil { if IsKnownBlockErr(err) { continue } return nil, err } block.Td = td2 td = td2 bman.bc.mu.Lock() { bman.bc.write(block) } bman.bc.mu.Unlock() } return td, nil } func loadChain(fn string, t *testing.T) (types.Blocks, error) { fh, err := os.OpenFile(path.Join(os.Getenv("GOPATH"), "src", "github.com", "ethereum", "go-ethereum", "_data", fn), os.O_RDONLY, os.ModePerm) if err != nil { return nil, err } defer fh.Close() var chain types.Blocks if err := rlp.Decode(fh, &chain); err != nil { return nil, err } return chain, nil } func insertChain(done chan bool, chainMan *ChainManager, chain types.Blocks, t *testing.T) { err := chainMan.InsertChain(chain) if err != nil { fmt.Println(err) t.FailNow() } done <- true } func TestExtendCanonical(t *testing.T) { CanonicalLength := 5 db, err := ethdb.NewMemDatabase() if err != nil { t.Fatal("Failed to create db:", err) } // make first chain starting from genesis bman, err := newCanonical(CanonicalLength, db) if err != nil { t.Fatal("Could not make new canonical chain:", err) } f := func(td1, td2 *big.Int) { if td2.Cmp(td1) <= 0 { t.Error("expected chainB to have higher difficulty. 
Got", td2, "expected more than", td1) } } // Start fork from current height (CanonicalLength) testFork(t, bman, CanonicalLength, 1, f) testFork(t, bman, CanonicalLength, 2, f) testFork(t, bman, CanonicalLength, 5, f) testFork(t, bman, CanonicalLength, 10, f) } func TestShorterFork(t *testing.T) { db, err := ethdb.NewMemDatabase() if err != nil { t.Fatal("Failed to create db:", err) } // make first chain starting from genesis bman, err := newCanonical(10, db) if err != nil { t.Fatal("Could not make new canonical chain:", err) } f := func(td1, td2 *big.Int) { if td2.Cmp(td1) >= 0 { t.Error("expected chainB to have lower difficulty. Got", td2, "expected less than", td1) } } // Sum of numbers must be less than 10 // for this to be a shorter fork testFork(t, bman, 0, 3, f) testFork(t, bman, 0, 7, f) testFork(t, bman, 1, 1, f) testFork(t, bman, 1, 7, f) testFork(t, bman, 5, 3, f) testFork(t, bman, 5, 4, f) } func TestLongerFork(t *testing.T) { db, err := ethdb.NewMemDatabase() if err != nil { t.Fatal("Failed to create db:", err) } // make first chain starting from genesis bman, err := newCanonical(10, db) if err != nil { t.Fatal("Could not make new canonical chain:", err) } f := func(td1, td2 *big.Int) { if td2.Cmp(td1) <= 0 { t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1) } } // Sum of numbers must be greater than 10 // for this to be a longer fork testFork(t, bman, 0, 11, f) testFork(t, bman, 0, 15, f) testFork(t, bman, 1, 10, f) testFork(t, bman, 1, 12, f) testFork(t, bman, 5, 6, f) testFork(t, bman, 5, 8, f) } func TestEqualFork(t *testing.T) { db, err := ethdb.NewMemDatabase() if err != nil { t.Fatal("Failed to create db:", err) } bman, err := newCanonical(10, db) if err != nil { t.Fatal("Could not make new canonical chain:", err) } f := func(td1, td2 *big.Int) { if td2.Cmp(td1) != 0 { t.Error("expected chainB to have equal difficulty. Got", td2, "expected ", td1) } } // Sum of numbers must be equal to 10 // for this to be an equal fork testFork(t, bman, 0, 10, f) testFork(t, bman, 1, 9, f) testFork(t, bman, 2, 8, f) testFork(t, bman, 5, 5, f) testFork(t, bman, 6, 4, f) testFork(t, bman, 9, 1, f) } func TestBrokenChain(t *testing.T) { db, err := ethdb.NewMemDatabase() if err != nil { t.Fatal("Failed to create db:", err) } bman, err := newCanonical(10, db) if err != nil { t.Fatal("Could not make new canonical chain:", err) } db2, err := ethdb.NewMemDatabase() if err != nil { t.Fatal("Failed to create db:", err) } bman2, err := newCanonical(10, db2) if err != nil { t.Fatal("Could not make new canonical chain:", err) } bman2.bc.SetProcessor(bman2) parent := bman2.bc.CurrentBlock() chainB := makeChain(bman2, parent, 5, db2, ForkSeed) chainB = chainB[1:] _, err = testChain(chainB, bman) if err == nil { t.Error("expected broken chain to return error") } } func TestChainInsertions(t *testing.T) { t.Skip() // travil fails. 
db, _ := ethdb.NewMemDatabase() chain1, err := loadChain("valid1", t) if err != nil { fmt.Println(err) t.FailNow() } chain2, err := loadChain("valid2", t) if err != nil { fmt.Println(err) t.FailNow() } var eventMux event.TypeMux chainMan := NewChainManager(db, db, &eventMux) txPool := NewTxPool(&eventMux, chainMan.State) blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux) chainMan.SetProcessor(blockMan) const max = 2 done := make(chan bool, max) go insertChain(done, chainMan, chain1, t) go insertChain(done, chainMan, chain2, t) for i := 0; i < max; i++ { <-done } if chain2[len(chain2)-1].Hash() != chainMan.CurrentBlock().Hash() { t.Error("chain2 is canonical and shouldn't be") } if chain1[len(chain1)-1].Hash() != chainMan.CurrentBlock().Hash() { t.Error("chain1 isn't canonical and should be") } } func TestChainMultipleInsertions(t *testing.T) { t.Skip() // travil fails. db, _ := ethdb.NewMemDatabase() const max = 4 chains := make([]types.Blocks, max) var longest int for i := 0; i < max; i++ { var err error name := "valid" + strconv.Itoa(i+1) chains[i], err = loadChain(name, t) if len(chains[i]) >= len(chains[longest]) { longest = i } fmt.Println("loaded", name, "with a length of", len(chains[i])) if err != nil { fmt.Println(err) t.FailNow() } } var eventMux event.TypeMux chainMan := NewChainManager(db, db, &eventMux) txPool := NewTxPool(&eventMux, chainMan.State) blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux) chainMan.SetProcessor(blockMan) done := make(chan bool, max) for i, chain := range chains { // XXX the go routine would otherwise reference the same (chain[3]) variable and fail i := i chain := chain go func() { insertChain(done, chainMan, chain, t) fmt.Println(i, "done") }() } for i := 0; i < max; i++ { <-done } if chains[longest][len(chains[longest])-1].Hash() != chainMan.CurrentBlock().Hash() { t.Error("Invalid canonical chain") } } func TestGetAncestors(t *testing.T) { t.Skip() // travil fails. db, _ := ethdb.NewMemDatabase() var eventMux event.TypeMux chainMan := NewChainManager(db, db, &eventMux) chain, err := loadChain("valid1", t) if err != nil { fmt.Println(err) t.FailNow() } for _, block := range chain { chainMan.write(block) } ancestors := chainMan.GetAncestors(chain[len(chain)-1], 4) fmt.Println(ancestors) }
core/chain_manager_test.go
1
https://github.com/ethereum/go-ethereum/commit/72d065d49102dd07c929d1e147186604c5e4ab05
[ 0.9977566599845886, 0.06512030959129333, 0.00016647028678562492, 0.0013796016573905945, 0.21822594106197357 ]
{ "id": 0, "code_window": [ "\treturn receipts, err\n", "}\n", "\n", "// Process block will attempt to process the given block's transactions and applies them\n", "// on top of the block's parent state (given it exists) and will return wether it was\n", "// successful or not.\n", "func (sm *BlockProcessor) Process(block *types.Block) (logs state.Logs, err error) {\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "func (sm *BlockProcessor) RetryProcess(block *types.Block) (logs state.Logs, err error) {\n", "\t// Processing a blocks may never happen simultaneously\n", "\tsm.mutex.Lock()\n", "\tdefer sm.mutex.Unlock()\n", "\n", "\theader := block.Header()\n", "\tif !sm.bc.HasBlock(header.ParentHash) {\n", "\t\treturn nil, ParentError(header.ParentHash)\n", "\t}\n", "\tparent := sm.bc.GetBlock(header.ParentHash)\n", "\n", "\treturn sm.processWithParent(block, parent)\n", "}\n", "\n" ], "file_path": "core/block_processor.go", "type": "add", "edit_start_line_idx": 151 }
package otto import ( "fmt" "math" "strings" "testing" ) func TestGlobal(t *testing.T) { tt(t, func() { test, vm := test() runtime := vm.vm.runtime { call := func(object interface{}, src string, argumentList ...interface{}) Value { var tgt *Object switch object := object.(type) { case Value: tgt = object.Object() case *Object: tgt = object case *_object: tgt = toValue_object(object).Object() default: panic("Here be dragons.") } value, err := tgt.Call(src, argumentList...) is(err, nil) return value } // FIXME enterGlobalScope if false { value := runtime.scope.lexical.getBinding("Object", false)._object().call(UndefinedValue(), []Value{toValue(runtime.newObject())}, false, nativeFrame) is(value.IsObject(), true) is(value, "[object Object]") is(value._object().prototype == runtime.global.ObjectPrototype, true) is(value._object().prototype == runtime.global.Object.get("prototype")._object(), true) is(value._object().get("toString"), "function toString() { [native code] }") is(call(value.Object(), "hasOwnProperty", "hasOwnProperty"), false) is(call(value._object().get("toString")._object().prototype, "toString"), "function () { [native code] }") // TODO Is this right? is(value._object().get("toString")._object().get("toString"), "function toString() { [native code] }") is(value._object().get("toString")._object().get("toString")._object(), "function toString() { [native code] }") is(call(value._object(), "propertyIsEnumerable", "isPrototypeOf"), false) value._object().put("xyzzy", toValue_string("Nothing happens."), false) is(call(value, "propertyIsEnumerable", "isPrototypeOf"), false) is(call(value, "propertyIsEnumerable", "xyzzy"), true) is(value._object().get("xyzzy"), "Nothing happens.") is(call(runtime.scope.lexical.getBinding("Object", false), "isPrototypeOf", value), false) is(call(runtime.scope.lexical.getBinding("Object", false)._object().get("prototype"), "isPrototypeOf", value), true) is(call(runtime.scope.lexical.getBinding("Function", false), "isPrototypeOf", value), false) is(runtime.newObject().prototype == runtime.global.Object.get("prototype")._object(), true) abc := runtime.newBoolean(toValue_bool(true)) is(toValue_object(abc), "true") // TODO Call primitive? //def := runtime.localGet("Boolean")._object().Construct(UndefinedValue(), []Value{}) //is(def, "false") // TODO Call primitive? 
} } test(`new Number().constructor == Number`, true) test(`this.hasOwnProperty`, "function hasOwnProperty() { [native code] }") test(`eval.length === 1`, true) test(`eval.prototype === undefined`, true) test(`raise: new eval()`, "TypeError: function eval() { [native code] } is not a constructor") test(` [ [ delete undefined, undefined ], [ delete NaN, NaN ], [ delete Infinity, Infinity ], ]; `, "false,,false,NaN,false,Infinity") test(` Object.getOwnPropertyNames(Function('return this')()).sort(); `, "Array,Boolean,Date,Error,EvalError,Function,Infinity,JSON,Math,NaN,Number,Object,RangeError,ReferenceError,RegExp,String,SyntaxError,TypeError,URIError,console,decodeURI,decodeURIComponent,encodeURI,encodeURIComponent,escape,eval,isFinite,isNaN,parseFloat,parseInt,undefined,unescape") // __defineGetter__,__defineSetter__,__lookupGetter__,__lookupSetter__,constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf test(` Object.getOwnPropertyNames(Object.prototype).sort(); `, "constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf") // arguments,caller,length,name,prototype test(` Object.getOwnPropertyNames(EvalError).sort(); `, "length,prototype") test(` var abc = []; var def = [EvalError, RangeError, ReferenceError, SyntaxError, TypeError, URIError]; for (constructor in def) { abc.push(def[constructor] === def[constructor].prototype.constructor); } def = [Array, Boolean, Date, Function, Number, Object, RegExp, String, SyntaxError]; for (constructor in def) { abc.push(def[constructor] === def[constructor].prototype.constructor); } abc; `, "true,true,true,true,true,true,true,true,true,true,true,true,true,true,true") test(` [ Array.prototype.constructor === Array, Array.constructor === Function ]; `, "true,true") test(` [ Number.prototype.constructor === Number, Number.constructor === Function ]; `, "true,true") test(` [ Function.prototype.constructor === Function, Function.constructor === Function ]; `, "true,true") }) } func TestGlobalLength(t *testing.T) { tt(t, func() { test, _ := test() test(` [ Object.length, Function.length, RegExp.length, Math.length ]; `, "1,1,2,") }) } func TestGlobalError(t *testing.T) { tt(t, func() { test, _ := test() test(` [ TypeError.length, TypeError(), TypeError("Nothing happens.") ]; `, "1,TypeError,TypeError: Nothing happens.") test(` [ URIError.length, URIError(), URIError("Nothing happens.") ]; `, "1,URIError,URIError: Nothing happens.") }) } func TestGlobalReadOnly(t *testing.T) { tt(t, func() { test, _ := test() test(`Number.POSITIVE_INFINITY`, math.Inf(1)) test(` Number.POSITIVE_INFINITY = 1; `, 1) test(`Number.POSITIVE_INFINITY`, math.Inf(1)) test(` Number.POSITIVE_INFINITY = 1; Number.POSITIVE_INFINITY; `, math.Inf(1)) }) } func Test_isNaN(t *testing.T) { tt(t, func() { test, _ := test() test(`isNaN(0)`, false) test(`isNaN("Xyzzy")`, true) test(`isNaN()`, true) test(`isNaN(NaN)`, true) test(`isNaN(Infinity)`, false) test(`isNaN.length === 1`, true) test(`isNaN.prototype === undefined`, true) }) } func Test_isFinite(t *testing.T) { tt(t, func() { test, _ := test() test(`isFinite(0)`, true) test(`isFinite("Xyzzy")`, false) test(`isFinite()`, false) test(`isFinite(NaN)`, false) test(`isFinite(Infinity)`, false) test(`isFinite(new Number(451));`, true) test(`isFinite.length === 1`, true) test(`isFinite.prototype === undefined`, true) }) } func Test_parseInt(t *testing.T) { tt(t, func() { test, _ := test() test(`parseInt("0")`, 0) test(`parseInt("11")`, 11) test(`parseInt(" 11")`, 11) 
test(`parseInt("11 ")`, 11) test(`parseInt(" 11 ")`, 11) test(`parseInt(" 11\n")`, 11) test(`parseInt(" 11\n", 16)`, 17) test(`parseInt("Xyzzy")`, _NaN) test(`parseInt(" 0x11\n", 16)`, 17) test(`parseInt("0x0aXyzzy", 16)`, 10) test(`parseInt("0x1", 0)`, 1) test(`parseInt("0x10000000000000000000", 16)`, float64(75557863725914323419136)) test(`parseInt.length === 2`, true) test(`parseInt.prototype === undefined`, true) }) } func Test_parseFloat(t *testing.T) { tt(t, func() { test, _ := test() test(`parseFloat("0")`, 0) test(`parseFloat("11")`, 11) test(`parseFloat(" 11")`, 11) test(`parseFloat("11 ")`, 11) test(`parseFloat(" 11 ")`, 11) test(`parseFloat(" 11\n")`, 11) test(`parseFloat(" 11\n", 16)`, 11) test(`parseFloat("11.1")`, 11.1) test(`parseFloat("Xyzzy")`, _NaN) test(`parseFloat(" 0x11\n", 16)`, 0) test(`parseFloat("0x0a")`, 0) test(`parseFloat("0x0aXyzzy")`, 0) test(`parseFloat("Infinity")`, _Infinity) test(`parseFloat("infinity")`, _NaN) test(`parseFloat("0x")`, 0) test(`parseFloat("11x")`, 11) test(`parseFloat("Infinity1")`, _Infinity) test(`parseFloat.length === 1`, true) test(`parseFloat.prototype === undefined`, true) }) } func Test_encodeURI(t *testing.T) { tt(t, func() { test, _ := test() test(`encodeURI("http://example.com/ Nothing happens.")`, "http://example.com/%20Nothing%20happens.") test(`encodeURI("http://example.com/ _^#")`, "http://example.com/%20_%5E#") test(`encodeURI(String.fromCharCode("0xE000"))`, "%EE%80%80") test(`encodeURI(String.fromCharCode("0xFFFD"))`, "%EF%BF%BD") test(`raise: encodeURI(String.fromCharCode("0xDC00"))`, "URIError: URI malformed") test(`encodeURI.length === 1`, true) test(`encodeURI.prototype === undefined`, true) }) } func Test_encodeURIComponent(t *testing.T) { tt(t, func() { test, _ := test() test(`encodeURIComponent("http://example.com/ Nothing happens.")`, "http%3A%2F%2Fexample.com%2F%20Nothing%20happens.") test(`encodeURIComponent("http://example.com/ _^#")`, "http%3A%2F%2Fexample.com%2F%20_%5E%23") }) } func Test_decodeURI(t *testing.T) { tt(t, func() { test, _ := test() test(`decodeURI(encodeURI("http://example.com/ Nothing happens."))`, "http://example.com/ Nothing happens.") test(`decodeURI(encodeURI("http://example.com/ _^#"))`, "http://example.com/ _^#") test(`raise: decodeURI("http://example.com/ _^#%")`, "URIError: URI malformed") test(`raise: decodeURI("%DF%7F")`, "URIError: URI malformed") for _, check := range strings.Fields("+ %3B %2F %3F %3A %40 %26 %3D %2B %24 %2C %23") { test(fmt.Sprintf(`decodeURI("%s")`, check), check) } test(`decodeURI.length === 1`, true) test(`decodeURI.prototype === undefined`, true) }) } func Test_decodeURIComponent(t *testing.T) { tt(t, func() { test, _ := test() test(`decodeURIComponent(encodeURI("http://example.com/ Nothing happens."))`, "http://example.com/ Nothing happens.") test(`decodeURIComponent(encodeURI("http://example.com/ _^#"))`, "http://example.com/ _^#") test(`decodeURIComponent.length === 1`, true) test(`decodeURIComponent.prototype === undefined`, true) test(` var global = Function('return this')(); var abc = Object.getOwnPropertyDescriptor(global, "decodeURIComponent"); [ abc.value === global.decodeURIComponent, abc.writable, abc.enumerable, abc.configurable ]; `, "true,true,false,true") }) } func TestGlobal_skipEnumeration(t *testing.T) { tt(t, func() { test, _ := test() test(` var found = []; for (var test in this) { if (false || test === 'NaN' || test === 'undefined' || test === 'Infinity' || false) { found.push(test) } } found.length; `, 0) test(` var found = []; for (var 
test in this) { if (false || test === 'Object' || test === 'Function' || test === 'String' || test === 'Number' || test === 'Array' || test === 'Boolean' || test === 'Date' || test === 'RegExp' || test === 'Error' || test === 'EvalError' || test === 'RangeError' || test === 'ReferenceError' || test === 'SyntaxError' || test === 'TypeError' || test === 'URIError' || false) { found.push(test) } } found.length; `, 0) }) }
Godeps/_workspace/src/github.com/robertkrimen/otto/global_test.go
0
https://github.com/ethereum/go-ethereum/commit/72d065d49102dd07c929d1e147186604c5e4ab05
[ 0.0004609500174410641, 0.00019988010171800852, 0.00016596597561147064, 0.00017004471737891436, 0.00007086088589858264 ]
{ "id": 0, "code_window": [ "\treturn receipts, err\n", "}\n", "\n", "// Process block will attempt to process the given block's transactions and applies them\n", "// on top of the block's parent state (given it exists) and will return wether it was\n", "// successful or not.\n", "func (sm *BlockProcessor) Process(block *types.Block) (logs state.Logs, err error) {\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "func (sm *BlockProcessor) RetryProcess(block *types.Block) (logs state.Logs, err error) {\n", "\t// Processing a blocks may never happen simultaneously\n", "\tsm.mutex.Lock()\n", "\tdefer sm.mutex.Unlock()\n", "\n", "\theader := block.Header()\n", "\tif !sm.bc.HasBlock(header.ParentHash) {\n", "\t\treturn nil, ParentError(header.ParentHash)\n", "\t}\n", "\tparent := sm.bc.GetBlock(header.ParentHash)\n", "\n", "\treturn sm.processWithParent(block, parent)\n", "}\n", "\n" ], "file_path": "core/block_processor.go", "type": "add", "edit_start_line_idx": 151 }
{ "randomStatetest" : { "env" : { "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5", "currentDifficulty" : "5623894562375", "currentGasLimit" : "115792089237316195423570985008687907853269984665640564039457584007913129639935", "currentNumber" : "0", "currentTimestamp" : "1", "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6" }, "logs" : [ ], "out" : "0x", "post" : { "095e7baea6a6c7c4c2dfeb977efac326af552d87" : { "balance" : "549769638", "code" : "0x7f00000000000000000000000000000000000000000000000000000000000000017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f0000000000000000000000000000000000000000000000000000000000000001427f000000000000000000000000000000000000000000000000000000000000c3507f00000000000000000000000000000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8636f2599055", "nonce" : "0", "storage" : { "0x" : "0xc360" } }, "945304eb96065b2a98b57a48a06ae28d285a71b5" : { "balance" : "65459", "code" : "0x6000355415600957005b60203560003555", "nonce" : "0", "storage" : { } }, "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { "balance" : "999999999450164949", "code" : "0x", "nonce" : "1", "storage" : { } } }, "postStateRoot" : "bae76e2729692bbeb8c29d5577071c6ae382645fcbb40e340cff51c35673b597", "pre" : { "095e7baea6a6c7c4c2dfeb977efac326af552d87" : { "balance" : "0", "code" : "0x7f00000000000000000000000000000000000000000000000000000000000000017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f0000000000000000000000000000000000000000000000000000000000000001427f000000000000000000000000000000000000000000000000000000000000c3507f00000000000000000000000000000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8636f2599055", "nonce" : "0", "storage" : { } }, "945304eb96065b2a98b57a48a06ae28d285a71b5" : { "balance" : "46", "code" : "0x6000355415600957005b60203560003555", "nonce" : "0", "storage" : { } }, "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { "balance" : "1000000000000000000", "code" : "0x", "nonce" : "0", "storage" : { } } }, "transaction" : { "data" : "0x7f00000000000000000000000000000000000000000000000000000000000000017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f0000000000000000000000000000000000000000000000000000000000000001427f000000000000000000000000000000000000000000000000000000000000c3507f00000000000000000000000000000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8636f25990", "gasLimit" : "0x2fbe208d", "gasPrice" : "1", "nonce" : "0", "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87", "value" : "549769638" } } }
tests/files/StateTests/RandomTests/st201504011916JS.json
0
https://github.com/ethereum/go-ethereum/commit/72d065d49102dd07c929d1e147186604c5e4ab05
[ 0.00029043015092611313, 0.00018382069538347423, 0.00016672063793521374, 0.00016908800171222538, 0.000040314702346222475 ]
{ "id": 0, "code_window": [ "\treturn receipts, err\n", "}\n", "\n", "// Process block will attempt to process the given block's transactions and applies them\n", "// on top of the block's parent state (given it exists) and will return wether it was\n", "// successful or not.\n", "func (sm *BlockProcessor) Process(block *types.Block) (logs state.Logs, err error) {\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "func (sm *BlockProcessor) RetryProcess(block *types.Block) (logs state.Logs, err error) {\n", "\t// Processing a blocks may never happen simultaneously\n", "\tsm.mutex.Lock()\n", "\tdefer sm.mutex.Unlock()\n", "\n", "\theader := block.Header()\n", "\tif !sm.bc.HasBlock(header.ParentHash) {\n", "\t\treturn nil, ParentError(header.ParentHash)\n", "\t}\n", "\tparent := sm.bc.GetBlock(header.ParentHash)\n", "\n", "\treturn sm.processWithParent(block, parent)\n", "}\n", "\n" ], "file_path": "core/block_processor.go", "type": "add", "edit_start_line_idx": 151 }
package vm import ( "math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/params" ) type Address interface { Call(in []byte) []byte } type PrecompiledAccount struct { Gas func(l int) *big.Int fn func(in []byte) []byte } func (self PrecompiledAccount) Call(in []byte) []byte { return self.fn(in) } var Precompiled = PrecompiledContracts() // XXX Could set directly. Testing requires resetting and setting of pre compiled contracts. func PrecompiledContracts() map[string]*PrecompiledAccount { return map[string]*PrecompiledAccount{ // ECRECOVER string(common.LeftPadBytes([]byte{1}, 20)): &PrecompiledAccount{func(l int) *big.Int { return params.EcrecoverGas }, ecrecoverFunc}, // SHA256 string(common.LeftPadBytes([]byte{2}, 20)): &PrecompiledAccount{func(l int) *big.Int { n := big.NewInt(int64(l+31) / 32) n.Mul(n, params.Sha256WordGas) return n.Add(n, params.Sha256Gas) }, sha256Func}, // RIPEMD160 string(common.LeftPadBytes([]byte{3}, 20)): &PrecompiledAccount{func(l int) *big.Int { n := big.NewInt(int64(l+31) / 32) n.Mul(n, params.Ripemd160WordGas) return n.Add(n, params.Ripemd160Gas) }, ripemd160Func}, string(common.LeftPadBytes([]byte{4}, 20)): &PrecompiledAccount{func(l int) *big.Int { n := big.NewInt(int64(l+31) / 32) n.Mul(n, params.IdentityWordGas) return n.Add(n, params.IdentityGas) }, memCpy}, } } func sha256Func(in []byte) []byte { return crypto.Sha256(in) } func ripemd160Func(in []byte) []byte { return common.LeftPadBytes(crypto.Ripemd160(in), 32) } const ecRecoverInputLength = 128 func ecrecoverFunc(in []byte) []byte { // "in" is (hash, v, r, s), each 32 bytes // but for ecrecover we want (r, s, v) if len(in) < ecRecoverInputLength { return nil } // Treat V as a 256bit integer v := new(big.Int).Sub(common.Bytes2Big(in[32:64]), big.NewInt(27)) // Ethereum requires V to be either 0 or 1 => (27 || 28) if !(v.Cmp(Zero) == 0 || v.Cmp(One) == 0) { return nil } // v needs to be moved to the end rsv := append(in[64:128], byte(v.Uint64())) pubKey, err := crypto.Ecrecover(in[:32], rsv) // make sure the public key is a valid one if err != nil { glog.V(logger.Error).Infof("EC RECOVER FAIL: ", err) return nil } // the first byte of pubkey is bitcoin heritage return common.LeftPadBytes(crypto.Sha3(pubKey[1:])[12:], 32) } func memCpy(in []byte) []byte { return in }
core/vm/address.go
0
https://github.com/ethereum/go-ethereum/commit/72d065d49102dd07c929d1e147186604c5e4ab05
[ 0.00024304275575559586, 0.00018980269669555128, 0.00016677963139954954, 0.0001749351213220507, 0.000028193595426273532 ]
{ "id": 1, "code_window": [ "\tglog.V(logger.Info).Infof(\"exporting %v blocks...\\n\", self.currentBlock.Header().Number)\n", "\n", "\tlast := self.currentBlock.NumberU64()\n", "\n", "\tfor nr := uint64(0); nr <= last; nr++ {\n", "\t\tif err := self.GetBlockByNumber(nr).EncodeRLP(w); err != nil {\n", "\t\t\treturn err\n", "\t\t}\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tblock := self.GetBlockByNumber(nr)\n", "\t\tif block == nil {\n", "\t\t\treturn fmt.Errorf(\"export failed on #%d: not found\", nr)\n", "\t\t}\n", "\n", "\t\tif err := block.EncodeRLP(w); err != nil {\n" ], "file_path": "core/chain_manager.go", "type": "replace", "edit_start_line_idx": 324 }
package core import ( "fmt" "math/big" "os" "path" "runtime" "strconv" "testing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/rlp" ) func init() { runtime.GOMAXPROCS(runtime.NumCPU()) } // Test fork of length N starting from block i func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big.Int)) { // switch databases to process the new chain db, err := ethdb.NewMemDatabase() if err != nil { t.Fatal("Failed to create db:", err) } // copy old chain up to i into new db with deterministic canonical bman2, err := newCanonical(i, db) if err != nil { t.Fatal("could not make new canonical in testFork", err) } // asert the bmans have the same block at i bi1 := bman.bc.GetBlockByNumber(uint64(i)).Hash() bi2 := bman2.bc.GetBlockByNumber(uint64(i)).Hash() if bi1 != bi2 { t.Fatal("chains do not have the same hash at height", i) } bman2.bc.SetProcessor(bman2) // extend the fork parent := bman2.bc.CurrentBlock() chainB := makeChain(bman2, parent, N, db, ForkSeed) err = bman2.bc.InsertChain(chainB) if err != nil { t.Fatal("Insert chain error for fork:", err) } tdpre := bman.bc.Td() // Test the fork's blocks on the original chain td, err := testChain(chainB, bman) if err != nil { t.Fatal("expected chainB not to give errors:", err) } // Compare difficulties f(tdpre, td) } func printChain(bc *ChainManager) { for i := bc.CurrentBlock().Number().Uint64(); i > 0; i-- { b := bc.GetBlockByNumber(uint64(i)) fmt.Printf("\t%x\n", b.Hash()) } } // process blocks against a chain func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) { td := new(big.Int) for _, block := range chainB { td2, _, err := bman.bc.processor.Process(block) if err != nil { if IsKnownBlockErr(err) { continue } return nil, err } block.Td = td2 td = td2 bman.bc.mu.Lock() { bman.bc.write(block) } bman.bc.mu.Unlock() } return td, nil } func loadChain(fn string, t *testing.T) (types.Blocks, error) { fh, err := os.OpenFile(path.Join(os.Getenv("GOPATH"), "src", "github.com", "ethereum", "go-ethereum", "_data", fn), os.O_RDONLY, os.ModePerm) if err != nil { return nil, err } defer fh.Close() var chain types.Blocks if err := rlp.Decode(fh, &chain); err != nil { return nil, err } return chain, nil } func insertChain(done chan bool, chainMan *ChainManager, chain types.Blocks, t *testing.T) { err := chainMan.InsertChain(chain) if err != nil { fmt.Println(err) t.FailNow() } done <- true } func TestExtendCanonical(t *testing.T) { CanonicalLength := 5 db, err := ethdb.NewMemDatabase() if err != nil { t.Fatal("Failed to create db:", err) } // make first chain starting from genesis bman, err := newCanonical(CanonicalLength, db) if err != nil { t.Fatal("Could not make new canonical chain:", err) } f := func(td1, td2 *big.Int) { if td2.Cmp(td1) <= 0 { t.Error("expected chainB to have higher difficulty. 
Got", td2, "expected more than", td1) } } // Start fork from current height (CanonicalLength) testFork(t, bman, CanonicalLength, 1, f) testFork(t, bman, CanonicalLength, 2, f) testFork(t, bman, CanonicalLength, 5, f) testFork(t, bman, CanonicalLength, 10, f) } func TestShorterFork(t *testing.T) { db, err := ethdb.NewMemDatabase() if err != nil { t.Fatal("Failed to create db:", err) } // make first chain starting from genesis bman, err := newCanonical(10, db) if err != nil { t.Fatal("Could not make new canonical chain:", err) } f := func(td1, td2 *big.Int) { if td2.Cmp(td1) >= 0 { t.Error("expected chainB to have lower difficulty. Got", td2, "expected less than", td1) } } // Sum of numbers must be less than 10 // for this to be a shorter fork testFork(t, bman, 0, 3, f) testFork(t, bman, 0, 7, f) testFork(t, bman, 1, 1, f) testFork(t, bman, 1, 7, f) testFork(t, bman, 5, 3, f) testFork(t, bman, 5, 4, f) } func TestLongerFork(t *testing.T) { db, err := ethdb.NewMemDatabase() if err != nil { t.Fatal("Failed to create db:", err) } // make first chain starting from genesis bman, err := newCanonical(10, db) if err != nil { t.Fatal("Could not make new canonical chain:", err) } f := func(td1, td2 *big.Int) { if td2.Cmp(td1) <= 0 { t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1) } } // Sum of numbers must be greater than 10 // for this to be a longer fork testFork(t, bman, 0, 11, f) testFork(t, bman, 0, 15, f) testFork(t, bman, 1, 10, f) testFork(t, bman, 1, 12, f) testFork(t, bman, 5, 6, f) testFork(t, bman, 5, 8, f) } func TestEqualFork(t *testing.T) { db, err := ethdb.NewMemDatabase() if err != nil { t.Fatal("Failed to create db:", err) } bman, err := newCanonical(10, db) if err != nil { t.Fatal("Could not make new canonical chain:", err) } f := func(td1, td2 *big.Int) { if td2.Cmp(td1) != 0 { t.Error("expected chainB to have equal difficulty. Got", td2, "expected ", td1) } } // Sum of numbers must be equal to 10 // for this to be an equal fork testFork(t, bman, 0, 10, f) testFork(t, bman, 1, 9, f) testFork(t, bman, 2, 8, f) testFork(t, bman, 5, 5, f) testFork(t, bman, 6, 4, f) testFork(t, bman, 9, 1, f) } func TestBrokenChain(t *testing.T) { db, err := ethdb.NewMemDatabase() if err != nil { t.Fatal("Failed to create db:", err) } bman, err := newCanonical(10, db) if err != nil { t.Fatal("Could not make new canonical chain:", err) } db2, err := ethdb.NewMemDatabase() if err != nil { t.Fatal("Failed to create db:", err) } bman2, err := newCanonical(10, db2) if err != nil { t.Fatal("Could not make new canonical chain:", err) } bman2.bc.SetProcessor(bman2) parent := bman2.bc.CurrentBlock() chainB := makeChain(bman2, parent, 5, db2, ForkSeed) chainB = chainB[1:] _, err = testChain(chainB, bman) if err == nil { t.Error("expected broken chain to return error") } } func TestChainInsertions(t *testing.T) { t.Skip() // travil fails. 
db, _ := ethdb.NewMemDatabase() chain1, err := loadChain("valid1", t) if err != nil { fmt.Println(err) t.FailNow() } chain2, err := loadChain("valid2", t) if err != nil { fmt.Println(err) t.FailNow() } var eventMux event.TypeMux chainMan := NewChainManager(db, db, &eventMux) txPool := NewTxPool(&eventMux, chainMan.State) blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux) chainMan.SetProcessor(blockMan) const max = 2 done := make(chan bool, max) go insertChain(done, chainMan, chain1, t) go insertChain(done, chainMan, chain2, t) for i := 0; i < max; i++ { <-done } if chain2[len(chain2)-1].Hash() != chainMan.CurrentBlock().Hash() { t.Error("chain2 is canonical and shouldn't be") } if chain1[len(chain1)-1].Hash() != chainMan.CurrentBlock().Hash() { t.Error("chain1 isn't canonical and should be") } } func TestChainMultipleInsertions(t *testing.T) { t.Skip() // travil fails. db, _ := ethdb.NewMemDatabase() const max = 4 chains := make([]types.Blocks, max) var longest int for i := 0; i < max; i++ { var err error name := "valid" + strconv.Itoa(i+1) chains[i], err = loadChain(name, t) if len(chains[i]) >= len(chains[longest]) { longest = i } fmt.Println("loaded", name, "with a length of", len(chains[i])) if err != nil { fmt.Println(err) t.FailNow() } } var eventMux event.TypeMux chainMan := NewChainManager(db, db, &eventMux) txPool := NewTxPool(&eventMux, chainMan.State) blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux) chainMan.SetProcessor(blockMan) done := make(chan bool, max) for i, chain := range chains { // XXX the go routine would otherwise reference the same (chain[3]) variable and fail i := i chain := chain go func() { insertChain(done, chainMan, chain, t) fmt.Println(i, "done") }() } for i := 0; i < max; i++ { <-done } if chains[longest][len(chains[longest])-1].Hash() != chainMan.CurrentBlock().Hash() { t.Error("Invalid canonical chain") } } func TestGetAncestors(t *testing.T) { t.Skip() // travil fails. db, _ := ethdb.NewMemDatabase() var eventMux event.TypeMux chainMan := NewChainManager(db, db, &eventMux) chain, err := loadChain("valid1", t) if err != nil { fmt.Println(err) t.FailNow() } for _, block := range chain { chainMan.write(block) } ancestors := chainMan.GetAncestors(chain[len(chain)-1], 4) fmt.Println(ancestors) }
core/chain_manager_test.go
1
https://github.com/ethereum/go-ethereum/commit/72d065d49102dd07c929d1e147186604c5e4ab05
[ 0.0009528360096737742, 0.00023944013810250908, 0.0001640915434109047, 0.00017549943004269153, 0.00016884853539522737 ]
{ "id": 1, "code_window": [ "\tglog.V(logger.Info).Infof(\"exporting %v blocks...\\n\", self.currentBlock.Header().Number)\n", "\n", "\tlast := self.currentBlock.NumberU64()\n", "\n", "\tfor nr := uint64(0); nr <= last; nr++ {\n", "\t\tif err := self.GetBlockByNumber(nr).EncodeRLP(w); err != nil {\n", "\t\t\treturn err\n", "\t\t}\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tblock := self.GetBlockByNumber(nr)\n", "\t\tif block == nil {\n", "\t\t\treturn fmt.Errorf(\"export failed on #%d: not found\", nr)\n", "\t\t}\n", "\n", "\t\tif err := block.EncodeRLP(w); err != nil {\n" ], "file_path": "core/chain_manager.go", "type": "replace", "edit_start_line_idx": 324 }
{ "randomStatetest" : { "env" : { "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5", "currentDifficulty" : "5623894562375", "currentGasLimit" : "115792089237316195423570985008687907853269984665640564039457584007913129639935", "currentNumber" : "0", "currentTimestamp" : "1", "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6" }, "logs" : [ ], "out" : "0x", "post" : { "095e7baea6a6c7c4c2dfeb977efac326af552d87" : { "balance" : "1574880944", "code" : "0x7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe447f0000000000000000000000000000000000000000000000000000000000000001407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff6f2d6f4162696d5767078708977c37545560005155", "nonce" : "0", "storage" : { "0x" : "0x2d6f4162696d5767078708977c375455" } }, "945304eb96065b2a98b57a48a06ae28d285a71b5" : { "balance" : "51498", "code" : "0x6000355415600957005b60203560003555", "nonce" : "0", "storage" : { } }, "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { "balance" : "999999998425067604", "code" : "0x", "nonce" : "1", "storage" : { } } }, "postStateRoot" : "d680f860bc04f1ccb1a8cf8928d9bf4046f560d79a0a2387dadf5a34487aa947", "pre" : { "095e7baea6a6c7c4c2dfeb977efac326af552d87" : { "balance" : "0", "code" : "0x7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe447f0000000000000000000000000000000000000000000000000000000000000001407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff6f2d6f4162696d5767078708977c37545560005155", "nonce" : "0", "storage" : { } }, "945304eb96065b2a98b57a48a06ae28d285a71b5" : { "balance" : "46", "code" : "0x6000355415600957005b60203560003555", "nonce" : "0", "storage" : { } }, "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { "balance" : "1000000000000000000", "code" : "0x", "nonce" : "0", "storage" : { } } }, "transaction" : { "data" : "0x7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe447f0000000000000000000000000000000000000000000000000000000000000001407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff6f2d6f4162696d5767078708977c3754", "gasLimit" : "0x168e26c2", "gasPrice" : "1", "nonce" : "0", "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87", "value" : "1574880944" } } }
tests/files/StateTests/RandomTests/st201503181309GO.json
0
https://github.com/ethereum/go-ethereum/commit/72d065d49102dd07c929d1e147186604c5e4ab05
[ 0.00017768210091162473, 0.00017073977505788207, 0.00016514038725290447, 0.00017024688713718206, 0.000004076508048456162 ]
{ "id": 1, "code_window": [ "\tglog.V(logger.Info).Infof(\"exporting %v blocks...\\n\", self.currentBlock.Header().Number)\n", "\n", "\tlast := self.currentBlock.NumberU64()\n", "\n", "\tfor nr := uint64(0); nr <= last; nr++ {\n", "\t\tif err := self.GetBlockByNumber(nr).EncodeRLP(w); err != nil {\n", "\t\t\treturn err\n", "\t\t}\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tblock := self.GetBlockByNumber(nr)\n", "\t\tif block == nil {\n", "\t\t\treturn fmt.Errorf(\"export failed on #%d: not found\", nr)\n", "\t\t}\n", "\n", "\t\tif err := block.EncodeRLP(w); err != nil {\n" ], "file_path": "core/chain_manager.go", "type": "replace", "edit_start_line_idx": 324 }
package otto

import (
	"fmt"
	"math"
	"strings"

	"github.com/robertkrimen/otto/token"
)

func (self *_runtime) evaluateMultiply(left float64, right float64) Value {
	// TODO 11.5.1
	return Value{}
}

func (self *_runtime) evaluateDivide(left float64, right float64) Value {
	if math.IsNaN(left) || math.IsNaN(right) {
		return NaNValue()
	}
	if math.IsInf(left, 0) && math.IsInf(right, 0) {
		return NaNValue()
	}
	if left == 0 && right == 0 {
		return NaNValue()
	}
	if math.IsInf(left, 0) {
		if math.Signbit(left) == math.Signbit(right) {
			return positiveInfinityValue()
		} else {
			return negativeInfinityValue()
		}
	}
	if math.IsInf(right, 0) {
		if math.Signbit(left) == math.Signbit(right) {
			return positiveZeroValue()
		} else {
			return negativeZeroValue()
		}
	}
	if right == 0 {
		if math.Signbit(left) == math.Signbit(right) {
			return positiveInfinityValue()
		} else {
			return negativeInfinityValue()
		}
	}
	return toValue_float64(left / right)
}

func (self *_runtime) evaluateModulo(left float64, right float64) Value {
	// TODO 11.5.3
	return Value{}
}

func (self *_runtime) calculateBinaryExpression(operator token.Token, left Value, right Value) Value {

	leftValue := left.resolve()

	switch operator {

	// Additive
	case token.PLUS:
		leftValue = toPrimitive(leftValue)
		rightValue := right.resolve()
		rightValue = toPrimitive(rightValue)

		if leftValue.IsString() || rightValue.IsString() {
			return toValue_string(strings.Join([]string{leftValue.string(), rightValue.string()}, ""))
		} else {
			return toValue_float64(leftValue.float64() + rightValue.float64())
		}
	case token.MINUS:
		rightValue := right.resolve()
		return toValue_float64(leftValue.float64() - rightValue.float64())

	// Multiplicative
	case token.MULTIPLY:
		rightValue := right.resolve()
		return toValue_float64(leftValue.float64() * rightValue.float64())
	case token.SLASH:
		rightValue := right.resolve()
		return self.evaluateDivide(leftValue.float64(), rightValue.float64())
	case token.REMAINDER:
		rightValue := right.resolve()
		return toValue_float64(math.Mod(leftValue.float64(), rightValue.float64()))

	// Logical
	case token.LOGICAL_AND:
		left := leftValue.bool()
		if !left {
			return falseValue
		}
		return toValue_bool(right.resolve().bool())
	case token.LOGICAL_OR:
		left := leftValue.bool()
		if left {
			return trueValue
		}
		return toValue_bool(right.resolve().bool())

	// Bitwise
	case token.AND:
		rightValue := right.resolve()
		return toValue_int32(toInt32(leftValue) & toInt32(rightValue))
	case token.OR:
		rightValue := right.resolve()
		return toValue_int32(toInt32(leftValue) | toInt32(rightValue))
	case token.EXCLUSIVE_OR:
		rightValue := right.resolve()
		return toValue_int32(toInt32(leftValue) ^ toInt32(rightValue))

	// Shift
	// (Masking of 0x1f is to restrict the shift to a maximum of 31 places)
	case token.SHIFT_LEFT:
		rightValue := right.resolve()
		return toValue_int32(toInt32(leftValue) << (toUint32(rightValue) & 0x1f))
	case token.SHIFT_RIGHT:
		rightValue := right.resolve()
		return toValue_int32(toInt32(leftValue) >> (toUint32(rightValue) & 0x1f))
	case token.UNSIGNED_SHIFT_RIGHT:
		rightValue := right.resolve()
		// Shifting an unsigned integer is a logical shift
		return toValue_uint32(toUint32(leftValue) >> (toUint32(rightValue) & 0x1f))

	case token.INSTANCEOF:
		rightValue := right.resolve()
		if !rightValue.IsObject() {
			panic(self.panicTypeError("Expecting a function in instanceof check, but got: %v", rightValue))
		}
		return toValue_bool(rightValue._object().hasInstance(leftValue))

	case token.IN:
		rightValue := right.resolve()
		if !rightValue.IsObject() {
			panic(self.panicTypeError())
		}
		return toValue_bool(rightValue._object().hasProperty(leftValue.string()))
	}

	panic(hereBeDragons(operator))
}

func valueKindDispatchKey(left _valueKind, right _valueKind) int {
	return (int(left) << 2) + int(right)
}

var equalDispatch map[int](func(Value, Value) bool) = makeEqualDispatch()

func makeEqualDispatch() map[int](func(Value, Value) bool) {
	key := valueKindDispatchKey
	return map[int](func(Value, Value) bool){
		key(valueNumber, valueObject): func(x Value, y Value) bool { return x.float64() == y.float64() },
		key(valueString, valueObject): func(x Value, y Value) bool { return x.float64() == y.float64() },
		key(valueObject, valueNumber): func(x Value, y Value) bool { return x.float64() == y.float64() },
		key(valueObject, valueString): func(x Value, y Value) bool { return x.float64() == y.float64() },
	}
}

type _lessThanResult int

const (
	lessThanFalse _lessThanResult = iota
	lessThanTrue
	lessThanUndefined
)

func calculateLessThan(left Value, right Value, leftFirst bool) _lessThanResult {

	x := Value{}
	y := x

	if leftFirst {
		x = toNumberPrimitive(left)
		y = toNumberPrimitive(right)
	} else {
		y = toNumberPrimitive(right)
		x = toNumberPrimitive(left)
	}

	result := false
	if x.kind != valueString || y.kind != valueString {
		x, y := x.float64(), y.float64()
		if math.IsNaN(x) || math.IsNaN(y) {
			return lessThanUndefined
		}
		result = x < y
	} else {
		x, y := x.string(), y.string()
		result = x < y
	}

	if result {
		return lessThanTrue
	}

	return lessThanFalse
}

// FIXME Probably a map is not the most efficient way to do this
var lessThanTable [4](map[_lessThanResult]bool) = [4](map[_lessThanResult]bool){
	// <
	map[_lessThanResult]bool{
		lessThanFalse:     false,
		lessThanTrue:      true,
		lessThanUndefined: false,
	},

	// >
	map[_lessThanResult]bool{
		lessThanFalse:     false,
		lessThanTrue:      true,
		lessThanUndefined: false,
	},

	// <=
	map[_lessThanResult]bool{
		lessThanFalse:     true,
		lessThanTrue:      false,
		lessThanUndefined: false,
	},

	// >=
	map[_lessThanResult]bool{
		lessThanFalse:     true,
		lessThanTrue:      false,
		lessThanUndefined: false,
	},
}

func (self *_runtime) calculateComparison(comparator token.Token, left Value, right Value) bool {

	// FIXME Use strictEqualityComparison?
	// TODO This might be redundant now (with regards to evaluateComparison)
	x := left.resolve()
	y := right.resolve()

	kindEqualKind := false
	result := true
	negate := false

	switch comparator {
	case token.LESS:
		result = lessThanTable[0][calculateLessThan(x, y, true)]
	case token.GREATER:
		result = lessThanTable[1][calculateLessThan(y, x, false)]
	case token.LESS_OR_EQUAL:
		result = lessThanTable[2][calculateLessThan(y, x, false)]
	case token.GREATER_OR_EQUAL:
		result = lessThanTable[3][calculateLessThan(x, y, true)]

	case token.STRICT_NOT_EQUAL:
		negate = true
		fallthrough
	case token.STRICT_EQUAL:
		if x.kind != y.kind {
			result = false
		} else {
			kindEqualKind = true
		}

	case token.NOT_EQUAL:
		negate = true
		fallthrough
	case token.EQUAL:
		if x.kind == y.kind {
			kindEqualKind = true
		} else if x.kind <= valueNull && y.kind <= valueNull {
			result = true
		} else if x.kind <= valueNull || y.kind <= valueNull {
			result = false
		} else if x.kind <= valueString && y.kind <= valueString {
			result = x.float64() == y.float64()
		} else if x.kind == valueBoolean {
			result = self.calculateComparison(token.EQUAL, toValue_float64(x.float64()), y)
		} else if y.kind == valueBoolean {
			result = self.calculateComparison(token.EQUAL, x, toValue_float64(y.float64()))
		} else if x.kind == valueObject {
			result = self.calculateComparison(token.EQUAL, toPrimitive(x), y)
		} else if y.kind == valueObject {
			result = self.calculateComparison(token.EQUAL, x, toPrimitive(y))
		} else {
			panic(hereBeDragons("Unable to test for equality: %v ==? %v", x, y))
		}
	default:
		panic(fmt.Errorf("Unknown comparator %s", comparator.String()))
	}

	if kindEqualKind {
		switch x.kind {
		case valueUndefined, valueNull:
			result = true
		case valueNumber:
			x := x.float64()
			y := y.float64()
			if math.IsNaN(x) || math.IsNaN(y) {
				result = false
			} else {
				result = x == y
			}
		case valueString:
			result = x.string() == y.string()
		case valueBoolean:
			result = x.bool() == y.bool()
		case valueObject:
			result = x._object() == y._object()
		default:
			goto ERROR
		}
	}

	if negate {
		result = !result
	}

	return result

ERROR:
	panic(hereBeDragons("%v (%v) %s %v (%v)", x, x.kind, comparator, y, y.kind))
}
Godeps/_workspace/src/github.com/robertkrimen/otto/evaluate.go
0
https://github.com/ethereum/go-ethereum/commit/72d065d49102dd07c929d1e147186604c5e4ab05
[ 0.00017958549142349511, 0.00017638207646086812, 0.00016859182505868375, 0.00017710003885440528, 0.0000026329078082198976 ]
{ "id": 1, "code_window": [ "\tglog.V(logger.Info).Infof(\"exporting %v blocks...\\n\", self.currentBlock.Header().Number)\n", "\n", "\tlast := self.currentBlock.NumberU64()\n", "\n", "\tfor nr := uint64(0); nr <= last; nr++ {\n", "\t\tif err := self.GetBlockByNumber(nr).EncodeRLP(w); err != nil {\n", "\t\t\treturn err\n", "\t\t}\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tblock := self.GetBlockByNumber(nr)\n", "\t\tif block == nil {\n", "\t\t\treturn fmt.Errorf(\"export failed on #%d: not found\", nr)\n", "\t\t}\n", "\n", "\t\tif err := block.EncodeRLP(w); err != nil {\n" ], "file_path": "core/chain_manager.go", "type": "replace", "edit_start_line_idx": 324 }
{ "randomStatetest" : { "env" : { "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5", "currentDifficulty" : "5623894562375", "currentGasLimit" : "115792089237316195423570985008687907853269984665640564039457584007913129639935", "currentNumber" : "0", "currentTimestamp" : "1", "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6" }, "logs" : [ ], "out" : "0x", "post" : { "095e7baea6a6c7c4c2dfeb977efac326af552d87" : { "balance" : "1968716197", "code" : "0x7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000c3507f00000000000000000000000100000000000000000000000000000000000000007f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff099055510760005155", "nonce" : "0", "storage" : { } }, "945304eb96065b2a98b57a48a06ae28d285a71b5" : { "balance" : "51880", "code" : "0x6000355415600957005b60203560003555", "nonce" : "0", "storage" : { } }, "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { "balance" : "999999998031231969", "code" : "0x", "nonce" : "1", "storage" : { } } }, "postStateRoot" : "f052b9992c4c5d67b47396b5e0bbce60de54f56ec06cf726b1a989ae46d3199e", "pre" : { "095e7baea6a6c7c4c2dfeb977efac326af552d87" : { "balance" : "0", "code" : "0x7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000c3507f00000000000000000000000100000000000000000000000000000000000000007f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff099055510760005155", "nonce" : "0", "storage" : { } }, "945304eb96065b2a98b57a48a06ae28d285a71b5" : { "balance" : "46", "code" : "0x6000355415600957005b60203560003555", "nonce" : "0", "storage" : { } }, "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { "balance" : "1000000000000000000", "code" : "0x", "nonce" : "0", "storage" : { } } }, "transaction" : { "data" : "0x7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000c3507f00000000000000000000000100000000000000000000000000000000000000007f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff0990555107", "gasLimit" : "0x38a59ba4", "gasPrice" : "1", "nonce" : "0", "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87", "value" : "1968716197" } } }
tests/files/StateTests/RandomTests/st201503181731CPPJIT.json
0
https://github.com/ethereum/go-ethereum/commit/72d065d49102dd07c929d1e147186604c5e4ab05
[ 0.00017733967979438603, 0.00017040976672433317, 0.00016493076691403985, 0.00017058303637895733, 0.0000035079206099908333 ]