| hunk (dict) | file (string, lengths 0–11.8M) | file_path (string, lengths 2–234) | label (int64, 0–1) | commit_url (string, lengths 74–103) | dependency_score (sequence, lengths 5–5) |
|---|---|---|---|---|---|
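For orientation, here is a minimal sketch of how one row of this dump could be modeled in Go. The field names simply mirror the columns above and the visible hunk JSON; the struct and its JSON tags are an assumption for illustration, not the dataset's official loader. The rows that follow use this layout.

```go
package main

// Hunk mirrors the "hunk" dict that opens every row below.
type Hunk struct {
	ID               int      `json:"id"`
	CodeWindow       []string `json:"code_window"`
	Labels           []string `json:"labels"`
	AfterEdit        []string `json:"after_edit"`
	FilePath         string   `json:"file_path"`
	Type             string   `json:"type"`
	EditStartLineIdx int      `json:"edit_start_line_idx"`
}

// DatasetRow mirrors the columns listed in the table above.
type DatasetRow struct {
	Hunk            Hunk       // "hunk": the edit window and its per-line keep/replace labels
	File            string     // "file": full contents of a candidate file (0 to ~11.8M chars)
	FilePath        string     // "file_path": path of that candidate file (2 to 234 chars)
	Label           int64      // "label": binary 0/1 label for the (hunk, file) pair
	CommitURL       string     // "commit_url": source commit on GitHub
	DependencyScore [5]float64 // "dependency_score": always a length-5 score vector
}
```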
{
"id": 1,
"code_window": [
"\tloadJob := func() {\n",
"\t\tc.Stop()\n",
"\n",
"\t\trawSchedule := app.Settings().Backups.Cron\n",
"\t\tif rawSchedule == \"\" || !app.IsBootstrapped() {\n",
"\t\t\treturn\n",
"\t\t}\n",
"\n",
"\t\tc.Add(\"@autobackup\", rawSchedule, func() {\n",
"\t\t\tautoPrefix := \"@auto_pb_backup_\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif rawSchedule == \"\" || !isServe || !app.IsBootstrapped() {\n"
],
"file_path": "core/base_backup.go",
"type": "replace",
"edit_start_line_idx": 258
} | package forms
import (
"os"
"time"
"github.com/pocketbase/pocketbase/core"
"github.com/pocketbase/pocketbase/daos"
"github.com/pocketbase/pocketbase/models/settings"
)
// SettingsUpsert is a [settings.Settings] upsert (create/update) form.
type SettingsUpsert struct {
*settings.Settings
app core.App
dao *daos.Dao
}
// NewSettingsUpsert creates a new [SettingsUpsert] form with initializer
// config created from the provided [core.App] instance.
//
// If you want to submit the form as part of a transaction,
// you can change the default Dao via [SetDao()].
func NewSettingsUpsert(app core.App) *SettingsUpsert {
form := &SettingsUpsert{
app: app,
dao: app.Dao(),
}
// load the application settings into the form
form.Settings, _ = app.Settings().Clone()
return form
}
// SetDao replaces the default form Dao instance with the provided one.
func (form *SettingsUpsert) SetDao(dao *daos.Dao) {
form.dao = dao
}
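// A minimal usage sketch from a caller's perspective (illustrative only:
// "txDao" stands for a transactional *daos.Dao you already hold, and the
// Settings field being set is just an example):
//
//	form := forms.NewSettingsUpsert(app)
//	form.SetDao(txDao) // optional: persist as part of an existing transaction
//	form.Settings.Meta.AppName = "Acme"
//	if err := form.Submit(); err != nil {
//		// handle the validation/persist error
//	}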
// Validate makes the form validatable by implementing [validation.Validatable] interface.
func (form *SettingsUpsert) Validate() error {
return form.Settings.Validate()
}
// Submit validates the form and upserts the loaded settings.
//
// On success the app settings will be refreshed with the form ones.
//
// You can optionally provide a list of InterceptorFunc to further
// modify the form behavior before persisting it.
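// A hedged interceptor sketch, assuming the generic forms.InterceptorFunc /
// forms.InterceptorNextFunc pair used by the other forms in this package:
//
//	err := form.Submit(func(next forms.InterceptorNextFunc[*settings.Settings]) forms.InterceptorNextFunc[*settings.Settings] {
//		return func(s *settings.Settings) error {
//			// inspect or tweak s here before it is saved
//			return next(s)
//		}
//	})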
func (form *SettingsUpsert) Submit(interceptors ...InterceptorFunc[*settings.Settings]) error {
if err := form.Validate(); err != nil {
return err
}
return runInterceptors(form.Settings, func(s *settings.Settings) error {
form.Settings = s
encryptionKey := os.Getenv(form.app.EncryptionEnv())
if err := form.dao.SaveSettings(form.Settings, encryptionKey); err != nil {
return err
}
// explicitly trigger old logs deletion
form.app.LogsDao().DeleteOldRequests(
time.Now().AddDate(0, 0, -1*form.Settings.Logs.MaxDays),
)
if form.Settings.Logs.MaxDays == 0 {
// no logs are allowed -> reclaim preserved disk space after the previous delete operation
form.app.LogsDao().Vacuum()
}
// merge the application settings with the form ones
return form.app.Settings().Merge(form.Settings)
}, interceptors...)
}
| forms/settings_upsert.go | 1 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.0010783040197566152,
0.0004156899522058666,
0.00016348449571523815,
0.00017121274140663445,
0.0003555945004336536
] |
{
"id": 1,
"code_window": [
"\tloadJob := func() {\n",
"\t\tc.Stop()\n",
"\n",
"\t\trawSchedule := app.Settings().Backups.Cron\n",
"\t\tif rawSchedule == \"\" || !app.IsBootstrapped() {\n",
"\t\t\treturn\n",
"\t\t}\n",
"\n",
"\t\tc.Add(\"@autobackup\", rawSchedule, func() {\n",
"\t\t\tautoPrefix := \"@auto_pb_backup_\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif rawSchedule == \"\" || !isServe || !app.IsBootstrapped() {\n"
],
"file_path": "core/base_backup.go",
"type": "replace",
"edit_start_line_idx": 258
} | <script>
import { Collection } from "pocketbase";
import ApiClient from "@/utils/ApiClient";
import CommonHelper from "@/utils/CommonHelper";
import CodeBlock from "@/components/base/CodeBlock.svelte";
import SdkTabs from "@/components/collections/docs/SdkTabs.svelte";
import FieldsQueryParam from "@/components/collections/docs/FieldsQueryParam.svelte";
export let collection = new Collection();
let responseTab = 200;
let responses = [];
let baseData = {};
$: adminsOnly = collection?.createRule === null;
$: backendAbsUrl = CommonHelper.getApiExampleUrl(ApiClient.baseUrl);
$: responses = [
{
code: 200,
body: JSON.stringify(CommonHelper.dummyCollectionRecord(collection), null, 2),
},
{
code: 400,
body: `
{
"code": 400,
"message": "Failed to create record.",
"data": {
"${collection?.schema?.[0]?.name}": {
"code": "validation_required",
"message": "Missing required value."
}
}
}
`,
},
{
code: 403,
body: `
{
"code": 403,
"message": "You are not allowed to perform this request.",
"data": {}
}
`,
},
];
$: if (collection.$isAuth) {
baseData = {
username: "test_username",
email: "[email protected]",
emailVisibility: true,
password: "12345678",
passwordConfirm: "12345678",
};
} else {
baseData = {};
}
</script>
<h3 class="m-b-sm">Create ({collection.name})</h3>
<div class="content txt-lg m-b-sm">
<p>Create a new <strong>{collection.name}</strong> record.</p>
<p>
Body parameters can be sent as <code>application/json</code> or
<code>multipart/form-data</code>.
</p>
<p>
File upload is supported only via <code>multipart/form-data</code>.
<br />
For more info and examples, check the detailed
<a href={import.meta.env.PB_FILE_UPLOAD_DOCS} target="_blank" rel="noopener noreferrer">
Files upload and handling docs
</a>.
</p>
</div>
<!-- prettier-ignore -->
<SdkTabs
js={`
import PocketBase from 'pocketbase';
const pb = new PocketBase('${backendAbsUrl}');
...
// example create data
const data = ${JSON.stringify(Object.assign({}, baseData, CommonHelper.dummyCollectionSchemaData(collection)), null, 4)};
const record = await pb.collection('${collection?.name}').create(data);
` + (collection?.isAuth ?
`
// (optional) send an email verification request
await pb.collection('${collection?.name}').requestVerification('[email protected]');
` : ""
)}
dart={`
import 'package:pocketbase/pocketbase.dart';
final pb = PocketBase('${backendAbsUrl}');
...
// example create body
final body = <String, dynamic>${JSON.stringify(Object.assign({}, baseData, CommonHelper.dummyCollectionSchemaData(collection)), null, 2)};
final record = await pb.collection('${collection?.name}').create(body: body);
` + (collection?.isAuth ?
`
// (optional) send an email verification request
await pb.collection('${collection?.name}').requestVerification('[email protected]');
` : ""
)}
/>
<h6 class="m-b-xs">API details</h6>
<div class="alert alert-success">
<strong class="label label-primary">POST</strong>
<div class="content">
<p>
/api/collections/<strong>{collection.name}</strong>/records
</p>
</div>
{#if adminsOnly}
<p class="txt-hint txt-sm txt-right">Requires admin <code>Authorization:TOKEN</code> header</p>
{/if}
</div>
<div class="section-title">Body Parameters</div>
<table class="table-compact table-border m-b-base">
<thead>
<tr>
<th>Param</th>
<th>Type</th>
<th width="50%">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<div class="inline-flex">
<span class="label label-warning">Optional</span>
<span>id</span>
</div>
</td>
<td>
<span class="label">String</span>
</td>
<td>
<strong>15-character string</strong> to store as the record ID.
<br />
If not set, it will be auto generated.
</td>
</tr>
{#if collection?.isAuth}
<tr>
<td colspan="3" class="txt-hint">Auth fields</td>
</tr>
<tr>
<td>
<div class="inline-flex">
<span class="label label-warning">Optional</span>
<span>username</span>
</div>
</td>
<td>
<span class="label">String</span>
</td>
<td>
The username of the auth record.
<br />
If not set, it will be auto generated.
</td>
</tr>
<tr>
<td>
<div class="inline-flex">
{#if collection?.options?.requireEmail}
<span class="label label-success">Required</span>
{:else}
<span class="label label-warning">Optional</span>
{/if}
<span>email</span>
</div>
</td>
<td>
<span class="label">String</span>
</td>
<td>Auth record email address.</td>
</tr>
<tr>
<td>
<div class="inline-flex">
<span class="label label-warning">Optional</span>
<span>emailVisibility</span>
</div>
</td>
<td>
<span class="label">Boolean</span>
</td>
<td>Whether to show/hide the auth record email when fetching the record data.</td>
</tr>
<tr>
<td>
<div class="inline-flex">
<span class="label label-success">Required</span>
<span>password</span>
</div>
</td>
<td>
<span class="label">String</span>
</td>
<td>Auth record password.</td>
</tr>
<tr>
<td>
<div class="inline-flex">
<span class="label label-success">Required</span>
<span>passwordConfirm</span>
</div>
</td>
<td>
<span class="label">String</span>
</td>
<td>Auth record password confirmation.</td>
</tr>
<tr>
<td>
<div class="inline-flex">
<span class="label label-warning">Optional</span>
<span>verified</span>
</div>
</td>
<td>
<span class="label">Boolean</span>
</td>
<td>
Indicates whether the auth record is verified or not.
<br />
This field can be set only by admins or auth records with "Manage" access.
</td>
</tr>
<tr>
<td colspan="3" class="txt-hint">Schema fields</td>
</tr>
{/if}
{#each collection?.schema as field (field.name)}
<tr>
<td>
<div class="inline-flex">
{#if field.required}
<span class="label label-success">Required</span>
{:else}
<span class="label label-warning">Optional</span>
{/if}
<span>{field.name}</span>
</div>
</td>
<td>
<span class="label">{CommonHelper.getFieldValueType(field)}</span>
</td>
<td>
{#if field.type === "text"}
Plain text value.
{:else if field.type === "number"}
Number value.
{:else if field.type === "json"}
JSON array or object.
{:else if field.type === "email"}
Email address.
{:else if field.type === "url"}
URL address.
{:else if field.type === "file"}
File object.<br />
Set to <code>null</code> to delete already uploaded file(s).
{:else if field.type === "relation"}
Relation record {field.options?.maxSelect === 1 ? "id" : "ids"}.
{/if}
</td>
</tr>
{/each}
</tbody>
</table>
<div class="section-title">Query parameters</div>
<table class="table-compact table-border m-b-base">
<thead>
<tr>
<th>Param</th>
<th>Type</th>
<th width="60%">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>expand</td>
<td>
<span class="label">String</span>
</td>
<td>
Auto expand relations when returning the created record. Ex.:
<CodeBlock content={`?expand=relField1,relField2.subRelField`} />
Supports nested relations expansion up to 6 levels deep. <br />
The expanded relations will be appended to the record under the
<code>expand</code> property (eg. <code>{`"expand": {"relField1": {...}, ...}`}</code>).
<br />
Only the relations that the request user has permission to <strong>view</strong> will be expanded.
</td>
</tr>
<FieldsQueryParam />
</tbody>
</table>
<div class="section-title">Responses</div>
<div class="tabs">
<div class="tabs-header compact left">
{#each responses as response (response.code)}
<button
class="tab-item"
class:active={responseTab === response.code}
on:click={() => (responseTab = response.code)}
>
{response.code}
</button>
{/each}
</div>
<div class="tabs-content">
{#each responses as response (response.code)}
<div class="tab-item" class:active={responseTab === response.code}>
<CodeBlock content={response.body} />
</div>
{/each}
</div>
</div>
| ui/src/components/collections/docs/CreateApiDocs.svelte | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.00017681541794445366,
0.00017260129970964044,
0.0001635880471440032,
0.00017273420235142112,
0.000002940598506029346
] |
{
"id": 1,
"code_window": [
"\tloadJob := func() {\n",
"\t\tc.Stop()\n",
"\n",
"\t\trawSchedule := app.Settings().Backups.Cron\n",
"\t\tif rawSchedule == \"\" || !app.IsBootstrapped() {\n",
"\t\t\treturn\n",
"\t\t}\n",
"\n",
"\t\tc.Add(\"@autobackup\", rawSchedule, func() {\n",
"\t\t\tautoPrefix := \"@auto_pb_backup_\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif rawSchedule == \"\" || !isServe || !app.IsBootstrapped() {\n"
],
"file_path": "core/base_backup.go",
"type": "replace",
"edit_start_line_idx": 258
} | package models_test
import (
"bytes"
"database/sql"
"encoding/json"
"testing"
"time"
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/models"
"github.com/pocketbase/pocketbase/models/schema"
"github.com/pocketbase/pocketbase/tools/list"
"github.com/pocketbase/pocketbase/tools/types"
)
func TestNewRecord(t *testing.T) {
collection := &models.Collection{
Name: "test_collection",
Schema: schema.NewSchema(
&schema.SchemaField{
Name: "test",
Type: schema.FieldTypeText,
},
),
}
m := models.NewRecord(collection)
if m.Collection().Name != collection.Name {
t.Fatalf("Expected collection with name %q, got %q", collection.Id, m.Collection().Id)
}
if len(m.SchemaData()) != 0 {
t.Fatalf("Expected empty schema data, got %v", m.SchemaData())
}
}
func TestNewRecordFromNullStringMap(t *testing.T) {
collection := &models.Collection{
Name: "test",
Schema: schema.NewSchema(
&schema.SchemaField{
Name: "field1",
Type: schema.FieldTypeText,
},
&schema.SchemaField{
Name: "field2",
Type: schema.FieldTypeText,
},
&schema.SchemaField{
Name: "field3",
Type: schema.FieldTypeBool,
},
&schema.SchemaField{
Name: "field4",
Type: schema.FieldTypeNumber,
},
&schema.SchemaField{
Name: "field5",
Type: schema.FieldTypeSelect,
Options: &schema.SelectOptions{
Values: []string{"test1", "test2"},
MaxSelect: 1,
},
},
&schema.SchemaField{
Name: "field6",
Type: schema.FieldTypeFile,
Options: &schema.FileOptions{
MaxSelect: 2,
MaxSize: 1,
},
},
),
}
data := dbx.NullStringMap{
"id": sql.NullString{
String: "test_id",
Valid: true,
},
"created": sql.NullString{
String: "2022-01-01 10:00:00.123Z",
Valid: true,
},
"updated": sql.NullString{
String: "2022-01-01 10:00:00.456Z",
Valid: true,
},
// auth collection specific fields
"username": sql.NullString{
String: "test_username",
Valid: true,
},
"email": sql.NullString{
String: "test_email",
Valid: true,
},
"emailVisibility": sql.NullString{
String: "true",
Valid: true,
},
"verified": sql.NullString{
String: "",
Valid: false,
},
"tokenKey": sql.NullString{
String: "test_tokenKey",
Valid: true,
},
"passwordHash": sql.NullString{
String: "test_passwordHash",
Valid: true,
},
"lastResetSentAt": sql.NullString{
String: "2022-01-02 10:00:00.123Z",
Valid: true,
},
"lastVerificationSentAt": sql.NullString{
String: "2022-02-03 10:00:00.456Z",
Valid: true,
},
// custom schema fields
"field1": sql.NullString{
String: "test",
Valid: true,
},
"field2": sql.NullString{
String: "test",
Valid: false, // test invalid db serialization
},
"field3": sql.NullString{
String: "true",
Valid: true,
},
"field4": sql.NullString{
String: "123.123",
Valid: true,
},
"field5": sql.NullString{
String: `["test1","test2"]`, // will select only the last elem
Valid: true,
},
"field6": sql.NullString{
String: "test", // will be converted to slice
Valid: true,
},
"unknown": sql.NullString{
String: "test",
Valid: true,
},
}
scenarios := []struct {
collectionType string
expectedJson string
}{
{
models.CollectionTypeBase,
`{"collectionId":"","collectionName":"test","created":"2022-01-01 10:00:00.123Z","field1":"test","field2":"","field3":true,"field4":123.123,"field5":"test2","field6":["test"],"id":"test_id","updated":"2022-01-01 10:00:00.456Z"}`,
},
{
models.CollectionTypeAuth,
`{"collectionId":"","collectionName":"test","created":"2022-01-01 10:00:00.123Z","email":"test_email","emailVisibility":true,"field1":"test","field2":"","field3":true,"field4":123.123,"field5":"test2","field6":["test"],"id":"test_id","updated":"2022-01-01 10:00:00.456Z","username":"test_username","verified":false}`,
},
}
for i, s := range scenarios {
collection.Type = s.collectionType
m := models.NewRecordFromNullStringMap(collection, data)
m.IgnoreEmailVisibility(true)
encoded, err := m.MarshalJSON()
if err != nil {
t.Errorf("(%d) Unexpected error: %v", i, err)
continue
}
if string(encoded) != s.expectedJson {
t.Errorf("(%d) Expected \n%v \ngot \n%v", i, s.expectedJson, string(encoded))
}
// additional data checks
if collection.IsAuth() {
if v := m.GetString(schema.FieldNamePasswordHash); v != "test_passwordHash" {
t.Errorf("(%d) Expected %q, got %q", i, "test_passwordHash", v)
}
if v := m.GetString(schema.FieldNameTokenKey); v != "test_tokenKey" {
t.Errorf("(%d) Expected %q, got %q", i, "test_tokenKey", v)
}
if v := m.GetString(schema.FieldNameLastResetSentAt); v != "2022-01-02 10:00:00.123Z" {
t.Errorf("(%d) Expected %q, got %q", i, "2022-01-02 10:00:00.123Z", v)
}
if v := m.GetString(schema.FieldNameLastVerificationSentAt); v != "2022-02-03 10:00:00.456Z" {
t.Errorf("(%d) Expected %q, got %q", i, "2022-01-02 10:00:00.123Z", v)
}
}
}
}
func TestNewRecordsFromNullStringMaps(t *testing.T) {
collection := &models.Collection{
Name: "test",
Schema: schema.NewSchema(
&schema.SchemaField{
Name: "field1",
Type: schema.FieldTypeText,
},
&schema.SchemaField{
Name: "field2",
Type: schema.FieldTypeNumber,
},
&schema.SchemaField{
Name: "field3",
Type: schema.FieldTypeUrl,
},
),
}
data := []dbx.NullStringMap{
{
"id": sql.NullString{
String: "test_id1",
Valid: true,
},
"created": sql.NullString{
String: "2022-01-01 10:00:00.123Z",
Valid: true,
},
"updated": sql.NullString{
String: "2022-01-01 10:00:00.456Z",
Valid: true,
},
// partial auth fields
"email": sql.NullString{
String: "test_email",
Valid: true,
},
"tokenKey": sql.NullString{
String: "test_tokenKey",
Valid: true,
},
"emailVisibility": sql.NullString{
String: "true",
Valid: true,
},
// custom schema fields
"field1": sql.NullString{
String: "test",
Valid: true,
},
"field2": sql.NullString{
String: "123.123",
Valid: true,
},
"field3": sql.NullString{
String: "test",
Valid: false, // should force resolving to empty string
},
"unknown": sql.NullString{
String: "test",
Valid: true,
},
},
{
"field3": sql.NullString{
String: "test",
Valid: true,
},
"email": sql.NullString{
String: "test_email",
Valid: true,
},
"emailVisibility": sql.NullString{
String: "false",
Valid: true,
},
},
}
scenarios := []struct {
collectionType string
expectedJson string
}{
{
models.CollectionTypeBase,
`[{"collectionId":"","collectionName":"test","created":"2022-01-01 10:00:00.123Z","field1":"test","field2":123.123,"field3":"","id":"test_id1","updated":"2022-01-01 10:00:00.456Z"},{"collectionId":"","collectionName":"test","created":"","field1":"","field2":0,"field3":"test","id":"","updated":""}]`,
},
{
models.CollectionTypeAuth,
`[{"collectionId":"","collectionName":"test","created":"2022-01-01 10:00:00.123Z","email":"test_email","emailVisibility":true,"field1":"test","field2":123.123,"field3":"","id":"test_id1","updated":"2022-01-01 10:00:00.456Z","username":"","verified":false},{"collectionId":"","collectionName":"test","created":"","emailVisibility":false,"field1":"","field2":0,"field3":"test","id":"","updated":"","username":"","verified":false}]`,
},
}
for i, s := range scenarios {
collection.Type = s.collectionType
result := models.NewRecordsFromNullStringMaps(collection, data)
encoded, err := json.Marshal(result)
if err != nil {
t.Errorf("(%d) Unexpected error: %v", i, err)
continue
}
if string(encoded) != s.expectedJson {
t.Errorf("(%d) Expected \n%v \ngot \n%v", i, s.expectedJson, string(encoded))
}
}
}
func TestRecordTableName(t *testing.T) {
collection := &models.Collection{}
collection.Name = "test"
collection.RefreshId()
m := models.NewRecord(collection)
if m.TableName() != collection.Name {
t.Fatalf("Expected table %q, got %q", collection.Name, m.TableName())
}
}
func TestRecordCollection(t *testing.T) {
collection := &models.Collection{}
collection.RefreshId()
m := models.NewRecord(collection)
if m.Collection().Id != collection.Id {
t.Fatalf("Expected collection with id %v, got %v", collection.Id, m.Collection().Id)
}
}
func TestRecordOriginalCopy(t *testing.T) {
m := models.NewRecord(&models.Collection{})
m.Load(map[string]any{"f": "123"})
// change the field
m.Set("f", "456")
if v := m.GetString("f"); v != "456" {
t.Fatalf("Expected f to be %q, got %q", "456", v)
}
if v := m.OriginalCopy().GetString("f"); v != "123" {
t.Fatalf("Expected the initial/original f to be %q, got %q", "123", v)
}
// loading new data shouldn't affect the original state
m.Load(map[string]any{"f": "789"})
if v := m.GetString("f"); v != "789" {
t.Fatalf("Expected f to be %q, got %q", "789", v)
}
if v := m.OriginalCopy().GetString("f"); v != "123" {
t.Fatalf("Expected the initial/original f still to be %q, got %q", "123", v)
}
}
func TestRecordCleanCopy(t *testing.T) {
m := models.NewRecord(&models.Collection{
Name: "cname",
Type: models.CollectionTypeAuth,
})
m.Load(map[string]any{
"id": "id1",
"created": "2023-01-01 00:00:00.000Z",
"updated": "2023-01-02 00:00:00.000Z",
"username": "test",
"verified": true,
"email": "[email protected]",
"unknown": "456",
})
// make a change to ensure that the latest data is targeted
m.Set("id", "id2")
// allow the special flags and options to check whether they will be ignored
m.SetExpand(map[string]any{"test": 123})
m.IgnoreEmailVisibility(true)
m.WithUnknownData(true)
copy := m.CleanCopy()
copyExport, _ := copy.MarshalJSON()
expectedExport := []byte(`{"collectionId":"","collectionName":"cname","created":"2023-01-01 00:00:00.000Z","emailVisibility":false,"id":"id2","updated":"2023-01-02 00:00:00.000Z","username":"test","verified":true}`)
if !bytes.Equal(copyExport, expectedExport) {
t.Fatalf("Expected clean export \n%s, \ngot \n%s", expectedExport, copyExport)
}
}
func TestRecordSetAndGetExpand(t *testing.T) {
collection := &models.Collection{}
m := models.NewRecord(collection)
data := map[string]any{"test": 123}
m.SetExpand(data)
// change the original data to check if it was shallow copied
data["test"] = 456
expand := m.Expand()
if v, ok := expand["test"]; !ok || v != 123 {
t.Fatalf("Expected expand.test to be %v, got %v", 123, v)
}
}
func TestRecordMergeExpand(t *testing.T) {
collection := &models.Collection{}
m := models.NewRecord(collection)
m.Id = "m"
// a
a := models.NewRecord(collection)
a.Id = "a"
a1 := models.NewRecord(collection)
a1.Id = "a1"
a2 := models.NewRecord(collection)
a2.Id = "a2"
a3 := models.NewRecord(collection)
a3.Id = "a3"
a31 := models.NewRecord(collection)
a31.Id = "a31"
a32 := models.NewRecord(collection)
a32.Id = "a32"
a.SetExpand(map[string]any{
"a1": a1,
"a23": []*models.Record{a2, a3},
})
a3.SetExpand(map[string]any{
"a31": a31,
"a32": []*models.Record{a32},
})
// b
b := models.NewRecord(collection)
b.Id = "b"
b1 := models.NewRecord(collection)
b1.Id = "b1"
b.SetExpand(map[string]any{
"b1": b1,
})
// c
c := models.NewRecord(collection)
c.Id = "c"
// load initial expand
m.SetExpand(map[string]any{
"a": a,
"b": b,
"c": []*models.Record{c},
})
// a (new)
aNew := models.NewRecord(collection)
aNew.Id = a.Id
a3New := models.NewRecord(collection)
a3New.Id = a3.Id
a32New := models.NewRecord(collection)
a32New.Id = "a32New"
a33New := models.NewRecord(collection)
a33New.Id = "a33New"
a3New.SetExpand(map[string]any{
"a32": []*models.Record{a32New},
"a33New": a33New,
})
aNew.SetExpand(map[string]any{
"a23": []*models.Record{a2, a3New},
})
// b (new)
bNew := models.NewRecord(collection)
bNew.Id = "bNew"
dNew := models.NewRecord(collection)
dNew.Id = "dNew"
// merge expands
m.MergeExpand(map[string]any{
"a": aNew,
"b": []*models.Record{bNew},
"dNew": dNew,
})
result := m.Expand()
raw, err := json.Marshal(result)
if err != nil {
t.Fatal(err)
}
rawStr := string(raw)
expected := `{"a":{"collectionId":"","collectionName":"","created":"","expand":{"a1":{"collectionId":"","collectionName":"","created":"","id":"a1","updated":""},"a23":[{"collectionId":"","collectionName":"","created":"","id":"a2","updated":""},{"collectionId":"","collectionName":"","created":"","expand":{"a31":{"collectionId":"","collectionName":"","created":"","id":"a31","updated":""},"a32":[{"collectionId":"","collectionName":"","created":"","id":"a32","updated":""},{"collectionId":"","collectionName":"","created":"","id":"a32New","updated":""}],"a33New":{"collectionId":"","collectionName":"","created":"","id":"a33New","updated":""}},"id":"a3","updated":""}]},"id":"a","updated":""},"b":[{"collectionId":"","collectionName":"","created":"","expand":{"b1":{"collectionId":"","collectionName":"","created":"","id":"b1","updated":""}},"id":"b","updated":""},{"collectionId":"","collectionName":"","created":"","id":"bNew","updated":""}],"c":[{"collectionId":"","collectionName":"","created":"","id":"c","updated":""}],"dNew":{"collectionId":"","collectionName":"","created":"","id":"dNew","updated":""}}`
if expected != rawStr {
t.Fatalf("Expected \n%v, \ngot \n%v", expected, rawStr)
}
}
func TestRecordMergeExpandNilCheck(t *testing.T) {
collection := &models.Collection{}
scenarios := []struct {
name string
expand map[string]any
expected string
}{
{
"nil expand",
nil,
`{"collectionId":"","collectionName":"","created":"","id":"","updated":""}`,
},
{
"empty expand",
map[string]any{},
`{"collectionId":"","collectionName":"","created":"","id":"","updated":""}`,
},
{
"non-empty expand",
map[string]any{"test": models.NewRecord(collection)},
`{"collectionId":"","collectionName":"","created":"","expand":{"test":{"collectionId":"","collectionName":"","created":"","id":"","updated":""}},"id":"","updated":""}`,
},
}
for _, s := range scenarios {
m := models.NewRecord(collection)
m.MergeExpand(s.expand)
raw, err := json.Marshal(m)
if err != nil {
t.Fatal(err)
}
rawStr := string(raw)
if rawStr != s.expected {
t.Fatalf("[%s] Expected \n%v, \ngot \n%v", s.name, s.expected, rawStr)
}
}
}
func TestRecordSchemaData(t *testing.T) {
collection := &models.Collection{
Type: models.CollectionTypeAuth,
Schema: schema.NewSchema(
&schema.SchemaField{
Name: "field1",
Type: schema.FieldTypeText,
},
&schema.SchemaField{
Name: "field2",
Type: schema.FieldTypeNumber,
},
),
}
m := models.NewRecord(collection)
m.Set("email", "[email protected]")
m.Set("field1", 123)
m.Set("field2", 456)
m.Set("unknown", 789)
encoded, err := json.Marshal(m.SchemaData())
if err != nil {
t.Fatal(err)
}
expected := `{"field1":"123","field2":456}`
if v := string(encoded); v != expected {
t.Fatalf("Expected \n%v \ngot \n%v", v, expected)
}
}
func TestRecordUnknownData(t *testing.T) {
collection := &models.Collection{
Schema: schema.NewSchema(
&schema.SchemaField{
Name: "field1",
Type: schema.FieldTypeText,
},
&schema.SchemaField{
Name: "field2",
Type: schema.FieldTypeNumber,
},
),
}
data := map[string]any{
"id": "test_id",
"created": "2022-01-01 00:00:00.000",
"updated": "2022-01-01 00:00:00.000",
"collectionId": "test_collectionId",
"collectionName": "test_collectionName",
"expand": "test_expand",
"field1": "test_field1",
"field2": "test_field1",
"unknown1": "test_unknown1",
"unknown2": "test_unknown2",
"passwordHash": "test_passwordHash",
"username": "test_username",
"emailVisibility": true,
"email": "test_email",
"verified": true,
"tokenKey": "test_tokenKey",
"lastResetSentAt": "2022-01-01 00:00:00.000",
"lastVerificationSentAt": "2022-01-01 00:00:00.000",
}
scenarios := []struct {
collectionType string
expectedKeys []string
}{
{
models.CollectionTypeBase,
[]string{
"unknown1",
"unknown2",
"passwordHash",
"username",
"emailVisibility",
"email",
"verified",
"tokenKey",
"lastResetSentAt",
"lastVerificationSentAt",
},
},
{
models.CollectionTypeAuth,
[]string{"unknown1", "unknown2"},
},
}
for i, s := range scenarios {
collection.Type = s.collectionType
m := models.NewRecord(collection)
m.Load(data)
result := m.UnknownData()
if len(result) != len(s.expectedKeys) {
t.Errorf("(%d) Expected data \n%v \ngot \n%v", i, s.expectedKeys, result)
continue
}
for _, key := range s.expectedKeys {
if _, ok := result[key]; !ok {
t.Errorf("(%d) Missing expected key %q in \n%v", i, key, result)
}
}
}
}
func TestRecordSetAndGet(t *testing.T) {
collection := &models.Collection{
Schema: schema.NewSchema(
&schema.SchemaField{
Name: "field1",
Type: schema.FieldTypeText,
},
&schema.SchemaField{
Name: "field2",
Type: schema.FieldTypeNumber,
},
),
}
m := models.NewRecord(collection)
m.Set("id", "test_id")
m.Set("created", "2022-09-15 00:00:00.123Z")
m.Set("updated", "invalid")
m.Set("field1", 123) // should be casted to string
m.Set("field2", "invlaid") // should be casted to zero-number
m.Set("unknown", 456) // undefined fields are allowed but not exported by default
m.Set("expand", map[string]any{"test": 123}) // should store the value in m.expand
if m.Get("id") != "test_id" {
t.Fatalf("Expected id %q, got %q", "test_id", m.Get("id"))
}
if m.GetString("created") != "2022-09-15 00:00:00.123Z" {
t.Fatalf("Expected created %q, got %q", "2022-09-15 00:00:00.123Z", m.GetString("created"))
}
if m.GetString("updated") != "" {
t.Fatalf("Expected updated to be empty, got %q", m.GetString("updated"))
}
if m.Get("field1") != "123" {
t.Fatalf("Expected field1 %q, got %v", "123", m.Get("field1"))
}
if m.Get("field2") != 0.0 {
t.Fatalf("Expected field2 %v, got %v", 0.0, m.Get("field2"))
}
if m.Get("unknown") != 456 {
t.Fatalf("Expected unknown %v, got %v", 456, m.Get("unknown"))
}
if m.Expand()["test"] != 123 {
t.Fatalf("Expected expand to be %v, got %v", map[string]any{"test": 123}, m.Expand())
}
}
func TestRecordGetBool(t *testing.T) {
scenarios := []struct {
value any
expected bool
}{
{nil, false},
{"", false},
{0, false},
{1, true},
{[]string{"true"}, false},
{time.Now(), false},
{"test", false},
{"false", false},
{"true", true},
{false, false},
{true, true},
}
collection := &models.Collection{}
for i, s := range scenarios {
m := models.NewRecord(collection)
m.Set("test", s.value)
result := m.GetBool("test")
if result != s.expected {
t.Errorf("(%d) Expected %v, got %v", i, s.expected, result)
}
}
}
func TestRecordGetString(t *testing.T) {
scenarios := []struct {
value any
expected string
}{
{nil, ""},
{"", ""},
{0, "0"},
{1.4, "1.4"},
{[]string{"true"}, ""},
{map[string]int{"test": 1}, ""},
{[]byte("abc"), "abc"},
{"test", "test"},
{false, "false"},
{true, "true"},
}
collection := &models.Collection{}
for i, s := range scenarios {
m := models.NewRecord(collection)
m.Set("test", s.value)
result := m.GetString("test")
if result != s.expected {
t.Errorf("(%d) Expected %v, got %v", i, s.expected, result)
}
}
}
func TestRecordGetInt(t *testing.T) {
scenarios := []struct {
value any
expected int
}{
{nil, 0},
{"", 0},
{[]string{"true"}, 0},
{map[string]int{"test": 1}, 0},
{time.Now(), 0},
{"test", 0},
{123, 123},
{2.4, 2},
{"123", 123},
{"123.5", 0},
{false, 0},
{true, 1},
}
collection := &models.Collection{}
for i, s := range scenarios {
m := models.NewRecord(collection)
m.Set("test", s.value)
result := m.GetInt("test")
if result != s.expected {
t.Errorf("(%d) Expected %v, got %v", i, s.expected, result)
}
}
}
func TestRecordGetFloat(t *testing.T) {
scenarios := []struct {
value any
expected float64
}{
{nil, 0},
{"", 0},
{[]string{"true"}, 0},
{map[string]int{"test": 1}, 0},
{time.Now(), 0},
{"test", 0},
{123, 123},
{2.4, 2.4},
{"123", 123},
{"123.5", 123.5},
{false, 0},
{true, 1},
}
collection := &models.Collection{}
for i, s := range scenarios {
m := models.NewRecord(collection)
m.Set("test", s.value)
result := m.GetFloat("test")
if result != s.expected {
t.Errorf("(%d) Expected %v, got %v", i, s.expected, result)
}
}
}
func TestRecordGetTime(t *testing.T) {
nowTime := time.Now()
testTime, _ := time.Parse(types.DefaultDateLayout, "2022-01-01 08:00:40.000Z")
scenarios := []struct {
value any
expected time.Time
}{
{nil, time.Time{}},
{"", time.Time{}},
{false, time.Time{}},
{true, time.Time{}},
{"test", time.Time{}},
{[]string{"true"}, time.Time{}},
{map[string]int{"test": 1}, time.Time{}},
{1641024040, testTime},
{"2022-01-01 08:00:40.000", testTime},
{nowTime, nowTime},
}
collection := &models.Collection{}
for i, s := range scenarios {
m := models.NewRecord(collection)
m.Set("test", s.value)
result := m.GetTime("test")
if !result.Equal(s.expected) {
t.Errorf("(%d) Expected %v, got %v", i, s.expected, result)
}
}
}
func TestRecordGetDateTime(t *testing.T) {
nowTime := time.Now()
testTime, _ := time.Parse(types.DefaultDateLayout, "2022-01-01 08:00:40.000Z")
scenarios := []struct {
value any
expected time.Time
}{
{nil, time.Time{}},
{"", time.Time{}},
{false, time.Time{}},
{true, time.Time{}},
{"test", time.Time{}},
{[]string{"true"}, time.Time{}},
{map[string]int{"test": 1}, time.Time{}},
{1641024040, testTime},
{"2022-01-01 08:00:40.000", testTime},
{nowTime, nowTime},
}
collection := &models.Collection{}
for i, s := range scenarios {
m := models.NewRecord(collection)
m.Set("test", s.value)
result := m.GetDateTime("test")
if !result.Time().Equal(s.expected) {
t.Errorf("(%d) Expected %v, got %v", i, s.expected, result)
}
}
}
func TestRecordGetStringSlice(t *testing.T) {
nowTime := time.Now()
scenarios := []struct {
value any
expected []string
}{
{nil, []string{}},
{"", []string{}},
{false, []string{"false"}},
{true, []string{"true"}},
{nowTime, []string{}},
{123, []string{"123"}},
{"test", []string{"test"}},
{map[string]int{"test": 1}, []string{}},
{`["test1", "test2"]`, []string{"test1", "test2"}},
{[]int{123, 123, 456}, []string{"123", "456"}},
{[]string{"test", "test", "123"}, []string{"test", "123"}},
}
collection := &models.Collection{}
for i, s := range scenarios {
m := models.NewRecord(collection)
m.Set("test", s.value)
result := m.GetStringSlice("test")
if len(result) != len(s.expected) {
t.Errorf("(%d) Expected %d elements, got %d: %v", i, len(s.expected), len(result), result)
continue
}
for _, v := range result {
if !list.ExistInSlice(v, s.expected) {
t.Errorf("(%d) Cannot find %v in %v", i, v, s.expected)
}
}
}
}
func TestRecordUnmarshalJSONField(t *testing.T) {
collection := &models.Collection{
Schema: schema.NewSchema(&schema.SchemaField{
Name: "field",
Type: schema.FieldTypeJson,
}),
}
m := models.NewRecord(collection)
var testPointer *string
var testStr string
var testInt int
var testBool bool
var testSlice []int
var testMap map[string]any
scenarios := []struct {
value any
destination any
expectError bool
expectedJson string
}{
{nil, testStr, true, `""`},
{"", testStr, false, `""`},
{1, testInt, false, `1`},
{true, testBool, false, `true`},
{[]int{1, 2, 3}, testSlice, false, `[1,2,3]`},
{map[string]any{"test": 123}, testMap, false, `{"test":123}`},
// json encoded values
{`null`, testPointer, false, `null`},
{`true`, testBool, false, `true`},
{`456`, testInt, false, `456`},
{`"test"`, testStr, false, `"test"`},
{`[4,5,6]`, testSlice, false, `[4,5,6]`},
{`{"test":456}`, testMap, false, `{"test":456}`},
}
for i, s := range scenarios {
m.Set("field", s.value)
err := m.UnmarshalJSONField("field", &s.destination)
hasErr := err != nil
if hasErr != s.expectError {
t.Errorf("(%d) Expected hasErr %v, got %v", i, s.expectError, hasErr)
continue
}
raw, _ := json.Marshal(s.destination)
if v := string(raw); v != s.expectedJson {
t.Errorf("(%d) Expected %q, got %q", i, s.expectedJson, v)
}
}
}
func TestRecordBaseFilesPath(t *testing.T) {
collection := &models.Collection{}
collection.RefreshId()
collection.Name = "test"
m := models.NewRecord(collection)
m.RefreshId()
expected := collection.BaseFilesPath() + "/" + m.Id
result := m.BaseFilesPath()
if result != expected {
t.Fatalf("Expected %q, got %q", expected, result)
}
}
func TestRecordFindFileFieldByFile(t *testing.T) {
collection := &models.Collection{
Schema: schema.NewSchema(
&schema.SchemaField{
Name: "field1",
Type: schema.FieldTypeText,
},
&schema.SchemaField{
Name: "field2",
Type: schema.FieldTypeFile,
Options: &schema.FileOptions{
MaxSelect: 1,
MaxSize: 1,
},
},
&schema.SchemaField{
Name: "field3",
Type: schema.FieldTypeFile,
Options: &schema.FileOptions{
MaxSelect: 2,
MaxSize: 1,
},
},
),
}
m := models.NewRecord(collection)
m.Set("field1", "test")
m.Set("field2", "test.png")
m.Set("field3", []string{"test1.png", "test2.png"})
scenarios := []struct {
filename string
expectField string
}{
{"", ""},
{"test", ""},
{"test2", ""},
{"test.png", "field2"},
{"test2.png", "field3"},
}
for i, s := range scenarios {
result := m.FindFileFieldByFile(s.filename)
var fieldName string
if result != nil {
fieldName = result.Name
}
if s.expectField != fieldName {
t.Errorf("(%d) Expected field %v, got %v", i, s.expectField, result)
continue
}
}
}
func TestRecordLoadAndData(t *testing.T) {
collection := &models.Collection{
Schema: schema.NewSchema(
&schema.SchemaField{
Name: "field1",
Type: schema.FieldTypeText,
},
&schema.SchemaField{
Name: "field2",
Type: schema.FieldTypeNumber,
},
),
}
data := map[string]any{
"id": "test_id",
"created": "2022-01-01 10:00:00.123Z",
"updated": "2022-01-01 10:00:00.456Z",
"field1": "test_field",
"field2": "123", // should be casted to float
"unknown": "test_unknown",
// auth collection specific casting test
"passwordHash": "test_passwordHash",
"emailVisibility": "12345", // should be casted to bool only for auth collections
"username": 123, // should be casted to string only for auth collections
"email": "test_email",
"verified": true,
"tokenKey": "test_tokenKey",
"lastResetSentAt": "2022-01-01 11:00:00.000", // should be casted to DateTime only for auth collections
"lastVerificationSentAt": "2022-01-01 12:00:00.000", // should be casted to DateTime only for auth collections
}
scenarios := []struct {
collectionType string
}{
{models.CollectionTypeBase},
{models.CollectionTypeAuth},
}
for i, s := range scenarios {
collection.Type = s.collectionType
m := models.NewRecord(collection)
m.Load(data)
expectations := map[string]any{}
for k, v := range data {
expectations[k] = v
}
expectations["created"], _ = types.ParseDateTime("2022-01-01 10:00:00.123Z")
expectations["updated"], _ = types.ParseDateTime("2022-01-01 10:00:00.456Z")
expectations["field2"] = 123.0
// extra casting test
if collection.IsAuth() {
lastResetSentAt, _ := types.ParseDateTime(expectations["lastResetSentAt"])
lastVerificationSentAt, _ := types.ParseDateTime(expectations["lastVerificationSentAt"])
expectations["emailVisibility"] = false
expectations["username"] = "123"
expectations["verified"] = true
expectations["lastResetSentAt"] = lastResetSentAt
expectations["lastVerificationSentAt"] = lastVerificationSentAt
}
for k, v := range expectations {
if m.Get(k) != v {
t.Errorf("(%d) Expected field %s to be %v, got %v", i, k, v, m.Get(k))
}
}
}
}
func TestRecordColumnValueMap(t *testing.T) {
collection := &models.Collection{
Schema: schema.NewSchema(
&schema.SchemaField{
Name: "field1",
Type: schema.FieldTypeText,
},
&schema.SchemaField{
Name: "field2",
Type: schema.FieldTypeFile,
Options: &schema.FileOptions{
MaxSelect: 1,
MaxSize: 1,
},
},
&schema.SchemaField{
Name: "field3",
Type: schema.FieldTypeSelect,
Options: &schema.SelectOptions{
MaxSelect: 2,
Values: []string{"test1", "test2", "test3"},
},
},
&schema.SchemaField{
Name: "field4",
Type: schema.FieldTypeRelation,
Options: &schema.RelationOptions{
MaxSelect: types.Pointer(2),
},
},
),
}
scenarios := []struct {
collectionType string
expectedJson string
}{
{
models.CollectionTypeBase,
`{"created":"2022-01-01 10:00:30.123Z","field1":"test","field2":"test.png","field3":["test1","test2"],"field4":["test11","test12"],"id":"test_id","updated":""}`,
},
{
models.CollectionTypeAuth,
`{"created":"2022-01-01 10:00:30.123Z","email":"test_email","emailVisibility":true,"field1":"test","field2":"test.png","field3":["test1","test2"],"field4":["test11","test12"],"id":"test_id","lastResetSentAt":"2022-01-02 10:00:30.123Z","lastVerificationSentAt":"","passwordHash":"test_passwordHash","tokenKey":"test_tokenKey","updated":"","username":"test_username","verified":false}`,
},
}
created, _ := types.ParseDateTime("2022-01-01 10:00:30.123Z")
lastResetSentAt, _ := types.ParseDateTime("2022-01-02 10:00:30.123Z")
data := map[string]any{
"id": "test_id",
"created": created,
"field1": "test",
"field2": "test.png",
"field3": []string{"test1", "test2"},
"field4": []string{"test11", "test12", "test11"}, // strip duplicate,
"unknown": "test_unknown",
"passwordHash": "test_passwordHash",
"username": "test_username",
"emailVisibility": true,
"email": "test_email",
"verified": "invalid", // should be casted
"tokenKey": "test_tokenKey",
"lastResetSentAt": lastResetSentAt,
}
m := models.NewRecord(collection)
for i, s := range scenarios {
collection.Type = s.collectionType
m.Load(data)
result := m.ColumnValueMap()
encoded, err := json.Marshal(result)
if err != nil {
t.Errorf("(%d) Unexpected error %v", i, err)
continue
}
if str := string(encoded); str != s.expectedJson {
t.Errorf("(%d) Expected \n%v \ngot \n%v", i, s.expectedJson, str)
}
}
}
func TestRecordPublicExportAndMarshalJSON(t *testing.T) {
collection := &models.Collection{
Name: "c_name",
Schema: schema.NewSchema(
&schema.SchemaField{
Name: "field1",
Type: schema.FieldTypeText,
},
&schema.SchemaField{
Name: "field2",
Type: schema.FieldTypeFile,
Options: &schema.FileOptions{
MaxSelect: 1,
MaxSize: 1,
},
},
&schema.SchemaField{
Name: "field3",
Type: schema.FieldTypeSelect,
Options: &schema.SelectOptions{
MaxSelect: 2,
Values: []string{"test1", "test2", "test3"},
},
},
),
}
collection.Id = "c_id"
scenarios := []struct {
collectionType string
exportHidden bool
exportUnknown bool
expectedJson string
}{
// base
{
models.CollectionTypeBase,
false,
false,
`{"collectionId":"c_id","collectionName":"c_name","created":"2022-01-01 10:00:30.123Z","expand":{"test":123},"field1":"test","field2":"test.png","field3":["test1","test2"],"id":"test_id","updated":""}`,
},
{
models.CollectionTypeBase,
true,
false,
`{"collectionId":"c_id","collectionName":"c_name","created":"2022-01-01 10:00:30.123Z","expand":{"test":123},"field1":"test","field2":"test.png","field3":["test1","test2"],"id":"test_id","updated":""}`,
},
{
models.CollectionTypeBase,
false,
true,
`{"collectionId":"c_id","collectionName":"c_name","created":"2022-01-01 10:00:30.123Z","email":"test_email","emailVisibility":"test_invalid","expand":{"test":123},"field1":"test","field2":"test.png","field3":["test1","test2"],"id":"test_id","lastResetSentAt":"2022-01-02 10:00:30.123Z","lastVerificationSentAt":"test_lastVerificationSentAt","passwordHash":"test_passwordHash","tokenKey":"test_tokenKey","unknown":"test_unknown","updated":"","username":123,"verified":true}`,
},
{
models.CollectionTypeBase,
true,
true,
`{"collectionId":"c_id","collectionName":"c_name","created":"2022-01-01 10:00:30.123Z","email":"test_email","emailVisibility":"test_invalid","expand":{"test":123},"field1":"test","field2":"test.png","field3":["test1","test2"],"id":"test_id","lastResetSentAt":"2022-01-02 10:00:30.123Z","lastVerificationSentAt":"test_lastVerificationSentAt","passwordHash":"test_passwordHash","tokenKey":"test_tokenKey","unknown":"test_unknown","updated":"","username":123,"verified":true}`,
},
// auth
{
models.CollectionTypeAuth,
false,
false,
`{"collectionId":"c_id","collectionName":"c_name","created":"2022-01-01 10:00:30.123Z","emailVisibility":false,"expand":{"test":123},"field1":"test","field2":"test.png","field3":["test1","test2"],"id":"test_id","updated":"","username":"123","verified":true}`,
},
{
models.CollectionTypeAuth,
true,
false,
`{"collectionId":"c_id","collectionName":"c_name","created":"2022-01-01 10:00:30.123Z","email":"test_email","emailVisibility":false,"expand":{"test":123},"field1":"test","field2":"test.png","field3":["test1","test2"],"id":"test_id","updated":"","username":"123","verified":true}`,
},
{
models.CollectionTypeAuth,
false,
true,
`{"collectionId":"c_id","collectionName":"c_name","created":"2022-01-01 10:00:30.123Z","emailVisibility":false,"expand":{"test":123},"field1":"test","field2":"test.png","field3":["test1","test2"],"id":"test_id","unknown":"test_unknown","updated":"","username":"123","verified":true}`,
},
{
models.CollectionTypeAuth,
true,
true,
`{"collectionId":"c_id","collectionName":"c_name","created":"2022-01-01 10:00:30.123Z","email":"test_email","emailVisibility":false,"expand":{"test":123},"field1":"test","field2":"test.png","field3":["test1","test2"],"id":"test_id","unknown":"test_unknown","updated":"","username":"123","verified":true}`,
},
}
created, _ := types.ParseDateTime("2022-01-01 10:00:30.123Z")
lastResetSentAt, _ := types.ParseDateTime("2022-01-02 10:00:30.123Z")
data := map[string]any{
"id": "test_id",
"created": created,
"field1": "test",
"field2": "test.png",
"field3": []string{"test1", "test2"},
"expand": map[string]any{"test": 123},
"collectionId": "m_id", // should be always ignored
"collectionName": "m_name", // should be always ignored
"unknown": "test_unknown",
"passwordHash": "test_passwordHash",
"username": 123, // for auth collections should be casted to string
"emailVisibility": "test_invalid", // for auth collections should be casted to bool
"email": "test_email",
"verified": true,
"tokenKey": "test_tokenKey",
"lastResetSentAt": lastResetSentAt,
"lastVerificationSentAt": "test_lastVerificationSentAt",
}
m := models.NewRecord(collection)
for i, s := range scenarios {
collection.Type = s.collectionType
m.Load(data)
m.IgnoreEmailVisibility(s.exportHidden)
m.WithUnknownData(s.exportUnknown)
exportResult, err := json.Marshal(m.PublicExport())
if err != nil {
t.Errorf("(%d) Unexpected error %v", i, err)
continue
}
exportResultStr := string(exportResult)
// MarshalJSON and PublicExport should return the same
marshalResult, err := m.MarshalJSON()
if err != nil {
t.Errorf("(%d) Unexpected error %v", i, err)
continue
}
marshalResultStr := string(marshalResult)
if exportResultStr != marshalResultStr {
t.Errorf("(%d) Expected the PublicExport to be the same as MarshalJSON, but got \n%v \nvs \n%v", i, exportResultStr, marshalResultStr)
}
if exportResultStr != s.expectedJson {
t.Errorf("(%d) Expected json \n%v \ngot \n%v", i, s.expectedJson, exportResultStr)
}
}
}
func TestRecordUnmarshalJSON(t *testing.T) {
collection := &models.Collection{
Schema: schema.NewSchema(
&schema.SchemaField{
Name: "field1",
Type: schema.FieldTypeText,
},
&schema.SchemaField{
Name: "field2",
Type: schema.FieldTypeNumber,
},
),
}
data := map[string]any{
"id": "test_id",
"created": "2022-01-01 10:00:00.123Z",
"updated": "2022-01-01 10:00:00.456Z",
"field1": "test_field",
"field2": "123", // should be casted to float
"unknown": "test_unknown",
// auth collection specific casting test
"passwordHash": "test_passwordHash",
"emailVisibility": "12345", // should be casted to bool only for auth collections
"username": 123.123, // should be casted to string only for auth collections
"email": "test_email",
"verified": true,
"tokenKey": "test_tokenKey",
"lastResetSentAt": "2022-01-01 11:00:00.000", // should be casted to DateTime only for auth collections
"lastVerificationSentAt": "2022-01-01 12:00:00.000", // should be casted to DateTime only for auth collections
}
dataRaw, err := json.Marshal(data)
if err != nil {
t.Fatalf("Unexpected data marshal error %v", err)
}
scenarios := []struct {
collectionType string
}{
{models.CollectionTypeBase},
{models.CollectionTypeAuth},
}
// with invalid data
m0 := models.NewRecord(collection)
if err := m0.UnmarshalJSON([]byte("test")); err == nil {
t.Fatal("Expected error, got nil")
}
// with valid data (it should be pretty much the same as load)
for i, s := range scenarios {
collection.Type = s.collectionType
m := models.NewRecord(collection)
err := m.UnmarshalJSON(dataRaw)
if err != nil {
t.Errorf("(%d) Unexpected error %v", i, err)
continue
}
expectations := map[string]any{}
for k, v := range data {
expectations[k] = v
}
expectations["created"], _ = types.ParseDateTime("2022-01-01 10:00:00.123Z")
expectations["updated"], _ = types.ParseDateTime("2022-01-01 10:00:00.456Z")
expectations["field2"] = 123.0
// extra casting test
if collection.IsAuth() {
lastResetSentAt, _ := types.ParseDateTime(expectations["lastResetSentAt"])
lastVerificationSentAt, _ := types.ParseDateTime(expectations["lastVerificationSentAt"])
expectations["emailVisibility"] = false
expectations["username"] = "123.123"
expectations["verified"] = true
expectations["lastResetSentAt"] = lastResetSentAt
expectations["lastVerificationSentAt"] = lastVerificationSentAt
}
for k, v := range expectations {
if m.Get(k) != v {
t.Errorf("(%d) Expected field %s to be %v, got %v", i, k, v, m.Get(k))
}
}
}
}
func TestRecordReplaceModifers(t *testing.T) {
collection := &models.Collection{
Schema: schema.NewSchema(
&schema.SchemaField{
Name: "text",
Type: schema.FieldTypeText,
},
&schema.SchemaField{
Name: "number",
Type: schema.FieldTypeNumber,
},
&schema.SchemaField{
Name: "rel_one",
Type: schema.FieldTypeRelation,
Options: &schema.RelationOptions{MaxSelect: types.Pointer(1)},
},
&schema.SchemaField{
Name: "rel_many",
Type: schema.FieldTypeRelation,
},
&schema.SchemaField{
Name: "select_one",
Type: schema.FieldTypeSelect,
Options: &schema.SelectOptions{MaxSelect: 1},
},
&schema.SchemaField{
Name: "select_many",
Type: schema.FieldTypeSelect,
Options: &schema.SelectOptions{MaxSelect: 10},
},
&schema.SchemaField{
Name: "file_one",
Type: schema.FieldTypeFile,
Options: &schema.FileOptions{MaxSelect: 1},
},
&schema.SchemaField{
Name: "file_one_index",
Type: schema.FieldTypeFile,
Options: &schema.FileOptions{MaxSelect: 1},
},
&schema.SchemaField{
Name: "file_one_name",
Type: schema.FieldTypeFile,
Options: &schema.FileOptions{MaxSelect: 1},
},
&schema.SchemaField{
Name: "file_many",
Type: schema.FieldTypeFile,
Options: &schema.FileOptions{MaxSelect: 10},
},
),
}
record := models.NewRecord(collection)
record.Load(map[string]any{
"text": "test",
"number": 10,
"rel_one": "a",
"rel_many": []string{"a", "b"},
"select_one": "a",
"select_many": []string{"a", "b", "c"},
"file_one": "a",
"file_one_index": "b",
"file_one_name": "c",
"file_many": []string{"a", "b", "c", "d", "e", "f"},
})
result := record.ReplaceModifers(map[string]any{
"text-": "m-",
"text+": "m+",
"number-": 3,
"number+": 5,
"rel_one-": "a",
"rel_one+": "b",
"rel_many-": []string{"a"},
"rel_many+": []string{"c", "d", "e"},
"select_one-": "a",
"select_one+": "c",
"select_many-": []string{"b", "c"},
"select_many+": []string{"d", "e"},
"file_one+": "skip", // should be ignored
"file_one-": "a",
"file_one_index.0": "",
"file_one_name.c": "",
"file_many+": []string{"e", "f"}, // should be ignored
"file_many-": []string{"c", "d"},
"file_many.f": nil,
"file_many.0": nil,
})
raw, err := json.Marshal(result)
if err != nil {
t.Fatal(err)
}
expected := `{"file_many":["b","e"],"file_one":"","file_one_index":"","file_one_name":"","number":12,"rel_many":["b","c","d","e"],"rel_one":"b","select_many":["a","d","e"],"select_one":"c","text":"test"}`
if v := string(raw); v != expected {
t.Fatalf("Expected \n%s, \ngot \n%s", expected, v)
}
}
// -------------------------------------------------------------------
// Auth helpers:
// -------------------------------------------------------------------
func TestRecordUsername(t *testing.T) {
scenarios := []struct {
collectionType string
expectError bool
}{
{models.CollectionTypeBase, true},
{models.CollectionTypeAuth, false},
}
testValue := "test 1232 !@#%" // formatting isn't checked
for i, s := range scenarios {
collection := &models.Collection{Type: s.collectionType}
m := models.NewRecord(collection)
if s.expectError {
if err := m.SetUsername(testValue); err == nil {
t.Errorf("(%d) Expected error, got nil", i)
}
if v := m.Username(); v != "" {
t.Fatalf("(%d) Expected empty string, got %q", i, v)
}
// verify that nothing is stored in the record data slice
if v := m.Get(schema.FieldNameUsername); v != nil {
t.Fatalf("(%d) Didn't expect data field %q: %v", i, schema.FieldNameUsername, v)
}
} else {
if err := m.SetUsername(testValue); err != nil {
t.Fatalf("(%d) Expected nil, got error %v", i, err)
}
if v := m.Username(); v != testValue {
t.Fatalf("(%d) Expected %q, got %q", i, testValue, v)
}
// verify that the field is stored in the record data slice
if v := m.Get(schema.FieldNameUsername); v != testValue {
t.Fatalf("(%d) Expected data field value %q, got %q", i, testValue, v)
}
}
}
}
func TestRecordEmail(t *testing.T) {
scenarios := []struct {
collectionType string
expectError bool
}{
{models.CollectionTypeBase, true},
{models.CollectionTypeAuth, false},
}
testValue := "test 1232 !@#%" // formatting isn't checked
for i, s := range scenarios {
collection := &models.Collection{Type: s.collectionType}
m := models.NewRecord(collection)
if s.expectError {
if err := m.SetEmail(testValue); err == nil {
t.Errorf("(%d) Expected error, got nil", i)
}
if v := m.Email(); v != "" {
t.Fatalf("(%d) Expected empty string, got %q", i, v)
}
// verify that nothing is stored in the record data slice
if v := m.Get(schema.FieldNameEmail); v != nil {
t.Fatalf("(%d) Didn't expect data field %q: %v", i, schema.FieldNameEmail, v)
}
} else {
if err := m.SetEmail(testValue); err != nil {
t.Fatalf("(%d) Expected nil, got error %v", i, err)
}
if v := m.Email(); v != testValue {
t.Fatalf("(%d) Expected %q, got %q", i, testValue, v)
}
// verify that the field is stored in the record data slice
if v := m.Get(schema.FieldNameEmail); v != testValue {
t.Fatalf("(%d) Expected data field value %q, got %q", i, testValue, v)
}
}
}
}
func TestRecordEmailVisibility(t *testing.T) {
scenarios := []struct {
collectionType string
value bool
expectError bool
}{
{models.CollectionTypeBase, true, true},
{models.CollectionTypeBase, true, true},
{models.CollectionTypeAuth, false, false},
{models.CollectionTypeAuth, true, false},
}
for i, s := range scenarios {
collection := &models.Collection{Type: s.collectionType}
m := models.NewRecord(collection)
if s.expectError {
if err := m.SetEmailVisibility(s.value); err == nil {
t.Errorf("(%d) Expected error, got nil", i)
}
if v := m.EmailVisibility(); v != false {
t.Fatalf("(%d) Expected empty string, got %v", i, v)
}
// verify that nothing is stored in the record data slice
if v := m.Get(schema.FieldNameEmailVisibility); v != nil {
t.Fatalf("(%d) Didn't expect data field %q: %v", i, schema.FieldNameEmailVisibility, v)
}
} else {
if err := m.SetEmailVisibility(s.value); err != nil {
t.Fatalf("(%d) Expected nil, got error %v", i, err)
}
if v := m.EmailVisibility(); v != s.value {
t.Fatalf("(%d) Expected %v, got %v", i, s.value, v)
}
// verify that the field is stored in the record data slice
if v := m.Get(schema.FieldNameEmailVisibility); v != s.value {
t.Fatalf("(%d) Expected data field value %v, got %v", i, s.value, v)
}
}
}
}
func TestRecordEmailVerified(t *testing.T) {
scenarios := []struct {
collectionType string
value bool
expectError bool
}{
{models.CollectionTypeBase, true, true},
{models.CollectionTypeBase, true, true},
{models.CollectionTypeAuth, false, false},
{models.CollectionTypeAuth, true, false},
}
for i, s := range scenarios {
collection := &models.Collection{Type: s.collectionType}
m := models.NewRecord(collection)
if s.expectError {
if err := m.SetVerified(s.value); err == nil {
t.Errorf("(%d) Expected error, got nil", i)
}
if v := m.Verified(); v != false {
t.Fatalf("(%d) Expected empty string, got %v", i, v)
}
// verify that nothing is stored in the record data slice
if v := m.Get(schema.FieldNameVerified); v != nil {
t.Fatalf("(%d) Didn't expect data field %q: %v", i, schema.FieldNameVerified, v)
}
} else {
if err := m.SetVerified(s.value); err != nil {
t.Fatalf("(%d) Expected nil, got error %v", i, err)
}
if v := m.Verified(); v != s.value {
t.Fatalf("(%d) Expected %v, got %v", i, s.value, v)
}
// verify that the field is stored in the record data slice
if v := m.Get(schema.FieldNameVerified); v != s.value {
t.Fatalf("(%d) Expected data field value %v, got %v", i, s.value, v)
}
}
}
}
func TestRecordTokenKey(t *testing.T) {
scenarios := []struct {
collectionType string
expectError bool
}{
{models.CollectionTypeBase, true},
{models.CollectionTypeAuth, false},
}
testValue := "test 1232 !@#%" // formatting isn't checked
for i, s := range scenarios {
collection := &models.Collection{Type: s.collectionType}
m := models.NewRecord(collection)
if s.expectError {
if err := m.SetTokenKey(testValue); err == nil {
t.Errorf("(%d) Expected error, got nil", i)
}
if v := m.TokenKey(); v != "" {
t.Fatalf("(%d) Expected empty string, got %q", i, v)
}
// verify that nothing is stored in the record data slice
if v := m.Get(schema.FieldNameTokenKey); v != nil {
t.Fatalf("(%d) Didn't expect data field %q: %v", i, schema.FieldNameTokenKey, v)
}
} else {
if err := m.SetTokenKey(testValue); err != nil {
t.Fatalf("(%d) Expected nil, got error %v", i, err)
}
if v := m.TokenKey(); v != testValue {
t.Fatalf("(%d) Expected %q, got %q", i, testValue, v)
}
// verify that the field is stored in the record data slice
if v := m.Get(schema.FieldNameTokenKey); v != testValue {
t.Fatalf("(%d) Expected data field value %q, got %q", i, testValue, v)
}
}
}
}
func TestRecordRefreshTokenKey(t *testing.T) {
scenarios := []struct {
collectionType string
expectError bool
}{
{models.CollectionTypeBase, true},
{models.CollectionTypeAuth, false},
}
for i, s := range scenarios {
collection := &models.Collection{Type: s.collectionType}
m := models.NewRecord(collection)
if s.expectError {
if err := m.RefreshTokenKey(); err == nil {
t.Errorf("(%d) Expected error, got nil", i)
}
if v := m.TokenKey(); v != "" {
t.Fatalf("(%d) Expected empty string, got %q", i, v)
}
// verify that nothing is stored in the record data slice
if v := m.Get(schema.FieldNameTokenKey); v != nil {
t.Fatalf("(%d) Didn't expect data field %q: %v", i, schema.FieldNameTokenKey, v)
}
} else {
if err := m.RefreshTokenKey(); err != nil {
t.Fatalf("(%d) Expected nil, got error %v", i, err)
}
if v := m.TokenKey(); len(v) != 50 {
t.Fatalf("(%d) Expected 50 chars, got %d", i, len(v))
}
// verify that the field is stored in the record data slice
if v := m.Get(schema.FieldNameTokenKey); v != m.TokenKey() {
t.Fatalf("(%d) Expected data field value %q, got %q", i, m.TokenKey(), v)
}
}
}
}
func TestRecordLastResetSentAt(t *testing.T) {
scenarios := []struct {
collectionType string
expectError bool
}{
{models.CollectionTypeBase, true},
{models.CollectionTypeAuth, false},
}
testValue, err := types.ParseDateTime("2022-01-01 00:00:00.123Z")
if err != nil {
t.Fatal(err)
}
for i, s := range scenarios {
collection := &models.Collection{Type: s.collectionType}
m := models.NewRecord(collection)
if s.expectError {
if err := m.SetLastResetSentAt(testValue); err == nil {
t.Errorf("(%d) Expected error, got nil", i)
}
if v := m.LastResetSentAt(); !v.IsZero() {
t.Fatalf("(%d) Expected empty value, got %v", i, v)
}
// verify that nothing is stored in the record data slice
if v := m.Get(schema.FieldNameLastResetSentAt); v != nil {
t.Fatalf("(%d) Didn't expect data field %q: %v", i, schema.FieldNameLastResetSentAt, v)
}
} else {
if err := m.SetLastResetSentAt(testValue); err != nil {
t.Fatalf("(%d) Expected nil, got error %v", i, err)
}
if v := m.LastResetSentAt(); v != testValue {
t.Fatalf("(%d) Expected %v, got %v", i, testValue, v)
}
// verify that the field is stored in the record data slice
if v := m.Get(schema.FieldNameLastResetSentAt); v != testValue {
t.Fatalf("(%d) Expected data field value %v, got %v", i, testValue, v)
}
}
}
}
func TestRecordLastVerificationSentAt(t *testing.T) {
scenarios := []struct {
collectionType string
expectError bool
}{
{models.CollectionTypeBase, true},
{models.CollectionTypeAuth, false},
}
testValue, err := types.ParseDateTime("2022-01-01 00:00:00.123Z")
if err != nil {
t.Fatal(err)
}
for i, s := range scenarios {
collection := &models.Collection{Type: s.collectionType}
m := models.NewRecord(collection)
if s.expectError {
if err := m.SetLastVerificationSentAt(testValue); err == nil {
t.Errorf("(%d) Expected error, got nil", i)
}
if v := m.LastVerificationSentAt(); !v.IsZero() {
t.Fatalf("(%d) Expected empty value, got %v", i, v)
}
// verify that nothing is stored in the record data slice
if v := m.Get(schema.FieldNameLastVerificationSentAt); v != nil {
t.Fatalf("(%d) Didn't expect data field %q: %v", i, schema.FieldNameLastVerificationSentAt, v)
}
} else {
if err := m.SetLastVerificationSentAt(testValue); err != nil {
t.Fatalf("(%d) Expected nil, got error %v", i, err)
}
if v := m.LastVerificationSentAt(); v != testValue {
t.Fatalf("(%d) Expected %v, got %v", i, testValue, v)
}
// verify that the field is stored in the record data slice
if v := m.Get(schema.FieldNameLastVerificationSentAt); v != testValue {
t.Fatalf("(%d) Expected data field value %v, got %v", i, testValue, v)
}
}
}
}
func TestRecordPasswordHash(t *testing.T) {
m := models.NewRecord(&models.Collection{})
if v := m.PasswordHash(); v != "" {
t.Errorf("Expected PasswordHash() to be empty, got %v", v)
}
m.Set(schema.FieldNamePasswordHash, "test")
if v := m.PasswordHash(); v != "test" {
t.Errorf("Expected PasswordHash() to be 'test', got %v", v)
}
}
func TestRecordValidatePassword(t *testing.T) {
// 123456
hash := "$2a$10$YKU8mPP8sTE3xZrpuM.xQuq27KJ7aIJB2oUeKPsDDqZshbl5g5cDK"
scenarios := []struct {
collectionType string
password string
hash string
expected bool
}{
{models.CollectionTypeBase, "123456", hash, false},
{models.CollectionTypeAuth, "", "", false},
{models.CollectionTypeAuth, "", hash, false},
{models.CollectionTypeAuth, "123456", hash, true},
{models.CollectionTypeAuth, "654321", hash, false},
}
for i, s := range scenarios {
collection := &models.Collection{Type: s.collectionType}
m := models.NewRecord(collection)
m.Set(schema.FieldNamePasswordHash, hash)
if v := m.ValidatePassword(s.password); v != s.expected {
t.Errorf("(%d) Expected %v, got %v", i, s.expected, v)
}
}
}
func TestRecordSetPassword(t *testing.T) {
scenarios := []struct {
collectionType string
password string
expectError bool
}{
{models.CollectionTypeBase, "", true},
{models.CollectionTypeBase, "123456", true},
{models.CollectionTypeAuth, "", true},
{models.CollectionTypeAuth, "123456", false},
}
for i, s := range scenarios {
collection := &models.Collection{Type: s.collectionType}
m := models.NewRecord(collection)
if s.expectError {
if err := m.SetPassword(s.password); err == nil {
t.Errorf("(%d) Expected error, got nil", i)
}
if v := m.GetString(schema.FieldNamePasswordHash); v != "" {
t.Errorf("(%d) Expected empty hash, got %q", i, v)
}
} else {
if err := m.SetPassword(s.password); err != nil {
t.Errorf("(%d) Expected nil, got err", i)
}
if v := m.GetString(schema.FieldNamePasswordHash); v == "" {
t.Errorf("(%d) Expected non empty hash", i)
}
if !m.ValidatePassword(s.password) {
t.Errorf("(%d) Expected true, got false", i)
}
}
}
}
| models/record_test.go | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.0013876494485884905,
0.00018008357437793165,
0.00016562546079512686,
0.0001745364279486239,
0.0000860802101669833
] |
{
"id": 1,
"code_window": [
"\tloadJob := func() {\n",
"\t\tc.Stop()\n",
"\n",
"\t\trawSchedule := app.Settings().Backups.Cron\n",
"\t\tif rawSchedule == \"\" || !app.IsBootstrapped() {\n",
"\t\t\treturn\n",
"\t\t}\n",
"\n",
"\t\tc.Add(\"@autobackup\", rawSchedule, func() {\n",
"\t\t\tautoPrefix := \"@auto_pb_backup_\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif rawSchedule == \"\" || !isServe || !app.IsBootstrapped() {\n"
],
"file_path": "core/base_backup.go",
"type": "replace",
"edit_start_line_idx": 258
} | /**
* TinyMCE version 6.4.1 (2023-03-29)
*/
!function(){"use strict";var e=tinymce.util.Tools.resolve("tinymce.PluginManager");const t=e=>t=>(e=>{const t=typeof e;return null===e?"null":"object"===t&&Array.isArray(e)?"array":"object"===t&&(o=l=e,(n=String).prototype.isPrototypeOf(o)||(null===(r=l.constructor)||void 0===r?void 0:r.name)===n.name)?"string":t;var o,l,n,r})(t)===e,o=e=>t=>typeof t===e,l=t("string"),n=t("array"),r=o("boolean"),a=(void 0,e=>undefined===e);const s=e=>!(e=>null==e)(e),c=o("function"),i=o("number"),m=()=>{},d=e=>()=>e,u=e=>e,p=(e,t)=>e===t;function b(e,...t){return(...o)=>{const l=t.concat(o);return e.apply(null,l)}}const g=e=>{e()},h=d(!1),f=d(!0);class y{constructor(e,t){this.tag=e,this.value=t}static some(e){return new y(!0,e)}static none(){return y.singletonNone}fold(e,t){return this.tag?t(this.value):e()}isSome(){return this.tag}isNone(){return!this.tag}map(e){return this.tag?y.some(e(this.value)):y.none()}bind(e){return this.tag?e(this.value):y.none()}exists(e){return this.tag&&e(this.value)}forall(e){return!this.tag||e(this.value)}filter(e){return!this.tag||e(this.value)?this:y.none()}getOr(e){return this.tag?this.value:e}or(e){return this.tag?this:e}getOrThunk(e){return this.tag?this.value:e()}orThunk(e){return this.tag?this:e()}getOrDie(e){if(this.tag)return this.value;throw new Error(null!=e?e:"Called getOrDie on None")}static from(e){return s(e)?y.some(e):y.none()}getOrNull(){return this.tag?this.value:null}getOrUndefined(){return this.value}each(e){this.tag&&e(this.value)}toArray(){return this.tag?[this.value]:[]}toString(){return this.tag?`some(${this.value})`:"none()"}}y.singletonNone=new y(!1);const w=Object.keys,S=Object.hasOwnProperty,C=(e,t)=>{const o=w(e);for(let l=0,n=o.length;l<n;l++){const n=o[l];t(e[n],n)}},v=(e,t)=>{const o={};var l;return((e,t,o,l)=>{C(e,((e,n)=>{(t(e,n)?o:l)(e,n)}))})(e,t,(l=o,(e,t)=>{l[t]=e}),m),o},T=e=>w(e).length,x=(e,t)=>A(e,t)?y.from(e[t]):y.none(),A=(e,t)=>S.call(e,t),R=(e,t)=>A(e,t)&&void 0!==e[t]&&null!==e[t],O=Array.prototype.indexOf,_=Array.prototype.push,D=(e,t)=>((e,t)=>O.call(e,t))(e,t)>-1,I=(e,t)=>{for(let o=0,l=e.length;o<l;o++)if(t(e[o],o))return!0;return!1},M=(e,t)=>{const o=[];for(let l=0;l<e;l++)o.push(t(l));return o},N=(e,t)=>{const o=e.length,l=new Array(o);for(let n=0;n<o;n++){const o=e[n];l[n]=t(o,n)}return l},P=(e,t)=>{for(let o=0,l=e.length;o<l;o++)t(e[o],o)},k=(e,t)=>{const o=[];for(let l=0,n=e.length;l<n;l++){const n=e[l];t(n,l)&&o.push(n)}return o},B=(e,t,o)=>(P(e,((e,l)=>{o=t(o,e,l)})),o),E=(e,t)=>((e,t,o)=>{for(let l=0,n=e.length;l<n;l++){const n=e[l];if(t(n,l))return y.some(n);if(o(n,l))break}return y.none()})(e,t,h),F=(e,t)=>(e=>{const t=[];for(let o=0,l=e.length;o<l;++o){if(!n(e[o]))throw new Error("Arr.flatten item "+o+" was not an array, input: "+e);_.apply(t,e[o])}return t})(N(e,t)),q=(e,t)=>{for(let o=0,l=e.length;o<l;++o)if(!0!==t(e[o],o))return!1;return!0},L=(e,t)=>t>=0&&t<e.length?y.some(e[t]):y.none(),H=(e,t)=>{for(let o=0;o<e.length;o++){const l=t(e[o],o);if(l.isSome())return l}return y.none()},j=e=>{if(null==e)throw new Error("Node cannot be null or undefined");return{dom:e}},V={fromHtml:(e,t)=>{const o=(t||document).createElement("div");if(o.innerHTML=e,!o.hasChildNodes()||o.childNodes.length>1){const t="HTML does not have a single root node";throw console.error(t,e),new Error(t)}return j(o.childNodes[0])},fromTag:(e,t)=>{const o=(t||document).createElement(e);return j(o)},fromText:(e,t)=>{const o=(t||document).createTextNode(e);return 
j(o)},fromDom:j,fromPoint:(e,t,o)=>y.from(e.dom.elementFromPoint(t,o)).map(j)},z=(e,t)=>{const o=e.dom;if(1!==o.nodeType)return!1;{const e=o;if(void 0!==e.matches)return e.matches(t);if(void 0!==e.msMatchesSelector)return e.msMatchesSelector(t);if(void 0!==e.webkitMatchesSelector)return e.webkitMatchesSelector(t);if(void 0!==e.mozMatchesSelector)return e.mozMatchesSelector(t);throw new Error("Browser lacks native selectors")}},W=e=>1!==e.nodeType&&9!==e.nodeType&&11!==e.nodeType||0===e.childElementCount,$=(e,t)=>e.dom===t.dom,U=z;"undefined"!=typeof window?window:Function("return this;")();const G=e=>e.dom.nodeName.toLowerCase(),K=e=>e.dom.nodeType,J=e=>t=>K(t)===e,Q=J(1),X=J(3),Y=J(9),Z=J(11),ee=e=>t=>Q(t)&&G(t)===e,te=e=>Y(e)?e:V.fromDom(e.dom.ownerDocument),oe=e=>y.from(e.dom.parentNode).map(V.fromDom),le=e=>y.from(e.dom.nextSibling).map(V.fromDom),ne=e=>N(e.dom.childNodes,V.fromDom),re=c(Element.prototype.attachShadow)&&c(Node.prototype.getRootNode)?e=>V.fromDom(e.dom.getRootNode()):te,ae=e=>V.fromDom(e.dom.host),se=e=>{const t=X(e)?e.dom.parentNode:e.dom;if(null==t||null===t.ownerDocument)return!1;const o=t.ownerDocument;return(e=>{const t=re(e);return Z(o=t)&&s(o.dom.host)?y.some(t):y.none();var o})(V.fromDom(t)).fold((()=>o.body.contains(t)),(l=se,n=ae,e=>l(n(e))));var l,n};var ce=(e,t,o,l,n)=>e(o,l)?y.some(o):c(n)&&n(o)?y.none():t(o,l,n);const ie=(e,t,o)=>{let l=e.dom;const n=c(o)?o:h;for(;l.parentNode;){l=l.parentNode;const e=V.fromDom(l);if(t(e))return y.some(e);if(n(e))break}return y.none()},me=(e,t,o)=>ie(e,(e=>z(e,t)),o),de=(e,t)=>((e,o)=>E(e.dom.childNodes,(e=>{return o=V.fromDom(e),z(o,t);var o})).map(V.fromDom))(e),ue=(e,t)=>((e,t)=>{const o=void 0===t?document:t.dom;return W(o)?y.none():y.from(o.querySelector(e)).map(V.fromDom)})(t,e),pe=(e,t,o)=>ce(((e,t)=>z(e,t)),me,e,t,o),be=(e,t=!1)=>{return se(e)?e.dom.isContentEditable:(o=e,pe(o,"[contenteditable]")).fold(d(t),(e=>"true"===ge(e)));var o},ge=e=>e.dom.contentEditable,he=e=>t=>$(t,(e=>V.fromDom(e.getBody()))(e)),fe=e=>/^\d+(\.\d+)?$/.test(e)?e+"px":e,ye=e=>V.fromDom(e.selection.getStart()),we=(e,t)=>{let o=[];return P(ne(e),(e=>{t(e)&&(o=o.concat([e])),o=o.concat(we(e,t))})),o},Se=(e,t)=>((e,o)=>k(ne(e),(e=>z(e,t))))(e),Ce=(e,t)=>((e,t)=>{const o=void 0===t?document:t.dom;return W(o)?[]:N(o.querySelectorAll(e),V.fromDom)})(t,e),ve=(e,t,o)=>{if(!(l(o)||r(o)||i(o)))throw console.error("Invalid call to Attribute.set. 
Key ",t,":: Value ",o,":: Element ",e),new Error("Attribute value was not simple");e.setAttribute(t,o+"")},Te=(e,t)=>{const o=e.dom.getAttribute(t);return null===o?void 0:o},xe=(e,t)=>y.from(Te(e,t)),Ae=(e,t)=>{e.dom.removeAttribute(t)},Re=(e,t,o=p)=>e.exists((e=>o(e,t))),Oe=(e,t,o)=>e.isSome()&&t.isSome()?y.some(o(e.getOrDie(),t.getOrDie())):y.none(),_e=(e,t)=>((e,t,o)=>""===t||e.length>=t.length&&e.substr(0,0+t.length)===t)(e,t),De=(Ie=/^\s+|\s+$/g,e=>e.replace(Ie,""));var Ie;const Me=e=>e.length>0,Ne=(e,t=10)=>{const o=parseInt(e,t);return isNaN(o)?y.none():y.some(o)},Pe=e=>void 0!==e.style&&c(e.style.getPropertyValue),ke=(e,t)=>{const o=e.dom,l=window.getComputedStyle(o).getPropertyValue(t);return""!==l||se(e)?l:Be(o,t)},Be=(e,t)=>Pe(e)?e.style.getPropertyValue(t):"",Ee=(e,t)=>{const o=e.dom,l=Be(o,t);return y.from(l).filter((e=>e.length>0))},Fe=(e,t,o=0)=>xe(e,t).map((e=>parseInt(e,10))).getOr(o),qe=(e,t)=>Le(e,t,f),Le=(e,t,o)=>F(ne(e),(e=>z(e,t)?o(e)?[e]:[]:Le(e,t,o))),He=["tfoot","thead","tbody","colgroup"],je=(e,t,o)=>({element:e,rowspan:t,colspan:o}),Ve=(e,t,o)=>({element:e,cells:t,section:o}),ze=(e,t)=>pe(e,"table",t),We=e=>qe(e,"tr"),$e=e=>ze(e).fold(d([]),(e=>Se(e,"colgroup"))),Ue=e=>oe(e).map((e=>{const t=G(e);return(e=>D(He,e))(t)?t:"tbody"})).getOr("tbody"),Ge=e=>xe(e,"data-snooker-locked-cols").bind((e=>y.from(e.match(/\d+/g)))).map((e=>((e,t)=>{const o={};for(let l=0,n=e.length;l<n;l++){const n=e[l];o[String(n)]=t(n,l)}return o})(e,f))),Ke=(e,t)=>e+","+t,Je=e=>{const t={},o=[];var l;const n=(l=e,L(l,0)).map((e=>e.element)).bind(ze).bind(Ge).getOr({});let r=0,a=0,s=0;const{pass:c,fail:i}=((e,t)=>{const o=[],l=[];for(let t=0,r=e.length;t<r;t++){const r=e[t];(n=r,"colgroup"===n.section?o:l).push(r)}var n;return{pass:o,fail:l}})(e);P(i,(e=>{const l=[];P(e.cells,(e=>{let o=0;for(;void 0!==t[Ke(s,o)];)o++;const r=R(n,o.toString()),c=((e,t,o,l,n,r)=>({element:e,rowspan:t,colspan:o,row:l,column:n,isLocked:r}))(e.element,e.rowspan,e.colspan,s,o,r);for(let l=0;l<e.colspan;l++)for(let n=0;n<e.rowspan;n++){const e=o+l,r=Ke(s+n,e);t[r]=c,a=Math.max(a,e+1)}l.push(c)})),r++,o.push(Ve(e.element,l,e.section)),s++}));const{columns:m,colgroups:d}=(e=>L(e,e.length-1))(c).map((e=>{const t=(e=>{const t={};let o=0;return P(e.cells,(e=>{const l=e.colspan;M(l,(n=>{const r=o+n;t[r]=((e,t,o)=>({element:e,colspan:t,column:o}))(e.element,l,r)})),o+=l})),t})(e),o=((e,t)=>({element:e,columns:t}))(e.element,((e,t)=>{const o=[];return C(e,((e,l)=>{o.push(t(e,l))})),o})(t,u));return{colgroups:[o],columns:t}})).getOrThunk((()=>({colgroups:[],columns:{}}))),p=((e,t)=>({rows:e,columns:t}))(r,a);return{grid:p,access:t,all:o,columns:m,colgroups:d}},Qe=e=>{const t=(e=>{const t=We(e);return((e,t)=>N(e,(e=>{if("colgroup"===G(e)){const t=N((e=>z(e,"colgroup")?Se(e,"col"):F($e(e),(e=>Se(e,"col"))))(e),(e=>{const t=Fe(e,"span",1);return je(e,1,t)}));return Ve(e,t,"colgroup")}{const o=N((e=>qe(e,"th,td"))(e),(e=>{const t=Fe(e,"rowspan",1),o=Fe(e,"colspan",1);return je(e,t,o)}));return Ve(e,o,t(e))}})))([...$e(e),...t],Ue)})(e);return Je(t)},Xe=(e,t,o)=>y.from(e.access[Ke(t,o)]),Ye=(e,t,o)=>{const l=((e,t)=>{const o=F(e.all,(e=>e.cells));return k(o,t)})(e,(e=>o(t,e.element)));return l.length>0?y.some(l[0]):y.none()},Ze=(e,t)=>y.from(e.columns[t]);var et=tinymce.util.Tools.resolve("tinymce.util.Tools");const tt=(e,t,o)=>{const l=e.select("td,th",t);let n;for(let t=0;t<l.length;t++){const r=e.getStyle(l[t],o);if(a(n)&&(n=r),n!==r)return""}return n},ot=(e,t,o)=>{et.each("left center right".split(" 
"),(l=>{l!==o&&e.formatter.remove("align"+l,{},t)})),o&&e.formatter.apply("align"+o,{},t)},lt=(e,t,o)=>{e.dispatch("TableModified",{...o,table:t})},nt=(e,t,o)=>((e,t)=>(e=>{const t=parseFloat(e);return isNaN(t)?y.none():y.some(t)})(e).getOr(t))(ke(e,t),o),rt=e=>((e,t)=>{const o=e.dom,l=o.getBoundingClientRect().width||o.offsetWidth;return"border-box"===t?l:((e,t,o,l)=>t-nt(e,"padding-left",0)-nt(e,"padding-right",0)-nt(e,"border-left-width",0)-nt(e,"border-right-width",0))(e,l)})(e,"content-box");var at=tinymce.util.Tools.resolve("tinymce.Env");const st=M(5,(e=>{const t=`${e+1}px`;return{title:t,value:t}})),ct=N(["Solid","Dotted","Dashed","Double","Groove","Ridge","Inset","Outset","None","Hidden"],(e=>({title:e,value:e.toLowerCase()}))),it="100%",mt=e=>{var t;const o=e.dom,l=null!==(t=o.getParent(e.selection.getStart(),o.isBlock))&&void 0!==t?t:e.getBody();return rt(V.fromDom(l))+"px"},dt=e=>t=>t.options.get(e),ut=dt("table_sizing_mode"),pt=dt("table_border_widths"),bt=dt("table_border_styles"),gt=dt("table_cell_advtab"),ht=dt("table_row_advtab"),ft=dt("table_advtab"),yt=dt("table_appearance_options"),wt=dt("table_grid"),St=dt("table_style_by_css"),Ct=dt("table_cell_class_list"),vt=dt("table_row_class_list"),Tt=dt("table_class_list"),xt=dt("table_toolbar"),At=dt("table_background_color_map"),Rt=dt("table_border_color_map"),Ot=e=>"fixed"===ut(e),_t=e=>"responsive"===ut(e),Dt=e=>{const t=e.options,o=t.get("table_default_styles");return t.isSet("table_default_styles")?o:((e,t)=>_t(e)||!St(e)?t:Ot(e)?{...t,width:mt(e)}:{...t,width:it})(e,o)},It=e=>{const t=e.options,o=t.get("table_default_attributes");return t.isSet("table_default_attributes")?o:((e,t)=>_t(e)||St(e)?t:Ot(e)?{...t,width:mt(e)}:{...t,width:it})(e,o)},Mt=(e,t)=>t.column>=e.startCol&&t.column+t.colspan-1<=e.finishCol&&t.row>=e.startRow&&t.row+t.rowspan-1<=e.finishRow,Nt=(e,t,o)=>((e,t,o)=>{const l=Ye(e,t,$),n=Ye(e,o,$);return l.bind((e=>n.map((t=>{return o=e,l=t,{startRow:Math.min(o.row,l.row),startCol:Math.min(o.column,l.column),finishRow:Math.max(o.row+o.rowspan-1,l.row+l.rowspan-1),finishCol:Math.max(o.column+o.colspan-1,l.column+l.colspan-1)};var o,l}))))})(e,t,o).bind((t=>((e,t)=>{let o=!0;const l=b(Mt,t);for(let n=t.startRow;n<=t.finishRow;n++)for(let r=t.startCol;r<=t.finishCol;r++)o=o&&Xe(e,n,r).exists(l);return o?y.some(t):y.none()})(e,t))),Pt=Qe,kt=(e,t)=>{oe(e).each((o=>{o.dom.insertBefore(t.dom,e.dom)}))},Bt=(e,t)=>{le(e).fold((()=>{oe(e).each((e=>{Et(e,t)}))}),(e=>{kt(e,t)}))},Et=(e,t)=>{e.dom.appendChild(t.dom)},Ft=(e,t)=>{P(t,((o,l)=>{const n=0===l?e:t[l-1];Bt(n,o)}))},qt=e=>{const t=e.dom;null!==t.parentNode&&t.parentNode.removeChild(t)},Lt=((e,t)=>{const o=t=>e(t)?y.from(t.dom.nodeValue):y.none();return{get:t=>{if(!e(t))throw new Error("Can only get text value of a text node");return o(t).getOr("")},getOption:o,set:(t,o)=>{if(!e(t))throw new Error("Can only set raw text value of a text node");t.dom.nodeValue=o}}})(X);var Ht=["body","p","div","article","aside","figcaption","figure","footer","header","nav","section","ol","ul","li","table","thead","tbody","tfoot","caption","tr","td","th","h1","h2","h3","h4","h5","h6","blockquote","pre","address"];const jt=(e,t,o,l)=>{const n=t(e,o);return r=(o,l)=>{const n=t(e,l);return Vt(e,o,n)},a=n,((e,t)=>{for(let o=e.length-1;o>=0;o--)t(e[o],o)})(l,((e,t)=>{a=r(a,e)})),a;var r,a},Vt=(e,t,o)=>t.bind((t=>o.filter(b(e.eq,t)))),zt={up:d({selector:me,closest:pe,predicate:ie,all:(e,t)=>{const o=c(t)?t:h;let l=e.dom;const n=[];for(;null!==l.parentNode&&void 0!==l.parentNode;){const 
e=l.parentNode,t=V.fromDom(e);if(n.push(t),!0===o(t))break;l=e}return n}}),down:d({selector:Ce,predicate:we}),styles:d({get:ke,getRaw:Ee,set:(e,t,o)=>{((e,t,o)=>{if(!l(o))throw console.error("Invalid call to CSS.set. Property ",t,":: Value ",o,":: Element ",e),new Error("CSS value must be a string: "+o);Pe(e)&&e.style.setProperty(t,o)})(e.dom,t,o)},remove:(e,t)=>{((e,t)=>{Pe(e)&&e.style.removeProperty(t)})(e.dom,t),Re(xe(e,"style").map(De),"")&&Ae(e,"style")}}),attrs:d({get:Te,set:(e,t,o)=>{ve(e.dom,t,o)},remove:Ae,copyTo:(e,t)=>{((e,t)=>{const o=e.dom;C(t,((e,t)=>{ve(o,t,e)}))})(t,B(e.dom.attributes,((e,t)=>(e[t.name]=t.value,e)),{}))}}),insert:d({before:kt,after:Bt,afterAll:Ft,append:Et,appendAll:(e,t)=>{P(t,(t=>{Et(e,t)}))},prepend:(e,t)=>{(e=>((e,t)=>{const o=e.dom.childNodes;return y.from(o[0]).map(V.fromDom)})(e))(e).fold((()=>{Et(e,t)}),(o=>{e.dom.insertBefore(t.dom,o.dom)}))},wrap:(e,t)=>{kt(e,t),Et(t,e)}}),remove:d({unwrap:e=>{const t=ne(e);t.length>0&&Ft(e,t),qt(e)},remove:qt}),create:d({nu:V.fromTag,clone:e=>V.fromDom(e.dom.cloneNode(!1)),text:V.fromText}),query:d({comparePosition:(e,t)=>e.dom.compareDocumentPosition(t.dom),prevSibling:e=>y.from(e.dom.previousSibling).map(V.fromDom),nextSibling:le}),property:d({children:ne,name:G,parent:oe,document:e=>te(e).dom,isText:X,isComment:e=>8===K(e)||"#comment"===G(e),isElement:Q,isSpecial:e=>{const t=G(e);return D(["script","noscript","iframe","noframes","noembed","title","style","textarea","xmp"],t)},getLanguage:e=>Q(e)?xe(e,"lang"):y.none(),getText:e=>Lt.get(e),setText:(e,t)=>Lt.set(e,t),isBoundary:e=>!!Q(e)&&("body"===G(e)||D(Ht,G(e))),isEmptyTag:e=>!!Q(e)&&D(["br","img","hr","input"],G(e)),isNonEditable:e=>Q(e)&&"false"===Te(e,"contenteditable")}),eq:$,is:U},Wt=e=>me(e,"table"),$t=(e,t,o)=>ue(e,t).bind((t=>ue(e,o).bind((e=>{return(o=Wt,l=[t,e],((e,t,o)=>o.length>0?((e,t,o,l)=>l(e,t,o[0],o.slice(1)))(e,t,o,jt):y.none())(zt,((e,t)=>o(t)),l)).map((o=>({first:t,last:e,table:o})));var o,l})))),Ut=e=>N(e,V.fromDom),Gt={selected:"data-mce-selected",selectedSelector:"td[data-mce-selected],th[data-mce-selected]",firstSelected:"data-mce-first-selected",firstSelectedSelector:"td[data-mce-first-selected],th[data-mce-first-selected]",lastSelected:"data-mce-last-selected",lastSelectedSelector:"td[data-mce-last-selected],th[data-mce-last-selected]"},Kt=e=>(t,o)=>{const l=G(t),n="col"===l||"colgroup"===l?ze(r=t).bind((e=>((e,t)=>((e,t)=>{const o=Ce(e,t);return o.length>0?y.some(o):y.none()})(e,t))(e,Gt.firstSelectedSelector))).fold(d(r),(e=>e[0])):t;var r;return pe(n,e,o)},Jt=Kt("th,td,caption"),Qt=Kt("th,td"),Xt=e=>Ut(e.model.table.getSelectedCells()),Yt=(e,t)=>{const o=Qt(e),l=o.bind((e=>ze(e))).map((e=>We(e)));return Oe(o,l,((e,o)=>k(o,(o=>I(Ut(o.dom.cells),(o=>"1"===Te(o,t)||$(o,e))))))).getOr([])},Zt=[{text:"None",value:""},{text:"Top",value:"top"},{text:"Middle",value:"middle"},{text:"Bottom",value:"bottom"}],eo=/^#?([a-f\d])([a-f\d])([a-f\d])$/i,to=/^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i,oo=e=>{return(t=e,"#",_e(t,"#")?((e,t)=>e.substring(t))(t,"#".length):t).toUpperCase();var t},lo=e=>{const t=e.toString(16);return(1===t.length?"0"+t:t).toUpperCase()},no=e=>{return t=lo(e.red)+lo(e.green)+lo(e.blue),{value:oo(t)};var t},ro=/^\s*rgb\s*\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)\s*$/i,ao=/^\s*rgba\s*\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d?(?:\.\d+)?)\s*\)\s*$/i,so=(e,t,o,l)=>({red:e,green:t,blue:o,alpha:l}),co=(e,t,o,l)=>{const n=parseInt(e,10),r=parseInt(t,10),a=parseInt(o,10),s=parseFloat(l);return 
so(n,r,a,s)},io=e=>{if("transparent"===e)return y.some(so(0,0,0,0));const t=ro.exec(e);if(null!==t)return y.some(co(t[1],t[2],t[3],"1"));const o=ao.exec(e);return null!==o?y.some(co(o[1],o[2],o[3],o[4])):y.none()},mo=e=>{let t=e;return{get:()=>t,set:e=>{t=e}}},uo=(e,t,o)=>l=>{const n=(e=>{const t=mo(y.none()),o=()=>t.get().each(e);return{clear:()=>{o(),t.set(y.none())},isSet:()=>t.get().isSome(),get:()=>t.get(),set:e=>{o(),t.set(y.some(e))}}})((e=>e.unbind())),r=!Me(o),a=()=>{const a=Xt(e),s=l=>e.formatter.match(t,{value:o},l.dom,r);r?(l.setActive(!I(a,s)),n.set(e.formatter.formatChanged(t,(e=>l.setActive(!e)),!0))):(l.setActive(q(a,s)),n.set(e.formatter.formatChanged(t,l.setActive,!1,{value:o})))};return e.initialized?a():e.on("init",a),n.clear},po=e=>R(e,"menu"),bo=e=>N(e,(e=>{const t=e.text||e.title||"";return po(e)?{text:t,items:bo(e.menu)}:{text:t,value:e.value}})),go=(e,t,o,l)=>N(t,(t=>{const n=t.text||t.title;return po(t)?{type:"nestedmenuitem",text:n,getSubmenuItems:()=>go(e,t.menu,o,l)}:{text:n,type:"togglemenuitem",onAction:()=>l(t.value),onSetup:uo(e,o,t.value)}})),ho=(e,t)=>o=>{e.execCommand("mceTableApplyCellStyle",!1,{[t]:o})},fo=e=>F(e,(e=>po(e)?[{...e,menu:fo(e.menu)}]:Me(e.value)?[e]:[])),yo=(e,t,o,l)=>n=>n(go(e,t,o,l)),wo=(e,t,o)=>{const l=N(t,(e=>{return{text:e.title,value:"#"+(o=e.value,(t=o,(e=>eo.test(e)||to.test(e))(t)?y.some({value:oo(t)}):y.none()).orThunk((()=>io(o).map(no))).getOrThunk((()=>{const e=document.createElement("canvas");e.height=1,e.width=1;const t=e.getContext("2d");t.clearRect(0,0,e.width,e.height),t.fillStyle="#FFFFFF",t.fillStyle=o,t.fillRect(0,0,1,1);const l=t.getImageData(0,0,1,1).data,n=l[0],r=l[1],a=l[2],s=l[3];return no(so(n,r,a,s))}))).value,type:"choiceitem"};var t,o}));return[{type:"fancymenuitem",fancytype:"colorswatch",initData:{colors:l.length>0?l:void 0,allowCustomColors:!1},onAction:t=>{const l="remove"===t.value?"":t.value;e.execCommand("mceTableApplyCellStyle",!1,{[o]:l})}}]},So=e=>()=>{const t="header"===e.queryCommandValue("mceTableRowType")?"body":"header";e.execCommand("mceTableRowType",!1,{type:t})},Co=e=>()=>{const t="th"===e.queryCommandValue("mceTableColType")?"td":"th";e.execCommand("mceTableColType",!1,{type:t})},vo=[{name:"width",type:"input",label:"Width"},{name:"height",type:"input",label:"Height"},{name:"celltype",type:"listbox",label:"Cell type",items:[{text:"Cell",value:"td"},{text:"Header cell",value:"th"}]},{name:"scope",type:"listbox",label:"Scope",items:[{text:"None",value:""},{text:"Row",value:"row"},{text:"Column",value:"col"},{text:"Row group",value:"rowgroup"},{text:"Column group",value:"colgroup"}]},{name:"halign",type:"listbox",label:"Horizontal align",items:[{text:"None",value:""},{text:"Left",value:"left"},{text:"Center",value:"center"},{text:"Right",value:"right"}]},{name:"valign",type:"listbox",label:"Vertical align",items:Zt}],To=e=>vo.concat((e=>{const t=bo(Ct(e));return t.length>0?y.some({name:"class",type:"listbox",label:"Class",items:t}):y.none()})(e).toArray()),xo=(e,t)=>{const o=[{name:"borderstyle",type:"listbox",label:"Border style",items:[{text:"Select...",value:""}].concat(bo(bt(e)))},{name:"bordercolor",type:"colorinput",label:"Border color"},{name:"backgroundcolor",type:"colorinput",label:"Background color"}];return{title:"Advanced",name:"advanced",items:"cell"===t?[{name:"borderwidth",type:"input",label:"Border width"}].concat(o):o}},Ao=(e,t)=>{const 
o=e.dom;return{setAttrib:(e,l)=>{o.setAttrib(t,e,l)},setStyle:(e,l)=>{o.setStyle(t,e,l)},setFormat:(o,l)=>{""===l?e.formatter.remove(o,{value:null},t,!0):e.formatter.apply(o,{value:l},t)}}},Ro=ee("th"),Oo=(e,t)=>e&&t?"sectionCells":e?"section":"cells",_o=e=>{const t=N(e,(e=>(e=>{const t="thead"===e.section,o=Re((e=>{const t=k(e,(e=>Ro(e.element)));return 0===t.length?y.some("td"):t.length===e.length?y.some("th"):y.none()})(e.cells),"th");return"tfoot"===e.section?{type:"footer"}:t||o?{type:"header",subType:Oo(t,o)}:{type:"body"}})(e).type)),o=D(t,"header"),l=D(t,"footer");if(o||l){const e=D(t,"body");return!o||e||l?o||e||!l?y.none():y.some("footer"):y.some("header")}return y.some("body")},Do=(e,t)=>H(e.all,(e=>E(e.cells,(e=>$(t,e.element))))),Io=(e,t,o)=>{const l=(e=>{const t=[],o=e=>{t.push(e)};for(let t=0;t<e.length;t++)e[t].each(o);return t})(N(t.selection,(t=>{return(l=t,((e,t,o=h)=>o(t)?y.none():D(e,G(t))?y.some(t):me(t,e.join(","),(e=>z(e,"table")||o(e))))(["td","th"],l,n)).bind((t=>Do(e,t))).filter(o);var l,n})));return n=l,l.length>0?y.some(n):y.none();var n},Mo=(e,t)=>Io(e,t,f),No=(e,t)=>q(t,(t=>((e,t)=>Do(e,t).exists((e=>!e.isLocked)))(e,t))),Po=(e,t)=>((e,t)=>t.mergable)(0,t).filter((t=>No(e,t.cells))),ko=(e,t)=>((e,t)=>t.unmergable)(0,t).filter((t=>No(e,t))),Bo=((e=>{if(!n(e))throw new Error("cases must be an array");if(0===e.length)throw new Error("there must be at least one case");const t=[],o={};P(e,((l,r)=>{const a=w(l);if(1!==a.length)throw new Error("one and only one name per case");const s=a[0],c=l[s];if(void 0!==o[s])throw new Error("duplicate key detected:"+s);if("cata"===s)throw new Error("cannot have a case named cata (sorry)");if(!n(c))throw new Error("case arguments must be an array");t.push(s),o[s]=(...o)=>{const l=o.length;if(l!==c.length)throw new Error("Wrong number of arguments to case "+s+". Expected "+c.length+" ("+c+"), got "+l);return{fold:(...t)=>{if(t.length!==e.length)throw new Error("Wrong number of arguments to fold. Expected "+e.length+", got "+t.length);return t[r].apply(null,o)},match:e=>{const l=w(e);if(t.length!==l.length)throw new Error("Wrong number of arguments to match. Expected: "+t.join(",")+"\nActual: "+l.join(","));if(!q(t,(e=>D(l,e))))throw new Error("Not all branches were specified when using match. 
Specified: "+l.join(", ")+"\nRequired: "+t.join(", "));return e[s].apply(null,o)},log:e=>{console.log(e,{constructors:t,constructor:s,params:o})}}}}))})([{none:[]},{only:["index"]},{left:["index","next"]},{middle:["prev","index","next"]},{right:["prev","index"]}]),(e,t)=>{const o=Qe(e);return Mo(o,t).bind((e=>{const t=e[e.length-1],l=e[0].row,n=t.row+t.rowspan,r=o.all.slice(l,n);return _o(r)})).getOr("")}),Eo=e=>{return _e(e,"rgb")?io(t=e).map(no).map((e=>"#"+e.value)).getOr(t):e;var t},Fo=e=>{const t=V.fromDom(e);return{borderwidth:Ee(t,"border-width").getOr(""),borderstyle:Ee(t,"border-style").getOr(""),bordercolor:Ee(t,"border-color").map(Eo).getOr(""),backgroundcolor:Ee(t,"background-color").map(Eo).getOr("")}},qo=e=>{const t=e[0],o=e.slice(1);return P(o,(e=>{P(w(t),(o=>{C(e,((e,l)=>{const n=t[o];""!==n&&o===l&&n!==e&&(t[o]="")}))}))})),t},Lo=(e,t,o,l)=>E(e,(e=>!a(o.formatter.matchNode(l,t+e)))).getOr(""),Ho=b(Lo,["left","center","right"],"align"),jo=b(Lo,["top","middle","bottom"],"valign"),Vo=e=>ze(V.fromDom(e)).map((t=>{const o={selection:Ut(e.cells)};return Bo(t,o)})).getOr(""),zo=(e,t)=>{const o=Qe(e),l=(e=>F(e.all,(e=>e.cells)))(o),n=k(l,(e=>I(t,(t=>$(e.element,t)))));return N(n,(e=>({element:e.element.dom,column:Ze(o,e.column).map((e=>e.element.dom))})))},Wo=(e,t,o,l)=>{const n=l.getData();l.close(),e.undoManager.transact((()=>{((e,t,o,l)=>{const n=v(l,((e,t)=>o[t]!==e));T(n)>0&&t.length>=1&&ze(t[0]).each((o=>{const r=zo(o,t),a=T(v(n,((e,t)=>"scope"!==t&&"celltype"!==t)))>0,s=A(n,"celltype");(a||A(n,"scope"))&&((e,t,o,l)=>{const n=1===t.length;P(t,(t=>{const r=t.element,a=n?f:l,s=Ao(e,r);((e,t,o,l)=>{l("scope")&&e.setAttrib("scope",o.scope),l("class")&&e.setAttrib("class",o.class),l("height")&&e.setStyle("height",fe(o.height)),l("width")&&t.setStyle("width",fe(o.width))})(s,t.column.map((t=>Ao(e,t))).getOr(s),o,a),gt(e)&&((e,t,o)=>{o("backgroundcolor")&&e.setFormat("tablecellbackgroundcolor",t.backgroundcolor),o("bordercolor")&&e.setFormat("tablecellbordercolor",t.bordercolor),o("borderstyle")&&e.setFormat("tablecellborderstyle",t.borderstyle),o("borderwidth")&&e.setFormat("tablecellborderwidth",fe(t.borderwidth))})(s,o,a),l("halign")&&ot(e,r,o.halign),l("valign")&&((e,t,o)=>{et.each("top middle bottom".split(" "),(l=>{l!==o&&e.formatter.remove("valign"+l,{},t)})),o&&e.formatter.apply("valign"+o,{},t)})(e,r,o.valign)}))})(e,r,l,b(A,n)),s&&((e,t)=>{e.execCommand("mceTableCellType",!1,{type:t.celltype,no_events:!0})})(e,l),lt(e,o.dom,{structure:s,style:a})}))})(e,t,o,n),e.focus()}))},$o=e=>{const t=Xt(e);if(0===t.length)return;const o=((e,t)=>{const o=ze(t[0]).map((o=>N(zo(o,t),(t=>((e,t,o,l)=>{const n=e.dom,r=(e,t)=>n.getStyle(e,t)||n.getAttrib(e,t);return{width:r(l.getOr(t),"width"),height:r(t,"height"),scope:n.getAttrib(t,"scope"),celltype:(a=t,a.nodeName.toLowerCase()),class:n.getAttrib(t,"class",""),halign:Ho(e,t),valign:jo(e,t),...o?Fo(t):{}};var a})(e,t.element,gt(e),t.column)))));return qo(o.getOrDie())})(e,t),l={type:"tabpanel",tabs:[{title:"General",name:"general",items:To(e)},xo(e,"cell")]},n={type:"panel",items:[{type:"grid",columns:2,items:To(e)}]};e.windowManager.open({title:"Cell Properties",size:"normal",body:gt(e)?l:n,buttons:[{type:"cancel",name:"cancel",text:"Cancel"},{type:"submit",name:"save",text:"Save",primary:!0}],initialData:o,onSubmit:b(Wo,e,t,o)})},Uo=[{type:"listbox",name:"type",label:"Row 
type",items:[{text:"Header",value:"header"},{text:"Body",value:"body"},{text:"Footer",value:"footer"}]},{type:"listbox",name:"align",label:"Alignment",items:[{text:"None",value:""},{text:"Left",value:"left"},{text:"Center",value:"center"},{text:"Right",value:"right"}]},{label:"Height",name:"height",type:"input"}],Go=e=>Uo.concat((e=>{const t=bo(vt(e));return t.length>0?y.some({name:"class",type:"listbox",label:"Class",items:t}):y.none()})(e).toArray()),Ko=(e,t,o,l)=>{const n=l.getData();l.close(),e.undoManager.transact((()=>{((e,t,o,l)=>{const n=v(l,((e,t)=>o[t]!==e));if(T(n)>0){const o=A(n,"type"),r=!o||T(n)>1;r&&((e,t,o,l)=>{const n=1===t.length?f:l;P(t,(t=>{const r=Ao(e,t);((e,t,o)=>{o("class")&&e.setAttrib("class",t.class),o("height")&&e.setStyle("height",fe(t.height))})(r,o,n),ht(e)&&((e,t,o)=>{o("backgroundcolor")&&e.setStyle("background-color",t.backgroundcolor),o("bordercolor")&&e.setStyle("border-color",t.bordercolor),o("borderstyle")&&e.setStyle("border-style",t.borderstyle)})(r,o,n),l("align")&&ot(e,t,o.align)}))})(e,t,l,b(A,n)),o&&((e,t)=>{e.execCommand("mceTableRowType",!1,{type:t.type,no_events:!0})})(e,l),ze(V.fromDom(t[0])).each((t=>lt(e,t.dom,{structure:o,style:r})))}})(e,t,o,n),e.focus()}))},Jo=e=>{const t=Yt(ye(e),Gt.selected);if(0===t.length)return;const o=N(t,(t=>((e,t,o)=>{const l=e.dom;return{height:l.getStyle(t,"height")||l.getAttrib(t,"height"),class:l.getAttrib(t,"class",""),type:Vo(t),align:Ho(e,t),...o?Fo(t):{}}})(e,t.dom,ht(e)))),l=qo(o),n={type:"tabpanel",tabs:[{title:"General",name:"general",items:Go(e)},xo(e,"row")]},r={type:"panel",items:[{type:"grid",columns:2,items:Go(e)}]};e.windowManager.open({title:"Row Properties",size:"normal",body:ht(e)?n:r,buttons:[{type:"cancel",name:"cancel",text:"Cancel"},{type:"submit",name:"save",text:"Save",primary:!0}],initialData:l,onSubmit:b(Ko,e,N(t,(e=>e.dom)),l)})},Qo=(e,t,o)=>{const l=o?[{type:"input",name:"cols",label:"Cols",inputMode:"numeric"},{type:"input",name:"rows",label:"Rows",inputMode:"numeric"}]:[],n=yt(e)?[{type:"input",name:"cellspacing",label:"Cell spacing",inputMode:"numeric"},{type:"input",name:"cellpadding",label:"Cell padding",inputMode:"numeric"},{type:"input",name:"border",label:"Border width"},{type:"label",label:"Caption",items:[{type:"checkbox",name:"caption",label:"Show caption"}]}]:[],r=t.length>0?[{type:"listbox",name:"class",label:"Class",items:t}]:[];return l.concat([{type:"input",name:"width",label:"Width"},{type:"input",name:"height",label:"Height"}]).concat(n).concat([{type:"listbox",name:"align",label:"Alignment",items:[{text:"None",value:""},{text:"Left",value:"left"},{text:"Center",value:"center"},{text:"Right",value:"right"}]}]).concat(r)},Xo=(e,t,o,n)=>{if("TD"===t.tagName||"TH"===t.tagName)l(o)&&s(n)?e.setStyle(t,o,n):e.setStyles(t,o);else if(t.children)for(let l=0;l<t.children.length;l++)Xo(e,t.children[l],o,n)},Yo=(e,t,o,l)=>{const n=e.dom,r=l.getData(),s=v(r,((e,t)=>o[t]!==e));l.close(),""===r.class&&delete r.class,e.undoManager.transact((()=>{if(!t){const o=Ne(r.cols).getOr(1),l=Ne(r.rows).getOr(1);e.execCommand("mceInsertTable",!1,{rows:l,columns:o}),t=Qt(ye(e),he(e)).bind((t=>ze(t,he(e)))).map((e=>e.dom)).getOrDie()}if(T(s)>0){((e,t,o)=>{const 
l=e.dom,n={},r={};if(a(o.class)||(n.class=o.class),r.height=fe(o.height),St(e)?r.width=fe(o.width):l.getAttrib(t,"width")&&(n.width=(e=>e?e.replace(/px$/,""):"")(o.width)),St(e)?(r["border-width"]=fe(o.border),r["border-spacing"]=fe(o.cellspacing)):(n.border=o.border,n.cellpadding=o.cellpadding,n.cellspacing=o.cellspacing),St(e)&&t.children)for(let n=0;n<t.children.length;n++)Xo(l,t.children[n],{"border-width":fe(o.border),padding:fe(o.cellpadding)}),ft(e)&&Xo(l,t.children[n],{"border-color":o.bordercolor});if(ft(e)){const e=o;r["background-color"]=e.backgroundcolor,r["border-color"]=e.bordercolor,r["border-style"]=e.borderstyle}n.style=l.serializeStyle({...Dt(e),...r}),l.setAttribs(t,{...It(e),...n})})(e,t,r);const o=n.select("caption",t)[0];(o&&!r.caption||!o&&r.caption)&&e.execCommand("mceTableToggleCaption"),ot(e,t,r.align)}if(e.focus(),e.addVisual(),T(s)>0){const o=A(s,"caption"),l=!o||T(s)>1;lt(e,t,{structure:o,style:l})}}))},Zo=(e,t)=>{const o=e.dom;let l,n=((e,t)=>{const o=Dt(e),l=It(e),n=t?{borderstyle:x(o,"border-style").getOr(""),bordercolor:Eo(x(o,"border-color").getOr("")),backgroundcolor:Eo(x(o,"background-color").getOr(""))}:{};return{height:"",width:"100%",cellspacing:"",cellpadding:"",caption:!1,class:"",align:"",border:"",...o,...l,...n,...(()=>{const t=o["border-width"];return St(e)&&t?{border:t}:x(l,"border").fold((()=>({})),(e=>({border:e})))})(),...{...x(o,"border-spacing").or(x(l,"cellspacing")).fold((()=>({})),(e=>({cellspacing:e}))),...x(o,"border-padding").or(x(l,"cellpadding")).fold((()=>({})),(e=>({cellpadding:e})))}}})(e,ft(e));t?(n.cols="1",n.rows="1",ft(e)&&(n.borderstyle="",n.bordercolor="",n.backgroundcolor="")):(l=o.getParent(e.selection.getStart(),"table",e.getBody()),l?n=((e,t,o)=>{const l=e.dom,n=St(e)?l.getStyle(t,"border-spacing")||l.getAttrib(t,"cellspacing"):l.getAttrib(t,"cellspacing")||l.getStyle(t,"border-spacing"),r=St(e)?tt(l,t,"padding")||l.getAttrib(t,"cellpadding"):l.getAttrib(t,"cellpadding")||tt(l,t,"padding");return{width:l.getStyle(t,"width")||l.getAttrib(t,"width"),height:l.getStyle(t,"height")||l.getAttrib(t,"height"),cellspacing:null!=n?n:"",cellpadding:null!=r?r:"",border:((t,o)=>{const l=Ee(V.fromDom(o),"border-width");return St(e)&&l.isSome()?l.getOr(""):t.getAttrib(o,"border")||tt(e.dom,o,"border-width")||tt(e.dom,o,"border")||""})(l,t),caption:!!l.select("caption",t)[0],class:l.getAttrib(t,"class",""),align:Ho(e,t),...o?Fo(t):{}}})(e,l,ft(e)):ft(e)&&(n.borderstyle="",n.bordercolor="",n.backgroundcolor=""));const r=bo(Tt(e));r.length>0&&n.class&&(n.class=n.class.replace(/\s*mce\-item\-table\s*/g,""));const a={type:"grid",columns:2,items:Qo(e,r,t)},s=ft(e)?{type:"tabpanel",tabs:[{title:"General",name:"general",items:[a]},xo(e,"table")]}:{type:"panel",items:[a]};e.windowManager.open({title:"Table Properties",size:"normal",body:s,onSubmit:b(Yo,e,l,n),buttons:[{type:"cancel",name:"cancel",text:"Cancel"},{type:"submit",name:"save",text:"Save",primary:!0}],initialData:n})},el=e=>{C({mceTableProps:b(Zo,e,!1),mceTableRowProps:b(Jo,e),mceTableCellProps:b($o,e),mceInsertTableDialog:b(Zo,e,!0)},((t,o)=>e.addCommand(o,(()=>{return o=t,void((e=>{return(t=e,o=ee("table"),ce(((e,t)=>t(e)),ie,t,o,void 0)).forall(be);var t,o})(ye(e))&&o());var o}))))},tl=u,ol=e=>{const t=(e,t)=>xe(e,t).exists((e=>parseInt(e,10)>1));return e.length>0&&q(e,(e=>t(e,"rowspan")||t(e,"colspan")))?y.some(e):y.none()},ll=(e,t,o)=>{return t.length<=1?y.none():(l=e,n=o.firstSelectedSelector,r=o.lastSelectedSelector,$t(l,n,r).bind((e=>{const 
t=e=>$(l,e),o="thead,tfoot,tbody,table",n=me(e.first,o,t),r=me(e.last,o,t);return n.bind((t=>r.bind((o=>$(t,o)?((e,t,o)=>{const l=Pt(e);return Nt(l,t,o)})(e.table,e.first,e.last):y.none()))))}))).map((e=>({bounds:e,cells:t})));var l,n,r},nl=e=>{const t=mo(y.none()),o=mo([]);let l=y.none();const n=ee("caption"),r=e=>l.forall((t=>!t[e])),a=()=>Jt(ye(e),he(e)).bind((t=>{return o=Oe(ze(t),Jt((e=>V.fromDom(e.selection.getEnd()))(e),he(e)).bind(ze),((o,l)=>$(o,l)?n(t)?y.some((e=>({element:e,mergable:y.none(),unmergable:y.none(),selection:[e]}))(t)):y.some(((e,t,o)=>({element:o,mergable:ll(t,e,Gt),unmergable:ol(e),selection:tl(e)}))(Xt(e),o,t)):y.none())),o.bind(u);var o})),s=e=>ze(e.element).map((t=>{const o=Qe(t),l=Mo(o,e).getOr([]),n=B(l,((e,t)=>(t.isLocked&&(e.onAny=!0,0===t.column?e.onFirst=!0:t.column+t.colspan>=o.grid.columns&&(e.onLast=!0)),e)),{onAny:!1,onFirst:!1,onLast:!1});return{mergeable:Po(o,e).isSome(),unmergeable:ko(o,e).isSome(),locked:n}})),c=()=>{t.set((e=>{let t,o=!1;return(...l)=>(o||(o=!0,t=e.apply(null,l)),t)})(a)()),l=t.get().bind(s),P(o.get(),g)},i=e=>(e(),o.set(o.get().concat([e])),()=>{o.set(k(o.get(),(t=>t!==e)))}),m=(e,o)=>i((()=>t.get().fold((()=>{e.setEnabled(!1)}),(t=>{e.setEnabled(!o(t))})))),d=(e,o,l)=>i((()=>t.get().fold((()=>{e.setEnabled(!1),e.setActive(!1)}),(t=>{e.setEnabled(!o(t)),e.setActive(l(t))})))),p=e=>l.exists((t=>t.locked[e])),b=(t,o)=>l=>d(l,(e=>n(e.element)),(()=>e.queryCommandValue(t)===o)),f=b("mceTableRowType","header"),w=b("mceTableColType","th");return e.on("NodeChange ExecCommand TableSelectorChange",c),{onSetupTable:e=>m(e,(e=>!1)),onSetupCellOrRow:e=>m(e,(e=>n(e.element))),onSetupColumn:e=>t=>m(t,(t=>n(t.element)||p(e))),onSetupPasteable:e=>t=>m(t,(t=>n(t.element)||e().isNone())),onSetupPasteableColumn:(e,t)=>o=>m(o,(o=>n(o.element)||e().isNone()||p(t))),onSetupMergeable:e=>m(e,(e=>r("mergeable"))),onSetupUnmergeable:e=>m(e,(e=>r("unmergeable"))),resetTargets:c,onSetupTableWithCaption:t=>d(t,h,(t=>ze(t.element,he(e)).exists((e=>de(e,"caption").isSome())))),onSetupTableRowHeaders:f,onSetupTableColumnHeaders:w,targets:t.get}};var rl=tinymce.util.Tools.resolve("tinymce.FakeClipboard");const al=e=>{var t;const o=null!==(t=rl.read())&&void 0!==t?t:[];return H(o,(t=>y.from(t.getType(e))))},sl=()=>al("x-tinymce/dom-table-rows"),cl=()=>al("x-tinymce/dom-table-columns");e.add("table",(e=>{const t=nl(e);(e=>{const t=e.options.register;t("table_border_widths",{processor:"object[]",default:st}),t("table_border_styles",{processor:"object[]",default:ct}),t("table_cell_advtab",{processor:"boolean",default:!0}),t("table_row_advtab",{processor:"boolean",default:!0}),t("table_advtab",{processor:"boolean",default:!0}),t("table_appearance_options",{processor:"boolean",default:!0}),t("table_grid",{processor:"boolean",default:!at.deviceType.isTouch()}),t("table_cell_class_list",{processor:"object[]",default:[]}),t("table_row_class_list",{processor:"object[]",default:[]}),t("table_class_list",{processor:"object[]",default:[]}),t("table_toolbar",{processor:"string",default:"tableprops tabledelete | tableinsertrowbefore tableinsertrowafter tabledeleterow | tableinsertcolbefore tableinsertcolafter tabledeletecol"}),t("table_background_color_map",{processor:"object[]",default:[]}),t("table_border_color_map",{processor:"object[]",default:[]})})(e),el(e),((e,t)=>{const 
o=t=>()=>e.execCommand(t),l=(t,l)=>!!e.queryCommandSupported(l.command)&&(e.ui.registry.addMenuItem(t,{...l,onAction:c(l.onAction)?l.onAction:o(l.command)}),!0),n=(t,l)=>{e.queryCommandSupported(l.command)&&e.ui.registry.addToggleMenuItem(t,{...l,onAction:c(l.onAction)?l.onAction:o(l.command)})},r=t=>{e.execCommand("mceInsertTable",!1,{rows:t.numRows,columns:t.numColumns})},a=[l("tableinsertrowbefore",{text:"Insert row before",icon:"table-insert-row-above",command:"mceTableInsertRowBefore",onSetup:t.onSetupCellOrRow}),l("tableinsertrowafter",{text:"Insert row after",icon:"table-insert-row-after",command:"mceTableInsertRowAfter",onSetup:t.onSetupCellOrRow}),l("tabledeleterow",{text:"Delete row",icon:"table-delete-row",command:"mceTableDeleteRow",onSetup:t.onSetupCellOrRow}),l("tablerowprops",{text:"Row properties",icon:"table-row-properties",command:"mceTableRowProps",onSetup:t.onSetupCellOrRow}),l("tablecutrow",{text:"Cut row",icon:"cut-row",command:"mceTableCutRow",onSetup:t.onSetupCellOrRow}),l("tablecopyrow",{text:"Copy row",icon:"duplicate-row",command:"mceTableCopyRow",onSetup:t.onSetupCellOrRow}),l("tablepasterowbefore",{text:"Paste row before",icon:"paste-row-before",command:"mceTablePasteRowBefore",onSetup:t.onSetupPasteable(sl)}),l("tablepasterowafter",{text:"Paste row after",icon:"paste-row-after",command:"mceTablePasteRowAfter",onSetup:t.onSetupPasteable(sl)})],s=[l("tableinsertcolumnbefore",{text:"Insert column before",icon:"table-insert-column-before",command:"mceTableInsertColBefore",onSetup:t.onSetupColumn("onFirst")}),l("tableinsertcolumnafter",{text:"Insert column after",icon:"table-insert-column-after",command:"mceTableInsertColAfter",onSetup:t.onSetupColumn("onLast")}),l("tabledeletecolumn",{text:"Delete column",icon:"table-delete-column",command:"mceTableDeleteCol",onSetup:t.onSetupColumn("onAny")}),l("tablecutcolumn",{text:"Cut column",icon:"cut-column",command:"mceTableCutCol",onSetup:t.onSetupColumn("onAny")}),l("tablecopycolumn",{text:"Copy column",icon:"duplicate-column",command:"mceTableCopyCol",onSetup:t.onSetupColumn("onAny")}),l("tablepastecolumnbefore",{text:"Paste column before",icon:"paste-column-before",command:"mceTablePasteColBefore",onSetup:t.onSetupPasteableColumn(cl,"onFirst")}),l("tablepastecolumnafter",{text:"Paste column after",icon:"paste-column-after",command:"mceTablePasteColAfter",onSetup:t.onSetupPasteableColumn(cl,"onLast")})],i=[l("tablecellprops",{text:"Cell properties",icon:"table-cell-properties",command:"mceTableCellProps",onSetup:t.onSetupCellOrRow}),l("tablemergecells",{text:"Merge cells",icon:"table-merge-cells",command:"mceTableMergeCells",onSetup:t.onSetupMergeable}),l("tablesplitcells",{text:"Split cell",icon:"table-split-cells",command:"mceTableSplitCells",onSetup:t.onSetupUnmergeable})];wt(e)?e.ui.registry.addNestedMenuItem("inserttable",{text:"Table",icon:"table",getSubmenuItems:()=>[{type:"fancymenuitem",fancytype:"inserttable",onAction:r}]}):e.ui.registry.addMenuItem("inserttable",{text:"Table",icon:"table",onAction:o("mceInsertTableDialog")}),e.ui.registry.addMenuItem("inserttabledialog",{text:"Insert table",icon:"table",onAction:o("mceInsertTableDialog")}),l("tableprops",{text:"Table properties",onSetup:t.onSetupTable,command:"mceTableProps"}),l("deletetable",{text:"Delete table",icon:"table-delete-table",onSetup:t.onSetupTable,command:"mceTableDelete"}),D(a,!0)&&e.ui.registry.addNestedMenuItem("row",{type:"nestedmenuitem",text:"Row",getSubmenuItems:d("tableinsertrowbefore tableinsertrowafter tabledeleterow tablerowprops | 
tablecutrow tablecopyrow tablepasterowbefore tablepasterowafter")}),D(s,!0)&&e.ui.registry.addNestedMenuItem("column",{type:"nestedmenuitem",text:"Column",getSubmenuItems:d("tableinsertcolumnbefore tableinsertcolumnafter tabledeletecolumn | tablecutcolumn tablecopycolumn tablepastecolumnbefore tablepastecolumnafter")}),D(i,!0)&&e.ui.registry.addNestedMenuItem("cell",{type:"nestedmenuitem",text:"Cell",getSubmenuItems:d("tablecellprops tablemergecells tablesplitcells")}),e.ui.registry.addContextMenu("table",{update:()=>(t.resetTargets(),t.targets().fold(d(""),(e=>"caption"===G(e.element)?"tableprops deletetable":"cell row column | advtablesort | tableprops deletetable")))});const m=fo(Tt(e));0!==m.length&&e.queryCommandSupported("mceTableToggleClass")&&e.ui.registry.addNestedMenuItem("tableclass",{icon:"table-classes",text:"Table styles",getSubmenuItems:()=>go(e,m,"tableclass",(t=>e.execCommand("mceTableToggleClass",!1,t))),onSetup:t.onSetupTable});const u=fo(Ct(e));0!==u.length&&e.queryCommandSupported("mceTableCellToggleClass")&&e.ui.registry.addNestedMenuItem("tablecellclass",{icon:"table-cell-classes",text:"Cell styles",getSubmenuItems:()=>go(e,u,"tablecellclass",(t=>e.execCommand("mceTableCellToggleClass",!1,t))),onSetup:t.onSetupCellOrRow}),e.queryCommandSupported("mceTableApplyCellStyle")&&(e.ui.registry.addNestedMenuItem("tablecellvalign",{icon:"vertical-align",text:"Vertical align",getSubmenuItems:()=>go(e,Zt,"tablecellverticalalign",ho(e,"vertical-align")),onSetup:t.onSetupCellOrRow}),e.ui.registry.addNestedMenuItem("tablecellborderwidth",{icon:"border-width",text:"Border width",getSubmenuItems:()=>go(e,pt(e),"tablecellborderwidth",ho(e,"border-width")),onSetup:t.onSetupCellOrRow}),e.ui.registry.addNestedMenuItem("tablecellborderstyle",{icon:"border-style",text:"Border style",getSubmenuItems:()=>go(e,bt(e),"tablecellborderstyle",ho(e,"border-style")),onSetup:t.onSetupCellOrRow}),e.ui.registry.addNestedMenuItem("tablecellbackgroundcolor",{icon:"cell-background-color",text:"Background color",getSubmenuItems:()=>wo(e,At(e),"background-color"),onSetup:t.onSetupCellOrRow}),e.ui.registry.addNestedMenuItem("tablecellbordercolor",{icon:"cell-border-color",text:"Border color",getSubmenuItems:()=>wo(e,Rt(e),"border-color"),onSetup:t.onSetupCellOrRow})),n("tablecaption",{icon:"table-caption",text:"Table caption",command:"mceTableToggleCaption",onSetup:t.onSetupTableWithCaption}),n("tablerowheader",{text:"Row header",icon:"table-top-header",command:"mceTableRowType",onAction:So(e),onSetup:t.onSetupTableRowHeaders}),n("tablecolheader",{text:"Column header",icon:"table-left-header",command:"mceTableColType",onAction:Co(e),onSetup:t.onSetupTableRowHeaders})})(e,t),((e,t)=>{e.ui.registry.addMenuButton("table",{tooltip:"Table",icon:"table",fetch:e=>e("inserttable | cell row column | advtablesort | tableprops deletetable")});const o=t=>()=>e.execCommand(t),l=(t,l)=>{e.queryCommandSupported(l.command)&&e.ui.registry.addButton(t,{...l,onAction:c(l.onAction)?l.onAction:o(l.command)})},n=(t,l)=>{e.queryCommandSupported(l.command)&&e.ui.registry.addToggleButton(t,{...l,onAction:c(l.onAction)?l.onAction:o(l.command)})};l("tableprops",{tooltip:"Table properties",command:"mceTableProps",icon:"table",onSetup:t.onSetupTable}),l("tabledelete",{tooltip:"Delete table",command:"mceTableDelete",icon:"table-delete-table",onSetup:t.onSetupTable}),l("tablecellprops",{tooltip:"Cell properties",command:"mceTableCellProps",icon:"table-cell-properties",onSetup:t.onSetupCellOrRow}),l("tablemergecells",{tooltip:"Merge 
cells",command:"mceTableMergeCells",icon:"table-merge-cells",onSetup:t.onSetupMergeable}),l("tablesplitcells",{tooltip:"Split cell",command:"mceTableSplitCells",icon:"table-split-cells",onSetup:t.onSetupUnmergeable}),l("tableinsertrowbefore",{tooltip:"Insert row before",command:"mceTableInsertRowBefore",icon:"table-insert-row-above",onSetup:t.onSetupCellOrRow}),l("tableinsertrowafter",{tooltip:"Insert row after",command:"mceTableInsertRowAfter",icon:"table-insert-row-after",onSetup:t.onSetupCellOrRow}),l("tabledeleterow",{tooltip:"Delete row",command:"mceTableDeleteRow",icon:"table-delete-row",onSetup:t.onSetupCellOrRow}),l("tablerowprops",{tooltip:"Row properties",command:"mceTableRowProps",icon:"table-row-properties",onSetup:t.onSetupCellOrRow}),l("tableinsertcolbefore",{tooltip:"Insert column before",command:"mceTableInsertColBefore",icon:"table-insert-column-before",onSetup:t.onSetupColumn("onFirst")}),l("tableinsertcolafter",{tooltip:"Insert column after",command:"mceTableInsertColAfter",icon:"table-insert-column-after",onSetup:t.onSetupColumn("onLast")}),l("tabledeletecol",{tooltip:"Delete column",command:"mceTableDeleteCol",icon:"table-delete-column",onSetup:t.onSetupColumn("onAny")}),l("tablecutrow",{tooltip:"Cut row",command:"mceTableCutRow",icon:"cut-row",onSetup:t.onSetupCellOrRow}),l("tablecopyrow",{tooltip:"Copy row",command:"mceTableCopyRow",icon:"duplicate-row",onSetup:t.onSetupCellOrRow}),l("tablepasterowbefore",{tooltip:"Paste row before",command:"mceTablePasteRowBefore",icon:"paste-row-before",onSetup:t.onSetupPasteable(sl)}),l("tablepasterowafter",{tooltip:"Paste row after",command:"mceTablePasteRowAfter",icon:"paste-row-after",onSetup:t.onSetupPasteable(sl)}),l("tablecutcol",{tooltip:"Cut column",command:"mceTableCutCol",icon:"cut-column",onSetup:t.onSetupColumn("onAny")}),l("tablecopycol",{tooltip:"Copy column",command:"mceTableCopyCol",icon:"duplicate-column",onSetup:t.onSetupColumn("onAny")}),l("tablepastecolbefore",{tooltip:"Paste column before",command:"mceTablePasteColBefore",icon:"paste-column-before",onSetup:t.onSetupPasteableColumn(cl,"onFirst")}),l("tablepastecolafter",{tooltip:"Paste column after",command:"mceTablePasteColAfter",icon:"paste-column-after",onSetup:t.onSetupPasteableColumn(cl,"onLast")}),l("tableinsertdialog",{tooltip:"Insert table",command:"mceInsertTableDialog",icon:"table"});const r=fo(Tt(e));0!==r.length&&e.queryCommandSupported("mceTableToggleClass")&&e.ui.registry.addMenuButton("tableclass",{icon:"table-classes",tooltip:"Table styles",fetch:yo(e,r,"tableclass",(t=>e.execCommand("mceTableToggleClass",!1,t))),onSetup:t.onSetupTable});const a=fo(Ct(e));0!==a.length&&e.queryCommandSupported("mceTableCellToggleClass")&&e.ui.registry.addMenuButton("tablecellclass",{icon:"table-cell-classes",tooltip:"Cell styles",fetch:yo(e,a,"tablecellclass",(t=>e.execCommand("mceTableCellToggleClass",!1,t))),onSetup:t.onSetupCellOrRow}),e.queryCommandSupported("mceTableApplyCellStyle")&&(e.ui.registry.addMenuButton("tablecellvalign",{icon:"vertical-align",tooltip:"Vertical align",fetch:yo(e,Zt,"tablecellverticalalign",ho(e,"vertical-align")),onSetup:t.onSetupCellOrRow}),e.ui.registry.addMenuButton("tablecellborderwidth",{icon:"border-width",tooltip:"Border width",fetch:yo(e,pt(e),"tablecellborderwidth",ho(e,"border-width")),onSetup:t.onSetupCellOrRow}),e.ui.registry.addMenuButton("tablecellborderstyle",{icon:"border-style",tooltip:"Border 
style",fetch:yo(e,bt(e),"tablecellborderstyle",ho(e,"border-style")),onSetup:t.onSetupCellOrRow}),e.ui.registry.addMenuButton("tablecellbackgroundcolor",{icon:"cell-background-color",tooltip:"Background color",fetch:t=>t(wo(e,At(e),"background-color")),onSetup:t.onSetupCellOrRow}),e.ui.registry.addMenuButton("tablecellbordercolor",{icon:"cell-border-color",tooltip:"Border color",fetch:t=>t(wo(e,Rt(e),"border-color")),onSetup:t.onSetupCellOrRow})),n("tablecaption",{tooltip:"Table caption",icon:"table-caption",command:"mceTableToggleCaption",onSetup:t.onSetupTableWithCaption}),n("tablerowheader",{tooltip:"Row header",icon:"table-top-header",command:"mceTableRowType",onAction:So(e),onSetup:t.onSetupTableRowHeaders}),n("tablecolheader",{tooltip:"Column header",icon:"table-left-header",command:"mceTableColType",onAction:Co(e),onSetup:t.onSetupTableColumnHeaders})})(e,t),(e=>{const t=xt(e);t.length>0&&e.ui.registry.addContextToolbar("table",{predicate:t=>e.dom.is(t,"table")&&e.getBody().contains(t),items:t,scope:"node",position:"node"})})(e)}))}(); | ui/public/libs/tinymce/plugins/table/plugin.min.js | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.00017028837464749813,
0.00017028837464749813,
0.00017028837464749813,
0.00017028837464749813,
0
] |
{
"id": 2,
"code_window": [
"\n",
"\t// load on app serve\n",
"\tapp.OnBeforeServe().Add(func(e *ServeEvent) error {\n",
"\t\tloadJob()\n",
"\t\treturn nil\n",
"\t})\n",
"\n",
"\t// stop the ticker on app termination\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tisServe = true\n"
],
"file_path": "core/base_backup.go",
"type": "add",
"edit_start_line_idx": 323
} | package core
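// base_backup.go implements the app-level backup helpers: CreateBackup archives
// the current pb_data directory into the configured backups filesystem, while
// RestoreBackup extracts a previously created backup in place of pb_data and
// restarts the running application process.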
import (
"context"
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"runtime"
"sort"
"time"
"github.com/pocketbase/pocketbase/daos"
"github.com/pocketbase/pocketbase/models"
"github.com/pocketbase/pocketbase/tools/archive"
"github.com/pocketbase/pocketbase/tools/cron"
"github.com/pocketbase/pocketbase/tools/filesystem"
"github.com/pocketbase/pocketbase/tools/security"
)
const CacheKeyActiveBackup string = "@activeBackup"
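// CacheKeyActiveBackup acts as a simple app-wide lock: CreateBackup and
// RestoreBackup refuse to start while the key is present in the app cache and
// remove it via defer once they finish, so only one backup/restore operation
// can run at a time.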
// CreateBackup creates a new backup of the current app pb_data directory.
//
// If name is empty, it will be autogenerated.
// If backup with the same name exists, the new backup file will replace it.
//
// The backup is executed within a transaction, meaning that new writes
// will be temporarily "blocked" until the backup file is generated.
//
// By default backups are stored in pb_data/backups
// (the backups directory itself is excluded from the generated backup).
//
// When using S3 storage for the uploaded collection files, you have to
// back them up manually since they are not part of the pb_data.
//
// Backups can be stored on S3 if it is configured in app.Settings().Backups.
func (app *BaseApp) CreateBackup(ctx context.Context, name string) error {
if app.Cache().Has(CacheKeyActiveBackup) {
return errors.New("try again later - another backup/restore operation has already been started")
}
// auto generate backup name
if name == "" {
name = fmt.Sprintf(
"pb_backup_%s.zip",
time.Now().UTC().Format("20060102150405"),
)
}
app.Cache().Set(CacheKeyActiveBackup, name)
defer app.Cache().Remove(CacheKeyActiveBackup)
	// Archive pb_data in a temp directory, excluding the "backups" dir itself (if it exists).
//
	// Run in a transaction to temporarily block other writes (transactions use the NonconcurrentDB connection).
// ---
tempPath := filepath.Join(os.TempDir(), "pb_backup_"+security.PseudorandomString(4))
createErr := app.Dao().RunInTransaction(func(txDao *daos.Dao) error {
if err := archive.Create(app.DataDir(), tempPath, LocalBackupsDirName); err != nil {
return err
}
return nil
})
if createErr != nil {
return createErr
}
defer os.Remove(tempPath)
// Persist the backup in the backups filesystem.
// ---
fsys, err := app.NewBackupsFilesystem()
if err != nil {
return err
}
defer fsys.Close()
fsys.SetContext(ctx)
file, err := filesystem.NewFileFromPath(tempPath)
if err != nil {
return err
}
file.OriginalName = name
file.Name = file.OriginalName
if err := fsys.UploadFile(file, file.Name); err != nil {
return err
}
return nil
}
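// Illustrative usage sketch (assumed caller context, not part of this package's
// API surface): creating a manual backup with an autogenerated name:
//
//	if err := app.CreateBackup(context.Background(), ""); err != nil {
//		log.Println("failed to create backup:", err)
//	}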
// RestoreBackup restores the backup with the specified name and restarts
// the current running application process.
//
// NB! This feature is experimental and is currently expected to work only on UNIX-based systems.
//
// To safely perform the restore it is recommended to have free disk space
// for at least 2x the size of the restored pb_data backup.
//
// The performed steps are:
//
// 1. Download the backup with the specified name in a temp location
// (this is in case of S3; otherwise it creates a temp copy of the zip)
//
// 2. Extract the backup in a temp directory next to the app "pb_data"
// (eg. "pb_data/../pb_data_to_restore").
//
// 3. Move the current app "pb_data" under a special sub temp dir that
// will be deleted on the next app start up (eg. "pb_data_to_restore/.pb_temp_to_delete/").
// This is because on some operating systems it may not be allowed
// to delete the currently open "pb_data" files.
//
// 4. Rename the extracted dir from step 1 as the new "pb_data".
//
// 5. Move from the old "pb_data" any local backups that may have been
// created previously to the new "pb_data/backups".
//
// 6. Restart the app (on successful app bootstrap it will also remove the old pb_data).
//
// If a failure occurs during the restore process, the dir changes are reverted.
// If for whatever reason the revert is not possible, it panics.
func (app *BaseApp) RestoreBackup(ctx context.Context, name string) error {
if runtime.GOOS == "windows" {
return errors.New("restore is not supported on windows")
}
if app.Cache().Has(CacheKeyActiveBackup) {
return errors.New("try again later - another backup/restore operation has already been started")
}
app.Cache().Set(CacheKeyActiveBackup, name)
defer app.Cache().Remove(CacheKeyActiveBackup)
fsys, err := app.NewBackupsFilesystem()
if err != nil {
return err
}
defer fsys.Close()
fsys.SetContext(ctx)
// fetch the backup file in a temp location
br, err := fsys.GetFile(name)
if err != nil {
return err
}
defer br.Close()
tempZip, err := os.CreateTemp(os.TempDir(), "pb_restore")
if err != nil {
return err
}
defer os.Remove(tempZip.Name())
if _, err := io.Copy(tempZip, br); err != nil {
return err
}
parentDataDir := filepath.Dir(app.DataDir())
extractedDataDir := filepath.Join(parentDataDir, "pb_restore_"+security.PseudorandomString(4))
defer os.RemoveAll(extractedDataDir)
if err := archive.Extract(tempZip.Name(), extractedDataDir); err != nil {
return err
}
// ensure that a database file exists
extractedDB := filepath.Join(extractedDataDir, "data.db")
if _, err := os.Stat(extractedDB); err != nil {
return fmt.Errorf("data.db file is missing or invalid: %w", err)
}
// remove the extracted zip file since we no longer need it
// (this is in case the app restarts and the defer calls are not called)
if err := os.Remove(tempZip.Name()); err != nil && app.IsDebug() {
log.Println(err)
}
// make sure that a special temp directory exists in the extracted one
if err := os.MkdirAll(filepath.Join(extractedDataDir, LocalTempDirName), os.ModePerm); err != nil {
return fmt.Errorf("failed to create a temp dir: %w", err)
}
// move the current pb_data to a special temp location that will
// hold the old data between dirs replace
// (the temp dir will be automatically removed on the next app start)
oldTempDataDir := filepath.Join(extractedDataDir, LocalTempDirName, "old_pb_data")
if err := os.Rename(app.DataDir(), oldTempDataDir); err != nil {
return fmt.Errorf("failed to move the current pb_data to a temp location: %w", err)
}
// "restore", aka. set the extracted backup as the new pb_data directory
if err := os.Rename(extractedDataDir, app.DataDir()); err != nil {
return fmt.Errorf("failed to set the extracted backup as pb_data dir: %w", err)
}
// update the old temp data dir path after the restore
oldTempDataDir = filepath.Join(app.DataDir(), LocalTempDirName, "old_pb_data")
oldLocalBackupsDir := filepath.Join(oldTempDataDir, LocalBackupsDirName)
newLocalBackupsDir := filepath.Join(app.DataDir(), LocalBackupsDirName)
revertDataDirChanges := func(revertLocalBackupsDir bool) error {
if revertLocalBackupsDir {
if _, err := os.Stat(newLocalBackupsDir); err == nil {
if err := os.Rename(newLocalBackupsDir, oldLocalBackupsDir); err != nil {
return fmt.Errorf("failed to revert the backups dir change: %w", err)
}
}
}
if err := os.Rename(app.DataDir(), extractedDataDir); err != nil {
return fmt.Errorf("failed to revert the extracted dir change: %w", err)
}
if err := os.Rename(oldTempDataDir, app.DataDir()); err != nil {
return fmt.Errorf("failed to revert old pb_data dir change: %w", err)
}
return nil
}
// restore the local pb_data/backups dir (if any)
if _, err := os.Stat(oldLocalBackupsDir); err == nil {
if err := os.Rename(oldLocalBackupsDir, newLocalBackupsDir); err != nil {
if err := revertDataDirChanges(false); err != nil && app.IsDebug() {
log.Println(err)
}
return fmt.Errorf("failed to move the local pb_data/backups dir: %w", err)
}
}
// restart the app
if err := app.Restart(); err != nil {
if err := revertDataDirChanges(true); err != nil {
panic(err)
}
return fmt.Errorf("failed to restart the app process: %w", err)
}
return nil
}
// initAutobackupHooks registers the autobackup app serve hooks.
// @todo add tests
func (app *BaseApp) initAutobackupHooks() error {
c := cron.New()
loadJob := func() {
c.Stop()
rawSchedule := app.Settings().Backups.Cron
if rawSchedule == "" || !app.IsBootstrapped() {
return
}
c.Add("@autobackup", rawSchedule, func() {
autoPrefix := "@auto_pb_backup_"
name := fmt.Sprintf(
"%s%s.zip",
autoPrefix,
time.Now().UTC().Format("20060102150405"),
)
if err := app.CreateBackup(context.Background(), name); err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
}
maxKeep := app.Settings().Backups.CronMaxKeep
if maxKeep == 0 {
return // no explicit limit
}
fsys, err := app.NewBackupsFilesystem()
if err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
return
}
defer fsys.Close()
files, err := fsys.List(autoPrefix)
if err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
return
}
if maxKeep >= len(files) {
return // nothing to remove
}
// sort desc
sort.Slice(files, func(i, j int) bool {
return files[i].ModTime.After(files[j].ModTime)
})
// keep only the most recent n auto backup files
toRemove := files[maxKeep:]
for _, f := range toRemove {
if err := fsys.Delete(f.Key); err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
}
}
})
// restart the ticker
c.Start()
}
// load on app serve
app.OnBeforeServe().Add(func(e *ServeEvent) error {
loadJob()
return nil
})
// stop the ticker on app termination
app.OnTerminate().Add(func(e *TerminateEvent) error {
c.Stop()
return nil
})
// reload on app settings change
app.OnModelAfterUpdate((&models.Param{}).TableName()).Add(func(e *ModelEvent) error {
if !c.HasStarted() {
return nil // no need to reload as it hasn't been started yet
}
p := e.Model.(*models.Param)
if p == nil || p.Key != models.ParamAppSettings {
return nil
}
loadJob()
return nil
})
return nil
}
| core/base_backup.go | 1 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.953962504863739,
0.026724539697170258,
0.00016324834723491222,
0.0001697664993116632,
0.1567320078611374
] |
{
"id": 2,
"code_window": [
"\n",
"\t// load on app serve\n",
"\tapp.OnBeforeServe().Add(func(e *ServeEvent) error {\n",
"\t\tloadJob()\n",
"\t\treturn nil\n",
"\t})\n",
"\n",
"\t// stop the ticker on app termination\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tisServe = true\n"
],
"file_path": "core/base_backup.go",
"type": "add",
"edit_start_line_idx": 323
} | {"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"image/png","user.metadata":null,"md5":"zZhZjzVvCvpcxtMAJie3GQ=="}
| tests/data/storage/wsmn24bux7wo113/84nmscqy84lsi1t/300_WlbFWSGmW9.png.attrs | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.00017116092203650624,
0.00017116092203650624,
0.00017116092203650624,
0.00017116092203650624,
0
] |
{
"id": 2,
"code_window": [
"\n",
"\t// load on app serve\n",
"\tapp.OnBeforeServe().Add(func(e *ServeEvent) error {\n",
"\t\tloadJob()\n",
"\t\treturn nil\n",
"\t})\n",
"\n",
"\t// stop the ticker on app termination\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tisServe = true\n"
],
"file_path": "core/base_backup.go",
"type": "add",
"edit_start_line_idx": 323
} | <svg viewBox="0 0 36 36" fill="none" role="img" xmlns="http://www.w3.org/2000/svg" width="128" height="128"><title>Elizabeth Peratrovich</title><mask id="mask__beam" maskUnits="userSpaceOnUse" x="0" y="0" width="36" height="36"><rect width="36" height="36" fill="#FFFFFF"></rect></mask><g mask="url(#mask__beam)"><rect width="36" height="36" fill="#004853"></rect><rect x="0" y="0" width="36" height="36" transform="translate(9 -5) rotate(219 18 18) scale(1)" fill="#00b9bd" rx="6"></rect><g transform="translate(4.5 -4) rotate(9 18 18)"><path d="M15 19c2 1 4 1 6 0" stroke="#000000" fill="none" stroke-linecap="round"></path><rect x="10" y="14" width="1.5" height="2" rx="1" stroke="none" fill="#000000"></rect><rect x="24" y="14" width="1.5" height="2" rx="1" stroke="none" fill="#000000"></rect></g></g></svg>
| ui/public/images/avatars/avatar7.svg | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.00017460192611906677,
0.00017460192611906677,
0.00017460192611906677,
0.00017460192611906677,
0
] |
{
"id": 2,
"code_window": [
"\n",
"\t// load on app serve\n",
"\tapp.OnBeforeServe().Add(func(e *ServeEvent) error {\n",
"\t\tloadJob()\n",
"\t\treturn nil\n",
"\t})\n",
"\n",
"\t// stop the ticker on app termination\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tisServe = true\n"
],
"file_path": "core/base_backup.go",
"type": "add",
"edit_start_line_idx": 323
} | package dbutils
import (
"regexp"
"strings"
"github.com/pocketbase/pocketbase/tools/tokenizer"
)
var (
indexRegex = regexp.MustCompile(`(?im)create\s+(unique\s+)?\s*index\s*(if\s+not\s+exists\s+)?(\S*)\s+on\s+(\S*)\s+\(([\s\S]*)\)(?:\s*where\s+([\s\S]*))?`)
indexColumnRegex = regexp.MustCompile(`(?im)^([\s\S]+?)(?:\s+collate\s+([\w]+))?(?:\s+(asc|desc))?$`)
)
// IndexColumn represents a single parsed SQL index column.
type IndexColumn struct {
Name string `json:"name"` // identifier or expression
Collate string `json:"collate"`
Sort string `json:"sort"`
}
// Index represents a single parsed SQL CREATE INDEX expression.
type Index struct {
Unique bool `json:"unique"`
Optional bool `json:"optional"`
SchemaName string `json:"schemaName"`
IndexName string `json:"indexName"`
TableName string `json:"tableName"`
Columns []IndexColumn `json:"columns"`
Where string `json:"where"`
}
// IsValid checks if the current Index contains the minimum required fields to be considered valid.
func (idx Index) IsValid() bool {
return idx.IndexName != "" && idx.TableName != "" && len(idx.Columns) > 0
}
// Build returns a "CREATE INDEX" SQL string from the current index parts.
//
// Returns empty string if idx.IsValid() is false.
func (idx Index) Build() string {
if !idx.IsValid() {
return ""
}
var str strings.Builder
str.WriteString("CREATE ")
if idx.Unique {
str.WriteString("UNIQUE ")
}
str.WriteString("INDEX ")
if idx.Optional {
str.WriteString("IF NOT EXISTS ")
}
if idx.SchemaName != "" {
str.WriteString("`")
str.WriteString(idx.SchemaName)
str.WriteString("`.")
}
str.WriteString("`")
str.WriteString(idx.IndexName)
str.WriteString("` ")
str.WriteString("ON `")
str.WriteString(idx.TableName)
str.WriteString("` (")
if len(idx.Columns) > 1 {
str.WriteString("\n ")
}
var hasCol bool
for _, col := range idx.Columns {
trimmedColName := strings.TrimSpace(col.Name)
if trimmedColName == "" {
continue
}
if hasCol {
str.WriteString(",\n ")
}
if strings.Contains(col.Name, "(") || strings.Contains(col.Name, " ") {
// most likely an expression
str.WriteString(trimmedColName)
} else {
// regular identifier
str.WriteString("`")
str.WriteString(trimmedColName)
str.WriteString("`")
}
if col.Collate != "" {
str.WriteString(" COLLATE ")
str.WriteString(col.Collate)
}
if col.Sort != "" {
str.WriteString(" ")
str.WriteString(strings.ToUpper(col.Sort))
}
hasCol = true
}
if hasCol && len(idx.Columns) > 1 {
str.WriteString("\n")
}
str.WriteString(")")
if idx.Where != "" {
str.WriteString(" WHERE ")
str.WriteString(idx.Where)
}
return str.String()
}
// ParseIndex parses the provided "CREATE INDEX" SQL string into Index struct.
func ParseIndex(createIndexExpr string) Index {
result := Index{}
matches := indexRegex.FindStringSubmatch(createIndexExpr)
if len(matches) != 7 {
return result
}
trimChars := "`\"'[]\r\n\t\f\v "
// Unique
// ---
result.Unique = strings.TrimSpace(matches[1]) != ""
// Optional (aka. "IF NOT EXISTS")
// ---
result.Optional = strings.TrimSpace(matches[2]) != ""
// SchemaName and IndexName
// ---
nameTk := tokenizer.NewFromString(matches[3])
nameTk.Separators('.')
nameParts, _ := nameTk.ScanAll()
if len(nameParts) == 2 {
result.SchemaName = strings.Trim(nameParts[0], trimChars)
result.IndexName = strings.Trim(nameParts[1], trimChars)
} else {
result.IndexName = strings.Trim(nameParts[0], trimChars)
}
// TableName
// ---
result.TableName = strings.Trim(matches[4], trimChars)
// Columns
// ---
columnsTk := tokenizer.NewFromString(matches[5])
columnsTk.Separators(',')
rawColumns, _ := columnsTk.ScanAll()
result.Columns = make([]IndexColumn, 0, len(rawColumns))
for _, col := range rawColumns {
colMatches := indexColumnRegex.FindStringSubmatch(col)
if len(colMatches) != 4 {
continue
}
trimmedName := strings.Trim(colMatches[1], trimChars)
if trimmedName == "" {
continue
}
result.Columns = append(result.Columns, IndexColumn{
Name: trimmedName,
Collate: strings.TrimSpace(colMatches[2]),
Sort: strings.ToUpper(colMatches[3]),
})
}
// WHERE expression
// ---
result.Where = strings.TrimSpace(matches[6])
return result
}
| tools/dbutils/index.go | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.00039666914381086826,
0.00018360474496148527,
0.00016286075697280467,
0.0001738548744469881,
0.00004900993371848017
] |
{
"id": 3,
"code_window": [
"\n",
"\t// reload on app settings change\n",
"\tapp.OnModelAfterUpdate((&models.Param{}).TableName()).Add(func(e *ModelEvent) error {\n",
"\t\tif !c.HasStarted() {\n",
"\t\t\treturn nil // no need to reload as it hasn't been started yet\n",
"\t\t}\n",
"\n",
"\t\tp := e.Model.(*models.Param)\n",
"\t\tif p == nil || p.Key != models.ParamAppSettings {\n",
"\t\t\treturn nil\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "core/base_backup.go",
"type": "replace",
"edit_start_line_idx": 335
} | package core
import (
"context"
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"runtime"
"sort"
"time"
"github.com/pocketbase/pocketbase/daos"
"github.com/pocketbase/pocketbase/models"
"github.com/pocketbase/pocketbase/tools/archive"
"github.com/pocketbase/pocketbase/tools/cron"
"github.com/pocketbase/pocketbase/tools/filesystem"
"github.com/pocketbase/pocketbase/tools/security"
)
const CacheKeyActiveBackup string = "@activeBackup"
// CreateBackup creates a new backup of the current app pb_data directory.
//
// If name is empty, it will be autogenerated.
// If a backup with the same name exists, the new backup file will replace it.
//
// The backup is executed within a transaction, meaning that new writes
// will be temporarily "blocked" until the backup file is generated.
//
// By default backups are stored in pb_data/backups
// (the backups directory itself is excluded from the generated backup).
//
// When using S3 storage for the uploaded collection files, you have to
// take care to back them up manually since they are not part of the pb_data.
//
// Backups can be stored on S3 if it is configured in app.Settings().Backups.
func (app *BaseApp) CreateBackup(ctx context.Context, name string) error {
if app.Cache().Has(CacheKeyActiveBackup) {
return errors.New("try again later - another backup/restore operation has already been started")
}
// auto generate backup name
if name == "" {
name = fmt.Sprintf(
"pb_backup_%s.zip",
time.Now().UTC().Format("20060102150405"),
)
}
app.Cache().Set(CacheKeyActiveBackup, name)
defer app.Cache().Remove(CacheKeyActiveBackup)
// Archive pb_data in a temp directory, excluding the "backups" dir itself (if it exists).
//
// Run in a transaction to temporarily block other writes (transactions use the NonconcurrentDB connection).
// ---
tempPath := filepath.Join(os.TempDir(), "pb_backup_"+security.PseudorandomString(4))
createErr := app.Dao().RunInTransaction(func(txDao *daos.Dao) error {
if err := archive.Create(app.DataDir(), tempPath, LocalBackupsDirName); err != nil {
return err
}
return nil
})
if createErr != nil {
return createErr
}
defer os.Remove(tempPath)
// Persist the backup in the backups filesystem.
// ---
fsys, err := app.NewBackupsFilesystem()
if err != nil {
return err
}
defer fsys.Close()
fsys.SetContext(ctx)
file, err := filesystem.NewFileFromPath(tempPath)
if err != nil {
return err
}
file.OriginalName = name
file.Name = file.OriginalName
if err := fsys.UploadFile(file, file.Name); err != nil {
return err
}
return nil
}
// RestoreBackup restores the backup with the specified name and restarts
// the current running application process.
//
// NB! This feature is experimental and is currently expected to work only on UNIX-based systems.
//
// To safely perform the restore it is recommended to have free disk space
// for at least 2x the size of the restored pb_data backup.
//
// The performed steps are:
//
// 1. Download the backup with the specified name in a temp location
// (this is in case of S3; otherwise it creates a temp copy of the zip)
//
// 2. Extract the backup in a temp directory next to the app "pb_data"
// (eg. "pb_data/../pb_data_to_restore").
//
// 3. Move the current app "pb_data" under a special sub temp dir that
// will be deleted on the next app start up (eg. "pb_data_to_restore/.pb_temp_to_delete/").
// This is because on some operating systems it may not be allowed
// to delete the currently open "pb_data" files.
//
// 4. Rename the extracted dir from step 1 as the new "pb_data".
//
// 5. Move from the old "pb_data" any local backups that may have been
// created previously to the new "pb_data/backups".
//
// 6. Restart the app (on successful app bootstrap it will also remove the old pb_data).
//
// If a failure occurs during the restore process, the dir changes are reverted.
// If for whatever reason the revert is not possible, it panics.
func (app *BaseApp) RestoreBackup(ctx context.Context, name string) error {
if runtime.GOOS == "windows" {
return errors.New("restore is not supported on windows")
}
if app.Cache().Has(CacheKeyActiveBackup) {
return errors.New("try again later - another backup/restore operation has already been started")
}
app.Cache().Set(CacheKeyActiveBackup, name)
defer app.Cache().Remove(CacheKeyActiveBackup)
fsys, err := app.NewBackupsFilesystem()
if err != nil {
return err
}
defer fsys.Close()
fsys.SetContext(ctx)
// fetch the backup file in a temp location
br, err := fsys.GetFile(name)
if err != nil {
return err
}
defer br.Close()
tempZip, err := os.CreateTemp(os.TempDir(), "pb_restore")
if err != nil {
return err
}
defer os.Remove(tempZip.Name())
if _, err := io.Copy(tempZip, br); err != nil {
return err
}
parentDataDir := filepath.Dir(app.DataDir())
extractedDataDir := filepath.Join(parentDataDir, "pb_restore_"+security.PseudorandomString(4))
defer os.RemoveAll(extractedDataDir)
if err := archive.Extract(tempZip.Name(), extractedDataDir); err != nil {
return err
}
// ensure that a database file exists
extractedDB := filepath.Join(extractedDataDir, "data.db")
if _, err := os.Stat(extractedDB); err != nil {
return fmt.Errorf("data.db file is missing or invalid: %w", err)
}
// remove the extracted zip file since we no longer need it
// (this is in case the app restarts and the defer calls are not called)
if err := os.Remove(tempZip.Name()); err != nil && app.IsDebug() {
log.Println(err)
}
// make sure that a special temp directory exists in the extracted one
if err := os.MkdirAll(filepath.Join(extractedDataDir, LocalTempDirName), os.ModePerm); err != nil {
return fmt.Errorf("failed to create a temp dir: %w", err)
}
// move the current pb_data to a special temp location that will
// hold the old data between dirs replace
// (the temp dir will be automatically removed on the next app start)
oldTempDataDir := filepath.Join(extractedDataDir, LocalTempDirName, "old_pb_data")
if err := os.Rename(app.DataDir(), oldTempDataDir); err != nil {
return fmt.Errorf("failed to move the current pb_data to a temp location: %w", err)
}
// "restore", aka. set the extracted backup as the new pb_data directory
if err := os.Rename(extractedDataDir, app.DataDir()); err != nil {
return fmt.Errorf("failed to set the extracted backup as pb_data dir: %w", err)
}
// update the old temp data dir path after the restore
oldTempDataDir = filepath.Join(app.DataDir(), LocalTempDirName, "old_pb_data")
oldLocalBackupsDir := filepath.Join(oldTempDataDir, LocalBackupsDirName)
newLocalBackupsDir := filepath.Join(app.DataDir(), LocalBackupsDirName)
revertDataDirChanges := func(revertLocalBackupsDir bool) error {
if revertLocalBackupsDir {
if _, err := os.Stat(newLocalBackupsDir); err == nil {
if err := os.Rename(newLocalBackupsDir, oldLocalBackupsDir); err != nil {
return fmt.Errorf("failed to revert the backups dir change: %w", err)
}
}
}
if err := os.Rename(app.DataDir(), extractedDataDir); err != nil {
return fmt.Errorf("failed to revert the extracted dir change: %w", err)
}
if err := os.Rename(oldTempDataDir, app.DataDir()); err != nil {
return fmt.Errorf("failed to revert old pb_data dir change: %w", err)
}
return nil
}
// restore the local pb_data/backups dir (if any)
if _, err := os.Stat(oldLocalBackupsDir); err == nil {
if err := os.Rename(oldLocalBackupsDir, newLocalBackupsDir); err != nil {
if err := revertDataDirChanges(false); err != nil && app.IsDebug() {
log.Println(err)
}
return fmt.Errorf("failed to move the local pb_data/backups dir: %w", err)
}
}
// restart the app
if err := app.Restart(); err != nil {
if err := revertDataDirChanges(true); err != nil {
panic(err)
}
return fmt.Errorf("failed to restart the app process: %w", err)
}
return nil
}
// initAutobackupHooks registers the autobackup app serve hooks.
// @todo add tests
func (app *BaseApp) initAutobackupHooks() error {
c := cron.New()
loadJob := func() {
c.Stop()
rawSchedule := app.Settings().Backups.Cron
if rawSchedule == "" || !app.IsBootstrapped() {
return
}
c.Add("@autobackup", rawSchedule, func() {
autoPrefix := "@auto_pb_backup_"
name := fmt.Sprintf(
"%s%s.zip",
autoPrefix,
time.Now().UTC().Format("20060102150405"),
)
if err := app.CreateBackup(context.Background(), name); err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
}
maxKeep := app.Settings().Backups.CronMaxKeep
if maxKeep == 0 {
return // no explicit limit
}
fsys, err := app.NewBackupsFilesystem()
if err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
return
}
defer fsys.Close()
files, err := fsys.List(autoPrefix)
if err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
return
}
if maxKeep >= len(files) {
return // nothing to remove
}
// sort desc
sort.Slice(files, func(i, j int) bool {
return files[i].ModTime.After(files[j].ModTime)
})
// keep only the most recent n auto backup files
toRemove := files[maxKeep:]
for _, f := range toRemove {
if err := fsys.Delete(f.Key); err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
}
}
})
// restart the ticker
c.Start()
}
// load on app serve
app.OnBeforeServe().Add(func(e *ServeEvent) error {
loadJob()
return nil
})
// stop the ticker on app termination
app.OnTerminate().Add(func(e *TerminateEvent) error {
c.Stop()
return nil
})
// reload on app settings change
app.OnModelAfterUpdate((&models.Param{}).TableName()).Add(func(e *ModelEvent) error {
if !c.HasStarted() {
return nil // no need to reload as it hasn't been started yet
}
p := e.Model.(*models.Param)
if p == nil || p.Key != models.ParamAppSettings {
return nil
}
loadJob()
return nil
})
return nil
}
| core/base_backup.go | 1 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.9980446100234985,
0.05565020814538002,
0.00016146818234119564,
0.00016993272583931684,
0.22855497896671295
] |
{
"id": 3,
"code_window": [
"\n",
"\t// reload on app settings change\n",
"\tapp.OnModelAfterUpdate((&models.Param{}).TableName()).Add(func(e *ModelEvent) error {\n",
"\t\tif !c.HasStarted() {\n",
"\t\t\treturn nil // no need to reload as it hasn't been started yet\n",
"\t\t}\n",
"\n",
"\t\tp := e.Model.(*models.Param)\n",
"\t\tif p == nil || p.Key != models.ParamAppSettings {\n",
"\t\t\treturn nil\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "core/base_backup.go",
"type": "replace",
"edit_start_line_idx": 335
} | <!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta
http-equiv="Content-Security-Policy"
content="default-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' http://127.0.0.1:* data: blob:; connect-src 'self' http://127.0.0.1:*; script-src 'self' 'sha256-GRUzBA7PzKYug7pqxv5rJaec5bwDCw1Vo6/IXwvD3Tc='"
/>
<title>PocketBase</title>
<link rel="apple-touch-icon" sizes="180x180" href="./images/favicon/apple-touch-icon.png">
<link rel="icon" type="image/png" sizes="32x32" href="./images/favicon/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="./images/favicon/favicon-16x16.png">
<link rel="manifest" href="./images/favicon/site.webmanifest">
<link rel="mask-icon" href="./images/favicon/safari-pinned-tab.svg" color="#000000">
<link rel="shortcut icon" href="./images/favicon/favicon.ico">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="msapplication-config" content="/images/favicon/browserconfig.xml">
<meta name="theme-color" content="#ffffff">
<!-- prefetch common tinymce resources to speed up the initial loading times -->
<link rel="prefetch" href="./libs/tinymce/skins/content/default/content.min.css" as="style" />
<link rel="prefetch" href="./libs/tinymce/skins/ui/pocketbase/skin.min.css" as="style" />
<link rel="prefetch" href="./libs/tinymce/skins/ui/pocketbase/content.min.css" as="style" />
<link rel="prefetch" href="./libs/tinymce/tinymce.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/themes/silver/theme.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/models/dom/model.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/icons/default/icons.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/plugins/directionality/plugin.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/plugins/autoresize/plugin.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/plugins/autolink/plugin.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/plugins/lists/plugin.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/plugins/link/plugin.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/plugins/image/plugin.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/plugins/searchreplace/plugin.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/plugins/fullscreen/plugin.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/plugins/media/plugin.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/plugins/table/plugin.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/plugins/code/plugin.min.js" as="script" />
<link rel="prefetch" href="./libs/tinymce/plugins/codesample/plugin.min.js" as="script" />
<script>
window.Prism = window.Prism || {};
window.Prism.manual = true;
</script>
<script type="module" crossorigin src="./assets/index-3f8c6248.js"></script>
<link rel="stylesheet" href="./assets/index-20683d26.css">
</head>
<body>
<div id="app"></div>
</body>
</html>
| ui/dist/index.html | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.00017656154523137957,
0.00017075188225135207,
0.00016640838293824345,
0.0001695210230536759,
0.0000037134198009880492
] |
{
"id": 3,
"code_window": [
"\n",
"\t// reload on app settings change\n",
"\tapp.OnModelAfterUpdate((&models.Param{}).TableName()).Add(func(e *ModelEvent) error {\n",
"\t\tif !c.HasStarted() {\n",
"\t\t\treturn nil // no need to reload as it hasn't been started yet\n",
"\t\t}\n",
"\n",
"\t\tp := e.Model.(*models.Param)\n",
"\t\tif p == nil || p.Key != models.ParamAppSettings {\n",
"\t\t\treturn nil\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "core/base_backup.go",
"type": "replace",
"edit_start_line_idx": 335
} | package daos_test
import (
"testing"
"github.com/pocketbase/pocketbase/models"
"github.com/pocketbase/pocketbase/tests"
)
func TestExternalAuthQuery(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
expected := "SELECT {{_externalAuths}}.* FROM `_externalAuths`"
sql := app.Dao().ExternalAuthQuery().Build().SQL()
if sql != expected {
t.Errorf("Expected sql %s, got %s", expected, sql)
}
}
func TestFindAllExternalAuthsByRecord(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
scenarios := []struct {
userId string
expectedCount int
}{
{"oap640cot4yru2s", 0},
{"4q1xlclmfloku33", 2},
}
for i, s := range scenarios {
record, err := app.Dao().FindRecordById("users", s.userId)
if err != nil {
t.Errorf("(%d) Unexpected record fetch error %v", i, err)
continue
}
auths, err := app.Dao().FindAllExternalAuthsByRecord(record)
if err != nil {
t.Errorf("(%d) Unexpected auths fetch error %v", i, err)
continue
}
if len(auths) != s.expectedCount {
t.Errorf("(%d) Expected %d auths, got %d", i, s.expectedCount, len(auths))
}
for _, auth := range auths {
if auth.RecordId != record.Id {
t.Errorf("(%d) Expected all auths to be linked to record id %s, got %v", i, record.Id, auth)
}
}
}
}
func TestFindExternalAuthByProvider(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
scenarios := []struct {
provider string
providerId string
expectedId string
}{
{"", "", ""},
{"github", "", ""},
{"github", "id1", ""},
{"github", "id2", ""},
{"google", "test123", "clmflokuq1xl341"},
{"gitlab", "test123", "dlmflokuq1xl342"},
}
for i, s := range scenarios {
auth, err := app.Dao().FindExternalAuthByProvider(s.provider, s.providerId)
hasErr := err != nil
expectErr := s.expectedId == ""
if hasErr != expectErr {
t.Errorf("(%d) Expected hasErr %v, got %v", i, expectErr, err)
continue
}
if auth != nil && auth.Id != s.expectedId {
t.Errorf("(%d) Expected external auth with ID %s, got \n%v", i, s.expectedId, auth)
}
}
}
func TestFindExternalAuthByRecordAndProvider(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
scenarios := []struct {
userId string
provider string
expectedId string
}{
{"bgs820n361vj1qd", "google", ""},
{"4q1xlclmfloku33", "google", "clmflokuq1xl341"},
{"4q1xlclmfloku33", "gitlab", "dlmflokuq1xl342"},
}
for i, s := range scenarios {
record, err := app.Dao().FindRecordById("users", s.userId)
if err != nil {
t.Errorf("(%d) Unexpected record fetch error %v", i, err)
continue
}
auth, err := app.Dao().FindExternalAuthByRecordAndProvider(record, s.provider)
hasErr := err != nil
expectErr := s.expectedId == ""
if hasErr != expectErr {
t.Errorf("(%d) Expected hasErr %v, got %v", i, expectErr, err)
continue
}
if auth != nil && auth.Id != s.expectedId {
t.Errorf("(%d) Expected external auth with ID %s, got \n%v", i, s.expectedId, auth)
}
}
}
func TestSaveExternalAuth(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
// save with empty provider data
emptyAuth := &models.ExternalAuth{}
if err := app.Dao().SaveExternalAuth(emptyAuth); err == nil {
t.Fatal("Expected error, got nil")
}
auth := &models.ExternalAuth{
RecordId: "o1y0dd0spd786md",
CollectionId: "v851q4r790rhknl",
Provider: "test",
ProviderId: "test_id",
}
if err := app.Dao().SaveExternalAuth(auth); err != nil {
t.Fatal(err)
}
// check if it was really saved
foundAuth, err := app.Dao().FindExternalAuthByProvider("test", "test_id")
if err != nil {
t.Fatal(err)
}
if auth.Id != foundAuth.Id {
t.Fatalf("Expected ExternalAuth with id %s, got \n%v", auth.Id, foundAuth)
}
}
func TestDeleteExternalAuth(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
record, err := app.Dao().FindRecordById("users", "4q1xlclmfloku33")
if err != nil {
t.Fatal(err)
}
auths, err := app.Dao().FindAllExternalAuthsByRecord(record)
if err != nil {
t.Fatal(err)
}
for _, auth := range auths {
if err := app.Dao().DeleteExternalAuth(auth); err != nil {
t.Fatalf("Failed to delete the ExternalAuth relation, got \n%v", err)
}
}
// check if the relations were really deleted
newAuths, err := app.Dao().FindAllExternalAuthsByRecord(record)
if err != nil {
t.Fatal(err)
}
if len(newAuths) != 0 {
t.Fatalf("Expected all record %s ExternalAuth relations to be deleted, got \n%v", record.Id, newAuths)
}
}
| daos/external_auth_test.go | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.00017524106078781188,
0.0001683538721408695,
0.0001609220780665055,
0.00016965725808404386,
0.000003555399644028512
] |
{
"id": 3,
"code_window": [
"\n",
"\t// reload on app settings change\n",
"\tapp.OnModelAfterUpdate((&models.Param{}).TableName()).Add(func(e *ModelEvent) error {\n",
"\t\tif !c.HasStarted() {\n",
"\t\t\treturn nil // no need to reload as it hasn't been started yet\n",
"\t\t}\n",
"\n",
"\t\tp := e.Model.(*models.Param)\n",
"\t\tif p == nil || p.Key != models.ParamAppSettings {\n",
"\t\t\treturn nil\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "core/base_backup.go",
"type": "replace",
"edit_start_line_idx": 335
} | <?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg width="256px" height="262px" viewBox="0 0 256 262" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" preserveAspectRatio="xMidYMid">
<g>
<path d="M255.878,133.451 C255.878,122.717 255.007,114.884 253.122,106.761 L130.55,106.761 L130.55,155.209 L202.497,155.209 C201.047,167.249 193.214,185.381 175.807,197.565 L175.563,199.187 L214.318,229.21 L217.003,229.478 C241.662,206.704 255.878,173.196 255.878,133.451" fill="#4285F4"></path>
<path d="M130.55,261.1 C165.798,261.1 195.389,249.495 217.003,229.478 L175.807,197.565 C164.783,205.253 149.987,210.62 130.55,210.62 C96.027,210.62 66.726,187.847 56.281,156.37 L54.75,156.5 L14.452,187.687 L13.925,189.152 C35.393,231.798 79.49,261.1 130.55,261.1" fill="#34A853"></path>
<path d="M56.281,156.37 C53.525,148.247 51.93,139.543 51.93,130.55 C51.93,121.556 53.525,112.853 56.136,104.73 L56.063,103 L15.26,71.312 L13.925,71.947 C5.077,89.644 0,109.517 0,130.55 C0,151.583 5.077,171.455 13.925,189.152 L56.281,156.37" fill="#FBBC05"></path>
<path d="M130.55,50.479 C155.064,50.479 171.6,61.068 181.029,69.917 L217.873,33.943 C195.245,12.91 165.798,0 130.55,0 C79.49,0 35.393,29.301 13.925,71.947 L56.136,104.73 C66.726,73.253 96.027,50.479 130.55,50.479" fill="#EB4335"></path>
</g>
</svg> | ui/public/images/oauth2/google.svg | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.0001633584324736148,
0.0001633584324736148,
0.0001633584324736148,
0.0001633584324736148,
0
] |
{
"id": 4,
"code_window": [
"\n",
"\treturn runInterceptors(form.Settings, func(s *settings.Settings) error {\n",
"\t\tform.Settings = s\n",
"\n",
"\t\tencryptionKey := os.Getenv(form.app.EncryptionEnv())\n",
"\t\tif err := form.dao.SaveSettings(form.Settings, encryptionKey); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\toldSettings, err := form.app.Settings().Clone();\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\t// eagerly merge the application settings with the form ones\n",
"\t\tif err := form.app.Settings().Merge(form.Settings); err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\t// persists settings change\n"
],
"file_path": "forms/settings_upsert.go",
"type": "add",
"edit_start_line_idx": 60
} | package core
import (
"context"
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"runtime"
"sort"
"time"
"github.com/pocketbase/pocketbase/daos"
"github.com/pocketbase/pocketbase/models"
"github.com/pocketbase/pocketbase/tools/archive"
"github.com/pocketbase/pocketbase/tools/cron"
"github.com/pocketbase/pocketbase/tools/filesystem"
"github.com/pocketbase/pocketbase/tools/security"
)
const CacheKeyActiveBackup string = "@activeBackup"
// CreateBackup creates a new backup of the current app pb_data directory.
//
// If name is empty, it will be autogenerated.
// If backup with the same name exists, the new backup file will replace it.
//
// The backup is executed within a transaction, meaning that new writes
// will be temporary "blocked" until the backup file is generated.
//
// By default backups are stored in pb_data/backups
// (the backups directory itself is excluded from the generated backup).
//
// When using S3 storage for the uploaded collection files, you have to
// take care manually to backup those since they are not part of the pb_data.
//
// Backups can be stored on S3 if it is configured in app.Settings().Backups.
func (app *BaseApp) CreateBackup(ctx context.Context, name string) error {
if app.Cache().Has(CacheKeyActiveBackup) {
return errors.New("try again later - another backup/restore operation has already been started")
}
// auto generate backup name
if name == "" {
name = fmt.Sprintf(
"pb_backup_%s.zip",
time.Now().UTC().Format("20060102150405"),
)
}
app.Cache().Set(CacheKeyActiveBackup, name)
defer app.Cache().Remove(CacheKeyActiveBackup)
// Archive pb_data in a temp directory, exluding the "backups" dir itself (if exist).
//
// Run in transaction to temporary block other writes (transactions uses the NonconcurrentDB connection).
// ---
tempPath := filepath.Join(os.TempDir(), "pb_backup_"+security.PseudorandomString(4))
createErr := app.Dao().RunInTransaction(func(txDao *daos.Dao) error {
if err := archive.Create(app.DataDir(), tempPath, LocalBackupsDirName); err != nil {
return err
}
return nil
})
if createErr != nil {
return createErr
}
defer os.Remove(tempPath)
// Persist the backup in the backups filesystem.
// ---
fsys, err := app.NewBackupsFilesystem()
if err != nil {
return err
}
defer fsys.Close()
fsys.SetContext(ctx)
file, err := filesystem.NewFileFromPath(tempPath)
if err != nil {
return err
}
file.OriginalName = name
file.Name = file.OriginalName
if err := fsys.UploadFile(file, file.Name); err != nil {
return err
}
return nil
}
// RestoreBackup restores the backup with the specified name and restarts
// the current running application process.
//
// NB! This feature is experimental and currently is expected to work only on UNIX based systems.
//
// To safely perform the restore it is recommended to have free disk space
// for at least 2x the size of the restored pb_data backup.
//
// The performed steps are:
//
// 1. Download the backup with the specified name in a temp location
// (this is in case of S3; otherwise it creates a temp copy of the zip)
//
// 2. Extract the backup in a temp directory next to the app "pb_data"
// (eg. "pb_data/../pb_data_to_restore").
//
// 3. Move the current app "pb_data" under a special sub temp dir that
// will be deleted on the next app start up (eg. "pb_data_to_restore/.pb_temp_to_delete/").
// This is because on some operating systems it may not be allowed
// to delete the currently open "pb_data" files.
//
// 4. Rename the extracted dir from step 1 as the new "pb_data".
//
// 5. Move from the old "pb_data" any local backups that may have been
// created previously to the new "pb_data/backups".
//
// 6. Restart the app (on successfull app bootstap it will also remove the old pb_data).
//
// If a failure occure during the restore process the dir changes are reverted.
// If for whatever reason the revert is not possible, it panics.
func (app *BaseApp) RestoreBackup(ctx context.Context, name string) error {
if runtime.GOOS == "windows" {
return errors.New("restore is not supported on windows")
}
if app.Cache().Has(CacheKeyActiveBackup) {
return errors.New("try again later - another backup/restore operation has already been started")
}
app.Cache().Set(CacheKeyActiveBackup, name)
defer app.Cache().Remove(CacheKeyActiveBackup)
fsys, err := app.NewBackupsFilesystem()
if err != nil {
return err
}
defer fsys.Close()
fsys.SetContext(ctx)
// fetch the backup file in a temp location
br, err := fsys.GetFile(name)
if err != nil {
return err
}
defer br.Close()
tempZip, err := os.CreateTemp(os.TempDir(), "pb_restore")
if err != nil {
return err
}
defer os.Remove(tempZip.Name())
if _, err := io.Copy(tempZip, br); err != nil {
return err
}
parentDataDir := filepath.Dir(app.DataDir())
extractedDataDir := filepath.Join(parentDataDir, "pb_restore_"+security.PseudorandomString(4))
defer os.RemoveAll(extractedDataDir)
if err := archive.Extract(tempZip.Name(), extractedDataDir); err != nil {
return err
}
// ensure that a database file exists
extractedDB := filepath.Join(extractedDataDir, "data.db")
if _, err := os.Stat(extractedDB); err != nil {
return fmt.Errorf("data.db file is missing or invalid: %w", err)
}
// remove the extracted zip file since we no longer need it
// (this is in case the app restarts and the defer calls are not called)
if err := os.Remove(tempZip.Name()); err != nil && app.IsDebug() {
log.Println(err)
}
// make sure that a special temp directory exists in the extracted one
if err := os.MkdirAll(filepath.Join(extractedDataDir, LocalTempDirName), os.ModePerm); err != nil {
return fmt.Errorf("failed to create a temp dir: %w", err)
}
// move the current pb_data to a special temp location that will
// hold the old data between dirs replace
// (the temp dir will be automatically removed on the next app start)
oldTempDataDir := filepath.Join(extractedDataDir, LocalTempDirName, "old_pb_data")
if err := os.Rename(app.DataDir(), oldTempDataDir); err != nil {
return fmt.Errorf("failed to move the current pb_data to a temp location: %w", err)
}
// "restore", aka. set the extracted backup as the new pb_data directory
if err := os.Rename(extractedDataDir, app.DataDir()); err != nil {
return fmt.Errorf("failed to set the extracted backup as pb_data dir: %w", err)
}
// update the old temp data dir path after the restore
oldTempDataDir = filepath.Join(app.DataDir(), LocalTempDirName, "old_pb_data")
oldLocalBackupsDir := filepath.Join(oldTempDataDir, LocalBackupsDirName)
newLocalBackupsDir := filepath.Join(app.DataDir(), LocalBackupsDirName)
revertDataDirChanges := func(revertLocalBackupsDir bool) error {
if revertLocalBackupsDir {
if _, err := os.Stat(newLocalBackupsDir); err == nil {
if err := os.Rename(newLocalBackupsDir, oldLocalBackupsDir); err != nil {
return fmt.Errorf("failed to revert the backups dir change: %w", err)
}
}
}
if err := os.Rename(app.DataDir(), extractedDataDir); err != nil {
return fmt.Errorf("failed to revert the extracted dir change: %w", err)
}
if err := os.Rename(oldTempDataDir, app.DataDir()); err != nil {
return fmt.Errorf("failed to revert old pb_data dir change: %w", err)
}
return nil
}
// restore the local pb_data/backups dir (if any)
if _, err := os.Stat(oldLocalBackupsDir); err == nil {
if err := os.Rename(oldLocalBackupsDir, newLocalBackupsDir); err != nil {
if err := revertDataDirChanges(false); err != nil && app.IsDebug() {
log.Println(err)
}
return fmt.Errorf("failed to move the local pb_data/backups dir: %w", err)
}
}
// restart the app
if err := app.Restart(); err != nil {
if err := revertDataDirChanges(true); err != nil {
panic(err)
}
return fmt.Errorf("failed to restart the app process: %w", err)
}
return nil
}
// initAutobackupHooks registers the autobackup app serve hooks.
// @todo add tests
func (app *BaseApp) initAutobackupHooks() error {
c := cron.New()
loadJob := func() {
c.Stop()
rawSchedule := app.Settings().Backups.Cron
if rawSchedule == "" || !app.IsBootstrapped() {
return
}
c.Add("@autobackup", rawSchedule, func() {
autoPrefix := "@auto_pb_backup_"
name := fmt.Sprintf(
"%s%s.zip",
autoPrefix,
time.Now().UTC().Format("20060102150405"),
)
if err := app.CreateBackup(context.Background(), name); err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
}
maxKeep := app.Settings().Backups.CronMaxKeep
if maxKeep == 0 {
return // no explicit limit
}
fsys, err := app.NewBackupsFilesystem()
if err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
return
}
defer fsys.Close()
files, err := fsys.List(autoPrefix)
if err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
return
}
if maxKeep >= len(files) {
return // nothing to remove
}
// sort desc
sort.Slice(files, func(i, j int) bool {
return files[i].ModTime.After(files[j].ModTime)
})
// keep only the most recent n auto backup files
toRemove := files[maxKeep:]
for _, f := range toRemove {
if err := fsys.Delete(f.Key); err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
}
}
})
// restart the ticker
c.Start()
}
// load on app serve
app.OnBeforeServe().Add(func(e *ServeEvent) error {
loadJob()
return nil
})
// stop the ticker on app termination
app.OnTerminate().Add(func(e *TerminateEvent) error {
c.Stop()
return nil
})
// reload on app settings change
app.OnModelAfterUpdate((&models.Param{}).TableName()).Add(func(e *ModelEvent) error {
if !c.HasStarted() {
return nil // no need to reload as it hasn't been started yet
}
p := e.Model.(*models.Param)
if p == nil || p.Key != models.ParamAppSettings {
return nil
}
loadJob()
return nil
})
return nil
}
| core/base_backup.go | 1 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.0011134513188153505,
0.0002192515239585191,
0.00016292055079247802,
0.0001693851372692734,
0.00017134909285232425
] |
{
"id": 4,
"code_window": [
"\n",
"\treturn runInterceptors(form.Settings, func(s *settings.Settings) error {\n",
"\t\tform.Settings = s\n",
"\n",
"\t\tencryptionKey := os.Getenv(form.app.EncryptionEnv())\n",
"\t\tif err := form.dao.SaveSettings(form.Settings, encryptionKey); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\toldSettings, err := form.app.Settings().Clone();\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\t// eagerly merge the application settings with the form ones\n",
"\t\tif err := form.app.Settings().Merge(form.Settings); err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\t// persists settings change\n"
],
"file_path": "forms/settings_upsert.go",
"type": "add",
"edit_start_line_idx": 60
} | package validators
import (
"fmt"
"net/url"
"regexp"
"strings"
validation "github.com/go-ozzo/ozzo-validation/v4"
"github.com/go-ozzo/ozzo-validation/v4/is"
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/daos"
"github.com/pocketbase/pocketbase/models"
"github.com/pocketbase/pocketbase/models/schema"
"github.com/pocketbase/pocketbase/tools/filesystem"
"github.com/pocketbase/pocketbase/tools/list"
"github.com/pocketbase/pocketbase/tools/types"
)
var requiredErr = validation.NewError("validation_required", "Missing required value")
// NewRecordDataValidator creates new [models.Record] data validator
// using the provided record constraints and schema.
//
// Example:
//
// validator := NewRecordDataValidator(app.Dao(), record, nil)
// err := validator.Validate(map[string]any{"test":123})
func NewRecordDataValidator(
dao *daos.Dao,
record *models.Record,
uploadedFiles map[string][]*filesystem.File,
) *RecordDataValidator {
return &RecordDataValidator{
dao: dao,
record: record,
uploadedFiles: uploadedFiles,
}
}
// RecordDataValidator defines a model.Record data validator
// using the provided record constraints and schema.
type RecordDataValidator struct {
dao *daos.Dao
record *models.Record
uploadedFiles map[string][]*filesystem.File
}
// Validate validates the provided `data` by checking it against
// the validator record constraints and schema.
func (validator *RecordDataValidator) Validate(data map[string]any) error {
keyedSchema := validator.record.Collection().Schema.AsMap()
if len(keyedSchema) == 0 {
return nil // no fields to check
}
if len(data) == 0 {
return validation.NewError("validation_empty_data", "No data to validate")
}
errs := validation.Errors{}
// check for unknown fields
for key := range data {
if _, ok := keyedSchema[key]; !ok {
errs[key] = validation.NewError("validation_unknown_field", "Unknown field")
}
}
if len(errs) > 0 {
return errs
}
for key, field := range keyedSchema {
// normalize value to emulate the same behavior
// when fetching or persisting the record model
value := field.PrepareValue(data[key])
// check required constraint
if field.Required && validation.Required.Validate(value) != nil {
errs[key] = requiredErr
continue
}
// validate field value by its field type
if err := validator.checkFieldValue(field, value); err != nil {
errs[key] = err
continue
}
}
if len(errs) == 0 {
return nil
}
return errs
}
func (validator *RecordDataValidator) checkFieldValue(field *schema.SchemaField, value any) error {
switch field.Type {
case schema.FieldTypeText:
return validator.checkTextValue(field, value)
case schema.FieldTypeNumber:
return validator.checkNumberValue(field, value)
case schema.FieldTypeBool:
return validator.checkBoolValue(field, value)
case schema.FieldTypeEmail:
return validator.checkEmailValue(field, value)
case schema.FieldTypeUrl:
return validator.checkUrlValue(field, value)
case schema.FieldTypeEditor:
return validator.checkEditorValue(field, value)
case schema.FieldTypeDate:
return validator.checkDateValue(field, value)
case schema.FieldTypeSelect:
return validator.checkSelectValue(field, value)
case schema.FieldTypeJson:
return validator.checkJsonValue(field, value)
case schema.FieldTypeFile:
return validator.checkFileValue(field, value)
case schema.FieldTypeRelation:
return validator.checkRelationValue(field, value)
}
return nil
}
func (validator *RecordDataValidator) checkTextValue(field *schema.SchemaField, value any) error {
val, _ := value.(string)
if val == "" {
return nil // nothing to check (skip zero-defaults)
}
options, _ := field.Options.(*schema.TextOptions)
if options.Min != nil && len(val) < *options.Min {
return validation.NewError("validation_min_text_constraint", fmt.Sprintf("Must be at least %d character(s)", *options.Min))
}
if options.Max != nil && len(val) > *options.Max {
return validation.NewError("validation_max_text_constraint", fmt.Sprintf("Must be less than %d character(s)", *options.Max))
}
if options.Pattern != "" {
match, _ := regexp.MatchString(options.Pattern, val)
if !match {
return validation.NewError("validation_invalid_format", "Invalid value format")
}
}
return nil
}
func (validator *RecordDataValidator) checkNumberValue(field *schema.SchemaField, value any) error {
val, _ := value.(float64)
if val == 0 {
return nil // nothing to check (skip zero-defaults)
}
options, _ := field.Options.(*schema.NumberOptions)
if options.Min != nil && val < *options.Min {
return validation.NewError("validation_min_number_constraint", fmt.Sprintf("Must be larger than %f", *options.Min))
}
if options.Max != nil && val > *options.Max {
return validation.NewError("validation_max_number_constraint", fmt.Sprintf("Must be less than %f", *options.Max))
}
return nil
}
func (validator *RecordDataValidator) checkBoolValue(field *schema.SchemaField, value any) error {
return nil
}
func (validator *RecordDataValidator) checkEmailValue(field *schema.SchemaField, value any) error {
val, _ := value.(string)
if val == "" {
return nil // nothing to check
}
if is.EmailFormat.Validate(val) != nil {
return validation.NewError("validation_invalid_email", "Must be a valid email")
}
options, _ := field.Options.(*schema.EmailOptions)
domain := val[strings.LastIndex(val, "@")+1:]
// only domains check
if len(options.OnlyDomains) > 0 && !list.ExistInSlice(domain, options.OnlyDomains) {
return validation.NewError("validation_email_domain_not_allowed", "Email domain is not allowed")
}
// except domains check
if len(options.ExceptDomains) > 0 && list.ExistInSlice(domain, options.ExceptDomains) {
return validation.NewError("validation_email_domain_not_allowed", "Email domain is not allowed")
}
return nil
}
func (validator *RecordDataValidator) checkUrlValue(field *schema.SchemaField, value any) error {
val, _ := value.(string)
if val == "" {
return nil // nothing to check
}
if is.URL.Validate(val) != nil {
return validation.NewError("validation_invalid_url", "Must be a valid url")
}
options, _ := field.Options.(*schema.UrlOptions)
// extract host/domain
u, _ := url.Parse(val)
host := u.Host
// only domains check
if len(options.OnlyDomains) > 0 && !list.ExistInSlice(host, options.OnlyDomains) {
return validation.NewError("validation_url_domain_not_allowed", "Url domain is not allowed")
}
// except domains check
if len(options.ExceptDomains) > 0 && list.ExistInSlice(host, options.ExceptDomains) {
return validation.NewError("validation_url_domain_not_allowed", "Url domain is not allowed")
}
return nil
}
func (validator *RecordDataValidator) checkEditorValue(field *schema.SchemaField, value any) error {
return nil
}
func (validator *RecordDataValidator) checkDateValue(field *schema.SchemaField, value any) error {
val, _ := value.(types.DateTime)
if val.IsZero() {
if field.Required {
return requiredErr
}
return nil // nothing to check
}
options, _ := field.Options.(*schema.DateOptions)
if !options.Min.IsZero() {
if err := validation.Min(options.Min.Time()).Validate(val.Time()); err != nil {
return err
}
}
if !options.Max.IsZero() {
if err := validation.Max(options.Max.Time()).Validate(val.Time()); err != nil {
return err
}
}
return nil
}
func (validator *RecordDataValidator) checkSelectValue(field *schema.SchemaField, value any) error {
normalizedVal := list.ToUniqueStringSlice(value)
if len(normalizedVal) == 0 {
if field.Required {
return requiredErr
}
return nil // nothing to check
}
options, _ := field.Options.(*schema.SelectOptions)
// check max selected items
if len(normalizedVal) > options.MaxSelect {
return validation.NewError("validation_too_many_values", fmt.Sprintf("Select no more than %d", options.MaxSelect))
}
// check against the allowed values
for _, val := range normalizedVal {
if !list.ExistInSlice(val, options.Values) {
return validation.NewError("validation_invalid_value", "Invalid value "+val)
}
}
return nil
}
var emptyJsonValues = []string{
"null", `""`, "[]", "{}",
}
func (validator *RecordDataValidator) checkJsonValue(field *schema.SchemaField, value any) error {
if is.JSON.Validate(value) != nil {
return validation.NewError("validation_invalid_json", "Must be a valid json value")
}
raw, _ := types.ParseJsonRaw(value)
rawStr := strings.TrimSpace(raw.String())
if field.Required && list.ExistInSlice(rawStr, emptyJsonValues) {
return requiredErr
}
return nil
}
func (validator *RecordDataValidator) checkFileValue(field *schema.SchemaField, value any) error {
names := list.ToUniqueStringSlice(value)
if len(names) == 0 && field.Required {
return requiredErr
}
options, _ := field.Options.(*schema.FileOptions)
if len(names) > options.MaxSelect {
return validation.NewError("validation_too_many_values", fmt.Sprintf("Select no more than %d", options.MaxSelect))
}
// extract the uploaded files
files := make([]*filesystem.File, 0, len(validator.uploadedFiles[field.Name]))
for _, file := range validator.uploadedFiles[field.Name] {
if list.ExistInSlice(file.Name, names) {
files = append(files, file)
}
}
for _, file := range files {
// check size
if err := UploadedFileSize(options.MaxSize)(file); err != nil {
return err
}
// check type
if len(options.MimeTypes) > 0 {
if err := UploadedFileMimeType(options.MimeTypes)(file); err != nil {
return err
}
}
}
return nil
}
func (validator *RecordDataValidator) checkRelationValue(field *schema.SchemaField, value any) error {
ids := list.ToUniqueStringSlice(value)
if len(ids) == 0 {
if field.Required {
return requiredErr
}
return nil // nothing to check
}
options, _ := field.Options.(*schema.RelationOptions)
if options.MinSelect != nil && len(ids) < *options.MinSelect {
return validation.NewError("validation_not_enough_values", fmt.Sprintf("Select at least %d", *options.MinSelect))
}
if options.MaxSelect != nil && len(ids) > *options.MaxSelect {
return validation.NewError("validation_too_many_values", fmt.Sprintf("Select no more than %d", *options.MaxSelect))
}
// check if the related records exist
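	// (a single count query is used instead of loading every related record)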
// ---
relCollection, err := validator.dao.FindCollectionByNameOrId(options.CollectionId)
if err != nil {
return validation.NewError("validation_missing_rel_collection", "Relation connection is missing or cannot be accessed")
}
var total int
validator.dao.RecordQuery(relCollection).
Select("count(*)").
AndWhere(dbx.In("id", list.ToInterfaceSlice(ids)...)).
Row(&total)
if total != len(ids) {
return validation.NewError("validation_missing_rel_records", "Failed to fetch all relation records with the provided ids")
}
// ---
return nil
}
| forms/validators/record_data.go | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.0010679536499083042,
0.00021273661695886403,
0.00016352908278349787,
0.00017146441678050905,
0.00018354947678744793
] |
{
"id": 4,
"code_window": [
"\n",
"\treturn runInterceptors(form.Settings, func(s *settings.Settings) error {\n",
"\t\tform.Settings = s\n",
"\n",
"\t\tencryptionKey := os.Getenv(form.app.EncryptionEnv())\n",
"\t\tif err := form.dao.SaveSettings(form.Settings, encryptionKey); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\toldSettings, err := form.app.Settings().Clone();\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\t// eagerly merge the application settings with the form ones\n",
"\t\tif err := form.app.Settings().Merge(form.Settings); err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\t// persists settings change\n"
],
"file_path": "forms/settings_upsert.go",
"type": "add",
"edit_start_line_idx": 60
} | <?xml version="1.0" encoding="UTF-8"?>
<svg width="256px" height="256px" viewBox="0 0 256 256" version="1.1" xmlns="http://www.w3.org/2000/svg" preserveAspectRatio="xMidYMid">
<title>Facebook</title>
<g>
<path d="M256,128 C256,57.3075 198.6925,0 128,0 C57.3075,0 0,57.3075 0,128 C0,191.8885 46.80775,244.8425 108,254.445 L108,165 L75.5,165 L75.5,128 L108,128 L108,99.8 C108,67.72 127.1095,50 156.3475,50 C170.35175,50 185,52.5 185,52.5 L185,84 L168.8595,84 C152.95875,84 148,93.86675 148,103.98925 L148,128 L183.5,128 L177.825,165 L148,165 L148,254.445 C209.19225,244.8425 256,191.8885 256,128" fill="#1877F2"></path>
<path d="M177.825,165 L183.5,128 L148,128 L148,103.98925 C148,93.86675 152.95875,84 168.8595,84 L185,84 L185,52.5 C185,52.5 170.35175,50 156.3475,50 C127.1095,50 108,67.72 108,99.8 L108,128 L75.5,128 L75.5,165 L108,165 L108,254.445 C114.51675,255.4675 121.196,256 128,256 C134.804,256 141.48325,255.4675 148,254.445 L148,165 L177.825,165" fill="#FFFFFF"></path>
</g>
</svg>
| ui/public/images/oauth2/facebook.svg | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.00016380983288399875,
0.00016380983288399875,
0.00016380983288399875,
0.00016380983288399875,
0
] |
{
"id": 4,
"code_window": [
"\n",
"\treturn runInterceptors(form.Settings, func(s *settings.Settings) error {\n",
"\t\tform.Settings = s\n",
"\n",
"\t\tencryptionKey := os.Getenv(form.app.EncryptionEnv())\n",
"\t\tif err := form.dao.SaveSettings(form.Settings, encryptionKey); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\toldSettings, err := form.app.Settings().Clone();\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\t// eagerly merge the application settings with the form ones\n",
"\t\tif err := form.app.Settings().Merge(form.Settings); err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\t// persists settings change\n"
],
"file_path": "forms/settings_upsert.go",
"type": "add",
"edit_start_line_idx": 60
} | package tests
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/labstack/echo/v5"
"github.com/pocketbase/pocketbase/apis"
)
// ApiScenario defines a single api request test case/scenario.
type ApiScenario struct {
Name string
Method string
Url string
Body io.Reader
RequestHeaders map[string]string
	// Delay adds a delay before checking the expectations, usually
	// to ensure that all fired non-awaited goroutines have finished.
Delay time.Duration
// expectations
// ---
ExpectedStatus int
ExpectedContent []string
NotExpectedContent []string
ExpectedEvents map[string]int
// test hooks
// ---
TestAppFactory func() (*TestApp, error)
BeforeTestFunc func(t *testing.T, app *TestApp, e *echo.Echo)
AfterTestFunc func(t *testing.T, app *TestApp, e *echo.Echo)
}
// Test executes the test case/scenario.
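//
// A minimal usage sketch (the endpoint and expectations below are only illustrative):
//
//	(&tests.ApiScenario{
//		Method:          http.MethodGet,
//		Url:             "/api/health",
//		ExpectedStatus:  200,
//		ExpectedContent: []string{`"code":200`},
//	}).Test(t)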
func (scenario *ApiScenario) Test(t *testing.T) {
var testApp *TestApp
var testAppErr error
if scenario.TestAppFactory != nil {
testApp, testAppErr = scenario.TestAppFactory()
} else {
testApp, testAppErr = NewTestApp()
}
if testAppErr != nil {
t.Fatalf("Failed to initialize the test app instance: %v", testAppErr)
}
defer testApp.Cleanup()
e, err := apis.InitApi(testApp)
if err != nil {
t.Fatal(err)
}
if scenario.BeforeTestFunc != nil {
scenario.BeforeTestFunc(t, testApp, e)
}
recorder := httptest.NewRecorder()
req := httptest.NewRequest(scenario.Method, scenario.Url, scenario.Body)
// add middleware to timeout long-running requests (eg. keep-alive routes)
e.Pre(func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
ctx, cancelFunc := context.WithTimeout(c.Request().Context(), 100*time.Millisecond)
defer cancelFunc()
c.SetRequest(c.Request().Clone(ctx))
return next(c)
}
})
// set default header
req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)
// set scenario headers
for k, v := range scenario.RequestHeaders {
req.Header.Set(k, v)
}
// execute request
e.ServeHTTP(recorder, req)
res := recorder.Result()
var prefix = scenario.Name
if prefix == "" {
prefix = fmt.Sprintf("%s:%s", scenario.Method, scenario.Url)
}
if res.StatusCode != scenario.ExpectedStatus {
t.Errorf("[%s] Expected status code %d, got %d", prefix, scenario.ExpectedStatus, res.StatusCode)
}
if scenario.Delay > 0 {
time.Sleep(scenario.Delay)
}
if len(scenario.ExpectedContent) == 0 && len(scenario.NotExpectedContent) == 0 {
if len(recorder.Body.Bytes()) != 0 {
t.Errorf("[%s] Expected empty body, got \n%v", prefix, recorder.Body.String())
}
} else {
// normalize json response format
buffer := new(bytes.Buffer)
err := json.Compact(buffer, recorder.Body.Bytes())
var normalizedBody string
if err != nil {
// not a json...
normalizedBody = recorder.Body.String()
} else {
normalizedBody = buffer.String()
}
for _, item := range scenario.ExpectedContent {
if !strings.Contains(normalizedBody, item) {
t.Errorf("[%s] Cannot find %v in response body \n%v", prefix, item, normalizedBody)
break
}
}
for _, item := range scenario.NotExpectedContent {
if strings.Contains(normalizedBody, item) {
t.Errorf("[%s] Didn't expect %v in response body \n%v", prefix, item, normalizedBody)
break
}
}
}
// to minimize the breaking changes we always expect the error
// events to be called on API error
if res.StatusCode >= 400 {
if scenario.ExpectedEvents == nil {
scenario.ExpectedEvents = map[string]int{}
}
if _, ok := scenario.ExpectedEvents["OnBeforeApiError"]; !ok {
scenario.ExpectedEvents["OnBeforeApiError"] = 1
}
if _, ok := scenario.ExpectedEvents["OnAfterApiError"]; !ok {
scenario.ExpectedEvents["OnAfterApiError"] = 1
}
}
if len(testApp.EventCalls) > len(scenario.ExpectedEvents) {
t.Errorf("[%s] Expected events %v, got %v", prefix, scenario.ExpectedEvents, testApp.EventCalls)
}
for event, expectedCalls := range scenario.ExpectedEvents {
actualCalls := testApp.EventCalls[event]
if actualCalls != expectedCalls {
t.Errorf("[%s] Expected event %s to be called %d, got %d", prefix, event, expectedCalls, actualCalls)
}
}
// @todo consider adding the response body to the AfterTestFunc args
if scenario.AfterTestFunc != nil {
scenario.AfterTestFunc(t, testApp, e)
}
}
| tests/api.go | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.0008560327696613967,
0.00021159605239517987,
0.00016305777535308152,
0.00017198576824739575,
0.00016114412574097514
] |
{
"id": 5,
"code_window": [
"\t\tencryptionKey := os.Getenv(form.app.EncryptionEnv())\n",
"\t\tif err := form.dao.SaveSettings(form.Settings, encryptionKey); err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\t// explicitly trigger old logs deletion\n",
"\t\tform.app.LogsDao().DeleteOldRequests(\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// try to revert app settings\n",
"\t\t\tform.app.Settings().Merge(oldSettings)\n",
"\n"
],
"file_path": "forms/settings_upsert.go",
"type": "add",
"edit_start_line_idx": 62
} | package forms
import (
"os"
"time"
"github.com/pocketbase/pocketbase/core"
"github.com/pocketbase/pocketbase/daos"
"github.com/pocketbase/pocketbase/models/settings"
)
// SettingsUpsert is a [settings.Settings] upsert (create/update) form.
type SettingsUpsert struct {
*settings.Settings
app core.App
dao *daos.Dao
}
// NewSettingsUpsert creates a new [SettingsUpsert] form with initializer
// config created from the provided [core.App] instance.
//
// If you want to submit the form as part of a transaction,
// you can change the default Dao via [SetDao()].
func NewSettingsUpsert(app core.App) *SettingsUpsert {
form := &SettingsUpsert{
app: app,
dao: app.Dao(),
}
// load the application settings into the form
form.Settings, _ = app.Settings().Clone()
return form
}
// SetDao replaces the default form Dao instance with the provided one.
func (form *SettingsUpsert) SetDao(dao *daos.Dao) {
form.dao = dao
}
// Validate makes the form validatable by implementing [validation.Validatable] interface.
func (form *SettingsUpsert) Validate() error {
return form.Settings.Validate()
}
// Submit validates the form and upserts the loaded settings.
//
// On success the app settings will be refreshed with the form ones.
//
// You can optionally provide a list of InterceptorFunc to further
// modify the form behavior before persisting it.
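//
// A minimal usage sketch (the Meta.AppName value is only illustrative):
//
//	form := forms.NewSettingsUpsert(app)
//	form.Meta.AppName = "Acme"
//	if err := form.Submit(); err != nil {
//		// handle the validation/persist error
//	}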
func (form *SettingsUpsert) Submit(interceptors ...InterceptorFunc[*settings.Settings]) error {
if err := form.Validate(); err != nil {
return err
}
return runInterceptors(form.Settings, func(s *settings.Settings) error {
form.Settings = s
encryptionKey := os.Getenv(form.app.EncryptionEnv())
if err := form.dao.SaveSettings(form.Settings, encryptionKey); err != nil {
return err
}
// explicitly trigger old logs deletion
form.app.LogsDao().DeleteOldRequests(
time.Now().AddDate(0, 0, -1*form.Settings.Logs.MaxDays),
)
if form.Settings.Logs.MaxDays == 0 {
// no logs are allowed -> reclaim preserved disk space after the previous delete operation
form.app.LogsDao().Vacuum()
}
// merge the application settings with the form ones
return form.app.Settings().Merge(form.Settings)
}, interceptors...)
}
| forms/settings_upsert.go | 1 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.9984927177429199,
0.12658636271953583,
0.00015968580555636436,
0.0024970739614218473,
0.32955098152160645
] |
{
"id": 5,
"code_window": [
"\t\tencryptionKey := os.Getenv(form.app.EncryptionEnv())\n",
"\t\tif err := form.dao.SaveSettings(form.Settings, encryptionKey); err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\t// explicitly trigger old logs deletion\n",
"\t\tform.app.LogsDao().DeleteOldRequests(\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// try to revert app settings\n",
"\t\t\tform.app.Settings().Merge(oldSettings)\n",
"\n"
],
"file_path": "forms/settings_upsert.go",
"type": "add",
"edit_start_line_idx": 62
} | <script>
/**
* @todo consider combining with the CodeEditor component.
*
     * This component uses the CodeMirror editor under the hood and it's a "little heavy".
     * To allow manual chunking it is recommended to load the component lazily!
*
* Example usage:
* ```
* <script>
* import { onMount } from "svelte";
*
* let inputComponent;
*
* onMount(async () => {
* try {
* inputComponent = (await import("@/components/base/FilterAutocompleteInput.svelte")).default;
* } catch (err) {
* console.warn(err);
* }
* });
* <//script>
*
* ...
*
* <svelte:component
* this={inputComponent}
* bind:value={value}
* baseCollection={baseCollection}
* disabled={disabled}
* />
* ```
*/
import { onMount, createEventDispatcher } from "svelte";
import { collections } from "@/stores/collections";
import CommonHelper from "@/utils/CommonHelper";
// code mirror imports
// ---
import {
keymap,
highlightSpecialChars,
drawSelection,
dropCursor,
rectangularSelection,
highlightActiveLineGutter,
EditorView,
placeholder as placeholderExt,
} from "@codemirror/view";
import { EditorState, Compartment } from "@codemirror/state";
import {
defaultHighlightStyle,
syntaxHighlighting,
bracketMatching,
StreamLanguage,
} from "@codemirror/language";
import { defaultKeymap, history, historyKeymap } from "@codemirror/commands";
import { searchKeymap, highlightSelectionMatches } from "@codemirror/search";
import {
autocompletion,
completionKeymap,
closeBrackets,
closeBracketsKeymap,
} from "@codemirror/autocomplete";
import { simpleMode } from "@codemirror/legacy-modes/mode/simple-mode";
// ---
const dispatch = createEventDispatcher();
export let id = "";
export let value = "";
export let disabled = false;
export let placeholder = "";
export let baseCollection = null;
export let singleLine = false;
export let extraAutocompleteKeys = []; // eg. ["test1", "test2"]
export let disableRequestKeys = false;
export let disableIndirectCollectionsKeys = false;
let editor;
let container;
let oldDisabledState = disabled;
let langCompartment = new Compartment();
let editableCompartment = new Compartment();
let readOnlyCompartment = new Compartment();
let placeholderCompartment = new Compartment();
let cachedCollections = [];
let cachedRequestKeys = [];
let cachedIndirectCollectionKeys = [];
let cachedBaseKeys = [];
let baseKeysChangeHash = "";
let oldBaseKeysChangeHash = "";
$: baseKeysChangeHash = getCollectionKeysChangeHash(baseCollection);
$: if (
!disabled &&
(oldBaseKeysChangeHash != baseKeysChangeHash ||
disableRequestKeys !== -1 ||
disableIndirectCollectionsKeys !== -1)
) {
oldBaseKeysChangeHash = baseKeysChangeHash;
refreshCachedKeys();
}
$: if (id) {
addLabelListeners();
}
$: if (editor && baseCollection?.schema) {
editor.dispatch({
effects: [langCompartment.reconfigure(ruleLang())],
});
}
$: if (editor && oldDisabledState != disabled) {
editor.dispatch({
effects: [
editableCompartment.reconfigure(EditorView.editable.of(!disabled)),
readOnlyCompartment.reconfigure(EditorState.readOnly.of(disabled)),
],
});
oldDisabledState = disabled;
triggerNativeChange();
}
$: if (editor && value != editor.state.doc.toString()) {
editor.dispatch({
changes: {
from: 0,
to: editor.state.doc.length,
insert: value,
},
});
}
$: if (editor && typeof placeholder !== "undefined") {
editor.dispatch({
effects: [placeholderCompartment.reconfigure(placeholderExt(placeholder))],
});
}
// Focus the editor (if inited).
export function focus() {
editor?.focus();
}
let refreshDebounceId = null;
// Refresh the cached autocomplete keys.
function refreshCachedKeys() {
clearTimeout(refreshDebounceId);
refreshDebounceId = setTimeout(() => {
cachedCollections = concatWithBaseCollection($collections);
cachedBaseKeys = getBaseKeys();
cachedRequestKeys = !disableRequestKeys ? getRequestKeys() : [];
cachedIndirectCollectionKeys = !disableIndirectCollectionsKeys ? getIndirectCollectionKeys() : [];
}, 300);
}
// Return a collection keys hash string that can be used to compare with previous states.
function getCollectionKeysChangeHash(collection) {
return JSON.stringify([collection?.name, collection?.type, collection?.schema]);
}
// Merge the base collection in a new list with the provided collections.
function concatWithBaseCollection(collections) {
let copy = collections.slice();
if (baseCollection) {
CommonHelper.pushOrReplaceByKey(copy, baseCollection, "id");
}
return copy;
}
// Emulate native change event for the editor container element.
function triggerNativeChange() {
container?.dispatchEvent(
new CustomEvent("change", {
detail: { value },
bubbles: true,
})
);
}
// Remove any attached label listeners.
function removeLabelListeners() {
if (!id) {
return;
}
const labels = document.querySelectorAll('[for="' + id + '"]');
for (let label of labels) {
label.removeEventListener("click", focus);
}
}
// Add `<label for="ID">...</label>` focus support.
function addLabelListeners() {
if (!id) {
return;
}
removeLabelListeners();
const labels = document.querySelectorAll('[for="' + id + '"]');
for (let label of labels) {
label.addEventListener("click", focus);
}
}
// Returns a list with all collection field keys recursively.
function getCollectionFieldKeys(nameOrId, prefix = "", level = 0) {
let collection = cachedCollections.find((item) => item.name == nameOrId || item.id == nameOrId);
if (!collection || level >= 4) {
return [];
}
let result = CommonHelper.getAllCollectionIdentifiers(collection, prefix);
for (const field of collection.schema) {
const key = prefix + field.name;
// add relation fields
if (field.type === "relation" && field.options?.collectionId) {
const subKeys = getCollectionFieldKeys(field.options.collectionId, key + ".", level + 1);
if (subKeys.length) {
result = result.concat(subKeys);
}
}
// add ":each" field modifier
if (field.type === "select" && field.options?.maxSelect != 1) {
result.push(key + ":each");
}
// add ":length" field modifier to arrayble fields
if (field.options?.maxSelect != 1 && ["select", "file", "relation"].includes(field.type)) {
result.push(key + ":length");
}
}
return result;
}
// Returns baseCollection keys.
function getBaseKeys() {
return getCollectionFieldKeys(baseCollection?.name);
}
// Returns @request.* keys.
function getRequestKeys() {
const result = [];
result.push("@request.method");
result.push("@request.query.");
result.push("@request.data.");
result.push("@request.headers.");
result.push("@request.auth.id");
result.push("@request.auth.collectionId");
result.push("@request.auth.collectionName");
result.push("@request.auth.verified");
result.push("@request.auth.username");
result.push("@request.auth.email");
result.push("@request.auth.emailVisibility");
result.push("@request.auth.created");
result.push("@request.auth.updated");
// load auth collection fields
const authCollections = cachedCollections.filter((collection) => collection.$isAuth);
for (const collection of authCollections) {
const authKeys = getCollectionFieldKeys(collection.id, "@request.auth.");
for (const k of authKeys) {
CommonHelper.pushUnique(result, k);
}
}
// load base collection fields into @request.data.*
const issetExcludeList = ["created", "updated"];
if (baseCollection?.id) {
const keys = getCollectionFieldKeys(baseCollection.name, "@request.data.");
for (const key of keys) {
result.push(key);
// add ":isset" modifier to non-base keys
const parts = key.split(".");
if (
parts.length === 3 &&
// doesn't contain another modifier
parts[2].indexOf(":") === -1 &&
// is not from the exclude list
!issetExcludeList.includes(parts[2])
) {
result.push(key + ":isset");
}
}
}
return result;
}
// Returns @collection.* keys.
function getIndirectCollectionKeys() {
const result = [];
for (const collection of cachedCollections) {
const prefix = "@collection." + collection.name + ".";
const keys = getCollectionFieldKeys(collection.name, prefix);
for (const key of keys) {
result.push(key);
}
}
return result;
}
// Returns an array with all the supported keys.
function getAllKeys(includeRequestKeys = true, includeIndirectCollectionsKeys = true) {
let result = [].concat(extraAutocompleteKeys);
// add base keys
result = result.concat(cachedBaseKeys || []);
// add @request.* keys
if (includeRequestKeys) {
result = result.concat(cachedRequestKeys || []);
}
// add @collections.* keys
if (includeIndirectCollectionsKeys) {
result = result.concat(cachedIndirectCollectionKeys || []);
}
// sort longer keys first because the highlighter will highlight
// the first match and stops until an operator is found
result.sort(function (a, b) {
return b.length - a.length;
});
return result;
}
// Returns object with all the completions matching the context.
function completions(context) {
let word = context.matchBefore(/[\'\"\@\w\.]*/);
if (word && word.from == word.to && !context.explicit) {
return null;
}
let options = [{ label: "false" }, { label: "true" }, { label: "@now" }];
if (!disableIndirectCollectionsKeys) {
options.push({ label: "@collection.*", apply: "@collection." });
}
const keys = getAllKeys(!disableRequestKeys, !disableRequestKeys && word.text.startsWith("@c"));
for (const key of keys) {
options.push({
label: key.endsWith(".") ? key + "*" : key,
apply: key,
});
}
return {
from: word.from,
options: options,
};
}
// Creates a new language mode.
// @see https://codemirror.net/5/demo/simplemode.html
function ruleLang() {
return StreamLanguage.define(
simpleMode({
start: [
// base literals
{
regex: /true|false|null/,
token: "atom",
},
// double quoted string
{ regex: /"(?:[^\\]|\\.)*?(?:"|$)/, token: "string" },
// single quoted string
{ regex: /'(?:[^\\]|\\.)*?(?:'|$)/, token: "string" },
// numbers
{
regex: /0x[a-f\d]+|[-+]?(?:\.\d+|\d+\.?\d*)(?:e[-+]?\d+)?/i,
token: "number",
},
// operators
{
regex: /\&\&|\|\||\=|\!\=|\~|\!\~|\>|\<|\>\=|\<\=/,
token: "operator",
},
// indent and dedent properties guide autoindentation
{ regex: /[\{\[\(]/, indent: true },
{ regex: /[\}\]\)]/, dedent: true },
// keywords
{ regex: /\w+[\w\.]*\w+/, token: "keyword" },
{ regex: CommonHelper.escapeRegExp("@now"), token: "keyword" },
{ regex: CommonHelper.escapeRegExp("@request.method"), token: "keyword" },
],
})
);
}
onMount(() => {
const submitShortcut = {
key: "Enter",
run: (_) => {
// trigger submit on enter for singleline input
if (singleLine) {
dispatch("submit", value);
}
},
};
addLabelListeners();
editor = new EditorView({
parent: container,
state: EditorState.create({
doc: value,
extensions: [
highlightActiveLineGutter(),
highlightSpecialChars(),
history(),
drawSelection(),
dropCursor(),
EditorState.allowMultipleSelections.of(true),
syntaxHighlighting(defaultHighlightStyle, { fallback: true }),
bracketMatching(),
closeBrackets(),
rectangularSelection(),
highlightSelectionMatches(),
keymap.of([
submitShortcut,
...closeBracketsKeymap,
...defaultKeymap,
searchKeymap.find((item) => item.key === "Mod-d"),
...historyKeymap,
...completionKeymap,
]),
EditorView.lineWrapping,
autocompletion({
override: [completions],
icons: false,
}),
placeholderCompartment.of(placeholderExt(placeholder)),
editableCompartment.of(EditorView.editable.of(!disabled)),
readOnlyCompartment.of(EditorState.readOnly.of(disabled)),
langCompartment.of(ruleLang()),
EditorState.transactionFilter.of((tr) => {
return singleLine && tr.newDoc.lines > 1 ? [] : tr;
}),
EditorView.updateListener.of((v) => {
if (!v.docChanged || disabled) {
return;
}
value = v.state.doc.toString();
triggerNativeChange();
}),
],
}),
});
return () => {
clearTimeout(refreshDebounceId);
removeLabelListeners();
editor?.destroy();
};
});
</script>
<div bind:this={container} class="code-editor" />
| ui/src/components/base/FilterAutocompleteInput.svelte | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.00017719651805236936,
0.00017236806161236018,
0.00016372489335481077,
0.00017260151798836887,
0.000003131037829007255
] |
{
"id": 5,
"code_window": [
"\t\tencryptionKey := os.Getenv(form.app.EncryptionEnv())\n",
"\t\tif err := form.dao.SaveSettings(form.Settings, encryptionKey); err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\t// explicitly trigger old logs deletion\n",
"\t\tform.app.LogsDao().DeleteOldRequests(\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// try to revert app settings\n",
"\t\t\tform.app.Settings().Merge(oldSettings)\n",
"\n"
],
"file_path": "forms/settings_upsert.go",
"type": "add",
"edit_start_line_idx": 62
} | package forms_test
import (
"encoding/json"
"errors"
"testing"
validation "github.com/go-ozzo/ozzo-validation/v4"
"github.com/pocketbase/pocketbase/forms"
"github.com/pocketbase/pocketbase/models"
"github.com/pocketbase/pocketbase/models/schema"
"github.com/pocketbase/pocketbase/tests"
"github.com/pocketbase/pocketbase/tools/dbutils"
"github.com/pocketbase/pocketbase/tools/security"
"github.com/spf13/cast"
)
func TestNewCollectionUpsert(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
collection := &models.Collection{}
collection.Name = "test_name"
collection.Type = "test_type"
collection.System = true
listRule := "test_list"
collection.ListRule = &listRule
viewRule := "test_view"
collection.ViewRule = &viewRule
createRule := "test_create"
collection.CreateRule = &createRule
updateRule := "test_update"
collection.UpdateRule = &updateRule
deleteRule := "test_delete"
collection.DeleteRule = &deleteRule
collection.Schema = schema.NewSchema(&schema.SchemaField{
Name: "test",
Type: schema.FieldTypeText,
})
form := forms.NewCollectionUpsert(app, collection)
if form.Name != collection.Name {
t.Errorf("Expected Name %q, got %q", collection.Name, form.Name)
}
if form.Type != collection.Type {
t.Errorf("Expected Type %q, got %q", collection.Type, form.Type)
}
if form.System != collection.System {
t.Errorf("Expected System %v, got %v", collection.System, form.System)
}
if form.ListRule != collection.ListRule {
t.Errorf("Expected ListRule %v, got %v", collection.ListRule, form.ListRule)
}
if form.ViewRule != collection.ViewRule {
t.Errorf("Expected ViewRule %v, got %v", collection.ViewRule, form.ViewRule)
}
if form.CreateRule != collection.CreateRule {
t.Errorf("Expected CreateRule %v, got %v", collection.CreateRule, form.CreateRule)
}
if form.UpdateRule != collection.UpdateRule {
t.Errorf("Expected UpdateRule %v, got %v", collection.UpdateRule, form.UpdateRule)
}
if form.DeleteRule != collection.DeleteRule {
t.Errorf("Expected DeleteRule %v, got %v", collection.DeleteRule, form.DeleteRule)
}
// store previous state and modify the collection schema to verify
// that the form.Schema is a deep clone
loadedSchema, _ := collection.Schema.MarshalJSON()
collection.Schema.AddField(&schema.SchemaField{
Name: "new_field",
Type: schema.FieldTypeBool,
})
formSchema, _ := form.Schema.MarshalJSON()
if string(formSchema) != string(loadedSchema) {
t.Errorf("Expected Schema %v, got %v", string(loadedSchema), string(formSchema))
}
}
func TestCollectionUpsertValidateAndSubmit(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
scenarios := []struct {
testName string
existingName string
jsonData string
expectedErrors []string
}{
{"empty create (base)", "", "{}", []string{"name", "schema"}},
{"empty create (auth)", "", `{"type":"auth"}`, []string{"name"}},
{"empty create (view)", "", `{"type":"view"}`, []string{"name", "options"}},
{"empty update", "demo2", "{}", []string{}},
{
"create failure",
"",
`{
"name": "test ?!@#$",
"type": "invalid",
"system": true,
"schema": [
{"name":"","type":"text"}
],
"listRule": "missing = '123'",
"viewRule": "missing = '123'",
"createRule": "missing = '123'",
"updateRule": "missing = '123'",
"deleteRule": "missing = '123'",
"indexes": ["create index '' on '' ()"]
}`,
[]string{"name", "type", "schema", "listRule", "viewRule", "createRule", "updateRule", "deleteRule", "indexes"},
},
{
"create failure - existing name",
"",
`{
"name": "demo1",
"system": true,
"schema": [
{"name":"test","type":"text"}
],
"listRule": "test='123'",
"viewRule": "test='123'",
"createRule": "test='123'",
"updateRule": "test='123'",
"deleteRule": "test='123'"
}`,
[]string{"name"},
},
{
"create failure - existing internal table",
"",
`{
"name": "_admins",
"schema": [
{"name":"test","type":"text"}
]
}`,
[]string{"name"},
},
{
"create failure - name starting with underscore",
"",
`{
"name": "_test_new",
"schema": [
{"name":"test","type":"text"}
]
}`,
[]string{"name"},
},
{
"create failure - duplicated field names (case insensitive)",
"",
`{
"name": "test_new",
"schema": [
{"name":"test","type":"text"},
{"name":"tESt","type":"text"}
]
}`,
[]string{"schema"},
},
{
"create failure - missing relation display field",
"",
`{
"name": "test_new",
"type": "base",
"schema": [
{
"name":"test",
"type":"relation",
"options":{
"collectionId":"wsmn24bux7wo113",
"displayFields":["text", "missing"]
}
}
]
}`,
[]string{"schema"},
},
{
"create failure - check auth options validators",
"",
`{
"name": "test_new",
"type": "auth",
"schema": [
{"name":"test","type":"text"}
],
"options": { "minPasswordLength": 3 }
}`,
[]string{"options"},
},
{
"create failure - check view options validators",
"",
`{
"name": "test_new",
"type": "view",
"options": { "query": "invalid query" }
}`,
[]string{"options"},
},
{
"create success",
"",
`{
"name": "test_new",
"type": "auth",
"system": true,
"schema": [
{"id":"a123456","name":"test1","type":"text"},
{"id":"b123456","name":"test2","type":"email"},
{
"name":"test3",
"type":"relation",
"options":{
"collectionId":"v851q4r790rhknl",
"displayFields":["name","id","created","updated","username","email","emailVisibility","verified"]
}
}
],
"listRule": "test1='123' && verified = true",
"viewRule": "test1='123' && emailVisibility = true",
"createRule": "test1='123' && email != ''",
"updateRule": "test1='123' && username != ''",
"deleteRule": "test1='123' && id != ''",
"indexes": ["create index idx_test_new on anything (test1)"]
}`,
[]string{},
},
{
"update failure - changing field type",
"test_new",
`{
"schema": [
{"id":"a123456","name":"test1","type":"url"},
{"id":"b123456","name":"test2","type":"bool"}
],
"indexes": ["create index idx_test_new on test_new (test1)", "invalid"]
}`,
[]string{"schema", "indexes"},
},
{
"update success - rename fields to existing field names (aka. reusing field names)",
"test_new",
`{
"schema": [
{"id":"a123456","name":"test2","type":"text"},
{"id":"b123456","name":"test1","type":"email"}
]
}`,
[]string{},
},
{
"update failure - existing name",
"demo2",
`{"name": "demo3"}`,
[]string{"name"},
},
{
"update failure - changing system collection",
"nologin",
`{
"name": "update",
"system": false,
"schema": [
{"id":"koih1lqx","name":"abc","type":"text"}
],
"listRule": "abc = '123'",
"viewRule": "abc = '123'",
"createRule": "abc = '123'",
"updateRule": "abc = '123'",
"deleteRule": "abc = '123'"
}`,
[]string{"name", "system"},
},
{
"update failure - changing collection type",
"demo3",
`{
"type": "auth"
}`,
[]string{"type"},
},
{
"update failure - changing relation collection",
"users",
`{
"schema": [
{
"id": "lkeigvv3",
"name": "rel",
"type": "relation",
"options": {
"collectionId": "wzlqyes4orhoygb",
"cascadeDelete": false,
"maxSelect": 1,
"displayFields": null
}
}
]
}`,
[]string{"schema"},
},
{
"update failure - all fields",
"demo2",
`{
"name": "test ?!@#$",
"type": "invalid",
"system": true,
"schema": [
{"name":"","type":"text"}
],
"listRule": "missing = '123'",
"viewRule": "missing = '123'",
"createRule": "missing = '123'",
"updateRule": "missing = '123'",
"deleteRule": "missing = '123'",
"options": {"test": 123},
"indexes": ["create index '' from demo2 on (id)"]
}`,
[]string{"name", "type", "system", "schema", "listRule", "viewRule", "createRule", "updateRule", "deleteRule", "indexes"},
},
{
"update success - update all fields",
"clients",
`{
"name": "demo_update",
"type": "auth",
"schema": [
{"id":"_2hlxbmp","name":"test","type":"text"}
],
"listRule": "test='123' && verified = true",
"viewRule": "test='123' && emailVisibility = true",
"createRule": "test='123' && email != ''",
"updateRule": "test='123' && username != ''",
"deleteRule": "test='123' && id != ''",
"options": {"minPasswordLength": 10},
"indexes": [
"create index idx_clients_test1 on anything (id, email, test)",
"create unique index idx_clients_test2 on clients (id, username, email)"
]
}`,
[]string{},
},
// (fail due to filters old field references)
{
"update failure - rename the schema field of the last updated collection",
"demo_update",
`{
"schema": [
{"id":"_2hlxbmp","name":"test_renamed","type":"text"}
]
}`,
[]string{"listRule", "viewRule", "createRule", "updateRule", "deleteRule"},
},
// (cleared filter references)
{
"update success - rename the schema field of the last updated collection",
"demo_update",
`{
"schema": [
{"id":"_2hlxbmp","name":"test_renamed","type":"text"}
],
"listRule": null,
"viewRule": null,
"createRule": null,
"updateRule": null,
"deleteRule": null,
"indexes": []
}`,
[]string{},
},
{
"update success - system collection",
"nologin",
`{
"listRule": "name='123'",
"viewRule": "name='123'",
"createRule": "name='123'",
"updateRule": "name='123'",
"deleteRule": "name='123'"
}`,
[]string{},
},
// view tests
// -----------------------------------------------------------
{
"view create failure",
"",
`{
"name": "upsert_view",
"type": "view",
"listRule": "id='123' && verified = true",
"viewRule": "id='123' && emailVisibility = true",
"schema": [
{"id":"abc123","name":"some invalid field name that will be overwritten !@#$","type":"bool"}
],
"options": {
"query": "select id, email from users; drop table _admins;"
},
"indexes": ["create index idx_test_view on upsert_view (id)"]
}`,
[]string{
"listRule",
"viewRule",
"options",
"indexes", // views don't have indexes
},
},
{
"view create success",
"",
`{
"name": "upsert_view",
"type": "view",
"listRule": "id='123' && verified = true",
"viewRule": "id='123' && emailVisibility = true",
"schema": [
{"id":"abc123","name":"some invalid field name that will be overwritten !@#$","type":"bool"}
],
"options": {
"query": "select id, emailVisibility, verified from users"
}
}`,
[]string{
// "schema", should be overwritten by an autogenerated from the query
},
},
{
"view update failure (schema autogeneration and rule fields check)",
"upsert_view",
`{
"name": "upsert_view_2",
"listRule": "id='456' && verified = true",
"viewRule": "id='456'",
"createRule": "id='123'",
"updateRule": "id='123'",
"deleteRule": "id='123'",
"schema": [
{"id":"abc123","name":"verified","type":"bool"}
],
"options": {
"query": "select 1 as id"
}
}`,
[]string{
"listRule", // missing field (ignoring the old or explicit schema)
"createRule", // not allowed
"updateRule", // not allowed
"deleteRule", // not allowed
},
},
{
"view update failure (check query identifiers format)",
"upsert_view",
`{
"listRule": null,
"viewRule": null,
"options": {
"query": "select 1 as id, 2 as [invalid!@#]"
}
}`,
[]string{
"schema", // should fail due to invalid field name
},
},
{
"view update success",
"upsert_view",
`{
"listRule": null,
"viewRule": null,
"options": {
"query": "select 1 as id, 2 as valid"
}
}`,
[]string{},
},
}
for _, s := range scenarios {
collection := &models.Collection{}
if s.existingName != "" {
var err error
collection, err = app.Dao().FindCollectionByNameOrId(s.existingName)
if err != nil {
t.Fatal(err)
}
}
form := forms.NewCollectionUpsert(app, collection)
// load data
loadErr := json.Unmarshal([]byte(s.jsonData), form)
if loadErr != nil {
t.Errorf("[%s] Failed to load form data: %v", s.testName, loadErr)
continue
}
interceptorCalls := 0
interceptor := func(next forms.InterceptorNextFunc[*models.Collection]) forms.InterceptorNextFunc[*models.Collection] {
return func(c *models.Collection) error {
interceptorCalls++
return next(c)
}
}
// parse errors
result := form.Submit(interceptor)
errs, ok := result.(validation.Errors)
if !ok && result != nil {
t.Errorf("[%s] Failed to parse errors %v", s.testName, result)
continue
}
// check interceptor calls
expectInterceptorCalls := 1
if len(s.expectedErrors) > 0 {
expectInterceptorCalls = 0
}
if interceptorCalls != expectInterceptorCalls {
t.Errorf("[%s] Expected interceptor to be called %d, got %d", s.testName, expectInterceptorCalls, interceptorCalls)
}
// check errors
if len(errs) > len(s.expectedErrors) {
t.Errorf("[%s] Expected error keys %v, got %v", s.testName, s.expectedErrors, errs)
}
for _, k := range s.expectedErrors {
if _, ok := errs[k]; !ok {
t.Errorf("[%s] Missing expected error key %q in %v", s.testName, k, errs)
}
}
if len(s.expectedErrors) > 0 {
continue
}
collection, _ = app.Dao().FindCollectionByNameOrId(form.Name)
if collection == nil {
t.Errorf("[%s] Expected to find collection %q, got nil", s.testName, form.Name)
continue
}
if form.Name != collection.Name {
t.Errorf("[%s] Expected Name %q, got %q", s.testName, collection.Name, form.Name)
}
if form.Type != collection.Type {
t.Errorf("[%s] Expected Type %q, got %q", s.testName, collection.Type, form.Type)
}
if form.System != collection.System {
t.Errorf("[%s] Expected System %v, got %v", s.testName, collection.System, form.System)
}
if cast.ToString(form.ListRule) != cast.ToString(collection.ListRule) {
t.Errorf("[%s] Expected ListRule %v, got %v", s.testName, collection.ListRule, form.ListRule)
}
if cast.ToString(form.ViewRule) != cast.ToString(collection.ViewRule) {
t.Errorf("[%s] Expected ViewRule %v, got %v", s.testName, collection.ViewRule, form.ViewRule)
}
if cast.ToString(form.CreateRule) != cast.ToString(collection.CreateRule) {
t.Errorf("[%s] Expected CreateRule %v, got %v", s.testName, collection.CreateRule, form.CreateRule)
}
if cast.ToString(form.UpdateRule) != cast.ToString(collection.UpdateRule) {
t.Errorf("[%s] Expected UpdateRule %v, got %v", s.testName, collection.UpdateRule, form.UpdateRule)
}
if cast.ToString(form.DeleteRule) != cast.ToString(collection.DeleteRule) {
t.Errorf("[%s] Expected DeleteRule %v, got %v", s.testName, collection.DeleteRule, form.DeleteRule)
}
rawFormSchema, _ := form.Schema.MarshalJSON()
rawCollectionSchema, _ := collection.Schema.MarshalJSON()
if len(form.Schema.Fields()) != len(collection.Schema.Fields()) {
t.Errorf("[%s] Expected Schema \n%v, \ngot \n%v", s.testName, string(rawCollectionSchema), string(rawFormSchema))
continue
}
for _, f := range form.Schema.Fields() {
if collection.Schema.GetFieldByName(f.Name) == nil {
t.Errorf("[%s] Missing field %s \nin \n%v", s.testName, f.Name, string(rawFormSchema))
continue
}
}
// check indexes (if any)
allIndexes, _ := app.Dao().TableIndexes(form.Name)
for _, formIdx := range form.Indexes {
parsed := dbutils.ParseIndex(formIdx)
parsed.TableName = form.Name
normalizedIdx := parsed.Build()
var exists bool
for _, idx := range allIndexes {
if dbutils.ParseIndex(idx).Build() == normalizedIdx {
exists = true
continue
}
}
if !exists {
t.Errorf(
"[%s] Missing index %s \nin \n%v", s.testName, normalizedIdx, allIndexes)
continue
}
}
}
}
func TestCollectionUpsertSubmitInterceptors(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
collection, err := app.Dao().FindCollectionByNameOrId("demo2")
if err != nil {
t.Fatal(err)
}
form := forms.NewCollectionUpsert(app, collection)
form.Name = "test_new"
testErr := errors.New("test_error")
interceptorCollectionName := ""
interceptor1Called := false
interceptor1 := func(next forms.InterceptorNextFunc[*models.Collection]) forms.InterceptorNextFunc[*models.Collection] {
return func(c *models.Collection) error {
interceptor1Called = true
return next(c)
}
}
interceptor2Called := false
interceptor2 := func(next forms.InterceptorNextFunc[*models.Collection]) forms.InterceptorNextFunc[*models.Collection] {
return func(c *models.Collection) error {
interceptorCollectionName = collection.Name // to check if the record was filled
interceptor2Called = true
return testErr
}
}
submitErr := form.Submit(interceptor1, interceptor2)
if submitErr != testErr {
t.Fatalf("Expected submitError %v, got %v", testErr, submitErr)
}
if !interceptor1Called {
t.Fatalf("Expected interceptor1 to be called")
}
if !interceptor2Called {
t.Fatalf("Expected interceptor2 to be called")
}
if interceptorCollectionName != form.Name {
t.Fatalf("Expected the form model to be filled before calling the interceptors")
}
}
func TestCollectionUpsertWithCustomId(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
existingCollection, err := app.Dao().FindCollectionByNameOrId("demo2")
if err != nil {
t.Fatal(err)
}
newCollection := func() *models.Collection {
return &models.Collection{
Name: "c_" + security.PseudorandomString(4),
Schema: existingCollection.Schema,
}
}
scenarios := []struct {
name string
jsonData string
collection *models.Collection
expectError bool
}{
{
"empty data",
"{}",
newCollection(),
false,
},
{
"empty id",
`{"id":""}`,
newCollection(),
false,
},
{
"id < 15 chars",
`{"id":"a23"}`,
newCollection(),
true,
},
{
"id > 15 chars",
`{"id":"a234567890123456"}`,
newCollection(),
true,
},
{
"id = 15 chars (invalid chars)",
`{"id":"a@3456789012345"}`,
newCollection(),
true,
},
{
"id = 15 chars (valid chars)",
`{"id":"a23456789012345"}`,
newCollection(),
false,
},
{
"changing the id of an existing item",
`{"id":"b23456789012345"}`,
existingCollection,
true,
},
{
"using the same existing item id",
`{"id":"` + existingCollection.Id + `"}`,
existingCollection,
false,
},
{
"skipping the id for existing item",
`{}`,
existingCollection,
false,
},
}
for _, s := range scenarios {
form := forms.NewCollectionUpsert(app, s.collection)
// load data
loadErr := json.Unmarshal([]byte(s.jsonData), form)
if loadErr != nil {
t.Errorf("[%s] Failed to load form data: %v", s.name, loadErr)
continue
}
submitErr := form.Submit()
hasErr := submitErr != nil
if hasErr != s.expectError {
t.Errorf("[%s] Expected hasErr to be %v, got %v (%v)", s.name, s.expectError, hasErr, submitErr)
}
if !hasErr && form.Id != "" {
_, err := app.Dao().FindCollectionByNameOrId(form.Id)
if err != nil {
t.Errorf("[%s] Expected to find record with id %s, got %v", s.name, form.Id, err)
}
}
}
}
| forms/collection_upsert_test.go | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.0031648536678403616,
0.0004124863480683416,
0.00016206387954298407,
0.00017287064110860229,
0.0007000711048021913
] |
{
"id": 5,
"code_window": [
"\t\tencryptionKey := os.Getenv(form.app.EncryptionEnv())\n",
"\t\tif err := form.dao.SaveSettings(form.Settings, encryptionKey); err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\t// explicitly trigger old logs deletion\n",
"\t\tform.app.LogsDao().DeleteOldRequests(\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// try to revert app settings\n",
"\t\t\tform.app.Settings().Merge(oldSettings)\n",
"\n"
],
"file_path": "forms/settings_upsert.go",
"type": "add",
"edit_start_line_idx": 62
} | package forms_test
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
"github.com/labstack/echo/v5"
"github.com/pocketbase/pocketbase/core"
"github.com/pocketbase/pocketbase/daos"
"github.com/pocketbase/pocketbase/forms"
"github.com/pocketbase/pocketbase/models"
"github.com/pocketbase/pocketbase/tests"
"github.com/pocketbase/pocketbase/tools/filesystem"
"github.com/pocketbase/pocketbase/tools/list"
)
func hasRecordFile(app core.App, record *models.Record, filename string) bool {
fs, _ := app.NewFilesystem()
defer fs.Close()
fileKey := filepath.Join(
record.Collection().Id,
record.Id,
filename,
)
exists, _ := fs.Exists(fileKey)
return exists
}
func TestNewRecordUpsert(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
collection, _ := app.Dao().FindCollectionByNameOrId("demo2")
record := models.NewRecord(collection)
record.Set("title", "test_value")
form := forms.NewRecordUpsert(app, record)
val := form.Data()["title"]
if val != "test_value" {
t.Errorf("Expected record data to be loaded, got %v", form.Data())
}
}
func TestRecordUpsertLoadRequestUnsupported(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
record, err := app.Dao().FindRecordById("demo2", "0yxhwia2amd8gec")
if err != nil {
t.Fatal(err)
}
testData := "title=test123"
form := forms.NewRecordUpsert(app, record)
req := httptest.NewRequest(http.MethodGet, "/", strings.NewReader(testData))
req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationForm)
if err := form.LoadRequest(req, ""); err == nil {
t.Fatal("Expected LoadRequest to fail, got nil")
}
}
func TestRecordUpsertLoadRequestJson(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
record, err := app.Dao().FindRecordById("demo1", "84nmscqy84lsi1t")
if err != nil {
t.Fatal(err)
}
testData := map[string]any{
"a": map[string]any{
"b": map[string]any{
"id": "test_id",
"text": "test123",
"unknown": "test456",
// file fields unset/delete
"file_one": nil,
"file_many.0": "", // delete by index
"file_many-": []string{"test_MaWC6mWyrP.txt", "test_tC1Yc87DfC.txt"}, // multiple delete with modifier
"file_many.300_WlbFWSGmW9.png": nil, // delete by filename
"file_many.2": "test.png", // should be ignored
},
},
}
form := forms.NewRecordUpsert(app, record)
jsonBody, _ := json.Marshal(testData)
req := httptest.NewRequest(http.MethodGet, "/", bytes.NewReader(jsonBody))
req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)
loadErr := form.LoadRequest(req, "a.b")
if loadErr != nil {
t.Fatal(loadErr)
}
if form.Id != "test_id" {
t.Fatalf("Expect id field to be %q, got %q", "test_id", form.Id)
}
if v, ok := form.Data()["text"]; !ok || v != "test123" {
t.Fatalf("Expect title field to be %q, got %q", "test123", v)
}
if v, ok := form.Data()["unknown"]; ok {
t.Fatalf("Didn't expect unknown field to be set, got %v", v)
}
fileOne, ok := form.Data()["file_one"]
if !ok {
t.Fatal("Expect file_one field to be set")
}
if fileOne != "" {
t.Fatalf("Expect file_one field to be empty string, got %v", fileOne)
}
fileMany, ok := form.Data()["file_many"]
if !ok || fileMany == nil {
t.Fatal("Expect file_many field to be set")
}
manyfilesRemains := len(list.ToUniqueStringSlice(fileMany))
if manyfilesRemains != 1 {
t.Fatalf("Expect only 1 file_many to remain, got \n%v", fileMany)
}
}
func TestRecordUpsertLoadRequestMultipart(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
record, err := app.Dao().FindRecordById("demo1", "84nmscqy84lsi1t")
if err != nil {
t.Fatal(err)
}
formData, mp, err := tests.MockMultipartData(map[string]string{
"a.b.id": "test_id",
"a.b.text": "test123",
"a.b.unknown": "test456",
// file fields unset/delete
"a.b.file_one-": "test_d61b33QdDU.txt", // delete with modifier
"a.b.file_many.0": "", // delete by index
"a.b.file_many-": "test_tC1Yc87DfC.txt", // delete with modifier
"a.b.file_many.300_WlbFWSGmW9.png": "", // delete by filename
"a.b.file_many.2": "test.png", // should be ignored
}, "a.b.file_many")
if err != nil {
t.Fatal(err)
}
form := forms.NewRecordUpsert(app, record)
req := httptest.NewRequest(http.MethodGet, "/", formData)
req.Header.Set(echo.HeaderContentType, mp.FormDataContentType())
loadErr := form.LoadRequest(req, "a.b")
if loadErr != nil {
t.Fatal(loadErr)
}
if form.Id != "test_id" {
t.Fatalf("Expect id field to be %q, got %q", "test_id", form.Id)
}
if v, ok := form.Data()["text"]; !ok || v != "test123" {
t.Fatalf("Expect text field to be %q, got %q", "test123", v)
}
if v, ok := form.Data()["unknown"]; ok {
t.Fatalf("Didn't expect unknown field to be set, got %v", v)
}
fileOne, ok := form.Data()["file_one"]
if !ok {
t.Fatal("Expect file_one field to be set")
}
if fileOne != "" {
t.Fatalf("Expect file_one field to be empty string, got %v", fileOne)
}
fileMany, ok := form.Data()["file_many"]
if !ok || fileMany == nil {
t.Fatal("Expect file_many field to be set")
}
manyfilesRemains := len(list.ToUniqueStringSlice(fileMany))
expectedRemains := 3 // 5 old; 3 deleted and 1 new uploaded
if manyfilesRemains != expectedRemains {
t.Fatalf("Expect file_many to be %d, got %d (%v)", expectedRemains, manyfilesRemains, fileMany)
}
}
func TestRecordUpsertLoadData(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
record, err := app.Dao().FindRecordById("demo2", "llvuca81nly1qls")
if err != nil {
t.Fatal(err)
}
form := forms.NewRecordUpsert(app, record)
loadErr := form.LoadData(map[string]any{
"title": "test_new",
"active": true,
})
if loadErr != nil {
t.Fatal(loadErr)
}
if v, ok := form.Data()["title"]; !ok || v != "test_new" {
t.Fatalf("Expect title field to be %v, got %v", "test_new", v)
}
if v, ok := form.Data()["active"]; !ok || v != true {
t.Fatalf("Expect active field to be %v, got %v", true, v)
}
}
func TestRecordUpsertDrySubmitFailure(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
collection, _ := app.Dao().FindCollectionByNameOrId("demo1")
recordBefore, err := app.Dao().FindRecordById(collection.Id, "al1h9ijdeojtsjy")
if err != nil {
t.Fatal(err)
}
formData, mp, err := tests.MockMultipartData(map[string]string{
"title": "abc",
"rel_one": "missing",
})
if err != nil {
t.Fatal(err)
}
form := forms.NewRecordUpsert(app, recordBefore)
req := httptest.NewRequest(http.MethodGet, "/", formData)
req.Header.Set(echo.HeaderContentType, mp.FormDataContentType())
form.LoadRequest(req, "")
callbackCalls := 0
// ensure that validate is triggered
// ---
result := form.DrySubmit(func(txDao *daos.Dao) error {
callbackCalls++
return nil
})
if result == nil {
t.Fatal("Expected error, got nil")
}
if callbackCalls != 0 {
t.Fatalf("Expected callbackCalls to be 0, got %d", callbackCalls)
}
// ensure that the record changes weren't persisted
// ---
recordAfter, err := app.Dao().FindRecordById(collection.Id, recordBefore.Id)
if err != nil {
t.Fatal(err)
}
if recordAfter.GetString("title") == "abc" {
t.Fatalf("Expected record.title to be %v, got %v", recordAfter.GetString("title"), "abc")
}
if recordAfter.GetString("rel_one") == "missing" {
t.Fatalf("Expected record.rel_one to be %s, got %s", recordBefore.GetString("rel_one"), "missing")
}
}
func TestRecordUpsertDrySubmitSuccess(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
collection, _ := app.Dao().FindCollectionByNameOrId("demo1")
recordBefore, err := app.Dao().FindRecordById(collection.Id, "84nmscqy84lsi1t")
if err != nil {
t.Fatal(err)
}
formData, mp, err := tests.MockMultipartData(map[string]string{
"title": "dry_test",
"file_one": "",
}, "file_many")
if err != nil {
t.Fatal(err)
}
form := forms.NewRecordUpsert(app, recordBefore)
req := httptest.NewRequest(http.MethodGet, "/", formData)
req.Header.Set(echo.HeaderContentType, mp.FormDataContentType())
form.LoadRequest(req, "")
callbackCalls := 0
result := form.DrySubmit(func(txDao *daos.Dao) error {
callbackCalls++
return nil
})
if result != nil {
t.Fatalf("Expected nil, got error %v", result)
}
// ensure callback was called
if callbackCalls != 1 {
t.Fatalf("Expected callbackCalls to be 1, got %d", callbackCalls)
}
// ensure that the record changes weren't persisted
recordAfter, err := app.Dao().FindRecordById(collection.Id, recordBefore.Id)
if err != nil {
t.Fatal(err)
}
if recordAfter.GetString("title") == "dry_test" {
t.Fatalf("Expected record.title to be %v, got %v", recordAfter.GetString("title"), "dry_test")
}
if recordAfter.GetString("file_one") == "" {
t.Fatal("Expected record.file_one to not be changed, got empty string")
}
// file wasn't removed
if !hasRecordFile(app, recordAfter, recordAfter.GetString("file_one")) {
t.Fatal("file_one file should not have been deleted")
}
}
func TestRecordUpsertDrySubmitWithNestedTx(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
collection, _ := app.Dao().FindCollectionByNameOrId("demo1")
recordBefore, err := app.Dao().FindRecordById(collection.Id, "84nmscqy84lsi1t")
if err != nil {
t.Fatal(err)
}
formData, mp, err := tests.MockMultipartData(map[string]string{
"title": "dry_test",
})
if err != nil {
t.Fatal(err)
}
txErr := app.Dao().RunInTransaction(func(txDao *daos.Dao) error {
form := forms.NewRecordUpsert(app, recordBefore)
form.SetDao(txDao)
req := httptest.NewRequest(http.MethodGet, "/", formData)
req.Header.Set(echo.HeaderContentType, mp.FormDataContentType())
form.LoadRequest(req, "")
callbackCalls := 0
result := form.DrySubmit(func(innerTxDao *daos.Dao) error {
callbackCalls++
return nil
})
if result != nil {
t.Fatalf("Expected nil, got error %v", result)
}
// ensure callback was called
if callbackCalls != 1 {
t.Fatalf("Expected callbackCalls to be 1, got %d", callbackCalls)
}
// ensure that the original txDao can still be used after the DrySubmit rollback
if _, err := txDao.FindRecordById(collection.Id, recordBefore.Id); err != nil {
t.Fatalf("Expected the dry submit rollback to not affect the outer tx context, got %v", err)
}
// ensure that the record changes weren't persisted
recordAfter, err := app.Dao().FindRecordById(collection.Id, recordBefore.Id)
if err != nil {
t.Fatal(err)
}
if recordAfter.GetString("title") == "dry_test" {
t.Fatalf("Expected record.title to be %v, got %v", recordBefore.GetString("title"), "dry_test")
}
return nil
})
if txErr != nil {
t.Fatalf("Nested transactions failure: %v", txErr)
}
}
func TestRecordUpsertSubmitFailure(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
collection, err := app.Dao().FindCollectionByNameOrId("demo1")
if err != nil {
t.Fatal(err)
}
recordBefore, err := app.Dao().FindRecordById(collection.Id, "84nmscqy84lsi1t")
if err != nil {
t.Fatal(err)
}
formData, mp, err := tests.MockMultipartData(map[string]string{
"text": "abc",
"bool": "false",
"select_one": "invalid",
"file_many": "invalid",
"email": "invalid",
}, "file_one")
if err != nil {
t.Fatal(err)
}
form := forms.NewRecordUpsert(app, recordBefore)
req := httptest.NewRequest(http.MethodGet, "/", formData)
req.Header.Set(echo.HeaderContentType, mp.FormDataContentType())
form.LoadRequest(req, "")
interceptorCalls := 0
interceptor := func(next forms.InterceptorNextFunc[*models.Record]) forms.InterceptorNextFunc[*models.Record] {
return func(r *models.Record) error {
interceptorCalls++
return next(r)
}
}
// ensure that validate is triggered
// ---
result := form.Submit(interceptor)
if result == nil {
t.Fatal("Expected error, got nil")
}
// check interceptor calls
// ---
if interceptorCalls != 0 {
t.Fatalf("Expected interceptor to be called 0 times, got %d", interceptorCalls)
}
// ensure that the record changes weren't persisted
// ---
recordAfter, err := app.Dao().FindRecordById(collection.Id, recordBefore.Id)
if err != nil {
t.Fatal(err)
}
if v := recordAfter.Get("text"); v == "abc" {
t.Fatalf("Expected record.text not to change, got %v", v)
}
if v := recordAfter.Get("bool"); v == false {
t.Fatalf("Expected record.bool not to change, got %v", v)
}
if v := recordAfter.Get("select_one"); v == "invalid" {
t.Fatalf("Expected record.select_one not to change, got %v", v)
}
if v := recordAfter.Get("email"); v == "invalid" {
t.Fatalf("Expected record.email not to change, got %v", v)
}
if v := recordAfter.GetStringSlice("file_many"); len(v) != 5 {
t.Fatalf("Expected record.file_many not to change, got %v", v)
}
// ensure the files weren't removed
for _, f := range recordAfter.GetStringSlice("file_many") {
if !hasRecordFile(app, recordAfter, f) {
t.Fatal("file_many file should not have been deleted")
}
}
}
func TestRecordUpsertSubmitSuccess(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
collection, _ := app.Dao().FindCollectionByNameOrId("demo1")
recordBefore, err := app.Dao().FindRecordById(collection.Id, "84nmscqy84lsi1t")
if err != nil {
t.Fatal(err)
}
formData, mp, err := tests.MockMultipartData(map[string]string{
"text": "test_save",
"bool": "true",
"select_one": "optionA",
"file_one": "",
}, "file_many.1", "file_many") // replace + new file
if err != nil {
t.Fatal(err)
}
form := forms.NewRecordUpsert(app, recordBefore)
req := httptest.NewRequest(http.MethodGet, "/", formData)
req.Header.Set(echo.HeaderContentType, mp.FormDataContentType())
form.LoadRequest(req, "")
interceptorCalls := 0
interceptor := func(next forms.InterceptorNextFunc[*models.Record]) forms.InterceptorNextFunc[*models.Record] {
return func(r *models.Record) error {
interceptorCalls++
return next(r)
}
}
result := form.Submit(interceptor)
if result != nil {
t.Fatalf("Expected nil, got error %v", result)
}
// check interceptor calls
// ---
if interceptorCalls != 1 {
t.Fatalf("Expected interceptor to be called 1 time, got %d", interceptorCalls)
}
// ensure that the record changes were persisted
// ---
recordAfter, err := app.Dao().FindRecordById(collection.Id, recordBefore.Id)
if err != nil {
t.Fatal(err)
}
if v := recordAfter.GetString("text"); v != "test_save" {
t.Fatalf("Expected record.text to be %v, got %v", v, "test_save")
}
if hasRecordFile(app, recordAfter, recordAfter.GetString("file_one")) {
t.Fatal("Expected record.file_one to be deleted")
}
fileMany := (recordAfter.GetStringSlice("file_many"))
if len(fileMany) != 6 { // 1 replace + 1 new
t.Fatalf("Expected 6 record.file_many, got %d (%v)", len(fileMany), fileMany)
}
for _, f := range fileMany {
if !hasRecordFile(app, recordAfter, f) {
t.Fatalf("Expected file %q to exist", f)
}
}
}
func TestRecordUpsertSubmitInterceptors(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
collection, _ := app.Dao().FindCollectionByNameOrId("demo3")
record, err := app.Dao().FindRecordById(collection.Id, "mk5fmymtx4wsprk")
if err != nil {
t.Fatal(err)
}
form := forms.NewRecordUpsert(app, record)
form.Data()["title"] = "test_new"
testErr := errors.New("test_error")
interceptorRecordTitle := ""
interceptor1Called := false
interceptor1 := func(next forms.InterceptorNextFunc[*models.Record]) forms.InterceptorNextFunc[*models.Record] {
return func(r *models.Record) error {
interceptor1Called = true
return next(r)
}
}
interceptor2Called := false
interceptor2 := func(next forms.InterceptorNextFunc[*models.Record]) forms.InterceptorNextFunc[*models.Record] {
return func(r *models.Record) error {
interceptorRecordTitle = record.GetString("title") // to check if the record was filled
interceptor2Called = true
return testErr
}
}
submitErr := form.Submit(interceptor1, interceptor2)
if submitErr != testErr {
t.Fatalf("Expected submitError %v, got %v", testErr, submitErr)
}
if !interceptor1Called {
t.Fatalf("Expected interceptor1 to be called")
}
if !interceptor2Called {
t.Fatalf("Expected interceptor2 to be called")
}
if interceptorRecordTitle != form.Data()["title"].(string) {
t.Fatalf("Expected the form model to be filled before calling the interceptors")
}
}
func TestRecordUpsertWithCustomId(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
collection, err := app.Dao().FindCollectionByNameOrId("demo3")
if err != nil {
t.Fatal(err)
}
existingRecord, err := app.Dao().FindRecordById(collection.Id, "mk5fmymtx4wsprk")
if err != nil {
t.Fatal(err)
}
scenarios := []struct {
name string
data map[string]string
record *models.Record
expectError bool
}{
{
"empty data",
map[string]string{},
models.NewRecord(collection),
false,
},
{
"empty id",
map[string]string{"id": ""},
models.NewRecord(collection),
false,
},
{
"id < 15 chars",
map[string]string{"id": "a23"},
models.NewRecord(collection),
true,
},
{
"id > 15 chars",
map[string]string{"id": "a234567890123456"},
models.NewRecord(collection),
true,
},
{
"id = 15 chars (invalid chars)",
map[string]string{"id": "a@3456789012345"},
models.NewRecord(collection),
true,
},
{
"id = 15 chars (valid chars)",
map[string]string{"id": "a23456789012345"},
models.NewRecord(collection),
false,
},
{
"changing the id of an existing record",
map[string]string{"id": "b23456789012345"},
existingRecord,
true,
},
{
"using the same existing record id",
map[string]string{"id": existingRecord.Id},
existingRecord,
false,
},
{
"skipping the id for existing record",
map[string]string{},
existingRecord,
false,
},
}
for _, scenario := range scenarios {
formData, mp, err := tests.MockMultipartData(scenario.data)
if err != nil {
t.Fatal(err)
}
form := forms.NewRecordUpsert(app, scenario.record)
req := httptest.NewRequest(http.MethodGet, "/", formData)
req.Header.Set(echo.HeaderContentType, mp.FormDataContentType())
form.LoadRequest(req, "")
dryErr := form.DrySubmit(nil)
hasDryErr := dryErr != nil
submitErr := form.Submit()
hasSubmitErr := submitErr != nil
if hasDryErr != hasSubmitErr {
t.Errorf("[%s] Expected hasDryErr and hasSubmitErr to have the same value, got %v vs %v", scenario.name, hasDryErr, hasSubmitErr)
}
if hasSubmitErr != scenario.expectError {
t.Errorf("[%s] Expected hasSubmitErr to be %v, got %v (%v)", scenario.name, scenario.expectError, hasSubmitErr, submitErr)
}
if id, ok := scenario.data["id"]; ok && id != "" && !hasSubmitErr {
_, err := app.Dao().FindRecordById(collection.Id, id)
if err != nil {
t.Errorf("[%s] Expected to find record with id %s, got %v", scenario.name, id, err)
}
}
}
}
func TestRecordUpsertAuthRecord(t *testing.T) {
scenarios := []struct {
testName string
existingId string
data map[string]any
manageAccess bool
expectError bool
}{
{
"empty create data",
"",
map[string]any{},
false,
true,
},
{
"empty update data",
"4q1xlclmfloku33",
map[string]any{},
false,
false,
},
{
"minimum valid create data",
"",
map[string]any{
"password": "12345678",
"passwordConfirm": "12345678",
},
false,
false,
},
{
"create with all allowed auth fields",
"",
map[string]any{
"username": "test_new",
"email": "[email protected]",
"emailVisibility": true,
"password": "12345678",
"passwordConfirm": "12345678",
},
false,
false,
},
// username
{
"invalid username characters",
"",
map[string]any{
"username": "test abc!@#",
"password": "12345678",
"passwordConfirm": "12345678",
},
false,
true,
},
{
"invalid username length (less than 3)",
"",
map[string]any{
"username": "ab",
"password": "12345678",
"passwordConfirm": "12345678",
},
false,
true,
},
{
"invalid username length (more than 150)",
"",
map[string]any{
"username": strings.Repeat("a", 151),
"password": "12345678",
"passwordConfirm": "12345678",
},
false,
true,
},
// verified
{
"try to set verified without managed access",
"",
map[string]any{
"verified": true,
"password": "12345678",
"passwordConfirm": "12345678",
},
false,
true,
},
{
"try to update verified without managed access",
"4q1xlclmfloku33",
map[string]any{
"verified": true,
},
false,
true,
},
{
"set verified with managed access",
"",
map[string]any{
"verified": true,
"password": "12345678",
"passwordConfirm": "12345678",
},
true,
false,
},
{
"update verified with managed access",
"4q1xlclmfloku33",
map[string]any{
"verified": true,
},
true,
false,
},
// email
{
"try to update email without managed access",
"4q1xlclmfloku33",
map[string]any{
"email": "[email protected]",
},
false,
true,
},
{
"update email with managed access",
"4q1xlclmfloku33",
map[string]any{
"email": "[email protected]",
},
true,
false,
},
// password
{
"trigger the password validations if only oldPassword is set",
"4q1xlclmfloku33",
map[string]any{
"oldPassword": "1234567890",
},
false,
true,
},
{
"trigger the password validations if only passwordConfirm is set",
"4q1xlclmfloku33",
map[string]any{
"passwordConfirm": "1234567890",
},
false,
true,
},
{
"try to update password without managed access",
"4q1xlclmfloku33",
map[string]any{
"password": "1234567890",
"passwordConfirm": "1234567890",
},
false,
true,
},
{
"update password without managed access but with oldPassword",
"4q1xlclmfloku33",
map[string]any{
"oldPassword": "1234567890",
"password": "1234567890",
"passwordConfirm": "1234567890",
},
false,
false,
},
{
"update email with managed access (without oldPassword)",
"4q1xlclmfloku33",
map[string]any{
"password": "1234567890",
"passwordConfirm": "1234567890",
},
true,
false,
},
}
for _, s := range scenarios {
app, _ := tests.NewTestApp()
defer app.Cleanup()
collection, err := app.Dao().FindCollectionByNameOrId("users")
if err != nil {
t.Fatal(err)
}
record := models.NewRecord(collection)
if s.existingId != "" {
var err error
record, err = app.Dao().FindRecordById(collection.Id, s.existingId)
if err != nil {
t.Errorf("[%s] Failed to fetch auth record with id %s", s.testName, s.existingId)
continue
}
}
form := forms.NewRecordUpsert(app, record)
form.SetFullManageAccess(s.manageAccess)
if err := form.LoadData(s.data); err != nil {
t.Errorf("[%s] Failed to load form data", s.testName)
continue
}
submitErr := form.Submit()
hasErr := submitErr != nil
if hasErr != s.expectError {
t.Errorf("[%s] Expected hasErr %v, got %v (%v)", s.testName, s.expectError, hasErr, submitErr)
}
if !hasErr && record.Username() == "" {
t.Errorf("[%s] Expected username to be set, got empty string: \n%v", s.testName, record)
}
}
}
func TestRecordUpsertAddAndRemoveFiles(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
recordBefore, err := app.Dao().FindRecordById("demo1", "84nmscqy84lsi1t")
if err != nil {
t.Fatal(err)
}
// create test temp files
tempDir := filepath.Join(app.DataDir(), "temp")
if err := os.MkdirAll(app.DataDir(), os.ModePerm); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tempDir)
tmpFile, _ := os.CreateTemp(os.TempDir(), "tmpfile1-*.txt")
tmpFile.Close()
form := forms.NewRecordUpsert(app, recordBefore)
f1, err := filesystem.NewFileFromPath(tmpFile.Name())
if err != nil {
t.Fatal(err)
}
f2, err := filesystem.NewFileFromPath(tmpFile.Name())
if err != nil {
t.Fatal(err)
}
f3, err := filesystem.NewFileFromPath(tmpFile.Name())
if err != nil {
t.Fatal(err)
}
removed0 := "test_d61b33QdDU.txt" // replaced
removed1 := "300_WlbFWSGmW9.png"
removed2 := "logo_vcfJJG5TAh.svg"
form.AddFiles("file_one", f1) // should replace the existin file
form.AddFiles("file_many", f2, f3) // should append
form.RemoveFiles("file_many", removed1, removed2) // should remove
filesToUpload := form.FilesToUpload()
if v, ok := filesToUpload["file_one"]; !ok || len(v) != 1 {
t.Fatalf("Expected filesToUpload[file_one] to have exactly 1 file, got %v", v)
}
if v, ok := filesToUpload["file_many"]; !ok || len(v) != 2 {
t.Fatalf("Expected filesToUpload[file_many] to have exactly 2 file, got %v", v)
}
filesToDelete := form.FilesToDelete()
if len(filesToDelete) != 3 {
t.Fatalf("Expected exactly 2 file to delete, got %v", filesToDelete)
}
for _, f := range []string{removed0, removed1, removed2} {
if !list.ExistInSlice(f, filesToDelete) {
t.Fatalf("Missing file %q from filesToDelete %v", f, filesToDelete)
}
}
if err := form.Submit(); err != nil {
t.Fatalf("Failed to submit the RecordUpsert form, got %v", err)
}
recordAfter, err := app.Dao().FindRecordById("demo1", "84nmscqy84lsi1t")
if err != nil {
t.Fatal(err)
}
// ensure files deletion
if hasRecordFile(app, recordAfter, removed0) {
t.Fatalf("Expected the old file_one file to be deleted")
}
if hasRecordFile(app, recordAfter, removed1) {
t.Fatalf("Expected %s to be deleted", removed1)
}
if hasRecordFile(app, recordAfter, removed2) {
t.Fatalf("Expected %s to be deleted", removed2)
}
fileOne := recordAfter.GetStringSlice("file_one")
if len(fileOne) == 0 {
t.Fatalf("Expected new file_one file to be uploaded")
}
fileMany := recordAfter.GetStringSlice("file_many")
if len(fileMany) != 5 {
t.Fatalf("Expected file_many to be 5, got %v", fileMany)
}
}
func TestRecordUpsertUploadFailure(t *testing.T) {
app, _ := tests.NewTestApp()
defer app.Cleanup()
collection, err := app.Dao().FindCollectionByNameOrId("demo3")
if err != nil {
t.Fatal(err)
}
testDaos := []*daos.Dao{
app.Dao(), // with hooks
daos.New(app.Dao().DB()), // without hooks
}
for i, dao := range testDaos {
// create with invalid file
{
prefix := fmt.Sprintf("%d-create", i)
new := models.NewRecord(collection)
new.Id = "123456789012341"
form := forms.NewRecordUpsert(app, new)
form.SetDao(dao)
form.LoadData(map[string]any{"title": "new_test"})
form.AddFiles("files", &filesystem.File{Reader: &filesystem.PathReader{Path: "/tmp/__missing__"}})
if err := form.Submit(); err == nil {
t.Fatalf("[%s] Expected error, got nil", prefix)
}
if r, err := app.Dao().FindRecordById(collection.Id, new.Id); err == nil {
t.Fatalf("[%s] Expected the inserted record to be deleted, found \n%v", prefix, r.PublicExport())
}
}
// update with invalid file
{
prefix := fmt.Sprintf("%d-update", i)
record, err := app.Dao().FindRecordById(collection.Id, "1tmknxy2868d869")
if err != nil {
t.Fatal(err)
}
form := forms.NewRecordUpsert(app, record)
form.SetDao(dao)
form.LoadData(map[string]any{"title": "update_test"})
form.AddFiles("files", &filesystem.File{Reader: &filesystem.PathReader{Path: "/tmp/__missing__"}})
if err := form.Submit(); err == nil {
t.Fatalf("[%s] Expected error, got nil", prefix)
}
if r, _ := app.Dao().FindRecordById(collection.Id, record.Id); r == nil || r.GetString("title") == "update_test" {
t.Fatalf("[%s] Expected the record changes to be reverted, got \n%v", prefix, r.PublicExport())
}
}
}
}
| forms/record_upsert_test.go | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.007293761242181063,
0.0007674028747715056,
0.00016181959654204547,
0.0001770203816704452,
0.0011852644383907318
] |
{
"id": 6,
"code_window": [
"\t\t\tform.app.LogsDao().Vacuum()\n",
"\t\t}\n",
"\n",
"\t\t// merge the application settings with the form ones\n",
"\t\treturn form.app.Settings().Merge(form.Settings)\n",
"\t}, interceptors...)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\treturn nil\n"
],
"file_path": "forms/settings_upsert.go",
"type": "replace",
"edit_start_line_idx": 75
} | package core
import (
"context"
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"runtime"
"sort"
"time"
"github.com/pocketbase/pocketbase/daos"
"github.com/pocketbase/pocketbase/models"
"github.com/pocketbase/pocketbase/tools/archive"
"github.com/pocketbase/pocketbase/tools/cron"
"github.com/pocketbase/pocketbase/tools/filesystem"
"github.com/pocketbase/pocketbase/tools/security"
)
const CacheKeyActiveBackup string = "@activeBackup"
// CreateBackup creates a new backup of the current app pb_data directory.
//
// If name is empty, it will be autogenerated.
// If backup with the same name exists, the new backup file will replace it.
//
// The backup is executed within a transaction, meaning that new writes
// will be temporary "blocked" until the backup file is generated.
//
// By default backups are stored in pb_data/backups
// (the backups directory itself is excluded from the generated backup).
//
// When using S3 storage for the uploaded collection files, you have to
// back them up manually since they are not part of the pb_data.
//
// Backups can be stored on S3 if it is configured in app.Settings().Backups.
func (app *BaseApp) CreateBackup(ctx context.Context, name string) error {
if app.Cache().Has(CacheKeyActiveBackup) {
return errors.New("try again later - another backup/restore operation has already been started")
}
// auto generate backup name
if name == "" {
name = fmt.Sprintf(
"pb_backup_%s.zip",
time.Now().UTC().Format("20060102150405"),
)
}
app.Cache().Set(CacheKeyActiveBackup, name)
defer app.Cache().Remove(CacheKeyActiveBackup)
// Archive pb_data in a temp directory, excluding the "backups" dir itself (if it exists).
//
// Run in transaction to temporarily block other writes (transactions use the NonconcurrentDB connection).
// ---
tempPath := filepath.Join(os.TempDir(), "pb_backup_"+security.PseudorandomString(4))
createErr := app.Dao().RunInTransaction(func(txDao *daos.Dao) error {
if err := archive.Create(app.DataDir(), tempPath, LocalBackupsDirName); err != nil {
return err
}
return nil
})
if createErr != nil {
return createErr
}
defer os.Remove(tempPath)
// Persist the backup in the backups filesystem.
// ---
fsys, err := app.NewBackupsFilesystem()
if err != nil {
return err
}
defer fsys.Close()
fsys.SetContext(ctx)
file, err := filesystem.NewFileFromPath(tempPath)
if err != nil {
return err
}
file.OriginalName = name
file.Name = file.OriginalName
if err := fsys.UploadFile(file, file.Name); err != nil {
return err
}
return nil
}
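
// A minimal usage sketch, assuming a *BaseApp value named app is in scope
// (context and log are already imported by this file):
//
//	if err := app.CreateBackup(context.Background(), "manual_pb_backup.zip"); err != nil {
//	    log.Println("backup failed:", err)
//	}
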
// RestoreBackup restores the backup with the specified name and restarts
// the current running application process.
//
// NB! This feature is experimental and currently is expected to work only on UNIX based systems.
//
// To safely perform the restore it is recommended to have free disk space
// for at least 2x the size of the restored pb_data backup.
//
// The performed steps are:
//
// 1. Download the backup with the specified name in a temp location
// (this is in case of S3; otherwise it creates a temp copy of the zip)
//
// 2. Extract the backup in a temp directory next to the app "pb_data"
// (eg. "pb_data/../pb_data_to_restore").
//
// 3. Move the current app "pb_data" under a special sub temp dir that
// will be deleted on the next app start up (eg. "pb_data_to_restore/.pb_temp_to_delete/").
// This is because on some operating systems it may not be allowed
// to delete the currently open "pb_data" files.
//
// 4. Rename the extracted dir from step 1 as the new "pb_data".
//
// 5. Move from the old "pb_data" any local backups that may have been
// created previously to the new "pb_data/backups".
//
// 6. Restart the app (on successful app bootstrap it will also remove the old pb_data).
//
// If a failure occurs during the restore process, the dir changes are reverted.
// If for whatever reason the revert is not possible, it panics.
func (app *BaseApp) RestoreBackup(ctx context.Context, name string) error {
if runtime.GOOS == "windows" {
return errors.New("restore is not supported on windows")
}
if app.Cache().Has(CacheKeyActiveBackup) {
return errors.New("try again later - another backup/restore operation has already been started")
}
app.Cache().Set(CacheKeyActiveBackup, name)
defer app.Cache().Remove(CacheKeyActiveBackup)
fsys, err := app.NewBackupsFilesystem()
if err != nil {
return err
}
defer fsys.Close()
fsys.SetContext(ctx)
// fetch the backup file in a temp location
br, err := fsys.GetFile(name)
if err != nil {
return err
}
defer br.Close()
tempZip, err := os.CreateTemp(os.TempDir(), "pb_restore")
if err != nil {
return err
}
defer os.Remove(tempZip.Name())
if _, err := io.Copy(tempZip, br); err != nil {
return err
}
parentDataDir := filepath.Dir(app.DataDir())
extractedDataDir := filepath.Join(parentDataDir, "pb_restore_"+security.PseudorandomString(4))
defer os.RemoveAll(extractedDataDir)
if err := archive.Extract(tempZip.Name(), extractedDataDir); err != nil {
return err
}
// ensure that a database file exists
extractedDB := filepath.Join(extractedDataDir, "data.db")
if _, err := os.Stat(extractedDB); err != nil {
return fmt.Errorf("data.db file is missing or invalid: %w", err)
}
// remove the extracted zip file since we no longer need it
// (this is in case the app restarts and the defer calls are not called)
if err := os.Remove(tempZip.Name()); err != nil && app.IsDebug() {
log.Println(err)
}
// make sure that a special temp directory exists in the extracted one
if err := os.MkdirAll(filepath.Join(extractedDataDir, LocalTempDirName), os.ModePerm); err != nil {
return fmt.Errorf("failed to create a temp dir: %w", err)
}
// move the current pb_data to a special temp location that will
// hold the old data while the dirs are being replaced
// (the temp dir will be automatically removed on the next app start)
oldTempDataDir := filepath.Join(extractedDataDir, LocalTempDirName, "old_pb_data")
if err := os.Rename(app.DataDir(), oldTempDataDir); err != nil {
return fmt.Errorf("failed to move the current pb_data to a temp location: %w", err)
}
// "restore", aka. set the extracted backup as the new pb_data directory
if err := os.Rename(extractedDataDir, app.DataDir()); err != nil {
return fmt.Errorf("failed to set the extracted backup as pb_data dir: %w", err)
}
// update the old temp data dir path after the restore
oldTempDataDir = filepath.Join(app.DataDir(), LocalTempDirName, "old_pb_data")
oldLocalBackupsDir := filepath.Join(oldTempDataDir, LocalBackupsDirName)
newLocalBackupsDir := filepath.Join(app.DataDir(), LocalBackupsDirName)
revertDataDirChanges := func(revertLocalBackupsDir bool) error {
if revertLocalBackupsDir {
if _, err := os.Stat(newLocalBackupsDir); err == nil {
if err := os.Rename(newLocalBackupsDir, oldLocalBackupsDir); err != nil {
return fmt.Errorf("failed to revert the backups dir change: %w", err)
}
}
}
if err := os.Rename(app.DataDir(), extractedDataDir); err != nil {
return fmt.Errorf("failed to revert the extracted dir change: %w", err)
}
if err := os.Rename(oldTempDataDir, app.DataDir()); err != nil {
return fmt.Errorf("failed to revert old pb_data dir change: %w", err)
}
return nil
}
// restore the local pb_data/backups dir (if any)
if _, err := os.Stat(oldLocalBackupsDir); err == nil {
if err := os.Rename(oldLocalBackupsDir, newLocalBackupsDir); err != nil {
if err := revertDataDirChanges(false); err != nil && app.IsDebug() {
log.Println(err)
}
return fmt.Errorf("failed to move the local pb_data/backups dir: %w", err)
}
}
// restart the app
if err := app.Restart(); err != nil {
if err := revertDataDirChanges(true); err != nil {
panic(err)
}
return fmt.Errorf("failed to restart the app process: %w", err)
}
return nil
}
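
// A minimal usage sketch, assuming app is a *BaseApp and that a backup named
// "pb_backup_20230101000000.zip" (a hypothetical name) already exists in the
// configured backups filesystem:
//
//	if err := app.RestoreBackup(context.Background(), "pb_backup_20230101000000.zip"); err != nil {
//	    log.Println("restore failed:", err)
//	}
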
// initAutobackupHooks registers the autobackup app serve hooks.
// @todo add tests
func (app *BaseApp) initAutobackupHooks() error {
c := cron.New()
loadJob := func() {
c.Stop()
rawSchedule := app.Settings().Backups.Cron
if rawSchedule == "" || !app.IsBootstrapped() {
return
}
c.Add("@autobackup", rawSchedule, func() {
autoPrefix := "@auto_pb_backup_"
name := fmt.Sprintf(
"%s%s.zip",
autoPrefix,
time.Now().UTC().Format("20060102150405"),
)
if err := app.CreateBackup(context.Background(), name); err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
}
maxKeep := app.Settings().Backups.CronMaxKeep
if maxKeep == 0 {
return // no explicit limit
}
fsys, err := app.NewBackupsFilesystem()
if err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
return
}
defer fsys.Close()
files, err := fsys.List(autoPrefix)
if err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
return
}
if maxKeep >= len(files) {
return // nothing to remove
}
// sort desc
sort.Slice(files, func(i, j int) bool {
return files[i].ModTime.After(files[j].ModTime)
})
// keep only the most recent n auto backup files
toRemove := files[maxKeep:]
for _, f := range toRemove {
if err := fsys.Delete(f.Key); err != nil && app.IsDebug() {
// @todo replace after logs generalization
log.Println(err)
}
}
})
// restart the ticker
c.Start()
}
// load on app serve
app.OnBeforeServe().Add(func(e *ServeEvent) error {
loadJob()
return nil
})
// stop the ticker on app termination
app.OnTerminate().Add(func(e *TerminateEvent) error {
c.Stop()
return nil
})
// reload on app settings change
app.OnModelAfterUpdate((&models.Param{}).TableName()).Add(func(e *ModelEvent) error {
if !c.HasStarted() {
return nil // no need to reload as it hasn't been started yet
}
p := e.Model.(*models.Param)
if p == nil || p.Key != models.ParamAppSettings {
return nil
}
loadJob()
return nil
})
return nil
}
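
// For illustration only: the rawSchedule value read from
// app.Settings().Backups.Cron above is a cron expression, so a hypothetical
// daily backup at midnight UTC would be configured with a value such as
// "0 0 * * *", which loadJob passes to c.Add("@autobackup", ...).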
| core/base_backup.go | 1 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.000675161078106612,
0.0001885746169136837,
0.00016351317754015326,
0.0001704997557681054,
0.00008525281009497121
] |
{
"id": 6,
"code_window": [
"\t\t\tform.app.LogsDao().Vacuum()\n",
"\t\t}\n",
"\n",
"\t\t// merge the application settings with the form ones\n",
"\t\treturn form.app.Settings().Merge(form.Settings)\n",
"\t}, interceptors...)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\treturn nil\n"
],
"file_path": "forms/settings_upsert.go",
"type": "replace",
"edit_start_line_idx": 75
} | import{S as Ue,i as je,s as xe,M as Qe,e as s,w as k,b as p,c as J,f as b,g as d,h as o,m as K,x as ce,N as Oe,P as Je,k as Ke,Q as Ie,n as We,t as N,a as V,o as u,d as I,T as Ge,C as Ee,p as Xe,r as W,u as Ye}from"./index-3f8c6248.js";import{S as Ze}from"./SdkTabs-bc729778.js";import{F as et}from"./FieldsQueryParam-1df07d10.js";function Le(r,l,a){const n=r.slice();return n[5]=l[a],n}function Ne(r,l,a){const n=r.slice();return n[5]=l[a],n}function Ve(r,l){let a,n=l[5].code+"",m,_,i,f;function v(){return l[4](l[5])}return{key:r,first:null,c(){a=s("button"),m=k(n),_=p(),b(a,"class","tab-item"),W(a,"active",l[1]===l[5].code),this.first=a},m($,w){d($,a,w),o(a,m),o(a,_),i||(f=Ye(a,"click",v),i=!0)},p($,w){l=$,w&4&&n!==(n=l[5].code+"")&&ce(m,n),w&6&&W(a,"active",l[1]===l[5].code)},d($){$&&u(a),i=!1,f()}}}function ze(r,l){let a,n,m,_;return n=new Qe({props:{content:l[5].body}}),{key:r,first:null,c(){a=s("div"),J(n.$$.fragment),m=p(),b(a,"class","tab-item"),W(a,"active",l[1]===l[5].code),this.first=a},m(i,f){d(i,a,f),K(n,a,null),o(a,m),_=!0},p(i,f){l=i;const v={};f&4&&(v.content=l[5].body),n.$set(v),(!_||f&6)&&W(a,"active",l[1]===l[5].code)},i(i){_||(N(n.$$.fragment,i),_=!0)},o(i){V(n.$$.fragment,i),_=!1},d(i){i&&u(a),I(n)}}}function tt(r){var qe,De;let l,a,n=r[0].name+"",m,_,i,f,v,$,w,B,G,S,z,de,Q,q,ue,X,U=r[0].name+"",Y,pe,fe,j,Z,D,ee,T,te,he,F,C,oe,be,le,me,h,_e,R,ke,ve,$e,ae,ge,se,ye,Se,we,ne,Te,Ce,M,re,H,ie,P,O,y=[],Pe=new Map,Re,E,g=[],Me=new Map,A;$=new Ze({props:{js:`
import PocketBase from 'pocketbase';
const pb = new PocketBase('${r[3]}');
...
const authData = await pb.collection('${(qe=r[0])==null?void 0:qe.name}').authRefresh();
// after the above you can also access the refreshed auth data from the authStore
console.log(pb.authStore.isValid);
console.log(pb.authStore.token);
console.log(pb.authStore.model.id);
`,dart:`
import 'package:pocketbase/pocketbase.dart';
final pb = PocketBase('${r[3]}');
...
final authData = await pb.collection('${(De=r[0])==null?void 0:De.name}').authRefresh();
// after the above you can also access the refreshed auth data from the authStore
print(pb.authStore.isValid);
print(pb.authStore.token);
print(pb.authStore.model.id);
`}}),R=new Qe({props:{content:"?expand=relField1,relField2.subRelField"}}),M=new et({});let x=r[2];const Ae=e=>e[5].code;for(let e=0;e<x.length;e+=1){let t=Ne(r,x,e),c=Ae(t);Pe.set(c,y[e]=Ve(c,t))}let L=r[2];const Be=e=>e[5].code;for(let e=0;e<L.length;e+=1){let t=Le(r,L,e),c=Be(t);Me.set(c,g[e]=ze(c,t))}return{c(){l=s("h3"),a=k("Auth refresh ("),m=k(n),_=k(")"),i=p(),f=s("div"),f.innerHTML=`<p>Returns a new auth response (token and record data) for an
<strong>already authenticated record</strong>.</p>
<p><em>This method is usually called by users on page/screen reload to ensure that the previously stored
data in <code>pb.authStore</code> is still valid and up-to-date.</em></p>`,v=p(),J($.$$.fragment),w=p(),B=s("h6"),B.textContent="API details",G=p(),S=s("div"),z=s("strong"),z.textContent="POST",de=p(),Q=s("div"),q=s("p"),ue=k("/api/collections/"),X=s("strong"),Y=k(U),pe=k("/auth-refresh"),fe=p(),j=s("p"),j.innerHTML="Requires record <code>Authorization:TOKEN</code> header",Z=p(),D=s("div"),D.textContent="Query parameters",ee=p(),T=s("table"),te=s("thead"),te.innerHTML=`<tr><th>Param</th>
<th>Type</th>
<th width="60%">Description</th></tr>`,he=p(),F=s("tbody"),C=s("tr"),oe=s("td"),oe.textContent="expand",be=p(),le=s("td"),le.innerHTML='<span class="label">String</span>',me=p(),h=s("td"),_e=k(`Auto expand record relations. Ex.:
`),J(R.$$.fragment),ke=k(`
Supports up to 6-levels depth nested relations expansion. `),ve=s("br"),$e=k(`
The expanded relations will be appended to the record under the
`),ae=s("code"),ae.textContent="expand",ge=k(" property (eg. "),se=s("code"),se.textContent='"expand": {"relField1": {...}, ...}',ye=k(`).
`),Se=s("br"),we=k(`
Only the relations to which the request user has permissions to `),ne=s("strong"),ne.textContent="view",Te=k(" will be expanded."),Ce=p(),J(M.$$.fragment),re=p(),H=s("div"),H.textContent="Responses",ie=p(),P=s("div"),O=s("div");for(let e=0;e<y.length;e+=1)y[e].c();Re=p(),E=s("div");for(let e=0;e<g.length;e+=1)g[e].c();b(l,"class","m-b-sm"),b(f,"class","content txt-lg m-b-sm"),b(B,"class","m-b-xs"),b(z,"class","label label-primary"),b(Q,"class","content"),b(j,"class","txt-hint txt-sm txt-right"),b(S,"class","alert alert-success"),b(D,"class","section-title"),b(T,"class","table-compact table-border m-b-base"),b(H,"class","section-title"),b(O,"class","tabs-header compact left"),b(E,"class","tabs-content"),b(P,"class","tabs")},m(e,t){d(e,l,t),o(l,a),o(l,m),o(l,_),d(e,i,t),d(e,f,t),d(e,v,t),K($,e,t),d(e,w,t),d(e,B,t),d(e,G,t),d(e,S,t),o(S,z),o(S,de),o(S,Q),o(Q,q),o(q,ue),o(q,X),o(X,Y),o(q,pe),o(S,fe),o(S,j),d(e,Z,t),d(e,D,t),d(e,ee,t),d(e,T,t),o(T,te),o(T,he),o(T,F),o(F,C),o(C,oe),o(C,be),o(C,le),o(C,me),o(C,h),o(h,_e),K(R,h,null),o(h,ke),o(h,ve),o(h,$e),o(h,ae),o(h,ge),o(h,se),o(h,ye),o(h,Se),o(h,we),o(h,ne),o(h,Te),o(F,Ce),K(M,F,null),d(e,re,t),d(e,H,t),d(e,ie,t),d(e,P,t),o(P,O);for(let c=0;c<y.length;c+=1)y[c]&&y[c].m(O,null);o(P,Re),o(P,E);for(let c=0;c<g.length;c+=1)g[c]&&g[c].m(E,null);A=!0},p(e,[t]){var Fe,He;(!A||t&1)&&n!==(n=e[0].name+"")&&ce(m,n);const c={};t&9&&(c.js=`
import PocketBase from 'pocketbase';
const pb = new PocketBase('${e[3]}');
...
const authData = await pb.collection('${(Fe=e[0])==null?void 0:Fe.name}').authRefresh();
// after the above you can also access the refreshed auth data from the authStore
console.log(pb.authStore.isValid);
console.log(pb.authStore.token);
console.log(pb.authStore.model.id);
`),t&9&&(c.dart=`
import 'package:pocketbase/pocketbase.dart';
final pb = PocketBase('${e[3]}');
...
final authData = await pb.collection('${(He=e[0])==null?void 0:He.name}').authRefresh();
// after the above you can also access the refreshed auth data from the authStore
print(pb.authStore.isValid);
print(pb.authStore.token);
print(pb.authStore.model.id);
`),$.$set(c),(!A||t&1)&&U!==(U=e[0].name+"")&&ce(Y,U),t&6&&(x=e[2],y=Oe(y,t,Ae,1,e,x,Pe,O,Je,Ve,null,Ne)),t&6&&(L=e[2],Ke(),g=Oe(g,t,Be,1,e,L,Me,E,Ie,ze,null,Le),We())},i(e){if(!A){N($.$$.fragment,e),N(R.$$.fragment,e),N(M.$$.fragment,e);for(let t=0;t<L.length;t+=1)N(g[t]);A=!0}},o(e){V($.$$.fragment,e),V(R.$$.fragment,e),V(M.$$.fragment,e);for(let t=0;t<g.length;t+=1)V(g[t]);A=!1},d(e){e&&u(l),e&&u(i),e&&u(f),e&&u(v),I($,e),e&&u(w),e&&u(B),e&&u(G),e&&u(S),e&&u(Z),e&&u(D),e&&u(ee),e&&u(T),I(R),I(M),e&&u(re),e&&u(H),e&&u(ie),e&&u(P);for(let t=0;t<y.length;t+=1)y[t].d();for(let t=0;t<g.length;t+=1)g[t].d()}}}function ot(r,l,a){let n,{collection:m=new Ge}=l,_=200,i=[];const f=v=>a(1,_=v.code);return r.$$set=v=>{"collection"in v&&a(0,m=v.collection)},r.$$.update=()=>{r.$$.dirty&1&&a(2,i=[{code:200,body:JSON.stringify({token:"JWT_TOKEN",record:Ee.dummyCollectionRecord(m)},null,2)},{code:401,body:`
{
"code": 401,
"message": "The request requires valid record authorization token to be set.",
"data": {}
}
`},{code:403,body:`
{
"code": 403,
"message": "The authorized record model is not allowed to perform this action.",
"data": {}
}
`},{code:404,body:`
{
"code": 404,
"message": "Missing auth record context.",
"data": {}
}
`}])},a(3,n=Ee.getApiExampleUrl(Xe.baseUrl)),[m,_,i,n,f]}class nt extends Ue{constructor(l){super(),je(this,l,ot,tt,xe,{collection:0})}}export{nt as default};
| ui/dist/assets/AuthRefreshDocs-0cde32ad.js | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.0002803742536343634,
0.0001830350374802947,
0.00016782739839982241,
0.00017250014934688807,
0.000034491335100028664
] |
{
"id": 6,
"code_window": [
"\t\t\tform.app.LogsDao().Vacuum()\n",
"\t\t}\n",
"\n",
"\t\t// merge the application settings with the form ones\n",
"\t\treturn form.app.Settings().Merge(form.Settings)\n",
"\t}, interceptors...)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\treturn nil\n"
],
"file_path": "forms/settings_upsert.go",
"type": "replace",
"edit_start_line_idx": 75
} | /**
* TinyMCE version 6.4.1 (2023-03-29)
*/
!function(){"use strict";var e=tinymce.util.Tools.resolve("tinymce.PluginManager");const t=e=>t=>t.options.get(e),a=t("insertdatetime_dateformat"),r=t("insertdatetime_timeformat"),n=t("insertdatetime_formats"),s=t("insertdatetime_element"),i="Sun Mon Tue Wed Thu Fri Sat Sun".split(" "),o="Sunday Monday Tuesday Wednesday Thursday Friday Saturday Sunday".split(" "),l="Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec".split(" "),m="January February March April May June July August September October November December".split(" "),c=(e,t)=>{if((e=""+e).length<t)for(let a=0;a<t-e.length;a++)e="0"+e;return e},d=(e,t,a=new Date)=>(t=(t=(t=(t=(t=(t=(t=(t=(t=(t=(t=(t=(t=(t=(t=t.replace("%D","%m/%d/%Y")).replace("%r","%I:%M:%S %p")).replace("%Y",""+a.getFullYear())).replace("%y",""+a.getYear())).replace("%m",c(a.getMonth()+1,2))).replace("%d",c(a.getDate(),2))).replace("%H",""+c(a.getHours(),2))).replace("%M",""+c(a.getMinutes(),2))).replace("%S",""+c(a.getSeconds(),2))).replace("%I",""+((a.getHours()+11)%12+1))).replace("%p",a.getHours()<12?"AM":"PM")).replace("%B",""+e.translate(m[a.getMonth()]))).replace("%b",""+e.translate(l[a.getMonth()]))).replace("%A",""+e.translate(o[a.getDay()]))).replace("%a",""+e.translate(i[a.getDay()]))).replace("%%","%"),u=(e,t)=>{if(s(e)){const a=d(e,t);let r;r=/%[HMSIp]/.test(t)?d(e,"%Y-%m-%dT%H:%M"):d(e,"%Y-%m-%d");const n=e.dom.getParent(e.selection.getStart(),"time");n?((e,t,a,r)=>{const n=e.dom.create("time",{datetime:a},r);e.dom.replace(n,t),e.selection.select(n,!0),e.selection.collapse(!1)})(e,n,r,a):e.insertContent('<time datetime="'+r+'">'+a+"</time>")}else e.insertContent(d(e,t))};var p=tinymce.util.Tools.resolve("tinymce.util.Tools");e.add("insertdatetime",(e=>{(e=>{const t=e.options.register;t("insertdatetime_dateformat",{processor:"string",default:e.translate("%Y-%m-%d")}),t("insertdatetime_timeformat",{processor:"string",default:e.translate("%H:%M:%S")}),t("insertdatetime_formats",{processor:"string[]",default:["%H:%M:%S","%Y-%m-%d","%I:%M:%S %p","%D"]}),t("insertdatetime_element",{processor:"boolean",default:!1})})(e),(e=>{e.addCommand("mceInsertDate",((t,r)=>{u(e,null!=r?r:a(e))})),e.addCommand("mceInsertTime",((t,a)=>{u(e,null!=a?a:r(e))}))})(e),(e=>{const t=n(e),a=(e=>{let t=e;return{get:()=>t,set:e=>{t=e}}})((e=>{const t=n(e);return t.length>0?t[0]:r(e)})(e)),s=t=>e.execCommand("mceInsertDate",!1,t);e.ui.registry.addSplitButton("insertdatetime",{icon:"insert-time",tooltip:"Insert date/time",select:e=>e===a.get(),fetch:a=>{a(p.map(t,(t=>({type:"choiceitem",text:d(e,t),value:t}))))},onAction:e=>{s(a.get())},onItemAction:(e,t)=>{a.set(t),s(t)}});const i=e=>()=>{a.set(e),s(e)};e.ui.registry.addNestedMenuItem("insertdatetime",{icon:"insert-time",text:"Date/time",getSubmenuItems:()=>p.map(t,(t=>({type:"menuitem",text:d(e,t),onAction:i(t)})))})})(e)}))}(); | ui/dist/libs/tinymce/plugins/insertdatetime/plugin.min.js | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.00016788372886367142,
0.00016788372886367142,
0.00016788372886367142,
0.00016788372886367142,
0
] |
{
"id": 6,
"code_window": [
"\t\t\tform.app.LogsDao().Vacuum()\n",
"\t\t}\n",
"\n",
"\t\t// merge the application settings with the form ones\n",
"\t\treturn form.app.Settings().Merge(form.Settings)\n",
"\t}, interceptors...)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\treturn nil\n"
],
"file_path": "forms/settings_upsert.go",
"type": "replace",
"edit_start_line_idx": 75
} | package forms_test
import (
"errors"
"testing"
"github.com/pocketbase/pocketbase/forms"
"github.com/pocketbase/pocketbase/models"
"github.com/pocketbase/pocketbase/tests"
)
func TestRecordPasswordLoginValidateAndSubmit(t *testing.T) {
testApp, _ := tests.NewTestApp()
defer testApp.Cleanup()
scenarios := []struct {
testName string
collectionName string
identity string
password string
expectError bool
}{
{
"empty data",
"users",
"",
"",
true,
},
// username
{
"existing username + wrong password",
"users",
"users75657",
"invalid",
true,
},
{
"missing username + valid password",
"users",
"clients57772", // not in the "users" collection
"1234567890",
true,
},
{
"existing username + valid password but in restricted username auth collection",
"clients",
"clients57772",
"1234567890",
true,
},
{
"existing username + valid password but in restricted username and email auth collection",
"nologin",
"test_username",
"1234567890",
true,
},
{
"existing username + valid password",
"users",
"users75657",
"1234567890",
false,
},
// email
{
"existing email + wrong password",
"users",
"[email protected]",
"invalid",
true,
},
{
"missing email + valid password",
"users",
"[email protected]",
"1234567890",
true,
},
{
"existing username + valid password but in restricted username auth collection",
"clients",
"[email protected]",
"1234567890",
false,
},
{
"existing username + valid password but in restricted username and email auth collection",
"nologin",
"[email protected]",
"1234567890",
true,
},
{
"existing email + valid password",
"users",
"[email protected]",
"1234567890",
false,
},
}
for _, s := range scenarios {
authCollection, err := testApp.Dao().FindCollectionByNameOrId(s.collectionName)
if err != nil {
t.Errorf("[%s] Failed to fetch auth collection: %v", s.testName, err)
}
form := forms.NewRecordPasswordLogin(testApp, authCollection)
form.Identity = s.identity
form.Password = s.password
record, err := form.Submit()
hasErr := err != nil
if hasErr != s.expectError {
t.Errorf("[%s] Expected hasErr to be %v, got %v (%v)", s.testName, s.expectError, hasErr, err)
continue
}
if hasErr {
continue
}
if record.Email() != s.identity && record.Username() != s.identity {
t.Errorf("[%s] Expected record with identity %q, got \n%v", s.testName, s.identity, record)
}
}
}
func TestRecordPasswordLoginInterceptors(t *testing.T) {
testApp, _ := tests.NewTestApp()
defer testApp.Cleanup()
authCollection, err := testApp.Dao().FindCollectionByNameOrId("users")
if err != nil {
t.Fatal(err)
}
form := forms.NewRecordPasswordLogin(testApp, authCollection)
form.Identity = "[email protected]"
form.Password = "123456"
var interceptorRecord *models.Record
testErr := errors.New("test_error")
interceptor1Called := false
interceptor1 := func(next forms.InterceptorNextFunc[*models.Record]) forms.InterceptorNextFunc[*models.Record] {
return func(record *models.Record) error {
interceptor1Called = true
return next(record)
}
}
interceptor2Called := false
interceptor2 := func(next forms.InterceptorNextFunc[*models.Record]) forms.InterceptorNextFunc[*models.Record] {
return func(record *models.Record) error {
interceptorRecord = record
interceptor2Called = true
return testErr
}
}
_, submitErr := form.Submit(interceptor1, interceptor2)
if submitErr != testErr {
t.Fatalf("Expected submitError %v, got %v", testErr, submitErr)
}
if !interceptor1Called {
t.Fatalf("Expected interceptor1 to be called")
}
if !interceptor2Called {
t.Fatalf("Expected interceptor2 to be called")
}
if interceptorRecord == nil || interceptorRecord.Email() != form.Identity {
t.Fatalf("Expected auth Record model with email %s, got %v", form.Identity, interceptorRecord)
}
}
| forms/record_password_login_test.go | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.004712425172328949,
0.0006469867075793445,
0.00016602262621745467,
0.0001696838007774204,
0.0013380306772887707
] |
{
"id": 0,
"code_window": [
"statement error value of \"expire_after\" must be an interval\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = ' xx invalid interval xx')\n",
"\n"
],
"labels": [
"replace",
"replace",
"keep"
],
"after_edit": [
"statement error value of \"ttl_expire_after\" must be an interval\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (ttl_expire_after = ' xx invalid interval xx')\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 0
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"bytes"
"context"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catformat"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
)
type shouldOmitFKClausesFromCreate int
const (
_ shouldOmitFKClausesFromCreate = iota
// OmitFKClausesFromCreate will not include any foreign key information in the
// create statement.
OmitFKClausesFromCreate
// IncludeFkClausesInCreate will include foreign key information in the create
// statement, and error if a FK cannot be resolved.
IncludeFkClausesInCreate
// OmitMissingFKClausesFromCreate will include foreign key information only if they
// can be resolved. If not, it will ignore those constraints.
// This is used in the case when showing the create statement for
// tables stored in backups. Not all relevant tables may have been
// included in the back up, so some foreign key information may be
// impossible to retrieve.
OmitMissingFKClausesFromCreate
)
// ShowCreateDisplayOptions is a container struct holding the options that
// ShowCreate uses to determine how much information should be included in the
// CREATE statement.
type ShowCreateDisplayOptions struct {
FKDisplayMode shouldOmitFKClausesFromCreate
// Comment resolution requires looking up table data from system.comments
// table. This is sometimes not possible. For example, in the context of a
// SHOW BACKUP which may resolve the create statement, there is no mechanism
// to read any table data from the backup (nor is there a guarantee that the
// system.comments table is included in the backup at all).
IgnoreComments bool
}
// ShowCreateTable returns a valid SQL representation of the CREATE
// TABLE statement used to create the given table.
//
// The names of the tables referenced by foreign keys are prefixed by their own
// database name unless it is equal to the given dbPrefix. This allows us to
// elide the prefix when the given table references other tables in the
// current database.
func ShowCreateTable(
ctx context.Context,
p PlanHookState,
tn *tree.TableName,
dbPrefix string,
desc catalog.TableDescriptor,
lCtx simpleSchemaResolver,
displayOptions ShowCreateDisplayOptions,
) (string, error) {
a := &tree.DatumAlloc{}
f := p.ExtendedEvalContext().FmtCtx(tree.FmtSimple)
f.WriteString("CREATE ")
if desc.IsTemporary() {
f.WriteString("TEMP ")
}
f.WriteString("TABLE ")
f.FormatNode(tn)
f.WriteString(" (")
// Inaccessible columns are not displayed in SHOW CREATE TABLE.
for i, col := range desc.AccessibleColumns() {
if i != 0 {
f.WriteString(",")
}
f.WriteString("\n\t")
colstr, err := schemaexpr.FormatColumnForDisplay(
ctx, desc, col, &p.RunParams(ctx).p.semaCtx, p.RunParams(ctx).p.SessionData(),
)
if err != nil {
return "", err
}
f.WriteString(colstr)
}
if desc.IsPhysicalTable() {
f.WriteString(",\n\tCONSTRAINT ")
formatQuoteNames(&f.Buffer, desc.GetPrimaryIndex().GetName())
f.WriteString(" ")
f.WriteString(tabledesc.PrimaryKeyString(desc))
}
// TODO (lucy): Possibly include FKs in the mutations list here, or else
// exclude check mutations below, for consistency.
if displayOptions.FKDisplayMode != OmitFKClausesFromCreate {
if err := desc.ForeachOutboundFK(func(fk *descpb.ForeignKeyConstraint) error {
fkCtx := tree.NewFmtCtx(tree.FmtSimple)
fkCtx.WriteString(",\n\tCONSTRAINT ")
fkCtx.FormatNameP(&fk.Name)
fkCtx.WriteString(" ")
// Passing in EmptySearchPath causes the schema name to show up in the
// constraint definition, which we need for `cockroach dump` output to be
// usable.
if err := showForeignKeyConstraint(
&fkCtx.Buffer,
dbPrefix,
desc,
fk,
lCtx,
sessiondata.EmptySearchPath,
); err != nil {
if displayOptions.FKDisplayMode == OmitMissingFKClausesFromCreate {
return nil
}
// When FKDisplayMode == IncludeFkClausesInCreate.
return err
}
f.WriteString(fkCtx.String())
return nil
}); err != nil {
return "", err
}
}
for _, idx := range desc.PublicNonPrimaryIndexes() {
// Showing the primary index is handled above.
// Build the PARTITION BY clause.
var partitionBuf bytes.Buffer
if err := ShowCreatePartitioning(
a, p.ExecCfg().Codec, desc, idx, idx.GetPartitioning(), &partitionBuf, 1 /* indent */, 0, /* colOffset */
); err != nil {
return "", err
}
f.WriteString(",\n\t")
idxStr, err := catformat.IndexForDisplay(
ctx,
desc,
&descpb.AnonymousTable,
idx,
partitionBuf.String(),
tree.FmtSimple,
p.RunParams(ctx).p.SemaCtx(),
p.RunParams(ctx).p.SessionData(),
catformat.IndexDisplayDefOnly,
)
if err != nil {
return "", err
}
f.WriteString(idxStr)
}
// Create the FAMILY and CONSTRAINTs of the CREATE statement
showFamilyClause(desc, f)
if err := showConstraintClause(ctx, desc, &p.RunParams(ctx).p.semaCtx, p.RunParams(ctx).p.SessionData(), f); err != nil {
return "", err
}
if err := ShowCreatePartitioning(
a, p.ExecCfg().Codec, desc, desc.GetPrimaryIndex(), desc.GetPrimaryIndex().GetPartitioning(), &f.Buffer, 0 /* indent */, 0, /* colOffset */
); err != nil {
return "", err
}
if ttl := desc.GetRowLevelTTL(); ttl != nil {
f.Buffer.WriteString(` WITH (expire_after = `)
f.Buffer.WriteString(ttl.DurationExpr)
f.Buffer.WriteString(`)`)
}
if err := showCreateLocality(desc, f); err != nil {
return "", err
}
if !displayOptions.IgnoreComments {
if err := showComments(tn, desc, selectComment(ctx, p, desc.GetID()), &f.Buffer); err != nil {
return "", err
}
}
return f.CloseAndGetString(), nil
}
// formatQuoteNames quotes and adds commas between names.
func formatQuoteNames(buf *bytes.Buffer, names ...string) {
f := tree.NewFmtCtx(tree.FmtSimple)
for i := range names {
if i > 0 {
f.WriteString(", ")
}
f.FormatNameP(&names[i])
}
buf.WriteString(f.CloseAndGetString())
}
// ShowCreate returns a valid SQL representation of the CREATE
// statement used to create the descriptor passed in.
//
// The names of the tables referenced by foreign keys are prefixed by their own
// database name unless it is equal to the given dbPrefix. This allows us to
// elide the prefix when the given table references other tables in the current
// database.
func (p *planner) ShowCreate(
ctx context.Context,
dbPrefix string,
allDescs []descpb.Descriptor,
desc catalog.TableDescriptor,
displayOptions ShowCreateDisplayOptions,
) (string, error) {
var stmt string
var err error
tn := tree.MakeUnqualifiedTableName(tree.Name(desc.GetName()))
if desc.IsView() {
stmt, err = ShowCreateView(ctx, &p.RunParams(ctx).p.semaCtx, p.RunParams(ctx).p.SessionData(), &tn, desc)
} else if desc.IsSequence() {
stmt, err = ShowCreateSequence(ctx, &tn, desc)
} else {
lCtx, lErr := newInternalLookupCtxFromDescriptorProtos(
ctx, allDescs, nil, /* want all tables */
)
if lErr != nil {
return "", lErr
}
// Overwrite desc with hydrated descriptor.
desc, err = lCtx.getTableByID(desc.GetID())
if err != nil {
return "", err
}
stmt, err = ShowCreateTable(ctx, p, &tn, dbPrefix, desc, lCtx, displayOptions)
}
return stmt, err
}
| pkg/sql/show_create.go | 1 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.9844451546669006,
0.20524032413959503,
0.00017437266069464386,
0.010218174196779728,
0.33722740411758423
] |
{
"id": 0,
"code_window": [
"statement error value of \"expire_after\" must be an interval\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = ' xx invalid interval xx')\n",
"\n"
],
"labels": [
"replace",
"replace",
"keep"
],
"after_edit": [
"statement error value of \"ttl_expire_after\" must be an interval\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (ttl_expire_after = ' xx invalid interval xx')\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 0
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// Package hash defines an Analyzer that detects correct use of hash.Hash.
package hash
import (
"go/ast"
"go/types"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
)
// Doc documents this pass.
const Doc = `check for correct use of hash.Hash`
// Analyzer defines this pass.
var Analyzer = &analysis.Analyzer{
Name: "hash",
Doc: Doc,
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
}
// hashChecker assures that the hash.Hash interface is not misused. A common
// mistake is to assume that the Sum function returns the hash of its input,
// like so:
//
// hashedBytes := sha256.New().Sum(inputBytes)
//
// In fact, the parameter to Sum is not the bytes to be hashed, but a slice that
// will be used as output in case the caller wants to avoid an allocation. In
// the example above, hashedBytes is not the SHA-256 hash of inputBytes, but
// the concatenation of inputBytes with the hash of the empty string.
//
// Correct uses of the hash.Hash interface are as follows:
//
// h := sha256.New()
// h.Write(inputBytes)
// hashedBytes := h.Sum(nil)
//
// h := sha256.New()
// h.Write(inputBytes)
// var hashedBytes [sha256.Size]byte
// h.Sum(hashedBytes[:0])
//
// To differentiate between correct and incorrect usages, hashChecker applies a
// simple heuristic: it flags calls to Sum where a) the parameter is non-nil and
// b) the return value is used.
//
// The hash.Hash interface may be remedied in Go 2. See golang/go#21070.
func run(pass *analysis.Pass) (interface{}, error) {
selectorIsHash := func(s *ast.SelectorExpr) bool {
tv, ok := pass.TypesInfo.Types[s.X]
if !ok {
return false
}
named, ok := tv.Type.(*types.Named)
if !ok {
return false
}
if named.Obj().Type().String() != "hash.Hash" {
return false
}
return true
}
stack := make([]ast.Node, 0, 32)
forAllFiles(pass.Files, func(n ast.Node) bool {
if n == nil {
stack = stack[:len(stack)-1] // pop
return true
}
stack = append(stack, n) // push
// Find a call to hash.Hash.Sum.
selExpr, ok := n.(*ast.SelectorExpr)
if !ok {
return true
}
if selExpr.Sel.Name != "Sum" {
return true
}
if !selectorIsHash(selExpr) {
return true
}
callExpr, ok := stack[len(stack)-2].(*ast.CallExpr)
if !ok {
return true
}
if len(callExpr.Args) != 1 {
return true
}
// We have a valid call to hash.Hash.Sum.
// Is the argument nil?
var nilArg bool
if id, ok := callExpr.Args[0].(*ast.Ident); ok && id.Name == "nil" {
nilArg = true
}
// Is the return value unused?
var retUnused bool
Switch:
switch t := stack[len(stack)-3].(type) {
case *ast.AssignStmt:
for i := range t.Rhs {
if t.Rhs[i] == stack[len(stack)-2] {
if id, ok := t.Lhs[i].(*ast.Ident); ok && id.Name == "_" {
// Assigning to the blank identifier does not count as using the
// return value.
retUnused = true
}
break Switch
}
}
panic("unreachable")
case *ast.ExprStmt:
// An expression statement means the return value is unused.
retUnused = true
default:
}
if !nilArg && !retUnused {
pass.Reportf(callExpr.Pos(), "probable misuse of hash.Hash.Sum: "+
"provide parameter or use return value, but not both")
}
return true
})
return nil, nil
}
func forAllFiles(files []*ast.File, fn func(node ast.Node) bool) {
for _, f := range files {
ast.Inspect(f, fn)
}
}
| pkg/testutils/lint/passes/hash/hash.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0012760254321619868,
0.00027652588323689997,
0.0001718711864668876,
0.0001882323413155973,
0.0002709695545490831
] |
{
"id": 0,
"code_window": [
"statement error value of \"expire_after\" must be an interval\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = ' xx invalid interval xx')\n",
"\n"
],
"labels": [
"replace",
"replace",
"keep"
],
"after_edit": [
"statement error value of \"ttl_expire_after\" must be an interval\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (ttl_expire_after = ' xx invalid interval xx')\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 0
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package migrations_test
import (
"context"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/migration/migrations"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
)
func TestAlterSystemProtectedTimestampRecordsTable(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
clusterArgs := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
DisableAutomaticVersionUpgrade: 1,
BinaryVersionOverride: clusterversion.ByKey(
clusterversion.AlterSystemProtectedTimestampAddColumn - 1),
},
},
},
}
var (
ctx = context.Background()
tc = testcluster.StartTestCluster(t, 1, clusterArgs)
s = tc.Server(0)
sqlDB = tc.ServerConn(0)
)
defer tc.Stopper().Stop(ctx)
var (
validationSchemas = []migrations.Schema{
{Name: "target", ValidationFn: migrations.HasColumn},
}
)
// Inject the old copy of the descriptor.
migrations.InjectLegacyTable(ctx, t, s, systemschema.ProtectedTimestampsRecordsTable, getDeprecatedProtectedTimestampRecordsDescriptor)
// Validate that the protected timestamp records table has the old schema.
migrations.ValidateSchemaExists(
ctx,
t,
s,
sqlDB,
keys.ProtectedTimestampsRecordsTableID,
systemschema.ProtectedTimestampsRecordsTable,
[]string{},
validationSchemas,
false, /* expectExists */
)
// Run the migration.
migrations.Migrate(
t,
sqlDB,
clusterversion.AlterSystemProtectedTimestampAddColumn,
nil, /* done */
false, /* expectError */
)
// Validate that the table has new schema.
migrations.ValidateSchemaExists(
ctx,
t,
s,
sqlDB,
keys.ProtectedTimestampsRecordsTableID,
systemschema.ProtectedTimestampsRecordsTable,
[]string{},
validationSchemas,
true, /* expectExists */
)
}
// getDeprecatedProtectedTimestampRecordsDescriptor returns the
// system.pts_records table descriptor that was being used before adding a new
// column in the current version.
func getDeprecatedProtectedTimestampRecordsDescriptor() *descpb.TableDescriptor {
falseBoolString := "false"
return &descpb.TableDescriptor{
Name: "protected_ts_records",
ID: keys.ProtectedTimestampsRecordsTableID,
ParentID: keys.SystemDatabaseID,
UnexposedParentSchemaID: keys.PublicSchemaID,
Version: 1,
Columns: []descpb.ColumnDescriptor{
{Name: "id", ID: 1, Type: types.Uuid},
{Name: "ts", ID: 2, Type: types.Decimal},
{Name: "meta_type", ID: 3, Type: types.String},
{Name: "meta", ID: 4, Type: types.Bytes, Nullable: true},
{Name: "num_spans", ID: 5, Type: types.Int},
{Name: "spans", ID: 6, Type: types.Bytes},
{Name: "verified", ID: 7, Type: types.Bool, DefaultExpr: &falseBoolString},
},
NextColumnID: 8,
Families: []descpb.ColumnFamilyDescriptor{
{
Name: "primary",
ColumnNames: []string{"id", "ts", "meta_type", "meta", "num_spans", "spans", "verified"},
ColumnIDs: []descpb.ColumnID{1, 2, 3, 4, 5, 6, 7},
},
},
NextFamilyID: 1,
PrimaryIndex: descpb.IndexDescriptor{
Name: "primary",
ID: 1,
Unique: true,
KeyColumnNames: []string{"id"},
KeyColumnIDs: []descpb.ColumnID{1},
KeyColumnDirections: []descpb.IndexDescriptor_Direction{
descpb.IndexDescriptor_ASC,
},
},
NextIndexID: 2,
Privileges: descpb.NewCustomSuperuserPrivilegeDescriptor(privilege.ReadWriteData, security.NodeUserName()),
NextMutationID: 1,
FormatVersion: 3,
}
}
| pkg/migration/migrations/alter_table_protected_timestamp_records_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.005196652375161648,
0.0012283732648938894,
0.00016895905719138682,
0.00024445599410682917,
0.001525073079392314
] |
{
"id": 0,
"code_window": [
"statement error value of \"expire_after\" must be an interval\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = ' xx invalid interval xx')\n",
"\n"
],
"labels": [
"replace",
"replace",
"keep"
],
"after_edit": [
"statement error value of \"ttl_expire_after\" must be an interval\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (ttl_expire_after = ' xx invalid interval xx')\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 0
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "pgconnect",
srcs = ["pgconnect.go"],
importpath = "github.com/cockroachdb/cockroach/pkg/cmd/cmp-protocol/pgconnect",
visibility = ["//visibility:public"],
deps = [
"//pkg/sql/pgwire/pgwirebase",
"//pkg/util/ctxgroup",
"@com_github_cockroachdb_errors//:errors",
"@com_github_jackc_pgproto3_v2//:pgproto3",
],
)
| pkg/cmd/cmp-protocol/pgconnect/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0002132093213731423,
0.00019228577730245888,
0.0001713622477836907,
0.00019228577730245888,
0.000020923536794725806
] |
{
"id": 1,
"code_window": [
"\n",
"statement error value of \"expire_after\" must be at least zero\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = '-10 minutes')\n",
"\n",
"statement ok\n",
"CREATE TABLE tbl (\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"statement error value of \"ttl_expire_after\" must be at least zero\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (ttl_expire_after = '-10 minutes')\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 3
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package paramparse
import (
"context"
"github.com/cockroachdb/cockroach/pkg/geo/geoindex"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
"github.com/cockroachdb/errors"
)
// SetStorageParameters sets the given storage parameters using the
// given observer.
func SetStorageParameters(
ctx context.Context,
semaCtx *tree.SemaContext,
evalCtx *tree.EvalContext,
params tree.StorageParams,
paramObserver StorageParamObserver,
) error {
for _, sp := range params {
key := string(sp.Key)
if sp.Value == nil {
return pgerror.Newf(pgcode.InvalidParameterValue, "storage parameter %q requires a value", key)
}
// Expressions may be an unresolved name.
// Cast these as strings.
expr := UnresolvedNameToStrVal(sp.Value)
// Convert the expressions to a datum.
typedExpr, err := tree.TypeCheck(ctx, expr, semaCtx, types.Any)
if err != nil {
return err
}
if typedExpr, err = evalCtx.NormalizeExpr(typedExpr); err != nil {
return err
}
datum, err := typedExpr.Eval(evalCtx)
if err != nil {
return err
}
if err := paramObserver.onSet(ctx, semaCtx, evalCtx, key, datum); err != nil {
return err
}
}
return paramObserver.runPostChecks()
}
// ResetStorageParameters sets the given storage parameters using the
// given observer.
func ResetStorageParameters(
ctx context.Context,
evalCtx *tree.EvalContext,
params tree.NameList,
paramObserver StorageParamObserver,
) error {
for _, p := range params {
if err := paramObserver.onReset(evalCtx, string(p)); err != nil {
return err
}
}
return paramObserver.runPostChecks()
}
// StorageParamObserver applies a storage parameter to an underlying item.
type StorageParamObserver interface {
// onSet is called during CREATE [TABLE | INDEX] ... WITH (...) or
// ALTER [TABLE | INDEX] ... WITH (...).
onSet(ctx context.Context, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error
// onReset is called during ALTER [TABLE | INDEX] ... RESET (...)
onReset(evalCtx *tree.EvalContext, key string) error
// runPostChecks is called after all storage parameters have been set.
// This allows checking whether multiple storage parameters together
// form a valid configuration.
runPostChecks() error
}
// TableStorageParamObserver observes storage parameters for tables.
type TableStorageParamObserver struct {
tableDesc *tabledesc.Mutable
}
// NewTableStorageParamObserver returns a new TableStorageParamObserver.
func NewTableStorageParamObserver(tableDesc *tabledesc.Mutable) *TableStorageParamObserver {
return &TableStorageParamObserver{tableDesc: tableDesc}
}
var _ StorageParamObserver = (*TableStorageParamObserver)(nil)
// runPostChecks implements the StorageParamObserver interface.
func (po *TableStorageParamObserver) runPostChecks() error {
if err := tabledesc.ValidateRowLevelTTL(po.tableDesc.GetRowLevelTTL()); err != nil {
return err
}
return nil
}
type tableParam struct {
onSet func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error
onReset func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error
}
var tableParams = map[string]tableParam{
`fillfactor`: {
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
return setFillFactorStorageParam(evalCtx, key, datum)
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
// Operation is a no-op so do nothing.
return nil
},
},
`autovacuum_enabled`: {
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
var boolVal bool
if stringVal, err := DatumAsString(evalCtx, key, datum); err == nil {
boolVal, err = ParseBoolVar(key, stringVal)
if err != nil {
return err
}
} else {
s, err := GetSingleBool(key, datum)
if err != nil {
return err
}
boolVal = bool(*s)
}
if !boolVal && evalCtx != nil {
evalCtx.ClientNoticeSender.BufferClientNotice(
evalCtx.Context,
pgnotice.Newf(`storage parameter "%s = %s" is ignored`, key, datum.String()),
)
}
return nil
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
// Operation is a no-op so do nothing.
return nil
},
},
`expire_after`: {
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
var d *tree.DInterval
if stringVal, err := DatumAsString(evalCtx, key, datum); err == nil {
d, err = tree.ParseDInterval(evalCtx.SessionData().GetIntervalStyle(), stringVal)
if err != nil || d == nil {
return pgerror.Newf(
pgcode.InvalidParameterValue,
`value of "expire_after" must be an interval`,
)
}
} else {
var ok bool
d, ok = datum.(*tree.DInterval)
if !ok || d == nil {
return pgerror.Newf(
pgcode.InvalidParameterValue,
`value of "expire_after" must be an interval`,
)
}
}
if d.Duration.Compare(duration.MakeDuration(0, 0, 0)) < 0 {
return pgerror.Newf(
pgcode.InvalidParameterValue,
`value of "expire_after" must be at least zero`,
)
}
if po.tableDesc.RowLevelTTL == nil {
po.tableDesc.RowLevelTTL = &descpb.TableDescriptor_RowLevelTTL{}
}
po.tableDesc.RowLevelTTL.DurationExpr = tree.Serialize(d)
return nil
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
po.tableDesc.RowLevelTTL = nil
return nil
},
},
}
func init() {
for _, param := range []string{
`toast_tuple_target`,
`parallel_workers`,
`toast.autovacuum_enabled`,
`autovacuum_vacuum_threshold`,
`toast.autovacuum_vacuum_threshold`,
`autovacuum_vacuum_scale_factor`,
`toast.autovacuum_vacuum_scale_factor`,
`autovacuum_analyze_threshold`,
`autovacuum_analyze_scale_factor`,
`autovacuum_vacuum_cost_delay`,
`toast.autovacuum_vacuum_cost_delay`,
`autovacuum_vacuum_cost_limit`,
`autovacuum_freeze_min_age`,
`toast.autovacuum_freeze_min_age`,
`autovacuum_freeze_max_age`,
`toast.autovacuum_freeze_max_age`,
`autovacuum_freeze_table_age`,
`toast.autovacuum_freeze_table_age`,
`autovacuum_multixact_freeze_min_age`,
`toast.autovacuum_multixact_freeze_min_age`,
`autovacuum_multixact_freeze_max_age`,
`toast.autovacuum_multixact_freeze_max_age`,
`autovacuum_multixact_freeze_table_age`,
`toast.autovacuum_multixact_freeze_table_age`,
`log_autovacuum_min_duration`,
`toast.log_autovacuum_min_duration`,
`user_catalog_table`,
} {
tableParams[param] = tableParam{
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
return unimplemented.NewWithIssuef(43299, "storage parameter %q", key)
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
return nil
},
}
}
}
// onSet implements the StorageParamObserver interface.
func (po *TableStorageParamObserver) onSet(
ctx context.Context,
semaCtx *tree.SemaContext,
evalCtx *tree.EvalContext,
key string,
datum tree.Datum,
) error {
if p, ok := tableParams[key]; ok {
return p.onSet(ctx, po, semaCtx, evalCtx, key, datum)
}
return pgerror.Newf(pgcode.InvalidParameterValue, "invalid storage parameter %q", key)
}
// onReset implements the StorageParamObserver interface.
func (po *TableStorageParamObserver) onReset(evalCtx *tree.EvalContext, key string) error {
if p, ok := tableParams[key]; ok {
return p.onReset(po, evalCtx, key)
}
return pgerror.Newf(pgcode.InvalidParameterValue, "invalid storage parameter %q", key)
}
func setFillFactorStorageParam(evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
val, err := DatumAsFloat(evalCtx, key, datum)
if err != nil {
return err
}
if val < 0 || val > 100 {
return pgerror.Newf(pgcode.InvalidParameterValue, "%q must be between 0 and 100", key)
}
if evalCtx != nil {
evalCtx.ClientNoticeSender.BufferClientNotice(
evalCtx.Context,
pgnotice.Newf("storage parameter %q is ignored", key),
)
}
return nil
}
// IndexStorageParamObserver observes storage parameters for indexes.
type IndexStorageParamObserver struct {
IndexDesc *descpb.IndexDescriptor
}
var _ StorageParamObserver = (*IndexStorageParamObserver)(nil)
func getS2ConfigFromIndex(indexDesc *descpb.IndexDescriptor) *geoindex.S2Config {
var s2Config *geoindex.S2Config
if indexDesc.GeoConfig.S2Geometry != nil {
s2Config = indexDesc.GeoConfig.S2Geometry.S2Config
}
if indexDesc.GeoConfig.S2Geography != nil {
s2Config = indexDesc.GeoConfig.S2Geography.S2Config
}
return s2Config
}
func (po *IndexStorageParamObserver) applyS2ConfigSetting(
evalCtx *tree.EvalContext, key string, expr tree.Datum, min int64, max int64,
) error {
s2Config := getS2ConfigFromIndex(po.IndexDesc)
if s2Config == nil {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"index setting %q can only be set on GEOMETRY or GEOGRAPHY spatial indexes",
key,
)
}
val, err := DatumAsInt(evalCtx, key, expr)
if err != nil {
return errors.Wrapf(err, "error decoding %q", key)
}
if val < min || val > max {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"%q value must be between %d and %d inclusive",
key,
min,
max,
)
}
switch key {
case `s2_max_level`:
s2Config.MaxLevel = int32(val)
case `s2_level_mod`:
s2Config.LevelMod = int32(val)
case `s2_max_cells`:
s2Config.MaxCells = int32(val)
}
return nil
}
func (po *IndexStorageParamObserver) applyGeometryIndexSetting(
evalCtx *tree.EvalContext, key string, expr tree.Datum,
) error {
if po.IndexDesc.GeoConfig.S2Geometry == nil {
return pgerror.Newf(pgcode.InvalidParameterValue, "%q can only be applied to GEOMETRY spatial indexes", key)
}
val, err := DatumAsFloat(evalCtx, key, expr)
if err != nil {
return errors.Wrapf(err, "error decoding %q", key)
}
switch key {
case `geometry_min_x`:
po.IndexDesc.GeoConfig.S2Geometry.MinX = val
case `geometry_max_x`:
po.IndexDesc.GeoConfig.S2Geometry.MaxX = val
case `geometry_min_y`:
po.IndexDesc.GeoConfig.S2Geometry.MinY = val
case `geometry_max_y`:
po.IndexDesc.GeoConfig.S2Geometry.MaxY = val
default:
return pgerror.Newf(pgcode.InvalidParameterValue, "unknown key: %q", key)
}
return nil
}
// onSet implements the StorageParamObserver interface.
func (po *IndexStorageParamObserver) onSet(
ctx context.Context,
semaCtx *tree.SemaContext,
evalCtx *tree.EvalContext,
key string,
expr tree.Datum,
) error {
switch key {
case `fillfactor`:
return setFillFactorStorageParam(evalCtx, key, expr)
case `s2_max_level`:
return po.applyS2ConfigSetting(evalCtx, key, expr, 0, 30)
case `s2_level_mod`:
return po.applyS2ConfigSetting(evalCtx, key, expr, 1, 3)
case `s2_max_cells`:
return po.applyS2ConfigSetting(evalCtx, key, expr, 1, 32)
case `geometry_min_x`, `geometry_max_x`, `geometry_min_y`, `geometry_max_y`:
return po.applyGeometryIndexSetting(evalCtx, key, expr)
case `vacuum_cleanup_index_scale_factor`,
`buffering`,
`fastupdate`,
`gin_pending_list_limit`,
`pages_per_range`,
`autosummarize`:
return unimplemented.NewWithIssuef(43299, "storage parameter %q", key)
}
return pgerror.Newf(pgcode.InvalidParameterValue, "invalid storage parameter %q", key)
}
// onReset implements the StorageParamObserver interface.
func (po *IndexStorageParamObserver) onReset(evalCtx *tree.EvalContext, key string) error {
return errors.AssertionFailedf("non-implemented codepath")
}
// runPostChecks implements the StorageParamObserver interface.
func (po *IndexStorageParamObserver) runPostChecks() error {
s2Config := getS2ConfigFromIndex(po.IndexDesc)
if s2Config != nil {
if (s2Config.MaxLevel)%s2Config.LevelMod != 0 {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"s2_max_level (%d) must be divisible by s2_level_mod (%d)",
s2Config.MaxLevel,
s2Config.LevelMod,
)
}
}
if cfg := po.IndexDesc.GeoConfig.S2Geometry; cfg != nil {
if cfg.MaxX <= cfg.MinX {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"geometry_max_x (%f) must be greater than geometry_min_x (%f)",
cfg.MaxX,
cfg.MinX,
)
}
if cfg.MaxY <= cfg.MinY {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"geometry_max_y (%f) must be greater than geometry_min_y (%f)",
cfg.MaxY,
cfg.MinY,
)
}
}
return nil
}
| pkg/sql/paramparse/paramobserver.go | 1 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.9577020406723022,
0.038499291986227036,
0.0001686143223196268,
0.00022304430603981018,
0.1659986823797226
] |
{
"id": 1,
"code_window": [
"\n",
"statement error value of \"expire_after\" must be at least zero\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = '-10 minutes')\n",
"\n",
"statement ok\n",
"CREATE TABLE tbl (\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"statement error value of \"ttl_expire_after\" must be at least zero\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (ttl_expire_after = '-10 minutes')\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 3
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)
type invertedJoinNode struct {
input planNode
table *scanNode
// joinType is one of INNER, LEFT_OUTER, LEFT_SEMI, LEFT_ANTI.
joinType descpb.JoinType
// prefixEqCols identifies the columns from the input which are used for the
// lookup if the index is a multi-column inverted index. These correspond to
// the non-inverted prefix columns of the index we are looking up. This is
// empty if the index is not a multi-column inverted index.
prefixEqCols []int
// The inverted expression to evaluate.
invertedExpr tree.TypedExpr
// columns are the produced columns, namely the input columns and (unless the
// join type is semi or anti join) the columns in the table scanNode. It can
// include an additional continuation column for paired joins.
columns colinfo.ResultColumns
// onExpr is any ON condition to be used in conjunction with the inverted
// expression.
onExpr tree.TypedExpr
isFirstJoinInPairedJoiner bool
reqOrdering ReqOrdering
}
func (ij *invertedJoinNode) startExec(params runParams) error {
panic("invertedJoinNode cannot be run in local mode")
}
func (ij *invertedJoinNode) Next(params runParams) (bool, error) {
panic("invertedJoinNode cannot be run in local mode")
}
func (ij *invertedJoinNode) Values() tree.Datums {
panic("invertedJoinNode cannot be run in local mode")
}
func (ij *invertedJoinNode) Close(ctx context.Context) {
ij.input.Close(ctx)
ij.table.Close(ctx)
}
| pkg/sql/inverted_join.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0023601625580340624,
0.0008195392438210547,
0.00016924718511290848,
0.00017869181465357542,
0.0009634087327867746
] |
{
"id": 1,
"code_window": [
"\n",
"statement error value of \"expire_after\" must be at least zero\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = '-10 minutes')\n",
"\n",
"statement ok\n",
"CREATE TABLE tbl (\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"statement error value of \"ttl_expire_after\" must be at least zero\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (ttl_expire_after = '-10 minutes')\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 3
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
//go:build race
// +build race
package util
import "runtime"
// RaceEnabled is true if CockroachDB was built with the race build tag.
const RaceEnabled = true
// racePreemptionPoints is set in EnableRacePreemptionPoints.
var racePreemptionPoints = false
// EnableRacePreemptionPoints enables goroutine preemption points declared with
// RacePreempt for builds using the race build tag.
func EnableRacePreemptionPoints() func() {
racePreemptionPoints = true
return func() {
racePreemptionPoints = false
}
}
// RacePreempt adds a goroutine preemption point if CockroachDB was built with
// the race build tag and preemption points have been enabled. The function is a
// no-op (and should be optimized out through dead code elimination) if the race
// build tag was not used.
func RacePreempt() {
if racePreemptionPoints {
runtime.Gosched()
}
}
| pkg/util/race_on.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0005901527474634349,
0.0002828975557349622,
0.0001753022224875167,
0.0002051510091405362,
0.00015721371164545417
] |
{
"id": 1,
"code_window": [
"\n",
"statement error value of \"expire_after\" must be at least zero\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = '-10 minutes')\n",
"\n",
"statement ok\n",
"CREATE TABLE tbl (\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"statement error value of \"ttl_expire_after\" must be at least zero\n",
"CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (ttl_expire_after = '-10 minutes')\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 3
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import React from "react";
import _ from "lodash";
import { LineGraph } from "src/views/cluster/components/linegraph";
import {
Metric,
Axis,
AxisUnits,
} from "src/views/shared/components/metricQuery";
import { GraphDashboardProps, nodeDisplayName } from "./dashboardUtils";
export default function(props: GraphDashboardProps) {
const { nodeIDs, nodesSummary, nodeSources } = props;
return [
<LineGraph title="Batches" sources={nodeSources}>
<Axis label="batches">
<Metric
name="cr.node.distsender.batches"
title="Batches"
nonNegativeRate
/>
<Metric
name="cr.node.distsender.batches.partial"
title="Partial Batches"
nonNegativeRate
/>
</Axis>
</LineGraph>,
<LineGraph title="RPCs" sources={nodeSources}>
<Axis label="rpcs">
<Metric
name="cr.node.distsender.rpc.sent"
title="RPCs Sent"
nonNegativeRate
/>
<Metric
name="cr.node.distsender.rpc.sent.local"
title="Local Fast-path"
nonNegativeRate
/>
</Axis>
</LineGraph>,
<LineGraph title="RPC Errors" sources={nodeSources}>
<Axis label="errors">
<Metric
name="cr.node.distsender.rpc.sent.sendnexttimeout"
title="RPC Timeouts"
nonNegativeRate
/>
<Metric
name="cr.node.distsender.rpc.sent.nextreplicaerror"
title="Replica Errors"
nonNegativeRate
/>
<Metric
name="cr.node.distsender.errors.notleaseholder"
title="Not Leaseholder Errors"
nonNegativeRate
/>
</Axis>
</LineGraph>,
<LineGraph title="KV Transactions" sources={nodeSources}>
<Axis label="transactions">
<Metric name="cr.node.txn.commits" title="Committed" nonNegativeRate />
<Metric
name="cr.node.txn.commits1PC"
title="Fast-path Committed"
nonNegativeRate
/>
<Metric name="cr.node.txn.aborts" title="Aborted" nonNegativeRate />
</Axis>
</LineGraph>,
<LineGraph
title="KV Transaction Durations: 99th percentile"
tooltip={`The 99th percentile of transaction durations over a 1 minute period.
Values are displayed individually for each node.`}
>
<Axis units={AxisUnits.Duration} label="transaction duration">
{_.map(nodeIDs, node => (
<Metric
key={node}
name="cr.node.txn.durations-p99"
title={nodeDisplayName(nodesSummary, node)}
sources={[node]}
downsampleMax
/>
))}
</Axis>
</LineGraph>,
<LineGraph
title="KV Transaction Durations: 90th percentile"
tooltip={`The 90th percentile of transaction durations over a 1 minute period.
Values are displayed individually for each node.`}
>
<Axis units={AxisUnits.Duration} label="transaction duration">
{_.map(nodeIDs, node => (
<Metric
key={node}
name="cr.node.txn.durations-p90"
title={nodeDisplayName(nodesSummary, node)}
sources={[node]}
downsampleMax
/>
))}
</Axis>
</LineGraph>,
<LineGraph
title="Node Heartbeat Latency: 99th percentile"
tooltip={`The 99th percentile of latency to heartbeat a node's internal liveness record over a 1 minute period.
Values are displayed individually for each node.`}
>
<Axis units={AxisUnits.Duration} label="heartbeat latency">
{_.map(nodeIDs, node => (
<Metric
key={node}
name="cr.node.liveness.heartbeatlatency-p99"
title={nodeDisplayName(nodesSummary, node)}
sources={[node]}
downsampleMax
/>
))}
</Axis>
</LineGraph>,
<LineGraph
title="Node Heartbeat Latency: 90th percentile"
tooltip={`The 90th percentile of latency to heartbeat a node's internal liveness record over a 1 minute period.
Values are displayed individually for each node.`}
>
<Axis units={AxisUnits.Duration} label="heartbeat latency">
{_.map(nodeIDs, node => (
<Metric
key={node}
name="cr.node.liveness.heartbeatlatency-p90"
title={nodeDisplayName(nodesSummary, node)}
sources={[node]}
downsampleMax
/>
))}
</Axis>
</LineGraph>,
];
}
| pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards/distributed.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00020402137306518853,
0.0001767932262737304,
0.0001682252186583355,
0.0001731220108922571,
0.000008811091902316548
] |
{
"id": 2,
"code_window": [
"CREATE TABLE tbl (\n",
" id INT PRIMARY KEY,\n",
" text TEXT,\n",
" FAMILY (id, text)\n",
") WITH (expire_after = '10 minutes')\n",
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n",
"----\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
") WITH (ttl_expire_after = '10 minutes')\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 11
} | statement error value of "expire_after" must be an interval
CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = ' xx invalid interval xx')
statement error value of "expire_after" must be at least zero
CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = '-10 minutes')
statement ok
CREATE TABLE tbl (
id INT PRIMARY KEY,
text TEXT,
FAMILY (id, text)
) WITH (expire_after = '10 minutes')
query TT
SHOW CREATE TABLE tbl
----
tbl CREATE TABLE public.tbl (
id INT8 NOT NULL,
text STRING NULL,
CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),
FAMILY fam_0_id_text (id, text)
) WITH (expire_after = '00:10:00':::INTERVAL)
query T
SELECT reloptions FROM pg_class WHERE relname = 'tbl'
----
{expire_after='00:10:00':::INTERVAL}
statement ok
DROP TABLE tbl;
CREATE TABLE tbl (
id INT PRIMARY KEY,
text TEXT,
FAMILY (id, text)
) WITH (expire_after = '10 minutes'::interval)
query TT
SHOW CREATE TABLE tbl
----
tbl CREATE TABLE public.tbl (
id INT8 NOT NULL,
text STRING NULL,
CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),
FAMILY fam_0_id_text (id, text)
) WITH (expire_after = '00:10:00':::INTERVAL)
| pkg/sql/logictest/testdata/logic_test/row_level_ttl | 1 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.9949475526809692,
0.7955774068832397,
0.1454780101776123,
0.9636965990066528,
0.32721787691116333
] |
{
"id": 2,
"code_window": [
"CREATE TABLE tbl (\n",
" id INT PRIMARY KEY,\n",
" text TEXT,\n",
" FAMILY (id, text)\n",
") WITH (expire_after = '10 minutes')\n",
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n",
"----\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
") WITH (ttl_expire_after = '10 minutes')\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 11
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tests
import (
"context"
"net/http"
"time"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test"
"github.com/cockroachdb/cockroach/pkg/ts/tspb"
"github.com/cockroachdb/cockroach/pkg/util/httputil"
)
// tsQueryType represents the type of the time series query to retrieve. In
// most cases, tests are verifying either the "total" or "rate" metrics, so
// this enum type simplifies the API of tspb.Query.
type tsQueryType int
const (
// total indicates to query the total of the metric. Specifically,
// downsampler will be average, aggregator will be sum, and derivative will
// be none.
total tsQueryType = iota
// rate indicates to query the rate of change of the metric. Specifically,
// downsampler will be average, aggregator will be sum, and derivative will
// be non-negative derivative.
rate
)
type tsQuery struct {
name string
queryType tsQueryType
}
func mustGetMetrics(
t test.Test, adminURL string, start, end time.Time, tsQueries []tsQuery,
) tspb.TimeSeriesQueryResponse {
response, err := getMetrics(adminURL, start, end, tsQueries)
if err != nil {
t.Fatal(err)
}
return response
}
func getMetrics(
adminURL string, start, end time.Time, tsQueries []tsQuery,
) (tspb.TimeSeriesQueryResponse, error) {
url := "http://" + adminURL + "/ts/query"
queries := make([]tspb.Query, len(tsQueries))
for i := 0; i < len(tsQueries); i++ {
switch tsQueries[i].queryType {
case total:
queries[i] = tspb.Query{
Name: tsQueries[i].name,
Downsampler: tspb.TimeSeriesQueryAggregator_AVG.Enum(),
SourceAggregator: tspb.TimeSeriesQueryAggregator_SUM.Enum(),
}
case rate:
queries[i] = tspb.Query{
Name: tsQueries[i].name,
Downsampler: tspb.TimeSeriesQueryAggregator_AVG.Enum(),
SourceAggregator: tspb.TimeSeriesQueryAggregator_SUM.Enum(),
Derivative: tspb.TimeSeriesQueryDerivative_NON_NEGATIVE_DERIVATIVE.Enum(),
}
default:
panic("unexpected")
}
}
request := tspb.TimeSeriesQueryRequest{
StartNanos: start.UnixNano(),
EndNanos: end.UnixNano(),
// Ask for one minute intervals. We can't just ask for the whole hour
// because the time series query system does not support downsampling
// offsets.
SampleNanos: (1 * time.Minute).Nanoseconds(),
Queries: queries,
}
var response tspb.TimeSeriesQueryResponse
err := httputil.PostJSON(http.Client{Timeout: 500 * time.Millisecond}, url, &request, &response)
return response, err
}
func verifyTxnPerSecond(
ctx context.Context,
c cluster.Cluster,
t test.Test,
adminNode option.NodeListOption,
start, end time.Time,
txnTarget, maxPercentTimeUnderTarget float64,
) {
// Query needed information over the timespan of the query.
adminUIAddrs, err := c.ExternalAdminUIAddr(ctx, t.L(), adminNode)
if err != nil {
t.Fatal(err)
}
adminURL := adminUIAddrs[0]
response := mustGetMetrics(t, adminURL, start, end, []tsQuery{
{name: "cr.node.txn.commits", queryType: rate},
{name: "cr.node.txn.commits", queryType: total},
})
// Drop the first two minutes of datapoints as a "ramp-up" period.
perMinute := response.Results[0].Datapoints[2:]
cumulative := response.Results[1].Datapoints[2:]
// Check average txns per second over the entire test was above the target.
totalTxns := cumulative[len(cumulative)-1].Value - cumulative[0].Value
avgTxnPerSec := totalTxns / float64(end.Sub(start)/time.Second)
if avgTxnPerSec < txnTarget {
t.Fatalf("average txns per second %f was under target %f", avgTxnPerSec, txnTarget)
} else {
t.L().Printf("average txns per second: %f", avgTxnPerSec)
}
// Verify that less than the specified limit of each individual one minute
// period was underneath the target.
minutesBelowTarget := 0.0
for _, dp := range perMinute {
if dp.Value < txnTarget {
minutesBelowTarget++
}
}
if perc := minutesBelowTarget / float64(len(perMinute)); perc > maxPercentTimeUnderTarget {
t.Fatalf(
"spent %f%% of time below target of %f txn/s, wanted no more than %f%%",
perc*100, txnTarget, maxPercentTimeUnderTarget*100,
)
} else {
t.L().Printf("spent %f%% of time below target of %f txn/s", perc*100, txnTarget)
}
}
func verifyLookupsPerSec(
ctx context.Context,
c cluster.Cluster,
t test.Test,
adminNode option.NodeListOption,
start, end time.Time,
rangeLookupsTarget float64,
) {
// Query needed information over the timespan of the query.
adminUIAddrs, err := c.ExternalAdminUIAddr(ctx, t.L(), adminNode)
if err != nil {
t.Fatal(err)
}
adminURL := adminUIAddrs[0]
response := mustGetMetrics(t, adminURL, start, end, []tsQuery{
{name: "cr.node.distsender.rangelookups", queryType: rate},
})
// Drop the first two minutes of datapoints as a "ramp-up" period.
perMinute := response.Results[0].Datapoints[2:]
	// Verify that each individual one-minute period was below the target.
for _, dp := range perMinute {
if dp.Value > rangeLookupsTarget {
t.Fatalf("Found minute interval with %f lookup/sec above target of %f lookup/sec\n", dp.Value, rangeLookupsTarget)
} else {
t.L().Printf("Found minute interval with %f lookup/sec\n", dp.Value)
}
}
}
| pkg/cmd/roachtest/tests/ts_util.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00019435207650531083,
0.00017576126265339553,
0.00016681732085999101,
0.00017074232164304703,
0.000009412761755811516
] |
{
"id": 2,
"code_window": [
"CREATE TABLE tbl (\n",
" id INT PRIMARY KEY,\n",
" text TEXT,\n",
" FAMILY (id, text)\n",
") WITH (expire_after = '10 minutes')\n",
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n",
"----\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
") WITH (ttl_expire_after = '10 minutes')\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 11
} | ["\u200B"] | pkg/util/json/testdata/raw/string_unicode_U+200B_ZERO_WIDTH_SPACE.json | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0001720461150398478,
0.0001720461150398478,
0.0001720461150398478,
0.0001720461150398478,
0
] |
{
"id": 2,
"code_window": [
"CREATE TABLE tbl (\n",
" id INT PRIMARY KEY,\n",
" text TEXT,\n",
" FAMILY (id, text)\n",
") WITH (expire_after = '10 minutes')\n",
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n",
"----\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
") WITH (ttl_expire_after = '10 minutes')\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 11
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package issues
import (
"fmt"
"html"
"strings"
"unicode/utf8"
)
// An IssueFormatter turns TemplateData for a test failure into markdown
// that can form a GitHub issue comment.
type IssueFormatter struct {
Title func(TemplateData) string
Body func(*Renderer, TemplateData) error
}
// A Renderer facilitates creating a reduced and opinionated subset of markdown.
type Renderer struct {
buf strings.Builder
}
func (r *Renderer) printf(format string, args ...interface{}) {
fmt.Fprintf(&r.buf, format, args...)
}
func (r *Renderer) esc(in string, chars string, with rune) string {
for {
r, n := utf8.DecodeRuneInString(chars)
if r == utf8.RuneError {
return in
}
chars = chars[n:]
s := string(r)
in = strings.Replace(in, s, string(with)+s, -1)
}
}
func (r *Renderer) nl() {
if n := r.buf.Len(); n > 0 && r.buf.String()[n-1] == '\n' {
return
}
r.buf.WriteByte('\n')
}
// A renders a hyperlink.
func (r *Renderer) A(title, href string) {
r.printf("[")
r.Escaped(r.esc(title, "[]()", '\\'))
r.printf("]")
r.printf("(")
r.printf("%s", r.esc(href, "[]()", '\\'))
r.printf(")")
}
// P renders the inner function as a paragraph.
func (r *Renderer) P(inner func()) {
r.HTML("p", inner)
}
// Escaped renders text, which it HTML escapes.
func (r *Renderer) Escaped(txt string) {
r.printf("%s", html.EscapeString(txt))
}
// CodeBlock renders a code block.
func (r *Renderer) CodeBlock(typ string, txt string) {
r.nl()
// NB: the leading newline may be spurious, but quotes
// always need to be preceded by a blank line, or at
// least GitHub doesn't interpret the ``` right. The
// below will misbehave, we need a blank line after `<p>`.
//
// <details><summary>foo</summary>
// <p>
// ```
// bar
// ```
// </p>
// </details>
r.printf("\n```%s\n", r.esc(typ, "`", '`'))
r.printf("%s", r.esc(txt, "`", '`'))
r.nl()
r.printf("%s", "```")
r.nl()
}
// HTML renders inner as enclosed by the supplied HTML tag.
func (r *Renderer) HTML(tag string, inner func()) {
r.printf("<%s>", tag)
inner()
r.printf("</%s>", tag)
r.nl()
}
// Collapsed renders an expandable section via the details HTML tag.
func (r *Renderer) Collapsed(title string, inner func()) {
r.HTML("details", func() {
r.HTML("summary", func() {
r.Escaped(title)
})
r.nl()
r.P(func() {
r.nl()
inner()
})
})
r.nl()
}
// String prints the buffer.
func (r *Renderer) String() string {
return r.buf.String()
}
| pkg/cmd/internal/issues/render.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00019048051035497338,
0.0001735728874336928,
0.00016509220586158335,
0.00017129088519141078,
0.000007045400252536638
] |
{
"id": 3,
"code_window": [
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n",
"----\n",
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (expire_after = '00:10:00':::INTERVAL)\n",
"\n",
"query T\n",
"SELECT reloptions FROM pg_class WHERE relname = 'tbl'\n",
"----\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (ttl_expire_after = '00:10:00':::INTERVAL)\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 16
} | statement error value of "expire_after" must be an interval
CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = ' xx invalid interval xx')
statement error value of "expire_after" must be at least zero
CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = '-10 minutes')
statement ok
CREATE TABLE tbl (
id INT PRIMARY KEY,
text TEXT,
FAMILY (id, text)
) WITH (expire_after = '10 minutes')
query TT
SHOW CREATE TABLE tbl
----
tbl CREATE TABLE public.tbl (
id INT8 NOT NULL,
text STRING NULL,
CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),
FAMILY fam_0_id_text (id, text)
) WITH (expire_after = '00:10:00':::INTERVAL)
query T
SELECT reloptions FROM pg_class WHERE relname = 'tbl'
----
{expire_after='00:10:00':::INTERVAL}
statement ok
DROP TABLE tbl;
CREATE TABLE tbl (
id INT PRIMARY KEY,
text TEXT,
FAMILY (id, text)
) WITH (expire_after = '10 minutes'::interval)
query TT
SHOW CREATE TABLE tbl
----
tbl CREATE TABLE public.tbl (
id INT8 NOT NULL,
text STRING NULL,
CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),
FAMILY fam_0_id_text (id, text)
) WITH (expire_after = '00:10:00':::INTERVAL)
| pkg/sql/logictest/testdata/logic_test/row_level_ttl | 1 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.9946416616439819,
0.617185115814209,
0.007275233510881662,
0.9898108243942261,
0.4604308009147644
] |
{
"id": 3,
"code_window": [
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n",
"----\n",
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (expire_after = '00:10:00':::INTERVAL)\n",
"\n",
"query T\n",
"SELECT reloptions FROM pg_class WHERE relname = 'tbl'\n",
"----\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (ttl_expire_after = '00:10:00':::INTERVAL)\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 16
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
@require nib
@require "~styl/base/palette.styl"
@require "~src/components/core/index.styl"
.metric-table-dropdown
line-height 17px
padding 6px 12px
vertical-align middle
border-radius 2px
padding-right 10px
color $body-color
&:hover
background-color $dropdown-hover-color
&__title
vertical-align middle
display inline-block
&__select
display inline-block
vertical-align middle
white-space nowrap
&:hover
background-color $dropdown-hover-color
.Select-menu-outer
margin-top $spacing-base
.Select-control
width 100%!important
.Select-placeholder
padding-left 10px!important
button.edit-button
padding 12px 24px
margin 0px 9px
background-color inherit
text-transform uppercase
color $link-color
font-size 14px
letter-spacing 2px
border 1px solid $button-border-color
border-radius 3px
vertical-align middle
cursor pointer
.metric-edit-button
&--add
margin 17px 9px
.metric-table
&__header
background white
padding 10px 20px
font-family $font-family--base
font-size 12px
font-weight bold
letter-spacing 2px
text-transform uppercase
text-align left
color $body-color
&__cell
text-align center
.custom-metric__chart-controls-container
display flex
flex-direction row
margin-bottom $spacing-medium
| pkg/ui/workspaces/db-console/src/views/reports/containers/customChart/customChart.styl | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00023558914836030453,
0.00018863222794607282,
0.00017055714852176607,
0.00018510391237214208,
0.000019256718587712385
] |
{
"id": 3,
"code_window": [
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n",
"----\n",
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (expire_after = '00:10:00':::INTERVAL)\n",
"\n",
"query T\n",
"SELECT reloptions FROM pg_class WHERE relname = 'tbl'\n",
"----\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (ttl_expire_after = '00:10:00':::INTERVAL)\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 16
} | load("@bazel_gomock//:gomock.bzl", "gomock")
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
load("//build:STRINGER.bzl", "stringer")
go_library(
name = "kvcoord",
srcs = [
"batch.go",
"condensable_span_set.go",
"dist_sender.go",
"dist_sender_rangefeed.go",
"doc.go",
"local_test_cluster_util.go",
"lock_spans_over_budget_error.go",
"node_store.go",
"range_iter.go",
"replica_slice.go",
"testing_knobs.go",
"transport.go",
"transport_race.go",
"transport_regular.go",
"txn_coord_sender.go",
"txn_coord_sender_factory.go",
"txn_coord_sender_savepoints.go",
"txn_interceptor_committer.go",
"txn_interceptor_heartbeater.go",
"txn_interceptor_metric_recorder.go",
"txn_interceptor_pipeliner.go",
"txn_interceptor_seq_num_allocator.go",
"txn_interceptor_span_refresher.go",
"txn_lock_gatekeeper.go",
"txn_metrics.go",
":gen-txnstate-stringer", # keep
],
importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord",
visibility = ["//visibility:public"],
deps = [
"//pkg/base",
"//pkg/gossip",
"//pkg/keys",
"//pkg/kv",
"//pkg/kv/kvbase",
"//pkg/kv/kvclient/rangecache:with-mocks",
"//pkg/kv/kvserver/concurrency/lock",
"//pkg/multitenant",
"//pkg/multitenant/tenantcostmodel",
"//pkg/roachpb:with-mocks",
"//pkg/rpc",
"//pkg/rpc/nodedialer",
"//pkg/server/telemetry",
"//pkg/settings",
"//pkg/settings/cluster",
"//pkg/sql/pgwire/pgcode",
"//pkg/sql/pgwire/pgerror",
"//pkg/storage/enginepb",
"//pkg/util",
"//pkg/util/contextutil",
"//pkg/util/ctxgroup",
"//pkg/util/envutil",
"//pkg/util/errorutil/unimplemented",
"//pkg/util/grpcutil",
"//pkg/util/hlc",
"//pkg/util/iterutil",
"//pkg/util/log",
"//pkg/util/metric",
"//pkg/util/quotapool",
"//pkg/util/retry",
"//pkg/util/shuffle",
"//pkg/util/stop",
"//pkg/util/syncutil",
"//pkg/util/timeutil",
"//pkg/util/tracing",
"//pkg/util/uuid",
"@com_github_cockroachdb_errors//:errors",
"@com_github_cockroachdb_errors//errorspb",
"@com_github_cockroachdb_logtags//:logtags",
"@com_github_cockroachdb_redact//:redact",
"@com_github_gogo_protobuf//proto",
"@com_github_google_btree//:btree",
"@io_opentelemetry_go_otel//attribute",
],
)
gomock(
name = "mock_kvcoord",
out = "mocks_generated.go",
interfaces = [
"Transport",
],
library = ":kvcoord",
package = "kvcoord",
self_package = "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord",
)
go_library(
name = "with-mocks",
srcs = [":mock_kvcoord"],
embed = [":kvcoord"],
visibility = ["//visibility:public"],
deps = [
"@com_github_golang_mock//gomock",
],
)
# This noop target is a workaround for https://github.com/bazelbuild/bazel-gazelle/issues/1078.
#
# gazelle:resolve go github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord //build/bazelutil:noop
go_test(
name = "kvcoord_test",
size = "medium",
srcs = [
"batch_test.go",
"condensable_span_set_test.go",
"dist_sender_rangefeed_test.go",
"dist_sender_server_test.go",
"dist_sender_test.go",
"helpers_test.go",
"integration_test.go",
"main_test.go",
"range_iter_test.go",
"replayed_commit_test.go",
"replica_slice_test.go",
"send_test.go",
"split_test.go",
"transport_test.go",
"truncate_test.go",
"txn_coord_sender_savepoints_test.go",
"txn_coord_sender_server_test.go",
"txn_coord_sender_test.go",
"txn_correctness_test.go",
"txn_intercepter_pipeliner_client_test.go",
"txn_interceptor_committer_test.go",
"txn_interceptor_heartbeater_test.go",
"txn_interceptor_pipeliner_test.go",
"txn_interceptor_seq_num_allocator_test.go",
"txn_interceptor_span_refresher_test.go",
"txn_test.go",
],
data = glob(["testdata/**"]),
embed = [":with-mocks"], # keep
tags = ["no-remote"],
deps = [
"//build/bazelutil:noop",
"//pkg/base",
"//pkg/config",
"//pkg/config/zonepb",
"//pkg/gossip",
"//pkg/gossip/simulation",
"//pkg/keys",
"//pkg/kv",
"//pkg/kv/kvbase",
"//pkg/kv/kvclient/rangecache:with-mocks",
"//pkg/kv/kvserver",
"//pkg/kv/kvserver/closedts",
"//pkg/kv/kvserver/concurrency/lock",
"//pkg/kv/kvserver/kvserverbase",
"//pkg/kv/kvserver/tscache",
"//pkg/kv/kvserver/txnwait",
"//pkg/roachpb:with-mocks",
"//pkg/rpc",
"//pkg/rpc/nodedialer",
"//pkg/security",
"//pkg/security/securitytest",
"//pkg/server",
"//pkg/settings/cluster",
"//pkg/sql/pgwire/pgcode",
"//pkg/sql/pgwire/pgerror",
"//pkg/storage",
"//pkg/storage/enginepb",
"//pkg/testutils",
"//pkg/testutils/buildutil",
"//pkg/testutils/kvclientutils",
"//pkg/testutils/localtestcluster",
"//pkg/testutils/serverutils",
"//pkg/testutils/skip",
"//pkg/testutils/testcluster",
"//pkg/util",
"//pkg/util/caller",
"//pkg/util/grpcutil",
"//pkg/util/hlc",
"//pkg/util/leaktest",
"//pkg/util/log",
"//pkg/util/metric",
"//pkg/util/netutil",
"//pkg/util/randutil",
"//pkg/util/retry",
"//pkg/util/shuffle",
"//pkg/util/stop",
"//pkg/util/syncutil",
"//pkg/util/tracing",
"//pkg/util/uuid",
"@com_github_cockroachdb_circuitbreaker//:circuitbreaker",
"@com_github_cockroachdb_datadriven//:datadriven",
"@com_github_cockroachdb_errors//:errors",
"@com_github_cockroachdb_errors//errutil",
"@com_github_cockroachdb_redact//:redact",
"@com_github_golang_mock//gomock",
"@com_github_stretchr_testify//assert",
"@com_github_stretchr_testify//require",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//status",
"@org_golang_x_sync//errgroup",
],
)
stringer(
name = "gen-txnstate-stringer",
src = "txn_coord_sender.go",
typ = "txnState",
)
| pkg/kv/kvclient/kvcoord/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00027875290834344923,
0.00018996567814610898,
0.0001651616330491379,
0.00017430105071980506,
0.000031757113902131096
] |
{
"id": 3,
"code_window": [
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n",
"----\n",
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (expire_after = '00:10:00':::INTERVAL)\n",
"\n",
"query T\n",
"SELECT reloptions FROM pg_class WHERE relname = 'tbl'\n",
"----\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (ttl_expire_after = '00:10:00':::INTERVAL)\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 16
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "sa1028",
srcs = ["analyzer.go"],
importpath = "github.com/cockroachdb/cockroach/build/bazelutil/staticcheckanalyzers/sa1028",
visibility = ["//visibility:public"],
deps = [
"//pkg/testutils/lint/passes/staticcheck",
"@co_honnef_go_tools//staticcheck",
"@org_golang_x_tools//go/analysis",
],
)
| build/bazelutil/staticcheckanalyzers/sa1028/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00018398481188341975,
0.0001761951280059293,
0.00016840544412843883,
0.0001761951280059293,
0.000007789683877490461
] |
{
"id": 4,
"code_window": [
"query T\n",
"SELECT reloptions FROM pg_class WHERE relname = 'tbl'\n",
"----\n",
"{expire_after='00:10:00':::INTERVAL}\n",
"\n",
"statement ok\n",
"DROP TABLE tbl;\n",
"CREATE TABLE tbl (\n",
" id INT PRIMARY KEY,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"{ttl_expire_after='00:10:00':::INTERVAL}\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 26
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"bytes"
"context"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catformat"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
)
type shouldOmitFKClausesFromCreate int
const (
_ shouldOmitFKClausesFromCreate = iota
// OmitFKClausesFromCreate will not include any foreign key information in the
// create statement.
OmitFKClausesFromCreate
// IncludeFkClausesInCreate will include foreign key information in the create
// statement, and error if a FK cannot be resolved.
IncludeFkClausesInCreate
// OmitMissingFKClausesFromCreate will include foreign key information only if they
// can be resolved. If not, it will ignore those constraints.
// This is used in the case when showing the create statement for
// tables stored in backups. Not all relevant tables may have been
	// included in the backup, so some foreign key information may be
// impossible to retrieve.
OmitMissingFKClausesFromCreate
)
// ShowCreateDisplayOptions is a container struct holding the options that
// ShowCreate uses to determine how much information should be included in the
// CREATE statement.
type ShowCreateDisplayOptions struct {
FKDisplayMode shouldOmitFKClausesFromCreate
// Comment resolution requires looking up table data from system.comments
// table. This is sometimes not possible. For example, in the context of a
// SHOW BACKUP which may resolve the create statement, there is no mechanism
// to read any table data from the backup (nor is there a guarantee that the
// system.comments table is included in the backup at all).
IgnoreComments bool
}
// ShowCreateTable returns a valid SQL representation of the CREATE
// TABLE statement used to create the given table.
//
// The names of the tables referenced by foreign keys are prefixed by their own
// database name unless it is equal to the given dbPrefix. This allows us to
// elide the prefix when the given table references other tables in the
// current database.
func ShowCreateTable(
ctx context.Context,
p PlanHookState,
tn *tree.TableName,
dbPrefix string,
desc catalog.TableDescriptor,
lCtx simpleSchemaResolver,
displayOptions ShowCreateDisplayOptions,
) (string, error) {
a := &tree.DatumAlloc{}
f := p.ExtendedEvalContext().FmtCtx(tree.FmtSimple)
f.WriteString("CREATE ")
if desc.IsTemporary() {
f.WriteString("TEMP ")
}
f.WriteString("TABLE ")
f.FormatNode(tn)
f.WriteString(" (")
// Inaccessible columns are not displayed in SHOW CREATE TABLE.
for i, col := range desc.AccessibleColumns() {
if i != 0 {
f.WriteString(",")
}
f.WriteString("\n\t")
colstr, err := schemaexpr.FormatColumnForDisplay(
ctx, desc, col, &p.RunParams(ctx).p.semaCtx, p.RunParams(ctx).p.SessionData(),
)
if err != nil {
return "", err
}
f.WriteString(colstr)
}
if desc.IsPhysicalTable() {
f.WriteString(",\n\tCONSTRAINT ")
formatQuoteNames(&f.Buffer, desc.GetPrimaryIndex().GetName())
f.WriteString(" ")
f.WriteString(tabledesc.PrimaryKeyString(desc))
}
// TODO (lucy): Possibly include FKs in the mutations list here, or else
// exclude check mutations below, for consistency.
if displayOptions.FKDisplayMode != OmitFKClausesFromCreate {
if err := desc.ForeachOutboundFK(func(fk *descpb.ForeignKeyConstraint) error {
fkCtx := tree.NewFmtCtx(tree.FmtSimple)
fkCtx.WriteString(",\n\tCONSTRAINT ")
fkCtx.FormatNameP(&fk.Name)
fkCtx.WriteString(" ")
// Passing in EmptySearchPath causes the schema name to show up in the
// constraint definition, which we need for `cockroach dump` output to be
// usable.
if err := showForeignKeyConstraint(
&fkCtx.Buffer,
dbPrefix,
desc,
fk,
lCtx,
sessiondata.EmptySearchPath,
); err != nil {
if displayOptions.FKDisplayMode == OmitMissingFKClausesFromCreate {
return nil
}
// When FKDisplayMode == IncludeFkClausesInCreate.
return err
}
f.WriteString(fkCtx.String())
return nil
}); err != nil {
return "", err
}
}
for _, idx := range desc.PublicNonPrimaryIndexes() {
// Showing the primary index is handled above.
// Build the PARTITION BY clause.
var partitionBuf bytes.Buffer
if err := ShowCreatePartitioning(
a, p.ExecCfg().Codec, desc, idx, idx.GetPartitioning(), &partitionBuf, 1 /* indent */, 0, /* colOffset */
); err != nil {
return "", err
}
f.WriteString(",\n\t")
idxStr, err := catformat.IndexForDisplay(
ctx,
desc,
&descpb.AnonymousTable,
idx,
partitionBuf.String(),
tree.FmtSimple,
p.RunParams(ctx).p.SemaCtx(),
p.RunParams(ctx).p.SessionData(),
catformat.IndexDisplayDefOnly,
)
if err != nil {
return "", err
}
f.WriteString(idxStr)
}
// Create the FAMILY and CONSTRAINTs of the CREATE statement
showFamilyClause(desc, f)
if err := showConstraintClause(ctx, desc, &p.RunParams(ctx).p.semaCtx, p.RunParams(ctx).p.SessionData(), f); err != nil {
return "", err
}
if err := ShowCreatePartitioning(
a, p.ExecCfg().Codec, desc, desc.GetPrimaryIndex(), desc.GetPrimaryIndex().GetPartitioning(), &f.Buffer, 0 /* indent */, 0, /* colOffset */
); err != nil {
return "", err
}
if ttl := desc.GetRowLevelTTL(); ttl != nil {
f.Buffer.WriteString(` WITH (expire_after = `)
f.Buffer.WriteString(ttl.DurationExpr)
f.Buffer.WriteString(`)`)
}
if err := showCreateLocality(desc, f); err != nil {
return "", err
}
if !displayOptions.IgnoreComments {
if err := showComments(tn, desc, selectComment(ctx, p, desc.GetID()), &f.Buffer); err != nil {
return "", err
}
}
return f.CloseAndGetString(), nil
}
// formatQuoteNames quotes and adds commas between names.
func formatQuoteNames(buf *bytes.Buffer, names ...string) {
f := tree.NewFmtCtx(tree.FmtSimple)
for i := range names {
if i > 0 {
f.WriteString(", ")
}
f.FormatNameP(&names[i])
}
buf.WriteString(f.CloseAndGetString())
}
// ShowCreate returns a valid SQL representation of the CREATE
// statement used to create the descriptor passed in.
//
// The names of the tables referenced by foreign keys are prefixed by their own
// database name unless it is equal to the given dbPrefix. This allows us to
// elide the prefix when the given table references other tables in the current
// database.
func (p *planner) ShowCreate(
ctx context.Context,
dbPrefix string,
allDescs []descpb.Descriptor,
desc catalog.TableDescriptor,
displayOptions ShowCreateDisplayOptions,
) (string, error) {
var stmt string
var err error
tn := tree.MakeUnqualifiedTableName(tree.Name(desc.GetName()))
if desc.IsView() {
stmt, err = ShowCreateView(ctx, &p.RunParams(ctx).p.semaCtx, p.RunParams(ctx).p.SessionData(), &tn, desc)
} else if desc.IsSequence() {
stmt, err = ShowCreateSequence(ctx, &tn, desc)
} else {
lCtx, lErr := newInternalLookupCtxFromDescriptorProtos(
ctx, allDescs, nil, /* want all tables */
)
if lErr != nil {
return "", lErr
}
// Overwrite desc with hydrated descriptor.
desc, err = lCtx.getTableByID(desc.GetID())
if err != nil {
return "", err
}
stmt, err = ShowCreateTable(ctx, p, &tn, dbPrefix, desc, lCtx, displayOptions)
}
return stmt, err
}
| pkg/sql/show_create.go | 1 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.002746827667579055,
0.0003829138004221022,
0.00016274850349873304,
0.0001706467883195728,
0.0005772485747002065
] |
{
"id": 4,
"code_window": [
"query T\n",
"SELECT reloptions FROM pg_class WHERE relname = 'tbl'\n",
"----\n",
"{expire_after='00:10:00':::INTERVAL}\n",
"\n",
"statement ok\n",
"DROP TABLE tbl;\n",
"CREATE TABLE tbl (\n",
" id INT PRIMARY KEY,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"{ttl_expire_after='00:10:00':::INTERVAL}\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 26
} | # These files are generated by the build system, but also must be
# checked into tree. The syntax here is:
# $TARGET|$GENERATED_FILENAME|$FILENAME_TO_RENAME_TO
# This list is consumed by `dev` as well as in CI for validation.
# Lines beginning with # are ignored.
//pkg/roachpb:gen-batch-generated|batch_generated-gen.go|batch_generated.go
//pkg/sql/opt/optgen/lang:gen-expr|expr-gen.og.go|expr.og.go
//pkg/sql/opt/optgen/lang:gen-operator|operator-gen.og.go|operator.og.go
| build/bazelutil/checked_in_genfiles.txt | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00017000219668261707,
0.00017000219668261707,
0.00017000219668261707,
0.00017000219668261707,
0
] |
{
"id": 4,
"code_window": [
"query T\n",
"SELECT reloptions FROM pg_class WHERE relname = 'tbl'\n",
"----\n",
"{expire_after='00:10:00':::INTERVAL}\n",
"\n",
"statement ok\n",
"DROP TABLE tbl;\n",
"CREATE TABLE tbl (\n",
" id INT PRIMARY KEY,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"{ttl_expire_after='00:10:00':::INTERVAL}\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 26
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"fmt"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/errors"
)
// saveTableNode is used for internal testing. It is a node that passes through
// input data but saves it in a table. The table can be used subsequently, e.g.
// to look at statistics.
//
// The node creates the table on startup. If the table exists, it errors out.
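//
// A hypothetical wiring sketch (the input plan variable and the target table
// name are assumptions for illustration):
//
//	tn := tree.MakeUnqualifiedTableName("stats_snapshot")
//	plan := p.makeSaveTable(inputPlan, &tn, []string{"col1", "col2"})
//	// plan passes rows through unchanged while batching INSERTs of up to
//	// saveTableInsertBatch rows into the target table.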
type saveTableNode struct {
source planNode
target tree.TableName
// Column names from the saved table. These could be different than the names
// of the columns in the source plan. Note that saveTableNode passes through
// the source plan's column names.
colNames []string
run struct {
// vals accumulates a ValuesClause with the rows.
vals tree.ValuesClause
}
}
// saveTableInsertBatch is the number of rows per issued INSERT statement.
const saveTableInsertBatch = 100
func (p *planner) makeSaveTable(
source planNode, target *tree.TableName, colNames []string,
) planNode {
return &saveTableNode{source: source, target: *target, colNames: colNames}
}
func (n *saveTableNode) startExec(params runParams) error {
create := &tree.CreateTable{
Table: n.target,
}
cols := planColumns(n.source)
if len(n.colNames) != len(cols) {
return errors.AssertionFailedf(
"number of column names (%d) does not match number of columns (%d)",
len(n.colNames), len(cols),
)
}
for i := 0; i < len(cols); i++ {
def := &tree.ColumnTableDef{
Name: tree.Name(n.colNames[i]),
Type: cols[i].Typ,
}
def.Nullable.Nullability = tree.SilentNull
create.Defs = append(create.Defs, def)
}
_, err := params.p.ExtendedEvalContext().ExecCfg.InternalExecutor.Exec(
params.ctx,
"create save table",
nil, /* txn */
create.String(),
)
return err
}
// issue inserts rows into the target table of the saveTableNode.
func (n *saveTableNode) issue(params runParams) error {
if v := &n.run.vals; len(v.Rows) > 0 {
stmt := fmt.Sprintf("INSERT INTO %s %s", n.target.String(), v.String())
if _, err := params.p.ExtendedEvalContext().ExecCfg.InternalExecutor.Exec(
params.ctx,
"insert into save table",
nil, /* txn */
stmt,
); err != nil {
return errors.Wrapf(err, "while running %s", stmt)
}
v.Rows = nil
}
return nil
}
// Next is part of the planNode interface.
func (n *saveTableNode) Next(params runParams) (bool, error) {
res, err := n.source.Next(params)
if err != nil {
return res, err
}
if !res {
// We are done. Insert any accumulated rows.
err := n.issue(params)
return false, err
}
row := n.source.Values()
exprs := make(tree.Exprs, len(row))
for i := range row {
exprs[i] = row[i]
}
n.run.vals.Rows = append(n.run.vals.Rows, exprs)
if len(n.run.vals.Rows) >= saveTableInsertBatch {
if err := n.issue(params); err != nil {
return false, err
}
}
return true, nil
}
// Values is part of the planNode interface.
func (n *saveTableNode) Values() tree.Datums {
return n.source.Values()
}
// Close is part of the planNode interface.
func (n *saveTableNode) Close(ctx context.Context) {
n.source.Close(ctx)
}
| pkg/sql/save_table.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.000489443598780781,
0.00020337256137281656,
0.0001654625521041453,
0.00017306905647274107,
0.00008226893987739459
] |
{
"id": 4,
"code_window": [
"query T\n",
"SELECT reloptions FROM pg_class WHERE relname = 'tbl'\n",
"----\n",
"{expire_after='00:10:00':::INTERVAL}\n",
"\n",
"statement ok\n",
"DROP TABLE tbl;\n",
"CREATE TABLE tbl (\n",
" id INT PRIMARY KEY,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"{ttl_expire_after='00:10:00':::INTERVAL}\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 26
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tests
import (
"context"
"fmt"
"time"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/testutils"
)
type backgroundFn func(ctx context.Context, u *versionUpgradeTest) error
// A backgroundStepper is a tool to run long-lived commands while a cluster is
// going through a sequence of version upgrade operations.
// It exposes a `launch` step that launches the method carrying out long-running
// work (in the background) and a `wait` step that collects any errors.
type backgroundStepper struct {
// This is the operation that will be launched in the background. When the
// context gets canceled, it should shut down and return without an error.
// The way to typically get this is:
//
// err := doSomething(ctx)
// if ctx.Err() != nil {
// return nil
// }
// return err
run backgroundFn
// When not nil, called with the error within `.wait()`. The interceptor
// gets a chance to ignore the error or produce a different one (via t.Fatal).
onStop func(context.Context, test.Test, *versionUpgradeTest, error)
nodes option.NodeListOption // nodes to monitor, defaults to c.All()
// Internal.
m cluster.Monitor
}
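// A minimal usage sketch (the stepper constructor and step helpers are the
// ones defined below; the warehouse count is an assumption for illustration):
// the stepper's launch is registered as an early upgrade step and its wait as
// a late one, so the background work spans the node-by-node upgrade.
//
//	stepper := backgroundJobsTestTPCCImport(t, 10 /* warehouses */)
//	u := newVersionUpgradeTest(c,
//		stepper.launch,
//		// ... binary upgrade / rollback steps ...
//		stepper.wait,
//	)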
// launch spawns the function the background step was initialized with.
func (s *backgroundStepper) launch(ctx context.Context, t test.Test, u *versionUpgradeTest) {
nodes := s.nodes
if nodes == nil {
nodes = u.c.All()
}
s.m = u.c.NewMonitor(ctx, nodes)
s.m.Go(func(ctx context.Context) error {
return s.run(ctx, u)
})
}
func (s *backgroundStepper) wait(ctx context.Context, t test.Test, u *versionUpgradeTest) {
// We don't care about the workload failing since we only use it to produce a
// few `RESTORE` jobs. And indeed workload will fail because it does not
// tolerate pausing of its jobs.
err := s.m.WaitE()
if s.onStop != nil {
s.onStop(ctx, t, u, err)
} else if err != nil {
t.Fatal(err)
}
}
func overrideErrorFromJobsTable(ctx context.Context, t test.Test, u *versionUpgradeTest, _ error) {
db := u.conn(ctx, t, 1)
t.L().Printf("Resuming any paused jobs left")
for {
_, err := db.ExecContext(
ctx,
`RESUME JOBS (SELECT job_id FROM [SHOW JOBS] WHERE status = $1);`,
jobs.StatusPaused,
)
if err != nil {
t.Fatal(err)
}
row := db.QueryRow(
"SELECT count(*) FROM [SHOW JOBS] WHERE status = $1",
jobs.StatusPauseRequested,
)
var nNotYetPaused int
if err = row.Scan(&nNotYetPaused); err != nil {
t.Fatal(err)
}
if nNotYetPaused <= 0 {
break
}
// Sleep a bit not to DOS the jobs table.
time.Sleep(10 * time.Second)
t.L().Printf("Waiting for %d jobs to pause", nNotYetPaused)
}
t.L().Printf("Waiting for jobs to complete...")
var err error
for {
q := "SHOW JOBS WHEN COMPLETE (SELECT job_id FROM [SHOW JOBS]);"
_, err = db.ExecContext(ctx, q)
if testutils.IsError(err, "pq: restart transaction:.*") {
t.L().Printf("SHOW JOBS WHEN COMPLETE returned %s, retrying", err.Error())
time.Sleep(10 * time.Second)
continue
}
break
}
if err != nil {
t.Fatal(err)
}
}
func backgroundJobsTestTPCCImport(t test.Test, warehouses int) backgroundStepper {
return backgroundStepper{run: func(ctx context.Context, u *versionUpgradeTest) error {
// The workload has to run on one of the nodes of the cluster.
err := u.c.RunE(ctx, u.c.Node(1), tpccImportCmd(warehouses))
if ctx.Err() != nil {
// If the context is canceled, that's probably why the workload returned
// so swallow error. (This is how the harness tells us to shut down the
// workload).
return nil
}
return err
},
onStop: overrideErrorFromJobsTable,
}
}
func pauseAllJobsStep() versionStep {
return func(ctx context.Context, t test.Test, u *versionUpgradeTest) {
db := u.conn(ctx, t, 1)
_, err := db.ExecContext(
ctx,
`PAUSE JOBS (SELECT job_id FROM [SHOW JOBS] WHERE status = $1);`,
jobs.StatusRunning,
)
if err != nil {
t.Fatal(err)
}
row := db.QueryRow("SELECT count(*) FROM [SHOW JOBS] WHERE status LIKE 'pause%'")
var nPaused int
if err := row.Scan(&nPaused); err != nil {
t.Fatal(err)
}
t.L().Printf("Paused %d jobs", nPaused)
time.Sleep(time.Second)
}
}
func makeResumeAllJobsAndWaitStep(d time.Duration) versionStep {
var numResumes int
return func(ctx context.Context, t test.Test, u *versionUpgradeTest) {
numResumes++
t.L().Printf("Resume all jobs number: %d", numResumes)
db := u.conn(ctx, t, 1)
_, err := db.ExecContext(
ctx,
`RESUME JOBS (SELECT job_id FROM [SHOW JOBS] WHERE status = $1);`,
jobs.StatusPaused,
)
if err != nil {
t.Fatal(err)
}
row := db.QueryRow(
"SELECT count(*) FROM [SHOW JOBS] WHERE status = $1",
jobs.StatusRunning,
)
var nRunning int
if err := row.Scan(&nRunning); err != nil {
t.Fatal(err)
}
t.L().Printf("Resumed %d jobs", nRunning)
time.Sleep(d)
}
}
func checkForFailedJobsStep(ctx context.Context, t test.Test, u *versionUpgradeTest) {
t.L().Printf("Checking for failed jobs.")
db := u.conn(ctx, t, 1)
// The ifnull is because the move to session-based job claims in 20.2 has left
// us without a populated coordinator_id in crdb_internal.jobs. We may start
// populating it with the claim_instance_id.
rows, err := db.Query(`
SELECT job_id, job_type, description, status, error, ifnull(coordinator_id, 0)
FROM [SHOW JOBS] WHERE status = $1 OR status = $2`,
jobs.StatusFailed, jobs.StatusReverting,
)
if err != nil {
t.Fatal(err)
}
var jobType, desc, status, jobError string
var jobID jobspb.JobID
var coordinatorID int64
var errMsg string
for rows.Next() {
err := rows.Scan(&jobID, &jobType, &desc, &status, &jobError, &coordinatorID)
if err != nil {
t.Fatal(err)
}
// Concatenate all unsuccessful jobs info.
errMsg = fmt.Sprintf(
"%sUnsuccessful job %d of type %s, description %s, status %s, error %s, coordinator %d\n",
errMsg, jobID, jobType, desc, status, jobError, coordinatorID,
)
}
if errMsg != "" {
nodeInfo := "Cluster info\n"
for i := range u.c.All() {
nodeInfo = fmt.Sprintf(
"%sNode %d: %s\n", nodeInfo, i+1, u.binaryVersion(ctx, t, i+1))
}
t.Fatalf("%s\n%s", nodeInfo, errMsg)
}
}
func runJobsMixedVersions(
ctx context.Context, t test.Test, c cluster.Cluster, warehouses int, predecessorVersion string,
) {
// An empty string means that the cockroach binary specified by flag
// `cockroach` will be used.
const mainVersion = ""
roachNodes := c.All()
backgroundTPCC := backgroundJobsTestTPCCImport(t, warehouses)
resumeAllJobsAndWaitStep := makeResumeAllJobsAndWaitStep(10 * time.Second)
c.Put(ctx, t.DeprecatedWorkload(), "./workload", c.Node(1))
u := newVersionUpgradeTest(c,
uploadAndStartFromCheckpointFixture(roachNodes, predecessorVersion),
waitForUpgradeStep(roachNodes),
preventAutoUpgradeStep(1),
backgroundTPCC.launch,
func(ctx context.Context, _ test.Test, u *versionUpgradeTest) {
time.Sleep(10 * time.Second)
},
checkForFailedJobsStep,
pauseAllJobsStep(),
// Roll the nodes into the new version one by one, while repeatedly pausing
// and resuming all jobs.
binaryUpgradeStep(c.Node(3), mainVersion),
resumeAllJobsAndWaitStep,
checkForFailedJobsStep,
pauseAllJobsStep(),
binaryUpgradeStep(c.Node(2), mainVersion),
resumeAllJobsAndWaitStep,
checkForFailedJobsStep,
pauseAllJobsStep(),
binaryUpgradeStep(c.Node(1), mainVersion),
resumeAllJobsAndWaitStep,
checkForFailedJobsStep,
pauseAllJobsStep(),
binaryUpgradeStep(c.Node(4), mainVersion),
resumeAllJobsAndWaitStep,
checkForFailedJobsStep,
pauseAllJobsStep(),
// Roll back again, which ought to be fine because the cluster upgrade was
// not finalized.
binaryUpgradeStep(c.Node(2), predecessorVersion),
resumeAllJobsAndWaitStep,
checkForFailedJobsStep,
pauseAllJobsStep(),
binaryUpgradeStep(c.Node(4), predecessorVersion),
resumeAllJobsAndWaitStep,
checkForFailedJobsStep,
pauseAllJobsStep(),
binaryUpgradeStep(c.Node(3), predecessorVersion),
resumeAllJobsAndWaitStep,
checkForFailedJobsStep,
pauseAllJobsStep(),
binaryUpgradeStep(c.Node(1), predecessorVersion),
resumeAllJobsAndWaitStep,
checkForFailedJobsStep,
pauseAllJobsStep(),
// Roll nodes forward and finalize upgrade.
binaryUpgradeStep(c.Node(4), mainVersion),
resumeAllJobsAndWaitStep,
checkForFailedJobsStep,
pauseAllJobsStep(),
binaryUpgradeStep(c.Node(3), mainVersion),
resumeAllJobsAndWaitStep,
checkForFailedJobsStep,
pauseAllJobsStep(),
binaryUpgradeStep(c.Node(1), mainVersion),
resumeAllJobsAndWaitStep,
checkForFailedJobsStep,
pauseAllJobsStep(),
binaryUpgradeStep(c.Node(2), mainVersion),
resumeAllJobsAndWaitStep,
checkForFailedJobsStep,
pauseAllJobsStep(),
allowAutoUpgradeStep(1),
waitForUpgradeStep(roachNodes),
resumeAllJobsAndWaitStep,
backgroundTPCC.wait,
checkForFailedJobsStep,
)
u.run(ctx, t)
}
func registerJobsMixedVersions(r registry.Registry) {
r.Add(registry.TestSpec{
Name: "jobs/mixed-versions",
Owner: registry.OwnerBulkIO,
Skip: "#67587",
// Jobs infrastructure was unstable prior to 20.1 in terms of the behavior
// of `PAUSE/CANCEL JOB` commands which were best effort and relied on the
// job itself to detect the request. These were fixed by introducing new job
// state machine states `Status{Pause,Cancel}Requested`. This test's purpose
// is to test the state transitions of jobs from paused to resumed and
// vice versa in order to detect regressions in the work done for 20.1.
Cluster: r.MakeClusterSpec(4),
Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
predV, err := PredecessorVersion(*t.BuildVersion())
if err != nil {
t.Fatal(err)
}
warehouses := 10
runJobsMixedVersions(ctx, t, c, warehouses, predV)
},
})
}
| pkg/cmd/roachtest/tests/mixed_version_jobs.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0016477751778438687,
0.0002130636858055368,
0.00016513456648681313,
0.00017020557424984872,
0.00024607928935438395
] |
{
"id": 5,
"code_window": [
"CREATE TABLE tbl (\n",
" id INT PRIMARY KEY,\n",
" text TEXT,\n",
" FAMILY (id, text)\n",
") WITH (expire_after = '10 minutes'::interval)\n",
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
") WITH (ttl_expire_after = '10 minutes'::interval)\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 34
} | statement error value of "expire_after" must be an interval
CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = ' xx invalid interval xx')
statement error value of "expire_after" must be at least zero
CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = '-10 minutes')
statement ok
CREATE TABLE tbl (
id INT PRIMARY KEY,
text TEXT,
FAMILY (id, text)
) WITH (expire_after = '10 minutes')
query TT
SHOW CREATE TABLE tbl
----
tbl CREATE TABLE public.tbl (
id INT8 NOT NULL,
text STRING NULL,
CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),
FAMILY fam_0_id_text (id, text)
) WITH (expire_after = '00:10:00':::INTERVAL)
query T
SELECT reloptions FROM pg_class WHERE relname = 'tbl'
----
{expire_after='00:10:00':::INTERVAL}
statement ok
DROP TABLE tbl;
CREATE TABLE tbl (
id INT PRIMARY KEY,
text TEXT,
FAMILY (id, text)
) WITH (expire_after = '10 minutes'::interval)
query TT
SHOW CREATE TABLE tbl
----
tbl CREATE TABLE public.tbl (
id INT8 NOT NULL,
text STRING NULL,
CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),
FAMILY fam_0_id_text (id, text)
) WITH (expire_after = '00:10:00':::INTERVAL)
| pkg/sql/logictest/testdata/logic_test/row_level_ttl | 1 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.99601811170578,
0.802452564239502,
0.1258479803800583,
0.9742200970649719,
0.3392963707447052
] |
{
"id": 5,
"code_window": [
"CREATE TABLE tbl (\n",
" id INT PRIMARY KEY,\n",
" text TEXT,\n",
" FAMILY (id, text)\n",
") WITH (expire_after = '10 minutes'::interval)\n",
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
") WITH (ttl_expire_after = '10 minutes'::interval)\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 34
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package log
type logFormatter interface {
formatterName() string
// doc is used to generate the formatter documentation.
doc() string
// formatEntry formats a logEntry into a newly allocated *buffer.
// The caller is responsible for calling putBuffer() afterwards.
formatEntry(entry logEntry) *buffer
}
var formatParsers = map[string]string{
"crdb-v1": "v1",
"crdb-v1-count": "v1",
"crdb-v1-tty": "v1",
"crdb-v1-tty-count": "v1",
"crdb-v2": "v2",
"crdb-v2-tty": "v2",
"json": "json",
"json-compact": "json",
"json-fluent": "json",
"json-fluent-compact": "json",
}
var formatters = func() map[string]logFormatter {
m := make(map[string]logFormatter)
r := func(f logFormatter) {
m[f.formatterName()] = f
}
r(formatCrdbV1{})
r(formatCrdbV1WithCounter{})
r(formatCrdbV1TTY{})
r(formatCrdbV1TTYWithCounter{})
r(formatCrdbV2{})
r(formatCrdbV2TTY{})
r(formatFluentJSONCompact{})
r(formatFluentJSONFull{})
r(formatJSONCompact{})
r(formatJSONFull{})
return m
}()
// GetFormatterDocs returns the embedded documentation for all the
// supported formats.
func GetFormatterDocs() map[string]string {
m := make(map[string]string)
for fmtName, f := range formatters {
m[fmtName] = f.doc()
}
return m
}
| pkg/util/log/formats.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0005244096391834319,
0.0002309290721314028,
0.00016943045193329453,
0.00017560538253746927,
0.00012117995356675237
] |
{
"id": 5,
"code_window": [
"CREATE TABLE tbl (\n",
" id INT PRIMARY KEY,\n",
" text TEXT,\n",
" FAMILY (id, text)\n",
") WITH (expire_after = '10 minutes'::interval)\n",
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
") WITH (ttl_expire_after = '10 minutes'::interval)\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 34
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package schemachanger_test
import (
"context"
gosql "database/sql"
"fmt"
"sync"
"sync/atomic"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scrun"
"github.com/cockroachdb/cockroach/pkg/sql/tests"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSchemaChangeWaitsForOtherSchemaChanges(t *testing.T) {
defer leaktest.AfterTest(t)()
t.Run("wait for legacy schema changes", func(t *testing.T) {
// This test starts a legacy schema change job (job 1), and then starts
// another legacy schema change job (job 2) and a declarative schema change
// job (job 3) while job 1 is backfilling. Job 1 is resumed after job 2
// has started running.
ctx := context.Background()
var job1Backfill sync.Once
var job2Resume sync.Once
var job3Wait sync.Once
// Closed when we enter the RunBeforeBackfill knob of job 1.
job1BackfillNotification := make(chan struct{})
// Closed when we're ready to continue with job 1.
job1ContinueNotification := make(chan struct{})
// Closed when job 2 starts.
job2ResumeNotification := make(chan struct{})
// Closed when job 3 starts waiting for concurrent schema changes to finish.
job3WaitNotification := make(chan struct{})
var job1ID jobspb.JobID
var s serverutils.TestServerInterface
var kvDB *kv.DB
params, _ := tests.CreateTestServerParams()
params.Knobs = base.TestingKnobs{
SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{
RunBeforeResume: func(jobID jobspb.JobID) error {
// Only block in job 2.
if job1ID == 0 || jobID == job1ID {
job1ID = jobID
return nil
}
job2Resume.Do(func() {
close(job2ResumeNotification)
})
return nil
},
RunBeforeBackfill: func() error {
job1Backfill.Do(func() {
close(job1BackfillNotification)
<-job1ContinueNotification
})
return nil
},
},
SQLDeclarativeSchemaChanger: &scrun.TestingKnobs{
BeforeStage: func(p scplan.Plan, idx int) error {
// Assert that when job 3 is running, there are no mutations other
// than the ones associated with this schema change.
if p.Params.ExecutionPhase < scop.PostCommitPhase {
return nil
}
table := desctestutils.TestingGetTableDescriptor(
kvDB, keys.SystemSQLCodec, "db", "public", "t")
// There are 2 schema changes that should precede job 3.
// The declarative schema changer uses the same mutation ID for all
// its mutations.
for _, m := range table.AllMutations() {
assert.Equal(t, int(m.MutationID()), 3)
}
return nil
},
BeforeWaitingForConcurrentSchemaChanges: func(_ []string) {
job3Wait.Do(func() {
close(job3WaitNotification)
})
},
},
JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(),
}
var sqlDB *gosql.DB
s, sqlDB, kvDB = serverutils.StartServer(t, params)
defer s.Stopper().Stop(ctx)
tdb := sqlutils.MakeSQLRunner(sqlDB)
tdb.Exec(t, `CREATE DATABASE db`)
tdb.Exec(t, `CREATE TABLE db.t (a INT PRIMARY KEY)`)
g := ctxgroup.WithContext(ctx)
// Start job 1: An index schema change, which does not use the new schema
// changer.
g.GoCtx(func(ctx context.Context) error {
_, err := sqlDB.ExecContext(ctx, `CREATE INDEX idx ON db.t(a)`)
assert.NoError(t, err)
return nil
})
<-job1BackfillNotification
// Start job 3: A column schema change which uses the new schema changer.
// The transaction will not actually commit until job 1 has finished.
g.GoCtx(func(ctx context.Context) error {
conn, err := sqlDB.Conn(ctx)
if err != nil {
return err
}
_, err = conn.ExecContext(ctx, `SET experimental_use_new_schema_changer = 'unsafe'`)
assert.NoError(t, err)
_, err = conn.ExecContext(ctx, `ALTER TABLE db.t ADD COLUMN b INT DEFAULT 1`)
assert.NoError(t, err)
return nil
})
<-job3WaitNotification
// Start job 2: Another index schema change which does not use the new
// schema changer.
g.GoCtx(func(ctx context.Context) error {
_, err := sqlDB.ExecContext(ctx, `CREATE INDEX idx2 ON db.t(a)`)
assert.NoError(t, err)
return nil
})
// Wait for job 2 to start.
<-job2ResumeNotification
// Finally, let job 1 finish, which will unblock the
// others.
close(job1ContinueNotification)
require.NoError(t, g.Wait())
// Check that job 3 was created last.
tdb.CheckQueryResults(t,
fmt.Sprintf(`SELECT job_type, status, description FROM crdb_internal.jobs WHERE job_type = '%s' OR job_type = '%s' ORDER BY created`,
jobspb.TypeSchemaChange.String(), jobspb.TypeNewSchemaChange.String(),
),
[][]string{
{jobspb.TypeSchemaChange.String(), string(jobs.StatusSucceeded), `CREATE INDEX idx ON db.public.t (a)`},
{jobspb.TypeSchemaChange.String(), string(jobs.StatusSucceeded), `CREATE INDEX idx2 ON db.public.t (a)`},
{jobspb.TypeNewSchemaChange.String(), string(jobs.StatusSucceeded), `schema change job`},
},
)
})
t.Run("wait for declarative schema changes", func(t *testing.T) {
// This test starts a declarative schema change job (job 1), and then starts
// another declarative schema change job (job 2) while job 1 is backfilling.
ctx := context.Background()
var job1Backfill sync.Once
var job2Wait sync.Once
// Closed when we enter the RunBeforeBackfill knob of job 1.
job1BackfillNotification := make(chan struct{})
// Closed when we're ready to continue with job 1.
job1ContinueNotification := make(chan struct{})
// Closed when job 2 starts waiting for concurrent schema changes to finish.
job2WaitNotification := make(chan struct{})
stmt1 := `ALTER TABLE db.t ADD COLUMN b INT8 DEFAULT 1`
stmt2 := `ALTER TABLE db.t ADD COLUMN c INT8 DEFAULT 2`
var kvDB *kv.DB
params, _ := tests.CreateTestServerParams()
params.Knobs = base.TestingKnobs{
SQLDeclarativeSchemaChanger: &scrun.TestingKnobs{
BeforeStage: func(p scplan.Plan, idx int) error {
// Verify that we never queue mutations for job 2 before finishing job
// 1.
if p.Params.ExecutionPhase < scop.PostCommitPhase {
return nil
}
table := desctestutils.TestingGetTableDescriptor(
kvDB, keys.SystemSQLCodec, "db", "public", "t")
mutations := table.AllMutations()
if len(mutations) == 0 {
t.Errorf("unexpected empty mutations")
return errors.Errorf("test failure")
}
var idsSeen []descpb.MutationID
for _, m := range mutations {
if len(idsSeen) == 0 || m.MutationID() > idsSeen[len(idsSeen)-1] {
idsSeen = append(idsSeen, m.MutationID())
}
}
highestID := idsSeen[len(idsSeen)-1]
assert.Truef(t, highestID <= 1, "unexpected mutation IDs %v", idsSeen)
// Block job 1 during the backfill.
s := p.Stages[idx]
stmt := p.TargetState.Statements[0].Statement
if stmt != stmt1 || s.Type() != scop.BackfillType {
return nil
}
for _, op := range s.EdgeOps {
if backfillOp, ok := op.(*scop.BackfillIndex); ok && backfillOp.IndexID == descpb.IndexID(2) {
job1Backfill.Do(func() {
close(job1BackfillNotification)
<-job1ContinueNotification
})
}
}
return nil
},
BeforeWaitingForConcurrentSchemaChanges: func(stmts []string) {
if stmts[0] != stmt2 {
return
}
job2Wait.Do(func() {
close(job2WaitNotification)
})
},
},
}
var s serverutils.TestServerInterface
var sqlDB *gosql.DB
s, sqlDB, kvDB = serverutils.StartServer(t, params)
defer s.Stopper().Stop(ctx)
tdb := sqlutils.MakeSQLRunner(sqlDB)
tdb.Exec(t, `CREATE DATABASE db`)
tdb.Exec(t, `CREATE TABLE db.t (a INT PRIMARY KEY)`)
g := ctxgroup.WithContext(ctx)
g.GoCtx(func(ctx context.Context) error {
conn, err := sqlDB.Conn(ctx)
if err != nil {
return err
}
_, err = conn.ExecContext(ctx, `SET experimental_use_new_schema_changer = 'unsafe'`)
assert.NoError(t, err)
_, err = conn.ExecContext(ctx, stmt1)
assert.NoError(t, err)
return nil
})
<-job1BackfillNotification
g.GoCtx(func(ctx context.Context) error {
conn, err := sqlDB.Conn(ctx)
if err != nil {
return err
}
_, err = conn.ExecContext(ctx, `SET experimental_use_new_schema_changer = 'unsafe'`)
assert.NoError(t, err)
_, err = conn.ExecContext(ctx, stmt2)
assert.NoError(t, err)
return nil
})
<-job2WaitNotification
close(job1ContinueNotification)
require.NoError(t, g.Wait())
tdb.CheckQueryResults(t,
fmt.Sprintf(`SELECT job_type, status FROM crdb_internal.jobs WHERE job_type = '%s' OR job_type = '%s' ORDER BY created`,
jobspb.TypeSchemaChange.String(), jobspb.TypeNewSchemaChange.String(),
),
[][]string{
{jobspb.TypeNewSchemaChange.String(), string(jobs.StatusSucceeded)},
{jobspb.TypeNewSchemaChange.String(), string(jobs.StatusSucceeded)},
},
)
})
}
func TestConcurrentOldSchemaChangesCannotStart(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
var doOnce sync.Once
// Closed when we enter the RunBeforeBackfill knob.
beforeBackfillNotification := make(chan struct{})
// Closed when we're ready to continue with the schema change.
continueNotification := make(chan struct{})
var kvDB *kv.DB
params, _ := tests.CreateTestServerParams()
params.Knobs = base.TestingKnobs{
SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{
RunBeforeResume: func(jobID jobspb.JobID) error {
// Assert that old schema change jobs never run in this test.
t.Errorf("unexpected old schema change job %d", jobID)
return nil
},
},
SQLDeclarativeSchemaChanger: &scrun.TestingKnobs{
BeforeStage: func(p scplan.Plan, idx int) error {
// Verify that we never get a mutation ID not associated with the schema
// change that is running.
if p.Params.ExecutionPhase < scop.PostCommitPhase {
return nil
}
table := desctestutils.TestingGetTableDescriptor(
kvDB, keys.SystemSQLCodec, "db", "public", "t")
for _, m := range table.AllMutations() {
assert.LessOrEqual(t, int(m.MutationID()), 2)
}
s := p.Stages[idx]
if s.Type() != scop.BackfillType {
return nil
}
for _, op := range s.EdgeOps {
if _, ok := op.(*scop.BackfillIndex); ok {
doOnce.Do(func() {
close(beforeBackfillNotification)
<-continueNotification
})
}
}
return nil
},
},
JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(),
}
var s serverutils.TestServerInterface
var sqlDB *gosql.DB
s, sqlDB, kvDB = serverutils.StartServer(t, params)
defer s.Stopper().Stop(ctx)
tdb := sqlutils.MakeSQLRunner(sqlDB)
tdb.Exec(t, `CREATE DATABASE db`)
tdb.Exec(t, `CREATE TABLE db.t (a INT PRIMARY KEY)`)
g := ctxgroup.WithContext(ctx)
g.GoCtx(func(ctx context.Context) error {
conn, err := sqlDB.Conn(ctx)
if err != nil {
return err
}
_, err = conn.ExecContext(ctx, `SET experimental_use_new_schema_changer = 'unsafe'`)
assert.NoError(t, err)
_, err = conn.ExecContext(ctx, `ALTER TABLE db.t ADD COLUMN b INT DEFAULT 1`)
assert.NoError(t, err)
return nil
})
<-beforeBackfillNotification
{
conn, err := sqlDB.Conn(ctx)
require.NoError(t, err)
_, err = conn.ExecContext(ctx, `SET experimental_use_new_schema_changer = 'off'`)
require.NoError(t, err)
for _, stmt := range []string{
`ALTER TABLE db.t ADD COLUMN c INT DEFAULT 2`,
`CREATE INDEX ON db.t(a)`,
`ALTER TABLE db.t RENAME COLUMN a TO c`,
`CREATE TABLE db.t2 (i INT PRIMARY KEY, a INT REFERENCES db.t)`,
`CREATE VIEW db.v AS SELECT a FROM db.t`,
`ALTER TABLE db.t RENAME TO db.new`,
`GRANT ALL ON db.t TO root`,
`TRUNCATE TABLE db.t`,
`DROP TABLE db.t`,
} {
_, err = conn.ExecContext(ctx, stmt)
assert.Truef(t,
testutils.IsError(err, `cannot perform a schema change on table "t"`) ||
testutils.IsError(err, `cannot perform TRUNCATE on "t" which has indexes being dropped`),
"statement: %s, error: %s", stmt, err,
)
}
}
close(continueNotification)
require.NoError(t, g.Wait())
}
func TestInsertDuringAddColumnNotWritingToCurrentPrimaryIndex(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
var doOnce sync.Once
// Closed when we enter the RunBeforeBackfill knob.
beforeBackfillNotification := make(chan struct{})
// Closed when we're ready to continue with the schema change.
continueNotification := make(chan struct{})
var kvDB *kv.DB
params, _ := tests.CreateTestServerParams()
params.Knobs = base.TestingKnobs{
SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{
RunBeforeResume: func(jobID jobspb.JobID) error {
// Assert that old schema change jobs never run in this test.
t.Errorf("unexpected old schema change job %d", jobID)
return nil
},
},
SQLDeclarativeSchemaChanger: &scrun.TestingKnobs{
BeforeStage: func(p scplan.Plan, stageIdx int) error {
// Verify that we never get a mutation ID not associated with the schema
// change that is running.
if p.Params.ExecutionPhase < scop.PostCommitPhase {
return nil
}
table := desctestutils.TestingGetTableDescriptor(
kvDB, keys.SystemSQLCodec, "db", "public", "t")
for _, m := range table.AllMutations() {
assert.LessOrEqual(t, int(m.MutationID()), 2)
}
s := p.Stages[stageIdx]
if s.Type() != scop.BackfillType {
return nil
}
for _, op := range s.EdgeOps {
if _, ok := op.(*scop.BackfillIndex); ok {
doOnce.Do(func() {
close(beforeBackfillNotification)
<-continueNotification
})
}
}
return nil
},
},
}
var s serverutils.TestServerInterface
var sqlDB *gosql.DB
s, sqlDB, kvDB = serverutils.StartServer(t, params)
defer s.Stopper().Stop(ctx)
tdb := sqlutils.MakeSQLRunner(sqlDB)
tdb.Exec(t, `CREATE DATABASE db`)
tdb.Exec(t, `CREATE TABLE db.t (a INT PRIMARY KEY)`)
desc := desctestutils.TestingGetPublicTableDescriptor(kvDB, keys.SystemSQLCodec, "db", "t")
g := ctxgroup.WithContext(ctx)
g.GoCtx(func(ctx context.Context) error {
conn, err := sqlDB.Conn(ctx)
if err != nil {
return err
}
_, err = conn.ExecContext(ctx, `SET experimental_use_new_schema_changer = 'unsafe'`)
assert.NoError(t, err)
_, err = conn.ExecContext(ctx, `ALTER TABLE db.t ADD COLUMN b INT DEFAULT 100`)
assert.NoError(t, err)
return nil
})
<-beforeBackfillNotification
// At this point the backfill operation is paused as it's about to begin.
// The new column `b` is not yet public, so a concurrent insert should:
// - in the current primary index, only insert a value for `a`,
// - in the new secondary index, which will be the future primary index,
// insert a value both for `a` and the default value for `b`, because that
// new index is delete-and-write-only as it is being backfilled.
tdb.Exec(t, `
SET tracing = on,kv;
INSERT INTO db.t (a) VALUES (10);
SET tracing = off;`)
// Trigger the resumption and conclusion of the backfill,
// and hence of the ADD COLUMN transaction.
close(continueNotification)
require.NoError(t, g.Wait())
// Check that the expectations set out above are verified.
results := tdb.QueryStr(t, `
SELECT message
FROM [SHOW KV TRACE FOR SESSION]
WHERE message LIKE 'CPut %' OR message LIKE 'InitPut %'`)
require.GreaterOrEqual(t, len(results), 2)
require.Equal(t, fmt.Sprintf("CPut /Table/%d/1/10/0 -> /TUPLE/", desc.GetID()), results[0][0])
require.Equal(t, fmt.Sprintf("InitPut /Table/%d/2/10/0 -> /TUPLE/2:2:Int/100", desc.GetID()), results[1][0])
}
// TestDropJobCancelable ensures that simple operations, such as
// drops, are not cancelable.
func TestDropJobCancelable(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testCases := []struct {
desc string
query string
cancelable bool
}{
{
"simple drop sequence",
"BEGIN;DROP SEQUENCE db.sq1; END;",
false,
},
{
"simple drop view",
"BEGIN;DROP VIEW db.v1; END;",
false,
},
{
"simple drop table",
"BEGIN;DROP TABLE db.t1 CASCADE; END;",
false,
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
params, _ := tests.CreateTestServerParams()
// Wait groups for synchronizing various parts of the test.
var schemaChangeStarted sync.WaitGroup
schemaChangeStarted.Add(1)
var blockSchemaChange sync.WaitGroup
blockSchemaChange.Add(1)
var finishedSchemaChange sync.WaitGroup
finishedSchemaChange.Add(1)
// Atomic for checking if job control hook
// was enabled.
jobControlHookEnabled := uint64(0)
params.Knobs.JobsTestingKnobs = jobs.NewTestingKnobsWithShortIntervals()
params.Knobs.SQLSchemaChanger = &sql.SchemaChangerTestingKnobs{
RunBeforeResume: func(jobID jobspb.JobID) error {
if atomic.SwapUint64(&jobControlHookEnabled, 0) == 1 {
schemaChangeStarted.Done()
blockSchemaChange.Wait()
}
return nil
},
}
s, sqlDB, _ := serverutils.StartServer(t, params)
ctx := context.Background()
defer s.Stopper().Stop(ctx)
// Setup.
_, err := sqlDB.Exec(`
CREATE DATABASE db;
CREATE TABLE db.t1 (name VARCHAR(256));
CREATE TABLE db.t2 (name VARCHAR(256));
CREATE VIEW db.v1 AS (SELECT a.name as name2, b.name FROM db.t1 as a, db.t2 as b);
CREATE SEQUENCE db.sq1;
`)
require.NoError(t, err)
go func() {
atomic.StoreUint64(&jobControlHookEnabled, 1)
_, err := sqlDB.Exec(tc.query)
if tc.cancelable && !testutils.IsError(err, "job canceled by user") {
t.Errorf("expected user to have canceled job, got %v", err)
}
if !tc.cancelable && err != nil {
t.Error(err)
}
finishedSchemaChange.Done()
}()
schemaChangeStarted.Wait()
rows, err := sqlDB.Query(`
SELECT job_id FROM [SHOW JOBS]
WHERE
job_type = 'SCHEMA CHANGE' AND
status = $1`, jobs.StatusRunning)
if err != nil {
t.Fatalf("unexpected error querying rows %s", err)
}
for rows.Next() {
jobID := ""
err := rows.Scan(&jobID)
if err != nil {
t.Fatalf("unexpected error fetching job ID %s", err)
}
_, err = sqlDB.Exec(`CANCEL JOB $1`, jobID)
if !tc.cancelable && !testutils.IsError(err, "not cancelable") {
t.Fatalf("expected schema change job to be not cancelable; found %v ", err)
} else if tc.cancelable && err != nil {
t.Fatal(err)
}
}
blockSchemaChange.Done()
finishedSchemaChange.Wait()
})
}
}
| pkg/sql/schemachanger/schemachanger_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.9832792282104492,
0.07244246453046799,
0.000166814133990556,
0.00017560855485498905,
0.23020721971988678
] |
{
"id": 5,
"code_window": [
"CREATE TABLE tbl (\n",
" id INT PRIMARY KEY,\n",
" text TEXT,\n",
" FAMILY (id, text)\n",
") WITH (expire_after = '10 minutes'::interval)\n",
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
") WITH (ttl_expire_after = '10 minutes'::interval)\n"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 34
} | reset_stmt ::=
reset_session_stmt
| reset_csetting_stmt
| docs/generated/sql/bnf/reset_stmt.bnf | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0001810301619116217,
0.0001810301619116217,
0.0001810301619116217,
0.0001810301619116217,
0
] |
{
"id": 6,
"code_window": [
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n",
"----\n",
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (expire_after = '00:10:00':::INTERVAL)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace"
],
"after_edit": [
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (ttl_expire_after = '00:10:00':::INTERVAL)"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package paramparse
import (
"context"
"github.com/cockroachdb/cockroach/pkg/geo/geoindex"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
"github.com/cockroachdb/errors"
)
// SetStorageParameters sets the given storage parameters using the
// given observer.
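//
// A hypothetical call site (the statement node and descriptor variables are
// assumptions for illustration):
//
//	err := SetStorageParameters(ctx, semaCtx, evalCtx, n.StorageParams,
//		NewTableStorageParamObserver(tableDesc))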
func SetStorageParameters(
ctx context.Context,
semaCtx *tree.SemaContext,
evalCtx *tree.EvalContext,
params tree.StorageParams,
paramObserver StorageParamObserver,
) error {
for _, sp := range params {
key := string(sp.Key)
if sp.Value == nil {
return pgerror.Newf(pgcode.InvalidParameterValue, "storage parameter %q requires a value", key)
}
// Expressions may be an unresolved name.
// Cast these as strings.
expr := UnresolvedNameToStrVal(sp.Value)
// Convert the expressions to a datum.
typedExpr, err := tree.TypeCheck(ctx, expr, semaCtx, types.Any)
if err != nil {
return err
}
if typedExpr, err = evalCtx.NormalizeExpr(typedExpr); err != nil {
return err
}
datum, err := typedExpr.Eval(evalCtx)
if err != nil {
return err
}
if err := paramObserver.onSet(ctx, semaCtx, evalCtx, key, datum); err != nil {
return err
}
}
return paramObserver.runPostChecks()
}
// ResetStorageParameters resets the given storage parameters using the
// given observer.
func ResetStorageParameters(
ctx context.Context,
evalCtx *tree.EvalContext,
params tree.NameList,
paramObserver StorageParamObserver,
) error {
for _, p := range params {
if err := paramObserver.onReset(evalCtx, string(p)); err != nil {
return err
}
}
return paramObserver.runPostChecks()
}
// StorageParamObserver applies a storage parameter to an underlying item.
type StorageParamObserver interface {
// onSet is called during CREATE [TABLE | INDEX] ... WITH (...) or
// ALTER [TABLE | INDEX] ... WITH (...).
onSet(ctx context.Context, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error
// onReset is called during ALTER [TABLE | INDEX] ... RESET (...)
onReset(evalCtx *tree.EvalContext, key string) error
// runPostChecks is called after all storage parameters have been set.
// This allows checking whether multiple storage parameters together
// form a valid configuration.
runPostChecks() error
}
// TableStorageParamObserver observes storage parameters for tables.
type TableStorageParamObserver struct {
tableDesc *tabledesc.Mutable
}
// NewTableStorageParamObserver returns a new TableStorageParamObserver.
func NewTableStorageParamObserver(tableDesc *tabledesc.Mutable) *TableStorageParamObserver {
return &TableStorageParamObserver{tableDesc: tableDesc}
}
var _ StorageParamObserver = (*TableStorageParamObserver)(nil)
// runPostChecks implements the StorageParamObserver interface.
func (po *TableStorageParamObserver) runPostChecks() error {
if err := tabledesc.ValidateRowLevelTTL(po.tableDesc.GetRowLevelTTL()); err != nil {
return err
}
return nil
}
type tableParam struct {
onSet func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error
onReset func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error
}
var tableParams = map[string]tableParam{
`fillfactor`: {
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
return setFillFactorStorageParam(evalCtx, key, datum)
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
// Operation is a no-op so do nothing.
return nil
},
},
`autovacuum_enabled`: {
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
var boolVal bool
if stringVal, err := DatumAsString(evalCtx, key, datum); err == nil {
boolVal, err = ParseBoolVar(key, stringVal)
if err != nil {
return err
}
} else {
s, err := GetSingleBool(key, datum)
if err != nil {
return err
}
boolVal = bool(*s)
}
if !boolVal && evalCtx != nil {
evalCtx.ClientNoticeSender.BufferClientNotice(
evalCtx.Context,
pgnotice.Newf(`storage parameter "%s = %s" is ignored`, key, datum.String()),
)
}
return nil
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
// Operation is a no-op so do nothing.
return nil
},
},
`expire_after`: {
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
var d *tree.DInterval
if stringVal, err := DatumAsString(evalCtx, key, datum); err == nil {
d, err = tree.ParseDInterval(evalCtx.SessionData().GetIntervalStyle(), stringVal)
if err != nil || d == nil {
return pgerror.Newf(
pgcode.InvalidParameterValue,
`value of "expire_after" must be an interval`,
)
}
} else {
var ok bool
d, ok = datum.(*tree.DInterval)
if !ok || d == nil {
return pgerror.Newf(
pgcode.InvalidParameterValue,
`value of "expire_after" must be an interval`,
)
}
}
if d.Duration.Compare(duration.MakeDuration(0, 0, 0)) < 0 {
return pgerror.Newf(
pgcode.InvalidParameterValue,
`value of "expire_after" must be at least zero`,
)
}
if po.tableDesc.RowLevelTTL == nil {
po.tableDesc.RowLevelTTL = &descpb.TableDescriptor_RowLevelTTL{}
}
po.tableDesc.RowLevelTTL.DurationExpr = tree.Serialize(d)
return nil
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
po.tableDesc.RowLevelTTL = nil
return nil
},
},
}
func init() {
for _, param := range []string{
`toast_tuple_target`,
`parallel_workers`,
`toast.autovacuum_enabled`,
`autovacuum_vacuum_threshold`,
`toast.autovacuum_vacuum_threshold`,
`autovacuum_vacuum_scale_factor`,
`toast.autovacuum_vacuum_scale_factor`,
`autovacuum_analyze_threshold`,
`autovacuum_analyze_scale_factor`,
`autovacuum_vacuum_cost_delay`,
`toast.autovacuum_vacuum_cost_delay`,
`autovacuum_vacuum_cost_limit`,
`autovacuum_freeze_min_age`,
`toast.autovacuum_freeze_min_age`,
`autovacuum_freeze_max_age`,
`toast.autovacuum_freeze_max_age`,
`autovacuum_freeze_table_age`,
`toast.autovacuum_freeze_table_age`,
`autovacuum_multixact_freeze_min_age`,
`toast.autovacuum_multixact_freeze_min_age`,
`autovacuum_multixact_freeze_max_age`,
`toast.autovacuum_multixact_freeze_max_age`,
`autovacuum_multixact_freeze_table_age`,
`toast.autovacuum_multixact_freeze_table_age`,
`log_autovacuum_min_duration`,
`toast.log_autovacuum_min_duration`,
`user_catalog_table`,
} {
tableParams[param] = tableParam{
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
return unimplemented.NewWithIssuef(43299, "storage parameter %q", key)
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
return nil
},
}
}
}
// onSet implements the StorageParamObserver interface.
func (po *TableStorageParamObserver) onSet(
ctx context.Context,
semaCtx *tree.SemaContext,
evalCtx *tree.EvalContext,
key string,
datum tree.Datum,
) error {
if p, ok := tableParams[key]; ok {
return p.onSet(ctx, po, semaCtx, evalCtx, key, datum)
}
return pgerror.Newf(pgcode.InvalidParameterValue, "invalid storage parameter %q", key)
}
// onReset implements the StorageParamObserver interface.
func (po *TableStorageParamObserver) onReset(evalCtx *tree.EvalContext, key string) error {
if p, ok := tableParams[key]; ok {
return p.onReset(po, evalCtx, key)
}
return pgerror.Newf(pgcode.InvalidParameterValue, "invalid storage parameter %q", key)
}
func setFillFactorStorageParam(evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
val, err := DatumAsFloat(evalCtx, key, datum)
if err != nil {
return err
}
if val < 0 || val > 100 {
return pgerror.Newf(pgcode.InvalidParameterValue, "%q must be between 0 and 100", key)
}
if evalCtx != nil {
evalCtx.ClientNoticeSender.BufferClientNotice(
evalCtx.Context,
pgnotice.Newf("storage parameter %q is ignored", key),
)
}
return nil
}
// IndexStorageParamObserver observes storage parameters for indexes.
type IndexStorageParamObserver struct {
IndexDesc *descpb.IndexDescriptor
}
var _ StorageParamObserver = (*IndexStorageParamObserver)(nil)
func getS2ConfigFromIndex(indexDesc *descpb.IndexDescriptor) *geoindex.S2Config {
var s2Config *geoindex.S2Config
if indexDesc.GeoConfig.S2Geometry != nil {
s2Config = indexDesc.GeoConfig.S2Geometry.S2Config
}
if indexDesc.GeoConfig.S2Geography != nil {
s2Config = indexDesc.GeoConfig.S2Geography.S2Config
}
return s2Config
}
func (po *IndexStorageParamObserver) applyS2ConfigSetting(
evalCtx *tree.EvalContext, key string, expr tree.Datum, min int64, max int64,
) error {
s2Config := getS2ConfigFromIndex(po.IndexDesc)
if s2Config == nil {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"index setting %q can only be set on GEOMETRY or GEOGRAPHY spatial indexes",
key,
)
}
val, err := DatumAsInt(evalCtx, key, expr)
if err != nil {
return errors.Wrapf(err, "error decoding %q", key)
}
if val < min || val > max {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"%q value must be between %d and %d inclusive",
key,
min,
max,
)
}
switch key {
case `s2_max_level`:
s2Config.MaxLevel = int32(val)
case `s2_level_mod`:
s2Config.LevelMod = int32(val)
case `s2_max_cells`:
s2Config.MaxCells = int32(val)
}
return nil
}
func (po *IndexStorageParamObserver) applyGeometryIndexSetting(
evalCtx *tree.EvalContext, key string, expr tree.Datum,
) error {
if po.IndexDesc.GeoConfig.S2Geometry == nil {
return pgerror.Newf(pgcode.InvalidParameterValue, "%q can only be applied to GEOMETRY spatial indexes", key)
}
val, err := DatumAsFloat(evalCtx, key, expr)
if err != nil {
return errors.Wrapf(err, "error decoding %q", key)
}
switch key {
case `geometry_min_x`:
po.IndexDesc.GeoConfig.S2Geometry.MinX = val
case `geometry_max_x`:
po.IndexDesc.GeoConfig.S2Geometry.MaxX = val
case `geometry_min_y`:
po.IndexDesc.GeoConfig.S2Geometry.MinY = val
case `geometry_max_y`:
po.IndexDesc.GeoConfig.S2Geometry.MaxY = val
default:
return pgerror.Newf(pgcode.InvalidParameterValue, "unknown key: %q", key)
}
return nil
}
// onSet implements the StorageParamObserver interface.
func (po *IndexStorageParamObserver) onSet(
ctx context.Context,
semaCtx *tree.SemaContext,
evalCtx *tree.EvalContext,
key string,
expr tree.Datum,
) error {
switch key {
case `fillfactor`:
return setFillFactorStorageParam(evalCtx, key, expr)
case `s2_max_level`:
return po.applyS2ConfigSetting(evalCtx, key, expr, 0, 30)
case `s2_level_mod`:
return po.applyS2ConfigSetting(evalCtx, key, expr, 1, 3)
case `s2_max_cells`:
return po.applyS2ConfigSetting(evalCtx, key, expr, 1, 32)
case `geometry_min_x`, `geometry_max_x`, `geometry_min_y`, `geometry_max_y`:
return po.applyGeometryIndexSetting(evalCtx, key, expr)
case `vacuum_cleanup_index_scale_factor`,
`buffering`,
`fastupdate`,
`gin_pending_list_limit`,
`pages_per_range`,
`autosummarize`:
return unimplemented.NewWithIssuef(43299, "storage parameter %q", key)
}
return pgerror.Newf(pgcode.InvalidParameterValue, "invalid storage parameter %q", key)
}
// onReset implements the StorageParameterObserver interface.
func (po *IndexStorageParamObserver) onReset(evalCtx *tree.EvalContext, key string) error {
return errors.AssertionFailedf("non-implemented codepath")
}
// runPostChecks implements the StorageParamObserver interface.
func (po *IndexStorageParamObserver) runPostChecks() error {
s2Config := getS2ConfigFromIndex(po.IndexDesc)
if s2Config != nil {
if (s2Config.MaxLevel)%s2Config.LevelMod != 0 {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"s2_max_level (%d) must be divisible by s2_level_mod (%d)",
s2Config.MaxLevel,
s2Config.LevelMod,
)
}
}
if cfg := po.IndexDesc.GeoConfig.S2Geometry; cfg != nil {
if cfg.MaxX <= cfg.MinX {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"geometry_max_x (%f) must be greater than geometry_min_x (%f)",
cfg.MaxX,
cfg.MinX,
)
}
if cfg.MaxY <= cfg.MinY {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"geometry_max_y (%f) must be greater than geometry_min_y (%f)",
cfg.MaxY,
cfg.MinY,
)
}
}
return nil
}
| pkg/sql/paramparse/paramobserver.go | 1 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.003750842297449708,
0.00031549192499369383,
0.0001660026900935918,
0.0001724939065752551,
0.0005582994199357927
] |
{
"id": 6,
"code_window": [
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n",
"----\n",
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (expire_after = '00:10:00':::INTERVAL)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace"
],
"after_edit": [
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (ttl_expire_after = '00:10:00':::INTERVAL)"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 39
} | create-sequence
CREATE SEQUENCE defaultdb.SQ1
----
build
DROP SEQUENCE defaultdb.SQ1 CASCADE
----
- [[Locality:{DescID: 54}, ABSENT], PUBLIC]
details:
descriptorId: 54
- [[Namespace:{DescID: 54, Name: sq1}, ABSENT], PUBLIC]
details:
databaseId: 50
descriptorId: 54
name: sq1
schemaId: 51
- [[Owner:{DescID: 54}, ABSENT], PUBLIC]
details:
descriptorId: 54
owner: root
- [[Sequence:{DescID: 54}, ABSENT], PUBLIC]
details:
sequenceId: 54
- [[TableComment:{DescID: 54}, ABSENT], PUBLIC]
details:
comment: TODO(fqazi) Comments are not currently fetched from system.comments when
doing decomposition
tableId: 54
- [[UserPrivileges:{DescID: 54, Username: admin}, ABSENT], PUBLIC]
details:
descriptorId: 54
privileges: 2
username: admin
- [[UserPrivileges:{DescID: 54, Username: public}, ABSENT], PUBLIC]
details:
descriptorId: 54
username: public
- [[UserPrivileges:{DescID: 54, Username: root}, ABSENT], PUBLIC]
details:
descriptorId: 54
privileges: 2
username: root
create-table
CREATE TABLE defaultdb.blog_posts (id INT PRIMARY KEY, val int DEFAULT nextval('defaultdb.sq1'), title text)
----
create-table
CREATE TABLE defaultdb.blog_posts2 (id INT PRIMARY KEY, val int DEFAULT nextval('defaultdb.sq1'), title text)
----
create-type
CREATE TYPE defaultdb.typ AS ENUM('a')
----
create-table
CREATE TABLE defaultdb.blog_posts3 (id INT PRIMARY KEY, val typ DEFAULT CAST(chr(nextval('defaultdb.sq1')) as TYP ), title text)
----
build
DROP SEQUENCE defaultdb.SQ1 CASCADE
----
- [[DefaultExprTypeReference:{DescID: 59, ColumnID: 2, ReferencedDescID: 57}, ABSENT], PUBLIC]
details:
columnId: 2
tableId: 59
typeId: 57
- [[DefaultExprTypeReference:{DescID: 59, ColumnID: 2, ReferencedDescID: 58}, ABSENT], PUBLIC]
details:
columnId: 2
tableId: 59
typeId: 58
- [[DefaultExpression:{DescID: 55, ColumnID: 2}, ABSENT], PUBLIC]
details:
columnId: 2
defaultExpr: nextval(54:::REGCLASS)
tableId: 55
usesSequenceIDs:
- 54
- [[DefaultExpression:{DescID: 56, ColumnID: 2}, ABSENT], PUBLIC]
details:
columnId: 2
defaultExpr: nextval(54:::REGCLASS)
tableId: 56
usesSequenceIDs:
- 54
- [[DefaultExpression:{DescID: 59, ColumnID: 2}, ABSENT], PUBLIC]
details:
columnId: 2
defaultExpr: CAST(chr(nextval(54:::REGCLASS)) AS @100057)
tableId: 59
usesSequenceIDs:
- 54
- [[Locality:{DescID: 54}, ABSENT], PUBLIC]
details:
descriptorId: 54
- [[Namespace:{DescID: 54, Name: sq1}, ABSENT], PUBLIC]
details:
databaseId: 50
descriptorId: 54
name: sq1
schemaId: 51
- [[Owner:{DescID: 54}, ABSENT], PUBLIC]
details:
descriptorId: 54
owner: root
- [[RelationDependedOnBy:{DescID: 54, ReferencedDescID: 55}, ABSENT], PUBLIC]
details:
columnID: 2
dependedOn: 55
tableId: 54
- [[RelationDependedOnBy:{DescID: 54, ReferencedDescID: 56}, ABSENT], PUBLIC]
details:
columnID: 2
dependedOn: 56
tableId: 54
- [[RelationDependedOnBy:{DescID: 54, ReferencedDescID: 59}, ABSENT], PUBLIC]
details:
columnID: 2
dependedOn: 59
tableId: 54
- [[Sequence:{DescID: 54}, ABSENT], PUBLIC]
details:
sequenceId: 54
- [[TableComment:{DescID: 54}, ABSENT], PUBLIC]
details:
comment: TODO(fqazi) Comments are not currently fetched from system.comments when
doing decomposition
tableId: 54
- [[UserPrivileges:{DescID: 54, Username: admin}, ABSENT], PUBLIC]
details:
descriptorId: 54
privileges: 2
username: admin
- [[UserPrivileges:{DescID: 54, Username: public}, ABSENT], PUBLIC]
details:
descriptorId: 54
username: public
- [[UserPrivileges:{DescID: 54, Username: root}, ABSENT], PUBLIC]
details:
descriptorId: 54
privileges: 2
username: root
| pkg/sql/schemachanger/scbuild/testdata/drop_sequence | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.08464587479829788,
0.006356789730489254,
0.00016948216944001615,
0.00017224770272150636,
0.021023740991950035
] |
{
"id": 6,
"code_window": [
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n",
"----\n",
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (expire_after = '00:10:00':::INTERVAL)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace"
],
"after_edit": [
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (ttl_expire_after = '00:10:00':::INTERVAL)"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package heapprofiler
import (
"context"
"os"
"runtime/pprof"
"github.com/cockroachdb/cockroach/pkg/server/dumpstore"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
// HeapProfiler is used to take Go heap profiles.
//
// MaybeTakeProfile() is supposed to be called periodically. A profile is taken
// every time Go heap allocated bytes exceeds the previous high-water mark. The
// recorded high-water mark is also reset periodically, so that we take some
// profiles periodically.
// Profiles are also GCed periodically. The latest is always kept, and a couple
// of the ones with the largest heap are also kept.
type HeapProfiler struct {
profiler
}
// HeapFileNamePrefix is the prefix of files containing pprof data.
const HeapFileNamePrefix = "memprof"
// HeapFileNameSuffix is the suffix of files containing pprof data.
const HeapFileNameSuffix = ".pprof"
// NewHeapProfiler creates a HeapProfiler. dir is the directory in which
// profiles are to be stored.
func NewHeapProfiler(ctx context.Context, dir string, st *cluster.Settings) (*HeapProfiler, error) {
if dir == "" {
return nil, errors.AssertionFailedf("need to specify dir for NewHeapProfiler")
}
log.Infof(ctx, "writing go heap profiles to %s at least every %s", dir, resetHighWaterMarkInterval)
dumpStore := dumpstore.NewStore(dir, maxCombinedFileSize, st)
hp := &HeapProfiler{
profiler{
store: newProfileStore(dumpStore, HeapFileNamePrefix, HeapFileNameSuffix, st),
},
}
return hp, nil
}
// MaybeTakeProfile takes a heap profile if the heap is big enough.
func (o *HeapProfiler) MaybeTakeProfile(ctx context.Context, curHeap int64) {
o.maybeTakeProfile(ctx, curHeap, takeHeapProfile)
}
// takeHeapProfile returns true if and only if the profile dump was
// taken successfully.
func takeHeapProfile(ctx context.Context, path string) (success bool) {
// Try writing a go heap profile.
f, err := os.Create(path)
if err != nil {
log.Warningf(ctx, "error creating go heap profile %s: %v", path, err)
return false
}
defer f.Close()
if err = pprof.WriteHeapProfile(f); err != nil {
log.Warningf(ctx, "error writing go heap profile %s: %v", path, err)
return false
}
return true
}
| pkg/server/heapprofiler/heapprofiler.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00020722014596685767,
0.00017608010966796428,
0.0001678177941357717,
0.00017265023780055344,
0.0000111619474409963
] |
{
"id": 6,
"code_window": [
"\n",
"query TT\n",
"SHOW CREATE TABLE tbl\n",
"----\n",
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (expire_after = '00:10:00':::INTERVAL)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace"
],
"after_edit": [
"tbl CREATE TABLE public.tbl (\n",
" id INT8 NOT NULL,\n",
" text STRING NULL,\n",
" CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),\n",
" FAMILY fam_0_id_text (id, text)\n",
") WITH (ttl_expire_after = '00:10:00':::INTERVAL)"
],
"file_path": "pkg/sql/logictest/testdata/logic_test/row_level_ttl",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import _ from "lodash";
import React, { useEffect, useState } from "react";
import { Helmet } from "react-helmet";
import { getDataFromServer } from "src/util/dataFromServer";
import DebugAnnotation from "src/views/shared/components/debugAnnotation";
import InfoBox from "src/views/shared/components/infoBox";
import LicenseType from "src/views/shared/components/licenseType";
import {
PanelSection,
PanelTitle,
PanelPair,
Panel,
} from "src/views/shared/components/panelSection";
import "./debug.styl";
import { connect } from "react-redux";
import { AdminUIState } from "src/redux/state";
import { nodeIDsSelector } from "src/redux/nodes";
import { refreshNodes, refreshUserSQLRoles } from "src/redux/apiReducers";
import { selectHasViewActivityRedactedRole } from "src/redux/user";
const COMMUNITY_URL = "https://www.cockroachlabs.com/community/";
const NODE_ID = getDataFromServer().NodeID;
export function DebugTableLink(props: {
name: string;
url: string;
note?: string;
params?: {
node?: string;
seconds?: string;
si?: string;
labels?: string;
};
}) {
const params = new URLSearchParams(props.params);
const urlWithParams = props.params
? `${props.url}?${params.toString()}`
: props.url;
return (
<tr className="debug-inner-table__row">
<td className="debug-inner-table__cell">
<a className="debug-link" href={urlWithParams}>
{props.name}
</a>
</td>
<td className="debug-inner-table__cell--notes">
{_.isNil(props.note) ? urlWithParams : props.note}
</td>
</tr>
);
}
function DebugTableRow(props: { title: string; children?: React.ReactNode }) {
return (
<tr className="debug-table__row">
<th className="debug-table__cell debug-table__cell--header">
{props.title}
</th>
<td className="debug-table__cell">
<table className="debug-inner-table">
<tbody>{props.children}</tbody>
</table>
</td>
</tr>
);
}
function DebugTable(props: {
heading: string | React.ReactNode;
children?: React.ReactNode;
}) {
return (
<div>
<h2 className="base-heading">{props.heading}</h2>
<table className="debug-table">
<tbody>{props.children}</tbody>
</table>
</div>
);
}
function DebugPanelLink(props: { name: string; url: string; note: string }) {
return (
<PanelPair>
<Panel>
<a href={props.url}>{props.name}</a>
<p>{props.note}</p>
</Panel>
<Panel>
<div className="debug-url">
<div>{props.url}</div>
</div>
</Panel>
</PanelPair>
);
}
// NodeIDSelector is a standalone component that displays a list of nodeIDs and allows for
// their selection using a standard `<select>` component. If this component is used outside
// of the Advanced Debug page, it should be styled appropriately. In order to make use of
// this component and its "connected" version below (which retrieves and manages the nodeIDs
// in the cluster automatically via redux) you will need to pass it the selected nodeID and
// a function for mutating the nodeID (typical outputs of the `setState` hook) as props.
function NodeIDSelector(props: {
nodeID: string;
setNodeID: (nodeID: string) => void;
nodeIDs: string[];
refreshNodes: typeof refreshNodes;
}) {
const { nodeID, setNodeID, nodeIDs, refreshNodes } = props;
const nodeIDsWithLocal = ["local", ...nodeIDs];
useEffect(() => {
refreshNodes();
}, [props, refreshNodes]);
return (
<select
onChange={e => {
setNodeID(e.target.value);
}}
>
{nodeIDsWithLocal.map(n => {
return (
<option value={n} selected={n === nodeID}>
{n}
</option>
);
})}
</select>
);
}
const NodeIDSelectorConnected = connect(
(state: AdminUIState) => {
return {
nodeIDs: nodeIDsSelector(state),
};
},
{
refreshNodes,
},
)(NodeIDSelector);
function StatementDiagnosticsSelector(props: {
canSeeDebugPanelLink: boolean;
refreshUserSQLRoles: typeof refreshUserSQLRoles;
}) {
const { canSeeDebugPanelLink, refreshUserSQLRoles } = props;
useEffect(() => {
refreshUserSQLRoles();
}, [refreshUserSQLRoles]);
return (
canSeeDebugPanelLink && (
<DebugPanelLink
name="Statement Diagnostics History"
url="#/reports/statements/diagnosticshistory"
note="View the history of statement diagnostics requests"
/>
)
);
}
const StatementDiagnosticsConnected = connect(
(state: AdminUIState) => {
return {
canSeeDebugPanelLink: !selectHasViewActivityRedactedRole(state),
};
},
{
refreshUserSQLRoles,
},
)(StatementDiagnosticsSelector);
export default function Debug() {
const [nodeID, setNodeID] = useState<string>("local");
return (
<div className="section">
<Helmet title="Debug" />
<h3 className="base-heading">Advanced Debugging</h3>
<div className="debug-header">
<InfoBox>
<p>
The following pages are meant for advanced monitoring and
troubleshooting. Note that these pages are experimental and
undocumented. If you find an issue, let us know through{" "}
<a href={COMMUNITY_URL}>these channels.</a>
</p>
</InfoBox>
<div className="debug-header__annotations">
<LicenseType />
<DebugAnnotation label="Web server" value={`n${NODE_ID}`} />
</div>
</div>
<PanelSection>
<PanelTitle>Reports</PanelTitle>
<DebugPanelLink
name="Custom Time Series Chart"
url="#/debug/chart"
note="Create a custom chart of time series data."
/>
<DebugPanelLink
name="Problem Ranges"
url="#/reports/problemranges"
note="View ranges in your cluster that are unavailable, underreplicated, slow, or have other problems."
/>
<DebugPanelLink
name="Data Distribution and Zone Configs"
url="#/data-distribution"
note="View the distribution of table data across nodes and verify zone configuration."
/>
<StatementDiagnosticsConnected />
<PanelTitle>Configuration</PanelTitle>
<DebugPanelLink
name="Cluster Settings"
url="#/reports/settings"
note="View all cluster settings."
/>
<DebugPanelLink
name="Localities"
url="#/reports/localities"
note="Check node localities and locations for your cluster."
/>
</PanelSection>
<DebugTable heading="Even More Advanced Debugging">
<DebugTableRow title="Node Diagnostics">
<DebugTableLink name="All Nodes" url="#/reports/nodes" />
<DebugTableLink
name="Nodes filtered by node IDs"
url="#/reports/nodes?node_ids=1,2"
note="#/reports/nodes?node_ids=[node_id{,node_id...}]"
/>
<DebugTableLink
name="Nodes filtered by locality (regex)"
url="#/reports/nodes?locality=region=us-east"
note="#/reports/nodes?locality=[regex]"
/>
<DebugTableLink
name="Decommissioned node history"
url="#/reports/nodes/history"
note="#/reports/nodes/history"
/>
</DebugTableRow>
<DebugTableRow title="Stores">
<DebugTableLink
name="Stores on this node"
url="#/reports/stores/local"
/>
<DebugTableLink
name="Stores on a specific node"
url="#/reports/stores/1"
note="#/reports/stores/[node_id]"
/>
<DebugTableLink
name="Store LSM details on this node"
url="debug/lsm"
note="debug/lsm"
/>
</DebugTableRow>
<DebugTableRow title="Security">
<DebugTableLink
name="Certificates on this node"
url="#/reports/certificates/local"
/>
<DebugTableLink
name="Certificates on a specific node"
url="#/reports/certificates/1"
note="#/reports/certificates/[node_id]"
/>
</DebugTableRow>
<DebugTableRow title="Problem Ranges">
<DebugTableLink
name="Problem Ranges on a specific node"
url="#/reports/problemranges/local"
note="#/reports/problemranges/[node_id]"
/>
</DebugTableRow>
<DebugTableRow title="Ranges">
<DebugTableLink
name="Range Status"
url="#/reports/range/1"
note="#/reports/range/[range_id]"
/>
<DebugTableLink name="Raft Messages" url="#/raft/messages/all" />
<DebugTableLink name="Raft for all ranges" url="#/raft/ranges" />
</DebugTableRow>
<DebugTableRow title="Closed timestamps">
<DebugTableLink
name="Sender on this node"
url="debug/closedts-sender"
/>
<DebugTableLink
name="Receiver on this node"
url="debug/closedts-receiver"
/>
</DebugTableRow>
</DebugTable>
<DebugTable
heading={
<>
{"Profiling UI (Target node: "}
<NodeIDSelectorConnected nodeID={nodeID} setNodeID={setNodeID} />
{")"}
</>
}
>
<DebugTableRow title="Profiling UI/pprof">
<DebugTableLink
name="Heap"
url="debug/pprof/ui/heap/"
params={{ node: nodeID }}
/>
<DebugTableLink
name="Heap (recent allocs)"
url="debug/pprof/ui/heap/"
params={{ node: nodeID, seconds: "5", si: "alloc_objects" }}
/>
<DebugTableLink
name="CPU Profile"
url="debug/pprof/ui/cpu/"
params={{ node: nodeID, seconds: "5", labels: "true" }}
/>
<DebugTableLink
name="Block"
url="debug/pprof/ui/block/"
params={{ node: nodeID }}
/>
<DebugTableLink
name="Mutex"
url="debug/pprof/ui/mutex/"
params={{ node: nodeID }}
/>
<DebugTableLink
name="Thread Create"
url="debug/pprof/ui/threadcreate/"
params={{ node: nodeID }}
/>
<DebugTableLink
name="Goroutines"
url="debug/pprof/ui/goroutine/"
params={{ node: nodeID }}
/>
</DebugTableRow>
</DebugTable>
<DebugTable heading="Tracing and Profiling Endpoints (local node only)">
<DebugTableRow title="Tracing">
<DebugTableLink name="Active operations" url="debug/tracez" />
<DebugTableLink name="Requests" url="debug/requests" />
<DebugTableLink name="Events" url="debug/events" />
<DebugTableLink
name="Logs (JSON)"
url="debug/logspy?count=100&duration=10s&grep=.&flatten=0"
note="debug/logspy?count=[count]&duration=[duration]&grep=[regexp]"
/>
<DebugTableLink
name="Logs (text)"
url="debug/logspy?count=100&duration=10s&grep=.&flatten=1"
note="debug/logspy?count=[count]&duration=[duration]&grep=[regexp]&flatten=1"
/>
<DebugTableLink
name="Logs (text, high verbosity; IMPACTS PERFORMANCE)"
url="debug/logspy?count=100&duration=10s&grep=.&flatten=1&vmodule=*=2"
note="debug/logspy?count=[count]&duration=[duration]&grep=[regexp]&flatten=[0/1]&vmodule=[vmodule]"
/>
<DebugTableLink
name="VModule setting"
url="debug/vmodule"
note="debug/vmodule?duration=[duration]&vmodule=[vmodule]"
/>
</DebugTableRow>
<DebugTableRow title="Enqueue Range">
<DebugTableLink
name="Run a range through an internal queue"
url="#/debug/enqueue_range"
note="#/debug/enqueue_range"
/>
</DebugTableRow>
<DebugTableRow title="Stopper">
<DebugTableLink name="Active Tasks" url="debug/stopper" />
</DebugTableRow>
<DebugTableRow title="Goroutines">
<DebugTableLink name="UI" url="debug/pprof/goroutineui" />
<DebugTableLink
name="UI (count)"
url="debug/pprof/goroutineui?sort=count"
/>
<DebugTableLink
name="UI (wait)"
url="debug/pprof/goroutineui?sort=wait"
/>
<DebugTableLink name="Raw" url="debug/pprof/goroutine?debug=2" />
</DebugTableRow>
<DebugTableRow title="Runtime Trace">
<DebugTableLink name="Trace" url="debug/pprof/trace?debug=1" />
</DebugTableRow>
</DebugTable>
<DebugTable heading="Raw Status Endpoints (JSON)">
<DebugTableRow title="Logs (single node only)">
<DebugTableLink
name="On a Specific Node"
url="_status/logs/local"
note="_status/logs/[node_id]"
/>
<DebugTableLink
name="Log Files"
url="_status/logfiles/local"
note="_status/logfiles/[node_id]"
/>
<DebugTableLink
name="Specific Log File"
url="_status/logfiles/local/cockroach.log"
note="_status/logfiles/[node_id]/[filename]"
/>
</DebugTableRow>
<DebugTableRow title="Metrics">
<DebugTableLink name="Variables" url="debug/metrics" />
<DebugTableLink name="Prometheus" url="_status/vars" />
<DebugTableLink name="Rules" url="api/v2/rules/" />
</DebugTableRow>
<DebugTableRow title="Node Status">
<DebugTableLink
name="All Nodes"
url="_status/nodes"
note="_status/nodes"
/>
<DebugTableLink
name="Single node status"
url="_status/nodes/local"
note="_status/nodes/[node_id]"
/>
</DebugTableRow>
<DebugTableRow title="Hot Ranges">
<DebugTableLink
name="All Nodes"
url="_status/hotranges"
note="_status/hotranges"
/>
<DebugTableLink
name="Single node's ranges"
url="_status/hotranges?node_id=local"
note="_status/hotranges?node_id=[node_id]"
/>
</DebugTableRow>
<DebugTableRow title="Single Node Specific">
<DebugTableLink
name="Stores"
url="_status/stores/local"
note="_status/stores/[node_id]"
/>
<DebugTableLink
name="Gossip"
url="_status/gossip/local"
note="_status/gossip/[node_id]"
/>
<DebugTableLink
name="Ranges"
url="_status/ranges/local"
note="_status/ranges/[node_id]"
/>
<DebugTableLink
name="Stacks"
url="_status/stacks/local"
note="_status/stacks/[node_id]"
/>
<DebugTableLink
name="Engine Stats"
url="_status/enginestats/local"
note="_status/enginestats/[node_id]"
/>
<DebugTableLink
name="Certificates"
url="_status/certificates/local"
note="_status/certificates/[node_id]"
/>
<DebugTableLink
name="Diagnostics Reporting Data"
url="_status/diagnostics/local"
note="_status/diagnostics/[node_id]"
/>
</DebugTableRow>
<DebugTableRow title="Sessions">
<DebugTableLink name="Local Sessions" url="_status/local_sessions" />
<DebugTableLink name="All Sessions" url="_status/sessions" />
</DebugTableRow>
<DebugTableRow title="Cluster Wide">
<DebugTableLink name="Raft" url="_status/raft" />
<DebugTableLink
name="Range"
url="_status/range/1"
note="_status/range/[range_id]"
/>
<DebugTableLink name="Range Log" url="_admin/v1/rangelog?limit=100" />
<DebugTableLink
name="Range Log for Specific Range"
url="_admin/v1/rangelog/1?limit=100"
note="_admin/v1/rangelog/[range_id]?limit=100"
/>
</DebugTableRow>
<DebugTableRow title="Allocator">
<DebugTableLink
name="Simulated Allocator Runs on a Specific Node"
url="_status/allocator/node/local"
note="_status/allocator/node/[node_id]"
/>
<DebugTableLink
name="Simulated Allocator Runs on a Specific Range"
url="_status/allocator/range/1"
note="_status/allocator/range/[range_id]"
/>
</DebugTableRow>
</DebugTable>
<DebugTable heading="UI Debugging">
<DebugTableRow title="Redux State">
<DebugTableLink
name="Export the Redux State of the UI"
url="#/debug/redux"
/>
</DebugTableRow>
</DebugTable>
</div>
);
}
| pkg/ui/workspaces/db-console/src/views/reports/containers/debug/index.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0035746798384934664,
0.000332149735186249,
0.00016643505659885705,
0.00018514037947170436,
0.0005869794986210763
] |
{
"id": 7,
"code_window": [
"\t\t\treturn nil\n",
"\t\t},\n",
"\t},\n",
"\t`expire_after`: {\n",
"\t\tonSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {\n",
"\t\t\tvar d *tree.DInterval\n",
"\t\t\tif stringVal, err := DatumAsString(evalCtx, key, datum); err == nil {\n",
"\t\t\t\td, err = tree.ParseDInterval(evalCtx.SessionData().GetIntervalStyle(), stringVal)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t`ttl_expire_after`: {\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 158
} | statement error value of "expire_after" must be an interval
CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = ' xx invalid interval xx')
statement error value of "expire_after" must be at least zero
CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = '-10 minutes')
statement ok
CREATE TABLE tbl (
id INT PRIMARY KEY,
text TEXT,
FAMILY (id, text)
) WITH (expire_after = '10 minutes')
query TT
SHOW CREATE TABLE tbl
----
tbl CREATE TABLE public.tbl (
id INT8 NOT NULL,
text STRING NULL,
CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),
FAMILY fam_0_id_text (id, text)
) WITH (expire_after = '00:10:00':::INTERVAL)
query T
SELECT reloptions FROM pg_class WHERE relname = 'tbl'
----
{expire_after='00:10:00':::INTERVAL}
statement ok
DROP TABLE tbl;
CREATE TABLE tbl (
id INT PRIMARY KEY,
text TEXT,
FAMILY (id, text)
) WITH (expire_after = '10 minutes'::interval)
query TT
SHOW CREATE TABLE tbl
----
tbl CREATE TABLE public.tbl (
id INT8 NOT NULL,
text STRING NULL,
CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),
FAMILY fam_0_id_text (id, text)
) WITH (expire_after = '00:10:00':::INTERVAL)
| pkg/sql/logictest/testdata/logic_test/row_level_ttl | 1 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.001307331956923008,
0.0004462098586373031,
0.00016432374832220376,
0.00026557367527857423,
0.0004338551370892674
] |
{
"id": 7,
"code_window": [
"\t\t\treturn nil\n",
"\t\t},\n",
"\t},\n",
"\t`expire_after`: {\n",
"\t\tonSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {\n",
"\t\t\tvar d *tree.DInterval\n",
"\t\t\tif stringVal, err := DatumAsString(evalCtx, key, datum); err == nil {\n",
"\t\t\t\td, err = tree.ParseDInterval(evalCtx.SessionData().GetIntervalStyle(), stringVal)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t`ttl_expire_after`: {\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 158
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "sa5008",
srcs = ["analyzer.go"],
importpath = "github.com/cockroachdb/cockroach/build/bazelutil/staticcheckanalyzers/sa5008",
visibility = ["//visibility:public"],
deps = [
"//pkg/testutils/lint/passes/staticcheck",
"@co_honnef_go_tools//staticcheck",
"@org_golang_x_tools//go/analysis",
],
)
| build/bazelutil/staticcheckanalyzers/sa5008/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00017519129323773086,
0.0001745724875945598,
0.0001739536673994735,
0.0001745724875945598,
6.188129191286862e-7
] |
{
"id": 7,
"code_window": [
"\t\t\treturn nil\n",
"\t\t},\n",
"\t},\n",
"\t`expire_after`: {\n",
"\t\tonSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {\n",
"\t\t\tvar d *tree.DInterval\n",
"\t\t\tif stringVal, err := DatumAsString(evalCtx, key, datum); err == nil {\n",
"\t\t\t\td, err = tree.ParseDInterval(evalCtx.SessionData().GetIntervalStyle(), stringVal)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t`ttl_expire_after`: {\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 158
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package explain
import (
"bytes"
"fmt"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
"github.com/cockroachdb/cockroach/pkg/util/treeprinter"
"github.com/cockroachdb/errors"
)
// OutputBuilder is used to build the output of an explain tree.
//
// See ExampleOutputBuilder for sample usage.
type OutputBuilder struct {
flags Flags
entries []entry
// Current depth level (# of EnterNode() calls - # of LeaveNode() calls).
level int
}
// NewOutputBuilder creates a new OutputBuilder.
//
// EnterNode / EnterMetaNode and AddField should be used to populate data, after
// which a Build* method should be used.
func NewOutputBuilder(flags Flags) *OutputBuilder {
return &OutputBuilder{flags: flags}
}
type entry struct {
// level is non-zero for node entries, zero for field entries.
level int
node string
columns string
ordering string
field string
fieldVal string
}
func (e *entry) isNode() bool {
return e.level > 0
}
// fieldStr returns a "field" or "field: val" string; only used when this entry
// is a field.
func (e *entry) fieldStr() string {
if e.fieldVal == "" {
return e.field
}
return fmt.Sprintf("%s: %s", e.field, e.fieldVal)
}
// EnterNode creates a new node as a child of the current node.
func (ob *OutputBuilder) EnterNode(
name string, columns colinfo.ResultColumns, ordering colinfo.ColumnOrdering,
) {
var colStr, ordStr string
if ob.flags.Verbose {
colStr = columns.String(ob.flags.ShowTypes, false /* showHidden */)
ordStr = ordering.String(columns)
}
ob.enterNode(name, colStr, ordStr)
}
// EnterMetaNode is like EnterNode, but the output will always have empty
// strings for the columns and ordering. This is used for "meta nodes" like
// "fk-cascade".
func (ob *OutputBuilder) EnterMetaNode(name string) {
ob.enterNode(name, "", "")
}
func (ob *OutputBuilder) enterNode(name, columns, ordering string) {
ob.level++
ob.entries = append(ob.entries, entry{
level: ob.level,
node: name,
columns: columns,
ordering: ordering,
})
}
// LeaveNode moves the current node back up the tree by one level.
func (ob *OutputBuilder) LeaveNode() {
ob.level--
}
// AddField adds an information field under the current node.
func (ob *OutputBuilder) AddField(key, value string) {
ob.entries = append(ob.entries, entry{field: key, fieldVal: value})
}
// AddRedactableField adds an information field under the current node, hiding
// the value depending depending on the given redact flag.
func (ob *OutputBuilder) AddRedactableField(flag RedactFlags, key, value string) {
if ob.flags.Redact.Has(flag) {
value = "<hidden>"
}
ob.AddField(key, value)
}
// Attr adds an information field under the current node.
func (ob *OutputBuilder) Attr(key string, value interface{}) {
ob.AddField(key, fmt.Sprint(value))
}
// VAttr adds an information field under the current node, if the Verbose flag
// is set.
func (ob *OutputBuilder) VAttr(key string, value interface{}) {
if ob.flags.Verbose {
ob.AddField(key, fmt.Sprint(value))
}
}
// Attrf is a formatter version of Attr.
func (ob *OutputBuilder) Attrf(key, format string, args ...interface{}) {
ob.AddField(key, fmt.Sprintf(format, args...))
}
// Expr adds an information field with an expression. The expression's
// IndexedVars refer to the given columns. If the expression is nil, nothing is
// emitted.
func (ob *OutputBuilder) Expr(key string, expr tree.TypedExpr, varColumns colinfo.ResultColumns) {
if expr == nil {
return
}
flags := tree.FmtSymbolicSubqueries
if ob.flags.ShowTypes {
flags |= tree.FmtShowTypes
}
if ob.flags.HideValues {
flags |= tree.FmtHideConstants
}
f := tree.NewFmtCtx(
flags,
tree.FmtIndexedVarFormat(func(ctx *tree.FmtCtx, idx int) {
// Ensure proper quoting.
n := tree.Name(varColumns[idx].Name)
ctx.WriteString(n.String())
}),
)
f.FormatNode(expr)
ob.AddField(key, f.CloseAndGetString())
}
// VExpr is a verbose-only variant of Expr.
func (ob *OutputBuilder) VExpr(key string, expr tree.TypedExpr, varColumns colinfo.ResultColumns) {
if ob.flags.Verbose {
ob.Expr(key, expr, varColumns)
}
}
// buildTreeRows creates the treeprinter structure; returns one string for each
// entry in ob.entries.
func (ob *OutputBuilder) buildTreeRows() []string {
// We reconstruct the hierarchy using the levels.
// stack keeps track of the current node on each level.
tp := treeprinter.New()
stack := []treeprinter.Node{tp}
for _, entry := range ob.entries {
if entry.isNode() {
stack = append(stack[:entry.level], stack[entry.level-1].Child(entry.node))
} else {
tp.AddEmptyLine()
}
}
treeRows := tp.FormattedRows()
for len(treeRows) < len(ob.entries) {
// This shouldn't happen - the formatter should emit one row per entry.
// But just in case, add empty strings if necessary to avoid a panic later.
treeRows = append(treeRows, "")
}
return treeRows
}
// BuildExplainRows builds the output rows for an EXPLAIN (PLAN) statement.
//
// The columns are:
// verbose=false: Tree Field Description
// verbose=true: Tree Level Type Field Description
func (ob *OutputBuilder) BuildExplainRows() []tree.Datums {
treeRows := ob.buildTreeRows()
rows := make([]tree.Datums, len(ob.entries))
level := 1
for i, e := range ob.entries {
if e.isNode() {
level = e.level
}
if !ob.flags.Verbose {
rows[i] = tree.Datums{
tree.NewDString(treeRows[i]), // Tree
tree.NewDString(e.field), // Field
tree.NewDString(e.fieldVal), // Description
}
} else {
rows[i] = tree.Datums{
tree.NewDString(treeRows[i]), // Tree
tree.NewDInt(tree.DInt(level - 1)), // Level
tree.NewDString(e.node), // Type
tree.NewDString(e.field), // Field
tree.NewDString(e.fieldVal), // Description
tree.NewDString(e.columns), // Columns
tree.NewDString(e.ordering), // Ordering
}
}
}
return rows
}
// BuildStringRows creates a string representation of the plan information and
// returns it as a list of strings (one for each row). The strings do not end in
// newline.
func (ob *OutputBuilder) BuildStringRows() []string {
var result []string
tp := treeprinter.NewWithStyle(treeprinter.BulletStyle)
stack := []treeprinter.Node{tp}
entries := ob.entries
pop := func() *entry {
e := &entries[0]
entries = entries[1:]
return e
}
popField := func() *entry {
if len(entries) > 0 && !entries[0].isNode() {
return pop()
}
return nil
}
// There may be some top-level non-node entries (like "distributed"). Print
// them separately, as they can't be part of the tree.
for e := popField(); e != nil; e = popField() {
result = append(result, e.fieldStr())
}
if len(result) > 0 {
result = append(result, "")
}
for len(entries) > 0 {
entry := pop()
child := stack[entry.level-1].Child(entry.node)
stack = append(stack[:entry.level], child)
if entry.columns != "" {
child.AddLine(fmt.Sprintf("columns: %s", entry.columns))
}
if entry.ordering != "" {
child.AddLine(fmt.Sprintf("ordering: %s", entry.ordering))
}
// Add any fields for the node.
for entry = popField(); entry != nil; entry = popField() {
child.AddLine(entry.fieldStr())
}
}
result = append(result, tp.FormattedRows()...)
return result
}
// BuildString creates a string representation of the plan information.
// The output string always ends in a newline.
func (ob *OutputBuilder) BuildString() string {
rows := ob.BuildStringRows()
var buf bytes.Buffer
for _, row := range rows {
buf.WriteString(row)
buf.WriteString("\n")
}
return buf.String()
}
// BuildProtoTree creates a representation of the plan as a tree of
// roachpb.ExplainTreePlanNodes.
func (ob *OutputBuilder) BuildProtoTree() *roachpb.ExplainTreePlanNode {
// We reconstruct the hierarchy using the levels.
// stack keeps track of the current node on each level. We use a sentinel node
// for level 0.
sentinel := &roachpb.ExplainTreePlanNode{}
stack := []*roachpb.ExplainTreePlanNode{sentinel}
for _, entry := range ob.entries {
if entry.isNode() {
parent := stack[entry.level-1]
child := &roachpb.ExplainTreePlanNode{Name: entry.node}
parent.Children = append(parent.Children, child)
stack = append(stack[:entry.level], child)
} else {
node := stack[len(stack)-1]
node.Attrs = append(node.Attrs, &roachpb.ExplainTreePlanNode_Attr{
Key: entry.field,
Value: entry.fieldVal,
})
}
}
return sentinel.Children[0]
}
// AddTopLevelField adds a top-level field. Cannot be called while inside a
// node.
func (ob *OutputBuilder) AddTopLevelField(key, value string) {
if ob.level != 0 {
panic(errors.AssertionFailedf("inside node"))
}
ob.AddField(key, value)
}
// AddRedactableTopLevelField adds a top-level field, hiding the value
// depending on the given redact flag.
func (ob *OutputBuilder) AddRedactableTopLevelField(redactFlag RedactFlags, key, value string) {
if ob.flags.Redact.Has(redactFlag) {
value = "<hidden>"
}
ob.AddTopLevelField(key, value)
}
// AddDistribution adds a top-level distribution field. Cannot be called
// while inside a node.
func (ob *OutputBuilder) AddDistribution(value string) {
ob.AddRedactableTopLevelField(RedactDistribution, "distribution", value)
}
// AddVectorized adds a top-level vectorized field. Cannot be called
// while inside a node.
func (ob *OutputBuilder) AddVectorized(value bool) {
ob.AddRedactableTopLevelField(RedactVectorized, "vectorized", fmt.Sprintf("%t", value))
}
// AddPlanningTime adds a top-level planning time field. Cannot be called
// while inside a node.
func (ob *OutputBuilder) AddPlanningTime(delta time.Duration) {
if ob.flags.Redact.Has(RedactVolatile) {
delta = 10 * time.Microsecond
}
ob.AddTopLevelField("planning time", string(humanizeutil.Duration(delta)))
}
// AddExecutionTime adds a top-level execution time field. Cannot be called
// while inside a node.
func (ob *OutputBuilder) AddExecutionTime(delta time.Duration) {
if ob.flags.Redact.Has(RedactVolatile) {
delta = 100 * time.Microsecond
}
ob.AddTopLevelField("execution time", string(humanizeutil.Duration(delta)))
}
// AddKVReadStats adds a top-level field for the bytes/rows read from KV.
func (ob *OutputBuilder) AddKVReadStats(rows, bytes int64) {
ob.AddTopLevelField("rows read from KV", fmt.Sprintf(
"%s (%s)", humanizeutil.Count(uint64(rows)), humanizeutil.IBytes(bytes),
))
}
// AddKVTime adds a top-level field for the cumulative time spent in KV.
func (ob *OutputBuilder) AddKVTime(kvTime time.Duration) {
ob.AddRedactableTopLevelField(
RedactVolatile, "cumulative time spent in KV", string(humanizeutil.Duration(kvTime)))
}
// AddContentionTime adds a top-level field for the cumulative contention time.
func (ob *OutputBuilder) AddContentionTime(contentionTime time.Duration) {
ob.AddRedactableTopLevelField(
RedactVolatile,
"cumulative time spent due to contention",
string(humanizeutil.Duration(contentionTime)),
)
}
// AddMaxMemUsage adds a top-level field for the memory used by the query.
func (ob *OutputBuilder) AddMaxMemUsage(bytes int64) {
ob.AddRedactableTopLevelField(
RedactVolatile, "maximum memory usage", string(humanizeutil.IBytes(bytes)),
)
}
// AddNetworkStats adds a top-level field for network statistics.
func (ob *OutputBuilder) AddNetworkStats(messages, bytes int64) {
ob.AddRedactableTopLevelField(
RedactVolatile,
"network usage",
fmt.Sprintf("%s (%s messages)", humanizeutil.IBytes(bytes), humanizeutil.Count(uint64(messages))),
)
}
// AddMaxDiskUsage adds a top-level field for the sql temporary disk space used
// by the query. If we're redacting leave this out to keep logic test output
// independent of disk spilling. Disk spilling is controlled by a metamorphic
// constant so it may or may not occur randomly so it makes sense to omit this
// information entirely if we're redacting. Since disk spilling is rare we only
// include this field is bytes is greater than zero.
func (ob *OutputBuilder) AddMaxDiskUsage(bytes int64) {
if !ob.flags.Redact.Has(RedactVolatile) && bytes > 0 {
ob.AddTopLevelField("max sql temp disk usage",
string(humanizeutil.IBytes(bytes)))
}
}
// AddRegionsStats adds a top-level field for regions executed on statistics.
func (ob *OutputBuilder) AddRegionsStats(regions []string) {
ob.AddRedactableTopLevelField(
RedactNodes,
"regions",
strings.Join(regions, ", "),
)
}
| pkg/sql/opt/exec/explain/output.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0008854010957293212,
0.00021460223069880158,
0.00015982062905095518,
0.00016668695025146008,
0.00015039836580399424
] |
{
"id": 7,
"code_window": [
"\t\t\treturn nil\n",
"\t\t},\n",
"\t},\n",
"\t`expire_after`: {\n",
"\t\tonSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {\n",
"\t\t\tvar d *tree.DInterval\n",
"\t\t\tif stringVal, err := DatumAsString(evalCtx, key, datum); err == nil {\n",
"\t\t\t\td, err = tree.ParseDInterval(evalCtx.SessionData().GetIntervalStyle(), stringVal)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t`ttl_expire_after`: {\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 158
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package spanconfigsqltranslator
import (
"context"
"sort"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/stretchr/testify/require"
)
func TestProtectedTimestampStateReader(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
mkRecordAndAddToState := func(state *ptpb.State, ts hlc.Timestamp, target *ptpb.Target) {
recordID := uuid.MakeV4()
rec := jobsprotectedts.MakeRecord(recordID, int64(1), ts, nil, /* deprecatedSpans */
jobsprotectedts.Jobs, target)
state.Records = append(state.Records, *rec)
}
protectSchemaObject := func(state *ptpb.State, ts hlc.Timestamp, ids []descpb.ID) {
mkRecordAndAddToState(state, ts, ptpb.MakeSchemaObjectsTarget(ids))
}
protectCluster := func(state *ptpb.State, ts hlc.Timestamp) {
mkRecordAndAddToState(state, ts, ptpb.MakeClusterTarget())
}
protectTenants := func(state *ptpb.State, ts hlc.Timestamp, ids []roachpb.TenantID) {
mkRecordAndAddToState(state, ts, ptpb.MakeTenantsTarget(ids))
}
ts := func(seconds int) hlc.Timestamp {
return hlc.Timestamp{WallTime: (time.Duration(seconds) * time.Second).Nanoseconds()}
}
// Create some ptpb.State and then run the ProtectedTimestampStateReader on it
// to ensure it outputs the expected records.
state := &ptpb.State{}
protectSchemaObject(state, ts(1), []descpb.ID{56})
protectSchemaObject(state, ts(2), []descpb.ID{56, 57})
protectCluster(state, ts(3))
protectTenants(state, ts(4), []roachpb.TenantID{roachpb.MakeTenantID(1)})
protectTenants(state, ts(5), []roachpb.TenantID{roachpb.MakeTenantID(2)})
protectTenants(state, ts(6), []roachpb.TenantID{roachpb.MakeTenantID(2)})
ptsStateReader := newProtectedTimestampStateReader(context.Background(), *state)
clusterTimestamps := ptsStateReader.getProtectionPoliciesForCluster()
require.Len(t, clusterTimestamps, 1)
require.Equal(t, []roachpb.ProtectionPolicy{{ProtectedTimestamp: ts(3)}}, clusterTimestamps)
tenantTimestamps := ptsStateReader.getProtectionPoliciesForTenants()
sort.Slice(tenantTimestamps, func(i, j int) bool {
return tenantTimestamps[i].tenantID.ToUint64() < tenantTimestamps[j].tenantID.ToUint64()
})
require.Len(t, tenantTimestamps, 2)
require.Equal(t, []tenantProtectedTimestamps{
{
tenantID: roachpb.MakeTenantID(1),
protections: []roachpb.ProtectionPolicy{{ProtectedTimestamp: ts(4)}},
},
{
tenantID: roachpb.MakeTenantID(2),
protections: []roachpb.ProtectionPolicy{{ProtectedTimestamp: ts(5)},
{ProtectedTimestamp: ts(6)}},
},
}, tenantTimestamps)
tableTimestamps := ptsStateReader.getProtectionPoliciesForSchemaObject(56)
sort.Slice(tableTimestamps, func(i, j int) bool {
return tableTimestamps[i].ProtectedTimestamp.Less(tableTimestamps[j].ProtectedTimestamp)
})
require.Len(t, tableTimestamps, 2)
require.Equal(t, []roachpb.ProtectionPolicy{{ProtectedTimestamp: ts(1)},
{ProtectedTimestamp: ts(2)}}, tableTimestamps)
tableTimestamps2 := ptsStateReader.getProtectionPoliciesForSchemaObject(57)
require.Len(t, tableTimestamps2, 1)
require.Equal(t, []roachpb.ProtectionPolicy{{ProtectedTimestamp: ts(2)}}, tableTimestamps2)
}
| pkg/spanconfig/spanconfigsqltranslator/protectedts_state_reader_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0001794138370314613,
0.00017178778944071382,
0.0001680825080256909,
0.00017156204557977617,
0.0000033932597034436185
] |
{
"id": 8,
"code_window": [
"\t\t\t\tif err != nil || d == nil {\n",
"\t\t\t\t\treturn pgerror.Newf(\n",
"\t\t\t\t\t\tpgcode.InvalidParameterValue,\n",
"\t\t\t\t\t\t`value of \"expire_after\" must be an interval`,\n",
"\t\t\t\t\t)\n",
"\t\t\t\t}\n",
"\t\t\t} else {\n",
"\t\t\t\tvar ok bool\n",
"\t\t\t\td, ok = datum.(*tree.DInterval)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t\t`value of \"ttl_expire_after\" must be an interval`,\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 166
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package paramparse
import (
"context"
"github.com/cockroachdb/cockroach/pkg/geo/geoindex"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
"github.com/cockroachdb/errors"
)
// SetStorageParameters sets the given storage parameters using the
// given observer.
func SetStorageParameters(
ctx context.Context,
semaCtx *tree.SemaContext,
evalCtx *tree.EvalContext,
params tree.StorageParams,
paramObserver StorageParamObserver,
) error {
for _, sp := range params {
key := string(sp.Key)
if sp.Value == nil {
return pgerror.Newf(pgcode.InvalidParameterValue, "storage parameter %q requires a value", key)
}
// Expressions may be an unresolved name.
// Cast these as strings.
expr := UnresolvedNameToStrVal(sp.Value)
// Convert the expressions to a datum.
typedExpr, err := tree.TypeCheck(ctx, expr, semaCtx, types.Any)
if err != nil {
return err
}
if typedExpr, err = evalCtx.NormalizeExpr(typedExpr); err != nil {
return err
}
datum, err := typedExpr.Eval(evalCtx)
if err != nil {
return err
}
if err := paramObserver.onSet(ctx, semaCtx, evalCtx, key, datum); err != nil {
return err
}
}
return paramObserver.runPostChecks()
}
// ResetStorageParameters sets the given storage parameters using the
// given observer.
func ResetStorageParameters(
ctx context.Context,
evalCtx *tree.EvalContext,
params tree.NameList,
paramObserver StorageParamObserver,
) error {
for _, p := range params {
if err := paramObserver.onReset(evalCtx, string(p)); err != nil {
return err
}
}
return paramObserver.runPostChecks()
}
// StorageParamObserver applies a storage parameter to an underlying item.
type StorageParamObserver interface {
// onSet is called during CREATE [TABLE | INDEX] ... WITH (...) or
// ALTER [TABLE | INDEX] ... WITH (...).
onSet(ctx context.Context, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error
// onReset is called during ALTER [TABLE | INDEX] ... RESET (...)
onReset(evalCtx *tree.EvalContext, key string) error
// runPostChecks is called after all storage parameters have been set.
// This allows checking whether multiple storage parameters together
// form a valid configuration.
runPostChecks() error
}
// TableStorageParamObserver observes storage parameters for tables.
type TableStorageParamObserver struct {
tableDesc *tabledesc.Mutable
}
// NewTableStorageParamObserver returns a new TableStorageParamObserver.
func NewTableStorageParamObserver(tableDesc *tabledesc.Mutable) *TableStorageParamObserver {
return &TableStorageParamObserver{tableDesc: tableDesc}
}
var _ StorageParamObserver = (*TableStorageParamObserver)(nil)
// runPostChecks implements the StorageParamObserver interface.
func (po *TableStorageParamObserver) runPostChecks() error {
if err := tabledesc.ValidateRowLevelTTL(po.tableDesc.GetRowLevelTTL()); err != nil {
return err
}
return nil
}
type tableParam struct {
onSet func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error
onReset func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error
}
var tableParams = map[string]tableParam{
`fillfactor`: {
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
return setFillFactorStorageParam(evalCtx, key, datum)
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
// Operation is a no-op so do nothing.
return nil
},
},
`autovacuum_enabled`: {
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
var boolVal bool
if stringVal, err := DatumAsString(evalCtx, key, datum); err == nil {
boolVal, err = ParseBoolVar(key, stringVal)
if err != nil {
return err
}
} else {
s, err := GetSingleBool(key, datum)
if err != nil {
return err
}
boolVal = bool(*s)
}
if !boolVal && evalCtx != nil {
evalCtx.ClientNoticeSender.BufferClientNotice(
evalCtx.Context,
pgnotice.Newf(`storage parameter "%s = %s" is ignored`, key, datum.String()),
)
}
return nil
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
// Operation is a no-op so do nothing.
return nil
},
},
`expire_after`: {
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
var d *tree.DInterval
if stringVal, err := DatumAsString(evalCtx, key, datum); err == nil {
d, err = tree.ParseDInterval(evalCtx.SessionData().GetIntervalStyle(), stringVal)
if err != nil || d == nil {
return pgerror.Newf(
pgcode.InvalidParameterValue,
`value of "expire_after" must be an interval`,
)
}
} else {
var ok bool
d, ok = datum.(*tree.DInterval)
if !ok || d == nil {
return pgerror.Newf(
pgcode.InvalidParameterValue,
`value of "expire_after" must be an interval`,
)
}
}
if d.Duration.Compare(duration.MakeDuration(0, 0, 0)) < 0 {
return pgerror.Newf(
pgcode.InvalidParameterValue,
`value of "expire_after" must be at least zero`,
)
}
if po.tableDesc.RowLevelTTL == nil {
po.tableDesc.RowLevelTTL = &descpb.TableDescriptor_RowLevelTTL{}
}
po.tableDesc.RowLevelTTL.DurationExpr = tree.Serialize(d)
return nil
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
po.tableDesc.RowLevelTTL = nil
return nil
},
},
}
func init() {
for _, param := range []string{
`toast_tuple_target`,
`parallel_workers`,
`toast.autovacuum_enabled`,
`autovacuum_vacuum_threshold`,
`toast.autovacuum_vacuum_threshold`,
`autovacuum_vacuum_scale_factor`,
`toast.autovacuum_vacuum_scale_factor`,
`autovacuum_analyze_threshold`,
`autovacuum_analyze_scale_factor`,
`autovacuum_vacuum_cost_delay`,
`toast.autovacuum_vacuum_cost_delay`,
`autovacuum_vacuum_cost_limit`,
`autovacuum_freeze_min_age`,
`toast.autovacuum_freeze_min_age`,
`autovacuum_freeze_max_age`,
`toast.autovacuum_freeze_max_age`,
`autovacuum_freeze_table_age`,
`toast.autovacuum_freeze_table_age`,
`autovacuum_multixact_freeze_min_age`,
`toast.autovacuum_multixact_freeze_min_age`,
`autovacuum_multixact_freeze_max_age`,
`toast.autovacuum_multixact_freeze_max_age`,
`autovacuum_multixact_freeze_table_age`,
`toast.autovacuum_multixact_freeze_table_age`,
`log_autovacuum_min_duration`,
`toast.log_autovacuum_min_duration`,
`user_catalog_table`,
} {
tableParams[param] = tableParam{
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
return unimplemented.NewWithIssuef(43299, "storage parameter %q", key)
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
return nil
},
}
}
}
// onSet implements the StorageParamObserver interface.
func (po *TableStorageParamObserver) onSet(
ctx context.Context,
semaCtx *tree.SemaContext,
evalCtx *tree.EvalContext,
key string,
datum tree.Datum,
) error {
if p, ok := tableParams[key]; ok {
return p.onSet(ctx, po, semaCtx, evalCtx, key, datum)
}
return pgerror.Newf(pgcode.InvalidParameterValue, "invalid storage parameter %q", key)
}
// onReset implements the StorageParamObserver interface.
func (po *TableStorageParamObserver) onReset(evalCtx *tree.EvalContext, key string) error {
if p, ok := tableParams[key]; ok {
return p.onReset(po, evalCtx, key)
}
return pgerror.Newf(pgcode.InvalidParameterValue, "invalid storage parameter %q", key)
}
func setFillFactorStorageParam(evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
val, err := DatumAsFloat(evalCtx, key, datum)
if err != nil {
return err
}
if val < 0 || val > 100 {
return pgerror.Newf(pgcode.InvalidParameterValue, "%q must be between 0 and 100", key)
}
if evalCtx != nil {
evalCtx.ClientNoticeSender.BufferClientNotice(
evalCtx.Context,
pgnotice.Newf("storage parameter %q is ignored", key),
)
}
return nil
}
// IndexStorageParamObserver observes storage parameters for indexes.
type IndexStorageParamObserver struct {
IndexDesc *descpb.IndexDescriptor
}
var _ StorageParamObserver = (*IndexStorageParamObserver)(nil)
func getS2ConfigFromIndex(indexDesc *descpb.IndexDescriptor) *geoindex.S2Config {
var s2Config *geoindex.S2Config
if indexDesc.GeoConfig.S2Geometry != nil {
s2Config = indexDesc.GeoConfig.S2Geometry.S2Config
}
if indexDesc.GeoConfig.S2Geography != nil {
s2Config = indexDesc.GeoConfig.S2Geography.S2Config
}
return s2Config
}
func (po *IndexStorageParamObserver) applyS2ConfigSetting(
evalCtx *tree.EvalContext, key string, expr tree.Datum, min int64, max int64,
) error {
s2Config := getS2ConfigFromIndex(po.IndexDesc)
if s2Config == nil {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"index setting %q can only be set on GEOMETRY or GEOGRAPHY spatial indexes",
key,
)
}
val, err := DatumAsInt(evalCtx, key, expr)
if err != nil {
return errors.Wrapf(err, "error decoding %q", key)
}
if val < min || val > max {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"%q value must be between %d and %d inclusive",
key,
min,
max,
)
}
switch key {
case `s2_max_level`:
s2Config.MaxLevel = int32(val)
case `s2_level_mod`:
s2Config.LevelMod = int32(val)
case `s2_max_cells`:
s2Config.MaxCells = int32(val)
}
return nil
}
func (po *IndexStorageParamObserver) applyGeometryIndexSetting(
evalCtx *tree.EvalContext, key string, expr tree.Datum,
) error {
if po.IndexDesc.GeoConfig.S2Geometry == nil {
return pgerror.Newf(pgcode.InvalidParameterValue, "%q can only be applied to GEOMETRY spatial indexes", key)
}
val, err := DatumAsFloat(evalCtx, key, expr)
if err != nil {
return errors.Wrapf(err, "error decoding %q", key)
}
switch key {
case `geometry_min_x`:
po.IndexDesc.GeoConfig.S2Geometry.MinX = val
case `geometry_max_x`:
po.IndexDesc.GeoConfig.S2Geometry.MaxX = val
case `geometry_min_y`:
po.IndexDesc.GeoConfig.S2Geometry.MinY = val
case `geometry_max_y`:
po.IndexDesc.GeoConfig.S2Geometry.MaxY = val
default:
return pgerror.Newf(pgcode.InvalidParameterValue, "unknown key: %q", key)
}
return nil
}
// onSet implements the StorageParamObserver interface.
func (po *IndexStorageParamObserver) onSet(
ctx context.Context,
semaCtx *tree.SemaContext,
evalCtx *tree.EvalContext,
key string,
expr tree.Datum,
) error {
switch key {
case `fillfactor`:
return setFillFactorStorageParam(evalCtx, key, expr)
case `s2_max_level`:
return po.applyS2ConfigSetting(evalCtx, key, expr, 0, 30)
case `s2_level_mod`:
return po.applyS2ConfigSetting(evalCtx, key, expr, 1, 3)
case `s2_max_cells`:
return po.applyS2ConfigSetting(evalCtx, key, expr, 1, 32)
case `geometry_min_x`, `geometry_max_x`, `geometry_min_y`, `geometry_max_y`:
return po.applyGeometryIndexSetting(evalCtx, key, expr)
case `vacuum_cleanup_index_scale_factor`,
`buffering`,
`fastupdate`,
`gin_pending_list_limit`,
`pages_per_range`,
`autosummarize`:
return unimplemented.NewWithIssuef(43299, "storage parameter %q", key)
}
return pgerror.Newf(pgcode.InvalidParameterValue, "invalid storage parameter %q", key)
}
// onReset implements the StorageParamObserver interface.
func (po *IndexStorageParamObserver) onReset(evalCtx *tree.EvalContext, key string) error {
return errors.AssertionFailedf("non-implemented codepath")
}
// runPostChecks implements the StorageParamObserver interface.
func (po *IndexStorageParamObserver) runPostChecks() error {
s2Config := getS2ConfigFromIndex(po.IndexDesc)
if s2Config != nil {
if (s2Config.MaxLevel)%s2Config.LevelMod != 0 {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"s2_max_level (%d) must be divisible by s2_level_mod (%d)",
s2Config.MaxLevel,
s2Config.LevelMod,
)
}
}
if cfg := po.IndexDesc.GeoConfig.S2Geometry; cfg != nil {
if cfg.MaxX <= cfg.MinX {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"geometry_max_x (%f) must be greater than geometry_min_x (%f)",
cfg.MaxX,
cfg.MinX,
)
}
if cfg.MaxY <= cfg.MinY {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"geometry_max_y (%f) must be greater than geometry_min_y (%f)",
cfg.MaxY,
cfg.MinY,
)
}
}
return nil
}
| pkg/sql/paramparse/paramobserver.go | 1 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.9975207448005676,
0.08917239308357239,
0.00016275765665341169,
0.00022221689869184047,
0.2688879370689392
] |
{
"id": 8,
"code_window": [
"\t\t\t\tif err != nil || d == nil {\n",
"\t\t\t\t\treturn pgerror.Newf(\n",
"\t\t\t\t\t\tpgcode.InvalidParameterValue,\n",
"\t\t\t\t\t\t`value of \"expire_after\" must be an interval`,\n",
"\t\t\t\t\t)\n",
"\t\t\t\t}\n",
"\t\t\t} else {\n",
"\t\t\t\tvar ok bool\n",
"\t\t\t\td, ok = datum.(*tree.DInterval)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t\t`value of \"ttl_expire_after\" must be an interval`,\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 166
} | statement ok
CREATE TABLE u (token uuid PRIMARY KEY,
token2 uuid,
token3 uuid,
UNIQUE INDEX i_token2 (token2))
statement ok
INSERT INTO u VALUES
('63616665-6630-3064-6465-616462656562', '{63616665-6630-3064-6465-616462656563}', b'kafef00ddeadbeed'),
('urn:uuid:63616665-6630-3064-6465-616462656564', '63616665-6630-3064-6465-616462656565'::uuid, b'kafef00ddeadbeee'),
(b'cafef00ddeadbeef', '63616665-6630-3064-6465-616462656567', b'kafef00ddeadbeef')
query TTT
SELECT * FROM u ORDER BY token
----
63616665-6630-3064-6465-616462656562 63616665-6630-3064-6465-616462656563 6b616665-6630-3064-6465-616462656564
63616665-6630-3064-6465-616462656564 63616665-6630-3064-6465-616462656565 6b616665-6630-3064-6465-616462656565
63616665-6630-3064-6465-616462656566 63616665-6630-3064-6465-616462656567 6b616665-6630-3064-6465-616462656566
query TTT
SELECT * FROM u WHERE token < '63616665-6630-3064-6465-616462656564'::uuid
----
63616665-6630-3064-6465-616462656562 63616665-6630-3064-6465-616462656563 6b616665-6630-3064-6465-616462656564
query TTT
SELECT * FROM u WHERE token <= '63616665-6630-3064-6465-616462656564'::uuid ORDER BY token
----
63616665-6630-3064-6465-616462656562 63616665-6630-3064-6465-616462656563 6b616665-6630-3064-6465-616462656564
63616665-6630-3064-6465-616462656564 63616665-6630-3064-6465-616462656565 6b616665-6630-3064-6465-616462656565
statement error duplicate key value
INSERT INTO u VALUES ('63616665-6630-3064-6465-616462656566')
statement error duplicate key value
INSERT INTO u VALUES ('63616665-6630-3064-6465-616462656569', '63616665-6630-3064-6465-616462656565')
statement error UUID must be exactly 16 bytes long, got 15 bytes
INSERT INTO u VALUES (b'cafef00ddeadbee')
statement error UUID must be exactly 16 bytes long, got 17 bytes
INSERT INTO u VALUES (b'cafef00ddeadbeefs')
statement error uuid: incorrect UUID format
INSERT INTO u VALUES ('63616665-6630-3064-6465-61646265656')
statement error uuid: incorrect UUID format
INSERT INTO u VALUES ('63616665-6630-3064-6465-6164626565620')
statement error unsupported comparison operator: <uuid> = <bytes>
SELECT token FROM u WHERE token=b'cafef00ddeadbeef'::bytes
statement error unsupported comparison operator: <uuid> = <string>
SELECT token FROM u WHERE token='63616665-6630-3064-6465-616462656562'::string
statement ok
SELECT token FROM u WHERE token='63616665-6630-3064-6465-616462656562'::uuid
query T
SELECT token FROM u WHERE token='urn:uuid:63616665-6630-3064-6465-616462656562'
----
63616665-6630-3064-6465-616462656562
query T
SELECT token FROM u WHERE token=b'cafef00ddeadbeef'
----
63616665-6630-3064-6465-616462656566
query T
SELECT token2 FROM u WHERE token2='63616665-6630-3064-6465-616462656563'
----
63616665-6630-3064-6465-616462656563
query T
SELECT token FROM u WHERE token IN ('63616665-6630-3064-6465-616462656562', '63616665-6630-3064-6465-616462656564') ORDER BY token
----
63616665-6630-3064-6465-616462656562
63616665-6630-3064-6465-616462656564
statement ok
INSERT INTO u VALUES ('63616665-6630-3064-6465-616462656567'::uuid)
statement ok
INSERT INTO u VALUES ('urn:uuid:63616665-6630-3064-6465-616462656568'::uuid)
statement ok
INSERT INTO u VALUES (uuid_v4()::uuid)
statement error value type bytes doesn't match type uuid
INSERT INTO u VALUES ('cafef00ddeadbeef'::bytes)
statement error value type string doesn't match type uuid
INSERT INTO u VALUES ('63616665-6630-3064-6465-616462656562'::string)
statement error value type bytes doesn't match type uuid
INSERT INTO u VALUES (uuid_v4())
query T
SELECT token::uuid FROM u WHERE token=b'cafef00ddeadbeef'
----
63616665-6630-3064-6465-616462656566
query T
SELECT token::string FROM u WHERE token=b'cafef00ddeadbeef'
----
63616665-6630-3064-6465-616462656566
query T
SELECT token::bytes FROM u WHERE token=b'cafef00ddeadbeef'
----
cafef00ddeadbeef
statement error invalid cast: uuid -> int
SELECT token::int FROM u
query T
SELECT ('63616665-6630-3064-6465-616462656562' COLLATE en)::uuid
----
63616665-6630-3064-6465-616462656562
| pkg/sql/logictest/testdata/logic_test/uuid | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0006729165907017887,
0.00020941661205142736,
0.0001603091659490019,
0.00016478789621032774,
0.00013986720296088606
] |
{
"id": 8,
"code_window": [
"\t\t\t\tif err != nil || d == nil {\n",
"\t\t\t\t\treturn pgerror.Newf(\n",
"\t\t\t\t\t\tpgcode.InvalidParameterValue,\n",
"\t\t\t\t\t\t`value of \"expire_after\" must be an interval`,\n",
"\t\t\t\t\t)\n",
"\t\t\t\t}\n",
"\t\t\t} else {\n",
"\t\t\t\tvar ok bool\n",
"\t\t\t\td, ok = datum.(*tree.DInterval)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t\t`value of \"ttl_expire_after\" must be an interval`,\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 166
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package fuzzystrmatch
import "testing"
func TestSoundex(t *testing.T) {
tt := []struct {
Source string
Expected string
}{
{
Source: "hello world!",
Expected: "H464",
},
{
Source: "Anne",
Expected: "A500",
},
{
Source: "Ann",
Expected: "A500",
},
{
Source: "Andrew",
Expected: "A536",
},
{
Source: "Margaret",
Expected: "M626",
},
{
Source: "🌞",
Expected: "000",
},
{
Source: "😄 🐃 🐯 🕣 💲 🏜 👞 🔠 🌟 📌",
Expected: "",
},
}
for _, tc := range tt {
got := Soundex(tc.Source)
if tc.Expected != got {
t.Fatalf("error convert string to its Soundex code with source=%q"+
" expected %s got %s", tc.Source, tc.Expected, got)
}
}
}
func TestDifference(t *testing.T) {
tt := []struct {
Source string
Target string
Expected int
}{
{
Source: "Anne",
Target: "Ann",
Expected: 4,
},
{
Source: "Anne",
Target: "Andrew",
Expected: 2,
},
{
Source: "Anne",
Target: "Margaret",
Expected: 0,
},
}
for _, tc := range tt {
got := Difference(tc.Source, tc.Target)
if tc.Expected != got {
t.Fatalf("error reports the number of matching code positions with source=%q"+
" target=%q: expected %d got %d", tc.Source, tc.Target, tc.Expected, got)
}
}
}
| pkg/util/fuzzystrmatch/soundex_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00018000448471866548,
0.00017596088582649827,
0.0001718983839964494,
0.00017634700634516776,
0.0000025697859200590756
] |
{
"id": 8,
"code_window": [
"\t\t\t\tif err != nil || d == nil {\n",
"\t\t\t\t\treturn pgerror.Newf(\n",
"\t\t\t\t\t\tpgcode.InvalidParameterValue,\n",
"\t\t\t\t\t\t`value of \"expire_after\" must be an interval`,\n",
"\t\t\t\t\t)\n",
"\t\t\t\t}\n",
"\t\t\t} else {\n",
"\t\t\t\tvar ok bool\n",
"\t\t\t\td, ok = datum.(*tree.DInterval)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t\t`value of \"ttl_expire_after\" must be an interval`,\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 166
} | cancel_sessions_stmt ::=
'CANCEL' 'SESSION' session_id
| 'CANCEL' 'SESSION' 'IF' 'EXISTS' session_id
| 'CANCEL' 'SESSIONS' select_stmt
| 'CANCEL' 'SESSIONS' 'IF' 'EXISTS' select_stmt
| docs/generated/sql/bnf/cancel_session.bnf | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00017266142822336406,
0.00017266142822336406,
0.00017266142822336406,
0.00017266142822336406,
0
] |
{
"id": 9,
"code_window": [
"\t\t\t\tvar ok bool\n",
"\t\t\t\td, ok = datum.(*tree.DInterval)\n",
"\t\t\t\tif !ok || d == nil {\n",
"\t\t\t\t\treturn pgerror.Newf(\n",
"\t\t\t\t\t\tpgcode.InvalidParameterValue,\n",
"\t\t\t\t\t\t`value of \"expire_after\" must be an interval`,\n",
"\t\t\t\t\t)\n",
"\t\t\t\t}\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t\t`value of \"ttl_expire_after\" must be an interval`,\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 175
} | statement error value of "expire_after" must be an interval
CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = ' xx invalid interval xx')
statement error value of "expire_after" must be at least zero
CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = '-10 minutes')
statement ok
CREATE TABLE tbl (
id INT PRIMARY KEY,
text TEXT,
FAMILY (id, text)
) WITH (expire_after = '10 minutes')
query TT
SHOW CREATE TABLE tbl
----
tbl CREATE TABLE public.tbl (
id INT8 NOT NULL,
text STRING NULL,
CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),
FAMILY fam_0_id_text (id, text)
) WITH (expire_after = '00:10:00':::INTERVAL)
query T
SELECT reloptions FROM pg_class WHERE relname = 'tbl'
----
{expire_after='00:10:00':::INTERVAL}
statement ok
DROP TABLE tbl;
CREATE TABLE tbl (
id INT PRIMARY KEY,
text TEXT,
FAMILY (id, text)
) WITH (expire_after = '10 minutes'::interval)
query TT
SHOW CREATE TABLE tbl
----
tbl CREATE TABLE public.tbl (
id INT8 NOT NULL,
text STRING NULL,
CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),
FAMILY fam_0_id_text (id, text)
) WITH (expire_after = '00:10:00':::INTERVAL)
| pkg/sql/logictest/testdata/logic_test/row_level_ttl | 1 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.01053400058299303,
0.0027500397991389036,
0.00016513535229023546,
0.001341534312814474,
0.003931047860532999
] |
{
"id": 9,
"code_window": [
"\t\t\t\tvar ok bool\n",
"\t\t\t\td, ok = datum.(*tree.DInterval)\n",
"\t\t\t\tif !ok || d == nil {\n",
"\t\t\t\t\treturn pgerror.Newf(\n",
"\t\t\t\t\t\tpgcode.InvalidParameterValue,\n",
"\t\t\t\t\t\t`value of \"expire_after\" must be an interval`,\n",
"\t\t\t\t\t)\n",
"\t\t\t\t}\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t\t`value of \"ttl_expire_after\" must be an interval`,\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 175
} | // Code generated by TestPretty. DO NOT EDIT.
// GENERATED FILE DO NOT EDIT
1:
-
SELECT a, b, c FROM x
UNION ALL SELECT d, e, f FROM y
| pkg/sql/sem/tree/testdata/pretty/union_all.align-only.golden.short | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0009995383443310857,
0.0009995383443310857,
0.0009995383443310857,
0.0009995383443310857,
0
] |
{
"id": 9,
"code_window": [
"\t\t\t\tvar ok bool\n",
"\t\t\t\td, ok = datum.(*tree.DInterval)\n",
"\t\t\t\tif !ok || d == nil {\n",
"\t\t\t\t\treturn pgerror.Newf(\n",
"\t\t\t\t\t\tpgcode.InvalidParameterValue,\n",
"\t\t\t\t\t\t`value of \"expire_after\" must be an interval`,\n",
"\t\t\t\t\t)\n",
"\t\t\t\t}\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t\t`value of \"ttl_expire_after\" must be an interval`,\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 175
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package catalog
import (
"bytes"
"fmt"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/util"
)
// TableColMap is a map from descpb.ColumnID to int. It is typically used to
// store a mapping from column id to ordinal position within a row, but can be
// used for any similar purpose.
//
// It stores the mapping for ColumnIDs of the system columns separately since
// those IDs are very large and incur an allocation in util.FastIntMap all the
// time.
type TableColMap struct {
m util.FastIntMap
// systemColMap maps all system columns to their values. Columns here are
// in increasing order of their IDs (in other words, since we started giving
// out IDs from math.MaxUint32 and are going down, the newer system columns
// appear here earlier).
systemColMap [NumSystemColumns]int
// systemColIsSet indicates whether a value has been set for the
// corresponding system column in systemColMap (needed in order to
// differentiate between unset 0 and set 0).
systemColIsSet [NumSystemColumns]bool
}
// Set maps a key to the given value.
func (s *TableColMap) Set(col descpb.ColumnID, val int) {
if col < SmallestSystemColumnColumnID {
s.m.Set(int(col), val)
} else {
pos := col - SmallestSystemColumnColumnID
s.systemColMap[pos] = val
s.systemColIsSet[pos] = true
}
}
// Get returns the current value mapped to key, or ok=false if the
// key is unmapped.
func (s *TableColMap) Get(col descpb.ColumnID) (val int, ok bool) {
if col < SmallestSystemColumnColumnID {
return s.m.Get(int(col))
}
pos := col - SmallestSystemColumnColumnID
return s.systemColMap[pos], s.systemColIsSet[pos]
}
// GetDefault returns the current value mapped to key, or 0 if the key is
// unmapped.
func (s *TableColMap) GetDefault(col descpb.ColumnID) (val int) {
if col < SmallestSystemColumnColumnID {
return s.m.GetDefault(int(col))
}
pos := col - SmallestSystemColumnColumnID
return s.systemColMap[pos]
}
// Len returns the number of keys in the map.
func (s *TableColMap) Len() (val int) {
l := s.m.Len()
for _, isSet := range s.systemColIsSet {
if isSet {
l++
}
}
return l
}
// ForEach calls the given function for each key/value pair in the map (in
// arbitrary order).
func (s *TableColMap) ForEach(f func(colID descpb.ColumnID, returnIndex int)) {
s.m.ForEach(func(k, v int) {
f(descpb.ColumnID(k), v)
})
for pos, isSet := range s.systemColIsSet {
if isSet {
id := SmallestSystemColumnColumnID + pos
f(descpb.ColumnID(id), s.systemColMap[pos])
}
}
}
// String prints out the contents of the map in the following format:
// map[key1:val1 key2:val2 ...]
// The keys are in ascending order.
func (s *TableColMap) String() string {
var buf bytes.Buffer
buf.WriteString("map[")
s.m.ContentsIntoBuffer(&buf)
first := buf.Len() == len("map[")
for pos, isSet := range s.systemColIsSet {
if isSet {
if !first {
buf.WriteByte(' ')
}
first = false
id := SmallestSystemColumnColumnID + pos
fmt.Fprintf(&buf, "%d:%d", id, s.systemColMap[pos])
}
}
buf.WriteByte(']')
return buf.String()
}
| pkg/sql/catalog/table_col_map.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00033110493677668273,
0.00018228765111416578,
0.00015881480067037046,
0.00017097947420552373,
0.00004524625910562463
] |
{
"id": 9,
"code_window": [
"\t\t\t\tvar ok bool\n",
"\t\t\t\td, ok = datum.(*tree.DInterval)\n",
"\t\t\t\tif !ok || d == nil {\n",
"\t\t\t\t\treturn pgerror.Newf(\n",
"\t\t\t\t\t\tpgcode.InvalidParameterValue,\n",
"\t\t\t\t\t\t`value of \"expire_after\" must be an interval`,\n",
"\t\t\t\t\t)\n",
"\t\t\t\t}\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t\t`value of \"ttl_expire_after\" must be an interval`,\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 175
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecagg"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/require"
)
func TestDefaultAggregateFunc(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testCases := []aggregatorTestCase{
{
name: "StringAgg",
typs: []*types.T{types.Int, types.String, types.String},
input: colexectestutils.Tuples{
{nil, "a", "1"},
{nil, "b", "2"},
{0, "c", "3"},
{0, "d", "4"},
{0, "e", "5"},
{1, "f", "6"},
{1, "g", "7"},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1, 2}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.StringAgg,
},
expected: colexectestutils.Tuples{
{nil, "a2b"},
{0, "c4d5e"},
{1, "f7g"},
},
},
{
name: "StringAggWithConstDelimiter",
typs: []*types.T{types.Int, types.String},
input: colexectestutils.Tuples{
{nil, "a"},
{nil, "b"},
{0, "c"},
{0, "d"},
{0, "e"},
{1, "f"},
{1, "g"},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.StringAgg,
},
expected: colexectestutils.Tuples{
{nil, "a_b"},
{0, "c_d_e"},
{1, "f_g"},
},
constArguments: [][]execinfrapb.Expression{nil, {{Expr: "'_'"}}},
},
{
name: "JsonAggWithStringAgg",
typs: []*types.T{types.Int, types.Jsonb, types.String},
input: colexectestutils.Tuples{
{nil, `{"id": 1}`, "a"},
{nil, `{"id": 2}`, "b"},
{0, `{"id": 1}`, "c"},
{0, `{"id": 2}`, "d"},
{0, `{"id": 2}`, "e"},
{1, `{"id": 3}`, "f"},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {2}, {2}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.JSONAgg,
execinfrapb.JSONAgg,
execinfrapb.StringAgg,
},
expected: colexectestutils.Tuples{
{nil, `[{"id": 1}, {"id": 2}]`, `["a", "b"]`, "a_b"},
{0, `[{"id": 1}, {"id": 2}, {"id": 2}]`, `["c", "d", "e"]`, "c_d_e"},
{1, `[{"id": 3}]`, `["f"]`, "f"},
},
constArguments: [][]execinfrapb.Expression{nil, nil, nil, {{Expr: "'_'"}}},
},
{
name: "XorAgg",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{nil, 3},
{nil, 1},
{0, -5},
{0, -1},
{0, 0},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.XorAgg,
},
expected: colexectestutils.Tuples{
{nil, 2},
{0, 4},
},
},
}
evalCtx := tree.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())
defer evalCtx.Stop(context.Background())
aggMemAcc := evalCtx.Mon.MakeBoundAccount()
defer aggMemAcc.Close(context.Background())
evalCtx.SingleDatumAggMemAccount = &aggMemAcc
semaCtx := tree.MakeSemaContext()
for _, agg := range aggTypes {
for _, tc := range testCases {
t.Run(fmt.Sprintf("%s/%s", agg.name, tc.name), func(t *testing.T) {
if err := tc.init(); err != nil {
t.Fatal(err)
}
constructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(
&evalCtx, &semaCtx, tc.spec.Aggregations, tc.typs,
)
require.NoError(t, err)
colexectestutils.RunTestsWithTyps(t, testAllocator, []colexectestutils.Tuples{tc.input}, [][]*types.T{tc.typs}, tc.expected, colexectestutils.UnorderedVerifier, func(input []colexecop.Operator) (colexecop.Operator, error) {
return agg.new(&colexecagg.NewAggregatorArgs{
Allocator: testAllocator,
MemAccount: testMemAcc,
Input: input[0],
InputTypes: tc.typs,
Spec: tc.spec,
EvalCtx: &evalCtx,
Constructors: constructors,
ConstArguments: constArguments,
OutputTypes: outputTypes,
}), nil
})
})
}
}
}
func BenchmarkDefaultAggregateFunction(b *testing.B) {
aggFn := execinfrapb.StringAgg
for _, agg := range aggTypes {
for _, numInputRows := range []int{32, 32 * coldata.BatchSize()} {
for _, groupSize := range []int{1, 2, 32, 128, coldata.BatchSize()} {
benchmarkAggregateFunction(
b, agg, aggFn, []*types.T{types.String, types.String},
1 /* numGroupCol */, groupSize,
0 /* distinctProb */, numInputRows,
0 /* chunkSize */, 0 /* limit */)
}
}
}
}
| pkg/sql/colexec/default_agg_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00017930300964508206,
0.00017246845527552068,
0.00016548116400372237,
0.00017215132538694888,
0.0000031010436032374855
] |
{
"id": 10,
"code_window": [
"\t\t\t}\n",
"\n",
"\t\t\tif d.Duration.Compare(duration.MakeDuration(0, 0, 0)) < 0 {\n",
"\t\t\t\treturn pgerror.Newf(\n",
"\t\t\t\t\tpgcode.InvalidParameterValue,\n",
"\t\t\t\t\t`value of \"expire_after\" must be at least zero`,\n",
"\t\t\t\t)\n",
"\t\t\t}\n",
"\t\t\tif po.tableDesc.RowLevelTTL == nil {\n",
"\t\t\t\tpo.tableDesc.RowLevelTTL = &descpb.TableDescriptor_RowLevelTTL{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t`value of \"ttl_expire_after\" must be at least zero`,\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 183
} | statement error value of "expire_after" must be an interval
CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = ' xx invalid interval xx')
statement error value of "expire_after" must be at least zero
CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = '-10 minutes')
statement ok
CREATE TABLE tbl (
id INT PRIMARY KEY,
text TEXT,
FAMILY (id, text)
) WITH (expire_after = '10 minutes')
query TT
SHOW CREATE TABLE tbl
----
tbl CREATE TABLE public.tbl (
id INT8 NOT NULL,
text STRING NULL,
CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),
FAMILY fam_0_id_text (id, text)
) WITH (expire_after = '00:10:00':::INTERVAL)
query T
SELECT reloptions FROM pg_class WHERE relname = 'tbl'
----
{expire_after='00:10:00':::INTERVAL}
statement ok
DROP TABLE tbl;
CREATE TABLE tbl (
id INT PRIMARY KEY,
text TEXT,
FAMILY (id, text)
) WITH (expire_after = '10 minutes'::interval)
query TT
SHOW CREATE TABLE tbl
----
tbl CREATE TABLE public.tbl (
id INT8 NOT NULL,
text STRING NULL,
CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),
FAMILY fam_0_id_text (id, text)
) WITH (expire_after = '00:10:00':::INTERVAL)
| pkg/sql/logictest/testdata/logic_test/row_level_ttl | 1 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.002864364068955183,
0.0008036106592044234,
0.00016226613661274314,
0.00026885580155067146,
0.0010404859203845263
] |
{
"id": 10,
"code_window": [
"\t\t\t}\n",
"\n",
"\t\t\tif d.Duration.Compare(duration.MakeDuration(0, 0, 0)) < 0 {\n",
"\t\t\t\treturn pgerror.Newf(\n",
"\t\t\t\t\tpgcode.InvalidParameterValue,\n",
"\t\t\t\t\t`value of \"expire_after\" must be at least zero`,\n",
"\t\t\t\t)\n",
"\t\t\t}\n",
"\t\t\tif po.tableDesc.RowLevelTTL == nil {\n",
"\t\t\t\tpo.tableDesc.RowLevelTTL = &descpb.TableDescriptor_RowLevelTTL{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t`value of \"ttl_expire_after\" must be at least zero`,\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 183
} | [128 0 0 1 144 0 0 2 207 128] | pkg/util/json/testdata/encoded/string_pi.json.bytes | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00017330865375697613,
0.00017330865375697613,
0.00017330865375697613,
0.00017330865375697613,
0
] |
{
"id": 10,
"code_window": [
"\t\t\t}\n",
"\n",
"\t\t\tif d.Duration.Compare(duration.MakeDuration(0, 0, 0)) < 0 {\n",
"\t\t\t\treturn pgerror.Newf(\n",
"\t\t\t\t\tpgcode.InvalidParameterValue,\n",
"\t\t\t\t\t`value of \"expire_after\" must be at least zero`,\n",
"\t\t\t\t)\n",
"\t\t\t}\n",
"\t\t\tif po.tableDesc.RowLevelTTL == nil {\n",
"\t\t\t\tpo.tableDesc.RowLevelTTL = &descpb.TableDescriptor_RowLevelTTL{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t`value of \"ttl_expire_after\" must be at least zero`,\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 183
} | - Feature Name: Streaming Replication Between Clusters
- Status: in-progress
- Start Date: 2020-11-19
- Authors: BulkIO Team
- RFC PR: 56932
- Cockroach Issue: #57433
# Streaming Replication Between Clusters
## Summary
This document describes a mechanism for **replicating changes made to a key-span** of one cluster to another cluster, either directly from one cluster to another or indirectly, writing to and playing back a changelog in some external storage like S3.
This replication is done by capturing and copying every key-value change made to that key-span. During replication, the span in the destination cluster must be **offline** i.e. unavailable to SQL and internal traffic and processes. To bring it online, the replication process must take steps to terminate in **consistent** state, that is, such that it has replicated all writes as of _and only as of_ a single logical timestamp from the source cluster.
## Motivation
Cluster-to-cluster replication or "streaming backup/restore" is an often requested feature, particularly for primary/secondary deployments. In such deployments the secondary would provide an option of **rapid failover** (RTO) on the order of minutes compared to waiting for a full RESTORE, as well as **minimal data loss** (RPO) on the order of seconds, compared to taking periodic backups.
In our typically recommended deployments, replicas placed in different failure domains provide high availability and reduce the need to depend on recovery tools -- and their RPO and RTO characteristics -- for events like node failures or even region failures. However some operators have constraints that preclude such deployments, but still have availability needs that thus make RTO critical. And even some operators of HA clusters require the additional security of a **separate and isolated failover cluster** to mitigate risk of cluster-level or control-plane failures.
Finally online migrations between clusters, for example **moving a tenant** between two clusters, is another use-case that could leverage streaming replication, allowing a minimal downtime migration compared to doing a blocking BACKUP and then a RESTORE while offline during the cut-over.
## Design Considerations
### Replaying Logical SQL Writes Versus Replicating Raw Data
If we just replicated the logical SQL writes -- the INSERTs, UPDATEs and DELETEs -- that were sent to a tenant, table or database from one cluster to another cluster, the second cluster would then re-evaluate those and then write the results to its own storage. Given the complexity of the systems and subsystems involved in executing such statements, this approach gives very weak guarantees that the second cluster actually contains an _identical_ replica of the data in the first: that re-evaluation could produce a different result due to non-determinism, such as due to clock skew, randomized decisions, hardware or network conditions, etc. The configuration -- from the schema to the settings, users, grants, etc -- would all also need to match for this second cluster to be an identical, drop-in replacement, however this would be very difficult to achieve: simply sending the same schema changes to both clusters could see a change succeed on one but fail on the other, or even just take longer to complete. For example, what happens if a column addition is still ongoing on the secondary when the primary starts to send writes that manipulate that column?
These challenges are not unlike those we previously encountered with maintaining exact replicas in our KV layer when we replicated higher-level operations rather the results of those operations, before we [migrated to the latter](https://github.com/cockroachdb/cockroach/pull/6166).
A more robust approach to maintaining a true, identical replica is to copy all of the raw stored data -- the actual key-value pairs. This can yield a high degree of confidence that the second cluster actually contains an exact replica of the first, as it contains exactly the same bytes.
### What can be replicated?
Given a mechanism, described in more detail below, for exactly replicating the key-value pairs in a key-span between clusters, what spans make sense to replicate?
One could imagine replicating any given table, by simply determining its span and then replicating that. However "a table" consists of more than just its row data. Perhaps most prominently, it has its schema which is stored in a descriptor and is needed to make sense of that row data. That schema may in turn reference other objects in the cluster such as the users who have been granted privileges on that table, user-defined types for its columns, parent databases and schemas, foreign keys to other tables, etc. Furthermore there may be background jobs in the system's job queue acting on that table or optimizer statistics that pertain to that table in the system's stats store but are needed to plan successful queries on that table.
Additionally, most applications interact with multiple tables at once, and may rely on transactions across them being consistent. To be a drop-in replacement, to which that application could cut over, a secondary standby cluster needs to have all the tables, consistent with each other, as well as the cluster's configuration: the same users, granted the same privileges, configured with the same settings, etc.
Thus, in most deployments, the span that makes the most sense to replicate is that which encapsulates all the table data and all the metadata. Given the metadata is stored in system tables, this is the entire SQL table key-span.
### Replicating State Into An Online Cluster
Replicating the key-span of "the whole cluster" -- all user tables and all system tables -- poses its own set of challenges.
Chief among these is that the cluster being replicated _into_ has its own system tables. The recipient cluster is an online, functional cluster: operators need to have user accounts to be able to log in to start and control the job that is receiving and writing this replicated data, and the cluster's settings may need to be correctly configured to connect to the stream and correctly handle the cluster's hardware, network, clocks, etc.
However the source cluster's configuration and state -- its users table, jobs table, settings, etc, as mentioned above, need to be replicated for it to be a true replica. Somehow the recipient cluster needs to maintain its own state and configuration while it is a recipient, but simultaneously also receive and write the replicated state, and it cannot simply write the incoming state over its own.
For example, if a job is created and starts executing on the source cluster, it must be replicated over to the destination cluster. However if it is written to the destination cluster's jobs table, given that it is established above that the destination cluster has a running jobs system to run the replication job itself, it would potentially start executing on the destination cluster as well. This is a problem though, as it is still executing on the source cluster and the results of the execution on the source cluster are being replicated. Additionally, as the execution updates job's persisted state, a conflicting execution would overwrite that.
Similar situations arise with other background processes like stats computations or expiry/cleanup processes. In short, correctly **replicating into a key-span requires that it be offline**, i.e. nothing else be reading or more importantly writing to it.
Herein lies the challenge that has historically made replicating between clusters difficult: the destination cluster simultaneously needs to be **online** to run the replication process, but replicated data needs to be streamed into an **offline** cluster. This would appear to present a paradox.
### Tenants Are Easier To Replicate
The introduction of multi-tenancy functionality built the pieces required to run "virtual" clusters, which exist within a span of the keyspace of a host cluster. This change has two important facets that affect replicating between clusters.
The first important aspect of tenants is that they are fully encapsulated within a span of the host cluster's keyspace, within which they have their own system tables, users, grants, persisted background job state, etc along with the tables and row data. As discussed above, this makes that span a unit which can be useful if replicated in its entirety, without any complexity in determining what keys within it to or not to copy. But even more importantly they have _their own_ system tables, meaning a tenant -- including its system tables -- can be copied from one cluster to another without affecting the destination cluster's system tables.
Additionally virtual tenant clusters separate their execution processes from those of the host cluster's and allow for starting and stopping these execution of the tenant processes independently. Thus, between having separate system tables from the host cluster, and having the ability to control when processes which read and write to those tables are running, tenants provide a clean mechanism for side-stepping the offline/online contradiction described above. By not starting the tenant's execution processes, the tenant span is _offline_ and can be _replicated into_, by a replication process run on the _online_ host cluster. When the replication process is concluded, the tenant processes can be started and can read and write to it.
From a distance, this is similar to OS virtualization: the host or hypervisor can snapshot or copy the execution state of a guest VM, or can load the persisted state of a prior snapshot or a snapshot from another host and then resume that VM. While the guest is still suspended, i.e. not executing its own processes and changing its own state, the host can change the snapshot from the outside, but once it resumes the guest's execution, the guest then "owns" its state.
### Streaming Clusters vs Tenants
As discussed above, the tenant primitive encapsulates all potentially related data/state/etc in one well-defined prefix, and the ability to start and stop tenant processes provides the required "offline" destination keys-span in an otherwise "online" cluster.
However enterprise customers with **non-tenant** deployments want to use cluster-to-cluster replication. While it is possible that they may someday migrate to run their workloads as tenants of multi-tenant clusters (or indeed we may opt to run _all_ clusters as "multi-tenant" clusters even if they just host one tenant), the multi-tenancy features are not yet ready for on-premise customer deployments, and are not likely to be in the immediate term. Meanwhile there is active demand for cluster-to-cluster replication from these customers _now_.
Given that the host/tenant separation is what allowed side-stepping the online/offline contradiction, one potential solution for replicating non-tenant clusters is to invert the above online host cluster, offline guest tenant design. More specifically, by booting the processes for the second cluster in a special "recovery mode", where they read and write only within the span of a designated "recovery tenant" key prefix, then only that tenant is actually "online" with respect to SQL processes, including those running the replication job, while the rest of the keyspace of that cluster is effectively left offline, and can thus ingest replicated data.
Whereas the previous paradigm, of online host clusters replicating offline guest tenants, might be compared to or thought of in terms of OS virtualization and moving and resuming guest image snapshots, this pattern is conceptually more similar to booting a computer to a separate recovery or maintenance partition, from which one can then act on, upgrade or alter the OS installed on the main or root partition while it is safely offline.
While there will certainly be subtleties to resolve in this configuration that will be addressed in its own design document, this approach should be able to utilize the same stream format, job, helpers, producer/ingestion components, etc as steaming a tenant. Other than being run in a cluster that is booted in this special mode, wherein it stores its SQL state within a special prefix, the rest of the process is the same -- it just is streaming the whole SQL key span of the source cluster, rather than the key-span of a tenant within it.
## Detailed Streaming Replication Design
Replicating a tenant requires two main pieces:
1. A stream of all KV changes made to the tenant span.
2. A job ingesting that stream into a tenant.
These two pieces may run concurrently but physically separated, e.g. to maintain a "hot standby" in a second datacenter, where the copy is ingesting changes as soon as they are emitted by the source and ready to be brought online at a moment's notice. They could also instead be temporally separated, i.e. using the persisted stream to replay later.
### Change Stream
To replicate a tenant (or a whole cluster) we need a stream of all changes to the content of that tenant span (or of the whole cluster's data spans). This has, for the most part, already been built for CDC in the form of rangefeeds, change aggregators, etc. This stream will need to be consumable by the ingesting cluster, or, alternatively, written to files that can be "replayed" later. This stream should be partitioned to distribute the work of both producing and consuming it. The consumer, however, will need to know how that stream is partitioned at any given time to ensure it expects and consumes the right partitions, and the partitioning may change over time as the producing cluster changes.
### Stream Logical Format
**Topology**
The number of stream partitions and their location is the **topology** of the stream. Locations can be cloud storage URIs or network addresses of nodes within a cluster.
**Generation**
Since the stream is long-lived and the producing cluster's topology may change over time such that the number of partitions of the stream changes as well, we divide the stream by time into epochs or **generations**. Within a given generation the topology of the stream is constant, i.e. it will have the same number of partitions and the partitions will have the same locations. A generation corresponds to the distSQL "flow" used to produce the stream; if the flow is re-planned to adapt to data placement changes, node failures, etc., that starts a new generation. Generations are identified by the logical start time at which they begin emitting changes. When writing to cloud storage, separate generations are stored in separate prefixes.
**Partition**
A partition is a stream of events as emitted by one CDC change aggregator within the producing cluster -- it is the output of the "kvFeed" aggregator. These events can be of one of two types **key-values** or **resolved timestamps**.
**Resolved Timestamp**
The resolved timestamp is an event which indicates that a stream has emitted all changes up to the specified timestamp. That is to say, no new events with an older timestamp will be emitted.
#### Streaming Directly Between Clusters
Operators may wish to stream directly between clusters, either to reduce operational complexity and costs by eliminating the need to have an external storage intermediary or to minimize latency associated with buffering and flushing to storage.
To stream directly between clusters, nodes in the producing cluster can allocate "outboxes", i.e. fixed-size buffers of emitted stream entries. The producing cluster can then provide the addresses of these outboxes and expose them via an API so that the consuming cluster can dial them directly to fetch their contents. A consumer connected to a given outbox could hold its connection open and receive emitted events with minimal latency.
#### Streaming to/from Files
In addition to streaming directly between clusters, operators might wish to stream to an intermediary buffer which can be read by a consumer. Spinning up an intermediary buffer saves the need for maintaining outboxes on the source cluster, as well as enables decoupling of the 2 clusters.
The intermediary buffer can be considered like a file-system, such as S3. In this case, the stream client needs to provide a stream of changes on a per-generation basis, as well as the ability to start emitting changes from a particular timestamp. The streaming client should be able to determine the topology of a generation efficiently based on the files in the buffer.
The proposed format for the files produced by a stream is:
`<cluster_id>/<generation_id>/TOPOLOGY`: Describes the number and locations of the partitions for this generation.
`<cluster_id>/<generation_id>/<partition_id>/<timestamp-prefix>/<timestamp>-{data,checkpoint}`: The events emitted by the stream.
The **generation_id** is uniquely identified by the start time of the streaming job, to enable quick lookup for the specific generation that contains a given timestamp. This should be unique for every cluster, and each generation would correspond to a particular DistSQL flow that is set up on the source cluster.
The **partition_id** would uniquely identify each partition for a given generation. This is akin to the processor ID in the DistSQL flow that produces the stream. Events will be persisted as files, prefixed with the maximum logical timestamp of the batch contained in a file. **timestamp-prefix** is some prefix of the timestamp, used to chunk files into "directories" with more bounded numbers of files (i.e. to allow easier prefix-filtered enumeration). **Key-values** (`roachpb.KeyValue`s) will be batched in files and **resolved timestamps** will be emitted as checkpoint files.
Starting an ingestion stream from a given timestamp involves finding the latest generation before the given timestamp and then reading files in order starting from the latest resolved timestamp before the target timestamp. Note that files need not be added in lexicographical order, but files added before the last resolved timestamp file should be safe to ignore.
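To make that lookup concrete, below is a minimal, self-contained Go sketch of choosing where to resume. The `generation` struct and the in-memory slices stand in for listing the `<cluster_id>/<generation_id>/...` prefixes described above; this is illustrative only and not the actual stream client.

```go
package main

import (
	"fmt"
	"sort"
)

// generation is a stand-in for one <cluster_id>/<generation_id>/ prefix: the
// logical time at which it began emitting changes and the resolved timestamps
// of the checkpoint files found under it.
type generation struct {
	prefix      string
	start       int64
	checkpoints []int64 // ascending resolved timestamps
}

// resumePoint picks the latest generation that began at or before target and,
// within it, the latest checkpoint at or below target; files written before
// that checkpoint are safe to skip.
func resumePoint(gens []generation, target int64) (string, int64, bool) {
	sort.Slice(gens, func(i, j int) bool { return gens[i].start < gens[j].start })
	var chosen *generation
	for i := range gens {
		if gens[i].start <= target {
			chosen = &gens[i]
		}
	}
	if chosen == nil {
		return "", 0, false
	}
	from := chosen.start
	for _, ts := range chosen.checkpoints {
		if ts <= target {
			from = ts
		}
	}
	return chosen.prefix, from, true
}

func main() {
	gens := []generation{
		{prefix: "cluster1/140/", start: 140, checkpoints: []int64{150}},
		{prefix: "cluster1/100/", start: 100, checkpoints: []int64{110, 120, 130}},
	}
	prefix, from, ok := resumePoint(gens, 125)
	fmt.Println(prefix, from, ok) // cluster1/100/ 120 true
}
```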
A cluster continuously ingesting a stream from files would need to poll to determine when new files are present. It may thus be beneficial to include some form of "last write" file or files in a well-known location, to indicate if/when a more expensive re-enumeration is required. Alternatively, it could potentially establish a gRPC connection directly to the source cluster to receive instant notifications of which new files are available. This however is an optimization that can be explored later if needed.
### Stream Client API
The streaming client should be able to answer requests to:
* Get the corresponding generation and its topology for a given timestamp
* Start reading a generation’s partition at a given timestamp (i.e. consume the events of the stream)
* Send a notification of a new generation, as well as the start timestamp of that generation
* Drain all events from the partitions of a generation until a given timestamp has been resolved (used when a notification of a new generation has been received.)
The API should be transparent to whether the streaming is directly cluster to cluster, or facilitated by an intermediary buffer.
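A rough Go interface capturing those four requests might look like the sketch below. The type and method names are assumptions for illustration, not the actual client implementation; the same interface could be backed by direct node-to-node connections or by reading files from an intermediary buffer such as S3.

```go
package streamsketch

import "context"

// Placeholder types standing in for the real stream metadata; everything here
// is an illustrative assumption rather than an actual CockroachDB package.
type (
	Timestamp        = int64
	PartitionAddress = string
	Topology         struct{ Partitions []PartitionAddress }
	Event            struct {
		Key, Value []byte
		Resolved   Timestamp
	}
	GenerationStart struct{ Start Timestamp }
)

// StreamClient sketches the four capabilities listed above.
type StreamClient interface {
	// GetTopology returns the generation containing the given timestamp along
	// with the number and locations of its partitions.
	GetTopology(ctx context.Context, ts Timestamp) (Topology, error)
	// ConsumePartition starts reading one partition of a generation at the
	// given timestamp, delivering key-values and resolved timestamps.
	ConsumePartition(ctx context.Context, partition PartitionAddress, start Timestamp) (<-chan Event, error)
	// WatchGenerations notifies the caller when a new generation begins and
	// at what timestamp it starts.
	WatchGenerations(ctx context.Context) (<-chan GenerationStart, error)
	// Drain consumes the remaining events of the current generation's
	// partitions until the given timestamp has been resolved, used when
	// switching to a new generation.
	Drain(ctx context.Context, until Timestamp) error
}
```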
### Stream Ingestion
A job in the recipient cluster will need to consume the stream of changes and ingest them into the target tenant key span, while that span is offline (i.e. no tenant VMs are running for it). The tenant record should be in an "adding" or "offline" state to ensure a VM cannot start for it.
When the operator opts to stop ingesting the stream and bring the standby cluster online, all of its ranges must be consistent, holding all of the data up to, and only up to, a single logical timestamp from the source cluster, before it can be brought online.
Given the stream is partitioned, we expect to see changes from the same logical system timestamp in the origin cluster appear at different wall times in the different partitions of the stream: a partition may fall arbitrarily behind the others, or a burst of data in one table may mean that 100mb of changes from its partition of the stream may cover only a few seconds of timestamps while that same size buffer could cover hours in another partition. Two approaches to handle ingesting these unsynchronized partitions to produce a consistent result are either buffering the incoming data and coordinating what is flushed to be consistent, or directly ingesting and then rolling back to a consistent time afterwards.
#### Terminology
**Low-water mark Resolved Timestamp**
Each partition in a generation periodically emits a resolved timestamp. The minimum resolved timestamp across all partitions is referred to as the **low-water mark resolved timestamp**. This is the most recent timestamp at which we can claim we can provide a consistent view of the data, and thus the timestamp that must be presented after roll over.
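For illustration only, a tiny Go helper showing how the low-water mark falls out of the per-partition resolved timestamps; the map from partition id to latest resolved timestamp is an assumed simplification of the ingestion job's frontier tracking.

```go
// lowWaterMark returns the minimum resolved timestamp across all partitions,
// i.e. the most recent time at which the ingested data can be presented as a
// consistent view.
func lowWaterMark(resolvedByPartition map[string]int64) (int64, bool) {
	var min int64
	found := false
	for _, ts := range resolvedByPartition {
		if !found || ts < min {
			min, found = ts, true
		}
	}
	return min, found
}
```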
**Cut Over**
The event which designates the restoring cluster to stop listening to the incoming stream and become the primary cluster is referred to as the “cut over”.
#### Buffered Ingestion
Buffered ingestion would write the incoming streams to a buffer, and wait until all partitions have received data at least up to a given source-cluster resolved logical timestamp, and only then **flush that prefix of their buffer for that resolved timestamp** to their actual storage.
Given that a partition of the stream could fall _arbitrarily_ behind or another could burst much more data for a given time period, this implies the buffering must be prepared to hold an _unbounded_ amount of incoming data before it is allowed to flush it, and thus likely will need to be disk-backed or at least able to spill to disk, potentially increasing write-amplification in the steady-state of tailing the stream.
It is worth considering what the buffered ingestion implementation could look like in a bit more detail. One proposed approach would have each node maintain a Pebble instance with the WAL disabled (and generally otherwise optimized for a write workload). (timestamp, KV) pairs would be added to the store keyed on their timestamp. Processing the event stream would behave as follows:
1. Key-value pairs would be ingested into Pebble store as _(ts, KV)_ pairs, sorted by _ts_.
2. Upon notification of the increase of the low-water mark timestamp to _ts1_, keys up to _ts1_ are read from the store (which is keyed on timestamp) and are added to a BufferingAdder to be ingested into the main data store.
3. That partition for the ingestion job can then report that it has ingested up to _ts1_.
4. ClearRange up to _ts1_ in the buffered Pebble store.
If all partitions remain relatively up to date, most interaction with this store should be in memory. In the unhappy case where the low-water mark falls behind, the store would spill to disk.
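As a rough sketch of those steps: the real proposal uses a per-node Pebble instance keyed on timestamp, so the map-based buffer and the `flush` callback below are simplifying assumptions rather than the actual implementation.

```go
// tsBuffer is a simplified, in-memory stand-in for the timestamp-keyed Pebble
// buffer: incoming KVs are held keyed by timestamp, and once the low-water
// mark advances to ts1 everything at or below ts1 is flushed to the main
// store and cleared from the buffer (steps 2-4 above).
type kvPair struct{ key, value []byte }

type tsBuffer struct {
	byTS map[int64][]kvPair
}

func (b *tsBuffer) add(ts int64, kv kvPair) {
	if b.byTS == nil {
		b.byTS = make(map[int64][]kvPair)
	}
	b.byTS[ts] = append(b.byTS[ts], kv)
}

// flushUpTo hands every buffered pair at or below ts1 to flush (standing in
// for the bulk adder that writes to the main store) and clears that prefix,
// after which the partition can report it has ingested up to ts1.
func (b *tsBuffer) flushUpTo(ts1 int64, flush func([]kvPair) error) error {
	var batch []kvPair
	for ts, kvs := range b.byTS {
		if ts <= ts1 {
			batch = append(batch, kvs...)
			delete(b.byTS, ts)
		}
	}
	return flush(batch)
}
```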
An advantage of buffered ingestion is that once the operator decides to stop ingesting the stream and the in-progress flushes complete, all the ranges are already consistent as of the source-cluster resolved timestamp corresponding to that flush and ready to be brought online immediately. However, if a node responsible for flushing a partition were to become unavailable during one of these flushes, the ingested data would be left in an inconsistent state. We would then have a low-water mark applied timestamp, the latest timestamp at which all nodes have successfully flushed, which lags the low-water mark resolved timestamp. Since the data is only consistent up to the low-water mark applied timestamp, we are forced to RevertRange back to it if the stream were stopped in this state.
#### Direct Ingestion and Rollback
Direct ingestion -- batching up and flushing data from the incoming stream directly to ranges as it is received, with **no coordination of what is flushed** by the partitions -- would simplify the ingestion and minimize write-amplification of actually tailing the stream.
However, when the stream is stopped ingesting directly, the ranges would be inconsistent, as some partitions of the stream would have ingested up to different timestamps or above the last resolved timestamp.
Thus once the stream is stopped, directly ingested data would need to be **rolled back** to the last resolved timestamp that all partitions had ingested up to before the cluster would be ready to use. This would be done using `RevertRange` on the entire keyspace into which ingestion occurred, reverting it to that chosen timestamp. Doing this means `RevertRange` needs to iterate _all_ the ingested data, scanning to find and roll back those keys that are above the target timestamp.
Using Time-Bound Iteration (TBI) table filtering at the storage layer could improve the runtime of RevertRange when used on very recent timestamps, changing `n` in its O(n) runtime to be just the size of those flushed SSTables that actually contain keys in the relevant period. Given the likely recent timestamp to which we'd be reverting, unless a partition of the stream had fallen very far behind, this would likely reduce the scan size to just a few minutes worth of data.
Rolling back would also have a lower-bound cost of the O(m) where m is how much has been written above the target timestamp. In practice it would be likely the O(n) cost of finding what needs to be rolled back would dominate, unless a single partition had fallen too far behind.
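As a rough illustration of the direct-ingestion cut-over sequence: the `revertRange` callback below stands in for issuing CockroachDB's actual `RevertRange` request over the ingested span, and the span and timestamp types are simplified assumptions.

```go
package main

import "fmt"

// cutOver sketches the direct-ingestion cut-over: roll the ingested keyspace
// back to the low-water mark resolved timestamp so that it reads as a single
// consistent snapshot of the source cluster.
func cutOver(
	lowWaterMark int64, // minimum resolved timestamp across all partitions
	span [2]string, // [start, end) of the ingested tenant keyspace
	revertRange func(span [2]string, targetTS int64) error,
) error {
	// Everything written above the low-water mark may be inconsistent across
	// partitions, so it is removed before the tenant is brought online.
	return revertRange(span, lowWaterMark)
}

func main() {
	span := [2]string{"/Tenant/5", "/Tenant/6"}
	err := cutOver(115, span, func(sp [2]string, target int64) error {
		fmt.Printf("RevertRange %v back to timestamp %d\n", sp, target)
		return nil
	})
	fmt.Println(err) // <nil>
}
```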
#### Ingestion Tradeoffs
##### Tradeoff: Storage Utilization Spikes
Both approaches may experience spikes in utilization of storage resources if one or more partitions falls far behind.
In the case of buffered ingestion, the partitions that are caught up will need to buffer the data they cannot yet flush to disk. Although these partitions will be writing to the on-disk buffer rather than the main store, the write load on the standby cluster will mirror that of the source cluster. When the lagging partition catches up to the resolved timestamp, a large storage spike is expected in flushing all of the buffered data to the main store. In summary, these storage utilization spikes are expected whenever we catch up on a stream that has fallen behind. Until we catch up, our RP and potentially RT are behind/elevated as well.
In the case of direct ingestion and rollback, during the rollback phase, a spike in storage resources is expected. In order to rollback, RevertRange needs to locate the SSTs which contain relevant data and perform point deletes over all values which have changed since the rollback time. Before the cut-over, all of the data received by the standby cluster will be ingested identically to the source cluster. Although this spike is only expected to occur once, _it will occur at a critical time for the user_: when they are cutting over to the standby cluster.
With either approach, in order to properly support a partition falling arbitrarily behind, it is important to ensure that appropriate back pressure systems are in place to avoid overloading the storage layer, regardless of when the spike is expected to occur.
##### Tradeoff: RTO
With the buffering solution, performing a cut-over should be a constant time operation since it has only ingested data that is known to be consistent, as long as all buffers have had a chance to flush.
Rolling back to the low-water mark time with the direct ingestion approach is potentially a function of how far behind one stream is. Notably, this means that the **RT will be a function of the RP**. However, this makes observability into how far behind each stream all the more critical so that the operator can ensure that the streams don’t fall too far behind.
##### Tradeoff: Node Failure Resiliency
Since these ingestion jobs are expected to be very long running, they should be resilient to node failures. This failure mode is fairly well handled by the direct ingestion approach since all ingested data is already replicated and low-watermark calculations can be based off of ingested data. Recovery looks like restarting the ingestion job from the latest low-watermark resolved timestamp.
However, the node failure case is more interesting when considering the buffered ingestion implementation. As previously mentioned, since flushing is not atomic, during a flush there will be some time where the ingested data is not in a consistent state. If a cut-over were to happen at this time, we would need to wait for all nodes to finish flushing. However, if a node were to become unavailable, the nodes that have flushed need to rollback to the last timestamp at which all partitions successfully flushed. **This leads to the realization that to fully support the buffering ingestion implementation we'll need to be able to support running a RevertRange in any case.**
##### Tradeoff: Simplicity
One strong argument against the buffered approach is its relative complexity. Not only is there added complexity in adding another component to the system, but the buffered ingestion solution requires us to handle the RevertRange case anyway.
##### Tradeoff: Summary
While Buffered Ingestion imposes a potential double write-amplification cost on the steady-state of tailing the stream, it has better RTO in that the ranges are kept nearly ready to be brought online at any time, with only in-progress flushes needing to conclude before they're ready in the happy case. However, to support the case of cutting over during a partial flush, it also needs to support rolling back. Directly ingesting, on the other hand, is simpler as we can just batch and write as we receive data, reducing the cost of the steady-state of tailing the stream, but at the expense of shifting some cost to the end, when it concludes.
In both ingestion approaches, we'll need to coordinate between ingestors of the various partitions to at least track the received or ingested change frontier, much the way changefeeds do for emitting resolved timestamps.
Given the motivation for cluster-to-cluster replication is to have a warm standby cluster, it seems likely that minimizing ongoing cost at the expense of cutover time is not the preferred trade, and instead it would be preferable to pay the extra write-amplification as we go to minimize RTO. It offers benefits such as no resource utilization spike during cut over and constant RTO in the happy case of no node-failures. However, given the non-atomic nature of the flush across nodes, and the fact it still needs to support rolling back partial flushes, the buffered approach is actually a superset of the direct-ingest approach, and the buffering is just an optimization.
Thus the *proposed approach is to start with the direct-ingestion approach with rollback* and pursue buffering as an optimization later.
**Observability Considerations**
It is important to provide metrics for monitoring the health of the replicating stream. Important metrics include:
- RP: how far back is the latest ingested consistent timestamp? This is the timestamp we would rollback to on cut-over.
- What range or ranges are the lagging ones that are holding it back? Is it ingestion delay or is the stream lagging?
- What is the size of the pending ingestion buffers?
- How far behind are the ingested timestamps vs the received timestamps?
- What is the size of the unconsumed portion of the outboxes in the producing cluster i.e. how far behind is the stream consumer?
- For file-based streams, what's the latest flushed consistent timestamp/how far behind is it?
- If/when we integrate with BACKUPs (e.g. restore+replay), what's the size of the stream since last backup?
**Version-Upgrade Considerations**
In order to support version upgrades while maintaining the replication stream, the clusters should be upgraded in a particular order. The source cluster cannot finalize its upgrade before the nodes on the standby cluster are upgraded and finalized, since otherwise the source cluster could send backwards-incompatible data to the standby cluster.
## Drawbacks and Limitations
### Single Slow Range Blocks Everything
If a single node/range/etc falls behind and prevents the closed timestamp frontier from advancing, the RP for the entire cluster falls behind (since we only revert to a single consistent cluster-wide time.)
### Offline Destinations While Ingesting
In other systems, operators often opt to use their secondaries to serve some traffic, such as OLAP reporting/analytics queries or even some of their applications read traffic. This design assumes the destination replica is wholly offline. This is _mostly_ with respect to writes -- we establish that we cannot have a job or background process writing while we are replicating and still maintain correctness. However in general reads over the replicated data while we are still replicating cannot assume it is consistent -- some spans may have been replicated to different times than others until the process concludes ensuring a single, consistent replicated timestamp. It is possible however to get correct, consistent reads if they are backdated to at or before the minimum ingested resolved timestamp. However care would need to be taken when starting SQL execution processes on the replicated data that they a) enforce this and b) are read-only, and do not attempt to run the background systems, like jobs, that are discussed at length above.
### One-way Replication
This proposed design is focused solely on creating passive replicas of a primary copy -- it one-way, from that primary and does not allow for two-way replication, where the replica could also be written to. Such use cases, i.e. "active-active" pairings, are left to a single cluster with nodes in both regions.
In some 2DC cases, if two workloads are fully disjoint, i.e. do not require transactions that consistently read and commit across both, they could be to run them in two separate tenants, where each DC hosts its tenant and a streaming replica of the other. However as mentioned above, the tenancy features will not be ready for on-premise users to use in such a deployment any time soon.
### Replicating Bulk Job Output vs Replication Throughput and RPO
Replicating the entire tenant span (or entire cluster span) while keeping the destination entirely offline simplifies many things. One of those explicitly is that a job and that job's output are replicated wholesale and nothing in the destination needs to understand or coordinate with the source cluster's execution of that job. However this comes at a cost: the link used for replication -- more likely a WAN link as this is generally used for replicating between regions -- needs to have throughput available to accommodate not just the application traffic's write-rate but also that job's write-rate, which could be _much_ higher, e.g. a primary key swap can bulk-read and bulk-write data, likely writing much more than normal SQL clients would in the same period. If the throughput of the link is too low to handle this burst, the stream may fall behind, increasing RPO potentially beyond targets. Deployments with hard RPO requirements must therefore be prepared to provision adequate links for such replication and/or rate-limit their bulk operations in their active cluster to only what can be streamed over that link.
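As a rough, purely illustrative calculation of this effect: if application traffic writes at 100 MB/s, a bulk job adds another 400 MB/s, and the replication link sustains only 300 MB/s, the stream falls behind by 200 MB/s; over a 10-minute bulk operation that is roughly 120 GB of backlog that must drain before the replicated timestamp can catch back up, during which time the effective RPO grows accordingly.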
### Replicating Whole Tenants/Clusters vs Tables/DBs
Replicating just one table or one database from one cluster to another is another common use-case for moving changes between clusters, such as to have an analytics cluster, keep a given table updated in a staging cluster, etc. Replicating whole tenants neatly sidesteps the question of how to handle schema changes, grants, etc. by replicating it all blindly, but any sort of individual table replication would need to figure out how to handle such cases -- what do you do if you're replicating one database but not another, but a table is created in the replicated database referencing a type in the other?
Such use cases are often unique to a specific deployment's requirements -- why they want one table but not another, and thus what they want to do in edge cases, etc., can vary. Thus for such cases it likely makes more sense to have those operators use our existing CDC offerings and choose how to use those changes to keep their replicas up-to-date.
## Unresolved Questions
### Metrics
How will the user monitor that the data ingested in the standby cluster is exactly the data they expect?
How will per-partition metrics be exposed so that users can monitor if any partitions fall behind?
| docs/RFCS/20201119_streaming_cluster_to_cluster.md | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00017350856796838343,
0.0001651619968470186,
0.00016044557560235262,
0.00016517659241799265,
0.000003303899575257674
] |
{
"id": 10,
"code_window": [
"\t\t\t}\n",
"\n",
"\t\t\tif d.Duration.Compare(duration.MakeDuration(0, 0, 0)) < 0 {\n",
"\t\t\t\treturn pgerror.Newf(\n",
"\t\t\t\t\tpgcode.InvalidParameterValue,\n",
"\t\t\t\t\t`value of \"expire_after\" must be at least zero`,\n",
"\t\t\t\t)\n",
"\t\t\t}\n",
"\t\t\tif po.tableDesc.RowLevelTTL == nil {\n",
"\t\t\t\tpo.tableDesc.RowLevelTTL = &descpb.TableDescriptor_RowLevelTTL{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t`value of \"ttl_expire_after\" must be at least zero`,\n"
],
"file_path": "pkg/sql/paramparse/paramobserver.go",
"type": "replace",
"edit_start_line_idx": 183
} | // Code generated by generate-staticcheck; DO NOT EDIT.
//go:build bazel
// +build bazel
package s1035
import (
util "github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/staticcheck"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/simple"
)
var Analyzer *analysis.Analyzer
func init() {
for _, analyzer := range simple.Analyzers {
if analyzer.Analyzer.Name == "S1035" {
Analyzer = analyzer.Analyzer
break
}
}
util.MungeAnalyzer(Analyzer)
}
| build/bazelutil/staticcheckanalyzers/s1035/analyzer.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00017477165965829045,
0.00017404028039891273,
0.00017329180263914168,
0.00017405737889930606,
6.042700988473371e-7
] |
{
"id": 11,
"code_window": [
"\t\t\trelPersistence = relPersistenceTemporary\n",
"\t\t}\n",
"\t\tvar relOptions tree.Datum = tree.DNull\n",
"\t\tif ttl := table.GetRowLevelTTL(); ttl != nil {\n",
"\t\t\trelOptionsArr := tree.NewDArray(types.String)\n",
"\t\t\tif err := relOptionsArr.Append(tree.NewDString(fmt.Sprintf(\"expire_after=%s\", ttl.DurationExpr))); err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\trelOptions = relOptionsArr\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif err := relOptionsArr.Append(tree.NewDString(fmt.Sprintf(\"ttl_expire_after=%s\", ttl.DurationExpr))); err != nil {\n"
],
"file_path": "pkg/sql/pg_catalog.go",
"type": "replace",
"edit_start_line_idx": 628
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package paramparse
import (
"context"
"github.com/cockroachdb/cockroach/pkg/geo/geoindex"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
"github.com/cockroachdb/errors"
)
// SetStorageParameters sets the given storage parameters using the
// given observer.
func SetStorageParameters(
ctx context.Context,
semaCtx *tree.SemaContext,
evalCtx *tree.EvalContext,
params tree.StorageParams,
paramObserver StorageParamObserver,
) error {
for _, sp := range params {
key := string(sp.Key)
if sp.Value == nil {
return pgerror.Newf(pgcode.InvalidParameterValue, "storage parameter %q requires a value", key)
}
// Expressions may be an unresolved name.
// Cast these as strings.
expr := UnresolvedNameToStrVal(sp.Value)
// Convert the expressions to a datum.
typedExpr, err := tree.TypeCheck(ctx, expr, semaCtx, types.Any)
if err != nil {
return err
}
if typedExpr, err = evalCtx.NormalizeExpr(typedExpr); err != nil {
return err
}
datum, err := typedExpr.Eval(evalCtx)
if err != nil {
return err
}
if err := paramObserver.onSet(ctx, semaCtx, evalCtx, key, datum); err != nil {
return err
}
}
return paramObserver.runPostChecks()
}
// ResetStorageParameters sets the given storage parameters using the
// given observer.
func ResetStorageParameters(
ctx context.Context,
evalCtx *tree.EvalContext,
params tree.NameList,
paramObserver StorageParamObserver,
) error {
for _, p := range params {
if err := paramObserver.onReset(evalCtx, string(p)); err != nil {
return err
}
}
return paramObserver.runPostChecks()
}
// StorageParamObserver applies a storage parameter to an underlying item.
type StorageParamObserver interface {
// onSet is called during CREATE [TABLE | INDEX] ... WITH (...) or
// ALTER [TABLE | INDEX] ... WITH (...).
onSet(ctx context.Context, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error
// onReset is called during ALTER [TABLE | INDEX] ... RESET (...)
onReset(evalCtx *tree.EvalContext, key string) error
// runPostChecks is called after all storage parameters have been set.
// This allows checking whether multiple storage parameters together
// form a valid configuration.
runPostChecks() error
}
// TableStorageParamObserver observes storage parameters for tables.
type TableStorageParamObserver struct {
tableDesc *tabledesc.Mutable
}
// NewTableStorageParamObserver returns a new TableStorageParamObserver.
func NewTableStorageParamObserver(tableDesc *tabledesc.Mutable) *TableStorageParamObserver {
return &TableStorageParamObserver{tableDesc: tableDesc}
}
var _ StorageParamObserver = (*TableStorageParamObserver)(nil)
// runPostChecks implements the StorageParamObserver interface.
func (po *TableStorageParamObserver) runPostChecks() error {
if err := tabledesc.ValidateRowLevelTTL(po.tableDesc.GetRowLevelTTL()); err != nil {
return err
}
return nil
}
type tableParam struct {
onSet func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error
onReset func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error
}
var tableParams = map[string]tableParam{
`fillfactor`: {
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
return setFillFactorStorageParam(evalCtx, key, datum)
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
// Operation is a no-op so do nothing.
return nil
},
},
`autovacuum_enabled`: {
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
var boolVal bool
if stringVal, err := DatumAsString(evalCtx, key, datum); err == nil {
boolVal, err = ParseBoolVar(key, stringVal)
if err != nil {
return err
}
} else {
s, err := GetSingleBool(key, datum)
if err != nil {
return err
}
boolVal = bool(*s)
}
if !boolVal && evalCtx != nil {
evalCtx.ClientNoticeSender.BufferClientNotice(
evalCtx.Context,
pgnotice.Newf(`storage parameter "%s = %s" is ignored`, key, datum.String()),
)
}
return nil
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
// Operation is a no-op so do nothing.
return nil
},
},
`expire_after`: {
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
var d *tree.DInterval
if stringVal, err := DatumAsString(evalCtx, key, datum); err == nil {
d, err = tree.ParseDInterval(evalCtx.SessionData().GetIntervalStyle(), stringVal)
if err != nil || d == nil {
return pgerror.Newf(
pgcode.InvalidParameterValue,
`value of "expire_after" must be an interval`,
)
}
} else {
var ok bool
d, ok = datum.(*tree.DInterval)
if !ok || d == nil {
return pgerror.Newf(
pgcode.InvalidParameterValue,
`value of "expire_after" must be an interval`,
)
}
}
if d.Duration.Compare(duration.MakeDuration(0, 0, 0)) < 0 {
return pgerror.Newf(
pgcode.InvalidParameterValue,
`value of "expire_after" must be at least zero`,
)
}
if po.tableDesc.RowLevelTTL == nil {
po.tableDesc.RowLevelTTL = &descpb.TableDescriptor_RowLevelTTL{}
}
po.tableDesc.RowLevelTTL.DurationExpr = tree.Serialize(d)
return nil
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
po.tableDesc.RowLevelTTL = nil
return nil
},
},
}
func init() {
for _, param := range []string{
`toast_tuple_target`,
`parallel_workers`,
`toast.autovacuum_enabled`,
`autovacuum_vacuum_threshold`,
`toast.autovacuum_vacuum_threshold`,
`autovacuum_vacuum_scale_factor`,
`toast.autovacuum_vacuum_scale_factor`,
`autovacuum_analyze_threshold`,
`autovacuum_analyze_scale_factor`,
`autovacuum_vacuum_cost_delay`,
`toast.autovacuum_vacuum_cost_delay`,
`autovacuum_vacuum_cost_limit`,
`autovacuum_freeze_min_age`,
`toast.autovacuum_freeze_min_age`,
`autovacuum_freeze_max_age`,
`toast.autovacuum_freeze_max_age`,
`autovacuum_freeze_table_age`,
`toast.autovacuum_freeze_table_age`,
`autovacuum_multixact_freeze_min_age`,
`toast.autovacuum_multixact_freeze_min_age`,
`autovacuum_multixact_freeze_max_age`,
`toast.autovacuum_multixact_freeze_max_age`,
`autovacuum_multixact_freeze_table_age`,
`toast.autovacuum_multixact_freeze_table_age`,
`log_autovacuum_min_duration`,
`toast.log_autovacuum_min_duration`,
`user_catalog_table`,
} {
tableParams[param] = tableParam{
onSet: func(ctx context.Context, po *TableStorageParamObserver, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
return unimplemented.NewWithIssuef(43299, "storage parameter %q", key)
},
onReset: func(po *TableStorageParamObserver, evalCtx *tree.EvalContext, key string) error {
return nil
},
}
}
}
// onSet implements the StorageParamObserver interface.
func (po *TableStorageParamObserver) onSet(
ctx context.Context,
semaCtx *tree.SemaContext,
evalCtx *tree.EvalContext,
key string,
datum tree.Datum,
) error {
if p, ok := tableParams[key]; ok {
return p.onSet(ctx, po, semaCtx, evalCtx, key, datum)
}
return pgerror.Newf(pgcode.InvalidParameterValue, "invalid storage parameter %q", key)
}
// onReset implements the StorageParamObserver interface.
func (po *TableStorageParamObserver) onReset(evalCtx *tree.EvalContext, key string) error {
if p, ok := tableParams[key]; ok {
return p.onReset(po, evalCtx, key)
}
return pgerror.Newf(pgcode.InvalidParameterValue, "invalid storage parameter %q", key)
}
func setFillFactorStorageParam(evalCtx *tree.EvalContext, key string, datum tree.Datum) error {
val, err := DatumAsFloat(evalCtx, key, datum)
if err != nil {
return err
}
if val < 0 || val > 100 {
return pgerror.Newf(pgcode.InvalidParameterValue, "%q must be between 0 and 100", key)
}
if evalCtx != nil {
evalCtx.ClientNoticeSender.BufferClientNotice(
evalCtx.Context,
pgnotice.Newf("storage parameter %q is ignored", key),
)
}
return nil
}
// IndexStorageParamObserver observes storage parameters for indexes.
type IndexStorageParamObserver struct {
IndexDesc *descpb.IndexDescriptor
}
var _ StorageParamObserver = (*IndexStorageParamObserver)(nil)
func getS2ConfigFromIndex(indexDesc *descpb.IndexDescriptor) *geoindex.S2Config {
var s2Config *geoindex.S2Config
if indexDesc.GeoConfig.S2Geometry != nil {
s2Config = indexDesc.GeoConfig.S2Geometry.S2Config
}
if indexDesc.GeoConfig.S2Geography != nil {
s2Config = indexDesc.GeoConfig.S2Geography.S2Config
}
return s2Config
}
func (po *IndexStorageParamObserver) applyS2ConfigSetting(
evalCtx *tree.EvalContext, key string, expr tree.Datum, min int64, max int64,
) error {
s2Config := getS2ConfigFromIndex(po.IndexDesc)
if s2Config == nil {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"index setting %q can only be set on GEOMETRY or GEOGRAPHY spatial indexes",
key,
)
}
val, err := DatumAsInt(evalCtx, key, expr)
if err != nil {
return errors.Wrapf(err, "error decoding %q", key)
}
if val < min || val > max {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"%q value must be between %d and %d inclusive",
key,
min,
max,
)
}
switch key {
case `s2_max_level`:
s2Config.MaxLevel = int32(val)
case `s2_level_mod`:
s2Config.LevelMod = int32(val)
case `s2_max_cells`:
s2Config.MaxCells = int32(val)
}
return nil
}
func (po *IndexStorageParamObserver) applyGeometryIndexSetting(
evalCtx *tree.EvalContext, key string, expr tree.Datum,
) error {
if po.IndexDesc.GeoConfig.S2Geometry == nil {
return pgerror.Newf(pgcode.InvalidParameterValue, "%q can only be applied to GEOMETRY spatial indexes", key)
}
val, err := DatumAsFloat(evalCtx, key, expr)
if err != nil {
return errors.Wrapf(err, "error decoding %q", key)
}
switch key {
case `geometry_min_x`:
po.IndexDesc.GeoConfig.S2Geometry.MinX = val
case `geometry_max_x`:
po.IndexDesc.GeoConfig.S2Geometry.MaxX = val
case `geometry_min_y`:
po.IndexDesc.GeoConfig.S2Geometry.MinY = val
case `geometry_max_y`:
po.IndexDesc.GeoConfig.S2Geometry.MaxY = val
default:
return pgerror.Newf(pgcode.InvalidParameterValue, "unknown key: %q", key)
}
return nil
}
// onSet implements the StorageParamObserver interface.
func (po *IndexStorageParamObserver) onSet(
ctx context.Context,
semaCtx *tree.SemaContext,
evalCtx *tree.EvalContext,
key string,
expr tree.Datum,
) error {
switch key {
case `fillfactor`:
return setFillFactorStorageParam(evalCtx, key, expr)
case `s2_max_level`:
return po.applyS2ConfigSetting(evalCtx, key, expr, 0, 30)
case `s2_level_mod`:
return po.applyS2ConfigSetting(evalCtx, key, expr, 1, 3)
case `s2_max_cells`:
return po.applyS2ConfigSetting(evalCtx, key, expr, 1, 32)
case `geometry_min_x`, `geometry_max_x`, `geometry_min_y`, `geometry_max_y`:
return po.applyGeometryIndexSetting(evalCtx, key, expr)
case `vacuum_cleanup_index_scale_factor`,
`buffering`,
`fastupdate`,
`gin_pending_list_limit`,
`pages_per_range`,
`autosummarize`:
return unimplemented.NewWithIssuef(43299, "storage parameter %q", key)
}
return pgerror.Newf(pgcode.InvalidParameterValue, "invalid storage parameter %q", key)
}
// onReset implements the StorageParameterObserver interface.
func (po *IndexStorageParamObserver) onReset(evalCtx *tree.EvalContext, key string) error {
return errors.AssertionFailedf("non-implemented codepath")
}
// runPostChecks implements the StorageParamObserver interface.
func (po *IndexStorageParamObserver) runPostChecks() error {
s2Config := getS2ConfigFromIndex(po.IndexDesc)
if s2Config != nil {
if (s2Config.MaxLevel)%s2Config.LevelMod != 0 {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"s2_max_level (%d) must be divisible by s2_level_mod (%d)",
s2Config.MaxLevel,
s2Config.LevelMod,
)
}
}
if cfg := po.IndexDesc.GeoConfig.S2Geometry; cfg != nil {
if cfg.MaxX <= cfg.MinX {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"geometry_max_x (%f) must be greater than geometry_min_x (%f)",
cfg.MaxX,
cfg.MinX,
)
}
if cfg.MaxY <= cfg.MinY {
return pgerror.Newf(
pgcode.InvalidParameterValue,
"geometry_max_y (%f) must be greater than geometry_min_y (%f)",
cfg.MaxY,
cfg.MinY,
)
}
}
return nil
}
| pkg/sql/paramparse/paramobserver.go | 1 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.003698898246511817,
0.00027428235625848174,
0.00015988356608431786,
0.00017094024224206805,
0.0005334785673767328
] |
{
"id": 11,
"code_window": [
"\t\t\trelPersistence = relPersistenceTemporary\n",
"\t\t}\n",
"\t\tvar relOptions tree.Datum = tree.DNull\n",
"\t\tif ttl := table.GetRowLevelTTL(); ttl != nil {\n",
"\t\t\trelOptionsArr := tree.NewDArray(types.String)\n",
"\t\t\tif err := relOptionsArr.Append(tree.NewDString(fmt.Sprintf(\"expire_after=%s\", ttl.DurationExpr))); err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\trelOptions = relOptionsArr\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif err := relOptionsArr.Append(tree.NewDString(fmt.Sprintf(\"ttl_expire_after=%s\", ttl.DurationExpr))); err != nil {\n"
],
"file_path": "pkg/sql/pg_catalog.go",
"type": "replace",
"edit_start_line_idx": 628
} | // Code generated by TestPretty. DO NOT EDIT.
// GENERATED FILE DO NOT EDIT
1:
-
SELECT
*
FROM
generate_series(a),
generate_series(b),
ROWS FROM (
generate_series(a),
generate_series(b)
)
| pkg/sql/sem/tree/testdata/pretty/srfs.ref.golden.short | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00017535220831632614,
0.0001751408854033798,
0.00017492954793851823,
0.0001751408854033798,
2.113301889039576e-7
] |
{
"id": 11,
"code_window": [
"\t\t\trelPersistence = relPersistenceTemporary\n",
"\t\t}\n",
"\t\tvar relOptions tree.Datum = tree.DNull\n",
"\t\tif ttl := table.GetRowLevelTTL(); ttl != nil {\n",
"\t\t\trelOptionsArr := tree.NewDArray(types.String)\n",
"\t\t\tif err := relOptionsArr.Append(tree.NewDString(fmt.Sprintf(\"expire_after=%s\", ttl.DurationExpr))); err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\trelOptions = relOptionsArr\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif err := relOptionsArr.Append(tree.NewDString(fmt.Sprintf(\"ttl_expire_after=%s\", ttl.DurationExpr))); err != nil {\n"
],
"file_path": "pkg/sql/pg_catalog.go",
"type": "replace",
"edit_start_line_idx": 628
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package physical
import (
"bytes"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
)
// Provided physical properties of an operator. An operator might be able to
// satisfy a required property in multiple ways, and additional information is
// necessary for execution. For example, the required properties may allow
// multiple ordering choices; the provided properties would describe the
// specific ordering that has to be respected during execution.
//
// Provided properties are derived bottom-up (on the lowest cost tree).
type Provided struct {
// Ordering is an ordering that needs to be maintained on the rows produced by
// this operator in order to satisfy its required ordering. This is useful for
// configuring execution in a distributed setting, where results from multiple
// nodes may need to be merged. A best-effort attempt is made to have as few
// columns as possible.
//
// The ordering, in conjunction with the functional dependencies (in the
// logical properties), must intersect the required ordering.
//
// See the documentation for the opt/ordering package for some examples.
Ordering opt.Ordering
// Distribution is a distribution that needs to be maintained on the rows
// produced by this operator in order to satisfy its required distribution. If
// there is a required distribution, the provided distribution must match it
// exactly.
//
// The provided distribution is not yet used when building the DistSQL plan,
// but eventually it should inform the decision about whether to plan
// processors locally or remotely. Currently, it is used to determine whether
// a Distribute operator is needed between this operator and its parent, which
// can affect the cost of a plan.
Distribution Distribution
}
// Equals returns true if the two sets of provided properties are identical.
func (p *Provided) Equals(other *Provided) bool {
return p.Ordering.Equals(other.Ordering) && p.Distribution.Equals(other.Distribution)
}
func (p *Provided) String() string {
var buf bytes.Buffer
if len(p.Ordering) > 0 {
buf.WriteString("[ordering: ")
p.Ordering.Format(&buf)
if p.Distribution.Any() {
buf.WriteByte(']')
} else {
buf.WriteString(", ")
}
}
if !p.Distribution.Any() {
if len(p.Ordering) == 0 {
buf.WriteByte('[')
}
buf.WriteString("distribution: ")
p.Distribution.format(&buf)
buf.WriteByte(']')
}
return buf.String()
}
| pkg/sql/opt/props/physical/provided.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00017974927322939038,
0.00016910511476453394,
0.00015823269495740533,
0.00017130785272456706,
0.000007642322998435702
] |
{
"id": 11,
"code_window": [
"\t\t\trelPersistence = relPersistenceTemporary\n",
"\t\t}\n",
"\t\tvar relOptions tree.Datum = tree.DNull\n",
"\t\tif ttl := table.GetRowLevelTTL(); ttl != nil {\n",
"\t\t\trelOptionsArr := tree.NewDArray(types.String)\n",
"\t\t\tif err := relOptionsArr.Append(tree.NewDString(fmt.Sprintf(\"expire_after=%s\", ttl.DurationExpr))); err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\trelOptions = relOptionsArr\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif err := relOptionsArr.Append(tree.NewDString(fmt.Sprintf(\"ttl_expire_after=%s\", ttl.DurationExpr))); err != nil {\n"
],
"file_path": "pkg/sql/pg_catalog.go",
"type": "replace",
"edit_start_line_idx": 628
} | [1
] | pkg/util/json/testdata/raw/array_with_1_and_newline.json | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.00017515168292447925,
0.00017515168292447925,
0.00017515168292447925,
0.00017515168292447925,
0
] |
{
"id": 12,
"code_window": [
"\t); err != nil {\n",
"\t\treturn \"\", err\n",
"\t}\n",
"\n",
"\tif ttl := desc.GetRowLevelTTL(); ttl != nil {\n",
"\t\tf.Buffer.WriteString(` WITH (expire_after = `)\n",
"\t\tf.Buffer.WriteString(ttl.DurationExpr)\n",
"\t\tf.Buffer.WriteString(`)`)\n",
"\t}\n",
"\n",
"\tif err := showCreateLocality(desc, f); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tf.Buffer.WriteString(` WITH (ttl_expire_after = `)\n"
],
"file_path": "pkg/sql/show_create.go",
"type": "replace",
"edit_start_line_idx": 178
} | statement error value of "expire_after" must be an interval
CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = ' xx invalid interval xx')
statement error value of "expire_after" must be at least zero
CREATE TABLE tbl (id INT PRIMARY KEY, text TEXT) WITH (expire_after = '-10 minutes')
statement ok
CREATE TABLE tbl (
id INT PRIMARY KEY,
text TEXT,
FAMILY (id, text)
) WITH (expire_after = '10 minutes')
query TT
SHOW CREATE TABLE tbl
----
tbl CREATE TABLE public.tbl (
id INT8 NOT NULL,
text STRING NULL,
CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),
FAMILY fam_0_id_text (id, text)
) WITH (expire_after = '00:10:00':::INTERVAL)
query T
SELECT reloptions FROM pg_class WHERE relname = 'tbl'
----
{expire_after='00:10:00':::INTERVAL}
statement ok
DROP TABLE tbl;
CREATE TABLE tbl (
id INT PRIMARY KEY,
text TEXT,
FAMILY (id, text)
) WITH (expire_after = '10 minutes'::interval)
query TT
SHOW CREATE TABLE tbl
----
tbl CREATE TABLE public.tbl (
id INT8 NOT NULL,
text STRING NULL,
CONSTRAINT tbl_pkey PRIMARY KEY (id ASC),
FAMILY fam_0_id_text (id, text)
) WITH (expire_after = '00:10:00':::INTERVAL)
| pkg/sql/logictest/testdata/logic_test/row_level_ttl | 1 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0024350571911782026,
0.0007580275414511561,
0.00017715783906169236,
0.00045652801054529846,
0.0008469198946841061
] |
{
"id": 12,
"code_window": [
"\t); err != nil {\n",
"\t\treturn \"\", err\n",
"\t}\n",
"\n",
"\tif ttl := desc.GetRowLevelTTL(); ttl != nil {\n",
"\t\tf.Buffer.WriteString(` WITH (expire_after = `)\n",
"\t\tf.Buffer.WriteString(ttl.DurationExpr)\n",
"\t\tf.Buffer.WriteString(`)`)\n",
"\t}\n",
"\n",
"\tif err := showCreateLocality(desc, f); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tf.Buffer.WriteString(` WITH (ttl_expire_after = `)\n"
],
"file_path": "pkg/sql/show_create.go",
"type": "replace",
"edit_start_line_idx": 178
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sessiondata
import (
"bytes"
"strings"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catconstants"
"github.com/cockroachdb/cockroach/pkg/sql/lexbase"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
)
// DefaultSearchPath is the search path used by virgin sessions.
var DefaultSearchPath = MakeSearchPath(
[]string{catconstants.UserSchemaName, catconstants.PublicSchemaName},
)
// SearchPath represents a list of namespaces to search builtins in.
// The names must be normalized (as per Name.Normalize) already.
type SearchPath struct {
paths []string
containsPgCatalog bool
containsPgExtension bool
containsPgTempSchema bool
tempSchemaName string
userSchemaName string
}
// EmptySearchPath is a SearchPath with no schema names in it.
var EmptySearchPath = SearchPath{}
// DefaultSearchPathForUser returns the default search path with the user
// specific schema name set so that it can be expanded during resolution.
func DefaultSearchPathForUser(username security.SQLUsername) SearchPath {
return DefaultSearchPath.WithUserSchemaName(username.Normalized())
}
// MakeSearchPath returns a new immutable SearchPath struct. The paths slice
// must not be modified after hand-off to MakeSearchPath.
func MakeSearchPath(paths []string) SearchPath {
containsPgCatalog := false
containsPgExtension := false
containsPgTempSchema := false
for _, e := range paths {
switch e {
case catconstants.PgCatalogName:
containsPgCatalog = true
case catconstants.PgTempSchemaName:
containsPgTempSchema = true
case catconstants.PgExtensionSchemaName:
containsPgExtension = true
}
}
return SearchPath{
paths: paths,
containsPgCatalog: containsPgCatalog,
containsPgExtension: containsPgExtension,
containsPgTempSchema: containsPgTempSchema,
}
}
// WithTemporarySchemaName returns a new immutable SearchPath struct with
// the tempSchemaName supplied and the same paths as before.
// This should be called every time a session creates a temporary schema
// for the first time.
func (s SearchPath) WithTemporarySchemaName(tempSchemaName string) SearchPath {
return SearchPath{
paths: s.paths,
containsPgCatalog: s.containsPgCatalog,
containsPgTempSchema: s.containsPgTempSchema,
containsPgExtension: s.containsPgExtension,
userSchemaName: s.userSchemaName,
tempSchemaName: tempSchemaName,
}
}
// WithUserSchemaName returns a new immutable SearchPath struct with the
// userSchemaName populated and the same values for all other fields as before.
func (s SearchPath) WithUserSchemaName(userSchemaName string) SearchPath {
return SearchPath{
paths: s.paths,
containsPgCatalog: s.containsPgCatalog,
containsPgTempSchema: s.containsPgTempSchema,
containsPgExtension: s.containsPgExtension,
userSchemaName: userSchemaName,
tempSchemaName: s.tempSchemaName,
}
}
// UpdatePaths returns a new immutable SearchPath struct with the paths supplied
// and the same tempSchemaName and userSchemaName as before.
func (s SearchPath) UpdatePaths(paths []string) SearchPath {
return MakeSearchPath(paths).WithTemporarySchemaName(s.tempSchemaName).WithUserSchemaName(s.userSchemaName)
}
// MaybeResolveTemporarySchema returns the session specific temporary schema
// for the pg_temp alias (only if a temporary schema exists). It acts as a pass
// through for all other schema names.
func (s SearchPath) MaybeResolveTemporarySchema(schemaName string) (string, error) {
// Only allow access to the session specific temporary schema.
if strings.HasPrefix(schemaName, catconstants.PgTempSchemaName) && schemaName != catconstants.PgTempSchemaName && schemaName != s.tempSchemaName {
return schemaName, pgerror.New(pgcode.FeatureNotSupported, "cannot access temporary tables of other sessions")
}
// If the schemaName is pg_temp and the tempSchemaName has been set, pg_temp
// is an alias the session specific temp schema.
if schemaName == catconstants.PgTempSchemaName && s.tempSchemaName != "" {
return s.tempSchemaName, nil
}
return schemaName, nil
}
// Iter returns an iterator through the search path. We must include the
// implicit pg_catalog and temporary schema at the beginning of the search path,
// unless they have been explicitly set later by the user.
// We also include pg_extension in the path, as this normally be used in place
// of the public schema. This should be read before "public" is read.
// "The system catalog schema, pg_catalog, is always searched, whether it is
// mentioned in the path or not. If it is mentioned in the path then it will be
// searched in the specified order. If pg_catalog is not in the path then it
// will be searched before searching any of the path items."
// "Likewise, the current session's temporary-table schema, pg_temp_nnn, is
// always searched if it exists. It can be explicitly listed in the path by
// using the alias pg_temp. If it is not listed in the path then it is searched
// first (even before pg_catalog)."
// - https://www.postgresql.org/docs/9.1/static/runtime-config-client.html
func (s SearchPath) Iter() SearchPathIter {
implicitPgTempSchema := !s.containsPgTempSchema && s.tempSchemaName != ""
sp := SearchPathIter{
paths: s.paths,
implicitPgCatalog: !s.containsPgCatalog,
implicitPgExtension: !s.containsPgExtension,
implicitPgTempSchema: implicitPgTempSchema,
tempSchemaName: s.tempSchemaName,
userSchemaName: s.userSchemaName,
}
return sp
}
// IterWithoutImplicitPGSchemas is the same as Iter, but does not include the
// implicit pg_temp and pg_catalog.
func (s SearchPath) IterWithoutImplicitPGSchemas() SearchPathIter {
sp := SearchPathIter{
paths: s.paths,
implicitPgCatalog: false,
implicitPgTempSchema: false,
tempSchemaName: s.tempSchemaName,
userSchemaName: s.userSchemaName,
}
return sp
}
// GetPathArray returns the underlying path array of this SearchPath. The
// resultant slice is not to be modified.
func (s SearchPath) GetPathArray() []string {
return s.paths
}
// Contains returns true iff the SearchPath contains the given string.
func (s SearchPath) Contains(target string) bool {
for _, candidate := range s.GetPathArray() {
if candidate == target {
return true
}
}
return false
}
// GetTemporarySchemaName returns the temporary schema specific to the current
// session, or an empty string if the current session has not yet created a
// temporary schema.
//
// Note that even after the current session has created a temporary schema, a
// schema with that name may not exist in the session's current database.
func (s SearchPath) GetTemporarySchemaName() string {
return s.tempSchemaName
}
// Equals returns true if two SearchPaths are the same.
func (s SearchPath) Equals(other *SearchPath) bool {
if s.containsPgCatalog != other.containsPgCatalog {
return false
}
if s.containsPgExtension != other.containsPgExtension {
return false
}
if s.containsPgTempSchema != other.containsPgTempSchema {
return false
}
if len(s.paths) != len(other.paths) {
return false
}
if s.tempSchemaName != other.tempSchemaName {
return false
}
// Fast path: skip the check if it is the same slice.
if &s.paths[0] != &other.paths[0] {
for i := range s.paths {
if s.paths[i] != other.paths[i] {
return false
}
}
}
return true
}
// SQLIdentifiers returns quotes for string starting with special characters.
func (s SearchPath) SQLIdentifiers() string {
var buf bytes.Buffer
for i, path := range s.paths {
if i > 0 {
buf.WriteString(", ")
}
lexbase.EncodeRestrictedSQLIdent(&buf, path, lexbase.EncNoFlags)
}
return buf.String()
}
func (s SearchPath) String() string {
return strings.Join(s.paths, ",")
}
// SearchPathIter enables iteration over the search paths without triggering an
// allocation. Use one of the SearchPath.Iter methods to get an instance of the
// iterator, and then repeatedly call the Next method in order to iterate over
// each search path. The tempSchemaName in the iterator is only set if the session
// has created a temporary schema.
type SearchPathIter struct {
paths []string
implicitPgCatalog bool
implicitPgExtension bool
implicitPgTempSchema bool
tempSchemaName string
userSchemaName string
i int
}
// Next returns the next search path, or false if there are no remaining paths.
func (iter *SearchPathIter) Next() (path string, ok bool) {
// If the session specific temporary schema has not been created, we can
// preempt the name resolution failure by simply skipping the implicit pg_temp.
if iter.implicitPgTempSchema && iter.tempSchemaName != "" {
iter.implicitPgTempSchema = false
return iter.tempSchemaName, true
}
if iter.implicitPgCatalog {
iter.implicitPgCatalog = false
return catconstants.PgCatalogName, true
}
if iter.i < len(iter.paths) {
iter.i++
// If pg_temp is explicitly present in the paths, it must be resolved to the
// session specific temp schema (if one exists). tempSchemaName is set in the
// iterator iff the session has created a temporary schema.
if iter.paths[iter.i-1] == catconstants.PgTempSchemaName {
// If the session specific temporary schema has not been created we can
// preempt the resolution failure and iterate to the next entry.
if iter.tempSchemaName == "" {
return iter.Next()
}
return iter.tempSchemaName, true
}
if iter.paths[iter.i-1] == catconstants.UserSchemaName {
// In case the user schema name is unset, we simply iterate to the next
// entry.
if iter.userSchemaName == "" {
return iter.Next()
}
return iter.userSchemaName, true
}
// pg_extension should be read before delving into the schema.
if iter.paths[iter.i-1] == catconstants.PublicSchemaName && iter.implicitPgExtension {
iter.implicitPgExtension = false
// Go back one so `public` can be found again next.
iter.i--
return catconstants.PgExtensionSchemaName, true
}
return iter.paths[iter.i-1], true
}
return "", false
}
| pkg/sql/sessiondata/search_path.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0026893585454672575,
0.0002585815964266658,
0.0001635732187423855,
0.00016835995484143496,
0.00045198947191238403
] |
{
"id": 12,
"code_window": [
"\t); err != nil {\n",
"\t\treturn \"\", err\n",
"\t}\n",
"\n",
"\tif ttl := desc.GetRowLevelTTL(); ttl != nil {\n",
"\t\tf.Buffer.WriteString(` WITH (expire_after = `)\n",
"\t\tf.Buffer.WriteString(ttl.DurationExpr)\n",
"\t\tf.Buffer.WriteString(`)`)\n",
"\t}\n",
"\n",
"\tif err := showCreateLocality(desc, f); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tf.Buffer.WriteString(` WITH (ttl_expire_after = `)\n"
],
"file_path": "pkg/sql/show_create.go",
"type": "replace",
"edit_start_line_idx": 178
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cli
import (
"context"
gosql "database/sql"
"strings"
"github.com/cockroachdb/cockroach/pkg/workload"
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
func init() {
AddSubCmd(func(userFacing bool) *cobra.Command {
var checkCmd = SetCmdDefaults(&cobra.Command{
Use: `check`,
Short: `check a running cluster's data for consistency`,
})
for _, meta := range workload.Registered() {
gen := meta.New()
if hooks, ok := gen.(workload.Hookser); !ok || hooks.Hooks().CheckConsistency == nil {
continue
}
var genFlags *pflag.FlagSet
if f, ok := gen.(workload.Flagser); ok {
genFlags = f.Flags().FlagSet
// Hide irrelevant flags so they don't clutter up the help text, but
// don't remove them entirely so if someone switches from
// `./workload run` to `./workload check` they don't have to remove
// them from the invocation.
for flagName, meta := range f.Flags().Meta {
if meta.RuntimeOnly && !meta.CheckConsistencyOnly {
_ = genFlags.MarkHidden(flagName)
}
}
}
genCheckCmd := SetCmdDefaults(&cobra.Command{
Use: meta.Name + ` [CRDB URI]`,
Args: cobra.RangeArgs(0, 1),
})
genCheckCmd.Flags().AddFlagSet(genFlags)
genCheckCmd.Run = CmdHelper(gen, check)
checkCmd.AddCommand(genCheckCmd)
}
return checkCmd
})
}
func check(gen workload.Generator, urls []string, dbName string) error {
ctx := context.Background()
var fn func(context.Context, *gosql.DB) error
if hooks, ok := gen.(workload.Hookser); ok {
fn = hooks.Hooks().CheckConsistency
}
if fn == nil {
return errors.Errorf(`no consistency checks are defined for %s`, gen.Meta().Name)
}
sqlDB, err := gosql.Open(`cockroach`, strings.Join(urls, ` `))
if err != nil {
return err
}
defer sqlDB.Close()
if err := sqlDB.Ping(); err != nil {
return err
}
return fn(ctx, sqlDB)
}
| pkg/workload/cli/check.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0010249356273561716,
0.00026433513266965747,
0.00016281494754366577,
0.0001691424986347556,
0.0002689553366508335
] |
{
"id": 12,
"code_window": [
"\t); err != nil {\n",
"\t\treturn \"\", err\n",
"\t}\n",
"\n",
"\tif ttl := desc.GetRowLevelTTL(); ttl != nil {\n",
"\t\tf.Buffer.WriteString(` WITH (expire_after = `)\n",
"\t\tf.Buffer.WriteString(ttl.DurationExpr)\n",
"\t\tf.Buffer.WriteString(`)`)\n",
"\t}\n",
"\n",
"\tif err := showCreateLocality(desc, f); err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tf.Buffer.WriteString(` WITH (ttl_expire_after = `)\n"
],
"file_path": "pkg/sql/show_create.go",
"type": "replace",
"edit_start_line_idx": 178
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package errorutil
import (
"context"
"fmt"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/util/log/logcrash"
"github.com/cockroachdb/errors"
)
// UnexpectedWithIssueErrorf indicates an error with an associated Github issue.
// It's supposed to be used for conditions that would otherwise be checked by
// assertions, except that they fail and we need the public's help for tracking
// it down.
// The error message will invite users to report repros.
func UnexpectedWithIssueErrorf(issue int, format string, args ...interface{}) error {
err := errors.Newf(format, args...)
err = errors.Wrap(err, "unexpected error")
err = errors.WithSafeDetails(err, "issue #%d", errors.Safe(issue))
err = errors.WithHint(err,
fmt.Sprintf("We've been trying to track this particular issue down. "+
"Please report your reproduction at "+
"https://github.com/cockroachdb/cockroach/issues/%d "+
"unless that issue seems to have been resolved "+
"(in which case you might want to update crdb to a newer version).",
issue))
return err
}
// SendReport creates a Sentry report about the error, if the settings allow.
// The format string will be reproduced ad litteram in the report; the arguments
// will be sanitized.
func SendReport(ctx context.Context, sv *settings.Values, err error) {
if !logcrash.ShouldSendReport(sv) {
return
}
event, extraDetails := errors.BuildSentryReport(err)
logcrash.SendReport(ctx, logcrash.ReportTypeError, event, extraDetails)
}
| pkg/util/errorutil/error.go | 0 | https://github.com/cockroachdb/cockroach/commit/1f785d2901fb3639ad8914d69db2d7b8fa868059 | [
0.0016399120213463902,
0.00041441191569902003,
0.00016294587112497538,
0.0001695321552688256,
0.0005480804829858243
] |
{
"id": 0,
"code_window": [
"\n",
"\t\t// Handle destroy time transformations for output and local values.\n",
"\t\t// Reverse the edges from outputs and locals, so that\n",
"\t\t// interpolations don't fail during destroy.\n",
"\t\t// Create a destroy node for outputs to remove them from the state.\n",
"\t\t// Prune unreferenced values, which may have interpolations that can't\n",
"\t\t// be resolved.\n",
"\t\tGraphTransformIf(\n",
"\t\t\tfunc() bool { return b.Destroy },\n",
"\t\t\tGraphTransformMulti(\n",
"\t\t\t\t&DestroyValueReferenceTransformer{},\n",
"\t\t\t\t&DestroyOutputTransformer{},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "terraform/graph_builder_apply.go",
"type": "replace",
"edit_start_line_idx": 177
} | package terraform
import (
"fmt"
"log"
"sort"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/lang"
"github.com/hashicorp/terraform/states"
)
// GraphNodeReferenceable must be implemented by any node that represents
// a Terraform thing that can be referenced (resource, module, etc.).
//
// Even if the thing has no name, this should return an empty list. By
// implementing this and returning a non-nil result, you say that this CAN
// be referenced and other methods of referencing may still be possible (such
// as by path!)
type GraphNodeReferenceable interface {
GraphNodeSubPath
// ReferenceableAddrs returns a list of addresses through which this can be
// referenced.
ReferenceableAddrs() []addrs.Referenceable
}
// GraphNodeReferencer must be implemented by nodes that reference other
// Terraform items and therefore depend on them.
type GraphNodeReferencer interface {
GraphNodeSubPath
// References returns a list of references made by this node, which
// include both a referenced address and source location information for
// the reference.
References() []*addrs.Reference
}
type GraphNodeAttachDependencies interface {
GraphNodeResource
AttachDependencies([]addrs.AbsResource)
}
// GraphNodeReferenceOutside is an interface that can optionally be implemented.
// A node that implements it can specify that its own referenceable addresses
// and/or the addresses it references are in a different module than the
// node itself.
//
// Any referenceable addresses returned by ReferenceableAddrs are interpreted
// relative to the returned selfPath.
//
// Any references returned by References are interpreted relative to the
// returned referencePath.
//
// It is valid but not required for either of these paths to match what is
// returned by method Path, though if both match the main Path then there
// is no reason to implement this method.
//
// The primary use-case for this is the nodes representing module input
// variables, since their expressions are resolved in terms of their calling
// module, but they are still referenced from their own module.
type GraphNodeReferenceOutside interface {
// ReferenceOutside returns a path in which any references from this node
// are resolved.
ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance)
}
// ReferenceTransformer is a GraphTransformer that connects all the
// nodes that reference each other in order to form the proper ordering.
type ReferenceTransformer struct{}
func (t *ReferenceTransformer) Transform(g *Graph) error {
// Build a reference map so we can efficiently look up the references
vs := g.Vertices()
m := NewReferenceMap(vs)
// Find the things that reference things and connect them
for _, v := range vs {
parents, _ := m.References(v)
parentsDbg := make([]string, len(parents))
for i, v := range parents {
parentsDbg[i] = dag.VertexName(v)
}
log.Printf(
"[DEBUG] ReferenceTransformer: %q references: %v",
dag.VertexName(v), parentsDbg)
for _, parent := range parents {
g.Connect(dag.BasicEdge(v, parent))
}
if len(parents) > 0 {
continue
}
}
return nil
}
// AttachDependenciesTransformer records all resource dependencies for each
// instance, and attaches the addresses to the node itself. Managed resource
// will record these in the state for proper ordering of destroy operations.
type AttachDependenciesTransformer struct {
Config *configs.Config
State *states.State
Schemas *Schemas
}
func (t AttachDependenciesTransformer) Transform(g *Graph) error {
for _, v := range g.Vertices() {
attacher, ok := v.(GraphNodeAttachDependencies)
if !ok {
continue
}
selfAddr := attacher.ResourceAddr()
// Data sources don't need to track destroy dependencies
if selfAddr.Resource.Mode == addrs.DataResourceMode {
continue
}
ans, err := g.Ancestors(v)
if err != nil {
return err
}
// dedupe addrs when there's multiple instances involved, or
// multiple paths in the un-reduced graph
depMap := map[string]addrs.AbsResource{}
for _, d := range ans.List() {
var addr addrs.AbsResource
switch d := d.(type) {
case GraphNodeResourceInstance:
instAddr := d.ResourceInstanceAddr()
addr = instAddr.Resource.Resource.Absolute(instAddr.Module)
case GraphNodeResource:
addr = d.ResourceAddr()
default:
continue
}
// Data sources don't need to track destroy dependencies
if addr.Resource.Mode == addrs.DataResourceMode {
continue
}
if addr.Equal(selfAddr) {
continue
}
depMap[addr.String()] = addr
}
deps := make([]addrs.AbsResource, 0, len(depMap))
for _, d := range depMap {
deps = append(deps, d)
}
sort.Slice(deps, func(i, j int) bool {
return deps[i].String() < deps[j].String()
})
log.Printf("[TRACE] AttachDependenciesTransformer: %s depends on %s", attacher.ResourceAddr(), deps)
attacher.AttachDependencies(deps)
}
return nil
}
// DestroyReferenceTransformer is a GraphTransformer that reverses the edges
// for locals and outputs that depend on other nodes which will be
// removed during destroy. If a destroy node is evaluated before the local or
// output value, it will be removed from the state, and the later interpolation
// will fail.
type DestroyValueReferenceTransformer struct{}
func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error {
vs := g.Vertices()
for _, v := range vs {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
// reverse any outgoing edges so that the value is evaluated first.
for _, e := range g.EdgesFrom(v) {
target := e.Target()
// only destroy nodes will be evaluated in reverse
if _, ok := target.(GraphNodeDestroyer); !ok {
continue
}
log.Printf("[TRACE] output dep: %s", dag.VertexName(target))
g.RemoveEdge(e)
g.Connect(&DestroyEdge{S: target, T: v})
}
}
return nil
}
// PruneUnusedValuesTransformer is s GraphTransformer that removes local and
// output values which are not referenced in the graph. Since outputs and
// locals always need to be evaluated, if they reference a resource that is not
// available in the state the interpolation could fail.
type PruneUnusedValuesTransformer struct{}
func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
// this might need multiple runs in order to ensure that pruning a value
// doesn't effect a previously checked value.
for removed := 0; ; removed = 0 {
for _, v := range g.Vertices() {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
dependants := g.UpEdges(v)
switch dependants.Len() {
case 0:
// nothing at all depends on this
g.Remove(v)
removed++
case 1:
// because an output's destroy node always depends on the output,
// we need to check for the case of a single destroy node.
d := dependants.List()[0]
if _, ok := d.(*NodeDestroyableOutput); ok {
g.Remove(v)
removed++
}
}
}
if removed == 0 {
break
}
}
return nil
}
// ReferenceMap is a structure that can be used to efficiently check
// for references on a graph.
type ReferenceMap struct {
// vertices is a map from internal reference keys (as produced by the
// mapKey method) to one or more vertices that are identified by each key.
//
// A particular reference key might actually identify multiple vertices,
// e.g. in situations where one object is contained inside another.
vertices map[string][]dag.Vertex
// edges is a map whose keys are a subset of the internal reference keys
// from "vertices", and whose values are the nodes that refer to each
// key. The values in this map are the referrers, while values in
// "verticies" are the referents. The keys in both cases are referents.
edges map[string][]dag.Vertex
}
// References returns the set of vertices that the given vertex refers to,
// and any referenced addresses that do not have corresponding vertices.
func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) {
rn, ok := v.(GraphNodeReferencer)
if !ok {
return nil, nil
}
if _, ok := v.(GraphNodeSubPath); !ok {
return nil, nil
}
var matches []dag.Vertex
var missing []addrs.Referenceable
for _, ref := range rn.References() {
subject := ref.Subject
key := m.referenceMapKey(v, subject)
if _, exists := m.vertices[key]; !exists {
// If what we were looking for was a ResourceInstance then we
// might be in a resource-oriented graph rather than an
// instance-oriented graph, and so we'll see if we have the
// resource itself instead.
switch ri := subject.(type) {
case addrs.ResourceInstance:
subject = ri.ContainingResource()
case addrs.ResourceInstancePhase:
subject = ri.ContainingResource()
}
key = m.referenceMapKey(v, subject)
}
vertices := m.vertices[key]
for _, rv := range vertices {
// don't include self-references
if rv == v {
continue
}
matches = append(matches, rv)
}
if len(vertices) == 0 {
missing = append(missing, ref.Subject)
}
}
return matches, missing
}
// Referrers returns the set of vertices that refer to the given vertex.
func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex {
rn, ok := v.(GraphNodeReferenceable)
if !ok {
return nil
}
sp, ok := v.(GraphNodeSubPath)
if !ok {
return nil
}
var matches []dag.Vertex
for _, addr := range rn.ReferenceableAddrs() {
key := m.mapKey(sp.Path(), addr)
referrers, ok := m.edges[key]
if !ok {
continue
}
// If the referrer set includes our own given vertex then we skip,
// since we don't want to return self-references.
selfRef := false
for _, p := range referrers {
if p == v {
selfRef = true
break
}
}
if selfRef {
continue
}
matches = append(matches, referrers...)
}
return matches
}
func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string {
return fmt.Sprintf("%s|%s", path.String(), addr.String())
}
// vertexReferenceablePath returns the path in which the given vertex can be
// referenced. This is the path that its results from ReferenceableAddrs
// are considered to be relative to.
//
// Only GraphNodeSubPath implementations can be referenced, so this method will
// panic if the given vertex does not implement that interface.
func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance {
sp, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp))
}
if outside, ok := v.(GraphNodeReferenceOutside); ok {
// Vertex is referenced from a different module than where it was
// declared.
path, _ := outside.ReferenceOutside()
return path
}
// Vertex is referenced from the same module as where it was declared.
return sp.Path()
}
// vertexReferencePath returns the path in which references _from_ the given
// vertex must be interpreted.
//
// Only GraphNodeSubPath implementations can have references, so this method
// will panic if the given vertex does not implement that interface.
func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance {
sp, ok := referrer.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp))
}
var path addrs.ModuleInstance
if outside, ok := referrer.(GraphNodeReferenceOutside); ok {
// Vertex makes references to objects in a different module than where
// it was declared.
_, path = outside.ReferenceOutside()
return path
}
// Vertex makes references to objects in the same module as where it
// was declared.
return sp.Path()
}
// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex
// that the reference is from, and "addr" is the address of the object being
// referenced.
//
// The result is an opaque string that includes both the address of the given
// object and the address of the module instance that object belongs to.
//
// Only GraphNodeSubPath implementations can be referrers, so this method will
// panic if the given vertex does not implement that interface.
func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string {
path := vertexReferencePath(referrer)
return m.mapKey(path, addr)
}
// NewReferenceMap is used to create a new reference map for the
// given set of vertices.
func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
var m ReferenceMap
// Build the lookup table
vertices := make(map[string][]dag.Vertex)
for _, v := range vs {
_, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
continue
}
// We're only looking for referenceable nodes
rn, ok := v.(GraphNodeReferenceable)
if !ok {
continue
}
path := m.vertexReferenceablePath(v)
// Go through and cache them
for _, addr := range rn.ReferenceableAddrs() {
key := m.mapKey(path, addr)
vertices[key] = append(vertices[key], v)
}
// Any node can be referenced by the address of the module it belongs
// to or any of that module's ancestors.
for _, addr := range path.Ancestors()[1:] {
// Can be referenced either as the specific call instance (with
// an instance key) or as the bare module call itself (the "module"
// block in the parent module that created the instance).
callPath, call := addr.Call()
callInstPath, callInst := addr.CallInstance()
callKey := m.mapKey(callPath, call)
callInstKey := m.mapKey(callInstPath, callInst)
vertices[callKey] = append(vertices[callKey], v)
vertices[callInstKey] = append(vertices[callInstKey], v)
}
}
// Build the lookup table for referenced by
edges := make(map[string][]dag.Vertex)
for _, v := range vs {
_, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
continue
}
rn, ok := v.(GraphNodeReferencer)
if !ok {
// We're only looking for referenceable nodes
continue
}
// Go through and cache them
for _, ref := range rn.References() {
if ref.Subject == nil {
// Should never happen
panic(fmt.Sprintf("%T.References returned reference with nil subject", rn))
}
key := m.referenceMapKey(v, ref.Subject)
edges[key] = append(edges[key], v)
}
}
m.vertices = vertices
m.edges = edges
return &m
}
// ReferencesFromConfig returns the references that a configuration has
// based on the interpolated variables in a configuration.
func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference {
if body == nil {
return nil
}
refs, _ := lang.ReferencesInBlock(body, schema)
return refs
}
// appendResourceDestroyReferences identifies resource and resource instance
// references in the given slice and appends to it the "destroy-phase"
// equivalents of those references, returning the result.
//
// This can be used in the References implementation for a node which must also
// depend on the destruction of anything it references.
func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference {
given := refs
for _, ref := range given {
switch tr := ref.Subject.(type) {
case addrs.Resource:
newRef := *ref // shallow copy
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
refs = append(refs, &newRef)
case addrs.ResourceInstance:
newRef := *ref // shallow copy
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
refs = append(refs, &newRef)
}
}
return refs
}
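// NOTE: illustrative sketch, not part of the original file. It shows how a
// node's References implementation could combine the two helpers above so the
// node also waits for the destroy phase of anything its configuration refers
// to; the function name and signature here are hypothetical.
func exampleDestroyAwareReferences(body hcl.Body, schema *configschema.Block) []*addrs.Reference {
refs := ReferencesFromConfig(body, schema)
// append destroy-phase equivalents for any resource references found above
return appendResourceDestroyReferences(refs)
}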
func modulePrefixStr(p addrs.ModuleInstance) string {
return p.String()
}
func modulePrefixList(result []string, prefix string) []string {
if prefix != "" {
for i, v := range result {
result[i] = fmt.Sprintf("%s.%s", prefix, v)
}
}
return result
}
| terraform/transform_reference.go | 1 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.03351670876145363,
0.001634356682188809,
0.00016228809545282274,
0.0001703390444163233,
0.005100821610540152
] |
{
"id": 0,
"code_window": [
"\n",
"\t\t// Handle destroy time transformations for output and local values.\n",
"\t\t// Reverse the edges from outputs and locals, so that\n",
"\t\t// interpolations don't fail during destroy.\n",
"\t\t// Create a destroy node for outputs to remove them from the state.\n",
"\t\t// Prune unreferenced values, which may have interpolations that can't\n",
"\t\t// be resolved.\n",
"\t\tGraphTransformIf(\n",
"\t\t\tfunc() bool { return b.Destroy },\n",
"\t\t\tGraphTransformMulti(\n",
"\t\t\t\t&DestroyValueReferenceTransformer{},\n",
"\t\t\t\t&DestroyOutputTransformer{},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "terraform/graph_builder_apply.go",
"type": "replace",
"edit_start_line_idx": 177
} | // Copyright 2019, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metricproducer
import (
"sync"
)
// Manager maintains a list of active producers. Producers can register
// with the manager to allow readers to read all metrics provided by them.
// Readers can retrieve all producers registered with the manager,
// read metrics from the producers and export them.
type Manager struct {
mu sync.RWMutex
producers map[Producer]struct{}
}
var prodMgr *Manager
var once sync.Once
// GlobalManager is a single instance of producer manager
// that is used by all producers and all readers.
func GlobalManager() *Manager {
once.Do(func() {
prodMgr = &Manager{}
prodMgr.producers = make(map[Producer]struct{})
})
return prodMgr
}
// AddProducer adds the producer to the Manager if it is not already present.
func (pm *Manager) AddProducer(producer Producer) {
if producer == nil {
return
}
pm.mu.Lock()
defer pm.mu.Unlock()
pm.producers[producer] = struct{}{}
}
// DeleteProducer deletes the producer from the Manager if it is present.
func (pm *Manager) DeleteProducer(producer Producer) {
if producer == nil {
return
}
pm.mu.Lock()
defer pm.mu.Unlock()
delete(pm.producers, producer)
}
// GetAll returns a slice of all producers currently registered with
// the Manager. For each call it generates a new slice. The slice
// should not be cached as registration may change at any time. It is
// typically called periodically by exporter to read metrics from
// the producers.
func (pm *Manager) GetAll() []Producer {
pm.mu.Lock()
defer pm.mu.Unlock()
producers := make([]Producer, len(pm.producers))
i := 0
for producer := range pm.producers {
producers[i] = producer
i++
}
return producers
}
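// NOTE: illustrative usage sketch, not part of the original file. It relies
// only on the Manager methods defined above; the Producer value is assumed to
// be provided by some metric registry elsewhere in the program.
func exampleReadAll(p Producer) {
GlobalManager().AddProducer(p)
for _, producer := range GlobalManager().GetAll() {
_ = producer // an exporter/reader would collect this producer's metrics here
}
GlobalManager().DeleteProducer(p)
}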
| vendor/go.opencensus.io/metric/metricproducer/manager.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.00017613495583646,
0.00016874402354005724,
0.0001633567298995331,
0.00016617207438685,
0.000004922960215480998
] |
{
"id": 0,
"code_window": [
"\n",
"\t\t// Handle destroy time transformations for output and local values.\n",
"\t\t// Reverse the edges from outputs and locals, so that\n",
"\t\t// interpolations don't fail during destroy.\n",
"\t\t// Create a destroy node for outputs to remove them from the state.\n",
"\t\t// Prune unreferenced values, which may have interpolations that can't\n",
"\t\t// be resolved.\n",
"\t\tGraphTransformIf(\n",
"\t\t\tfunc() bool { return b.Destroy },\n",
"\t\t\tGraphTransformMulti(\n",
"\t\t\t\t&DestroyValueReferenceTransformer{},\n",
"\t\t\t\t&DestroyOutputTransformer{},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "terraform/graph_builder_apply.go",
"type": "replace",
"edit_start_line_idx": 177
} | // Package treeprint provides a simple ASCII tree composing tool.
package treeprint
import (
"bytes"
"fmt"
"io"
"reflect"
)
type Value interface{}
type MetaValue interface{}
// Tree represents a tree structure with leaf-nodes and branch-nodes.
type Tree interface {
// AddNode adds a new node to a branch.
AddNode(v Value) Tree
// AddMetaNode adds a new node with meta value provided to a branch.
AddMetaNode(meta MetaValue, v Value) Tree
// AddBranch adds a new branch node (a level deeper).
AddBranch(v Value) Tree
// AddMetaBranch adds a new branch node (a level deeper) with meta value provided.
AddMetaBranch(meta MetaValue, v Value) Tree
// Branch converts a leaf-node to a branch-node,
// applying this on a branch-node has no effect.
Branch() Tree
// FindByMeta finds a node whose meta value matches the provided one by reflect.DeepEqual,
// returns nil if not found.
FindByMeta(meta MetaValue) Tree
// FindByValue finds a node whose value matches the provided one by reflect.DeepEqual,
// returns nil if not found.
FindByValue(value Value) Tree
// String renders the tree or subtree as a string.
String() string
// Bytes renders the tree or subtree as byteslice.
Bytes() []byte
}
type node struct {
Root *node
Meta MetaValue
Value Value
Nodes []*node
}
func (n *node) AddNode(v Value) Tree {
n.Nodes = append(n.Nodes, &node{
Root: n,
Value: v,
})
if n.Root != nil {
return n.Root
}
return n
}
func (n *node) AddMetaNode(meta MetaValue, v Value) Tree {
n.Nodes = append(n.Nodes, &node{
Root: n,
Meta: meta,
Value: v,
})
if n.Root != nil {
return n.Root
}
return n
}
func (n *node) AddBranch(v Value) Tree {
branch := &node{
Value: v,
}
n.Nodes = append(n.Nodes, branch)
return branch
}
func (n *node) AddMetaBranch(meta MetaValue, v Value) Tree {
branch := &node{
Meta: meta,
Value: v,
}
n.Nodes = append(n.Nodes, branch)
return branch
}
func (n *node) Branch() Tree {
n.Root = nil
return n
}
func (n *node) FindByMeta(meta MetaValue) Tree {
for _, node := range n.Nodes {
if reflect.DeepEqual(node.Meta, meta) {
return node
}
if v := node.FindByMeta(meta); v != nil {
return v
}
}
return nil
}
func (n *node) FindByValue(value Value) Tree {
for _, node := range n.Nodes {
if reflect.DeepEqual(node.Value, value) {
return node
}
if v := node.FindByMeta(value); v != nil {
return v
}
}
return nil
}
func (n *node) Bytes() []byte {
buf := new(bytes.Buffer)
level := 0
levelEnded := make(map[int]bool)
if n.Root == nil {
buf.WriteString(string(EdgeTypeStart))
buf.WriteByte('\n')
} else {
edge := EdgeTypeMid
if len(n.Nodes) == 0 {
edge = EdgeTypeEnd
levelEnded[level] = true
}
printValues(buf, 0, levelEnded, edge, n.Meta, n.Value)
}
if len(n.Nodes) > 0 {
printNodes(buf, level, levelEnded, n.Nodes)
}
return buf.Bytes()
}
func (n *node) String() string {
return string(n.Bytes())
}
func printNodes(wr io.Writer,
level int, levelEnded map[int]bool, nodes []*node) {
for i, node := range nodes {
edge := EdgeTypeMid
if i == len(nodes)-1 {
levelEnded[level] = true
edge = EdgeTypeEnd
}
printValues(wr, level, levelEnded, edge, node.Meta, node.Value)
if len(node.Nodes) > 0 {
printNodes(wr, level+1, levelEnded, node.Nodes)
}
}
}
func printValues(wr io.Writer,
level int, levelEnded map[int]bool, edge EdgeType, meta MetaValue, val Value) {
for i := 0; i < level; i++ {
if levelEnded[i] {
fmt.Fprint(wr, " ")
continue
}
fmt.Fprintf(wr, "%s ", EdgeTypeLink)
}
if meta != nil {
fmt.Fprintf(wr, "%s [%v] %v\n", edge, meta, val)
return
}
fmt.Fprintf(wr, "%s %v\n", edge, val)
}
type EdgeType string
const (
EdgeTypeStart EdgeType = "."
EdgeTypeLink EdgeType = "│"
EdgeTypeMid EdgeType = "├──"
EdgeTypeEnd EdgeType = "└──"
)
func New() Tree {
return &node{}
}
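// NOTE: illustrative usage sketch, not part of the original file. It exercises
// the Tree interface defined above: build a small tree and render it.
func exampleUsage() string {
tree := New()
branch := tree.AddBranch("vendor")
branch.AddNode("treeprint")
tree.AddMetaNode("meta", "node with a meta value")
return tree.String() // renders the ASCII tree, starting from "."
}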
| vendor/github.com/xlab/treeprint/treeprint.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.0010133875766769052,
0.00022827052453067154,
0.00016201195830944926,
0.00016720654093660414,
0.00018968475342262536
] |
{
"id": 0,
"code_window": [
"\n",
"\t\t// Handle destroy time transformations for output and local values.\n",
"\t\t// Reverse the edges from outputs and locals, so that\n",
"\t\t// interpolations don't fail during destroy.\n",
"\t\t// Create a destroy node for outputs to remove them from the state.\n",
"\t\t// Prune unreferenced values, which may have interpolations that can't\n",
"\t\t// be resolved.\n",
"\t\tGraphTransformIf(\n",
"\t\t\tfunc() bool { return b.Destroy },\n",
"\t\t\tGraphTransformMulti(\n",
"\t\t\t\t&DestroyValueReferenceTransformer{},\n",
"\t\t\t\t&DestroyOutputTransformer{},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "terraform/graph_builder_apply.go",
"type": "replace",
"edit_start_line_idx": 177
} | # How to contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution,
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult [GitHub Help] for more
information on using pull requests.
[GitHub Help]: https://help.github.com/articles/about-pull-requests/
## Instructions
Fork the repo, checkout the upstream repo to your GOPATH by:
```
$ go get -d go.opencensus.io
```
Add your fork as an origin:
```
cd $(go env GOPATH)/src/go.opencensus.io
git remote add fork [email protected]:YOUR_GITHUB_USERNAME/opencensus-go.git
```
Run tests:
```
$ make install-tools # Only first time.
$ make
```
Checkout a new branch, make modifications and push the branch to your fork:
```
$ git checkout -b feature
# edit files
$ git commit
$ git push fork feature
```
Open a pull request against the main opencensus-go repo.
## General Notes
This project uses Appveyor and Travis for CI.
The dependencies are managed with `go mod`; if you work with the sources under your
`$GOPATH` you need to set the environment variable `GO111MODULE=on`. | vendor/go.opencensus.io/CONTRIBUTING.md | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.0001762901956681162,
0.00017271842807531357,
0.00016872496053110808,
0.00017298638704232872,
0.0000020979693999834126
] |
{
"id": 1,
"code_window": [
"\t\t\tGraphTransformMulti(\n",
"\t\t\t\t&DestroyValueReferenceTransformer{},\n",
"\t\t\t\t&DestroyOutputTransformer{},\n",
"\t\t\t\t&PruneUnusedValuesTransformer{},\n",
"\t\t\t),\n",
"\t\t),\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "terraform/graph_builder_apply.go",
"type": "replace",
"edit_start_line_idx": 184
} | package terraform
import (
"fmt"
"log"
"sort"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/lang"
"github.com/hashicorp/terraform/states"
)
// GraphNodeReferenceable must be implemented by any node that represents
// a Terraform thing that can be referenced (resource, module, etc.).
//
// Even if the thing has no name, this should return an empty list. By
// implementing this and returning a non-nil result, you say that this CAN
// be referenced and other methods of referencing may still be possible (such
// as by path!)
type GraphNodeReferenceable interface {
GraphNodeSubPath
// ReferenceableAddrs returns a list of addresses through which this can be
// referenced.
ReferenceableAddrs() []addrs.Referenceable
}
// GraphNodeReferencer must be implemented by nodes that reference other
// Terraform items and therefore depend on them.
type GraphNodeReferencer interface {
GraphNodeSubPath
// References returns a list of references made by this node, which
// include both a referenced address and source location information for
// the reference.
References() []*addrs.Reference
}
type GraphNodeAttachDependencies interface {
GraphNodeResource
AttachDependencies([]addrs.AbsResource)
}
// GraphNodeReferenceOutside is an interface that can optionally be implemented.
// A node that implements it can specify that its own referenceable addresses
// and/or the addresses it references are in a different module than the
// node itself.
//
// Any referenceable addresses returned by ReferenceableAddrs are interpreted
// relative to the returned selfPath.
//
// Any references returned by References are interpreted relative to the
// returned referencePath.
//
// It is valid but not required for either of these paths to match what is
// returned by method Path, though if both match the main Path then there
// is no reason to implement this method.
//
// The primary use-case for this is the nodes representing module input
// variables, since their expressions are resolved in terms of their calling
// module, but they are still referenced from their own module.
type GraphNodeReferenceOutside interface {
// ReferenceOutside returns a path in which any references from this node
// are resolved.
ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance)
}
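// NOTE: illustrative sketch, not part of the original file. A hypothetical
// node declared in a child module but whose expressions are evaluated in the
// calling module could satisfy GraphNodeReferenceOutside like this.
type exampleOutsideNode struct {
declModule addrs.ModuleInstance // module where the node is declared
callModule addrs.ModuleInstance // module whose scope its references use
}
func (n *exampleOutsideNode) Path() addrs.ModuleInstance { return n.declModule }
func (n *exampleOutsideNode) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) {
return n.declModule, n.callModule
}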
// ReferenceTransformer is a GraphTransformer that connects all the
// nodes that reference each other in order to form the proper ordering.
type ReferenceTransformer struct{}
func (t *ReferenceTransformer) Transform(g *Graph) error {
// Build a reference map so we can efficiently look up the references
vs := g.Vertices()
m := NewReferenceMap(vs)
// Find the things that reference things and connect them
for _, v := range vs {
parents, _ := m.References(v)
parentsDbg := make([]string, len(parents))
for i, v := range parents {
parentsDbg[i] = dag.VertexName(v)
}
log.Printf(
"[DEBUG] ReferenceTransformer: %q references: %v",
dag.VertexName(v), parentsDbg)
for _, parent := range parents {
g.Connect(dag.BasicEdge(v, parent))
}
if len(parents) > 0 {
continue
}
}
return nil
}
// AttachDependenciesTransformer records all resource dependencies for each
// instance, and attaches the addresses to the node itself. Managed resources
// will record these in the state for proper ordering of destroy operations.
type AttachDependenciesTransformer struct {
Config *configs.Config
State *states.State
Schemas *Schemas
}
func (t AttachDependenciesTransformer) Transform(g *Graph) error {
for _, v := range g.Vertices() {
attacher, ok := v.(GraphNodeAttachDependencies)
if !ok {
continue
}
selfAddr := attacher.ResourceAddr()
// Data sources don't need to track destroy dependencies
if selfAddr.Resource.Mode == addrs.DataResourceMode {
continue
}
ans, err := g.Ancestors(v)
if err != nil {
return err
}
// dedupe addrs when there's multiple instances involved, or
// multiple paths in the un-reduced graph
depMap := map[string]addrs.AbsResource{}
for _, d := range ans.List() {
var addr addrs.AbsResource
switch d := d.(type) {
case GraphNodeResourceInstance:
instAddr := d.ResourceInstanceAddr()
addr = instAddr.Resource.Resource.Absolute(instAddr.Module)
case GraphNodeResource:
addr = d.ResourceAddr()
default:
continue
}
// Data sources don't need to track destroy dependencies
if addr.Resource.Mode == addrs.DataResourceMode {
continue
}
if addr.Equal(selfAddr) {
continue
}
depMap[addr.String()] = addr
}
deps := make([]addrs.AbsResource, 0, len(depMap))
for _, d := range depMap {
deps = append(deps, d)
}
sort.Slice(deps, func(i, j int) bool {
return deps[i].String() < deps[j].String()
})
log.Printf("[TRACE] AttachDependenciesTransformer: %s depends on %s", attacher.ResourceAddr(), deps)
attacher.AttachDependencies(deps)
}
return nil
}
// DestroyValueReferenceTransformer is a GraphTransformer that reverses the edges
// for locals and outputs that depend on other nodes which will be
// removed during destroy. If a destroy node is evaluated before the local or
// output value, it will be removed from the state, and the later interpolation
// will fail.
type DestroyValueReferenceTransformer struct{}
func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error {
vs := g.Vertices()
for _, v := range vs {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
// reverse any outgoing edges so that the value is evaluated first.
for _, e := range g.EdgesFrom(v) {
target := e.Target()
// only destroy nodes will be evaluated in reverse
if _, ok := target.(GraphNodeDestroyer); !ok {
continue
}
log.Printf("[TRACE] output dep: %s", dag.VertexName(target))
g.RemoveEdge(e)
g.Connect(&DestroyEdge{S: target, T: v})
}
}
return nil
}
// PruneUnusedValuesTransformer is a GraphTransformer that removes local and
// output values which are not referenced in the graph. Since outputs and
// locals always need to be evaluated, if they reference a resource that is not
// available in the state the interpolation could fail.
type PruneUnusedValuesTransformer struct{}
func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
// this might need multiple runs in order to ensure that pruning a value
// doesn't affect a previously checked value.
for removed := 0; ; removed = 0 {
for _, v := range g.Vertices() {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
dependants := g.UpEdges(v)
switch dependants.Len() {
case 0:
// nothing at all depends on this
g.Remove(v)
removed++
case 1:
// because an output's destroy node always depends on the output,
// we need to check for the case of a single destroy node.
d := dependants.List()[0]
if _, ok := d.(*NodeDestroyableOutput); ok {
g.Remove(v)
removed++
}
}
}
if removed == 0 {
break
}
}
return nil
}
// ReferenceMap is a structure that can be used to efficiently check
// for references on a graph.
type ReferenceMap struct {
// vertices is a map from internal reference keys (as produced by the
// mapKey method) to one or more vertices that are identified by each key.
//
// A particular reference key might actually identify multiple vertices,
// e.g. in situations where one object is contained inside another.
vertices map[string][]dag.Vertex
// edges is a map whose keys are a subset of the internal reference keys
// from "vertices", and whose values are the nodes that refer to each
// key. The values in this map are the referrers, while values in
// "verticies" are the referents. The keys in both cases are referents.
edges map[string][]dag.Vertex
}
// References returns the set of vertices that the given vertex refers to,
// and any referenced addresses that do not have corresponding vertices.
func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) {
rn, ok := v.(GraphNodeReferencer)
if !ok {
return nil, nil
}
if _, ok := v.(GraphNodeSubPath); !ok {
return nil, nil
}
var matches []dag.Vertex
var missing []addrs.Referenceable
for _, ref := range rn.References() {
subject := ref.Subject
key := m.referenceMapKey(v, subject)
if _, exists := m.vertices[key]; !exists {
// If what we were looking for was a ResourceInstance then we
// might be in a resource-oriented graph rather than an
// instance-oriented graph, and so we'll see if we have the
// resource itself instead.
switch ri := subject.(type) {
case addrs.ResourceInstance:
subject = ri.ContainingResource()
case addrs.ResourceInstancePhase:
subject = ri.ContainingResource()
}
key = m.referenceMapKey(v, subject)
}
vertices := m.vertices[key]
for _, rv := range vertices {
// don't include self-references
if rv == v {
continue
}
matches = append(matches, rv)
}
if len(vertices) == 0 {
missing = append(missing, ref.Subject)
}
}
return matches, missing
}
// Referrers returns the set of vertices that refer to the given vertex.
func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex {
rn, ok := v.(GraphNodeReferenceable)
if !ok {
return nil
}
sp, ok := v.(GraphNodeSubPath)
if !ok {
return nil
}
var matches []dag.Vertex
for _, addr := range rn.ReferenceableAddrs() {
key := m.mapKey(sp.Path(), addr)
referrers, ok := m.edges[key]
if !ok {
continue
}
// If the referrer set includes our own given vertex then we skip,
// since we don't want to return self-references.
selfRef := false
for _, p := range referrers {
if p == v {
selfRef = true
break
}
}
if selfRef {
continue
}
matches = append(matches, referrers...)
}
return matches
}
func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string {
return fmt.Sprintf("%s|%s", path.String(), addr.String())
}
// vertexReferenceablePath returns the path in which the given vertex can be
// referenced. This is the path that its results from ReferenceableAddrs
// are considered to be relative to.
//
// Only GraphNodeSubPath implementations can be referenced, so this method will
// panic if the given vertex does not implement that interface.
func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance {
sp, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp))
}
if outside, ok := v.(GraphNodeReferenceOutside); ok {
// Vertex is referenced from a different module than where it was
// declared.
path, _ := outside.ReferenceOutside()
return path
}
// Vertex is referenced from the same module as where it was declared.
return sp.Path()
}
// vertexReferencePath returns the path in which references _from_ the given
// vertex must be interpreted.
//
// Only GraphNodeSubPath implementations can have references, so this method
// will panic if the given vertex does not implement that interface.
func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance {
sp, ok := referrer.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp))
}
var path addrs.ModuleInstance
if outside, ok := referrer.(GraphNodeReferenceOutside); ok {
// Vertex makes references to objects in a different module than where
// it was declared.
_, path = outside.ReferenceOutside()
return path
}
// Vertex makes references to objects in the same module as where it
// was declared.
return sp.Path()
}
// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex
// that the reference is from, and "addr" is the address of the object being
// referenced.
//
// The result is an opaque string that includes both the address of the given
// object and the address of the module instance that object belongs to.
//
// Only GraphNodeSubPath implementations can be referrers, so this method will
// panic if the given vertex does not implement that interface.
func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string {
path := vertexReferencePath(referrer)
return m.mapKey(path, addr)
}
// NewReferenceMap is used to create a new reference map for the
// given set of vertices.
func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
var m ReferenceMap
// Build the lookup table
vertices := make(map[string][]dag.Vertex)
for _, v := range vs {
_, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
continue
}
// We're only looking for referenceable nodes
rn, ok := v.(GraphNodeReferenceable)
if !ok {
continue
}
path := m.vertexReferenceablePath(v)
// Go through and cache them
for _, addr := range rn.ReferenceableAddrs() {
key := m.mapKey(path, addr)
vertices[key] = append(vertices[key], v)
}
// Any node can be referenced by the address of the module it belongs
// to or any of that module's ancestors.
for _, addr := range path.Ancestors()[1:] {
// Can be referenced either as the specific call instance (with
// an instance key) or as the bare module call itself (the "module"
// block in the parent module that created the instance).
callPath, call := addr.Call()
callInstPath, callInst := addr.CallInstance()
callKey := m.mapKey(callPath, call)
callInstKey := m.mapKey(callInstPath, callInst)
vertices[callKey] = append(vertices[callKey], v)
vertices[callInstKey] = append(vertices[callInstKey], v)
}
}
// Build the lookup table for referenced by
edges := make(map[string][]dag.Vertex)
for _, v := range vs {
_, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
continue
}
rn, ok := v.(GraphNodeReferencer)
if !ok {
// We're only looking for referenceable nodes
continue
}
// Go through and cache them
for _, ref := range rn.References() {
if ref.Subject == nil {
// Should never happen
panic(fmt.Sprintf("%T.References returned reference with nil subject", rn))
}
key := m.referenceMapKey(v, ref.Subject)
edges[key] = append(edges[key], v)
}
}
m.vertices = vertices
m.edges = edges
return &m
}
// ReferencesFromConfig returns the references that a configuration has
// based on the interpolated variables in a configuration.
func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference {
if body == nil {
return nil
}
refs, _ := lang.ReferencesInBlock(body, schema)
return refs
}
// appendResourceDestroyReferences identifies resource and resource instance
// references in the given slice and appends to it the "destroy-phase"
// equivalents of those references, returning the result.
//
// This can be used in the References implementation for a node which must also
// depend on the destruction of anything it references.
func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference {
given := refs
for _, ref := range given {
switch tr := ref.Subject.(type) {
case addrs.Resource:
newRef := *ref // shallow copy
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
refs = append(refs, &newRef)
case addrs.ResourceInstance:
newRef := *ref // shallow copy
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
refs = append(refs, &newRef)
}
}
return refs
}
func modulePrefixStr(p addrs.ModuleInstance) string {
return p.String()
}
func modulePrefixList(result []string, prefix string) []string {
if prefix != "" {
for i, v := range result {
result[i] = fmt.Sprintf("%s.%s", prefix, v)
}
}
return result
}
| terraform/transform_reference.go | 1 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.00605793809518218,
0.00042652941192500293,
0.00016348871577065438,
0.00017039589874912053,
0.0010850854450836778
] |
{
"id": 1,
"code_window": [
"\t\t\tGraphTransformMulti(\n",
"\t\t\t\t&DestroyValueReferenceTransformer{},\n",
"\t\t\t\t&DestroyOutputTransformer{},\n",
"\t\t\t\t&PruneUnusedValuesTransformer{},\n",
"\t\t\t),\n",
"\t\t),\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "terraform/graph_builder_apply.go",
"type": "replace",
"edit_start_line_idx": 184
} | // generated by stringer -type=tokType; DO NOT EDIT
package jmespath
import "fmt"
const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
func (i tokType) String() string {
if i < 0 || i >= tokType(len(_tokType_index)-1) {
return fmt.Sprintf("tokType(%d)", i)
}
return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
}
| vendor/github.com/jmespath/go-jmespath/toktype_string.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.0001696394756436348,
0.00016954813327174634,
0.00016945679089985788,
0.00016954813327174634,
9.134237188845873e-8
] |
{
"id": 1,
"code_window": [
"\t\t\tGraphTransformMulti(\n",
"\t\t\t\t&DestroyValueReferenceTransformer{},\n",
"\t\t\t\t&DestroyOutputTransformer{},\n",
"\t\t\t\t&PruneUnusedValuesTransformer{},\n",
"\t\t\t),\n",
"\t\t),\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "terraform/graph_builder_apply.go",
"type": "replace",
"edit_start_line_idx": 184
} | variable "login_username" {
}
resource "aws_instance" "foo" {
connection {
host = coalesce(self.public_ip, self.private_ip)
type = "ssh"
user = var.login_username
}
provisioner "test" {
commands = ["a", "b", "c"]
when = create
on_failure = fail
connection {
host = coalesce(self.public_ip, self.private_ip)
type = "winrm"
user = var.login_username
}
}
}
| configs/configupgrade/testdata/valid/provisioner/want/provisioner.tf | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.00017496157670393586,
0.00017147850303445011,
0.00016687093011569232,
0.00017260300228372216,
0.0000033973533390962984
] |
{
"id": 1,
"code_window": [
"\t\t\tGraphTransformMulti(\n",
"\t\t\t\t&DestroyValueReferenceTransformer{},\n",
"\t\t\t\t&DestroyOutputTransformer{},\n",
"\t\t\t\t&PruneUnusedValuesTransformer{},\n",
"\t\t\t),\n",
"\t\t),\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "terraform/graph_builder_apply.go",
"type": "replace",
"edit_start_line_idx": 184
} | package terraform
import (
"github.com/hashicorp/terraform/addrs"
)
// ProvisionerUIOutput is an implementation of UIOutput that calls a hook
// for the output so that the hooks can handle it.
type ProvisionerUIOutput struct {
InstanceAddr addrs.AbsResourceInstance
ProvisionerType string
Hooks []Hook
}
func (o *ProvisionerUIOutput) Output(msg string) {
for _, h := range o.Hooks {
h.ProvisionOutput(o.InstanceAddr, o.ProvisionerType, msg)
}
}
| terraform/ui_output_provisioner.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.00028309496701695025,
0.0002406112471362576,
0.00019812752725556493,
0.0002406112471362576,
0.00004248371988069266
] |
{
"id": 2,
"code_window": [
"\t\t\t),\n",
"\t\t),\n",
"\n",
"\t\t// Add the node to fix the state count boundaries\n",
"\t\t&CountBoundaryTransformer{\n",
"\t\t\tConfig: b.Config,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// Prune unreferenced values, which may have interpolations that can't\n",
"\t\t// be resolved.\n",
"\t\t&PruneUnusedValuesTransformer{\n",
"\t\t\tDestroy: b.Destroy,\n",
"\t\t},\n",
"\n"
],
"file_path": "terraform/graph_builder_apply.go",
"type": "add",
"edit_start_line_idx": 188
} | package terraform
import (
"fmt"
"log"
"sort"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/lang"
"github.com/hashicorp/terraform/states"
)
// GraphNodeReferenceable must be implemented by any node that represents
// a Terraform thing that can be referenced (resource, module, etc.).
//
// Even if the thing has no name, this should return an empty list. By
// implementing this and returning a non-nil result, you say that this CAN
// be referenced and other methods of referencing may still be possible (such
// as by path!)
type GraphNodeReferenceable interface {
GraphNodeSubPath
// ReferenceableAddrs returns a list of addresses through which this can be
// referenced.
ReferenceableAddrs() []addrs.Referenceable
}
// GraphNodeReferencer must be implemented by nodes that reference other
// Terraform items and therefore depend on them.
type GraphNodeReferencer interface {
GraphNodeSubPath
// References returns a list of references made by this node, which
// include both a referenced address and source location information for
// the reference.
References() []*addrs.Reference
}
type GraphNodeAttachDependencies interface {
GraphNodeResource
AttachDependencies([]addrs.AbsResource)
}
// GraphNodeReferenceOutside is an interface that can optionally be implemented.
// A node that implements it can specify that its own referenceable addresses
// and/or the addresses it references are in a different module than the
// node itself.
//
// Any referenceable addresses returned by ReferenceableAddrs are interpreted
// relative to the returned selfPath.
//
// Any references returned by References are interpreted relative to the
// returned referencePath.
//
// It is valid but not required for either of these paths to match what is
// returned by method Path, though if both match the main Path then there
// is no reason to implement this method.
//
// The primary use-case for this is the nodes representing module input
// variables, since their expressions are resolved in terms of their calling
// module, but they are still referenced from their own module.
type GraphNodeReferenceOutside interface {
// ReferenceOutside returns a path in which any references from this node
// are resolved.
ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance)
}
// ReferenceTransformer is a GraphTransformer that connects all the
// nodes that reference each other in order to form the proper ordering.
type ReferenceTransformer struct{}
func (t *ReferenceTransformer) Transform(g *Graph) error {
// Build a reference map so we can efficiently look up the references
vs := g.Vertices()
m := NewReferenceMap(vs)
// Find the things that reference things and connect them
for _, v := range vs {
parents, _ := m.References(v)
parentsDbg := make([]string, len(parents))
for i, v := range parents {
parentsDbg[i] = dag.VertexName(v)
}
log.Printf(
"[DEBUG] ReferenceTransformer: %q references: %v",
dag.VertexName(v), parentsDbg)
for _, parent := range parents {
g.Connect(dag.BasicEdge(v, parent))
}
if len(parents) > 0 {
continue
}
}
return nil
}
// AttachDependenciesTransformer records all resource dependencies for each
// instance, and attaches the addresses to the node itself. Managed resources
// will record these in the state for proper ordering of destroy operations.
type AttachDependenciesTransformer struct {
Config *configs.Config
State *states.State
Schemas *Schemas
}
func (t AttachDependenciesTransformer) Transform(g *Graph) error {
for _, v := range g.Vertices() {
attacher, ok := v.(GraphNodeAttachDependencies)
if !ok {
continue
}
selfAddr := attacher.ResourceAddr()
// Data sources don't need to track destroy dependencies
if selfAddr.Resource.Mode == addrs.DataResourceMode {
continue
}
ans, err := g.Ancestors(v)
if err != nil {
return err
}
// dedupe addrs when there's multiple instances involved, or
// multiple paths in the un-reduced graph
depMap := map[string]addrs.AbsResource{}
for _, d := range ans.List() {
var addr addrs.AbsResource
switch d := d.(type) {
case GraphNodeResourceInstance:
instAddr := d.ResourceInstanceAddr()
addr = instAddr.Resource.Resource.Absolute(instAddr.Module)
case GraphNodeResource:
addr = d.ResourceAddr()
default:
continue
}
// Data sources don't need to track destroy dependencies
if addr.Resource.Mode == addrs.DataResourceMode {
continue
}
if addr.Equal(selfAddr) {
continue
}
depMap[addr.String()] = addr
}
deps := make([]addrs.AbsResource, 0, len(depMap))
for _, d := range depMap {
deps = append(deps, d)
}
sort.Slice(deps, func(i, j int) bool {
return deps[i].String() < deps[j].String()
})
log.Printf("[TRACE] AttachDependenciesTransformer: %s depends on %s", attacher.ResourceAddr(), deps)
attacher.AttachDependencies(deps)
}
return nil
}
// DestroyValueReferenceTransformer is a GraphTransformer that reverses the edges
// for locals and outputs that depend on other nodes which will be
// removed during destroy. If a destroy node is evaluated before the local or
// output value, it will be removed from the state, and the later interpolation
// will fail.
type DestroyValueReferenceTransformer struct{}
func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error {
vs := g.Vertices()
for _, v := range vs {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
// reverse any outgoing edges so that the value is evaluated first.
for _, e := range g.EdgesFrom(v) {
target := e.Target()
// only destroy nodes will be evaluated in reverse
if _, ok := target.(GraphNodeDestroyer); !ok {
continue
}
log.Printf("[TRACE] output dep: %s", dag.VertexName(target))
g.RemoveEdge(e)
g.Connect(&DestroyEdge{S: target, T: v})
}
}
return nil
}
// PruneUnusedValuesTransformer is a GraphTransformer that removes local and
// output values which are not referenced in the graph. Since outputs and
// locals always need to be evaluated, if they reference a resource that is not
// available in the state the interpolation could fail.
type PruneUnusedValuesTransformer struct{}
func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
// this might need multiple runs in order to ensure that pruning a value
// doesn't affect a previously checked value.
for removed := 0; ; removed = 0 {
for _, v := range g.Vertices() {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
dependants := g.UpEdges(v)
switch dependants.Len() {
case 0:
// nothing at all depends on this
g.Remove(v)
removed++
case 1:
// because an output's destroy node always depends on the output,
// we need to check for the case of a single destroy node.
d := dependants.List()[0]
if _, ok := d.(*NodeDestroyableOutput); ok {
g.Remove(v)
removed++
}
}
}
if removed == 0 {
break
}
}
return nil
}
// ReferenceMap is a structure that can be used to efficiently check
// for references on a graph.
type ReferenceMap struct {
// vertices is a map from internal reference keys (as produced by the
// mapKey method) to one or more vertices that are identified by each key.
//
// A particular reference key might actually identify multiple vertices,
// e.g. in situations where one object is contained inside another.
vertices map[string][]dag.Vertex
// edges is a map whose keys are a subset of the internal reference keys
// from "vertices", and whose values are the nodes that refer to each
// key. The values in this map are the referrers, while values in
// "verticies" are the referents. The keys in both cases are referents.
edges map[string][]dag.Vertex
}
// References returns the set of vertices that the given vertex refers to,
// and any referenced addresses that do not have corresponding vertices.
func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) {
rn, ok := v.(GraphNodeReferencer)
if !ok {
return nil, nil
}
if _, ok := v.(GraphNodeSubPath); !ok {
return nil, nil
}
var matches []dag.Vertex
var missing []addrs.Referenceable
for _, ref := range rn.References() {
subject := ref.Subject
key := m.referenceMapKey(v, subject)
if _, exists := m.vertices[key]; !exists {
// If what we were looking for was a ResourceInstance then we
// might be in a resource-oriented graph rather than an
// instance-oriented graph, and so we'll see if we have the
// resource itself instead.
switch ri := subject.(type) {
case addrs.ResourceInstance:
subject = ri.ContainingResource()
case addrs.ResourceInstancePhase:
subject = ri.ContainingResource()
}
key = m.referenceMapKey(v, subject)
}
vertices := m.vertices[key]
for _, rv := range vertices {
// don't include self-references
if rv == v {
continue
}
matches = append(matches, rv)
}
if len(vertices) == 0 {
missing = append(missing, ref.Subject)
}
}
return matches, missing
}
// Referrers returns the set of vertices that refer to the given vertex.
func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex {
rn, ok := v.(GraphNodeReferenceable)
if !ok {
return nil
}
sp, ok := v.(GraphNodeSubPath)
if !ok {
return nil
}
var matches []dag.Vertex
for _, addr := range rn.ReferenceableAddrs() {
key := m.mapKey(sp.Path(), addr)
referrers, ok := m.edges[key]
if !ok {
continue
}
// If the referrer set includes our own given vertex then we skip,
// since we don't want to return self-references.
selfRef := false
for _, p := range referrers {
if p == v {
selfRef = true
break
}
}
if selfRef {
continue
}
matches = append(matches, referrers...)
}
return matches
}
func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string {
return fmt.Sprintf("%s|%s", path.String(), addr.String())
}
// vertexReferenceablePath returns the path in which the given vertex can be
// referenced. This is the path that its results from ReferenceableAddrs
// are considered to be relative to.
//
// Only GraphNodeSubPath implementations can be referenced, so this method will
// panic if the given vertex does not implement that interface.
func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance {
sp, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp))
}
if outside, ok := v.(GraphNodeReferenceOutside); ok {
// Vertex is referenced from a different module than where it was
// declared.
path, _ := outside.ReferenceOutside()
return path
}
// Vertex is referenced from the same module as where it was declared.
return sp.Path()
}
// vertexReferencePath returns the path in which references _from_ the given
// vertex must be interpreted.
//
// Only GraphNodeSubPath implementations can have references, so this method
// will panic if the given vertex does not implement that interface.
func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance {
sp, ok := referrer.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp))
}
var path addrs.ModuleInstance
if outside, ok := referrer.(GraphNodeReferenceOutside); ok {
// Vertex makes references to objects in a different module than where
// it was declared.
_, path = outside.ReferenceOutside()
return path
}
// Vertex makes references to objects in the same module as where it
// was declared.
return sp.Path()
}
// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex
// that the reference is from, and "addr" is the address of the object being
// referenced.
//
// The result is an opaque string that includes both the address of the given
// object and the address of the module instance that object belongs to.
//
// Only GraphNodeSubPath implementations can be referrers, so this method will
// panic if the given vertex does not implement that interface.
func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string {
path := vertexReferencePath(referrer)
return m.mapKey(path, addr)
}
// NewReferenceMap is used to create a new reference map for the
// given set of vertices.
func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
var m ReferenceMap
// Build the lookup table
vertices := make(map[string][]dag.Vertex)
for _, v := range vs {
_, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
continue
}
// We're only looking for referenceable nodes
rn, ok := v.(GraphNodeReferenceable)
if !ok {
continue
}
path := m.vertexReferenceablePath(v)
// Go through and cache them
for _, addr := range rn.ReferenceableAddrs() {
key := m.mapKey(path, addr)
vertices[key] = append(vertices[key], v)
}
// Any node can be referenced by the address of the module it belongs
// to or any of that module's ancestors.
for _, addr := range path.Ancestors()[1:] {
// Can be referenced either as the specific call instance (with
// an instance key) or as the bare module call itself (the "module"
// block in the parent module that created the instance).
callPath, call := addr.Call()
callInstPath, callInst := addr.CallInstance()
callKey := m.mapKey(callPath, call)
callInstKey := m.mapKey(callInstPath, callInst)
vertices[callKey] = append(vertices[callKey], v)
vertices[callInstKey] = append(vertices[callInstKey], v)
}
}
// Build the lookup table for referenced by
edges := make(map[string][]dag.Vertex)
for _, v := range vs {
_, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
continue
}
rn, ok := v.(GraphNodeReferencer)
if !ok {
// We're only looking for referenceable nodes
continue
}
// Go through and cache them
for _, ref := range rn.References() {
if ref.Subject == nil {
// Should never happen
panic(fmt.Sprintf("%T.References returned reference with nil subject", rn))
}
key := m.referenceMapKey(v, ref.Subject)
edges[key] = append(edges[key], v)
}
}
m.vertices = vertices
m.edges = edges
return &m
}
// ReferencesFromConfig returns the references that a configuration has
// based on the interpolated variables in a configuration.
func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference {
if body == nil {
return nil
}
refs, _ := lang.ReferencesInBlock(body, schema)
return refs
}
// appendResourceDestroyReferences identifies resource and resource instance
// references in the given slice and appends to it the "destroy-phase"
// equivalents of those references, returning the result.
//
// This can be used in the References implementation for a node which must also
// depend on the destruction of anything it references.
func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference {
given := refs
for _, ref := range given {
switch tr := ref.Subject.(type) {
case addrs.Resource:
newRef := *ref // shallow copy
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
refs = append(refs, &newRef)
case addrs.ResourceInstance:
newRef := *ref // shallow copy
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
refs = append(refs, &newRef)
}
}
return refs
}
func modulePrefixStr(p addrs.ModuleInstance) string {
return p.String()
}
func modulePrefixList(result []string, prefix string) []string {
if prefix != "" {
for i, v := range result {
result[i] = fmt.Sprintf("%s.%s", prefix, v)
}
}
return result
}
| terraform/transform_reference.go | 1 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.004843938164412975,
0.0003166660899296403,
0.00016362765745725483,
0.00017010059673339128,
0.0006774981156922877
] |
{
"id": 2,
"code_window": [
"\t\t\t),\n",
"\t\t),\n",
"\n",
"\t\t// Add the node to fix the state count boundaries\n",
"\t\t&CountBoundaryTransformer{\n",
"\t\t\tConfig: b.Config,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// Prune unreferenced values, which may have interpolations that can't\n",
"\t\t// be resolved.\n",
"\t\t&PruneUnusedValuesTransformer{\n",
"\t\t\tDestroy: b.Destroy,\n",
"\t\t},\n",
"\n"
],
"file_path": "terraform/graph_builder_apply.go",
"type": "add",
"edit_start_line_idx": 188
} | resource "aws_instance" "db" {}
resource "aws_instance" "web" {
foo = "${aws_instance.lb.id}"
}
| terraform/testdata/graph-missing-deps/main.tf | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.00017331096751149744,
0.00017331096751149744,
0.00017331096751149744,
0.00017331096751149744,
0
] |
{
"id": 2,
"code_window": [
"\t\t\t),\n",
"\t\t),\n",
"\n",
"\t\t// Add the node to fix the state count boundaries\n",
"\t\t&CountBoundaryTransformer{\n",
"\t\t\tConfig: b.Config,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// Prune unreferenced values, which may have interpolations that can't\n",
"\t\t// be resolved.\n",
"\t\t&PruneUnusedValuesTransformer{\n",
"\t\t\tDestroy: b.Destroy,\n",
"\t\t},\n",
"\n"
],
"file_path": "terraform/graph_builder_apply.go",
"type": "add",
"edit_start_line_idx": 188
} | package ini
// Walk will traverse the AST using the v, the Visitor.
func Walk(tree []AST, v Visitor) error {
for _, node := range tree {
switch node.Kind {
case ASTKindExpr,
ASTKindExprStatement:
if err := v.VisitExpr(node); err != nil {
return err
}
case ASTKindStatement,
ASTKindCompletedSectionStatement,
ASTKindNestedSectionStatement,
ASTKindCompletedNestedSectionStatement:
if err := v.VisitStatement(node); err != nil {
return err
}
}
}
return nil
}
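// NOTE: illustrative sketch, not part of the original file. It assumes the
// package's Visitor interface exposes the VisitExpr and VisitStatement methods
// that Walk invokes above; a visitor can simply count what it sees.
type countingVisitor struct {
exprs int
stmts int
}
func (c *countingVisitor) VisitExpr(node AST) error { c.exprs++; return nil }
func (c *countingVisitor) VisitStatement(node AST) error { c.stmts++; return nil }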
| vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.0001780036254785955,
0.0001726408809190616,
0.0001646986638661474,
0.0001752203534124419,
0.000005729791155317798
] |
{
"id": 2,
"code_window": [
"\t\t\t),\n",
"\t\t),\n",
"\n",
"\t\t// Add the node to fix the state count boundaries\n",
"\t\t&CountBoundaryTransformer{\n",
"\t\t\tConfig: b.Config,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// Prune unreferenced values, which may have interpolations that can't\n",
"\t\t// be resolved.\n",
"\t\t&PruneUnusedValuesTransformer{\n",
"\t\t\tDestroy: b.Destroy,\n",
"\t\t},\n",
"\n"
],
"file_path": "terraform/graph_builder_apply.go",
"type": "add",
"edit_start_line_idx": 188
} | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package openpgp implements high level operations on OpenPGP messages.
package openpgp // import "golang.org/x/crypto/openpgp"
import (
"crypto"
_ "crypto/sha256"
"hash"
"io"
"strconv"
"golang.org/x/crypto/openpgp/armor"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/packet"
)
// SignatureType is the armor type for a PGP signature.
var SignatureType = "PGP SIGNATURE"
// readArmored reads an armored block with the given type.
func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) {
block, err := armor.Decode(r)
if err != nil {
return
}
if block.Type != expectedType {
return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
}
return block.Body, nil
}
// MessageDetails contains the result of parsing an OpenPGP encrypted and/or
// signed message.
type MessageDetails struct {
IsEncrypted bool // true if the message was encrypted.
EncryptedToKeyIds []uint64 // the list of recipient key ids.
IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message.
DecryptedWith Key // the private key used to decrypt the message, if any.
IsSigned bool // true if the message is signed.
SignedByKeyId uint64 // the key id of the signer, if any.
SignedBy *Key // the key of the signer, if available.
LiteralData *packet.LiteralData // the metadata of the contents
UnverifiedBody io.Reader // the contents of the message.
// If IsSigned is true and SignedBy is non-zero then the signature will
// be verified as UnverifiedBody is read. The signature cannot be
// checked until the whole of UnverifiedBody is read so UnverifiedBody
// must be consumed until EOF before the data can be trusted. Even if a
// message isn't signed (or the signer is unknown) the data may contain
// an authentication code that is only checked once UnverifiedBody has
// been consumed. Once EOF has been seen, the following fields are
// valid. (An authentication code failure is reported as a
// SignatureError error when reading from UnverifiedBody.)
SignatureError error // nil if the signature is good.
Signature *packet.Signature // the signature packet itself, if v4 (default)
SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature
decrypted io.ReadCloser
}
// A PromptFunction is used as a callback by functions that may need to decrypt
// a private key, or prompt for a passphrase. It is called with a list of
// acceptable, encrypted private keys and a boolean that indicates whether a
// passphrase is usable. It should either decrypt a private key or return a
// passphrase to try. If the decrypted private key or given passphrase isn't
// correct, the function will be called again, forever. Any error returned will
// be passed up.
type PromptFunction func(keys []Key, symmetric bool) ([]byte, error)
// A keyEnvelopePair is used to store a private key with the envelope that
// contains a symmetric key, encrypted with that key.
type keyEnvelopePair struct {
key Key
encryptedKey *packet.EncryptedKey
}
// ReadMessage parses an OpenPGP message that may be signed and/or encrypted.
// The given KeyRing should contain both public keys (for signature
// verification) and, possibly encrypted, private keys for decrypting.
// If config is nil, sensible defaults will be used.
func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) {
var p packet.Packet
var symKeys []*packet.SymmetricKeyEncrypted
var pubKeys []keyEnvelopePair
var se *packet.SymmetricallyEncrypted
packets := packet.NewReader(r)
md = new(MessageDetails)
md.IsEncrypted = true
// The message, if encrypted, starts with a number of packets
// containing an encrypted decryption key. The decryption key is either
// encrypted to a public key, or with a passphrase. This loop
// collects these packets.
ParsePackets:
for {
p, err = packets.Next()
if err != nil {
return nil, err
}
switch p := p.(type) {
case *packet.SymmetricKeyEncrypted:
// This packet contains the decryption key encrypted with a passphrase.
md.IsSymmetricallyEncrypted = true
symKeys = append(symKeys, p)
case *packet.EncryptedKey:
// This packet contains the decryption key encrypted to a public key.
md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId)
switch p.Algo {
case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal:
break
default:
continue
}
var keys []Key
if p.KeyId == 0 {
keys = keyring.DecryptionKeys()
} else {
keys = keyring.KeysById(p.KeyId)
}
for _, k := range keys {
pubKeys = append(pubKeys, keyEnvelopePair{k, p})
}
case *packet.SymmetricallyEncrypted:
se = p
break ParsePackets
case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature:
// This message isn't encrypted.
if len(symKeys) != 0 || len(pubKeys) != 0 {
return nil, errors.StructuralError("key material not followed by encrypted message")
}
packets.Unread(p)
return readSignedMessage(packets, nil, keyring)
}
}
var candidates []Key
var decrypted io.ReadCloser
// Now that we have the list of encrypted keys we need to decrypt at
// least one of them or, if we cannot, we need to call the prompt
// function so that it can decrypt a key or give us a passphrase.
FindKey:
for {
// See if any of the keys already have a private key available
candidates = candidates[:0]
candidateFingerprints := make(map[string]bool)
for _, pk := range pubKeys {
if pk.key.PrivateKey == nil {
continue
}
if !pk.key.PrivateKey.Encrypted {
if len(pk.encryptedKey.Key) == 0 {
pk.encryptedKey.Decrypt(pk.key.PrivateKey, config)
}
if len(pk.encryptedKey.Key) == 0 {
continue
}
decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
if err != nil && err != errors.ErrKeyIncorrect {
return nil, err
}
if decrypted != nil {
md.DecryptedWith = pk.key
break FindKey
}
} else {
fpr := string(pk.key.PublicKey.Fingerprint[:])
if v := candidateFingerprints[fpr]; v {
continue
}
candidates = append(candidates, pk.key)
candidateFingerprints[fpr] = true
}
}
if len(candidates) == 0 && len(symKeys) == 0 {
return nil, errors.ErrKeyIncorrect
}
if prompt == nil {
return nil, errors.ErrKeyIncorrect
}
passphrase, err := prompt(candidates, len(symKeys) != 0)
if err != nil {
return nil, err
}
// Try the symmetric passphrase first
if len(symKeys) != 0 && passphrase != nil {
for _, s := range symKeys {
key, cipherFunc, err := s.Decrypt(passphrase)
if err == nil {
decrypted, err = se.Decrypt(cipherFunc, key)
if err != nil && err != errors.ErrKeyIncorrect {
return nil, err
}
if decrypted != nil {
break FindKey
}
}
}
}
}
md.decrypted = decrypted
if err := packets.Push(decrypted); err != nil {
return nil, err
}
return readSignedMessage(packets, md, keyring)
}
// readSignedMessage reads a possibly signed message. If mdin is non-zero then
// that structure is updated and returned. Otherwise a fresh MessageDetails is
// used.
func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) {
if mdin == nil {
mdin = new(MessageDetails)
}
md = mdin
var p packet.Packet
var h hash.Hash
var wrappedHash hash.Hash
FindLiteralData:
for {
p, err = packets.Next()
if err != nil {
return nil, err
}
switch p := p.(type) {
case *packet.Compressed:
if err := packets.Push(p.Body); err != nil {
return nil, err
}
case *packet.OnePassSignature:
if !p.IsLast {
return nil, errors.UnsupportedError("nested signatures")
}
h, wrappedHash, err = hashForSignature(p.Hash, p.SigType)
if err != nil {
md = nil
return
}
md.IsSigned = true
md.SignedByKeyId = p.KeyId
keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign)
if len(keys) > 0 {
md.SignedBy = &keys[0]
}
case *packet.LiteralData:
md.LiteralData = p
break FindLiteralData
}
}
if md.SignedBy != nil {
md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md}
} else if md.decrypted != nil {
md.UnverifiedBody = checkReader{md}
} else {
md.UnverifiedBody = md.LiteralData.Body
}
return md, nil
}
// hashForSignature returns a pair of hashes that can be used to verify a
// signature. The signature may specify that the contents of the signed message
// should be preprocessed (i.e. to normalize line endings). Thus this function
// returns two hashes. The second should be used to hash the message itself and
// performs any needed preprocessing.
func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
if !hashId.Available() {
return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId)))
}
h := hashId.New()
switch sigType {
case packet.SigTypeBinary:
return h, h, nil
case packet.SigTypeText:
return h, NewCanonicalTextHash(h), nil
}
return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
}
// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF
// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger
// MDC checks.
type checkReader struct {
md *MessageDetails
}
func (cr checkReader) Read(buf []byte) (n int, err error) {
n, err = cr.md.LiteralData.Body.Read(buf)
if err == io.EOF {
mdcErr := cr.md.decrypted.Close()
if mdcErr != nil {
err = mdcErr
}
}
return
}
// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes
// the data as it is read. When it sees an EOF from the underlying io.Reader
// it parses and checks a trailing Signature packet and triggers any MDC checks.
type signatureCheckReader struct {
packets *packet.Reader
h, wrappedHash hash.Hash
md *MessageDetails
}
func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) {
n, err = scr.md.LiteralData.Body.Read(buf)
scr.wrappedHash.Write(buf[:n])
if err == io.EOF {
var p packet.Packet
p, scr.md.SignatureError = scr.packets.Next()
if scr.md.SignatureError != nil {
return
}
var ok bool
if scr.md.Signature, ok = p.(*packet.Signature); ok {
scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature)
} else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok {
scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3)
} else {
scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature")
return
}
// The SymmetricallyEncrypted packet, if any, might have an
// unsigned hash of its own. In order to check this we need to
// close that Reader.
if scr.md.decrypted != nil {
mdcErr := scr.md.decrypted.Close()
if mdcErr != nil {
err = mdcErr
}
}
}
return
}
// CheckDetachedSignature takes a signed file and a detached signature and
// returns the signer if the signature is valid. If the signer isn't known,
// ErrUnknownIssuer is returned.
func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
var issuerKeyId uint64
var hashFunc crypto.Hash
var sigType packet.SignatureType
var keys []Key
var p packet.Packet
packets := packet.NewReader(signature)
for {
p, err = packets.Next()
if err == io.EOF {
return nil, errors.ErrUnknownIssuer
}
if err != nil {
return nil, err
}
switch sig := p.(type) {
case *packet.Signature:
if sig.IssuerKeyId == nil {
return nil, errors.StructuralError("signature doesn't have an issuer")
}
issuerKeyId = *sig.IssuerKeyId
hashFunc = sig.Hash
sigType = sig.SigType
case *packet.SignatureV3:
issuerKeyId = sig.IssuerKeyId
hashFunc = sig.Hash
sigType = sig.SigType
default:
return nil, errors.StructuralError("non signature packet found")
}
keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign)
if len(keys) > 0 {
break
}
}
if len(keys) == 0 {
panic("unreachable")
}
h, wrappedHash, err := hashForSignature(hashFunc, sigType)
if err != nil {
return nil, err
}
if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
return nil, err
}
for _, key := range keys {
switch sig := p.(type) {
case *packet.Signature:
err = key.PublicKey.VerifySignature(h, sig)
case *packet.SignatureV3:
err = key.PublicKey.VerifySignatureV3(h, sig)
default:
panic("unreachable")
}
if err == nil {
return key.Entity, nil
}
}
return nil, err
}
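// Illustrative usage sketch, not part of the original file: verifying a
// detached signature. The keyring, signedFile and sigFile readers are
// assumptions for the example.
//
//	signer, err := CheckDetachedSignature(keyring, signedFile, sigFile)
//	if err != nil {
//		// signature invalid, malformed, or issuer not present in the keyring
//	}
//	_ = signer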
// CheckArmoredDetachedSignature performs the same actions as
// CheckDetachedSignature but expects the signature to be armored.
func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
body, err := readArmored(signature, SignatureType)
if err != nil {
return
}
return CheckDetachedSignature(keyring, signed, body)
}
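// decryptAndVerify is an illustrative sketch and not part of the original
// file: a typical caller of ReadMessage. It drains UnverifiedBody (a real
// caller would keep the plaintext it reads) because the signature and MDC
// results are only meaningful once EOF has been reached.
func decryptAndVerify(ciphertext io.Reader, keyring KeyRing) (*MessageDetails, error) {
	md, err := ReadMessage(ciphertext, keyring, nil, nil)
	if err != nil {
		return nil, err
	}
	buf := make([]byte, 4096)
	for {
		_, err := md.UnverifiedBody.Read(buf)
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
	}
	if md.IsSigned && md.SignatureError != nil {
		return nil, md.SignatureError
	}
	return md, nil
}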
| vendor/golang.org/x/crypto/openpgp/read.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.00021545150957535952,
0.00017220486188307405,
0.00016437468002550304,
0.00017097235831897706,
0.000008481916665914468
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\n",
"\treturn nil\n",
"}\n",
"\n",
"// PruneUnusedValuesTransformer is s GraphTransformer that removes local and\n",
"// output values which are not referenced in the graph. Since outputs and\n",
"// locals always need to be evaluated, if they reference a resource that is not\n",
"// available in the state the interpolation could fail.\n",
"type PruneUnusedValuesTransformer struct{}\n",
"\n",
"func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"// PruneUnusedValuesTransformer is a GraphTransformer that removes local,\n",
"// variable, and output values which are not referenced in the graph. If these\n",
"// values reference a resource that is no longer in the state the interpolation\n",
"// could fail.\n",
"type PruneUnusedValuesTransformer struct {\n",
"\tDestroy bool\n",
"}\n"
],
"file_path": "terraform/transform_reference.go",
"type": "replace",
"edit_start_line_idx": 208
} | package terraform
import (
"fmt"
"log"
"sort"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/lang"
"github.com/hashicorp/terraform/states"
)
// GraphNodeReferenceable must be implemented by any node that represents
// a Terraform thing that can be referenced (resource, module, etc.).
//
// Even if the thing has no name, this should return an empty list. By
// implementing this and returning a non-nil result, you say that this CAN
// be referenced and other methods of referencing may still be possible (such
// as by path!)
type GraphNodeReferenceable interface {
GraphNodeSubPath
// ReferenceableAddrs returns a list of addresses through which this can be
// referenced.
ReferenceableAddrs() []addrs.Referenceable
}
// GraphNodeReferencer must be implemented by nodes that reference other
// Terraform items and therefore depend on them.
type GraphNodeReferencer interface {
GraphNodeSubPath
// References returns a list of references made by this node, which
// include both a referenced address and source location information for
// the reference.
References() []*addrs.Reference
}
type GraphNodeAttachDependencies interface {
GraphNodeResource
AttachDependencies([]addrs.AbsResource)
}
// GraphNodeReferenceOutside is an interface that can optionally be implemented.
// A node that implements it can specify that its own referenceable addresses
// and/or the addresses it references are in a different module than the
// node itself.
//
// Any referenceable addresses returned by ReferenceableAddrs are interpreted
// relative to the returned selfPath.
//
// Any references returned by References are interpreted relative to the
// returned referencePath.
//
// It is valid but not required for either of these paths to match what is
// returned by method Path, though if both match the main Path then there
// is no reason to implement this method.
//
// The primary use-case for this is the nodes representing module input
// variables, since their expressions are resolved in terms of their calling
// module, but they are still referenced from their own module.
type GraphNodeReferenceOutside interface {
// ReferenceOutside returns a path in which any references from this node
// are resolved.
ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance)
}
// ReferenceTransformer is a GraphTransformer that connects all the
// nodes that reference each other in order to form the proper ordering.
type ReferenceTransformer struct{}
func (t *ReferenceTransformer) Transform(g *Graph) error {
// Build a reference map so we can efficiently look up the references
vs := g.Vertices()
m := NewReferenceMap(vs)
// Find the things that reference things and connect them
for _, v := range vs {
parents, _ := m.References(v)
parentsDbg := make([]string, len(parents))
for i, v := range parents {
parentsDbg[i] = dag.VertexName(v)
}
log.Printf(
"[DEBUG] ReferenceTransformer: %q references: %v",
dag.VertexName(v), parentsDbg)
for _, parent := range parents {
g.Connect(dag.BasicEdge(v, parent))
}
if len(parents) > 0 {
continue
}
}
return nil
}
// AttachDependenciesTransformer records all resource dependencies for each
// instance, and attaches the addresses to the node itself. Managed resource
// will record these in the state for proper ordering of destroy operations.
type AttachDependenciesTransformer struct {
Config *configs.Config
State *states.State
Schemas *Schemas
}
func (t AttachDependenciesTransformer) Transform(g *Graph) error {
for _, v := range g.Vertices() {
attacher, ok := v.(GraphNodeAttachDependencies)
if !ok {
continue
}
selfAddr := attacher.ResourceAddr()
// Data sources don't need to track destroy dependencies
if selfAddr.Resource.Mode == addrs.DataResourceMode {
continue
}
ans, err := g.Ancestors(v)
if err != nil {
return err
}
// dedupe addrs when there's multiple instances involved, or
// multiple paths in the un-reduced graph
depMap := map[string]addrs.AbsResource{}
for _, d := range ans.List() {
var addr addrs.AbsResource
switch d := d.(type) {
case GraphNodeResourceInstance:
instAddr := d.ResourceInstanceAddr()
addr = instAddr.Resource.Resource.Absolute(instAddr.Module)
case GraphNodeResource:
addr = d.ResourceAddr()
default:
continue
}
// Data sources don't need to track destroy dependencies
if addr.Resource.Mode == addrs.DataResourceMode {
continue
}
if addr.Equal(selfAddr) {
continue
}
depMap[addr.String()] = addr
}
deps := make([]addrs.AbsResource, 0, len(depMap))
for _, d := range depMap {
deps = append(deps, d)
}
sort.Slice(deps, func(i, j int) bool {
return deps[i].String() < deps[j].String()
})
log.Printf("[TRACE] AttachDependenciesTransformer: %s depends on %s", attacher.ResourceAddr(), deps)
attacher.AttachDependencies(deps)
}
return nil
}
// DestroyValueReferenceTransformer is a GraphTransformer that reverses the edges
// for locals and outputs that depend on other nodes which will be
// removed during destroy. If a destroy node is evaluated before the local or
// output value, it will be removed from the state, and the later interpolation
// will fail.
type DestroyValueReferenceTransformer struct{}
func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error {
vs := g.Vertices()
for _, v := range vs {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
// reverse any outgoing edges so that the value is evaluated first.
for _, e := range g.EdgesFrom(v) {
target := e.Target()
// only destroy nodes will be evaluated in reverse
if _, ok := target.(GraphNodeDestroyer); !ok {
continue
}
log.Printf("[TRACE] output dep: %s", dag.VertexName(target))
g.RemoveEdge(e)
g.Connect(&DestroyEdge{S: target, T: v})
}
}
return nil
}
// PruneUnusedValuesTransformer is a GraphTransformer that removes local and
// output values which are not referenced in the graph. Since outputs and
// locals always need to be evaluated, if they reference a resource that is not
// available in the state the interpolation could fail.
type PruneUnusedValuesTransformer struct{}
func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
// this might need multiple runs in order to ensure that pruning a value
// doesn't affect a previously checked value.
for removed := 0; ; removed = 0 {
for _, v := range g.Vertices() {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
dependants := g.UpEdges(v)
switch dependants.Len() {
case 0:
// nothing at all depends on this
g.Remove(v)
removed++
case 1:
// because an output's destroy node always depends on the output,
// we need to check for the case of a single destroy node.
d := dependants.List()[0]
if _, ok := d.(*NodeDestroyableOutput); ok {
g.Remove(v)
removed++
}
}
}
if removed == 0 {
break
}
}
return nil
}
// ReferenceMap is a structure that can be used to efficiently check
// for references on a graph.
type ReferenceMap struct {
// vertices is a map from internal reference keys (as produced by the
// mapKey method) to one or more vertices that are identified by each key.
//
// A particular reference key might actually identify multiple vertices,
// e.g. in situations where one object is contained inside another.
vertices map[string][]dag.Vertex
// edges is a map whose keys are a subset of the internal reference keys
// from "vertices", and whose values are the nodes that refer to each
// key. The values in this map are the referrers, while values in
// "verticies" are the referents. The keys in both cases are referents.
edges map[string][]dag.Vertex
}
// References returns the set of vertices that the given vertex refers to,
// and any referenced addresses that do not have corresponding vertices.
func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) {
rn, ok := v.(GraphNodeReferencer)
if !ok {
return nil, nil
}
if _, ok := v.(GraphNodeSubPath); !ok {
return nil, nil
}
var matches []dag.Vertex
var missing []addrs.Referenceable
for _, ref := range rn.References() {
subject := ref.Subject
key := m.referenceMapKey(v, subject)
if _, exists := m.vertices[key]; !exists {
// If what we were looking for was a ResourceInstance then we
// might be in a resource-oriented graph rather than an
// instance-oriented graph, and so we'll see if we have the
// resource itself instead.
switch ri := subject.(type) {
case addrs.ResourceInstance:
subject = ri.ContainingResource()
case addrs.ResourceInstancePhase:
subject = ri.ContainingResource()
}
key = m.referenceMapKey(v, subject)
}
vertices := m.vertices[key]
for _, rv := range vertices {
// don't include self-references
if rv == v {
continue
}
matches = append(matches, rv)
}
if len(vertices) == 0 {
missing = append(missing, ref.Subject)
}
}
return matches, missing
}
// Referrers returns the set of vertices that refer to the given vertex.
func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex {
rn, ok := v.(GraphNodeReferenceable)
if !ok {
return nil
}
sp, ok := v.(GraphNodeSubPath)
if !ok {
return nil
}
var matches []dag.Vertex
for _, addr := range rn.ReferenceableAddrs() {
key := m.mapKey(sp.Path(), addr)
referrers, ok := m.edges[key]
if !ok {
continue
}
// If the referrer set includes our own given vertex then we skip,
// since we don't want to return self-references.
selfRef := false
for _, p := range referrers {
if p == v {
selfRef = true
break
}
}
if selfRef {
continue
}
matches = append(matches, referrers...)
}
return matches
}
func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string {
return fmt.Sprintf("%s|%s", path.String(), addr.String())
}
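// For example (illustrative, not from the original source): a reference to
// aws_instance.foo declared in module.child produces the key
// "module.child|aws_instance.foo".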
// vertexReferenceablePath returns the path in which the given vertex can be
// referenced. This is the path that its results from ReferenceableAddrs
// are considered to be relative to.
//
// Only GraphNodeSubPath implementations can be referenced, so this method will
// panic if the given vertex does not implement that interface.
func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance {
sp, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp))
}
if outside, ok := v.(GraphNodeReferenceOutside); ok {
// Vertex is referenced from a different module than where it was
// declared.
path, _ := outside.ReferenceOutside()
return path
}
// Vertex is referenced from the same module as where it was declared.
return sp.Path()
}
// vertexReferencePath returns the path in which references _from_ the given
// vertex must be interpreted.
//
// Only GraphNodeSubPath implementations can have references, so this method
// will panic if the given vertex does not implement that interface.
func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance {
sp, ok := referrer.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp))
}
var path addrs.ModuleInstance
if outside, ok := referrer.(GraphNodeReferenceOutside); ok {
// Vertex makes references to objects in a different module than where
// it was declared.
_, path = outside.ReferenceOutside()
return path
}
// Vertex makes references to objects in the same module as where it
// was declared.
return sp.Path()
}
// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex
// that the reference is from, and "addr" is the address of the object being
// referenced.
//
// The result is an opaque string that includes both the address of the given
// object and the address of the module instance that object belongs to.
//
// Only GraphNodeSubPath implementations can be referrers, so this method will
// panic if the given vertex does not implement that interface.
func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string {
path := vertexReferencePath(referrer)
return m.mapKey(path, addr)
}
// NewReferenceMap is used to create a new reference map for the
// given set of vertices.
func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
var m ReferenceMap
// Build the lookup table
vertices := make(map[string][]dag.Vertex)
for _, v := range vs {
_, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
continue
}
// We're only looking for referenceable nodes
rn, ok := v.(GraphNodeReferenceable)
if !ok {
continue
}
path := m.vertexReferenceablePath(v)
// Go through and cache them
for _, addr := range rn.ReferenceableAddrs() {
key := m.mapKey(path, addr)
vertices[key] = append(vertices[key], v)
}
// Any node can be referenced by the address of the module it belongs
// to or any of that module's ancestors.
for _, addr := range path.Ancestors()[1:] {
// Can be referenced either as the specific call instance (with
// an instance key) or as the bare module call itself (the "module"
// block in the parent module that created the instance).
callPath, call := addr.Call()
callInstPath, callInst := addr.CallInstance()
callKey := m.mapKey(callPath, call)
callInstKey := m.mapKey(callInstPath, callInst)
vertices[callKey] = append(vertices[callKey], v)
vertices[callInstKey] = append(vertices[callInstKey], v)
}
}
// Build the lookup table for referenced by
edges := make(map[string][]dag.Vertex)
for _, v := range vs {
_, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
continue
}
rn, ok := v.(GraphNodeReferencer)
if !ok {
// We're only looking for referenceable nodes
continue
}
// Go through and cache them
for _, ref := range rn.References() {
if ref.Subject == nil {
// Should never happen
panic(fmt.Sprintf("%T.References returned reference with nil subject", rn))
}
key := m.referenceMapKey(v, ref.Subject)
edges[key] = append(edges[key], v)
}
}
m.vertices = vertices
m.edges = edges
return &m
}
// ReferencesFromConfig returns the references that a configuration has
// based on the interpolated variables in a configuration.
func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference {
if body == nil {
return nil
}
refs, _ := lang.ReferencesInBlock(body, schema)
return refs
}
// appendResourceDestroyReferences identifies resource and resource instance
// references in the given slice and appends to it the "destroy-phase"
// equivalents of those references, returning the result.
//
// This can be used in the References implementation for a node which must also
// depend on the destruction of anything it references.
func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference {
given := refs
for _, ref := range given {
switch tr := ref.Subject.(type) {
case addrs.Resource:
newRef := *ref // shallow copy
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
refs = append(refs, &newRef)
case addrs.ResourceInstance:
newRef := *ref // shallow copy
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
refs = append(refs, &newRef)
}
}
return refs
}
func modulePrefixStr(p addrs.ModuleInstance) string {
return p.String()
}
func modulePrefixList(result []string, prefix string) []string {
if prefix != "" {
for i, v := range result {
result[i] = fmt.Sprintf("%s.%s", prefix, v)
}
}
return result
}
| terraform/transform_reference.go | 1 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.9817178249359131,
0.03462284803390503,
0.00016353049431927502,
0.00024208043760154396,
0.1662924587726593
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\n",
"\treturn nil\n",
"}\n",
"\n",
"// PruneUnusedValuesTransformer is s GraphTransformer that removes local and\n",
"// output values which are not referenced in the graph. Since outputs and\n",
"// locals always need to be evaluated, if they reference a resource that is not\n",
"// available in the state the interpolation could fail.\n",
"type PruneUnusedValuesTransformer struct{}\n",
"\n",
"func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"// PruneUnusedValuesTransformer is a GraphTransformer that removes local,\n",
"// variable, and output values which are not referenced in the graph. If these\n",
"// values reference a resource that is no longer in the state the interpolation\n",
"// could fail.\n",
"type PruneUnusedValuesTransformer struct {\n",
"\tDestroy bool\n",
"}\n"
],
"file_path": "terraform/transform_reference.go",
"type": "replace",
"edit_start_line_idx": 208
} | variable "foo" {}
resource "aws_instance" "web" {
count = "${var.foo}"
}
| config/testdata/validate-count-user-var/main.tf | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.0001691816869424656,
0.0001691816869424656,
0.0001691816869424656,
0.0001691816869424656,
0
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\n",
"\treturn nil\n",
"}\n",
"\n",
"// PruneUnusedValuesTransformer is s GraphTransformer that removes local and\n",
"// output values which are not referenced in the graph. Since outputs and\n",
"// locals always need to be evaluated, if they reference a resource that is not\n",
"// available in the state the interpolation could fail.\n",
"type PruneUnusedValuesTransformer struct{}\n",
"\n",
"func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"// PruneUnusedValuesTransformer is a GraphTransformer that removes local,\n",
"// variable, and output values which are not referenced in the graph. If these\n",
"// values reference a resource that is no longer in the state the interpolation\n",
"// could fail.\n",
"type PruneUnusedValuesTransformer struct {\n",
"\tDestroy bool\n",
"}\n"
],
"file_path": "terraform/transform_reference.go",
"type": "replace",
"edit_start_line_idx": 208
} | package command
import (
"context"
"fmt"
"strings"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/command/clistate"
"github.com/hashicorp/terraform/tfdiags"
"github.com/mitchellh/cli"
)
// StateRmCommand is a Command implementation that shows a single resource.
type StateRmCommand struct {
StateMeta
}
func (c *StateRmCommand) Run(args []string) int {
args, err := c.Meta.process(args, true)
if err != nil {
return 1
}
var dryRun bool
cmdFlags := c.Meta.defaultFlagSet("state rm")
cmdFlags.BoolVar(&dryRun, "dry-run", false, "dry run")
cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup")
cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state")
cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout")
cmdFlags.StringVar(&c.statePath, "state", "", "path")
if err := cmdFlags.Parse(args); err != nil {
c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error()))
return 1
}
args = cmdFlags.Args()
if len(args) < 1 {
c.Ui.Error("At least one address is required.\n")
return cli.RunResultHelp
}
// Get the state
stateMgr, err := c.State()
if err != nil {
c.Ui.Error(fmt.Sprintf(errStateLoadingState, err))
return 1
}
if c.stateLock {
stateLocker := clistate.NewLocker(context.Background(), c.stateLockTimeout, c.Ui, c.Colorize())
if err := stateLocker.Lock(stateMgr, "state-rm"); err != nil {
c.Ui.Error(fmt.Sprintf("Error locking state: %s", err))
return 1
}
defer stateLocker.Unlock(nil)
}
if err := stateMgr.RefreshState(); err != nil {
c.Ui.Error(fmt.Sprintf("Failed to refresh state: %s", err))
return 1
}
state := stateMgr.State()
if state == nil {
c.Ui.Error(fmt.Sprintf(errStateNotFound))
return 1
}
// This command primarily works with resource instances, though it will
// also clean up any modules and resources left empty by actions it takes.
var addrs []addrs.AbsResourceInstance
var diags tfdiags.Diagnostics
for _, addrStr := range args {
moreAddrs, moreDiags := c.lookupResourceInstanceAddr(state, true, addrStr)
addrs = append(addrs, moreAddrs...)
diags = diags.Append(moreDiags)
}
if diags.HasErrors() {
c.showDiagnostics(diags)
return 1
}
prefix := "Removed "
if dryRun {
prefix = "Would remove "
}
var isCount int
ss := state.SyncWrapper()
for _, addr := range addrs {
isCount++
c.Ui.Output(prefix + addr.String())
if !dryRun {
ss.ForgetResourceInstanceAll(addr)
ss.RemoveResourceIfEmpty(addr.ContainingResource())
}
}
if dryRun {
if isCount == 0 {
c.Ui.Output("Would have removed nothing.")
}
return 0 // This is as far as we go in dry-run mode
}
if err := stateMgr.WriteState(state); err != nil {
c.Ui.Error(fmt.Sprintf(errStateRmPersist, err))
return 1
}
if err := stateMgr.PersistState(); err != nil {
c.Ui.Error(fmt.Sprintf(errStateRmPersist, err))
return 1
}
if len(diags) > 0 {
c.showDiagnostics(diags)
}
if isCount == 0 {
c.Ui.Output("No matching resource instances found.")
} else {
c.Ui.Output(fmt.Sprintf("Successfully removed %d resource instance(s).", isCount))
}
return 0
}
func (c *StateRmCommand) Help() string {
helpText := `
Usage: terraform state rm [options] ADDRESS...
Remove one or more items from the Terraform state, causing Terraform to
"forget" those items without first destroying them in the remote system.
This command removes one or more resource instances from the Terraform state
based on the addresses given. You can view and list the available instances
with "terraform state list".
If you give the address of an entire module then all of the instances in
that module and any of its child modules will be removed from the state.
If you give the address of a resource that has "count" or "for_each" set,
all of the instances of that resource will be removed from the state.
Options:
-dry-run If set, prints out what would've been removed but
doesn't actually remove anything.
-backup=PATH Path where Terraform should write the backup
state.
-lock=true Lock the state file when locking is supported.
-lock-timeout=0s Duration to retry a state lock.
-state=PATH Path to the state file to update. Defaults to the current
workspace state.
`
return strings.TrimSpace(helpText)
}
func (c *StateRmCommand) Synopsis() string {
return "Remove instances from the state"
}
const errStateRm = `Error removing items from the state: %s
The state was not saved. No items were removed from the persisted
state. No backup was created since no modification occurred. Please
resolve the issue above and try again.`
const errStateRmPersist = `Error saving the state: %s
The state was not saved. No items were removed from the persisted
state. No backup was created since no modification occurred. Please
resolve the issue above and try again.`
| command/state_rm.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.0002190672094002366,
0.00017037612269632518,
0.00016039311594795436,
0.00016669672913849354,
0.000012386571142997127
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\n",
"\treturn nil\n",
"}\n",
"\n",
"// PruneUnusedValuesTransformer is s GraphTransformer that removes local and\n",
"// output values which are not referenced in the graph. Since outputs and\n",
"// locals always need to be evaluated, if they reference a resource that is not\n",
"// available in the state the interpolation could fail.\n",
"type PruneUnusedValuesTransformer struct{}\n",
"\n",
"func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"// PruneUnusedValuesTransformer is a GraphTransformer that removes local,\n",
"// variable, and output values which are not referenced in the graph. If these\n",
"// values reference a resource that is no longer in the state the interpolation\n",
"// could fail.\n",
"type PruneUnusedValuesTransformer struct {\n",
"\tDestroy bool\n",
"}\n"
],
"file_path": "terraform/transform_reference.go",
"type": "replace",
"edit_start_line_idx": 208
} | package getter
// Storage is an interface that knows how to look up downloaded directories
// as well as download and update directories from their sources into the
// proper location.
type Storage interface {
// Dir returns the directory on local disk where the directory source
// can be loaded from.
Dir(string) (string, bool, error)
// Get will download and optionally update the given directory.
Get(string, string, bool) error
}
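// exampleStorage is an illustrative sketch and not part of the original file:
// a minimal in-memory Storage used only to show the contract. Real
// implementations fetch source directories onto disk; the map here is an
// assumption made to keep the example dependency-free.
type exampleStorage struct {
	dirs map[string]string // lookup key -> previously fetched directory
}
// Dir reports the directory recorded for key, if any.
func (s *exampleStorage) Dir(key string) (string, bool, error) {
	dir, ok := s.dirs[key]
	return dir, ok, nil
}
// Get pretends to download source for key; a real implementation would fetch
// it into a directory on disk and refresh it when update is true.
func (s *exampleStorage) Get(key string, source string, update bool) error {
	if _, ok := s.dirs[key]; ok && !update {
		return nil // already present and no update requested
	}
	s.dirs[key] = source
	return nil
}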
| vendor/github.com/hashicorp/go-getter/storage.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.00042481935815885663,
0.00029530038591474295,
0.00016578139911871403,
0.00029530038591474295,
0.0001295189867960289
] |
{
"id": 4,
"code_window": [
"\n",
"func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {\n",
"\t// this might need multiple runs in order to ensure that pruning a value\n",
"\t// doesn't effect a previously checked value.\n",
"\tfor removed := 0; ; removed = 0 {\n",
"\t\tfor _, v := range g.Vertices() {\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t// Pruning a value can effect previously checked edges, so loop until there\n",
"\t// are no more changes.\n"
],
"file_path": "terraform/transform_reference.go",
"type": "replace",
"edit_start_line_idx": 215
} | package terraform
import (
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/plans"
"github.com/hashicorp/terraform/states"
"github.com/hashicorp/terraform/tfdiags"
)
// ApplyGraphBuilder implements GraphBuilder and is responsible for building
// a graph for applying a Terraform diff.
//
// Because the graph is built from the diff (vs. the config or state),
// this helps ensure that the apply-time graph doesn't modify any resources
// that aren't explicitly in the diff. There are other scenarios where the
// apply can deviate from the diff, so this is just one layer of protection.
type ApplyGraphBuilder struct {
// Config is the configuration tree that the diff was built from.
Config *configs.Config
// Changes describes the changes that we need apply.
Changes *plans.Changes
// State is the current state
State *states.State
// Components is a factory for the plug-in components (providers and
// provisioners) available for use.
Components contextComponentFactory
// Schemas is the repository of schemas we will draw from to analyse
// the configuration.
Schemas *Schemas
// Targets are resources to target. This is only required to make sure
// unnecessary outputs aren't included in the apply graph. The plan
// builder successfully handles targeting resources. In the future,
// outputs should go into the diff so that this is unnecessary.
Targets []addrs.Targetable
// DisableReduce, if true, will not reduce the graph. Great for testing.
DisableReduce bool
// Destroy, if true, represents a pure destroy operation
Destroy bool
// Validate will do structural validation of the graph.
Validate bool
}
// See GraphBuilder
func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
return (&BasicGraphBuilder{
Steps: b.Steps(),
Validate: b.Validate,
Name: "ApplyGraphBuilder",
}).Build(path)
}
// See GraphBuilder
func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
// Custom factory for creating providers.
concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
return &NodeApplyableProvider{
NodeAbstractProvider: a,
}
}
concreteResource := func(a *NodeAbstractResource) dag.Vertex {
return &NodeApplyableResource{
NodeAbstractResource: a,
}
}
concreteOrphanResource := func(a *NodeAbstractResource) dag.Vertex {
return &NodeDestroyResource{
NodeAbstractResource: a,
}
}
concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex {
return &NodeApplyableResourceInstance{
NodeAbstractResourceInstance: a,
}
}
steps := []GraphTransformer{
// Creates all the resources represented in the config. During apply,
// we use this just to ensure that the whole-resource metadata is
// updated to reflect things such as whether the count argument is
// set in config, or which provider configuration manages each resource.
&ConfigTransformer{
Concrete: concreteResource,
Config: b.Config,
},
// Creates all the resource instances represented in the diff, along
// with dependency edges against the whole-resource nodes added by
// ConfigTransformer above.
&DiffTransformer{
Concrete: concreteResourceInstance,
State: b.State,
Changes: b.Changes,
},
// Creates extra cleanup nodes for any entire resources that are
// no longer present in config, so we can make sure we clean up the
// leftover empty resource states after the instances have been
// destroyed.
// (We don't track this particular type of change in the plan because
		// it's just cleanup of our own state object, and so doesn't affect
// any real remote objects or consumable outputs.)
&OrphanResourceTransformer{
Concrete: concreteOrphanResource,
Config: b.Config,
State: b.State,
},
// Create orphan output nodes
&OrphanOutputTransformer{Config: b.Config, State: b.State},
// Attach the configuration to any resources
&AttachResourceConfigTransformer{Config: b.Config},
// Attach the state
&AttachStateTransformer{State: b.State},
// Provisioner-related transformations
&MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()},
&ProvisionerTransformer{},
// Add root variables
&RootVariableTransformer{Config: b.Config},
// Add the local values
&LocalTransformer{Config: b.Config},
// Add the outputs
&OutputTransformer{Config: b.Config},
// Add module variables
&ModuleVariableTransformer{Config: b.Config},
// add providers
TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config),
// Remove modules no longer present in the config
&RemovedModuleTransformer{Config: b.Config, State: b.State},
// Must attach schemas before ReferenceTransformer so that we can
// analyze the configuration to find references.
&AttachSchemaTransformer{Schemas: b.Schemas},
// Connect references so ordering is correct
&ReferenceTransformer{},
&AttachDependenciesTransformer{},
// Destruction ordering
&DestroyEdgeTransformer{
Config: b.Config,
State: b.State,
Schemas: b.Schemas,
},
&CBDEdgeTransformer{
Config: b.Config,
State: b.State,
Schemas: b.Schemas,
Destroy: b.Destroy,
},
// Handle destroy time transformations for output and local values.
// Reverse the edges from outputs and locals, so that
// interpolations don't fail during destroy.
// Create a destroy node for outputs to remove them from the state.
// Prune unreferenced values, which may have interpolations that can't
// be resolved.
GraphTransformIf(
func() bool { return b.Destroy },
GraphTransformMulti(
&DestroyValueReferenceTransformer{},
&DestroyOutputTransformer{},
&PruneUnusedValuesTransformer{},
),
),
// Add the node to fix the state count boundaries
&CountBoundaryTransformer{
Config: b.Config,
},
// Target
&TargetsTransformer{Targets: b.Targets},
// Close opened plugin connections
&CloseProviderTransformer{},
&CloseProvisionerTransformer{},
// Single root
&RootTransformer{},
}
if !b.DisableReduce {
// Perform the transitive reduction to make our graph a bit
// more sane if possible (it usually is possible).
steps = append(steps, &TransitiveReductionTransformer{})
}
return steps
}
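// Illustrative usage sketch, not part of the original file. The config,
// changes, state, components and schemas values are assumed to come from the
// surrounding terraform context in real use:
//
//	builder := &ApplyGraphBuilder{
//		Config:     config,
//		Changes:    changes,
//		State:      state,
//		Components: components,
//		Schemas:    schemas,
//		Validate:   true,
//	}
//	graph, diags := builder.Build(addrs.RootModuleInstance)
//	if diags.HasErrors() {
//		// handle the diagnostics before walking the graph
//	}
//	_ = graph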
| terraform/graph_builder_apply.go | 1 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.003331356216222048,
0.0003632878651842475,
0.000163553879247047,
0.00016945951210800558,
0.0006751145701855421
] |
{
"id": 4,
"code_window": [
"\n",
"func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {\n",
"\t// this might need multiple runs in order to ensure that pruning a value\n",
"\t// doesn't effect a previously checked value.\n",
"\tfor removed := 0; ; removed = 0 {\n",
"\t\tfor _, v := range g.Vertices() {\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t// Pruning a value can effect previously checked edges, so loop until there\n",
"\t// are no more changes.\n"
],
"file_path": "terraform/transform_reference.go",
"type": "replace",
"edit_start_line_idx": 215
} | package openstack
import (
"fmt"
"log"
"github.com/hashicorp/terraform/helper/schema"
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
)
func resourceNetworkingFloatingIPAssociateV2() *schema.Resource {
return &schema.Resource{
Create: resourceNetworkingFloatingIPAssociateV2Create,
Read: resourceNetworkingFloatingIPAssociateV2Read,
Delete: resourceNetworkingFloatingIPAssociateV2Delete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"region": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"floating_ip": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"port_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
},
}
}
func resourceNetworkingFloatingIPAssociateV2Create(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
networkingClient, err := config.networkingV2Client(GetRegion(d, config))
if err != nil {
return fmt.Errorf("Error creating OpenStack network client: %s", err)
}
floatingIP := d.Get("floating_ip").(string)
portID := d.Get("port_id").(string)
fipID, err := networkingFloatingIPV2ID(networkingClient, floatingIP)
if err != nil {
return fmt.Errorf("Unable to get ID of openstack_networking_floatingip_v2: %s", err)
}
updateOpts := floatingips.UpdateOpts{
PortID: &portID,
}
log.Printf("[DEBUG] openstack_networking_floatingip_associate_v2 create options: %#v", updateOpts)
_, err = floatingips.Update(networkingClient, fipID, updateOpts).Extract()
if err != nil {
return fmt.Errorf("Error associating openstack_networking_floatingip_v2 %s to openstack_networking_port_v2 %s: %s",
fipID, portID, err)
}
d.SetId(fipID)
log.Printf("[DEBUG] Created association between openstack_networking_floatingip_v2 %s and openstack_networking_port_v2 %s",
fipID, portID)
return resourceNetworkingFloatingIPAssociateV2Read(d, meta)
}
func resourceNetworkingFloatingIPAssociateV2Read(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
networkingClient, err := config.networkingV2Client(GetRegion(d, config))
if err != nil {
return fmt.Errorf("Error creating OpenStack network client: %s", err)
}
fip, err := floatingips.Get(networkingClient, d.Id()).Extract()
if err != nil {
return CheckDeleted(d, err, "Error getting openstack_networking_floatingip_v2")
}
log.Printf("[DEBUG] Retrieved openstack_networking_floatingip_v2 %s: %#v", d.Id(), fip)
d.Set("floating_ip", fip.FloatingIP)
d.Set("port_id", fip.PortID)
d.Set("region", GetRegion(d, config))
return nil
}
func resourceNetworkingFloatingIPAssociateV2Delete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
networkingClient, err := config.networkingV2Client(GetRegion(d, config))
if err != nil {
return fmt.Errorf("Error creating OpenStack network client: %s", err)
}
portID := d.Get("port_id").(string)
updateOpts := floatingips.UpdateOpts{
PortID: new(string),
}
log.Printf("[DEBUG] openstack_networking_floatingip_v2 disassociating options: %#v", updateOpts)
_, err = floatingips.Update(networkingClient, d.Id(), updateOpts).Extract()
if err != nil {
return fmt.Errorf("Error disassociating openstack_networking_floatingip_v2 %s from openstack_networking_port_v2 %s: %s",
d.Id(), portID, err)
}
return nil
}
| vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_floatingip_associate_v2.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.00017377149197272956,
0.0001697275583865121,
0.00016558184870518744,
0.00017005820700433105,
0.000002543292794143781
] |
{
"id": 4,
"code_window": [
"\n",
"func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {\n",
"\t// this might need multiple runs in order to ensure that pruning a value\n",
"\t// doesn't effect a previously checked value.\n",
"\tfor removed := 0; ; removed = 0 {\n",
"\t\tfor _, v := range g.Vertices() {\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t// Pruning a value can effect previously checked edges, so loop until there\n",
"\t// are no more changes.\n"
],
"file_path": "terraform/transform_reference.go",
"type": "replace",
"edit_start_line_idx": 215
} | module "subchild" {
source = "./subchild"
}
| terraform/testdata/apply-destroy-nested-module/child/main.tf | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.0001756196143105626,
0.0001756196143105626,
0.0001756196143105626,
0.0001756196143105626,
0
] |
{
"id": 4,
"code_window": [
"\n",
"func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {\n",
"\t// this might need multiple runs in order to ensure that pruning a value\n",
"\t// doesn't effect a previously checked value.\n",
"\tfor removed := 0; ; removed = 0 {\n",
"\t\tfor _, v := range g.Vertices() {\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t// Pruning a value can effect previously checked edges, so loop until there\n",
"\t// are no more changes.\n"
],
"file_path": "terraform/transform_reference.go",
"type": "replace",
"edit_start_line_idx": 215
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package md4 implements the MD4 hash algorithm as defined in RFC 1320.
//
// Deprecated: MD4 is cryptographically broken and should only be used
// where compatibility with legacy systems, not security, is the goal. Instead,
// use a secure hash like SHA-256 (from crypto/sha256).
package md4 // import "golang.org/x/crypto/md4"
import (
"crypto"
"hash"
)
func init() {
crypto.RegisterHash(crypto.MD4, New)
}
// The size of an MD4 checksum in bytes.
const Size = 16
// The blocksize of MD4 in bytes.
const BlockSize = 64
const (
_Chunk = 64
_Init0 = 0x67452301
_Init1 = 0xEFCDAB89
_Init2 = 0x98BADCFE
_Init3 = 0x10325476
)
// digest represents the partial evaluation of a checksum.
type digest struct {
s [4]uint32
x [_Chunk]byte
nx int
len uint64
}
func (d *digest) Reset() {
d.s[0] = _Init0
d.s[1] = _Init1
d.s[2] = _Init2
d.s[3] = _Init3
d.nx = 0
d.len = 0
}
// New returns a new hash.Hash computing the MD4 checksum.
func New() hash.Hash {
d := new(digest)
d.Reset()
return d
}
func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
if d.nx > 0 {
n := len(p)
if n > _Chunk-d.nx {
n = _Chunk - d.nx
}
for i := 0; i < n; i++ {
d.x[d.nx+i] = p[i]
}
d.nx += n
if d.nx == _Chunk {
_Block(d, d.x[0:])
d.nx = 0
}
p = p[n:]
}
n := _Block(d, p)
p = p[n:]
if len(p) > 0 {
d.nx = copy(d.x[:], p)
}
return
}
func (d0 *digest) Sum(in []byte) []byte {
// Make a copy of d0, so that caller can keep writing and summing.
d := new(digest)
*d = *d0
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
len := d.len
var tmp [64]byte
tmp[0] = 0x80
if len%64 < 56 {
d.Write(tmp[0 : 56-len%64])
} else {
d.Write(tmp[0 : 64+56-len%64])
}
// Length in bits.
len <<= 3
for i := uint(0); i < 8; i++ {
tmp[i] = byte(len >> (8 * i))
}
d.Write(tmp[0:8])
if d.nx != 0 {
panic("d.nx != 0")
}
for _, s := range d.s {
in = append(in, byte(s>>0))
in = append(in, byte(s>>8))
in = append(in, byte(s>>16))
in = append(in, byte(s>>24))
}
return in
}
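// Illustrative usage sketch, not part of the original file: computing an MD4
// digest of a byte slice from a caller's perspective.
//
//	h := md4.New()
//	h.Write([]byte("hello world"))
//	digest := h.Sum(nil) // 16-byte checksum
//	fmt.Printf("%x\n", digest)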
| vendor/golang.org/x/crypto/md4/md4.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.0004210148472338915,
0.0001885306410258636,
0.00016337321721948683,
0.00016843144840095192,
0.00006720489909639582
] |
{
"id": 5,
"code_window": [
"\tfor removed := 0; ; removed = 0 {\n",
"\t\tfor _, v := range g.Vertices() {\n",
"\t\t\tswitch v.(type) {\n",
"\t\t\tcase *NodeApplyableOutput, *NodeLocal:\n",
"\t\t\t\t// OK\n",
"\t\t\tdefault:\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\tswitch v := v.(type) {\n",
"\t\t\tcase *NodeApplyableOutput:\n",
"\t\t\t\t// If we're not certain this is a full destroy, we need to keep any\n",
"\t\t\t\t// root module outputs\n",
"\t\t\t\tif v.Addr.Module.IsRoot() && !t.Destroy {\n",
"\t\t\t\t\tcontinue\n",
"\t\t\t\t}\n",
"\t\t\tcase *NodeLocal, *NodeApplyableModuleVariable:\n"
],
"file_path": "terraform/transform_reference.go",
"type": "replace",
"edit_start_line_idx": 219
} | package terraform
import (
"fmt"
"log"
"sort"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/lang"
"github.com/hashicorp/terraform/states"
)
// GraphNodeReferenceable must be implemented by any node that represents
// a Terraform thing that can be referenced (resource, module, etc.).
//
// Even if the thing has no name, this should return an empty list. By
// implementing this and returning a non-nil result, you say that this CAN
// be referenced and other methods of referencing may still be possible (such
// as by path!)
type GraphNodeReferenceable interface {
GraphNodeSubPath
// ReferenceableAddrs returns a list of addresses through which this can be
// referenced.
ReferenceableAddrs() []addrs.Referenceable
}
// GraphNodeReferencer must be implemented by nodes that reference other
// Terraform items and therefore depend on them.
type GraphNodeReferencer interface {
GraphNodeSubPath
// References returns a list of references made by this node, which
// include both a referenced address and source location information for
// the reference.
References() []*addrs.Reference
}
type GraphNodeAttachDependencies interface {
GraphNodeResource
AttachDependencies([]addrs.AbsResource)
}
// GraphNodeReferenceOutside is an interface that can optionally be implemented.
// A node that implements it can specify that its own referenceable addresses
// and/or the addresses it references are in a different module than the
// node itself.
//
// Any referenceable addresses returned by ReferenceableAddrs are interpreted
// relative to the returned selfPath.
//
// Any references returned by References are interpreted relative to the
// returned referencePath.
//
// It is valid but not required for either of these paths to match what is
// returned by method Path, though if both match the main Path then there
// is no reason to implement this method.
//
// The primary use-case for this is the nodes representing module input
// variables, since their expressions are resolved in terms of their calling
// module, but they are still referenced from their own module.
type GraphNodeReferenceOutside interface {
// ReferenceOutside returns a path in which any references from this node
// are resolved.
ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance)
}
// ReferenceTransformer is a GraphTransformer that connects all the
// nodes that reference each other in order to form the proper ordering.
type ReferenceTransformer struct{}
func (t *ReferenceTransformer) Transform(g *Graph) error {
// Build a reference map so we can efficiently look up the references
vs := g.Vertices()
m := NewReferenceMap(vs)
// Find the things that reference things and connect them
for _, v := range vs {
parents, _ := m.References(v)
parentsDbg := make([]string, len(parents))
for i, v := range parents {
parentsDbg[i] = dag.VertexName(v)
}
log.Printf(
"[DEBUG] ReferenceTransformer: %q references: %v",
dag.VertexName(v), parentsDbg)
for _, parent := range parents {
g.Connect(dag.BasicEdge(v, parent))
}
if len(parents) > 0 {
continue
}
}
return nil
}
// AttachDependenciesTransformer records all resource dependencies for each
// instance, and attaches the addresses to the node itself. Managed resource
// will record these in the state for proper ordering of destroy operations.
type AttachDependenciesTransformer struct {
Config *configs.Config
State *states.State
Schemas *Schemas
}
func (t AttachDependenciesTransformer) Transform(g *Graph) error {
for _, v := range g.Vertices() {
attacher, ok := v.(GraphNodeAttachDependencies)
if !ok {
continue
}
selfAddr := attacher.ResourceAddr()
// Data sources don't need to track destroy dependencies
if selfAddr.Resource.Mode == addrs.DataResourceMode {
continue
}
ans, err := g.Ancestors(v)
if err != nil {
return err
}
// dedupe addrs when there's multiple instances involved, or
// multiple paths in the un-reduced graph
depMap := map[string]addrs.AbsResource{}
for _, d := range ans.List() {
var addr addrs.AbsResource
switch d := d.(type) {
case GraphNodeResourceInstance:
instAddr := d.ResourceInstanceAddr()
addr = instAddr.Resource.Resource.Absolute(instAddr.Module)
case GraphNodeResource:
addr = d.ResourceAddr()
default:
continue
}
// Data sources don't need to track destroy dependencies
if addr.Resource.Mode == addrs.DataResourceMode {
continue
}
if addr.Equal(selfAddr) {
continue
}
depMap[addr.String()] = addr
}
deps := make([]addrs.AbsResource, 0, len(depMap))
for _, d := range depMap {
deps = append(deps, d)
}
sort.Slice(deps, func(i, j int) bool {
return deps[i].String() < deps[j].String()
})
log.Printf("[TRACE] AttachDependenciesTransformer: %s depends on %s", attacher.ResourceAddr(), deps)
attacher.AttachDependencies(deps)
}
return nil
}
// DestroyValueReferenceTransformer is a GraphTransformer that reverses the edges
// for locals and outputs that depend on other nodes which will be
// removed during destroy. If a destroy node is evaluated before the local or
// output value, it will be removed from the state, and the later interpolation
// will fail.
type DestroyValueReferenceTransformer struct{}
func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error {
vs := g.Vertices()
for _, v := range vs {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
// reverse any outgoing edges so that the value is evaluated first.
for _, e := range g.EdgesFrom(v) {
target := e.Target()
// only destroy nodes will be evaluated in reverse
if _, ok := target.(GraphNodeDestroyer); !ok {
continue
}
log.Printf("[TRACE] output dep: %s", dag.VertexName(target))
g.RemoveEdge(e)
g.Connect(&DestroyEdge{S: target, T: v})
}
}
return nil
}
// PruneUnusedValuesTransformer is a GraphTransformer that removes local and
// output values which are not referenced in the graph. Since outputs and
// locals always need to be evaluated, if they reference a resource that is not
// available in the state the interpolation could fail.
type PruneUnusedValuesTransformer struct{}
func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
// this might need multiple runs in order to ensure that pruning a value
// doesn't affect a previously checked value.
for removed := 0; ; removed = 0 {
for _, v := range g.Vertices() {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
dependants := g.UpEdges(v)
switch dependants.Len() {
case 0:
// nothing at all depends on this
g.Remove(v)
removed++
case 1:
// because an output's destroy node always depends on the output,
// we need to check for the case of a single destroy node.
d := dependants.List()[0]
if _, ok := d.(*NodeDestroyableOutput); ok {
g.Remove(v)
removed++
}
}
}
if removed == 0 {
break
}
}
return nil
}
// ReferenceMap is a structure that can be used to efficiently check
// for references on a graph.
type ReferenceMap struct {
// vertices is a map from internal reference keys (as produced by the
// mapKey method) to one or more vertices that are identified by each key.
//
// A particular reference key might actually identify multiple vertices,
// e.g. in situations where one object is contained inside another.
vertices map[string][]dag.Vertex
// edges is a map whose keys are a subset of the internal reference keys
// from "vertices", and whose values are the nodes that refer to each
// key. The values in this map are the referrers, while values in
// "verticies" are the referents. The keys in both cases are referents.
edges map[string][]dag.Vertex
}
// References returns the set of vertices that the given vertex refers to,
// and any referenced addresses that do not have corresponding vertices.
func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) {
rn, ok := v.(GraphNodeReferencer)
if !ok {
return nil, nil
}
if _, ok := v.(GraphNodeSubPath); !ok {
return nil, nil
}
var matches []dag.Vertex
var missing []addrs.Referenceable
for _, ref := range rn.References() {
subject := ref.Subject
key := m.referenceMapKey(v, subject)
if _, exists := m.vertices[key]; !exists {
// If what we were looking for was a ResourceInstance then we
// might be in a resource-oriented graph rather than an
// instance-oriented graph, and so we'll see if we have the
// resource itself instead.
switch ri := subject.(type) {
case addrs.ResourceInstance:
subject = ri.ContainingResource()
case addrs.ResourceInstancePhase:
subject = ri.ContainingResource()
}
key = m.referenceMapKey(v, subject)
}
vertices := m.vertices[key]
for _, rv := range vertices {
// don't include self-references
if rv == v {
continue
}
matches = append(matches, rv)
}
if len(vertices) == 0 {
missing = append(missing, ref.Subject)
}
}
return matches, missing
}
// Referrers returns the set of vertices that refer to the given vertex.
func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex {
rn, ok := v.(GraphNodeReferenceable)
if !ok {
return nil
}
sp, ok := v.(GraphNodeSubPath)
if !ok {
return nil
}
var matches []dag.Vertex
for _, addr := range rn.ReferenceableAddrs() {
key := m.mapKey(sp.Path(), addr)
referrers, ok := m.edges[key]
if !ok {
continue
}
// If the referrer set includes our own given vertex then we skip,
// since we don't want to return self-references.
selfRef := false
for _, p := range referrers {
if p == v {
selfRef = true
break
}
}
if selfRef {
continue
}
matches = append(matches, referrers...)
}
return matches
}
func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string {
return fmt.Sprintf("%s|%s", path.String(), addr.String())
}
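// Editor's illustration (hedged, not from the original source): with the
// default String() renderings in the addrs package, a resource aws_instance.web
// declared in module.child yields the key "module.child|aws_instance.web",
// while the root module instance renders as an empty string, giving
// "|aws_instance.web".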
// vertexReferenceablePath returns the path in which the given vertex can be
// referenced. This is the path that the results of its ReferenceableAddrs
// method are considered to be relative to.
//
// Only GraphNodeSubPath implementations can be referenced, so this method will
// panic if the given vertex does not implement that interface.
func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance {
sp, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp))
}
if outside, ok := v.(GraphNodeReferenceOutside); ok {
// Vertex is referenced from a different module than where it was
// declared.
path, _ := outside.ReferenceOutside()
return path
}
// Vertex is referenced from the same module as where it was declared.
return sp.Path()
}
// vertexReferencePath returns the path in which references _from_ the given
// vertex must be interpreted.
//
// Only GraphNodeSubPath implementations can have references, so this method
// will panic if the given vertex does not implement that interface.
func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance {
sp, ok := referrer.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp))
}
var path addrs.ModuleInstance
if outside, ok := referrer.(GraphNodeReferenceOutside); ok {
// Vertex makes references to objects in a different module than where
// it was declared.
_, path = outside.ReferenceOutside()
return path
}
// Vertex makes references to objects in the same module as where it
// was declared.
return sp.Path()
}
// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex
// that the reference is from, and "addr" is the address of the object being
// referenced.
//
// The result is an opaque string that includes both the address of the given
// object and the address of the module instance that object belongs to.
//
// Only GraphNodeSubPath implementations can be referrers, so this method will
// panic if the given vertex does not implement that interface.
func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string {
path := vertexReferencePath(referrer)
return m.mapKey(path, addr)
}
// NewReferenceMap is used to create a new reference map for the
// given set of vertices.
func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
var m ReferenceMap
// Build the lookup table
vertices := make(map[string][]dag.Vertex)
for _, v := range vs {
_, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
continue
}
// We're only looking for referenceable nodes
rn, ok := v.(GraphNodeReferenceable)
if !ok {
continue
}
path := m.vertexReferenceablePath(v)
// Go through and cache them
for _, addr := range rn.ReferenceableAddrs() {
key := m.mapKey(path, addr)
vertices[key] = append(vertices[key], v)
}
// Any node can be referenced by the address of the module it belongs
// to or any of that module's ancestors.
for _, addr := range path.Ancestors()[1:] {
// Can be referenced either as the specific call instance (with
// an instance key) or as the bare module call itself (the "module"
// block in the parent module that created the instance).
callPath, call := addr.Call()
callInstPath, callInst := addr.CallInstance()
callKey := m.mapKey(callPath, call)
callInstKey := m.mapKey(callInstPath, callInst)
vertices[callKey] = append(vertices[callKey], v)
vertices[callInstKey] = append(vertices[callInstKey], v)
}
}
// Build the lookup table for referenced by
edges := make(map[string][]dag.Vertex)
for _, v := range vs {
_, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
continue
}
rn, ok := v.(GraphNodeReferencer)
if !ok {
			// We're only looking for nodes that make references
continue
}
// Go through and cache them
for _, ref := range rn.References() {
if ref.Subject == nil {
// Should never happen
panic(fmt.Sprintf("%T.References returned reference with nil subject", rn))
}
key := m.referenceMapKey(v, ref.Subject)
edges[key] = append(edges[key], v)
}
}
m.vertices = vertices
m.edges = edges
return &m
}
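// Editor's hedged sketch (not part of the original file): a reference-wiring
// transformer in this package would typically build the map once and connect
// each vertex to whatever it refers to, roughly as follows; the surrounding
// graph plumbing is assumed rather than quoted from this file.
//
//	m := NewReferenceMap(g.Vertices())
//	for _, v := range g.Vertices() {
//		parents, _ := m.References(v)
//		for _, parent := range parents {
//			g.Connect(dag.BasicEdge(v, parent))
//		}
//	}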
// ReferencesFromConfig returns the references that a configuration has
// based on the interpolated variables in that configuration.
func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference {
if body == nil {
return nil
}
refs, _ := lang.ReferencesInBlock(body, schema)
return refs
}
// appendResourceDestroyReferences identifies resource and resource instance
// references in the given slice and appends to it the "destroy-phase"
// equivalents of those references, returning the result.
//
// This can be used in the References implementation for a node which must also
// depend on the destruction of anything it references.
func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference {
given := refs
for _, ref := range given {
switch tr := ref.Subject.(type) {
case addrs.Resource:
newRef := *ref // shallow copy
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
refs = append(refs, &newRef)
case addrs.ResourceInstance:
newRef := *ref // shallow copy
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
refs = append(refs, &newRef)
}
}
return refs
}
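// Editor's hedged illustration: if refs holds one reference whose Subject is
// the managed resource aws_instance.web, the result keeps that reference and
// appends a shallow copy whose Subject is the same resource's destroy phase
// (via Phase(addrs.ResourceInstancePhaseDestroy)), so callers of this helper
// also depend on the corresponding destroy-phase graph nodes.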
func modulePrefixStr(p addrs.ModuleInstance) string {
return p.String()
}
func modulePrefixList(result []string, prefix string) []string {
if prefix != "" {
for i, v := range result {
result[i] = fmt.Sprintf("%s.%s", prefix, v)
}
}
return result
}
| terraform/transform_reference.go | 1 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.9985060691833496,
0.44560593366622925,
0.0001619406248209998,
0.006159801501780748,
0.4844120442867279
] |
{
"id": 5,
"code_window": [
"\tfor removed := 0; ; removed = 0 {\n",
"\t\tfor _, v := range g.Vertices() {\n",
"\t\t\tswitch v.(type) {\n",
"\t\t\tcase *NodeApplyableOutput, *NodeLocal:\n",
"\t\t\t\t// OK\n",
"\t\t\tdefault:\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\tswitch v := v.(type) {\n",
"\t\t\tcase *NodeApplyableOutput:\n",
"\t\t\t\t// If we're not certain this is a full destroy, we need to keep any\n",
"\t\t\t\t// root module outputs\n",
"\t\t\t\tif v.Addr.Module.IsRoot() && !t.Destroy {\n",
"\t\t\t\t\tcontinue\n",
"\t\t\t\t}\n",
"\t\t\tcase *NodeLocal, *NodeApplyableModuleVariable:\n"
],
"file_path": "terraform/transform_reference.go",
"type": "replace",
"edit_start_line_idx": 219
} | package containers
import (
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/pagination"
)
// ListOptsBuilder allows extensions to add additional parameters to the List
// request.
type ListOptsBuilder interface {
ToContainerListParams() (bool, string, error)
}
// ListOpts is a structure that holds options for listing containers.
type ListOpts struct {
Full bool
Limit int `q:"limit"`
Marker string `q:"marker"`
EndMarker string `q:"end_marker"`
Format string `q:"format"`
Prefix string `q:"prefix"`
Delimiter string `q:"delimiter"`
}
// ToContainerListParams formats a ListOpts into a query string and boolean
// representing whether to list complete information for each container.
func (opts ListOpts) ToContainerListParams() (bool, string, error) {
q, err := gophercloud.BuildQueryString(opts)
return opts.Full, q.String(), err
}
// List is a function that retrieves containers associated with the account as
// well as account metadata. It returns a pager which can be iterated with the
// EachPage function.
func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"}
url := listURL(c)
if opts != nil {
full, query, err := opts.ToContainerListParams()
if err != nil {
return pagination.Pager{Err: err}
}
url += query
if full {
headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"}
}
}
pager := pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
p := ContainerPage{pagination.MarkerPageBase{PageResult: r}}
p.MarkerPageBase.Owner = p
return p
})
pager.Headers = headers
return pager
}
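// Usage sketch added by the editor (hedged): "client" is an assumed
// object-storage ServiceClient and ExtractNames is the helper from this
// package's results file; with Full set to false the listing is plain text,
// so ExtractNames is the appropriate extractor.
//
//	pager := List(client, ListOpts{Full: false, Prefix: "backup-"})
//	err := pager.EachPage(func(page pagination.Page) (bool, error) {
//		names, err := ExtractNames(page)
//		if err != nil {
//			return false, err
//		}
//		for _, name := range names {
//			fmt.Println(name) // assumes the caller imports "fmt"
//		}
//		return true, nil
//	})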
// CreateOptsBuilder allows extensions to add additional parameters to the
// Create request.
type CreateOptsBuilder interface {
ToContainerCreateMap() (map[string]string, error)
}
// CreateOpts is a structure that holds parameters for creating a container.
type CreateOpts struct {
Metadata map[string]string
ContainerRead string `h:"X-Container-Read"`
ContainerSyncTo string `h:"X-Container-Sync-To"`
ContainerSyncKey string `h:"X-Container-Sync-Key"`
ContainerWrite string `h:"X-Container-Write"`
ContentType string `h:"Content-Type"`
DetectContentType bool `h:"X-Detect-Content-Type"`
IfNoneMatch string `h:"If-None-Match"`
VersionsLocation string `h:"X-Versions-Location"`
HistoryLocation string `h:"X-History-Location"`
}
// ToContainerCreateMap formats a CreateOpts into a map of headers.
func (opts CreateOpts) ToContainerCreateMap() (map[string]string, error) {
h, err := gophercloud.BuildHeaders(opts)
if err != nil {
return nil, err
}
for k, v := range opts.Metadata {
h["X-Container-Meta-"+k] = v
}
return h, nil
}
// Create is a function that creates a new container.
func Create(c *gophercloud.ServiceClient, containerName string, opts CreateOptsBuilder) (r CreateResult) {
h := make(map[string]string)
if opts != nil {
headers, err := opts.ToContainerCreateMap()
if err != nil {
r.Err = err
return
}
for k, v := range headers {
h[k] = v
}
}
resp, err := c.Request("PUT", createURL(c, containerName), &gophercloud.RequestOpts{
MoreHeaders: h,
OkCodes: []int{201, 202, 204},
})
if resp != nil {
r.Header = resp.Header
resp.Body.Close()
}
r.Err = err
return
}
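// Usage sketch added by the editor (hedged; "client" is an assumed
// ServiceClient): checking Err on the returned CreateResult is enough to
// detect failure; header extraction helpers live in this package's results
// file.
//
//	res := Create(client, "backups", CreateOpts{
//		Metadata: map[string]string{"owner": "ops"},
//	})
//	if res.Err != nil {
//		// handle the error
//	}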
// Delete is a function that deletes a container.
func Delete(c *gophercloud.ServiceClient, containerName string) (r DeleteResult) {
_, r.Err = c.Delete(deleteURL(c, containerName), nil)
return
}
// UpdateOptsBuilder allows extensions to add additional parameters to the
// Update request.
type UpdateOptsBuilder interface {
ToContainerUpdateMap() (map[string]string, error)
}
// UpdateOpts is a structure that holds parameters for updating, creating, or
// deleting a container's metadata.
type UpdateOpts struct {
Metadata map[string]string
ContainerRead string `h:"X-Container-Read"`
ContainerSyncTo string `h:"X-Container-Sync-To"`
ContainerSyncKey string `h:"X-Container-Sync-Key"`
ContainerWrite string `h:"X-Container-Write"`
ContentType string `h:"Content-Type"`
DetectContentType bool `h:"X-Detect-Content-Type"`
RemoveVersionsLocation string `h:"X-Remove-Versions-Location"`
VersionsLocation string `h:"X-Versions-Location"`
RemoveHistoryLocation string `h:"X-Remove-History-Location"`
HistoryLocation string `h:"X-History-Location"`
}
// ToContainerUpdateMap formats a UpdateOpts into a map of headers.
func (opts UpdateOpts) ToContainerUpdateMap() (map[string]string, error) {
h, err := gophercloud.BuildHeaders(opts)
if err != nil {
return nil, err
}
for k, v := range opts.Metadata {
h["X-Container-Meta-"+k] = v
}
return h, nil
}
// Update is a function that creates, updates, or deletes a container's
// metadata.
func Update(c *gophercloud.ServiceClient, containerName string, opts UpdateOptsBuilder) (r UpdateResult) {
h := make(map[string]string)
if opts != nil {
headers, err := opts.ToContainerUpdateMap()
if err != nil {
r.Err = err
return
}
for k, v := range headers {
h[k] = v
}
}
resp, err := c.Request("POST", updateURL(c, containerName), &gophercloud.RequestOpts{
MoreHeaders: h,
OkCodes: []int{201, 202, 204},
})
if resp != nil {
r.Header = resp.Header
}
r.Err = err
return
}
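// Usage sketch added by the editor (hedged; same assumed "client" as above):
// setting or replacing a single custom metadata key on an existing container.
//
//	res := Update(client, "backups", UpdateOpts{
//		Metadata: map[string]string{"owner": "platform"},
//	})
//	if res.Err != nil {
//		// handle the error
//	}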
// GetOptsBuilder allows extensions to add additional parameters to the Get
// request.
type GetOptsBuilder interface {
ToContainerGetMap() (map[string]string, error)
}
// GetOpts is a structure that holds options for listing containers.
type GetOpts struct {
Newest bool `h:"X-Newest"`
}
// ToContainerGetMap formats a GetOpts into a map of headers.
func (opts GetOpts) ToContainerGetMap() (map[string]string, error) {
return gophercloud.BuildHeaders(opts)
}
// Get is a function that retrieves the metadata of a container. To extract just
// the custom metadata, pass the GetResult response to the ExtractMetadata
// function.
func Get(c *gophercloud.ServiceClient, containerName string, opts GetOptsBuilder) (r GetResult) {
h := make(map[string]string)
if opts != nil {
headers, err := opts.ToContainerGetMap()
if err != nil {
r.Err = err
return
}
for k, v := range headers {
h[k] = v
}
}
resp, err := c.Head(getURL(c, containerName), &gophercloud.RequestOpts{
MoreHeaders: h,
OkCodes: []int{200, 204},
})
if resp != nil {
r.Header = resp.Header
}
r.Err = err
return
}
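// Usage sketch added by the editor (hedged): ExtractMetadata is defined in
// this package's results file and returns the custom metadata with the
// X-Container-Meta- prefix stripped; "client" is an assumed ServiceClient.
//
//	metadata, err := Get(client, "backups", nil).ExtractMetadata()
//	if err != nil {
//		// handle the error
//	}
//	_ = metadata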
| vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.038731109350919724,
0.0036577791906893253,
0.0001642991410335526,
0.00017553623183630407,
0.008889985270798206
] |
{
"id": 5,
"code_window": [
"\tfor removed := 0; ; removed = 0 {\n",
"\t\tfor _, v := range g.Vertices() {\n",
"\t\t\tswitch v.(type) {\n",
"\t\t\tcase *NodeApplyableOutput, *NodeLocal:\n",
"\t\t\t\t// OK\n",
"\t\t\tdefault:\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\tswitch v := v.(type) {\n",
"\t\t\tcase *NodeApplyableOutput:\n",
"\t\t\t\t// If we're not certain this is a full destroy, we need to keep any\n",
"\t\t\t\t// root module outputs\n",
"\t\t\t\tif v.Addr.Module.IsRoot() && !t.Destroy {\n",
"\t\t\t\t\tcontinue\n",
"\t\t\t\t}\n",
"\t\t\tcase *NodeLocal, *NodeApplyableModuleVariable:\n"
],
"file_path": "terraform/transform_reference.go",
"type": "replace",
"edit_start_line_idx": 219
} | resource "null_resource" "foo" {
# This construct trips up the HCL2 parser because it looks like a nested block
# but has quoted keys like a map. The upgrade tool would add an equals sign
# here to turn this into a map attribute, but "terraform init" must first
# be able to install the null provider so the upgrade tool can know that
# "triggers" is a map attribute.
triggers {
"foo" = "bar"
}
}
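# Editor's note (hedged, not part of the original fixture): after the 0.12
# upgrade tool rewrites this file, the block above is expected to become a map
# attribute, roughly:
#
#   triggers = {
#     foo = "bar"
#   }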
| command/testdata/init-012upgrade/main.tf | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.00017907729488797486,
0.0001753501856001094,
0.00017162307631224394,
0.0001753501856001094,
0.00000372710928786546
] |
{
"id": 5,
"code_window": [
"\tfor removed := 0; ; removed = 0 {\n",
"\t\tfor _, v := range g.Vertices() {\n",
"\t\t\tswitch v.(type) {\n",
"\t\t\tcase *NodeApplyableOutput, *NodeLocal:\n",
"\t\t\t\t// OK\n",
"\t\t\tdefault:\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\tswitch v := v.(type) {\n",
"\t\t\tcase *NodeApplyableOutput:\n",
"\t\t\t\t// If we're not certain this is a full destroy, we need to keep any\n",
"\t\t\t\t// root module outputs\n",
"\t\t\t\tif v.Addr.Module.IsRoot() && !t.Destroy {\n",
"\t\t\t\t\tcontinue\n",
"\t\t\t\t}\n",
"\t\t\tcase *NodeLocal, *NodeApplyableModuleVariable:\n"
],
"file_path": "terraform/transform_reference.go",
"type": "replace",
"edit_start_line_idx": 219
} | package hcl
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"sort"
wordwrap "github.com/mitchellh/go-wordwrap"
"github.com/zclconf/go-cty/cty"
)
type diagnosticTextWriter struct {
files map[string]*File
wr io.Writer
width uint
color bool
}
// NewDiagnosticTextWriter creates a DiagnosticWriter that writes diagnostics
// to the given writer as formatted text.
//
// It is designed to produce text appropriate to print in a monospaced font
// in a terminal of a particular width, or optionally with no width limit.
//
// The given width may be zero to disable word-wrapping of the detail text
// and truncation of source code snippets.
//
// If color is set to true, the output will include VT100 escape sequences to
// color-code the severity indicators. It is suggested to turn this off if
// the target writer is not a terminal.
func NewDiagnosticTextWriter(wr io.Writer, files map[string]*File, width uint, color bool) DiagnosticWriter {
return &diagnosticTextWriter{
files: files,
wr: wr,
width: width,
color: color,
}
}
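// Usage sketch added by the editor (hedged): assumes the hclparse sub-package
// and a terminal writer such as os.Stdout; "src" stands for the raw
// configuration bytes.
//
//	parser := hclparse.NewParser()
//	_, diags := parser.ParseHCL(src, "config.hcl")
//	wr := NewDiagnosticTextWriter(os.Stdout, parser.Files(), 78, true)
//	wr.WriteDiagnostics(diags)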
func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error {
if diag == nil {
return errors.New("nil diagnostic")
}
var colorCode, highlightCode, resetCode string
if w.color {
switch diag.Severity {
case DiagError:
colorCode = "\x1b[31m"
case DiagWarning:
colorCode = "\x1b[33m"
}
resetCode = "\x1b[0m"
highlightCode = "\x1b[1;4m"
}
var severityStr string
switch diag.Severity {
case DiagError:
severityStr = "Error"
case DiagWarning:
severityStr = "Warning"
default:
// should never happen
severityStr = "???????"
}
fmt.Fprintf(w.wr, "%s%s%s: %s\n\n", colorCode, severityStr, resetCode, diag.Summary)
if diag.Subject != nil {
snipRange := *diag.Subject
highlightRange := snipRange
if diag.Context != nil {
// Show enough of the source code to include both the subject
// and context ranges, which overlap in all reasonable
// situations.
snipRange = RangeOver(snipRange, *diag.Context)
}
// We can't illustrate an empty range, so we'll turn such ranges into
// single-character ranges, which might not be totally valid (may point
// off the end of a line, or off the end of the file) but are good
// enough for the bounds checks we do below.
if snipRange.Empty() {
snipRange.End.Byte++
snipRange.End.Column++
}
if highlightRange.Empty() {
highlightRange.End.Byte++
highlightRange.End.Column++
}
file := w.files[diag.Subject.Filename]
if file == nil || file.Bytes == nil {
fmt.Fprintf(w.wr, " on %s line %d:\n (source code not available)\n\n", diag.Subject.Filename, diag.Subject.Start.Line)
} else {
var contextLine string
if diag.Subject != nil {
contextLine = contextString(file, diag.Subject.Start.Byte)
if contextLine != "" {
contextLine = ", in " + contextLine
}
}
fmt.Fprintf(w.wr, " on %s line %d%s:\n", diag.Subject.Filename, diag.Subject.Start.Line, contextLine)
src := file.Bytes
sc := NewRangeScanner(src, diag.Subject.Filename, bufio.ScanLines)
for sc.Scan() {
lineRange := sc.Range()
if !lineRange.Overlaps(snipRange) {
continue
}
beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange)
if highlightedRange.Empty() {
fmt.Fprintf(w.wr, "%4d: %s\n", lineRange.Start.Line, sc.Bytes())
} else {
before := beforeRange.SliceBytes(src)
highlighted := highlightedRange.SliceBytes(src)
after := afterRange.SliceBytes(src)
fmt.Fprintf(
w.wr, "%4d: %s%s%s%s%s\n",
lineRange.Start.Line,
before,
highlightCode, highlighted, resetCode,
after,
)
}
}
w.wr.Write([]byte{'\n'})
}
if diag.Expression != nil && diag.EvalContext != nil {
// We will attempt to render the values for any variables
// referenced in the given expression as additional context, for
// situations where the same expression is evaluated multiple
// times in different scopes.
expr := diag.Expression
ctx := diag.EvalContext
vars := expr.Variables()
stmts := make([]string, 0, len(vars))
seen := make(map[string]struct{}, len(vars))
for _, traversal := range vars {
val, diags := traversal.TraverseAbs(ctx)
if diags.HasErrors() {
// Skip anything that generates errors, since we probably
// already have the same error in our diagnostics set
// already.
continue
}
traversalStr := w.traversalStr(traversal)
if _, exists := seen[traversalStr]; exists {
continue // don't show duplicates when the same variable is referenced multiple times
}
switch {
case !val.IsKnown():
// Can't say anything about this yet, then.
continue
case val.IsNull():
stmts = append(stmts, fmt.Sprintf("%s set to null", traversalStr))
default:
stmts = append(stmts, fmt.Sprintf("%s as %s", traversalStr, w.valueStr(val)))
}
seen[traversalStr] = struct{}{}
}
sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly?
last := len(stmts) - 1
for i, stmt := range stmts {
switch i {
case 0:
w.wr.Write([]byte{'w', 'i', 't', 'h', ' '})
default:
w.wr.Write([]byte{' ', ' ', ' ', ' ', ' '})
}
w.wr.Write([]byte(stmt))
switch i {
case last:
w.wr.Write([]byte{'.', '\n', '\n'})
default:
w.wr.Write([]byte{',', '\n'})
}
}
}
}
if diag.Detail != "" {
detail := diag.Detail
if w.width != 0 {
detail = wordwrap.WrapString(detail, w.width)
}
fmt.Fprintf(w.wr, "%s\n\n", detail)
}
return nil
}
func (w *diagnosticTextWriter) WriteDiagnostics(diags Diagnostics) error {
for _, diag := range diags {
err := w.WriteDiagnostic(diag)
if err != nil {
return err
}
}
return nil
}
func (w *diagnosticTextWriter) traversalStr(traversal Traversal) string {
// This is a specialized subset of traversal rendering tailored to
// producing helpful contextual messages in diagnostics. It is not
// comprehensive nor intended to be used for other purposes.
var buf bytes.Buffer
for _, step := range traversal {
switch tStep := step.(type) {
case TraverseRoot:
buf.WriteString(tStep.Name)
case TraverseAttr:
buf.WriteByte('.')
buf.WriteString(tStep.Name)
case TraverseIndex:
buf.WriteByte('[')
if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() {
buf.WriteString(w.valueStr(tStep.Key))
} else {
// We'll just use a placeholder for more complex values,
// since otherwise our result could grow ridiculously long.
buf.WriteString("...")
}
buf.WriteByte(']')
}
}
return buf.String()
}
func (w *diagnosticTextWriter) valueStr(val cty.Value) string {
// This is a specialized subset of value rendering tailored to producing
// helpful but concise messages in diagnostics. It is not comprehensive
// nor intended to be used for other purposes.
ty := val.Type()
switch {
case val.IsNull():
return "null"
case !val.IsKnown():
// Should never happen here because we should filter before we get
// in here, but we'll do something reasonable rather than panic.
return "(not yet known)"
case ty == cty.Bool:
if val.True() {
return "true"
}
return "false"
case ty == cty.Number:
bf := val.AsBigFloat()
return bf.Text('g', 10)
case ty == cty.String:
// Go string syntax is not exactly the same as HCL native string syntax,
// but we'll accept the minor edge-cases where this is different here
// for now, just to get something reasonable here.
return fmt.Sprintf("%q", val.AsString())
case ty.IsCollectionType() || ty.IsTupleType():
l := val.LengthInt()
switch l {
case 0:
return "empty " + ty.FriendlyName()
case 1:
return ty.FriendlyName() + " with 1 element"
default:
return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l)
}
case ty.IsObjectType():
atys := ty.AttributeTypes()
l := len(atys)
switch l {
case 0:
return "object with no attributes"
case 1:
var name string
for k := range atys {
name = k
}
return fmt.Sprintf("object with 1 attribute %q", name)
default:
return fmt.Sprintf("object with %d attributes", l)
}
default:
return ty.FriendlyName()
}
}
func contextString(file *File, offset int) string {
type contextStringer interface {
ContextString(offset int) string
}
if cser, ok := file.Nav.(contextStringer); ok {
return cser.ContextString(offset)
}
return ""
}
| vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.0028085820376873016,
0.0003870292566716671,
0.00016411018441431224,
0.00017412318265996873,
0.0006026193732395768
] |