hunk (dict) | file (string, lengths 0–11.8M) | file_path (string, lengths 2–234) | label (int64, 0–1) | commit_url (string, lengths 74–103) | dependency_score (sequence, length 5) |
---|---|---|---|---|---|
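Each row below is one record: a `hunk` JSON object, the full text of a candidate `file`, its `file_path`, a 0/1 `label`, the `commit_url` the record was mined from, and five `dependency_score` values. The `hunk` keys visible in the rows are `id`, `code_window`, `labels`, `after_edit`, `file_path`, `type`, and `edit_start_line_idx`. Below is a minimal Go sketch for decoding one `hunk` record into a typed struct; the `Hunk` struct name and the per-field comments are assumptions inferred from the visible rows, not an official schema.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hunk mirrors the keys of the "hunk" objects in the rows below. Field
// meanings are inferred from the visible data, not from a published schema.
type Hunk struct {
	ID               int      `json:"id"`
	CodeWindow       []string `json:"code_window"` // context lines around the edit site (assumed)
	Labels           []string `json:"labels"`      // per-line markers such as "keep" or "add" (assumed)
	AfterEdit        []string `json:"after_edit"`  // lines produced by the edit (assumed)
	FilePath         string   `json:"file_path"`
	Type             string   `json:"type"` // edit kind, e.g. "add"
	EditStartLineIdx int      `json:"edit_start_line_idx"`
}

func main() {
	// A trimmed-down sample in the same shape as the records below.
	raw := `{"id": 10, "code_window": ["\tselect {\n"], "labels": ["keep"],
		"after_edit": ["\tblock.JsonTable = nil\n"],
		"file_path": "br/pkg/metautil/statsfile.go", "type": "add",
		"edit_start_line_idx": 227}`
	var h Hunk
	if err := json.Unmarshal([]byte(raw), &h); err != nil {
		panic(err)
	}
	fmt.Printf("hunk %d edits %s at line %d (%s)\n",
		h.ID, h.FilePath, h.EditStartLineIdx, h.Type)
}
```

Decoding into a typed struct makes it straightforward to filter records by `type` or to align `after_edit` against `code_window` using the `labels` markers.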
{
"id": 10,
"code_window": [
"\t\t\t\tif err := json.Unmarshal(block.JsonTable, jsonTable); err != nil {\n",
"\t\t\t\t\treturn errors.Trace(err)\n",
"\t\t\t\t}\n",
"\t\t\t\tselect {\n",
"\t\t\t\tcase <-ectx.Done():\n",
"\t\t\t\t\treturn nil\n",
"\t\t\t\tcase taskCh <- &statstypes.PartitionStatisticLoadTask{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// reset the block.JsonTable to nil to make it garbage collected as soon as possible\n",
"\t\t\t\tblock.JsonTable = nil\n",
"\n"
],
"file_path": "br/pkg/metautil/statsfile.go",
"type": "add",
"edit_start_line_idx": 227
} | // Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metautil
import (
"bytes"
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"github.com/gogo/protobuf/proto"
"github.com/pingcap/errors"
backuppb "github.com/pingcap/kvproto/pkg/brpb"
berrors "github.com/pingcap/tidb/br/pkg/errors"
"github.com/pingcap/tidb/br/pkg/storage"
"github.com/pingcap/tidb/br/pkg/utils"
"github.com/pingcap/tidb/pkg/parser/model"
"github.com/pingcap/tidb/pkg/statistics/handle"
statstypes "github.com/pingcap/tidb/pkg/statistics/handle/types"
statsutil "github.com/pingcap/tidb/pkg/statistics/handle/util"
"golang.org/x/sync/errgroup"
)
var maxStatsJsonTableSize = 32 * 1024 * 1024 // 32 MiB
var inlineSize = 8 * 1024 // 8 KiB
func getStatsFileName(physicalID int64) string {
return fmt.Sprintf("backupmeta.schema.stats.%09d", physicalID)
}
// A lightweight function wrapper to dump the statistic
type StatsWriter struct {
storage storage.ExternalStorage
cipher *backuppb.CipherInfo
// final stats file indexes
statsFileIndexes []*backuppb.StatsFileIndex
// temporary variables, clear after each flush
totalSize int
statsFile *backuppb.StatsFile
}
func newStatsWriter(
storage storage.ExternalStorage,
cipher *backuppb.CipherInfo,
) *StatsWriter {
return &StatsWriter{
storage: storage,
cipher: cipher,
statsFileIndexes: make([]*backuppb.StatsFileIndex, 0),
totalSize: 0,
statsFile: &backuppb.StatsFile{
Blocks: make([]*backuppb.StatsBlock, 0, 8),
},
}
}
func (s *StatsWriter) clearTemporary() {
// clear the temporary variables
s.totalSize = 0
s.statsFile = &backuppb.StatsFile{
Blocks: make([]*backuppb.StatsBlock, 0, 8),
}
}
func (s *StatsWriter) writeStatsFileAndClear(ctx context.Context, physicalID int64) error {
fileName := getStatsFileName(physicalID)
content, err := proto.Marshal(s.statsFile)
if err != nil {
return errors.Trace(err)
}
if len(s.statsFileIndexes) == 0 && len(content) < inlineSize {
s.statsFileIndexes = append(s.statsFileIndexes, &backuppb.StatsFileIndex{InlineData: content})
return nil
}
checksum := sha256.Sum256(content)
encryptedContent, iv, err := Encrypt(content, s.cipher)
if err != nil {
return errors.Trace(err)
}
if err := s.storage.WriteFile(ctx, fileName, encryptedContent); err != nil {
return errors.Trace(err)
}
s.statsFileIndexes = append(s.statsFileIndexes, &backuppb.StatsFileIndex{
Name: fileName,
Sha256: checksum[:],
SizeEnc: uint64(len(encryptedContent)),
SizeOri: uint64(len(content)),
CipherIv: iv,
})
s.clearTemporary()
return nil
}
func (s *StatsWriter) BackupStats(ctx context.Context, jsonTable *statsutil.JSONTable, physicalID int64) error {
if jsonTable == nil {
return nil
}
statsBytes, err := json.Marshal(jsonTable)
if err != nil {
return errors.Trace(err)
}
s.totalSize += len(statsBytes)
s.statsFile.Blocks = append(s.statsFile.Blocks, &backuppb.StatsBlock{
PhysicalId: physicalID,
JsonTable: statsBytes,
})
// check whether need to flush
if s.totalSize > maxStatsJsonTableSize {
if err := s.writeStatsFileAndClear(ctx, physicalID); err != nil {
return errors.Trace(err)
}
}
return nil
}
func (s *StatsWriter) BackupStatsDone(ctx context.Context) ([]*backuppb.StatsFileIndex, error) {
if s.totalSize == 0 || len(s.statsFile.Blocks) == 0 {
return s.statsFileIndexes, nil
}
if err := s.writeStatsFileAndClear(ctx, s.statsFile.Blocks[0].PhysicalId); err != nil {
return nil, errors.Trace(err)
}
return s.statsFileIndexes, nil
}
func RestoreStats(
ctx context.Context,
storage storage.ExternalStorage,
cipher *backuppb.CipherInfo,
statsHandler *handle.Handle,
newTableInfo *model.TableInfo,
statsFileIndexes []*backuppb.StatsFileIndex,
rewriteIDMap map[int64]int64,
) error {
eg, ectx := errgroup.WithContext(ctx)
taskCh := make(chan *statstypes.PartitionStatisticLoadTask, 8)
eg.Go(func() error {
return downloadStats(ectx, storage, cipher, statsFileIndexes, rewriteIDMap, taskCh)
})
eg.Go(func() error {
// NOTICE: skip updating cache after load stats from json
return statsHandler.LoadStatsFromJSONConcurrently(ectx, newTableInfo, taskCh, 0)
})
return eg.Wait()
}
func downloadStats(
ctx context.Context,
storage storage.ExternalStorage,
cipher *backuppb.CipherInfo,
statsFileIndexes []*backuppb.StatsFileIndex,
rewriteIDMap map[int64]int64,
taskCh chan<- *statstypes.PartitionStatisticLoadTask,
) error {
defer close(taskCh)
eg, ectx := errgroup.WithContext(ctx)
downloadWorkerpool := utils.NewWorkerPool(4, "download stats for each partition")
for _, statsFileIndex := range statsFileIndexes {
if ectx.Err() != nil {
break
}
statsFile := statsFileIndex
downloadWorkerpool.ApplyOnErrorGroup(eg, func() error {
var statsContent []byte
if len(statsFile.InlineData) > 0 {
statsContent = statsFile.InlineData
} else {
content, err := storage.ReadFile(ectx, statsFile.Name)
if err != nil {
return errors.Trace(err)
}
decryptContent, err := Decrypt(content, cipher, statsFile.CipherIv)
if err != nil {
return errors.Trace(err)
}
checksum := sha256.Sum256(decryptContent)
if !bytes.Equal(statsFile.Sha256, checksum[:]) {
return berrors.ErrInvalidMetaFile.GenWithStackByArgs(fmt.Sprintf(
"checksum mismatch expect %x, got %x", statsFile.Sha256, checksum[:]))
}
statsContent = decryptContent
}
statsFileBlocks := &backuppb.StatsFile{}
if err := proto.Unmarshal(statsContent, statsFileBlocks); err != nil {
return errors.Trace(err)
}
for _, block := range statsFileBlocks.Blocks {
physicalId, ok := rewriteIDMap[block.PhysicalId]
if !ok {
return berrors.ErrRestoreInvalidRewrite.GenWithStackByArgs(fmt.Sprintf(
"not rewrite rule matched, old physical id: %d", block.PhysicalId))
}
jsonTable := &statsutil.JSONTable{}
if err := json.Unmarshal(block.JsonTable, jsonTable); err != nil {
return errors.Trace(err)
}
select {
case <-ectx.Done():
return nil
case taskCh <- &statstypes.PartitionStatisticLoadTask{
PhysicalID: physicalId,
JSONTable: jsonTable,
}:
}
}
return nil
})
}
return eg.Wait()
}
| br/pkg/metautil/statsfile.go | 1 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.995969295501709,
0.04077591001987457,
0.0001627011806704104,
0.00017417341587133706,
0.1949862539768219
] |
{
"id": 10,
"code_window": [
"\t\t\t\tif err := json.Unmarshal(block.JsonTable, jsonTable); err != nil {\n",
"\t\t\t\t\treturn errors.Trace(err)\n",
"\t\t\t\t}\n",
"\t\t\t\tselect {\n",
"\t\t\t\tcase <-ectx.Done():\n",
"\t\t\t\t\treturn nil\n",
"\t\t\t\tcase taskCh <- &statstypes.PartitionStatisticLoadTask{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// reset the block.JsonTable to nil to make it garbage collected as soon as possible\n",
"\t\t\t\tblock.JsonTable = nil\n",
"\n"
],
"file_path": "br/pkg/metautil/statsfile.go",
"type": "add",
"edit_start_line_idx": 227
} | // Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lockstore
import (
"bytes"
"fmt"
"math/rand"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestMemStore(t *testing.T) {
prefix := "ls"
n := 30000
ls := NewMemStore(1 << 10)
val := ls.Get([]byte("a"), nil)
require.Len(t, val, 0)
insertMemStore(ls, prefix, "", n)
numBlocks := len(ls.getArena().blocks)
checkMemStore(t, ls, prefix, "", n)
deleteMemStore(t, ls, prefix, n)
require.Len(t, ls.getArena().blocks, numBlocks)
time.Sleep(reuseSafeDuration)
insertMemStore(ls, prefix, "", n)
// Because the height is random, we insert again, the block number may be different.
diff := len(ls.getArena().blocks) - numBlocks
require.True(t, diff < numBlocks/100)
require.Len(t, ls.Get(numToKey(n), nil), 0)
require.Len(t, ls.Get([]byte("abc"), nil), 0)
}
const keyFormat = "%s%020d"
func insertMemStore(ls *MemStore, prefix, valPrefix string, n int) *MemStore {
perms := rand.Perm(n)
hint := new(Hint)
for _, v := range perms {
keyStr := fmt.Sprintf(keyFormat, prefix, v)
key := []byte(keyStr)
val := []byte(valPrefix + keyStr)
ls.PutWithHint(key, val, hint)
}
return ls
}
func checkMemStore(t *testing.T, ls *MemStore, prefix, valPrefix string, n int) {
perms := rand.Perm(n)
for _, v := range perms {
key := []byte(fmt.Sprintf(keyFormat, prefix, v))
val := ls.Get(key, nil)
require.True(t, bytes.Equal(val[:len(valPrefix)], []byte(valPrefix)))
require.True(t, bytes.Equal(key, val[len(valPrefix):]))
}
}
func deleteMemStore(t *testing.T, ls *MemStore, prefix string, n int) {
perms := rand.Perm(n)
for _, v := range perms {
key := []byte(fmt.Sprintf(keyFormat, prefix, v))
require.True(t, ls.Delete(key))
}
}
func TestIterator(t *testing.T) {
_ = checkKey
t.Skip("Skip this unstable test(#26235) and bring it back before 2021-07-29.")
ls := NewMemStore(1 << 10)
hint := new(Hint)
for i := 10; i < 1000; i += 10 {
key := []byte(fmt.Sprintf(keyFormat, "ls", i))
ls.PutWithHint(key, bytes.Repeat(key, 10), hint)
}
require.Len(t, ls.getArena().blocks, 33)
it := ls.NewIterator()
it.SeekToFirst()
checkKey(t, it, 10)
it.Next()
checkKey(t, it, 20)
it.SeekToFirst()
checkKey(t, it, 10)
it.SeekToLast()
checkKey(t, it, 990)
it.Seek(numToKey(11))
checkKey(t, it, 20)
it.Seek(numToKey(989))
checkKey(t, it, 990)
it.Seek(numToKey(0))
checkKey(t, it, 10)
it.Seek(numToKey(2000))
require.False(t, it.Valid())
it.Seek(numToKey(500))
checkKey(t, it, 500)
it.Prev()
checkKey(t, it, 490)
it.SeekForPrev(numToKey(100))
checkKey(t, it, 100)
it.SeekForPrev(numToKey(99))
checkKey(t, it, 90)
it.SeekForPrev(numToKey(2000))
checkKey(t, it, 990)
}
func checkKey(t *testing.T, it *Iterator, n int) {
require.True(t, it.Valid())
require.True(t, bytes.Equal(it.Key(), []byte(fmt.Sprintf(keyFormat, "ls", n))))
require.True(t, bytes.Equal(it.Value(), bytes.Repeat(it.Key(), 10)))
}
func numToKey(n int) []byte {
return []byte(fmt.Sprintf(keyFormat, "ls", n))
}
func TestReplace(t *testing.T) {
prefix := "ls"
n := 30000
ls := NewMemStore(1 << 10)
insertMemStore(ls, prefix, "old", n)
checkMemStore(t, ls, prefix, "old", n)
insertMemStore(ls, prefix, "new", n)
checkMemStore(t, ls, prefix, "new", n)
}
func TestMemStoreConcurrent(t *testing.T) {
keyRange := 10
concurrentKeys := make([][]byte, keyRange)
for i := 0; i < keyRange; i++ {
concurrentKeys[i] = numToKey(i)
}
lock := sync.RWMutex{}
ls := NewMemStore(1 << 20)
// Starts 10 readers and 1 writer.
closeCh := make(chan bool)
wg := new(sync.WaitGroup)
wg.Add(keyRange)
for i := 0; i < keyRange; i++ {
go runReader(ls, &lock, closeCh, i, wg)
}
ran := rand.New(rand.NewSource(time.Now().Unix()))
start := time.Now()
var totalInsert, totalDelete int
hint := new(Hint)
for {
if totalInsert%128 == 0 && time.Since(start) > time.Second*10 {
break
}
n := ran.Intn(keyRange)
key := concurrentKeys[n]
lock.Lock()
if ls.PutWithHint(key, key, hint) {
totalInsert++
}
lock.Unlock()
n = ran.Intn(keyRange)
key = concurrentKeys[n]
lock.Lock()
if ls.DeleteWithHint(key, hint) {
totalDelete++
}
lock.Unlock()
}
close(closeCh)
wg.Wait()
arena := ls.getArena()
fmt.Println("total insert", totalInsert, "total delete", totalDelete)
fmt.Println(len(arena.pendingBlocks), len(arena.writableQueue), len(arena.blocks))
}
func runReader(ls *MemStore, lock *sync.RWMutex, closeCh chan bool, i int, wg *sync.WaitGroup) {
defer wg.Done()
key := numToKey(i)
buf := make([]byte, 100)
var n int
for {
n++
if n%128 == 0 {
select {
case <-closeCh:
fmt.Println("read", n)
return
default:
}
}
lock.RLock()
result := ls.Get(key, buf)
lock.RUnlock()
if len(result) > 0 && !bytes.Equal(key, result) {
panic("data corruption")
}
}
}
func BenchmarkMemStoreDeleteInsertGet(b *testing.B) {
ls := NewMemStore(1 << 23)
keys := make([][]byte, 10000)
for i := 0; i < 10000; i++ {
keys[i] = numToKey(i)
ls.Put(keys[i], keys[i])
}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
buf := make([]byte, 100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
n := r.Intn(10000)
ls.Delete(keys[n])
ls.Put(keys[n], keys[n])
ls.Get(keys[n], buf)
}
}
func BenchmarkMemStoreIterate(b *testing.B) {
ls := NewMemStore(1 << 23)
keys := make([][]byte, 10000)
for i := 0; i < 10000; i++ {
keys[i] = numToKey(i)
ls.Put(keys[i], keys[i])
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
it := ls.NewIterator()
it.SeekToFirst()
for it.Valid() {
it.Next()
}
}
}
func BenchmarkPutWithHint(b *testing.B) {
ls := NewMemStore(1 << 20)
numKeys := 100000
keys := make([][]byte, numKeys)
hint := new(Hint)
for i := 0; i < numKeys; i++ {
keys[i] = numToKey(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
idx := i % numKeys
ls.PutWithHint(keys[idx], keys[idx], hint)
}
}
func BenchmarkPut(b *testing.B) {
ls := NewMemStore(1 << 20)
numKeys := 100000
keys := make([][]byte, numKeys)
for i := 0; i < numKeys; i++ {
keys[i] = numToKey(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
idx := i % numKeys
ls.Put(keys[idx], keys[idx])
}
}
| pkg/store/mockstore/unistore/lockstore/lockstore_test.go | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.00021550594829022884,
0.00017421423399355263,
0.00016363921167794615,
0.0001729401119519025,
0.000008693758900335524
] |
{
"id": 10,
"code_window": [
"\t\t\t\tif err := json.Unmarshal(block.JsonTable, jsonTable); err != nil {\n",
"\t\t\t\t\treturn errors.Trace(err)\n",
"\t\t\t\t}\n",
"\t\t\t\tselect {\n",
"\t\t\t\tcase <-ectx.Done():\n",
"\t\t\t\t\treturn nil\n",
"\t\t\t\tcase taskCh <- &statstypes.PartitionStatisticLoadTask{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// reset the block.JsonTable to nil to make it garbage collected as soon as possible\n",
"\t\t\t\tblock.JsonTable = nil\n",
"\n"
],
"file_path": "br/pkg/metautil/statsfile.go",
"type": "add",
"edit_start_line_idx": 227
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package copr
import (
"context"
"math/rand"
"sort"
"strconv"
"testing"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/pkg/kv"
"github.com/pingcap/tidb/pkg/store/driver/backoff"
"github.com/pingcap/tidb/pkg/util/logutil"
"github.com/stathat/consistent"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/tikv"
"go.uber.org/zap"
)
// StoreID: [1, storeCount]
func buildStoreTaskMap(storeCount int) map[uint64]*batchCopTask {
storeTasks := make(map[uint64]*batchCopTask)
for i := 0; i < storeCount; i++ {
storeTasks[uint64(i+1)] = &batchCopTask{}
}
return storeTasks
}
func buildRegionInfos(storeCount, regionCount, replicaNum int) []RegionInfo {
var ss []string
for i := 0; i < regionCount; i++ {
s := strconv.Itoa(i)
ss = append(ss, s)
}
sort.Strings(ss)
storeIDExist := func(storeID uint64, storeIDs []uint64) bool {
for _, i := range storeIDs {
if i == storeID {
return true
}
}
return false
}
randomStores := func(storeCount, replicaNum int) []uint64 {
var storeIDs []uint64
for len(storeIDs) < replicaNum {
t := uint64(rand.Intn(storeCount) + 1)
if storeIDExist(t, storeIDs) {
continue
}
storeIDs = append(storeIDs, t)
}
return storeIDs
}
var startKey string
regionInfos := make([]RegionInfo, 0, len(ss))
for i, s := range ss {
var ri RegionInfo
ri.Region = tikv.NewRegionVerID(uint64(i), 1, 1)
ri.Meta = nil
ri.AllStores = randomStores(storeCount, replicaNum)
var keyRange kv.KeyRange
if len(startKey) == 0 {
keyRange.StartKey = nil
} else {
keyRange.StartKey = kv.Key(startKey)
}
keyRange.EndKey = kv.Key(s)
ri.Ranges = NewKeyRanges([]kv.KeyRange{keyRange})
regionInfos = append(regionInfos, ri)
startKey = s
}
return regionInfos
}
func calcReginCount(tasks []*batchCopTask) int {
count := 0
for _, task := range tasks {
count += len(task.regionInfos)
}
return count
}
func TestBalanceBatchCopTaskWithContinuity(t *testing.T) {
for replicaNum := 1; replicaNum < 6; replicaNum++ {
storeCount := 10
regionCount := 100000
storeTasks := buildStoreTaskMap(storeCount)
regionInfos := buildRegionInfos(storeCount, regionCount, replicaNum)
tasks, score := balanceBatchCopTaskWithContinuity(storeTasks, regionInfos, 20)
require.True(t, isBalance(score))
require.Equal(t, regionCount, calcReginCount(tasks))
}
{
storeCount := 10
regionCount := 100
replicaNum := 2
storeTasks := buildStoreTaskMap(storeCount)
regionInfos := buildRegionInfos(storeCount, regionCount, replicaNum)
tasks, _ := balanceBatchCopTaskWithContinuity(storeTasks, regionInfos, 20)
require.True(t, tasks == nil)
}
}
func TestBalanceBatchCopTaskWithEmptyTaskSet(t *testing.T) {
{
var nilTaskSet []*batchCopTask
nilResult := balanceBatchCopTask(nil, nil, nilTaskSet, false, 0)
require.True(t, nilResult == nil)
}
{
emptyTaskSet := make([]*batchCopTask, 0)
emptyResult := balanceBatchCopTask(nil, nil, emptyTaskSet, false, 0)
require.True(t, emptyResult != nil)
require.True(t, len(emptyResult) == 0)
}
}
func TestDeepCopyStoreTaskMap(t *testing.T) {
storeTasks1 := buildStoreTaskMap(10)
for _, task := range storeTasks1 {
task.regionInfos = append(task.regionInfos, RegionInfo{})
}
storeTasks2 := deepCopyStoreTaskMap(storeTasks1)
for _, task := range storeTasks2 {
task.regionInfos = append(task.regionInfos, RegionInfo{})
}
for _, task := range storeTasks1 {
require.Equal(t, 1, len(task.regionInfos))
}
for _, task := range storeTasks2 {
require.Equal(t, 2, len(task.regionInfos))
}
}
// Make sure no duplicated ip:addr.
func generateOneAddr() string {
var ip string
for i := 0; i < 4; i++ {
if i != 0 {
ip += "."
}
ip += strconv.Itoa(rand.Intn(255))
}
return ip + ":" + strconv.Itoa(rand.Intn(65535))
}
func generateDifferentAddrs(num int) (res []string) {
addrMap := make(map[string]struct{})
for len(addrMap) < num {
addr := generateOneAddr()
if _, ok := addrMap[addr]; !ok {
addrMap[addr] = struct{}{}
}
}
for addr := range addrMap {
res = append(res, addr)
}
return
}
func TestConsistentHash(t *testing.T) {
allAddrs := generateDifferentAddrs(100)
computeNodes := allAddrs[:30]
storageNodes := allAddrs[30:]
firstRoundMap := make(map[string]string)
for round := 0; round < 100; round++ {
hasher := consistent.New()
rand.Shuffle(len(computeNodes), func(i, j int) {
computeNodes[i], computeNodes[j] = computeNodes[j], computeNodes[i]
})
for _, computeNode := range computeNodes {
hasher.Add(computeNode)
}
for _, storageNode := range storageNodes {
computeNode, err := hasher.Get(storageNode)
require.NoError(t, err)
if round == 0 {
firstRoundMap[storageNode] = computeNode
} else {
firstRoundAddr, ok := firstRoundMap[storageNode]
require.True(t, ok)
require.Equal(t, firstRoundAddr, computeNode)
}
}
}
}
func TestDispatchPolicyRR(t *testing.T) {
allAddrs := generateDifferentAddrs(100)
for i := 0; i < 100; i++ {
regCnt := rand.Intn(10000)
regIDs := make([]tikv.RegionVerID, 0, regCnt)
for i := 0; i < regCnt; i++ {
regIDs = append(regIDs, tikv.NewRegionVerID(uint64(i), 0, 0))
}
rpcCtxs, err := getTiFlashComputeRPCContextByRoundRobin(regIDs, allAddrs)
require.NoError(t, err)
require.Equal(t, len(rpcCtxs), len(regIDs))
checkMap := make(map[string]int, len(rpcCtxs))
for _, c := range rpcCtxs {
if v, ok := checkMap[c.Addr]; !ok {
checkMap[c.Addr] = 1
} else {
checkMap[c.Addr] = v + 1
}
}
actCnt := 0
for _, v := range checkMap {
actCnt += v
}
require.Equal(t, regCnt, actCnt)
if len(regIDs) < len(allAddrs) {
require.Equal(t, len(regIDs), len(checkMap))
exp := -1
for _, v := range checkMap {
if exp == -1 {
exp = v
} else {
require.Equal(t, exp, v)
}
}
} else {
// Using RR, it means region cnt for each tiflash_compute node should be almost same.
minV := regCnt
for _, v := range checkMap {
if v < minV {
minV = v
}
}
for k, v := range checkMap {
checkMap[k] = v - minV
}
for _, v := range checkMap {
require.True(t, v == 0 || v == 1)
}
}
}
}
func TestTopoFetcherBackoff(t *testing.T) {
fetchTopoBo := backoff.NewBackofferWithVars(context.Background(), fetchTopoMaxBackoff, nil)
expectErr := errors.New("Cannot find proper topo from AutoScaler")
var retryNum int
start := time.Now()
for {
retryNum++
if err := fetchTopoBo.Backoff(tikv.BoTiFlashRPC(), expectErr); err != nil {
break
}
logutil.BgLogger().Info("TestTopoFetcherBackoff", zap.Any("retryNum", retryNum))
}
dura := time.Since(start)
// fetchTopoMaxBackoff is milliseconds.
require.GreaterOrEqual(t, dura, time.Duration(fetchTopoMaxBackoff*1000))
require.GreaterOrEqual(t, dura, 30*time.Second)
require.LessOrEqual(t, dura, 50*time.Second)
}
| pkg/store/copr/batch_coprocessor_test.go | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.00030783755937591195,
0.00017697442672215402,
0.00016254688671324402,
0.00017225902411155403,
0.000026004974643001333
] |
{
"id": 10,
"code_window": [
"\t\t\t\tif err := json.Unmarshal(block.JsonTable, jsonTable); err != nil {\n",
"\t\t\t\t\treturn errors.Trace(err)\n",
"\t\t\t\t}\n",
"\t\t\t\tselect {\n",
"\t\t\t\tcase <-ectx.Done():\n",
"\t\t\t\t\treturn nil\n",
"\t\t\t\tcase taskCh <- &statstypes.PartitionStatisticLoadTask{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// reset the block.JsonTable to nil to make it garbage collected as soon as possible\n",
"\t\t\t\tblock.JsonTable = nil\n",
"\n"
],
"file_path": "br/pkg/metautil/statsfile.go",
"type": "add",
"edit_start_line_idx": 227
} | # TestTruncateAlloc
# It tests that the auto_increment ID does not reuse the old table's allocator.
drop table if exists truncate_id;
create table truncate_id (a int primary key auto_increment);
insert truncate_id values (), (), (), (), (), (), (), (), (), ();
truncate table truncate_id;
insert truncate_id values (), (), (), (), (), (), (), (), (), ();
select a from truncate_id where a > 11;
# TestIssue19127
drop table if exists issue19127;
create table issue19127 (c_int int, c_str varchar(40), primary key (c_int, c_str) ) partition by hash (c_int) partitions 4;
insert into issue19127 values (9, 'angry williams'), (10, 'thirsty hugle');
update issue19127 set c_int = c_int + 10, c_str = 'adoring stonebraker' where c_int in (10, 9);
--sorted_result
select * from issue19127;
# TestLoadClientInteractive
select @@wait_timeout;
# TestHostLengthMax
drop user if exists 'abcddfjakldfjaldddds'@'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa';
CREATE USER 'abcddfjakldfjaldddds'@'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa';
-- error 1470
CREATE USER 'abcddfjakldfjaldddds'@'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa';
# TestDeletePanic
drop table if exists t;
create table t (c int);
insert into t values (1), (2), (3);
delete from `t` where `c` = 1;
delete from `t` where `c` = 2;
select * from t;
# TestSpecifyIndexPrefixLength
drop table if exists t;
-- error 1089
create table t (c1 char, index(c1(3)));
-- error 1089
create table t (c1 int, index(c1(3)));
-- error 1089
create table t (c1 bit(10), index(c1(3)));
create table t (c1 char, c2 int, c3 bit(10));
-- error 1089
create index idx_c1 on t (c1(3));
-- error 1089
create index idx_c1 on t (c2(3));
-- error 1089
create index idx_c1 on t (c3(3));
drop table if exists t;
-- error 1170
create table t (c1 int, c2 blob, c3 varchar(64), index(c2));
create table t (c1 int, c2 blob, c3 varchar(64));
-- error 1170
create index idx_c1 on t (c2);
-- error 1071
create index idx_c1 on t (c2(555555));
-- error 1089
create index idx_c1 on t (c1(5));
create index idx_c1 on t (c1);
create index idx_c2 on t (c2(3));
create unique index idx_c3 on t (c3(5));
insert into t values (3, 'abc', 'def');
select c2 from t where c2 = 'abc';
insert into t values (4, 'abcd', 'xxx');
insert into t values (4, 'abcf', 'yyy');
select c2 from t where c2 = 'abcf';
select c2 from t where c2 = 'abcd';
insert into t values (4, 'ignore', 'abcdeXXX');
-- error 1062
insert into t values (5, 'ignore', 'abcdeYYY');
select c3 from t where c3 = 'abcde';
delete from t where c3 = 'abcdeXXX';
delete from t where c2 = 'abc';
select c2 from t where c2 > 'abcd';
select c2 from t where c2 < 'abcf';
select c2 from t where c2 >= 'abcd';
select c2 from t where c2 <= 'abcf';
select c2 from t where c2 != 'abc';
select c2 from t where c2 != 'abcd';
drop table if exists t1;
create table t1 (a int, b char(255), key(a, b(20)));
insert into t1 values (0, '1');
update t1 set b = b + 1 where a = 0;
select b from t1 where a = 0;
drop table if exists t;
create table t (a text, b text, c int, index (a(3), b(3), c));
insert into t values ('abc', 'abcd', 1);
insert into t values ('abcx', 'abcf', 2);
insert into t values ('abcy', 'abcf', 3);
insert into t values ('bbc', 'abcd', 4);
insert into t values ('bbcz', 'abcd', 5);
insert into t values ('cbck', 'abd', 6);
select c from t where a = 'abc' and b <= 'abc';
select c from t where a = 'abc' and b <= 'abd';
select c from t where a < 'cbc' and b > 'abcd';
select c from t where a <= 'abd' and b > 'abc';
select c from t where a < 'bbcc' and b = 'abcd';
select c from t where a > 'bbcf';
# TestLastInsertID
drop table if exists t;
create table t (c1 int not null auto_increment, c2 int, PRIMARY KEY (c1));
insert into t set c2 = 11;
select last_insert_id();
insert into t (c2) values (22), (33), (44);
select last_insert_id();
insert into t (c1, c2) values (10, 55);
select last_insert_id();
replace t (c2) values(66);
select * from t;
select last_insert_id();
update t set c1=last_insert_id(c1 + 100);
select * from t;
select last_insert_id();
insert into t (c2) values (77);
select last_insert_id();
drop table t;
select last_insert_id();
create table t (c2 int, c3 int, c1 int not null auto_increment, PRIMARY KEY (c1));
insert into t set c2 = 30;
prepare stmt1 from 'insert into t (c2) values (?)';
set @v1=10;
set @v2=20;
execute stmt1 using @v1;
execute stmt1 using @v2;
deallocate prepare stmt1;
select c1 from t where c2 = 20;
| tests/integrationtest/t/session/session.test | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.00017545590526424348,
0.00017103469872381538,
0.0001658427354414016,
0.00017159144044853747,
0.0000027419048365118215
] |
{
"id": 11,
"code_window": [
"\tserverMemLimitBeforeAdjust atomicutil.Uint64\n",
"\tpercentageBeforeAdjust atomicutil.Float64\n",
"\tnextGCTriggeredByMemoryLimit atomicutil.Bool\n",
"}\n",
"\n",
"// fallbackPercentage indicates the fallback memory limit percentage when turning.\n",
"const fallbackPercentage float64 = 1.1\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\t// The flag to disable memory limit adjust. There might be many tasks need to activate it in future,\n",
"\t// so it is integer type.\n",
"\tadjustDisabled atomicutil.Int64\n"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner.go",
"type": "add",
"edit_start_line_idx": 41
} | // Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metautil
import (
"bytes"
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"github.com/gogo/protobuf/proto"
"github.com/pingcap/errors"
backuppb "github.com/pingcap/kvproto/pkg/brpb"
berrors "github.com/pingcap/tidb/br/pkg/errors"
"github.com/pingcap/tidb/br/pkg/storage"
"github.com/pingcap/tidb/br/pkg/utils"
"github.com/pingcap/tidb/pkg/parser/model"
"github.com/pingcap/tidb/pkg/statistics/handle"
statstypes "github.com/pingcap/tidb/pkg/statistics/handle/types"
statsutil "github.com/pingcap/tidb/pkg/statistics/handle/util"
"golang.org/x/sync/errgroup"
)
var maxStatsJsonTableSize = 32 * 1024 * 1024 // 32 MiB
var inlineSize = 8 * 1024 // 8 KiB
func getStatsFileName(physicalID int64) string {
return fmt.Sprintf("backupmeta.schema.stats.%09d", physicalID)
}
// A lightweight function wrapper to dump the statistic
type StatsWriter struct {
storage storage.ExternalStorage
cipher *backuppb.CipherInfo
// final stats file indexes
statsFileIndexes []*backuppb.StatsFileIndex
// temporary variables, clear after each flush
totalSize int
statsFile *backuppb.StatsFile
}
func newStatsWriter(
storage storage.ExternalStorage,
cipher *backuppb.CipherInfo,
) *StatsWriter {
return &StatsWriter{
storage: storage,
cipher: cipher,
statsFileIndexes: make([]*backuppb.StatsFileIndex, 0),
totalSize: 0,
statsFile: &backuppb.StatsFile{
Blocks: make([]*backuppb.StatsBlock, 0, 8),
},
}
}
func (s *StatsWriter) clearTemporary() {
// clear the temporary variables
s.totalSize = 0
s.statsFile = &backuppb.StatsFile{
Blocks: make([]*backuppb.StatsBlock, 0, 8),
}
}
func (s *StatsWriter) writeStatsFileAndClear(ctx context.Context, physicalID int64) error {
fileName := getStatsFileName(physicalID)
content, err := proto.Marshal(s.statsFile)
if err != nil {
return errors.Trace(err)
}
if len(s.statsFileIndexes) == 0 && len(content) < inlineSize {
s.statsFileIndexes = append(s.statsFileIndexes, &backuppb.StatsFileIndex{InlineData: content})
return nil
}
checksum := sha256.Sum256(content)
encryptedContent, iv, err := Encrypt(content, s.cipher)
if err != nil {
return errors.Trace(err)
}
if err := s.storage.WriteFile(ctx, fileName, encryptedContent); err != nil {
return errors.Trace(err)
}
s.statsFileIndexes = append(s.statsFileIndexes, &backuppb.StatsFileIndex{
Name: fileName,
Sha256: checksum[:],
SizeEnc: uint64(len(encryptedContent)),
SizeOri: uint64(len(content)),
CipherIv: iv,
})
s.clearTemporary()
return nil
}
func (s *StatsWriter) BackupStats(ctx context.Context, jsonTable *statsutil.JSONTable, physicalID int64) error {
if jsonTable == nil {
return nil
}
statsBytes, err := json.Marshal(jsonTable)
if err != nil {
return errors.Trace(err)
}
s.totalSize += len(statsBytes)
s.statsFile.Blocks = append(s.statsFile.Blocks, &backuppb.StatsBlock{
PhysicalId: physicalID,
JsonTable: statsBytes,
})
// check whether need to flush
if s.totalSize > maxStatsJsonTableSize {
if err := s.writeStatsFileAndClear(ctx, physicalID); err != nil {
return errors.Trace(err)
}
}
return nil
}
func (s *StatsWriter) BackupStatsDone(ctx context.Context) ([]*backuppb.StatsFileIndex, error) {
if s.totalSize == 0 || len(s.statsFile.Blocks) == 0 {
return s.statsFileIndexes, nil
}
if err := s.writeStatsFileAndClear(ctx, s.statsFile.Blocks[0].PhysicalId); err != nil {
return nil, errors.Trace(err)
}
return s.statsFileIndexes, nil
}
func RestoreStats(
ctx context.Context,
storage storage.ExternalStorage,
cipher *backuppb.CipherInfo,
statsHandler *handle.Handle,
newTableInfo *model.TableInfo,
statsFileIndexes []*backuppb.StatsFileIndex,
rewriteIDMap map[int64]int64,
) error {
eg, ectx := errgroup.WithContext(ctx)
taskCh := make(chan *statstypes.PartitionStatisticLoadTask, 8)
eg.Go(func() error {
return downloadStats(ectx, storage, cipher, statsFileIndexes, rewriteIDMap, taskCh)
})
eg.Go(func() error {
// NOTICE: skip updating cache after load stats from json
return statsHandler.LoadStatsFromJSONConcurrently(ectx, newTableInfo, taskCh, 0)
})
return eg.Wait()
}
func downloadStats(
ctx context.Context,
storage storage.ExternalStorage,
cipher *backuppb.CipherInfo,
statsFileIndexes []*backuppb.StatsFileIndex,
rewriteIDMap map[int64]int64,
taskCh chan<- *statstypes.PartitionStatisticLoadTask,
) error {
defer close(taskCh)
eg, ectx := errgroup.WithContext(ctx)
downloadWorkerpool := utils.NewWorkerPool(4, "download stats for each partition")
for _, statsFileIndex := range statsFileIndexes {
if ectx.Err() != nil {
break
}
statsFile := statsFileIndex
downloadWorkerpool.ApplyOnErrorGroup(eg, func() error {
var statsContent []byte
if len(statsFile.InlineData) > 0 {
statsContent = statsFile.InlineData
} else {
content, err := storage.ReadFile(ectx, statsFile.Name)
if err != nil {
return errors.Trace(err)
}
decryptContent, err := Decrypt(content, cipher, statsFile.CipherIv)
if err != nil {
return errors.Trace(err)
}
checksum := sha256.Sum256(decryptContent)
if !bytes.Equal(statsFile.Sha256, checksum[:]) {
return berrors.ErrInvalidMetaFile.GenWithStackByArgs(fmt.Sprintf(
"checksum mismatch expect %x, got %x", statsFile.Sha256, checksum[:]))
}
statsContent = decryptContent
}
statsFileBlocks := &backuppb.StatsFile{}
if err := proto.Unmarshal(statsContent, statsFileBlocks); err != nil {
return errors.Trace(err)
}
for _, block := range statsFileBlocks.Blocks {
physicalId, ok := rewriteIDMap[block.PhysicalId]
if !ok {
return berrors.ErrRestoreInvalidRewrite.GenWithStackByArgs(fmt.Sprintf(
"not rewrite rule matched, old physical id: %d", block.PhysicalId))
}
jsonTable := &statsutil.JSONTable{}
if err := json.Unmarshal(block.JsonTable, jsonTable); err != nil {
return errors.Trace(err)
}
select {
case <-ectx.Done():
return nil
case taskCh <- &statstypes.PartitionStatisticLoadTask{
PhysicalID: physicalId,
JSONTable: jsonTable,
}:
}
}
return nil
})
}
return eg.Wait()
}
| br/pkg/metautil/statsfile.go | 1 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.0004231610510032624,
0.00018107547657564282,
0.00016617024084553123,
0.00017078383825719357,
0.00004948965215589851
] |
{
"id": 11,
"code_window": [
"\tserverMemLimitBeforeAdjust atomicutil.Uint64\n",
"\tpercentageBeforeAdjust atomicutil.Float64\n",
"\tnextGCTriggeredByMemoryLimit atomicutil.Bool\n",
"}\n",
"\n",
"// fallbackPercentage indicates the fallback memory limit percentage when turning.\n",
"const fallbackPercentage float64 = 1.1\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\t// The flag to disable memory limit adjust. There might be many tasks need to activate it in future,\n",
"\t// so it is integer type.\n",
"\tadjustDisabled atomicutil.Int64\n"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner.go",
"type": "add",
"edit_start_line_idx": 41
} | // Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jointest
import (
"testing"
"github.com/pingcap/tidb/pkg/config"
"github.com/pingcap/tidb/pkg/meta/autoid"
"github.com/tikv/client-go/v2/tikv"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
autoid.SetStep(5000)
config.UpdateGlobal(func(conf *config.Config) {
conf.Instance.SlowThreshold = 30000 // 30s
conf.TiKVClient.AsyncCommit.SafeWindow = 0
conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0
conf.Experimental.AllowsExpressionIndex = true
})
tikv.EnableFailpoints()
opts := []goleak.Option{
goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"),
goleak.IgnoreTopFunction("github.com/bazelbuild/rules_go/go/tools/bzltestutil.RegisterTimeoutHandler.func1"),
goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"),
goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"),
goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
goleak.IgnoreTopFunction("gopkg.in/natefinch/lumberjack%2ev2.(*Logger).millRun"),
goleak.IgnoreTopFunction("github.com/tikv/client-go/v2/txnkv/transaction.keepAlive"),
}
goleak.VerifyTestMain(m, opts...)
}
| pkg/executor/test/jointest/main_test.go | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.00017762619245331734,
0.00017496950749773532,
0.00017076055519282818,
0.0001754264667397365,
0.00000254819133260753
] |
{
"id": 11,
"code_window": [
"\tserverMemLimitBeforeAdjust atomicutil.Uint64\n",
"\tpercentageBeforeAdjust atomicutil.Float64\n",
"\tnextGCTriggeredByMemoryLimit atomicutil.Bool\n",
"}\n",
"\n",
"// fallbackPercentage indicates the fallback memory limit percentage when turning.\n",
"const fallbackPercentage float64 = 1.1\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\t// The flag to disable memory limit adjust. There might be many tasks need to activate it in future,\n",
"\t// so it is integer type.\n",
"\tadjustDisabled atomicutil.Int64\n"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner.go",
"type": "add",
"edit_start_line_idx": 41
} | #!/bin/bash
#
# Copyright 2023 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -eu
cur=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
. $cur/../_utils/run_services
PD_CONFIG=${PD_CONFIG:-"$cur/../config/pd.toml"}
TIDB_CONFIG=${TIDB_CONFIG:-"$cur/../config/tidb.toml"}
bin/pd-server --join "https://$PD_ADDR" \
--client-urls "https://${PD_ADDR}2" \
--peer-urls "https://${PD_PEER_ADDR}2" \
--log-file "$TEST_DIR/pd2.log" \
--data-dir "$TEST_DIR/pd2" \
--name pd2 \
--config $PD_CONFIG &
# strange that new PD can't join too quickly
sleep 10
bin/pd-server --join "https://$PD_ADDR" \
--client-urls "https://${PD_ADDR}3" \
--peer-urls "https://${PD_PEER_ADDR}3" \
--log-file "$TEST_DIR/pd3.log" \
--data-dir "$TEST_DIR/pd3" \
--name pd3 \
--config $PD_CONFIG &
# restart TiDB to let TiDB load new PD nodes
killall tidb-server
# wait for TiDB to exit to release file lock
sleep 5
start_tidb
export GO_FAILPOINTS='github.com/pingcap/tidb/br/pkg/lightning/importer/beforeRun=sleep(60000)'
run_lightning --backend local --enable-checkpoint=0 --pd-urls '127.0.0.1:9999,127.0.0.1:2379' &
lightning_pid=$!
# in many libraries, etcd client's auto-sync-interval is 30s, so we need to wait at least 30s before kill PD leader
sleep 45
kill $(cat /tmp/backup_restore_test/pd_pid.txt)
# Check that everything is correctly imported
wait $lightning_pid
run_sql 'SELECT count(*), sum(c) FROM cpeng.a'
check_contains 'count(*): 4'
check_contains 'sum(c): 10'
run_sql 'SELECT count(*), sum(c) FROM cpeng.b'
check_contains 'count(*): 4'
check_contains 'sum(c): 46'
restart_services
| br/tests/lightning_pd_leader_switch/run.sh | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.00017804722301661968,
0.00017173429660033435,
0.0001644431904423982,
0.00017206367920152843,
0.00000459749981018831
] |
{
"id": 11,
"code_window": [
"\tserverMemLimitBeforeAdjust atomicutil.Uint64\n",
"\tpercentageBeforeAdjust atomicutil.Float64\n",
"\tnextGCTriggeredByMemoryLimit atomicutil.Bool\n",
"}\n",
"\n",
"// fallbackPercentage indicates the fallback memory limit percentage when turning.\n",
"const fallbackPercentage float64 = 1.1\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\t// The flag to disable memory limit adjust. There might be many tasks need to activate it in future,\n",
"\t// so it is integer type.\n",
"\tadjustDisabled atomicutil.Int64\n"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner.go",
"type": "add",
"edit_start_line_idx": 41
} | // Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
"context"
"fmt"
"sort"
"sync"
"testing"
"github.com/coreos/go-semver/semver"
"github.com/pingcap/kvproto/pkg/import_sstpb"
"github.com/pingcap/kvproto/pkg/metapb"
kv "github.com/pingcap/tidb/br/pkg/lightning/tikv"
"github.com/stretchr/testify/require"
pdhttp "github.com/tikv/pd/client/http"
)
var (
// Samples from importer backend for testing the Check***Version functions.
// No need keep these versions in sync.
requiredMinPDVersion = *semver.New("2.1.0")
requiredMinTiKVVersion = *semver.New("2.1.0")
requiredMaxPDVersion = *semver.New("6.0.0")
requiredMaxTiKVVersion = *semver.New("6.0.0")
)
type mockGetStoresCli struct {
pdhttp.Client
storesInfo *pdhttp.StoresInfo
}
func (c mockGetStoresCli) GetStores(context.Context) (*pdhttp.StoresInfo, error) {
return c.storesInfo, nil
}
func TestForAllStores(t *testing.T) {
cli := mockGetStoresCli{}
cli.storesInfo = &pdhttp.StoresInfo{
Count: 3,
Stores: []pdhttp.StoreInfo{
{
Store: pdhttp.MetaStore{
ID: 1,
Address: "127.0.0.1:20160",
Version: "3.0.0-beta.1",
State: int64(metapb.StoreState_Up),
},
},
{
Store: pdhttp.MetaStore{
ID: 5,
Address: "127.0.0.1:20164",
Version: "3.0.1",
State: int64(metapb.StoreState_Offline),
},
},
{
Store: pdhttp.MetaStore{
ID: 4,
Address: "127.0.0.1:20163",
Version: "3.0.0",
State: int64(metapb.StoreState_Tombstone),
},
},
},
}
ctx := context.Background()
var (
allStoresLock sync.Mutex
allStores []*pdhttp.MetaStore
)
err := kv.ForAllStores(ctx, cli, metapb.StoreState_Offline, func(c2 context.Context, store *pdhttp.MetaStore) error {
allStoresLock.Lock()
allStores = append(allStores, store)
allStoresLock.Unlock()
return nil
})
require.NoError(t, err)
sort.Slice(allStores, func(i, j int) bool { return allStores[i].Address < allStores[j].Address })
require.Equal(t, []*pdhttp.MetaStore{
{
ID: 1,
Address: "127.0.0.1:20160",
Version: "3.0.0-beta.1",
State: int64(metapb.StoreState_Up),
},
{
ID: 5,
Address: "127.0.0.1:20164",
Version: "3.0.1",
State: int64(metapb.StoreState_Offline),
},
}, allStores)
}
func TestFetchModeFromMetrics(t *testing.T) {
testCases := []struct {
metrics string
mode import_sstpb.SwitchMode
isErr bool
}{
{
metrics: `tikv_config_rocksdb{cf="default",name="hard_pending_compaction_bytes_limit"} 274877906944`,
mode: import_sstpb.SwitchMode_Normal,
},
{
metrics: `tikv_config_rocksdb{cf="default",name="hard_pending_compaction_bytes_limit"} 0`,
mode: import_sstpb.SwitchMode_Import,
},
{
metrics: ``,
isErr: true,
},
}
for _, tc := range testCases {
comment := fmt.Sprintf("test case '%s'", tc.metrics)
mode, err := kv.FetchModeFromMetrics(tc.metrics)
if tc.isErr {
require.Error(t, err, comment)
} else {
require.NoError(t, err, comment)
require.Equal(t, tc.mode, mode, comment)
}
}
}
type mockGetPDVersionCli struct {
pdhttp.Client
version string
}
func (c mockGetPDVersionCli) GetPDVersion(context.Context) (string, error) {
return c.version, nil
}
func TestCheckPDVersion(t *testing.T) {
ctx := context.Background()
cli := mockGetPDVersionCli{}
cli.version = "v4.0.0-rc.2-451-g760fb650"
require.NoError(t, kv.CheckPDVersion(ctx, cli, requiredMinPDVersion, requiredMaxPDVersion))
cli.version = "v4.0.0"
require.NoError(t, kv.CheckPDVersion(ctx, cli, requiredMinPDVersion, requiredMaxPDVersion))
cli.version = "v9999.0.0"
err := kv.CheckPDVersion(ctx, cli, requiredMinPDVersion, requiredMaxPDVersion)
require.Error(t, err)
require.Regexp(t, "PD version too new.*", err.Error())
cli.version = "v6.0.0"
err = kv.CheckPDVersion(ctx, cli, requiredMinPDVersion, requiredMaxPDVersion)
require.Error(t, err)
require.Regexp(t, "PD version too new.*", err.Error())
cli.version = "v6.0.0-beta"
err = kv.CheckPDVersion(ctx, cli, requiredMinPDVersion, requiredMaxPDVersion)
require.Error(t, err)
require.Regexp(t, "PD version too new.*", err.Error())
cli.version = "v1.0.0"
err = kv.CheckPDVersion(ctx, cli, requiredMinPDVersion, requiredMaxPDVersion)
require.Error(t, err)
require.Regexp(t, "PD version too old.*", err.Error())
}
func TestCheckTiKVVersion(t *testing.T) {
ctx := context.Background()
cli := mockGetStoresCli{}
genStoresInfo := func(versions []string) *pdhttp.StoresInfo {
stores := make([]pdhttp.StoreInfo, 0, len(versions))
for i, v := range versions {
stores = append(stores, pdhttp.StoreInfo{
Store: pdhttp.MetaStore{
Address: fmt.Sprintf("tikv%d.test:20160", i),
Version: v,
},
})
}
return &pdhttp.StoresInfo{
Count: len(versions),
Stores: stores,
}
}
versions := []string{"4.1.0", "v4.1.0-alpha-9-ga27a7dd"}
cli.storesInfo = genStoresInfo(versions)
require.NoError(t, kv.CheckTiKVVersion(ctx, cli, requiredMinTiKVVersion, requiredMaxTiKVVersion))
versions = []string{"9999.0.0", "4.0.0"}
cli.storesInfo = genStoresInfo(versions)
err := kv.CheckTiKVVersion(ctx, cli, requiredMinPDVersion, requiredMaxPDVersion)
require.Error(t, err)
require.Regexp(t, `TiKV \(at tikv0\.test:20160\) version too new.*`, err.Error())
versions = []string{"4.0.0", "1.0.0"}
cli.storesInfo = genStoresInfo(versions)
err = kv.CheckTiKVVersion(ctx, cli, requiredMinPDVersion, requiredMaxPDVersion)
require.Error(t, err)
require.Regexp(t, `TiKV \(at tikv1\.test:20160\) version too old.*`, err.Error())
versions = []string{"6.0.0"}
cli.storesInfo = genStoresInfo(versions)
err = kv.CheckTiKVVersion(ctx, cli, requiredMinPDVersion, requiredMaxPDVersion)
require.Error(t, err)
require.Regexp(t, `TiKV \(at tikv0\.test:20160\) version too new.*`, err.Error())
versions = []string{"6.0.0-beta"}
cli.storesInfo = genStoresInfo(versions)
err = kv.CheckTiKVVersion(ctx, cli, requiredMinPDVersion, requiredMaxPDVersion)
require.Error(t, err)
require.Regexp(t, `TiKV \(at tikv0\.test:20160\) version too new.*`, err.Error())
}
| br/pkg/lightning/tikv/tikv_test.go | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.000346104905474931,
0.00017979143012780696,
0.000167272228281945,
0.00017255127022508532,
0.000034816479455912486
] |
{
"id": 12,
"code_window": [
"\t}\n",
"}\n",
"\n",
"// tuning check the memory nextGC and judge whether this GC is trigger by memory limit.\n",
"// Go runtime ensure that it will be called serially.\n",
"func (t *memoryLimitTuner) tuning() {\n",
"\tif !t.isValidValueSet.Load() {\n",
"\t\treturn\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// DisableAdjustMemoryLimit makes memoryLimitTuner directly return `initGOMemoryLimitValue` when function `calcMemoryLimit` is called.\n",
"func (t *memoryLimitTuner) DisableAdjustMemoryLimit() {\n",
"\tt.adjustDisabled.Add(1)\n",
"\tdebug.SetMemoryLimit(initGOMemoryLimitValue)\n",
"}\n",
"\n",
"// EnableAdjustMemoryLimit makes memoryLimitTuner return an adjusted memory limit when function `calcMemoryLimit` is called.\n",
"func (t *memoryLimitTuner) EnableAdjustMemoryLimit() {\n",
"\tt.adjustDisabled.Add(-1)\n",
"\tt.UpdateMemoryLimit()\n",
"}\n",
"\n"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner.go",
"type": "add",
"edit_start_line_idx": 57
} | // Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
package main
import (
"github.com/pingcap/errors"
"github.com/pingcap/log"
"github.com/pingcap/tidb/br/pkg/gluetikv"
"github.com/pingcap/tidb/br/pkg/summary"
"github.com/pingcap/tidb/br/pkg/task"
"github.com/pingcap/tidb/br/pkg/trace"
"github.com/pingcap/tidb/br/pkg/utils"
"github.com/pingcap/tidb/br/pkg/version/build"
"github.com/pingcap/tidb/pkg/config"
"github.com/pingcap/tidb/pkg/session"
"github.com/pingcap/tidb/pkg/util/metricsutil"
"github.com/spf13/cobra"
"go.uber.org/zap"
"sourcegraph.com/sourcegraph/appdash"
)
func runBackupCommand(command *cobra.Command, cmdName string) error {
cfg := task.BackupConfig{Config: task.Config{LogProgress: HasLogFile()}}
if err := cfg.ParseFromFlags(command.Flags()); err != nil {
command.SilenceUsage = false
return errors.Trace(err)
}
if err := metricsutil.RegisterMetricsForBR(cfg.PD, cfg.KeyspaceName); err != nil {
return errors.Trace(err)
}
ctx := GetDefaultContext()
if cfg.EnableOpenTracing {
var store *appdash.MemoryStore
ctx, store = trace.TracerStartSpan(ctx)
defer trace.TracerFinishSpan(ctx, store)
}
if cfg.FullBackupType == task.FullBackupTypeEBS {
if err := task.RunBackupEBS(ctx, tidbGlue, &cfg); err != nil {
log.Error("failed to backup", zap.Error(err))
return errors.Trace(err)
}
return nil
}
// No need to cache the coproceesor result
config.GetGlobalConfig().TiKVClient.CoprCache.CapacityMB = 0
if err := task.RunBackup(ctx, tidbGlue, cmdName, &cfg); err != nil {
log.Error("failed to backup", zap.Error(err))
return errors.Trace(err)
}
return nil
}
func runBackupRawCommand(command *cobra.Command, cmdName string) error {
cfg := task.RawKvConfig{Config: task.Config{LogProgress: HasLogFile()}}
if err := cfg.ParseBackupConfigFromFlags(command.Flags()); err != nil {
command.SilenceUsage = false
return errors.Trace(err)
}
ctx := GetDefaultContext()
if cfg.EnableOpenTracing {
var store *appdash.MemoryStore
ctx, store = trace.TracerStartSpan(ctx)
defer trace.TracerFinishSpan(ctx, store)
}
if err := task.RunBackupRaw(ctx, gluetikv.Glue{}, cmdName, &cfg); err != nil {
log.Error("failed to backup raw kv", zap.Error(err))
return errors.Trace(err)
}
return nil
}
func runBackupTxnCommand(command *cobra.Command, cmdName string) error {
cfg := task.TxnKvConfig{Config: task.Config{LogProgress: HasLogFile()}}
if err := cfg.ParseBackupConfigFromFlags(command.Flags()); err != nil {
command.SilenceUsage = false
return errors.Trace(err)
}
ctx := GetDefaultContext()
if cfg.EnableOpenTracing {
var store *appdash.MemoryStore
ctx, store = trace.TracerStartSpan(ctx)
defer trace.TracerFinishSpan(ctx, store)
}
if err := task.RunBackupTxn(ctx, gluetikv.Glue{}, cmdName, &cfg); err != nil {
log.Error("failed to backup txn kv", zap.Error(err))
return errors.Trace(err)
}
return nil
}
// NewBackupCommand return a full backup subcommand.
func NewBackupCommand() *cobra.Command {
command := &cobra.Command{
Use: "backup",
Short: "backup a TiDB/TiKV cluster",
SilenceUsage: true,
PersistentPreRunE: func(c *cobra.Command, args []string) error {
if err := Init(c); err != nil {
return errors.Trace(err)
}
build.LogInfo(build.BR)
utils.LogEnvVariables()
task.LogArguments(c)
// Do not run stat worker in BR.
session.DisableStats4Test()
// Do not run ddl worker in BR.
config.GetGlobalConfig().Instance.TiDBEnableDDL.Store(false)
summary.SetUnit(summary.BackupUnit)
return nil
},
}
command.AddCommand(
newFullBackupCommand(),
newDBBackupCommand(),
newTableBackupCommand(),
newRawBackupCommand(),
newTxnBackupCommand(),
)
task.DefineBackupFlags(command.PersistentFlags())
return command
}
// newFullBackupCommand return a full backup subcommand.
func newFullBackupCommand() *cobra.Command {
command := &cobra.Command{
Use: "full",
Short: "backup all database",
// prevents incorrect usage like `--checksum false` instead of `--checksum=false`.
// the former, according to pflag parsing rules, means `--checksum=true false`.
Args: cobra.NoArgs,
RunE: func(command *cobra.Command, _ []string) error {
// empty db/table means full backup.
return runBackupCommand(command, task.FullBackupCmd)
},
}
task.DefineFilterFlags(command, acceptAllTables, false)
task.DefineBackupEBSFlags(command.PersistentFlags())
return command
}
// newDBBackupCommand return a db backup subcommand.
func newDBBackupCommand() *cobra.Command {
command := &cobra.Command{
Use: "db",
Short: "backup a database",
Args: cobra.NoArgs,
RunE: func(command *cobra.Command, _ []string) error {
return runBackupCommand(command, task.DBBackupCmd)
},
}
task.DefineDatabaseFlags(command)
return command
}
// newTableBackupCommand return a table backup subcommand.
func newTableBackupCommand() *cobra.Command {
command := &cobra.Command{
Use: "table",
Short: "backup a table",
Args: cobra.NoArgs,
RunE: func(command *cobra.Command, _ []string) error {
return runBackupCommand(command, task.TableBackupCmd)
},
}
task.DefineTableFlags(command)
return command
}
// newRawBackupCommand return a raw kv range backup subcommand.
func newRawBackupCommand() *cobra.Command {
// TODO: remove experimental tag if it's stable
command := &cobra.Command{
Use: "raw",
Short: "(experimental) backup a raw kv range from TiKV cluster",
Args: cobra.NoArgs,
RunE: func(command *cobra.Command, _ []string) error {
return runBackupRawCommand(command, task.RawBackupCmd)
},
}
task.DefineRawBackupFlags(command)
return command
}
// newTxnBackupCommand return a txn kv range backup subcommand.
func newTxnBackupCommand() *cobra.Command {
command := &cobra.Command{
Use: "txn",
Short: "(experimental) backup a txn kv range from TiKV cluster",
Args: cobra.NoArgs,
RunE: func(command *cobra.Command, _ []string) error {
return runBackupTxnCommand(command, task.TxnBackupCmd)
},
}
task.DefineTxnBackupFlags(command)
return command
}
| br/cmd/br/backup.go | 1 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.0001748443319229409,
0.00016913622675929219,
0.0001647899189265445,
0.00016855628928169608,
0.0000027689682156051276
] |
{
"id": 12,
"code_window": [
"\t}\n",
"}\n",
"\n",
"// tuning check the memory nextGC and judge whether this GC is trigger by memory limit.\n",
"// Go runtime ensure that it will be called serially.\n",
"func (t *memoryLimitTuner) tuning() {\n",
"\tif !t.isValidValueSet.Load() {\n",
"\t\treturn\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// DisableAdjustMemoryLimit makes memoryLimitTuner directly return `initGOMemoryLimitValue` when function `calcMemoryLimit` is called.\n",
"func (t *memoryLimitTuner) DisableAdjustMemoryLimit() {\n",
"\tt.adjustDisabled.Add(1)\n",
"\tdebug.SetMemoryLimit(initGOMemoryLimitValue)\n",
"}\n",
"\n",
"// EnableAdjustMemoryLimit makes memoryLimitTuner return an adjusted memory limit when function `calcMemoryLimit` is called.\n",
"func (t *memoryLimitTuner) EnableAdjustMemoryLimit() {\n",
"\tt.adjustDisabled.Add(-1)\n",
"\tt.UpdateMemoryLimit()\n",
"}\n",
"\n"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner.go",
"type": "add",
"edit_start_line_idx": 57
} | // Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package signal
// SetupSignalHandler sets up the signal handler for TiDB Server.
// Signal handling is not supported on the wasm target, so this is a no-op.
func SetupSignalHandler(shutdownFunc func(bool)) {
}
| pkg/util/signal/signal_wasm.go | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.00017862912500277162,
0.0001736751146381721,
0.00016872110427357256,
0.0001736751146381721,
0.000004954010364599526
] |
{
"id": 12,
"code_window": [
"\t}\n",
"}\n",
"\n",
"// tuning check the memory nextGC and judge whether this GC is trigger by memory limit.\n",
"// Go runtime ensure that it will be called serially.\n",
"func (t *memoryLimitTuner) tuning() {\n",
"\tif !t.isValidValueSet.Load() {\n",
"\t\treturn\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// DisableAdjustMemoryLimit makes memoryLimitTuner directly return `initGOMemoryLimitValue` when function `calcMemoryLimit` is called.\n",
"func (t *memoryLimitTuner) DisableAdjustMemoryLimit() {\n",
"\tt.adjustDisabled.Add(1)\n",
"\tdebug.SetMemoryLimit(initGOMemoryLimitValue)\n",
"}\n",
"\n",
"// EnableAdjustMemoryLimit makes memoryLimitTuner return an adjusted memory limit when function `calcMemoryLimit` is called.\n",
"func (t *memoryLimitTuner) EnableAdjustMemoryLimit() {\n",
"\tt.adjustDisabled.Add(-1)\n",
"\tt.UpdateMemoryLimit()\n",
"}\n",
"\n"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner.go",
"type": "add",
"edit_start_line_idx": 57
} | drop table if exists t_issue_23993;
create table t_issue_23993(a double);
insert into t_issue_23993 values(-790822912);
select cast(a as time) from t_issue_23993;
cast(a as time)
NULL
select a from t_issue_23993 where cast(a as time);
a
drop table if exists t_issue_23993;
create table t_issue_23993(a int);
insert into t_issue_23993 values(-790822912);
select cast(a as time) from t_issue_23993;
cast(a as time)
NULL
select a from t_issue_23993 where cast(a as time);
a
drop table if exists t_issue_23993;
create table t_issue_23993(a decimal);
insert into t_issue_23993 values(-790822912);
select cast(a as time) from t_issue_23993;
cast(a as time)
NULL
select a from t_issue_23993 where cast(a as time);
a
drop table if exists t_issue_23993;
create table t_issue_23993(a varchar(255));
insert into t_issue_23993 values('-790822912');
select cast(a as time) from t_issue_23993;
cast(a as time)
-838:59:59
select a from t_issue_23993 where cast(a as time);
a
-790822912
SELECT HEX(WEIGHT_STRING('ab' AS BINARY(1000000000000000000)));
HEX(WEIGHT_STRING('ab' AS BINARY(1000000000000000000)))
NULL
Level Code Message
Warning 1301 Result of cast_as_binary() was larger than max_allowed_packet (67108864) - truncated
SELECT HEX(WEIGHT_STRING('ab' AS char(1000000000000000000)));
HEX(WEIGHT_STRING('ab' AS char(1000000000000000000)))
NULL
Level Code Message
Warning 1301 Result of weight_string() was larger than max_allowed_packet (67108864) - truncated
drop table if exists m, mp;
CREATE TABLE m (
mid varchar(50) NOT NULL,
ParentId varchar(50) DEFAULT NULL,
PRIMARY KEY (mid),
KEY ind_bm_parent (ParentId,mid)
);
CREATE TABLE mp (
mpid bigint(20) unsigned NOT NULL DEFAULT '0',
mid varchar(50) DEFAULT NULL COMMENT 'module primary key',
sid int,
PRIMARY KEY (mpid)
);
insert into mp values("1","1","0");
insert into m values("0", "0");
SELECT ( SELECT COUNT(1) FROM m WHERE ParentId = c.mid ) expand, bmp.mpid, bmp.mpid IS NULL,bmp.mpid IS NOT NULL, sid FROM m c LEFT JOIN mp bmp ON c.mid = bmp.mid WHERE c.ParentId = '0';
expand mpid bmp.mpid IS NULL bmp.mpid IS NOT NULL sid
1 NULL 1 0 NULL
SELECT bmp.mpid, bmp.mpid IS NULL,bmp.mpid IS NOT NULL FROM m c LEFT JOIN mp bmp ON c.mid = bmp.mid WHERE c.ParentId = '0';
mpid bmp.mpid IS NULL bmp.mpid IS NOT NULL
NULL 1 0
drop table if exists t1;
CREATE TABLE `t1` (
`a` timestamp NULL DEFAULT NULL,
`b` year(4) DEFAULT NULL,
KEY `a` (`a`),
KEY `b` (`b`)
);
insert into t1 values("2002-10-03 04:28:53",2000), ("2002-10-03 04:28:53",2002), (NULL, 2002);
select /*+ inl_join (x,y) */ * from t1 x cross join t1 y on x.a=y.b;
a b a b
select * from t1 x cross join t1 y on x.a>y.b order by x.a, x.b, y.a, y.b;
a b a b
2002-10-03 04:28:53 2000 NULL 2002
2002-10-03 04:28:53 2000 2002-10-03 04:28:53 2000
2002-10-03 04:28:53 2000 2002-10-03 04:28:53 2002
2002-10-03 04:28:53 2002 NULL 2002
2002-10-03 04:28:53 2002 2002-10-03 04:28:53 2000
2002-10-03 04:28:53 2002 2002-10-03 04:28:53 2002
select * from t1 where a = b;
a b
select * from t1 where a < b;
a b
drop table if exists t;
create table t(a int) partition by hash (a div 0) partitions 10;
insert into t values (NULL);
select null div 0;
null div 0
NULL
select * from t;
a
NULL
drop table if exists t;
CREATE TABLE t (
a varchar(8) DEFAULT NULL,
b varchar(8) DEFAULT NULL,
c decimal(20,2) DEFAULT NULL,
d decimal(15,8) DEFAULT NULL
);
insert into t values(20210606, 20210606, 50000.00, 5.04600000);
select a * c *(d/36000) from t;
a * c *(d/36000)
141642663.71666598
select cast(a as double) * cast(c as double) *cast(d/36000 as double) from t;
cast(a as double) * cast(c as double) *cast(d/36000 as double)
141642663.71666598
select 20210606*50000.00*(5.04600000/36000);
20210606*50000.00*(5.04600000/36000)
141642663.71666599297980
select "20210606"*50000.00*(5.04600000/36000);
"20210606"*50000.00*(5.04600000/36000)
141642663.71666598
select cast("20210606" as double)*50000.00*(5.04600000/36000);
cast("20210606" as double)*50000.00*(5.04600000/36000)
141642663.71666598
drop table if exists t1, t2;
create table t1(a int, b varchar(8));
insert into t1 values(1,'1');
create table t2(a int , b varchar(8) GENERATED ALWAYS AS (c) VIRTUAL, c varchar(8), PRIMARY KEY (a));
insert into t2(a) values(1);
select /*+ tidb_inlj(t2) */ t2.b, t1.b from t1 join t2 ON t2.a=t1.a;
b b
NULL 1
drop table if exists t;
CREATE TABLE t (a bigint unsigned PRIMARY KEY);
INSERT INTO t VALUES (0),(1),(2),(3),(18446744073709551600),(18446744073709551605),(18446744073709551610),(18446744073709551615);
ANALYZE TABLE t;
EXPLAIN FORMAT = 'brief' SELECT a FROM t WHERE a >= 0x1 AND a <= 0x2;
id estRows task access object operator info
TableReader 2.00 root data:TableRangeScan
└─TableRangeScan 2.00 cop[tikv] table:t range:[1,2], keep order:false
EXPLAIN FORMAT = 'brief' SELECT a FROM t WHERE a BETWEEN 0x1 AND 0x2;
id estRows task access object operator info
TableReader 2.00 root data:TableRangeScan
└─TableRangeScan 2.00 cop[tikv] table:t range:[1,2], keep order:false
SELECT a FROM t WHERE a BETWEEN 0xFFFFFFFFFFFFFFF5 AND X'FFFFFFFFFFFFFFFA';
a
18446744073709551605
18446744073709551610
set @@tidb_enable_vectorized_expression=true;
select trim(leading from " a "), trim(both from " a "), trim(trailing from " a ");
trim(leading from " a ") trim(both from " a ") trim(trailing from " a ")
a a a
select trim(leading null from " a "), trim(both null from " a "), trim(trailing null from " a ");
trim(leading null from " a ") trim(both null from " a ") trim(trailing null from " a ")
NULL NULL NULL
select trim(null from " a ");
trim(null from " a ")
NULL
set @@tidb_enable_vectorized_expression=false;
select trim(leading from " a "), trim(both from " a "), trim(trailing from " a ");
trim(leading from " a ") trim(both from " a ") trim(trailing from " a ")
a a a
select trim(leading null from " a "), trim(both null from " a "), trim(trailing null from " a ");
trim(leading null from " a ") trim(both null from " a ") trim(trailing null from " a ")
NULL NULL NULL
select trim(null from " a ");
trim(null from " a ")
NULL
set tidb_enable_vectorized_expression=default;
drop table if exists t29142_1;
drop table if exists t29142_2;
create table t29142_1(a int);
create table t29142_2(a double);
insert into t29142_1 value(20);
select sum(distinct a) as x from t29142_1 having x > some ( select a from t29142_2 where x in (a));
x
drop table if exists e;
create table e (e enum('a', 'b'));
insert into e values ('a'), ('b');
select * from e where case 1 when 0 then e end;
e
select * from e where case 1 when 1 then e end;
e
a
b
select * from e where case e when 1 then e end;
e
a
select * from e where case 1 when e then e end;
e
a
drop table if exists t;
create table t (en enum('c', 'b', 'a'));
insert into t values ('a'), ('b'), ('c');
select max(en) from t;
max(en)
c
select min(en) from t;
min(en)
a
select * from t order by en;
en
c
b
a
drop table t;
create table t(s set('c', 'b', 'a'));
insert into t values ('a'), ('b'), ('c');
select max(s) from t;
max(s)
c
select min(s) from t;
min(s)
a
drop table t;
create table t(id int, en enum('c', 'b', 'a'));
insert into t values (1, 'a'),(2, 'b'), (3, 'c'), (1, 'c');
select id, max(en) from t where id=1 group by id;
id max(en)
1 c
select id, min(en) from t where id=1 group by id;
id min(en)
1 a
drop table t;
create table t(id int, s set('c', 'b', 'a'));
insert into t values (1, 'a'),(2, 'b'), (3, 'c'), (1, 'c');
select id, max(s) from t where id=1 group by id;
id max(s)
1 c
select id, min(s) from t where id=1 group by id;
id min(s)
1 a
drop table t;
create table t(e enum('e','d','c','b','a'));
insert into t values ('e'),('d'),('c'),('b'),('a');
select * from t order by e limit 1;
e
e
drop table t;
create table t(s set('e', 'd', 'c', 'b', 'a'));
insert into t values ('e'),('d'),('c'),('b'),('a');
select * from t order by s limit 1;
s
e
drop table t;
select distinct 0.7544678906163867 / 0.68234634;
0.7544678906163867 / 0.68234634
1.10569639842486251190
drop table if exists t_issue_22231;
create table t_issue_22231(a datetime);
insert into t_issue_22231 values('2020--05-20 01:22:12');
select * from t_issue_22231 where a >= '2020-05-13 00:00:00 00:00:00' and a <= '2020-05-28 23:59:59 00:00:00';
a
2020-05-20 01:22:12
Level Code Message
Warning 1292 Truncated incorrect datetime value: '2020-05-13 00:00:00 00:00:00'
Warning 1292 Truncated incorrect datetime value: '2020-05-28 23:59:59 00:00:00'
select cast('2020-10-22 10:31-10:12' as datetime);
cast('2020-10-22 10:31-10:12' as datetime)
2020-10-22 10:31:10
Level Code Message
Warning 1292 Truncated incorrect datetime value: '2020-10-22 10:31-10:12'
select cast('2020-05-28 23:59:59 00:00:00' as datetime);
cast('2020-05-28 23:59:59 00:00:00' as datetime)
2020-05-28 23:59:59
Level Code Message
Warning 1292 Truncated incorrect datetime value: '2020-05-28 23:59:59 00:00:00'
SELECT CAST("1111111111-" AS DATE);
CAST("1111111111-" AS DATE)
NULL
Level Code Message
Warning 1292 Incorrect datetime value: '1111111111-'
drop table if exists t;
create table t (
create_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00',
finish_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00');
insert into t values ('2016-02-13 15:32:24', '2016-02-11 17:23:22');
select timediff(finish_at, create_at) from t;
timediff(finish_at, create_at)
-46:09:02
drop table if exists t1, t2;
create table t1 (c1 int);
create table t2 (c2 int);
insert into t1 values (1);
insert into t2 values (2);
update t1, t2 set t1.c1 = 2, t2.c2 = 1;
update t1, t2 set c1 = 2, c2 = 1;
update t1 as a, t2 as b set a.c1 = 2, b.c2 = 1;
SELECT * FROM t1;
c1
2
SELECT * FROM t2;
c2
1
update t1 as a, t2 as t1 set a.c1 = 1, t1.c2 = 2;
SELECT * FROM t1;
c1
1
SELECT * FROM t2;
c2
2
update t1 as a, t2 set t1.c1 = 10;
Error 1054 (42S22): Unknown column 'c1' in 'field list'
drop table if exists t1, t2;
create table t1 (a int);
create table t2 (a int);
insert into t1 values(1);
insert into t2 values(1);
select tbl1.* from (select t1.a, 1 from t1) tbl1 left join t2 tbl2 on tbl1.a = tbl2.a order by tbl1.a desc limit 1;
a 1
1 1
create database executor__issues2;
use executor__issues2;
create table t(a int);
insert into t values(1);
use executor__issues;
drop table if exists t;
create table t(a int);
insert into t values(1);
update t, executor__issues2.t set executor__issues2.t.a=2;
select * from t;
a
1
select * from executor__issues2.t;
a
2
update executor__issues.t, executor__issues2.t set executor__issues.t.a=3;
select * from t;
a
3
select * from executor__issues2.t;
a
2
drop database executor__issues2;
set @@profiling=1;
SELECT QUERY_ID, SUM(DURATION) AS SUM_DURATION FROM INFORMATION_SCHEMA.PROFILING GROUP BY QUERY_ID;
QUERY_ID SUM_DURATION
0 0
drop table if exists t;
create table t(a char);
insert into t value('a');
select * from t where a < 1 order by a limit 0;
a
drop table if exists t;
create table t (a float);
create index a on t(a);
insert into t values (1.0), (NULL), (0), (2.0);
select `a` from `t` use index (a) where !`a`;
a
0
select `a` from `t` ignore index (a) where !`a`;
a
0
select `a` from `t` use index (a) where `a`;
a
1
2
select `a` from `t` ignore index (a) where `a`;
a
1
2
select a from t use index (a) where not a is true;
a
NULL
0
select a from t use index (a) where not not a is true;
a
1
2
select a from t use index (a) where not not a;
a
1
2
select a from t use index (a) where not not not a is true;
a
NULL
0
select a from t use index (a) where not not not a;
a
0
drop table if exists t1, t2;
create table t1 (c decimal);
create table t2 (c decimal, key(c));
insert into t1 values (null);
insert into t2 values (null);
select count(*) from t1 where not c;
count(*)
0
select count(*) from t2 where not c;
count(*)
0
select count(*) from t1 where c;
count(*)
0
select count(*) from t2 where c;
count(*)
0
drop table if exists t;
create table t (a timestamp);
insert into t values ("1970-07-23 10:04:59"), ("2038-01-19 03:14:07");
select * from t where date_sub(a, interval 10 month) = date_sub("1970-07-23 10:04:59", interval 10 month);
a
1970-07-23 10:04:59
select * from t where timestampadd(hour, 1, a ) = timestampadd(hour, 1, "2038-01-19 03:14:07");
a
2038-01-19 03:14:07
drop table if exists tt;
create table tt(a decimal(10, 0), b varchar(1), c time);
insert into tt values(0, '2', null), (7, null, '1122'), (NULL, 'w', null), (NULL, '2', '3344'), (NULL, NULL, '0'), (7, 'f', '33');
select a and b as d, a or c as e from tt;
d e
0 NULL
NULL 1
0 NULL
NULL 1
NULL NULL
0 1
drop table if exists tt;
create table tt(a decimal(10, 0), b varchar(1), c time);
insert into tt values(0, '2', '123'), (7, null, '1122'), (null, 'w', null);
select a and b as d, a, b from tt order by d limit 1;
d a b
NULL 7 NULL
select b or c as d, b, c from tt order by d limit 1;
d b c
NULL w NULL
drop table if exists t0;
CREATE TABLE t0(c0 FLOAT);
INSERT INTO t0(c0) VALUES (NULL);
SELECT * FROM t0 WHERE NOT(0 OR t0.c0);
c0
drop table if exists t;
create table t(a int, b char);
insert into t values (1,'s'),(2,'b'),(1,'c'),(2,'e'),(1,'a');
insert into t select * from t;
insert into t select * from t;
insert into t select * from t;
select b, count(*) from ( select b from t order by a limit 20 offset 2) as s group by b order by b;
b count(*)
a 6
c 7
s 7
drop table if exists t0;
CREATE TABLE t0(c0 NUMERIC PRIMARY KEY);
INSERT IGNORE INTO t0(c0) VALUES (NULL);
SELECT * FROM t0 WHERE c0;
c0
drop table if exists t;
CREATE TABLE `t` ( `a` enum('WAITING','PRINTED','STOCKUP','CHECKED','OUTSTOCK','PICKEDUP','WILLBACK','BACKED') DEFAULT NULL);
insert into t values(1),(2),(3),(4),(5),(6),(7);
insert into t select * from t;
insert into t select * from t;
insert into t select * from t;
insert into t select * from t;
insert into t select * from t;
insert into t select * from t;
insert into t select * from t;
set @@tidb_max_chunk_size=100;
select distinct a from t order by a;
a
WAITING
PRINTED
STOCKUP
CHECKED
OUTSTOCK
PICKEDUP
WILLBACK
drop table t;
CREATE TABLE `t` ( `a` set('WAITING','PRINTED','STOCKUP','CHECKED','OUTSTOCK','PICKEDUP','WILLBACK','BACKED') DEFAULT NULL);
insert into t values(1),(2),(3),(4),(5),(6),(7);
insert into t select * from t;
insert into t select * from t;
insert into t select * from t;
insert into t select * from t;
insert into t select * from t;
insert into t select * from t;
insert into t select * from t;
set @@tidb_max_chunk_size=100;
select distinct a from t order by a;
a
WAITING
PRINTED
WAITING,PRINTED
STOCKUP
WAITING,STOCKUP
PRINTED,STOCKUP
WAITING,PRINTED,STOCKUP
set @@tidb_max_chunk_size=default;
drop table if exists t2;
create table t2 (a year(4));
insert into t2 values(69);
select * from t2 where a <= 69;
a
2069
drop table if exists t3;
CREATE TABLE `t3` (`y` year DEFAULT NULL, `a` int DEFAULT NULL);
INSERT INTO `t3` VALUES (2069, 70), (2010, 11), (2155, 2156), (2069, 69);
SELECT * FROM `t3` where y <= a;
y a
2155 2156
drop table if exists t3;
create table t3 (a year);
insert into t3 values (1991), ("1992"), ("93"), (94);
select * from t3 where a >= NULL;
a
drop table if exists t;
CREATE TABLE `t` (`id` int(11) DEFAULT NULL, `tp_bigint` bigint(20) DEFAULT NULL );
insert into t values(0,1),(1,9215570218099803537);
select A.tp_bigint,B.id from t A join t B on A.id < B.id * 16 where A.tp_bigint = B.id;
tp_bigint id
1 1
drop table if exists t0;
create table t0 (c0 double);
insert into t0 values (1e30);
update t0 set c0=0 where t0.c0 like 0;
select count(*) from t0 where c0 = 0;
count(*)
0
drop table if exists t;
create table t (a year);
insert into t values(0);
select cast(a as char) from t;
cast(a as char)
0000
SELECT TIMESTAMP '9999-01-01 00:00:00';
TIMESTAMP '9999-01-01 00:00:00'
9999-01-01 00:00:00
drop table if exists ta;
create table ta(id decimal(60,2));
insert into ta values (JSON_EXTRACT('{"c": "1234567890123456789012345678901234567890123456789012345"}', '$.c'));
select * from ta;
id
1234567890123456789012345678901234567890123456789012345.00
drop table if exists t1;
create table t1 (f1 json);
insert into t1(f1) values ('"asd"'),('"asdf"'),('"asasas"');
select f1 from t1 where json_extract(f1,"$") in ("asd","asasas","asdf");
f1
"asd"
"asdf"
"asasas"
select f1 from t1 where json_extract(f1, '$') = 'asd';
f1
"asd"
select f1 from t1 where case json_extract(f1,"$") when "asd" then 1 else 0 end;
f1
"asd"
delete from t1;
insert into t1 values ('{"a": 1}');
select f1 from t1 where f1 in ('{"a": 1}', 'asdf', 'asdf');
f1
select f1 from t1 where f1 in (cast('{"a": 1}' as JSON), 'asdf', 'asdf');
f1
{"a": 1}
select json_extract('"asd"', '$') = 'asd';
json_extract('"asd"', '$') = 'asd'
1
select json_extract('"asd"', '$') <=> 'asd';
json_extract('"asd"', '$') <=> 'asd'
1
select json_extract('"asd"', '$') <> 'asd';
json_extract('"asd"', '$') <> 'asd'
0
select json_extract('{"f": 1.0}', '$.f') = 1.0;
json_extract('{"f": 1.0}', '$.f') = 1.0
1
select json_extract('{"f": 1.0}', '$.f') = '1.0';
json_extract('{"f": 1.0}', '$.f') = '1.0'
0
select json_extract('{"n": 1}', '$') = '{"n": 1}';
json_extract('{"n": 1}', '$') = '{"n": 1}'
0
select json_extract('{"n": 1}', '$') <> '{"n": 1}';
json_extract('{"n": 1}', '$') <> '{"n": 1}'
1
drop table if exists t;
create table t (a int, b int);
insert into t values (2, 20), (1, 10), (3, 30);
select a + 1 as field1, a as field2 from t order by field1, field2 limit 2;
field1 field2
2 1
3 2
drop table if exists t;
create table t (c int auto_increment, key(c)) auto_id_cache 1;
insert into t values();
insert into t values();
select * from t;
c
1
2
drop table if exists test;
create table test(id float primary key clustered AUTO_INCREMENT, col1 int);
replace into test(col1) values(1);
replace into test(col1) values(2);
select * from test;
id col1
1 1
2 2
drop table test;
drop table if exists test;
create table test(id float primary key nonclustered AUTO_INCREMENT, col1 int) AUTO_ID_CACHE 1;
replace into test(col1) values(1);
replace into test(col1) values(2);
select * from test;
id col1
1 1
2 2
drop table test;
create table test2(id double primary key clustered AUTO_INCREMENT, col1 int);
replace into test2(col1) values(1);
insert into test2(col1) values(1);
replace into test2(col1) values(1);
insert into test2(col1) values(1);
replace into test2(col1) values(1);
replace into test2(col1) values(1);
select * from test2;
id col1
1 1
2 1
3 1
4 1
5 1
6 1
drop table test2;
create table test2(id double primary key nonclustered AUTO_INCREMENT, col1 int) AUTO_ID_CACHE 1;
replace into test2(col1) values(1);
insert into test2(col1) values(1);
replace into test2(col1) values(1);
insert into test2(col1) values(1);
replace into test2(col1) values(1);
replace into test2(col1) values(1);
select * from test2;
id col1
1 1
2 1
3 1
4 1
5 1
6 1
drop table test2;
drop table if exists t1;
CREATE TABLE t1 (
c_int int(11) NOT NULL,
c_str varbinary(40) NOT NULL,
c_datetime datetime DEFAULT NULL,
c_timestamp timestamp NULL DEFAULT NULL,
c_double double DEFAULT NULL,
c_decimal decimal(12,6) DEFAULT NULL,
c_enum enum('blue','green','red','yellow','white','orange','purple') DEFAULT NULL,
PRIMARY KEY (c_int,c_str) /*T![clustered_index] CLUSTERED */,
KEY c_int_2 (c_int),
KEY c_decimal (c_decimal),
KEY c_datetime (c_datetime)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin
PARTITION BY LIST COLUMNS(c_int)
(PARTITION p0 VALUES IN (1,5,9,13,17,21,25,29,33,37),
PARTITION p1 VALUES IN (2,6,10,14,18,22,26,30,34,38),
PARTITION p2 VALUES IN (3,7,11,15,19,23,27,31,35,39),
PARTITION p3 VALUES IN (4,8,12,16,20,24,28,32,36,40));
INSERT INTO t1 VALUES (3,'bold goldberg','2020-01-07 12:08:19','2020-06-19 08:13:35',0.941002,5.303000,'yellow'),(1,'crazy wescoff','2020-03-24 21:51:02','2020-06-19 08:13:35',47.565275,6.313000,'orange'),(5,'relaxed gagarin','2020-05-20 11:36:26','2020-06-19 08:13:35',38.948617,3.143000,'green'),(9,'gifted vaughan','2020-04-09 16:19:45','2020-06-19 08:13:35',95.922976,8.708000,'yellow'),(2,'focused taussig','2020-05-17 17:58:34','2020-06-19 08:13:35',4.137803,4.902000,'white'),(6,'fervent yonath','2020-05-26 03:55:25','2020-06-19 08:13:35',72.394272,6.491000,'white'),(18,'mystifying bhaskara','2020-02-19 10:41:48','2020-06-19 08:13:35',10.832397,9.707000,'red'),(4,'goofy saha','2020-03-11 13:24:31','2020-06-19 08:13:35',39.007216,2.446000,'blue'),(20,'mystifying bhaskara','2020-04-03 11:33:27','2020-06-19 08:13:35',85.190386,6.787000,'blue');
DROP TABLE IF EXISTS t2;
CREATE TABLE t2 (
c_int int(11) NOT NULL,
c_str varbinary(40) NOT NULL,
c_datetime datetime DEFAULT NULL,
c_timestamp timestamp NULL DEFAULT NULL,
c_double double DEFAULT NULL,
c_decimal decimal(12,6) DEFAULT NULL,
c_enum enum('blue','green','red','yellow','white','orange','purple') DEFAULT NULL,
PRIMARY KEY (c_int,c_str) /*T![clustered_index] CLUSTERED */,
KEY c_int_2 (c_int),
KEY c_decimal (c_decimal),
KEY c_datetime (c_datetime)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin
PARTITION BY LIST COLUMNS(c_int)
(PARTITION p0 VALUES IN (1,5,9,13,17,21,25,29,33,37),
PARTITION p1 VALUES IN (2,6,10,14,18,22,26,30,34,38),
PARTITION p2 VALUES IN (3,7,11,15,19,23,27,31,35,39),
PARTITION p3 VALUES IN (4,8,12,16,20,24,28,32,36,40));
INSERT INTO t2 VALUES (1,'crazy wescoff','2020-03-24 21:51:02','2020-04-01 12:11:56',47.565275,6.313000,'orange'),(1,'unruffled johnson','2020-06-30 03:42:58','2020-06-14 00:16:50',35.444084,1.090000,'red'),(5,'relaxed gagarin','2020-05-20 11:36:26','2020-02-19 12:25:48',38.948617,3.143000,'green'),(9,'eloquent archimedes','2020-02-16 04:20:21','2020-05-23 15:42:33',32.310878,5.855000,'orange'),(9,'gifted vaughan','2020-04-09 16:19:45','2020-05-15 01:42:16',95.922976,8.708000,'yellow'),(13,'dreamy benz','2020-04-27 17:43:44','2020-03-27 06:33:03',39.539233,4.823000,'red'),(3,'bold goldberg','2020-01-07 12:08:19','2020-03-10 18:37:09',0.941002,5.303000,'yellow'),(3,'youthful yonath','2020-01-12 17:10:39','2020-06-10 15:13:44',66.288511,6.046000,'white'),(7,'upbeat bhabha','2020-04-29 01:17:05','2020-03-11 22:58:43',23.316987,9.026000,'yellow'),(11,'quizzical ritchie','2020-05-16 08:21:36','2020-03-05 19:23:25',75.019379,0.260000,'purple'),(2,'dazzling kepler','2020-04-11 04:38:59','2020-05-06 04:42:32',78.798503,2.274000,'purple'),(2,'focused taussig','2020-05-17 17:58:34','2020-02-25 09:11:03',4.137803,4.902000,'white'),(2,'sharp ptolemy',NULL,'2020-05-17 18:04:19',NULL,5.573000,'purple'),(6,'fervent yonath','2020-05-26 03:55:25','2020-05-06 14:23:44',72.394272,6.491000,'white'),(10,'musing wu','2020-04-03 11:33:27','2020-05-24 06:11:56',85.190386,6.787000,'blue'),(8,'hopeful keller','2020-02-19 10:41:48','2020-04-19 17:10:36',10.832397,9.707000,'red'),(12,'exciting boyd',NULL,'2020-03-28 18:27:23',NULL,9.249000,'blue');
set tidb_txn_assertion_level=strict;
begin;
delete t1, t2 from t1, t2 where t1.c_enum in ('blue');
commit;
set tidb_txn_assertion_level=default;
drop table if exists t1;
create table t1 (_id int PRIMARY KEY, c1 char, index (c1));
insert into t1 values (1, null);
select * from t1 where c1 is null and _id < 1;
_id c1
drop table if exists t1, t2;
CREATE TABLE t1 (
c1 double DEFAULT '1.335088259490289',
c2 set('mj','4s7ht','z','3i','b26','9','cg11','uvzcp','c','ns','fl9') NOT NULL DEFAULT 'mj,z,3i,9,cg11,c',
PRIMARY KEY (c2) /*T![clustered_index] CLUSTERED */,
KEY i1 (c1),
KEY i2 (c1),
KEY i3 (c1)
) ENGINE=InnoDB DEFAULT CHARSET=gbk COLLATE=gbk_chinese_ci;
INSERT INTO t1 VALUES (634.2783557491367,''),(2000.5041449792013,'4s7ht'),(634.2783557491367,'3i'),(634.2783557491367,'9'),(7803.173688589342,'uvzcp'),(634.2783557491367,'ns'),(634.2783557491367,'fl9');
CREATE TABLE t2 (
c3 decimal(56,16) DEFAULT '931359772706767457132645278260455518957.9866038319986886',
c4 set('3bqx','g','6op3','2g','jf','arkd3','y0b','jdy','1g','ff5z','224b') DEFAULT '3bqx,2g,ff5z,224b',
c5 smallint(6) NOT NULL DEFAULT '-25973',
c6 year(4) DEFAULT '2122',
c7 text DEFAULT NULL,
PRIMARY KEY (c5) /*T![clustered_index] CLUSTERED */,
KEY i4 (c6),
KEY i5 (c5)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT=''
PARTITION BY HASH (c5) PARTITIONS 4;
INSERT INTO t2 VALUES (465.0000000000000000,'jdy',-8542,2008,'FgZXe');
set @@sql_mode='';
set tidb_partition_prune_mode=dynamic;
analyze table t1;
analyze table t2;
select /*+ inl_join( t1 , t2 ) */ avg( t2.c5 ) as r0 , repeat( t2.c7 , t2.c5 ) as r1 , locate( t2.c7 , t2.c7 ) as r2 , unhex( t1.c1 ) as r3 from t1 right join t2 on t1.c2 = t2.c5 where not( t2.c5 in ( -7860 ,-13384 ,-12940 ) ) and not( t1.c2 between '4s7ht' and 'mj' );
r0 r1 r2 r3
NULL NULL NULL NULL
select /*+ inl_join (t1, t2) */ t2.c5 from t1 right join t2 on t1.c2 = t2.c5 where not( t1.c2 between '4s7ht' and 'mj' );
c5
set sql_mode=default;
set tidb_partition_prune_mode=default;
drop table if exists ta, tb, tc;
CREATE TABLE ta (
a1 json DEFAULT NULL,
a2 decimal(31, 1) DEFAULT '0'
);
CREATE TABLE tb (
b1 smallint(6) DEFAULT '-11385',
b2 decimal(63, 14) DEFAULT '-6197127648752447138876497216172307937317445669286.98661563645110'
);
CREATE TABLE tc (
c1 text DEFAULT NULL,
c2 float NOT NULL DEFAULT '1.8132474',
PRIMARY KEY (c2)
/*T![clustered_index] CLUSTERED */
);
insert into ta
values (NULL, 1228.0);
insert into ta
values ('"json string1"', 623.8);
insert into ta
values (NULL, 1337.0);
select count(*)from ta where not ( ta.a1 in ( select b2 from tb where not ( ta.a1 in ( select c1 from tc where ta.a2 in ( select b2 from tb where IsNull(ta.a1) ) ) ) ) );
Error 1815 (HY000): expression isnull(cast(executor__issues.ta.a1, var_string(4294967295))) cannot be pushed down
drop table if exists V, F;
create table V (id int primary key, col_int int);
insert into V values (1, 8);
create table F (id int primary key, col_int int);
insert into F values (1, 8);
select table1.`col_int` as field1, table1.`col_int` as field2 from V as table1 left join F as table2 on table1.`col_int` = table2.`col_int` order by field1, field2 desc limit 2;
field1 field2
8 8
set tidb_cost_model_version=2;
set @@session.tidb_enable_list_partition = ON;
drop table if exists t1, t2;
create table t1 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int) , key(c_str(2)) , key(c_decimal) ) partition by list (c_int) ( partition p0 values IN (1, 5, 9, 13, 17, 21, 25, 29, 33, 37), partition p1 values IN (2, 6, 10, 14, 18, 22, 26, 30, 34, 38), partition p2 values IN (3, 7, 11, 15, 19, 23, 27, 31, 35, 39), partition p3 values IN (4, 8, 12, 16, 20, 24, 28, 32, 36, 40)) ;
create table t2 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int) , key(c_str) , key(c_decimal) ) partition by hash (c_int) partitions 4;
insert into t1 values (6, 'musing mayer', 1.280), (7, 'wizardly heisenberg', 6.589), (8, 'optimistic swirles', 9.633), (9, 'hungry haslett', 2.659), (10, 'stupefied wiles', 2.336);
insert into t2 select * from t1 ;
analyze table t1;
analyze table t2;
begin;
select * from t1 where c_str <> any (select c_str from t2 where c_decimal < 5) for update;
c_int c_str c_decimal
10 stupefied wiles 2.336000
6 musing mayer 1.280000
7 wizardly heisenberg 6.589000
8 optimistic swirles 9.633000
9 hungry haslett 2.659000
explain format = 'brief' select * from t1 where c_str <> any (select c_str from t2 where c_decimal < 5) for update;
id estRows task access object operator info
SelectLock 3.20 root for update 0
└─HashJoin 3.20 root CARTESIAN inner join, other cond:or(gt(Column#8, 1), or(ne(executor__issues.t1.c_str, Column#7), if(ne(Column#9, 0), NULL, 0)))
├─Selection(Build) 0.80 root ne(Column#10, 0)
│ └─StreamAgg 1.00 root funcs:max(Column#17)->Column#7, funcs:count(distinct Column#18)->Column#8, funcs:sum(Column#19)->Column#9, funcs:count(1)->Column#10
│ └─Projection 3.00 root executor__issues.t2.c_str->Column#17, executor__issues.t2.c_str->Column#18, cast(isnull(executor__issues.t2.c_str), decimal(20,0) BINARY)->Column#19
│ └─TableReader 3.00 root partition:all data:Selection
│ └─Selection 3.00 cop[tikv] lt(executor__issues.t2.c_decimal, 5)
│ └─TableFullScan 5.00 cop[tikv] table:t2 keep order:false
└─TableReader(Probe) 4.00 root partition:all data:Selection
└─Selection 4.00 cop[tikv] if(isnull(executor__issues.t1.c_str), NULL, 1)
└─TableFullScan 5.00 cop[tikv] table:t1 keep order:false
commit;
set tidb_cost_model_version=default;
set @@session.tidb_enable_list_partition = default;
drop table if exists trade, trade_history, status_type;
set @@foreign_key_checks=0;
CREATE TABLE trade (
t_id bigint(16) NOT NULL AUTO_INCREMENT,
t_dts datetime NOT NULL,
t_st_id char(4) NOT NULL,
t_tt_id char(3) NOT NULL,
t_is_cash tinyint(1) NOT NULL,
t_s_symb char(15) NOT NULL,
t_qty mediumint(7) NOT NULL,
t_bid_price decimal(8,2) NOT NULL,
t_ca_id bigint(12) NOT NULL,
t_exec_name varchar(49) NOT NULL,
t_trade_price decimal(8,2) DEFAULT NULL,
t_chrg decimal(10,2) NOT NULL,
t_comm decimal(10,2) NOT NULL,
t_tax decimal(10,2) NOT NULL,
t_lifo tinyint(1) NOT NULL,
PRIMARY KEY (t_id) /*T![clustered_index] CLUSTERED */,
KEY i_t_ca_id_dts (t_ca_id,t_dts),
KEY i_t_s_symb_dts (t_s_symb,t_dts),
CONSTRAINT fk_trade_st FOREIGN KEY (t_st_id) REFERENCES status_type (st_id),
CONSTRAINT fk_trade_tt FOREIGN KEY (t_tt_id) REFERENCES trade_type (tt_id),
CONSTRAINT fk_trade_s FOREIGN KEY (t_s_symb) REFERENCES security (s_symb),
CONSTRAINT fk_trade_ca FOREIGN KEY (t_ca_id) REFERENCES customer_account (ca_id)
) ;
CREATE TABLE trade_history (
th_t_id bigint(16) NOT NULL,
th_dts datetime NOT NULL,
th_st_id char(4) NOT NULL,
PRIMARY KEY (th_t_id,th_st_id) /*T![clustered_index] NONCLUSTERED */,
KEY i_th_t_id_dts (th_t_id,th_dts),
CONSTRAINT fk_trade_history_t FOREIGN KEY (th_t_id) REFERENCES trade (t_id),
CONSTRAINT fk_trade_history_st FOREIGN KEY (th_st_id) REFERENCES status_type (st_id)
);
CREATE TABLE status_type (
st_id char(4) NOT NULL,
st_name char(10) NOT NULL,
PRIMARY KEY (st_id) /*T![clustered_index] NONCLUSTERED */
);
trace plan SELECT T_ID, T_S_SYMB, T_QTY, ST_NAME, TH_DTS FROM ( SELECT T_ID AS ID FROM TRADE WHERE T_CA_ID = 43000014236 ORDER BY T_DTS DESC LIMIT 10 ) T, TRADE, TRADE_HISTORY, STATUS_TYPE WHERE TRADE.T_ID = ID AND TRADE_HISTORY.TH_T_ID = TRADE.T_ID AND STATUS_TYPE.ST_ID = TRADE_HISTORY.TH_ST_ID ORDER BY TH_DTS DESC LIMIT 30;
set @@foreign_key_checks=default;
drop table if exists partsupp, supplier, nation;
SET GLOBAL tidb_mem_oom_action='CANCEL';
CREATE TABLE `partsupp` ( `PS_PARTKEY` bigint(20) NOT NULL,`PS_SUPPKEY` bigint(20) NOT NULL,`PS_AVAILQTY` bigint(20) NOT NULL,`PS_SUPPLYCOST` decimal(15,2) NOT NULL,`PS_COMMENT` varchar(199) NOT NULL,PRIMARY KEY (`PS_PARTKEY`,`PS_SUPPKEY`) /*T![clustered_index] CLUSTERED */) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
CREATE TABLE `supplier` (`S_SUPPKEY` bigint(20) NOT NULL,`S_NAME` char(25) NOT NULL,`S_ADDRESS` varchar(40) NOT NULL,`S_NATIONKEY` bigint(20) NOT NULL,`S_PHONE` char(15) NOT NULL,`S_ACCTBAL` decimal(15,2) NOT NULL,`S_COMMENT` varchar(101) NOT NULL,PRIMARY KEY (`S_SUPPKEY`) /*T![clustered_index] CLUSTERED */) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
CREATE TABLE `nation` (`N_NATIONKEY` bigint(20) NOT NULL,`N_NAME` char(25) NOT NULL,`N_REGIONKEY` bigint(20) NOT NULL,`N_COMMENT` varchar(152) DEFAULT NULL,PRIMARY KEY (`N_NATIONKEY`) /*T![clustered_index] CLUSTERED */) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
set @@tidb_mem_quota_query=128;
explain select ps_partkey, sum(ps_supplycost * ps_availqty) as value from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'MOZAMBIQUE' group by ps_partkey having sum(ps_supplycost * ps_availqty) > ( select sum(ps_supplycost * ps_availqty) * 0.0001000000 from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'MOZAMBIQUE' ) order by value desc;
Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=<num>]
SET GLOBAL tidb_mem_oom_action = DEFAULT;
set @@tidb_mem_quota_query=default;
drop table if exists issue49369;
CREATE TABLE `issue49369` (
`x` varchar(32) COLLATE utf8mb4_bin DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
insert into t select round(cast('88888899999999999888888888888888888888888888888888888.11111111111111111111' as decimal(18,12)) * cast('88888899999999999888888888888888888888888888888888888.11111111111111111111' as decimal(42,18)) );
Error 1690 (22003): DECIMAL value is out of range in '(18, 12)'
set @@sql_mode = '';
insert into t select round(cast('88888899999999999888888888888888888888888888888888888.11111111111111111111' as decimal(18,12)) * cast('88888899999999999888888888888888888888888888888888888.11111111111111111111' as decimal(42,18)) );
show warnings;
Level Code Message
Warning 1690 DECIMAL value is out of range in '(18, 12)'
Warning 1690 DECIMAL value is out of range in '(42, 18)'
Warning 1690 %s value is out of range in '%s'
select * from t;
c
1
2
2147483647
set @@sql_mode = default;
set @@tidb_max_chunk_size = 32;
drop table if exists t, s;
CREATE TABLE `t` (`c` char(1)) COLLATE=utf8_general_ci ;
insert into t values("V"),("v");
insert into t values("V"),("v"),("v");
CREATE TABLE `s` (`col_61` int);
insert into s values(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1);
insert into s values(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),(1);
SELECT /*+ stream_agg()*/ count(`t`.`c`) FROM (`s`) JOIN `t` GROUP BY `t`.`c`;
count(`t`.`c`)
170
SELECT count(`t`.`c`) FROM (`s`) JOIN `t` GROUP BY `t`.`c`;
count(`t`.`c`)
170
set @@tidb_max_chunk_size = default;
| tests/integrationtest/r/executor/issues.result | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.0015340924728661776,
0.00027654931182041764,
0.00016034403233788908,
0.0001707166084088385,
0.00030541649903170764
] |
{
"id": 12,
"code_window": [
"\t}\n",
"}\n",
"\n",
"// tuning check the memory nextGC and judge whether this GC is trigger by memory limit.\n",
"// Go runtime ensure that it will be called serially.\n",
"func (t *memoryLimitTuner) tuning() {\n",
"\tif !t.isValidValueSet.Load() {\n",
"\t\treturn\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// DisableAdjustMemoryLimit makes memoryLimitTuner directly return `initGOMemoryLimitValue` when function `calcMemoryLimit` is called.\n",
"func (t *memoryLimitTuner) DisableAdjustMemoryLimit() {\n",
"\tt.adjustDisabled.Add(1)\n",
"\tdebug.SetMemoryLimit(initGOMemoryLimitValue)\n",
"}\n",
"\n",
"// EnableAdjustMemoryLimit makes memoryLimitTuner return an adjusted memory limit when function `calcMemoryLimit` is called.\n",
"func (t *memoryLimitTuner) EnableAdjustMemoryLimit() {\n",
"\tt.adjustDisabled.Add(-1)\n",
"\tt.UpdateMemoryLimit()\n",
"}\n",
"\n"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner.go",
"type": "add",
"edit_start_line_idx": 57
} | // Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"fmt"
"runtime"
"sync"
"sync/atomic"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/pkg/config"
"github.com/pingcap/tidb/pkg/infoschema"
"github.com/pingcap/tidb/pkg/parser/model"
"github.com/pingcap/tidb/pkg/parser/mysql"
"github.com/pingcap/tidb/pkg/parser/terror"
"github.com/pingcap/tidb/pkg/sessionctx"
"github.com/pingcap/tidb/pkg/sessionctx/variable"
"github.com/pingcap/tidb/pkg/statistics"
handle_metrics "github.com/pingcap/tidb/pkg/statistics/handle/metrics"
statstypes "github.com/pingcap/tidb/pkg/statistics/handle/types"
"github.com/pingcap/tidb/pkg/statistics/handle/util"
"github.com/pingcap/tidb/pkg/types"
"github.com/pingcap/tidb/pkg/util/intest"
"github.com/pingcap/tidb/pkg/util/sqlexec"
)
// statsReadWriter implements the statstypes.StatsReadWriter interface.
type statsReadWriter struct {
statsHandler statstypes.StatsHandle
}
// NewStatsReadWriter creates a new StatsReadWriter.
func NewStatsReadWriter(statsHandler statstypes.StatsHandle) statstypes.StatsReadWriter {
return &statsReadWriter{statsHandler: statsHandler}
}
// InsertColStats2KV inserts a record to stats_histograms with distinct_count 1 and inserts a bucket to stats_buckets with the default value.
// This operation also updates the stats version.
func (s *statsReadWriter) InsertColStats2KV(physicalID int64, colInfos []*model.ColumnInfo) (err error) {
statsVer := uint64(0)
defer func() {
if err == nil && statsVer != 0 {
s.statsHandler.RecordHistoricalStatsMeta(physicalID, statsVer, util.StatsMetaHistorySourceSchemaChange, false)
}
}()
return util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
startTS, err := util.GetStartTS(sctx)
if err != nil {
return errors.Trace(err)
}
// First of all, we update the version.
_, err = util.Exec(sctx, "update mysql.stats_meta set version = %? where table_id = %?", startTS, physicalID)
if err != nil {
return err
}
statsVer = startTS
		// If the last SQL didn't update anything, it means the stats of this table do not exist.
if sctx.GetSessionVars().StmtCtx.AffectedRows() > 0 {
			// By this step we can get the row count of this table, so we can determine the count and repeats of the bucket.
var rs sqlexec.RecordSet
rs, err = util.Exec(sctx, "select count from mysql.stats_meta where table_id = %?", physicalID)
if err != nil {
return err
}
defer terror.Call(rs.Close)
req := rs.NewChunk(nil)
err = rs.Next(context.Background(), req)
if err != nil {
return err
}
count := req.GetRow(0).GetInt64(0)
for _, colInfo := range colInfos {
value := types.NewDatum(colInfo.GetOriginDefaultValue())
value, err = value.ConvertTo(sctx.GetSessionVars().StmtCtx.TypeCtx(), &colInfo.FieldType)
if err != nil {
return err
}
if value.IsNull() {
					// If the added column's default value is null, all the existing rows have a null value on the newly added column.
if _, err := util.Exec(sctx, "insert into mysql.stats_histograms (version, table_id, is_index, hist_id, distinct_count, null_count) values (%?, %?, 0, %?, 0, %?)", startTS, physicalID, colInfo.ID, count); err != nil {
return err
}
} else {
					// The default value is not null: insert the histogram meta first; the distinct_count will always be one.
if _, err := util.Exec(sctx, "insert into mysql.stats_histograms (version, table_id, is_index, hist_id, distinct_count, tot_col_size) values (%?, %?, 0, %?, 1, %?)", startTS, physicalID, colInfo.ID, int64(len(value.GetBytes()))*count); err != nil {
return err
}
value, err = value.ConvertTo(sctx.GetSessionVars().StmtCtx.TypeCtx(), types.NewFieldType(mysql.TypeBlob))
if err != nil {
return err
}
// There must be only one bucket for this new column and the value is the default value.
if _, err := util.Exec(sctx, "insert into mysql.stats_buckets (table_id, is_index, hist_id, bucket_id, repeats, count, lower_bound, upper_bound) values (%?, 0, %?, 0, %?, %?, %?, %?)", physicalID, colInfo.ID, count, count, value.GetBytes(), value.GetBytes()); err != nil {
return err
}
}
}
}
return nil
}, util.FlagWrapTxn)
}
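// Illustrative summary (not part of the original file): for a new column with a
// non-null default, the transaction above writes one stats_histograms row with
// distinct_count = 1 plus a single stats_buckets row whose lower and upper
// bounds are both the encoded default value; a null default instead writes a
// histogram row whose null_count equals the table's row count.
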
// InsertTableStats2KV inserts a record standing for a new table to stats_meta and inserts some records standing for the
// new columns and indices which belong to this table.
func (s *statsReadWriter) InsertTableStats2KV(info *model.TableInfo, physicalID int64) (err error) {
statsVer := uint64(0)
defer func() {
if err == nil && statsVer != 0 {
s.statsHandler.RecordHistoricalStatsMeta(physicalID, statsVer, util.StatsMetaHistorySourceSchemaChange, false)
}
}()
return util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
startTS, err := util.GetStartTS(sctx)
if err != nil {
return errors.Trace(err)
}
if _, err := util.Exec(sctx, "insert into mysql.stats_meta (version, table_id) values(%?, %?)", startTS, physicalID); err != nil {
return err
}
statsVer = startTS
for _, col := range info.Columns {
if _, err := util.Exec(sctx, "insert into mysql.stats_histograms (table_id, is_index, hist_id, distinct_count, version) values(%?, 0, %?, 0, %?)", physicalID, col.ID, startTS); err != nil {
return err
}
}
for _, idx := range info.Indices {
if _, err := util.Exec(sctx, "insert into mysql.stats_histograms (table_id, is_index, hist_id, distinct_count, version) values(%?, 1, %?, 0, %?)", physicalID, idx.ID, startTS); err != nil {
return err
}
}
return nil
}, util.FlagWrapTxn)
}
// ChangeGlobalStatsID changes the table ID in global-stats to the new table ID.
func (s *statsReadWriter) ChangeGlobalStatsID(from, to int64) (err error) {
return util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
for _, table := range []string{"stats_meta", "stats_top_n", "stats_fm_sketch", "stats_buckets", "stats_histograms", "column_stats_usage"} {
_, err = util.Exec(sctx, "update mysql."+table+" set table_id = %? where table_id = %?", to, from)
if err != nil {
return err
}
}
return nil
}, util.FlagWrapTxn)
}
// UpdateStatsMetaVersionForGC updates the version of mysql.stats_meta.
// See more details in the interface definition.
func (s *statsReadWriter) UpdateStatsMetaVersionForGC(physicalID int64) (err error) {
statsVer := uint64(0)
defer func() {
if err == nil && statsVer != 0 {
s.statsHandler.RecordHistoricalStatsMeta(physicalID, statsVer, util.StatsMetaHistorySourceSchemaChange, false)
}
}()
return util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
startTS, err := util.GetStartTS(sctx)
if err != nil {
return errors.Trace(err)
}
if _, err := util.Exec(
sctx,
"update mysql.stats_meta set version=%? where table_id =%?",
startTS, physicalID,
); err != nil {
return err
}
statsVer = startTS
return nil
}, util.FlagWrapTxn)
}
// UpdateStatsVersion will set the statistics version to the newest TS,
// then tidb-server will reload it automatically.
func (s *statsReadWriter) UpdateStatsVersion() error {
return util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
return UpdateStatsVersion(sctx)
}, util.FlagWrapTxn)
}
// SaveTableStatsToStorage saves the stats of a table to storage.
func (s *statsReadWriter) SaveTableStatsToStorage(results *statistics.AnalyzeResults, analyzeSnapshot bool, source string) (err error) {
var statsVer uint64
err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
statsVer, err = SaveTableStatsToStorage(sctx, results, analyzeSnapshot)
return err
}, util.FlagWrapTxn)
if err == nil && statsVer != 0 {
tableID := results.TableID.GetStatisticsID()
s.statsHandler.RecordHistoricalStatsMeta(tableID, statsVer, source, true)
}
return err
}
// StatsMetaCountAndModifyCount reads count and modify_count for the given table from mysql.stats_meta.
func (s *statsReadWriter) StatsMetaCountAndModifyCount(tableID int64) (count, modifyCount int64, err error) {
err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
count, modifyCount, _, err = StatsMetaCountAndModifyCount(sctx, tableID)
return err
}, util.FlagWrapTxn)
return
}
// UpdateStatsMetaDelta updates the count and modify_count for the given table in mysql.stats_meta.
func (s *statsReadWriter) UpdateStatsMetaDelta(tableID int64, count, delta int64) (err error) {
err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
lockedTables, err := s.statsHandler.GetLockedTables(tableID)
if err != nil {
return errors.Trace(err)
}
		isLocked := len(lockedTables) > 0
startTS, err := util.GetStartTS(sctx)
if err != nil {
return errors.Trace(err)
}
err = UpdateStatsMeta(
sctx,
startTS,
variable.TableDelta{Count: count, Delta: delta},
tableID,
isLocked,
)
return err
}, util.FlagWrapTxn)
return
}
// TableStatsFromStorage loads table stats info from storage.
func (s *statsReadWriter) TableStatsFromStorage(tableInfo *model.TableInfo, physicalID int64, loadAll bool, snapshot uint64) (statsTbl *statistics.Table, err error) {
err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
var ok bool
statsTbl, ok = s.statsHandler.Get(physicalID)
if !ok {
statsTbl = nil
}
statsTbl, err = TableStatsFromStorage(sctx, snapshot, tableInfo, physicalID, loadAll, s.statsHandler.Lease(), statsTbl)
return err
}, util.FlagWrapTxn)
return
}
// SaveStatsToStorage saves the stats to storage.
// If count is negative, neither count nor modify count is used or written to the table; otherwise, the corresponding
// fields in the stats_meta table will be updated.
// TODO: refactor to reduce the number of parameters
func (s *statsReadWriter) SaveStatsToStorage(
tableID int64,
count, modifyCount int64,
isIndex int,
hg *statistics.Histogram,
cms *statistics.CMSketch,
topN *statistics.TopN,
statsVersion int,
isAnalyzed int64,
updateAnalyzeTime bool,
source string,
) (err error) {
var statsVer uint64
err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
statsVer, err = SaveStatsToStorage(sctx, tableID,
count, modifyCount, isIndex, hg, cms, topN, statsVersion, isAnalyzed, updateAnalyzeTime)
return err
}, util.FlagWrapTxn)
if err == nil && statsVer != 0 {
s.statsHandler.RecordHistoricalStatsMeta(tableID, statsVer, source, false)
}
return
}
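// Illustrative only (not part of the original file): per the contract above, a
// negative count lets a caller persist a histogram without touching the stored
// row counts, roughly:
//
//	// hg, cms and topN are assumed to be prepared by the caller
//	err := rw.SaveStatsToStorage(tblID, -1, 0, 0, hg, cms, topN,
//		statistics.Version2, 1, true, "analyze")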
// saveMetaToStorage saves stats meta to the storage.
func (s *statsReadWriter) saveMetaToStorage(tableID, count, modifyCount int64, source string) (err error) {
var statsVer uint64
err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
statsVer, err = SaveMetaToStorage(sctx, tableID, count, modifyCount)
return err
}, util.FlagWrapTxn)
if err == nil && statsVer != 0 {
s.statsHandler.RecordHistoricalStatsMeta(tableID, statsVer, source, false)
}
return
}
// InsertExtendedStats inserts a record into mysql.stats_extended and updates the version in mysql.stats_meta.
func (s *statsReadWriter) InsertExtendedStats(statsName string, colIDs []int64, tp int, tableID int64, ifNotExists bool) (err error) {
var statsVer uint64
err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
statsVer, err = InsertExtendedStats(sctx, s.statsHandler, statsName, colIDs, tp, tableID, ifNotExists)
return err
}, util.FlagWrapTxn)
if err == nil && statsVer != 0 {
s.statsHandler.RecordHistoricalStatsMeta(tableID, statsVer, "extended stats", false)
}
return
}
// MarkExtendedStatsDeleted updates the status of mysql.stats_extended to be `deleted` and the version of mysql.stats_meta.
func (s *statsReadWriter) MarkExtendedStatsDeleted(statsName string, tableID int64, ifExists bool) (err error) {
var statsVer uint64
err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
statsVer, err = MarkExtendedStatsDeleted(sctx, s.statsHandler, statsName, tableID, ifExists)
return err
}, util.FlagWrapTxn)
if err == nil && statsVer != 0 {
s.statsHandler.RecordHistoricalStatsMeta(tableID, statsVer, "extended stats", false)
}
return
}
// SaveExtendedStatsToStorage writes extended stats of a table into mysql.stats_extended.
func (s *statsReadWriter) SaveExtendedStatsToStorage(tableID int64, extStats *statistics.ExtendedStatsColl, isLoad bool) (err error) {
var statsVer uint64
err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
statsVer, err = SaveExtendedStatsToStorage(sctx, tableID, extStats, isLoad)
return err
}, util.FlagWrapTxn)
if err == nil && statsVer != 0 {
s.statsHandler.RecordHistoricalStatsMeta(tableID, statsVer, "extended stats", false)
}
return
}
// LoadTablePartitionStats loads the stats of the given partition from storage.
func (s *statsReadWriter) LoadTablePartitionStats(tableInfo *model.TableInfo, partitionDef *model.PartitionDefinition) (*statistics.Table, error) {
var partitionStats *statistics.Table
partitionStats, err := s.TableStatsFromStorage(tableInfo, partitionDef.ID, true, 0)
if err != nil {
return nil, err
}
	// If err == nil && partitionStats == nil, it means we lack the partition-level stats whose physicalID equals the partition ID.
if partitionStats == nil {
errMsg := fmt.Sprintf("table `%s` partition `%s`", tableInfo.Name.L, partitionDef.Name.L)
err = types.ErrPartitionStatsMissing.GenWithStackByArgs(errMsg)
return nil, err
}
return partitionStats, nil
}
// LoadNeededHistograms will load histograms for the needed columns/indices.
func (s *statsReadWriter) LoadNeededHistograms() (err error) {
err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
loadFMSketch := config.GetGlobalConfig().Performance.EnableLoadFMSketch
return LoadNeededHistograms(sctx, s.statsHandler, loadFMSketch)
}, util.FlagWrapTxn)
return err
}
// ReloadExtendedStatistics drops the cache for extended statistics and reloads data from mysql.stats_extended.
func (s *statsReadWriter) ReloadExtendedStatistics() error {
return util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
tables := make([]*statistics.Table, 0, s.statsHandler.Len())
for _, tbl := range s.statsHandler.Values() {
t, err := ExtendedStatsFromStorage(sctx, tbl.Copy(), tbl.PhysicalID, true)
if err != nil {
return err
}
tables = append(tables, t)
}
s.statsHandler.UpdateStatsCache(tables, nil)
return nil
}, util.FlagWrapTxn)
}
// DumpStatsToJSON dumps statistics to JSON.
func (s *statsReadWriter) DumpStatsToJSON(dbName string, tableInfo *model.TableInfo,
historyStatsExec sqlexec.RestrictedSQLExecutor, dumpPartitionStats bool) (*util.JSONTable, error) {
var snapshot uint64
if historyStatsExec != nil {
sctx := historyStatsExec.(sessionctx.Context)
snapshot = sctx.GetSessionVars().SnapshotTS
}
return s.DumpStatsToJSONBySnapshot(dbName, tableInfo, snapshot, dumpPartitionStats)
}
// DumpHistoricalStatsBySnapshot dumps JSON tables from mysql.stats_meta_history and mysql.stats_history.
// As implemented in getTableHistoricalStatsToJSONWithFallback, if historical stats are nonexistent, it will fall back
// to the latest stats, and these table names (and partition names) will be returned in fallbackTbls.
func (s *statsReadWriter) DumpHistoricalStatsBySnapshot(
dbName string,
tableInfo *model.TableInfo,
snapshot uint64,
) (
jt *util.JSONTable,
fallbackTbls []string,
err error,
) {
historicalStatsEnabled, err := s.statsHandler.CheckHistoricalStatsEnable()
if err != nil {
return nil, nil, errors.Errorf("check %v failed: %v", variable.TiDBEnableHistoricalStats, err)
}
if !historicalStatsEnabled {
return nil, nil, errors.Errorf("%v should be enabled", variable.TiDBEnableHistoricalStats)
}
defer func() {
if err == nil {
handle_metrics.DumpHistoricalStatsSuccessCounter.Inc()
} else {
handle_metrics.DumpHistoricalStatsFailedCounter.Inc()
}
}()
pi := tableInfo.GetPartitionInfo()
if pi == nil {
jt, fallback, err := s.getTableHistoricalStatsToJSONWithFallback(dbName, tableInfo, tableInfo.ID, snapshot)
if fallback {
fallbackTbls = append(fallbackTbls, fmt.Sprintf("%s.%s", dbName, tableInfo.Name.O))
}
return jt, fallbackTbls, err
}
jsonTbl := &util.JSONTable{
DatabaseName: dbName,
TableName: tableInfo.Name.L,
Partitions: make(map[string]*util.JSONTable, len(pi.Definitions)),
}
for _, def := range pi.Definitions {
tbl, fallback, err := s.getTableHistoricalStatsToJSONWithFallback(dbName, tableInfo, def.ID, snapshot)
if err != nil {
return nil, nil, errors.Trace(err)
}
if fallback {
fallbackTbls = append(fallbackTbls, fmt.Sprintf("%s.%s %s", dbName, tableInfo.Name.O, def.Name.O))
}
jsonTbl.Partitions[def.Name.L] = tbl
}
tbl, fallback, err := s.getTableHistoricalStatsToJSONWithFallback(dbName, tableInfo, tableInfo.ID, snapshot)
if err != nil {
return nil, nil, err
}
if fallback {
fallbackTbls = append(fallbackTbls, fmt.Sprintf("%s.%s global", dbName, tableInfo.Name.O))
}
	// dump its global stats if they exist
if tbl != nil {
jsonTbl.Partitions[util.TiDBGlobalStats] = tbl
}
return jsonTbl, fallbackTbls, nil
}
// PersistStatsBySnapshot dumps statistics to JSON and calls the given function to persist each partition's statistics.
// Notice:
//  1. It might call the `persist` function with a nil JSONTable.
//  2. It is only used by BR, so partition statistics are always dumped.
//
// TODO: once we support column-level statistics dump, it should replace `PersistStatsBySnapshot` and `DumpStatsToJSON`.
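//
// A minimal sketch of a persist callback (illustrative only; BR's real callback
// additionally handles encryption and stats-file layout):
//
//	persist := func(ctx context.Context, jt *util.JSONTable, physicalID int64) error {
//		if jt == nil { // the contract above allows a nil table
//			return nil
//		}
//		data, err := json.Marshal(jt)
//		if err != nil {
//			return err
//		}
//		return writeStatsFile(ctx, physicalID, data) // writeStatsFile is hypothetical
//	}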
func (s *statsReadWriter) PersistStatsBySnapshot(
ctx context.Context,
dbName string,
tableInfo *model.TableInfo,
snapshot uint64,
persist statstypes.PersistFunc,
) error {
pi := tableInfo.GetPartitionInfo()
if pi == nil {
jsonTable, err := s.TableStatsToJSON(dbName, tableInfo, tableInfo.ID, snapshot)
if err != nil {
return errors.Trace(err)
}
return persist(ctx, jsonTable, tableInfo.ID)
}
for _, def := range pi.Definitions {
tbl, err := s.TableStatsToJSON(dbName, tableInfo, def.ID, snapshot)
if err != nil {
return errors.Trace(err)
}
if tbl == nil {
continue
}
if err := persist(ctx, tbl, def.ID); err != nil {
return errors.Trace(err)
}
}
	// dump its global stats if they exist
tbl, err := s.TableStatsToJSON(dbName, tableInfo, tableInfo.ID, snapshot)
if err != nil {
return errors.Trace(err)
}
if tbl != nil {
return persist(ctx, tbl, tableInfo.ID)
}
return nil
}
// DumpStatsToJSONBySnapshot dumps statistics to JSON.
func (s *statsReadWriter) DumpStatsToJSONBySnapshot(dbName string, tableInfo *model.TableInfo, snapshot uint64, dumpPartitionStats bool) (*util.JSONTable, error) {
pruneMode, err := util.GetCurrentPruneMode(s.statsHandler.SPool())
if err != nil {
return nil, err
}
isDynamicMode := variable.PartitionPruneMode(pruneMode) == variable.Dynamic
pi := tableInfo.GetPartitionInfo()
if pi == nil {
return s.TableStatsToJSON(dbName, tableInfo, tableInfo.ID, snapshot)
}
jsonTbl := &util.JSONTable{
DatabaseName: dbName,
TableName: tableInfo.Name.L,
Partitions: make(map[string]*util.JSONTable, len(pi.Definitions)),
}
	// dump partition stats only in static mode, or when the dumpPartitionStats flag is enabled in dynamic mode
if !isDynamicMode || dumpPartitionStats {
for _, def := range pi.Definitions {
tbl, err := s.TableStatsToJSON(dbName, tableInfo, def.ID, snapshot)
if err != nil {
return nil, errors.Trace(err)
}
if tbl == nil {
continue
}
jsonTbl.Partitions[def.Name.L] = tbl
}
}
	// dump its global stats if they exist
tbl, err := s.TableStatsToJSON(dbName, tableInfo, tableInfo.ID, snapshot)
if err != nil {
return nil, errors.Trace(err)
}
if tbl != nil {
jsonTbl.Partitions[util.TiDBGlobalStats] = tbl
}
return jsonTbl, nil
}
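// Illustrative only (not part of the original file): for a partitioned table,
// the result keys per-partition stats by partition name and stores the
// table-level stats under util.TiDBGlobalStats; the field names below are an
// assumption about the JSON tags:
//
//	{
//	  "database_name": "test",
//	  "table_name": "t",
//	  "partitions": {"p0": {...}, "p1": {...}, "global": {...}}
//	}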
// getTableHistoricalStatsToJSONWithFallback tries to get the table's historical stats; if they do not exist, it
// falls back directly to the latest stats, and the second return value will be true.
func (s *statsReadWriter) getTableHistoricalStatsToJSONWithFallback(
dbName string,
tableInfo *model.TableInfo,
physicalID int64,
snapshot uint64,
) (
*util.JSONTable,
bool,
error,
) {
jt, exist, err := s.tableHistoricalStatsToJSON(physicalID, snapshot)
if err != nil {
return nil, false, err
}
if !exist {
jt, err = s.TableStatsToJSON(dbName, tableInfo, physicalID, 0)
fallback := true
if snapshot == 0 {
fallback = false
}
return jt, fallback, err
}
return jt, false, nil
}
func (s *statsReadWriter) tableHistoricalStatsToJSON(physicalID int64, snapshot uint64) (jt *util.JSONTable, exist bool, err error) {
err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
jt, exist, err = TableHistoricalStatsToJSON(sctx, physicalID, snapshot)
return err
}, util.FlagWrapTxn)
return
}
// TableStatsToJSON dumps statistics to JSON.
func (s *statsReadWriter) TableStatsToJSON(dbName string, tableInfo *model.TableInfo, physicalID int64, snapshot uint64) (*util.JSONTable, error) {
tbl, err := s.TableStatsFromStorage(tableInfo, physicalID, true, snapshot)
if err != nil || tbl == nil {
return nil, err
}
var jsonTbl *util.JSONTable
err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
tbl.Version, tbl.ModifyCount, tbl.RealtimeCount, err = StatsMetaByTableIDFromStorage(sctx, physicalID, snapshot)
if err != nil {
return err
}
jsonTbl, err = GenJSONTableFromStats(sctx, dbName, tableInfo, tbl)
return err
})
if err != nil {
return nil, err
}
return jsonTbl, nil
}
// TestLoadStatsErr is only for test.
type TestLoadStatsErr struct{}
// LoadStatsFromJSONConcurrently concurrently consumes statistics load tasks from `taskCh`.
func (s *statsReadWriter) LoadStatsFromJSONConcurrently(
ctx context.Context,
tableInfo *model.TableInfo,
taskCh chan *statstypes.PartitionStatisticLoadTask,
concurrencyForPartition int,
) error {
nCPU := runtime.GOMAXPROCS(0)
if concurrencyForPartition == 0 {
concurrencyForPartition = (nCPU + 1) / 2 // default
}
concurrencyForPartition = min(concurrencyForPartition, nCPU) // for safety
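// e.g. with GOMAXPROCS == 8 and concurrencyForPartition == 0, the default above is
// (8+1)/2 = 4 workers, and an explicitly requested value is capped at 8.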
var wg sync.WaitGroup
e := new(atomic.Pointer[error])
for i := 0; i < concurrencyForPartition; i++ {
wg.Add(1)
s.statsHandler.GPool().Go(func() {
defer func() {
if r := recover(); r != nil {
err := fmt.Errorf("%v", r)
e.CompareAndSwap(nil, &err)
}
wg.Done()
}()
for tbl := range taskCh {
if tbl == nil {
continue
}
loadFunc := s.loadStatsFromJSON
if intest.InTest && ctx.Value(TestLoadStatsErr{}) != nil {
loadFunc = ctx.Value(TestLoadStatsErr{}).(func(*model.TableInfo, int64, *util.JSONTable) error)
}
err := loadFunc(tableInfo, tbl.PhysicalID, tbl.JSONTable)
if err != nil {
e.CompareAndSwap(nil, &err)
return
}
if e.Load() != nil {
return
}
}
})
}
wg.Wait()
if e.Load() != nil {
return *e.Load()
}
return nil
}
// LoadStatsFromJSONNoUpdate loads statistics from a JSONTable and saves them to storage.
func (s *statsReadWriter) LoadStatsFromJSONNoUpdate(ctx context.Context, is infoschema.InfoSchema,
jsonTbl *util.JSONTable, concurrencyForPartition int) error {
table, err := is.TableByName(model.NewCIStr(jsonTbl.DatabaseName), model.NewCIStr(jsonTbl.TableName))
if err != nil {
return errors.Trace(err)
}
tableInfo := table.Meta()
pi := tableInfo.GetPartitionInfo()
if pi == nil || jsonTbl.Partitions == nil {
err := s.loadStatsFromJSON(tableInfo, tableInfo.ID, jsonTbl)
if err != nil {
return errors.Trace(err)
}
} else {
// load partition statistics concurrently
taskCh := make(chan *statstypes.PartitionStatisticLoadTask, len(pi.Definitions)+1)
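// the extra slot (+1) leaves room for the optional global-stats task appended below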
for _, def := range pi.Definitions {
tbl := jsonTbl.Partitions[def.Name.L]
if tbl != nil {
taskCh <- &statstypes.PartitionStatisticLoadTask{
PhysicalID: def.ID,
JSONTable: tbl,
}
}
}
// load global-stats if they exist
if globalStats, ok := jsonTbl.Partitions[util.TiDBGlobalStats]; ok {
taskCh <- &statstypes.PartitionStatisticLoadTask{
PhysicalID: tableInfo.ID,
JSONTable: globalStats,
}
}
close(taskCh)
if err := s.LoadStatsFromJSONConcurrently(ctx, tableInfo, taskCh, concurrencyForPartition); err != nil {
return errors.Trace(err)
}
}
return nil
}
// LoadStatsFromJSON loads statistics from a JSONTable and saves them to storage.
// Finally, it also updates the stats cache.
func (s *statsReadWriter) LoadStatsFromJSON(ctx context.Context, is infoschema.InfoSchema,
jsonTbl *util.JSONTable, concurrencyForPartition int) error {
if err := s.LoadStatsFromJSONNoUpdate(ctx, is, jsonTbl, concurrencyForPartition); err != nil {
return errors.Trace(err)
}
return errors.Trace(s.statsHandler.Update(is))
}
func (s *statsReadWriter) loadStatsFromJSON(tableInfo *model.TableInfo, physicalID int64, jsonTbl *util.JSONTable) error {
tbl, err := TableStatsFromJSON(tableInfo, physicalID, jsonTbl)
if err != nil {
return errors.Trace(err)
}
for _, col := range tbl.Columns {
// loadStatsFromJSON doesn't support partitioned tables yet.
// The table level count and modify_count would be overridden by the SaveMetaToStorage below, so we don't need
// to care about them here.
err = s.SaveStatsToStorage(tbl.PhysicalID, tbl.RealtimeCount, 0, 0, &col.Histogram, col.CMSketch, col.TopN, int(col.GetStatsVer()), statistics.AnalyzeFlag, false, util.StatsMetaHistorySourceLoadStats)
if err != nil {
return errors.Trace(err)
}
}
for _, idx := range tbl.Indices {
// loadStatsFromJSON doesn't support partitioned tables yet.
// The table level count and modify_count would be overridden by the SaveMetaToStorage below, so we don't need
// to care about them here.
err = s.SaveStatsToStorage(tbl.PhysicalID, tbl.RealtimeCount, 0, 1, &idx.Histogram, idx.CMSketch, idx.TopN, int(idx.GetStatsVer()), statistics.AnalyzeFlag, false, util.StatsMetaHistorySourceLoadStats)
if err != nil {
return errors.Trace(err)
}
}
err = s.SaveExtendedStatsToStorage(tbl.PhysicalID, tbl.ExtendedStats, true)
if err != nil {
return errors.Trace(err)
}
return s.saveMetaToStorage(tbl.PhysicalID, tbl.RealtimeCount, tbl.ModifyCount, util.StatsMetaHistorySourceLoadStats)
}
| pkg/statistics/handle/storage/stats_read_writer.go | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.0015369743341580033,
0.00022752265795134008,
0.00016196254000533372,
0.00017117868992500007,
0.00023982071434147656
] |
{
"id": 13,
"code_window": [
"\t}\n",
"\tdebug.SetMemoryLimit(memoryLimit)\n",
"}\n",
"\n",
"func (*memoryLimitTuner) calcMemoryLimit(percentage float64) int64 {\n",
"\tmemoryLimit := int64(float64(memory.ServerMemoryLimit.Load()) * percentage) // `tidb_server_memory_limit` * `tidb_server_memory_limit_gc_trigger`\n",
"\tif memoryLimit == 0 {\n",
"\t\tmemoryLimit = math.MaxInt64\n",
"\t}\n",
"\treturn memoryLimit\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (t *memoryLimitTuner) calcMemoryLimit(percentage float64) int64 {\n",
"\tif t.adjustDisabled.Load() > 0 {\n",
"\t\treturn initGOMemoryLimitValue\n",
"\t}\n"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner.go",
"type": "replace",
"edit_start_line_idx": 157
} | // Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gctuner
import (
"math"
"runtime/debug"
"time"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/pkg/util"
"github.com/pingcap/tidb/pkg/util/intest"
"github.com/pingcap/tidb/pkg/util/memory"
atomicutil "go.uber.org/atomic"
)
// GlobalMemoryLimitTuner allows only one memory limit tuner per process
var GlobalMemoryLimitTuner = &memoryLimitTuner{}
// The Go runtime triggers GC when it hits the memory limit managed via runtime/debug.SetMemoryLimit,
// so we can change the memory limit dynamically to avoid frequent GC when memory usage exceeds the limit.
type memoryLimitTuner struct {
finalizer *finalizer
isValidValueSet atomicutil.Bool
percentage atomicutil.Float64
adjustPercentageInProgress atomicutil.Bool
serverMemLimitBeforeAdjust atomicutil.Uint64
percentageBeforeAdjust atomicutil.Float64
nextGCTriggeredByMemoryLimit atomicutil.Bool
}
// fallbackPercentage indicates the fallback memory limit percentage when tuning.
const fallbackPercentage float64 = 1.1
var memoryGoroutineCntInTest = *atomicutil.NewInt64(0)
// WaitMemoryLimitTunerExitInTest waits for the memory limit tuner to exit in tests.
func WaitMemoryLimitTunerExitInTest() {
if intest.InTest {
for memoryGoroutineCntInTest.Load() > 0 {
time.Sleep(100 * time.Millisecond)
}
}
}
// tuning checks the memory NextGC and judges whether this GC was triggered by the memory limit.
// The Go runtime ensures that it is called serially.
func (t *memoryLimitTuner) tuning() {
if !t.isValidValueSet.Load() {
return
}
r := memory.ForceReadMemStats()
gogc := util.GetGOGC()
ratio := float64(100+gogc) / 100
// This `if` checks whether the **last** GC was triggered by MemoryLimit as far as possible.
// If the **last** GC was triggered by MemoryLimit, we'll set MemoryLimit to MAXVALUE to return control back to GOGC
// to avoid frequent GC when memory usage fluctuates above and below MemoryLimit.
// The logic we judge whether the **last** GC was triggered by MemoryLimit is as follows:
// suppose `NextGC` = `HeapInUse * (100 + GOGC) / 100`,
// - If NextGC < MemoryLimit, the **next** GC will **not** be triggered by MemoryLimit thus we do not care about
// why the **last** GC is triggered. And MemoryLimit will not be reset this time.
// - Only if NextGC >= MemoryLimit, the **next** GC will be triggered by MemoryLimit. Thus, we need to reset
// MemoryLimit after the **next** GC happens if needed.
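// Illustrative numbers: with GOGC == 100 (ratio == 2.0) and HeapInuse == 600 MiB, the
// estimated NextGC is 1200 MiB; with a current memory limit of 1000 MiB the condition
// below holds, i.e. the next GC would again be triggered by the memory limit.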
if float64(r.HeapInuse)*ratio > float64(debug.SetMemoryLimit(-1)) {
if t.nextGCTriggeredByMemoryLimit.Load() && t.adjustPercentageInProgress.CompareAndSwap(false, true) {
// It's ok to update `adjustPercentageInProgress`, `serverMemLimitBeforeAdjust` and `percentageBeforeAdjust` not in a transaction.
// The update of memory limit is eventually consistent.
t.serverMemLimitBeforeAdjust.Store(memory.ServerMemoryLimit.Load())
t.percentageBeforeAdjust.Store(t.GetPercentage())
go func() {
if intest.InTest {
memoryGoroutineCntInTest.Inc()
defer memoryGoroutineCntInTest.Dec()
}
memory.MemoryLimitGCLast.Store(time.Now())
memory.MemoryLimitGCTotal.Add(1)
debug.SetMemoryLimit(t.calcMemoryLimit(fallbackPercentage))
resetInterval := 1 * time.Minute // Wait 1 minute and set back, to avoid frequent GC
if intest.InTest {
resetInterval = 3 * time.Second
}
failpoint.Inject("mockUpdateGlobalVarDuringAdjustPercentage", func(val failpoint.Value) {
if val, ok := val.(bool); val && ok {
time.Sleep(300 * time.Millisecond)
t.UpdateMemoryLimit()
}
})
failpoint.Inject("testMemoryLimitTuner", func(val failpoint.Value) {
if val, ok := val.(bool); val && ok {
resetInterval = 1 * time.Second
}
})
time.Sleep(resetInterval)
debug.SetMemoryLimit(t.calcMemoryLimit(t.GetPercentage()))
for !t.adjustPercentageInProgress.CompareAndSwap(true, false) {
continue
}
}()
memory.TriggerMemoryLimitGC.Store(true)
}
t.nextGCTriggeredByMemoryLimit.Store(true)
} else {
t.nextGCTriggeredByMemoryLimit.Store(false)
memory.TriggerMemoryLimitGC.Store(false)
}
}
// Start starts the memory limit tuner.
func (t *memoryLimitTuner) Start() {
t.finalizer = newFinalizer(t.tuning) // Start tuning
}
// Stop stops the memory limit tuner.
func (t *memoryLimitTuner) Stop() {
t.finalizer.stop()
}
// SetPercentage sets the percentage for the memory limit tuner.
func (t *memoryLimitTuner) SetPercentage(percentage float64) {
t.percentage.Store(percentage)
}
// GetPercentage gets the percentage from the memory limit tuner.
func (t *memoryLimitTuner) GetPercentage() float64 {
return t.percentage.Load()
}
// UpdateMemoryLimit updates the memory limit.
// This function should be called when `tidb_server_memory_limit` or `tidb_server_memory_limit_gc_trigger` is modified.
func (t *memoryLimitTuner) UpdateMemoryLimit() {
if t.adjustPercentageInProgress.Load() {
if t.serverMemLimitBeforeAdjust.Load() == memory.ServerMemoryLimit.Load() && t.percentageBeforeAdjust.Load() == t.GetPercentage() {
return
}
}
var memoryLimit = t.calcMemoryLimit(t.GetPercentage())
if memoryLimit == math.MaxInt64 {
t.isValidValueSet.Store(false)
memoryLimit = initGOMemoryLimitValue
} else {
t.isValidValueSet.Store(true)
}
debug.SetMemoryLimit(memoryLimit)
}
func (*memoryLimitTuner) calcMemoryLimit(percentage float64) int64 {
memoryLimit := int64(float64(memory.ServerMemoryLimit.Load()) * percentage) // `tidb_server_memory_limit` * `tidb_server_memory_limit_gc_trigger`
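// e.g. tidb_server_memory_limit = 32 GiB with gc_trigger = 0.7 yields roughly 22.4 GiB;
// a zero result (no server limit configured) is mapped to math.MaxInt64 below.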
if memoryLimit == 0 {
memoryLimit = math.MaxInt64
}
return memoryLimit
}
var initGOMemoryLimitValue int64
func init() {
initGOMemoryLimitValue = debug.SetMemoryLimit(-1)
GlobalMemoryLimitTuner.Start()
}
| pkg/util/gctuner/memory_limit_tuner.go | 1 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.9991669654846191,
0.34078362584114075,
0.00016896083252504468,
0.029649930074810982,
0.46243053674697876
] |
{
"id": 13,
"code_window": [
"\t}\n",
"\tdebug.SetMemoryLimit(memoryLimit)\n",
"}\n",
"\n",
"func (*memoryLimitTuner) calcMemoryLimit(percentage float64) int64 {\n",
"\tmemoryLimit := int64(float64(memory.ServerMemoryLimit.Load()) * percentage) // `tidb_server_memory_limit` * `tidb_server_memory_limit_gc_trigger`\n",
"\tif memoryLimit == 0 {\n",
"\t\tmemoryLimit = math.MaxInt64\n",
"\t}\n",
"\treturn memoryLimit\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (t *memoryLimitTuner) calcMemoryLimit(percentage float64) int64 {\n",
"\tif t.adjustDisabled.Load() > 0 {\n",
"\t\treturn initGOMemoryLimitValue\n",
"\t}\n"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner.go",
"type": "replace",
"edit_start_line_idx": 157
} | // Copyright 2022 PingCAP, Inc. Licensed under Apache-2.0.
package spans_test
import (
"fmt"
"testing"
"github.com/pingcap/tidb/br/pkg/streamhelper/spans"
"github.com/stretchr/testify/require"
)
func TestValuedEquals(t *testing.T) {
s := func(start, end string, val spans.Value) spans.Valued {
return spans.Valued{
Key: spans.Span{
StartKey: []byte(start),
EndKey: []byte(end),
},
Value: val,
}
}
type Case struct {
inputA []spans.Valued
inputB []spans.Valued
required bool
}
cases := []Case{
{
inputA: []spans.Valued{s("0001", "0002", 3)},
inputB: []spans.Valued{s("0001", "0003", 3)},
required: false,
},
{
inputA: []spans.Valued{s("0001", "0002", 3)},
inputB: []spans.Valued{s("0001", "0002", 3)},
required: true,
},
{
inputA: []spans.Valued{s("0001", "0003", 3)},
inputB: []spans.Valued{s("0001", "0002", 3), s("0002", "0003", 3)},
required: true,
},
{
inputA: []spans.Valued{s("0001", "0003", 4)},
inputB: []spans.Valued{s("0001", "0002", 3), s("0002", "0003", 3)},
required: false,
},
{
inputA: []spans.Valued{s("0001", "0003", 3)},
inputB: []spans.Valued{s("0001", "0002", 4), s("0002", "0003", 3)},
required: false,
},
{
inputA: []spans.Valued{s("0001", "0003", 3)},
inputB: []spans.Valued{s("0001", "0002", 3), s("0002", "0004", 3)},
required: false,
},
{
inputA: []spans.Valued{s("", "0003", 3)},
inputB: []spans.Valued{s("0001", "0002", 3), s("0002", "0003", 3)},
required: false,
},
{
inputA: []spans.Valued{s("0001", "", 1)},
inputB: []spans.Valued{s("0001", "0003", 1), s("0004", "", 1)},
required: false,
},
{
inputA: []spans.Valued{s("0001", "0004", 1), s("0001", "0002", 1)},
inputB: []spans.Valued{s("0001", "0002", 1), s("0001", "0004", 1)},
required: true,
},
}
run := func(t *testing.T, c Case) {
require.Equal(t, c.required, spans.ValuedSetEquals(c.inputA, c.inputB))
require.Equal(t, c.required, spans.ValuedSetEquals(c.inputB, c.inputA))
}
for i, c := range cases {
t.Run(fmt.Sprintf("#%d", i+1), func(t *testing.T) { run(t, c) })
}
}
| br/pkg/streamhelper/spans/utils_test.go | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.00017510075122117996,
0.0001718689891276881,
0.00016547690029256046,
0.0001731943484628573,
0.000003101985612374847
] |
{
"id": 13,
"code_window": [
"\t}\n",
"\tdebug.SetMemoryLimit(memoryLimit)\n",
"}\n",
"\n",
"func (*memoryLimitTuner) calcMemoryLimit(percentage float64) int64 {\n",
"\tmemoryLimit := int64(float64(memory.ServerMemoryLimit.Load()) * percentage) // `tidb_server_memory_limit` * `tidb_server_memory_limit_gc_trigger`\n",
"\tif memoryLimit == 0 {\n",
"\t\tmemoryLimit = math.MaxInt64\n",
"\t}\n",
"\treturn memoryLimit\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (t *memoryLimitTuner) calcMemoryLimit(percentage float64) int64 {\n",
"\tif t.adjustDisabled.Load() > 0 {\n",
"\t\treturn initGOMemoryLimitValue\n",
"\t}\n"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner.go",
"type": "replace",
"edit_start_line_idx": 157
} | # TestReorganizeRangePartition
drop table if exists t;
create table t (a int unsigned PRIMARY KEY, b varchar(255), c int, key (b), key (c,b)) partition by range (a) (partition p0 values less than (10), partition p1 values less than (20), partition pMax values less than (MAXVALUE));
insert into t values (1,"1",1), (12,"12",21),(23,"23",32),(34,"34",43),(45,"45",54),(56,"56",65);
--sorted_result
select * from t where c < 40;
alter table t reorganize partition pMax into (partition p2 values less than (30), partition pMax values less than (MAXVALUE));
admin check table t;
show create table t;
--sorted_result
select * from t;
--sorted_result
select * from t partition (p0);
--sorted_result
select * from t partition (p1);
--sorted_result
select * from t partition (p2);
--sorted_result
select * from t partition (pMax);
--sorted_result
select * from t where b > "1";
--sorted_result
select * from t where c < 40;
alter table t reorganize partition p2,pMax into (partition p2 values less than (35),partition p3 values less than (47), partition pMax values less than (MAXVALUE));
admin check table t;
--sorted_result
select * from t;
show create table t;
--sorted_result
select * from t partition (p0);
--sorted_result
select * from t partition (p1);
--sorted_result
select * from t partition (p2);
--sorted_result
select * from t partition (p3);
--sorted_result
select * from t partition (pMax);
alter table t reorganize partition p0,p1 into (partition p1 values less than (20));
admin check table t;
show create table t;
--sorted_result
select * from t;
alter table t drop index b;
alter table t drop index c;
admin check table t;
show create table t;
create table t2 (a int unsigned not null, b varchar(255), c int, key (b), key (c,b)) partition by range (a) (PARTITION `p1` VALUES LESS THAN (20),
PARTITION `p2` VALUES LESS THAN (35),
PARTITION `p3` VALUES LESS THAN (47),
PARTITION `pMax` VALUES LESS THAN (MAXVALUE));
insert into t2 select * from t;
-- error 1493
alter table t2 reorganize partition p2 into (partition p2a values less than (20), partition p2b values less than (36));
-- error 1493
alter table t2 reorganize partition p2 into (partition p2a values less than (30), partition p2b values less than (36));
-- error 1493
alter table t2 reorganize partition p2 into (partition p2a values less than (30), partition p2b values less than (34));
-- error 1526
alter table t2 reorganize partition pMax into (partition p2b values less than (50));
show create table t2;
alter table t2 reorganize partition pMax into (partition p4 values less than (90));
admin check table t2;
show create table t2;
drop table t;
create table t (a int PRIMARY KEY, b varchar(255), c int, key (b), key (c,b)) partition by range (abs(a)) (partition p0 values less than (10), partition p1 values less than (20), partition pMax values less than (MAXVALUE));
insert into t values (0,"0",0),(1,"1",1),(2,"2",-2),(-12,"12",21),(23,"23",32),(-34,"34",43),(45,"45",54),(56,"56",65);
alter table t reorganize partition pMax into (partition p2 values less than (30), partition pMax values less than (MAXVALUE));
admin check table t;
show create table t;
--sorted_result
select * from t partition (p2);
--sorted_result
select * from t partition (pMax);
alter table t drop index b;
alter table t reorganize partition p0,p1,p2,pMax into (partition pAll values less than (maxvalue));
admin check table t;
show create table t;
--sorted_result
select * from t partition (pAll);
# TestReorganizeRangeColumnsPartition
drop table if exists t;
CREATE TABLE t (
a INT,
b CHAR(3),
c INT,
KEY b(b),
KEY c(c,b)
)
PARTITION BY RANGE COLUMNS(a,b) (
PARTITION p0 VALUES LESS THAN (5,'ggg'),
PARTITION p1 VALUES LESS THAN (10,'mmm'),
PARTITION p2 VALUES LESS THAN (15,'sss'),
PARTITION pMax VALUES LESS THAN (MAXVALUE,MAXVALUE)
);
INSERT INTO t VALUES (1,'abc',1), (3,'ggg',3),(5,'ggg',5), (9,'ggg',9),(10,'mmm',10),(19,'xxx',19);
--sorted_result
SELECT * FROM t PARTITION(p0);
ALTER TABLE t DROP INDEX c;
ALTER TABLE t REORGANIZE PARTITION p0 into (PARTITION p00 VALUES LESS THAN (2,'ggg'), PARTITION p01 VALUES LESS THAN (5,'ggg'));
ADMIN CHECK TABLE t;
SHOW CREATE TABLE t;
--sorted_result
SELECT * FROM t PARTITION(p00);
--sorted_result
SELECT * FROM t PARTITION(p01);
DROP TABLE t;
CREATE TABLE t (
a INT,
b CHAR(3),
c INT,
KEY b(b),
KEY c(c,b)
)
PARTITION BY RANGE COLUMNS(b,a) (
PARTITION p0 VALUES LESS THAN ('ggg',5),
PARTITION p1 VALUES LESS THAN ('mmm',10),
PARTITION p2 VALUES LESS THAN ('sss',15),
PARTITION pMax VALUES LESS THAN (MAXVALUE,MAXVALUE)
);
INSERT INTO t VALUES (1,'abc',1), (3,'ccc',3),(5,'ggg',5), (9,'ggg',9),(10,'mmm',10),(19,'xxx',19);
--sorted_result
SELECT * FROM t PARTITION(p0);
ALTER TABLE t DROP INDEX b;
ALTER TABLE t REORGANIZE PARTITION p0 into (PARTITION p00 VALUES LESS THAN ('ccc',2), PARTITION p01 VALUES LESS THAN ('ggg',5));
ADMIN CHECK TABLE t;
SHOW CREATE TABLE t;
--sorted_result
SELECT * FROM t PARTITION(p00);
--sorted_result
SELECT * FROM t PARTITION(p01);
ALTER TABLE t REORGANIZE PARTITION p00,p01,p1 into (PARTITION p1 VALUES LESS THAN ('mmm',10));
ADMIN CHECK TABLE t;
SHOW CREATE TABLE t;
--sorted_result
SELECT * FROM t PARTITION(p1);
DROP TABLE t;
CREATE TABLE t (
a DATE,
b DATETIME,
c INT,
KEY b(b),
KEY c(c,b)
)
PARTITION BY RANGE COLUMNS(a,b) (
PARTITION p0 VALUES LESS THAN ('2020-05-05','2020-05-05 10:10:10'),
PARTITION p1 VALUES LESS THAN ('2021-05-05','2021-05-05 10:10:10'),
PARTITION p2 VALUES LESS THAN ('2022-05-05','2022-05-05 10:10:10'),
PARTITION pMax VALUES LESS THAN (MAXVALUE,MAXVALUE)
);
INSERT INTO t VALUES('2020-04-10', '2020-04-10 10:10:10', 1), ('2020-05-04', '2020-05-04 10:10:10', 2),('2020-05-05', '2020-05-05 10:10:10', 3), ('2021-05-04', '2021-05-04 10:10:10', 4),('2022-05-05', '2022-05-05 10:10:10', 5), ('2023-05-05', '2023-05-05 10:10:10', 6);
ALTER TABLE t REORGANIZE PARTITION p0 into (PARTITION p00 VALUES LESS THAN ('2020-04-10', '2020-04-10 10:10:10'), PARTITION p01 VALUES LESS THAN ('2020-05-05', '2020-05-05 10:10:10'));
ADMIN CHECK TABLE t;
SHOW CREATE TABLE t;
--sorted_result
SELECT * FROM t PARTITION(p00);
--sorted_result
SELECT * FROM t PARTITION(p01);
# TODO(bb7133): different err message with MySQL
-- error 1493
ALTER TABLE t REORGANIZE PARTITION p00,p01,p1,p2 into (PARTITION p0 VALUES LESS THAN ('2022-05-05', '2022-05-05 10:10:11'));
ALTER TABLE t DROP INDEX c;
ALTER TABLE t REORGANIZE PARTITION p00,p01,p1,p2 into (PARTITION p0 VALUES LESS THAN ('2022-05-05', '2022-05-05 10:10:10'));
ADMIN CHECK TABLE t;
SHOW CREATE TABLE t;
ADMIN CHECK TABLE t;
--sorted_result
SELECT * FROM t PARTITION(p0);
--sorted_result
SELECT * FROM t PARTITION(pMax);
DROP TABLE t;
CREATE TABLE t (
a DATE,
b DATETIME,
c INT,
KEY b(b),
KEY c(c,b)
)
PARTITION BY RANGE COLUMNS(b,a) (
PARTITION p0 VALUES LESS THAN ('2020-05-05 10:10:10','2020-05-05'),
PARTITION p1 VALUES LESS THAN ('2021-05-05 10:10:10','2021-05-05'),
PARTITION p2 VALUES LESS THAN ('2022-05-05 10:10:10','2022-05-05'),
PARTITION pMax VALUES LESS THAN (MAXVALUE,MAXVALUE)
);
INSERT INTO t VALUES('2020-04-10', '2020-04-10 10:10:10', 1), ('2020-05-04', '2020-05-04 10:10:10', 2),('2020-05-05', '2020-05-05 10:10:10', 3), ('2021-05-04', '2021-05-04 10:10:10', 4),('2022-05-05', '2022-05-05 10:10:10', 5), ('2023-05-05', '2023-05-05 10:10:10', 6);
ALTER TABLE t REORGANIZE PARTITION p0 into (PARTITION p00 VALUES LESS THAN ('2020-04-10 10:10:10', '2020-04-10'), PARTITION p01 VALUES LESS THAN ('2020-05-05 10:10:10', '2020-05-05'));
ADMIN CHECK TABLE t;
SHOW CREATE TABLE t;
--sorted_result
SELECT * FROM t PARTITION(p00);
--sorted_result
SELECT * FROM t PARTITION(p01);
ALTER TABLE t DROP INDEX b;
# TODO(bb7133): different err message with MySQL
-- error 1493
ALTER TABLE t REORGANIZE PARTITION p00,p01,p1,p2 into (PARTITION p0 VALUES LESS THAN ('2022-05-05 10:10:11', '2022-05-05'));
ALTER TABLE t REORGANIZE PARTITION p00,p01,p1,p2 into (PARTITION p0 VALUES LESS THAN ('2022-05-05 10:10:10', '2022-05-05'));
ADMIN CHECK TABLE t;
SHOW CREATE TABLE t;
ADMIN CHECK TABLE t;
--sorted_result
SELECT * FROM t PARTITION(p0);
--sorted_result
SELECT * FROM t PARTITION(pMax);
# TestReorganizeListPartition
drop table if exists t;
create table t (a int, b varchar(55), c int) partition by list (a) (partition p1 values in (12,23,51,14), partition p2 values in (24,63), partition p3 values in (45));
insert into t values (12,"12",21), (24,"24",42),(51,"51",15),(23,"23",32),(63,"63",36),(45,"45",54);
alter table t reorganize partition p1 into (partition p0 values in (12,51,13), partition p1 values in (23));
admin check table t;
show create table t;
alter table t add primary key (a), add key (b), add key (c,b);
# Note: MySQL cannot reorganize two non-consecutive list partitions :)
# ERROR 1519 (HY000): When reorganizing a set of partitions they must be in consecutive order
# https://bugs.mysql.com/bug.php?id=106011
# https://bugs.mysql.com/bug.php?id=109939
alter table t reorganize partition p1, p3 into (partition pa values in (45,23,15));
admin check table t;
show create table t;
-- error 8200
alter table t modify a varchar(20);
drop table t;
create table t (a int, b varchar(55), c int) partition by list (abs(a))
(partition p0 values in (-1,0,1),
partition p1 values in (12,23,51,14),
partition p2 values in (24,63),
partition p3 values in (45));
insert into t values
(-1,"-1",11),(1,"1",11),(0,"0",0),(-12,"-12",21),
(-24,"-24",42),(51,"-51",15),(23,"23",32),(63,"63",36),(45,"45",54);
alter table t reorganize partition p0, p1 into (partition p0 values in (0,1,2,12,51,13), partition p1 values in (23));
admin check table t;
--sorted_result
select * from t partition (p0);
show create table t;
alter table t add primary key (a), add key (b), add key (c,b);
alter table t reorganize partition p0,p1,p2,p3 into (partition paa values in (0,1,2,12,13,23,24,45,51,63,64));
admin check table t;
--sorted_result
select * from t partition (paa);
show create table t;
# TestReorgPartitionData
drop table if exists t;
create table t (a int PRIMARY KEY AUTO_INCREMENT, b varchar(255), c int, d datetime, key (b), key (c,b)) partition by range (a) (partition p1 values less than (0), partition p1M values less than (1000000));
-- error 1292
insert into t values (0, "Zero value!", 0, '2022-02-30');
SET @@session.sql_mode = 'ALLOW_INVALID_DATES,NO_AUTO_VALUE_ON_ZERO';
insert into t values (0, "Zero value!", 0, '2022-02-30');
show warnings;
--sorted_result
select * from t;
SET @@session.sql_mode = default;
alter table t reorganize partition p1M into (partition p0 values less than (1), partition p2M values less than (2000000));
--sorted_result
select * from t;
admin check table t;
# TestReorganizeListColumnsPartition
drop table if exists t;
CREATE TABLE t (
a INT,
b CHAR(3),
c INT,
KEY b(b),
KEY c(c,b)
)
PARTITION BY LIST COLUMNS(a,b) (
PARTITION p0 VALUES IN ((1,'aaa'),(2,'bbb'),(3,'ccc')),
PARTITION p1 VALUES IN ((4,'ddd'),(5,'eee'),(6,'fff')),
PARTITION p2 VALUES IN ((16,'lll'),(17,'mmm'),(18,'lll'))
);
INSERT INTO t VALUES (1,'aaa',1), (3,'ccc',3),(5,'eee',5), (16,'lll',16);
--sorted_result
SELECT * FROM t PARTITION(p0);
# TODO(bb7133): MySQL 8 does not report an error if any row does not fit the new partitions; instead, the row is removed.
-- error 1526
ALTER TABLE t REORGANIZE PARTITION p0 into (PARTITION p00 VALUES IN ((0,'uuu'),(1,'aaa')), PARTITION p01 VALUES IN ((2,'bbb')));
ALTER TABLE t REORGANIZE PARTITION p0 into (PARTITION p00 VALUES IN ((0,'uuu'),(1,'aaa')), PARTITION p01 VALUES IN ((2,'bbb'),(3,'ccc')));
ADMIN CHECK TABLE t;
SHOW CREATE TABLE t;
--sorted_result
SELECT * FROM t PARTITION(p00);
--sorted_result
SELECT * FROM t PARTITION(p01);
ALTER TABLE t DROP INDEX b;
ALTER TABLE t REORGANIZE PARTITION p00,p01,p1,p2 into (PARTITION pAll VALUES IN
((0,'uuu'),(1,'aaa'),(2,'bbb'),(3,'ccc'),(4,'ddd'),(5,'eee'),(6,'fff'),(16,'lll'),(17,'mmm'),(18,'lll')));
ADMIN CHECK TABLE t;
--sorted_result
SELECT * FROM t PARTITION(pAll);
--sorted_result
SELECT * FROM t;
DROP TABLE t;
CREATE TABLE t (
a INT,
b CHAR(3),
c INT,
KEY b(b),
KEY c(c,b)
)
PARTITION BY LIST COLUMNS(b,a) (
PARTITION p0 VALUES IN (('aaa',1),('bbb',2),('ccc',3)),
PARTITION p1 VALUES IN (('ddd',4),('eee',5),('fff',6)),
PARTITION p2 VALUES IN (('lll',16),('mmm',17),('lll',18))
);
INSERT INTO t VALUES (1,'aaa',1), (3,'ccc',3),(5,'eee',5), (16,'lll',16);
--sorted_result
SELECT * FROM t PARTITION(p0);
ALTER TABLE t REORGANIZE PARTITION p0 into (PARTITION p00 VALUES IN (('uuu',-1),('aaa',1)), PARTITION p01 VALUES IN (('bbb',2),('ccc',3),('ccc',4)));
ADMIN CHECK TABLE t;
SHOW CREATE TABLE t;
--sorted_result
SELECT * FROM t PARTITION(p00);
--sorted_result
SELECT * FROM t PARTITION(p01);
ALTER TABLE t DROP INDEX c;
ALTER TABLE t REORGANIZE PARTITION p00,p01,p1,p2 into (PARTITION pAll VALUES IN
(('uuu',-1),('aaa',1),('bbb',2),('ccc',3),('ccc',4),('ddd',4),('eee',5),('fff',6),('lll',16),('mmm',17),('lll',18)));
ADMIN CHECK TABLE t;
--sorted_result
SELECT * FROM t PARTITION(pAll);
--sorted_result
SELECT * FROM t;
DROP TABLE t;
CREATE TABLE t (
a DATE,
b DATETIME,
c INT,
KEY b(b),
KEY c(c,b)
)
PARTITION BY LIST COLUMNS(a,b) (
PARTITION p0 VALUES IN (('2020-04-10','2020-04-10 10:10:10'),('2020-05-04','2020-05-04 10:10:10')),
PARTITION p1 VALUES IN (('2021-05-04','2021-05-04 10:10:10'),('2021-05-05','2021-05-05 10:10:10')),
PARTITION p2 VALUES IN (('2022-05-04','2022-05-04 10:10:10'),('2022-05-05','2022-05-06 11:11:11'))
);
INSERT INTO t VALUES('2020-04-10', '2020-04-10 10:10:10', 1), ('2020-05-04', '2020-05-04 10:10:10', 2),('2020-05-04', '2020-05-04 10:10:10', 3), ('2021-05-04', '2021-05-04 10:10:10', 4),('2022-05-04', '2022-05-04 10:10:10', 5), ('2022-05-05', '2022-05-06 11:11:11', 6);
ALTER TABLE t REORGANIZE PARTITION p0 into (PARTITION p00 VALUES IN (('2020-04-10', '2020-04-10 10:10:10')), PARTITION p01 VALUES IN (('2020-05-04', '2020-05-04 10:10:10')));
ADMIN CHECK TABLE t;
SHOW CREATE TABLE t;
--sorted_result
SELECT * FROM t PARTITION(p00);
--sorted_result
SELECT * FROM t PARTITION(p01);
ALTER TABLE t DROP INDEX b;
# TODO(bb7133): MySQL 8 does not report an error if any row does not fit the new partitions; instead, the row is removed.
-- error 1526
ALTER TABLE t REORGANIZE PARTITION p00,p01,p1,p2 into (PARTITION pAll VALUES IN (('2020-04-10','2020-04-10 10:10:10'),('2020-05-04','2020-05-04 10:10:10'), ('2021-05-04','2021-05-04 10:10:10'),('2021-05-05','2021-05-05 10:10:10'), ('2022-05-04','2022-05-04 10:10:10'),('2022-05-05','2023-05-05 11:11:11')));
ALTER TABLE t REORGANIZE PARTITION p00,p01,p1,p2 into (PARTITION pAll VALUES IN (('2020-04-10','2020-04-10 10:10:10'),('2020-05-04','2020-05-04 10:10:10'), ('2021-05-04','2021-05-04 10:10:10'),('2021-05-05','2021-05-05 10:10:10'), ('2022-05-04','2022-05-04 10:10:10'),('2022-05-05','2022-05-06 11:11:11')));
ADMIN CHECK TABLE t;
SHOW CREATE TABLE t;
ADMIN CHECK TABLE t;
--sorted_result
SELECT * FROM t PARTITION(pAll);
--sorted_result
SELECT * FROM t;
DROP TABLE t;
CREATE TABLE t (
a DATE,
b DATETIME,
c INT,
KEY b(b),
KEY c(c,b)
)
PARTITION BY LIST COLUMNS(b,a) (
PARTITION p0 VALUES IN (('2020-04-10 10:10:10','2020-04-10'),('2020-05-04 10:10:10','2020-05-04')),
PARTITION p1 VALUES IN (('2021-05-04 10:10:10','2021-05-04'),('2021-05-05 10:10:10','2021-05-05')),
PARTITION p2 VALUES IN (('2022-05-04 10:10:10','2022-05-04'),('2022-05-06 11:11:11','2022-05-05'))
);
INSERT INTO t VALUES('2020-04-10', '2020-04-10 10:10:10', 1), ('2020-05-04', '2020-05-04 10:10:10', 2),('2020-05-04', '2020-05-04 10:10:10', 3), ('2021-05-04', '2021-05-04 10:10:10', 4),('2022-05-04', '2022-05-04 10:10:10', 5), ('2022-05-05', '2022-05-06 11:11:11', 6);
ALTER TABLE t REORGANIZE PARTITION p0 into (PARTITION p00 VALUES IN (('2020-04-10 10:10:10','2020-04-10')), PARTITION p01 VALUES IN (('2020-05-04 10:10:10','2020-05-04')));
ADMIN CHECK TABLE t;
SHOW CREATE TABLE t;
--sorted_result
SELECT * FROM t PARTITION(p00);
--sorted_result
SELECT * FROM t PARTITION(p01);
ALTER TABLE t DROP INDEX b;
# TODO(bb7133): MySQL 8 does not report an error if any row does not fit the new partitions; instead, the row is removed.
-- error 1526
ALTER TABLE t REORGANIZE PARTITION p00,p01,p1,p2 into (PARTITION pAll VALUES IN (('2020-04-10 10:10:10','2020-04-10'),('2020-05-04 10:10:10','2020-05-04'), ('2021-05-04 10:10:10','2021-05-04'),('2021-05-05 10:10:10','2021-05-05'), ('2022-05-04 10:10:10','2022-05-04'),('2022-05-06 11:11:11','2023-05-05')));
ALTER TABLE t REORGANIZE PARTITION p00,p01,p1,p2 into (PARTITION pAll VALUES IN (('2020-04-10 10:10:10','2020-04-10'),('2020-05-04 10:10:10','2020-05-04'), ('2021-05-04 10:10:10','2021-05-04'),('2021-05-05 10:10:10','2021-05-05'), ('2022-05-04 10:10:10','2022-05-04'),('2022-05-06 11:11:11','2022-05-05')));
ADMIN CHECK TABLE t;
SHOW CREATE TABLE t;
ADMIN CHECK TABLE t;
--sorted_result
SELECT * FROM t PARTITION(pAll);
--sorted_result
SELECT * FROM t;
| tests/integrationtest/t/ddl/reorg_partition.test | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.0006831513601355255,
0.00019408203661441803,
0.00016325691831298172,
0.0001699610729701817,
0.00008975754462881014
] |
{
"id": 13,
"code_window": [
"\t}\n",
"\tdebug.SetMemoryLimit(memoryLimit)\n",
"}\n",
"\n",
"func (*memoryLimitTuner) calcMemoryLimit(percentage float64) int64 {\n",
"\tmemoryLimit := int64(float64(memory.ServerMemoryLimit.Load()) * percentage) // `tidb_server_memory_limit` * `tidb_server_memory_limit_gc_trigger`\n",
"\tif memoryLimit == 0 {\n",
"\t\tmemoryLimit = math.MaxInt64\n",
"\t}\n",
"\treturn memoryLimit\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (t *memoryLimitTuner) calcMemoryLimit(percentage float64) int64 {\n",
"\tif t.adjustDisabled.Load() > 0 {\n",
"\t\treturn initGOMemoryLimitValue\n",
"\t}\n"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner.go",
"type": "replace",
"edit_start_line_idx": 157
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package planstats_test
import (
"context"
"fmt"
"slices"
"testing"
"time"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/pkg/config"
"github.com/pingcap/tidb/pkg/domain"
"github.com/pingcap/tidb/pkg/executor"
"github.com/pingcap/tidb/pkg/parser"
"github.com/pingcap/tidb/pkg/parser/model"
"github.com/pingcap/tidb/pkg/planner"
plannercore "github.com/pingcap/tidb/pkg/planner/core"
"github.com/pingcap/tidb/pkg/sessionctx"
"github.com/pingcap/tidb/pkg/sessionctx/stmtctx"
"github.com/pingcap/tidb/pkg/statistics"
"github.com/pingcap/tidb/pkg/statistics/handle/types"
"github.com/pingcap/tidb/pkg/table"
"github.com/pingcap/tidb/pkg/testkit"
"github.com/pingcap/tidb/pkg/testkit/testdata"
"github.com/stretchr/testify/require"
)
func TestPlanStatsLoad(t *testing.T) {
p := parser.New()
store, dom := testkit.CreateMockStoreAndDomain(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
ctx := tk.Session().(sessionctx.Context)
tk.MustExec("drop table if exists t")
tk.MustExec("set @@session.tidb_analyze_version=2")
tk.MustExec("set @@session.tidb_partition_prune_mode = 'static'")
tk.MustExec("set @@session.tidb_stats_load_sync_wait = 60000")
tk.MustExec("create table t(a int, b int, c int, d int, primary key(a), key idx(b))")
tk.MustExec("insert into t values (1,1,1,1),(2,2,2,2),(3,3,3,3)")
tk.MustExec("create table pt(a int, b int, c int) partition by range(a) (partition p0 values less than (10), partition p1 values less than (20), partition p2 values less than maxvalue)")
tk.MustExec("insert into pt values (1,1,1),(2,2,2),(13,13,13),(14,14,14),(25,25,25),(36,36,36)")
oriLease := dom.StatsHandle().Lease()
dom.StatsHandle().SetLease(1)
defer func() {
dom.StatsHandle().SetLease(oriLease)
}()
tk.MustExec("analyze table t")
tk.MustExec("analyze table pt")
testCases := []struct {
sql string
skip bool
check func(p plannercore.Plan, tableInfo *model.TableInfo)
}{
{ // DataSource
sql: "select * from t where c>1",
check: func(p plannercore.Plan, tableInfo *model.TableInfo) {
switch pp := p.(type) {
case *plannercore.PhysicalTableReader:
stats := pp.StatsInfo().HistColl
require.Equal(t, 0, countFullStats(stats, tableInfo.Columns[1].ID))
require.Greater(t, countFullStats(stats, tableInfo.Columns[2].ID), 0)
default:
t.Error("unexpected plan:", pp)
}
},
},
{ // PartitionTable
sql: "select * from pt where a < 15 and c > 1",
check: func(p plannercore.Plan, tableInfo *model.TableInfo) {
pua, ok := p.(*plannercore.PhysicalUnionAll)
require.True(t, ok)
for _, child := range pua.Children() {
require.Greater(t, countFullStats(child.StatsInfo().HistColl, tableInfo.Columns[2].ID), 0)
}
},
},
{ // Join
sql: "select * from t t1 inner join t t2 on t1.b=t2.b where t1.d=3",
check: func(p plannercore.Plan, tableInfo *model.TableInfo) {
pp, ok := p.(plannercore.PhysicalPlan)
require.True(t, ok)
require.Greater(t, countFullStats(pp.Children()[0].StatsInfo().HistColl, tableInfo.Columns[3].ID), 0)
require.Greater(t, countFullStats(pp.Children()[1].StatsInfo().HistColl, tableInfo.Columns[3].ID), 0)
},
},
{ // Apply
sql: "select * from t t1 where t1.b > (select count(*) from t t2 where t2.c > t1.a and t2.d>1) and t1.c>2",
check: func(p plannercore.Plan, tableInfo *model.TableInfo) {
pp, ok := p.(*plannercore.PhysicalProjection)
require.True(t, ok)
pa, ok := pp.Children()[0].(*plannercore.PhysicalApply)
require.True(t, ok)
left := pa.PhysicalHashJoin.Children()[0]
right := pa.PhysicalHashJoin.Children()[1]
require.Greater(t, countFullStats(left.StatsInfo().HistColl, tableInfo.Columns[2].ID), 0)
require.Greater(t, countFullStats(right.StatsInfo().HistColl, tableInfo.Columns[3].ID), 0)
},
},
{ // > Any
sql: "select * from t where t.b > any(select d from t where t.c > 2)",
check: func(p plannercore.Plan, tableInfo *model.TableInfo) {
ph, ok := p.(*plannercore.PhysicalHashJoin)
require.True(t, ok)
ptr, ok := ph.Children()[0].(*plannercore.PhysicalTableReader)
require.True(t, ok)
require.Greater(t, countFullStats(ptr.StatsInfo().HistColl, tableInfo.Columns[2].ID), 0)
},
},
{ // in
sql: "select * from t where t.b in (select d from t where t.c > 2)",
check: func(p plannercore.Plan, tableInfo *model.TableInfo) {
ph, ok := p.(*plannercore.PhysicalHashJoin)
require.True(t, ok)
ptr, ok := ph.Children()[1].(*plannercore.PhysicalTableReader)
require.True(t, ok)
require.Greater(t, countFullStats(ptr.StatsInfo().HistColl, tableInfo.Columns[2].ID), 0)
},
},
{ // not in
sql: "select * from t where t.b not in (select d from t where t.c > 2)",
check: func(p plannercore.Plan, tableInfo *model.TableInfo) {
ph, ok := p.(*plannercore.PhysicalHashJoin)
require.True(t, ok)
ptr, ok := ph.Children()[1].(*plannercore.PhysicalTableReader)
require.True(t, ok)
require.Greater(t, countFullStats(ptr.StatsInfo().HistColl, tableInfo.Columns[2].ID), 0)
},
},
{ // exists
sql: "select * from t t1 where exists (select * from t t2 where t1.b > t2.d and t2.c>1)",
check: func(p plannercore.Plan, tableInfo *model.TableInfo) {
ph, ok := p.(*plannercore.PhysicalHashJoin)
require.True(t, ok)
ptr, ok := ph.Children()[1].(*plannercore.PhysicalTableReader)
require.True(t, ok)
require.Greater(t, countFullStats(ptr.StatsInfo().HistColl, tableInfo.Columns[2].ID), 0)
},
},
{ // not exists
sql: "select * from t t1 where not exists (select * from t t2 where t1.b > t2.d and t2.c>1)",
check: func(p plannercore.Plan, tableInfo *model.TableInfo) {
ph, ok := p.(*plannercore.PhysicalHashJoin)
require.True(t, ok)
ptr, ok := ph.Children()[1].(*plannercore.PhysicalTableReader)
require.True(t, ok)
require.Greater(t, countFullStats(ptr.StatsInfo().HistColl, tableInfo.Columns[2].ID), 0)
},
},
{ // CTE
sql: "with cte(x, y) as (select d + 1, b from t where c > 1) select * from cte where x < 3",
check: func(p plannercore.Plan, tableInfo *model.TableInfo) {
ps, ok := p.(*plannercore.PhysicalProjection)
require.True(t, ok)
pc, ok := ps.Children()[0].(*plannercore.PhysicalTableReader)
require.True(t, ok)
pp, ok := pc.GetTablePlan().(*plannercore.PhysicalSelection)
require.True(t, ok)
reader, ok := pp.Children()[0].(*plannercore.PhysicalTableScan)
require.True(t, ok)
require.Greater(t, countFullStats(reader.StatsInfo().HistColl, tableInfo.Columns[2].ID), 0)
},
},
{ // recursive CTE
sql: "with recursive cte(x, y) as (select a, b from t where c > 1 union select x + 1, y from cte where x < 5) select * from cte",
check: func(p plannercore.Plan, tableInfo *model.TableInfo) {
pc, ok := p.(*plannercore.PhysicalCTE)
require.True(t, ok)
pp, ok := pc.SeedPlan.(*plannercore.PhysicalProjection)
require.True(t, ok)
reader, ok := pp.Children()[0].(*plannercore.PhysicalTableReader)
require.True(t, ok)
require.Greater(t, countFullStats(reader.StatsInfo().HistColl, tableInfo.Columns[2].ID), 0)
},
},
{ // check idx(b)
sql: "select * from t USE INDEX(idx) where b >= 10",
check: func(p plannercore.Plan, tableInfo *model.TableInfo) {
pr, ok := p.(*plannercore.PhysicalIndexLookUpReader)
require.True(t, ok)
pis, ok := pr.IndexPlans[0].(*plannercore.PhysicalIndexScan)
require.True(t, ok)
require.True(t, pis.StatsInfo().HistColl.Indices[1].IsEssentialStatsLoaded())
},
},
}
for _, testCase := range testCases {
if testCase.skip {
continue
}
is := dom.InfoSchema()
dom.StatsHandle().Clear() // clear statsCache
require.NoError(t, dom.StatsHandle().Update(is))
stmt, err := p.ParseOneStmt(testCase.sql, "", "")
require.NoError(t, err)
err = executor.ResetContextOfStmt(ctx, stmt)
require.NoError(t, err)
p, _, err := planner.Optimize(context.TODO(), ctx, stmt, is)
require.NoError(t, err)
tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
require.NoError(t, err)
tableInfo := tbl.Meta()
testCase.check(p, tableInfo)
}
}
func countFullStats(stats *statistics.HistColl, colID int64) int {
for _, col := range stats.Columns {
if col.Info.ID == colID {
return col.Histogram.Len() + col.TopN.Num()
}
}
return -1
}
func TestPlanStatsLoadTimeout(t *testing.T) {
p := parser.New()
originConfig := config.GetGlobalConfig()
newConfig := config.NewConfig()
newConfig.Performance.StatsLoadConcurrency = 0 // no worker to consume channel
newConfig.Performance.StatsLoadQueueSize = 1
config.StoreGlobalConfig(newConfig)
defer config.StoreGlobalConfig(originConfig)
store, dom := testkit.CreateMockStoreAndDomain(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
originalVal1 := tk.MustQuery("select @@tidb_stats_load_pseudo_timeout").Rows()[0][0].(string)
defer func() {
tk.MustExec(fmt.Sprintf("set global tidb_stats_load_pseudo_timeout = %v", originalVal1))
}()
ctx := tk.Session().(sessionctx.Context)
tk.MustExec("drop table if exists t")
tk.MustExec("set @@session.tidb_analyze_version=2")
// since queue full, make sync-wait return as timeout as soon as possible
tk.MustExec("set @@session.tidb_stats_load_sync_wait = 1")
tk.MustExec("create table t(a int, b int, c int, primary key(a))")
tk.MustExec("insert into t values (1,1,1),(2,2,2),(3,3,3)")
oriLease := dom.StatsHandle().Lease()
dom.StatsHandle().SetLease(1)
defer func() {
dom.StatsHandle().SetLease(oriLease)
}()
tk.MustExec("analyze table t")
is := dom.InfoSchema()
require.NoError(t, dom.StatsHandle().Update(is))
tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
require.NoError(t, err)
tableInfo := tbl.Meta()
neededColumn := model.TableItemID{TableID: tableInfo.ID, ID: tableInfo.Columns[0].ID, IsIndex: false}
resultCh := make(chan stmtctx.StatsLoadResult, 1)
timeout := time.Duration(1<<63 - 1)
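// effectively an infinite timeout: max int64 nanoseconds (~292 years)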
task := &types.NeededItemTask{
TableItemID: neededColumn,
ResultCh: resultCh,
ToTimeout: time.Now().Local().Add(timeout),
}
dom.StatsHandle().AppendNeededItem(task, timeout) // make channel queue full
sql := "select * from t where c>1"
stmt, err := p.ParseOneStmt(sql, "", "")
require.NoError(t, err)
tk.MustExec("set global tidb_stats_load_pseudo_timeout=false")
_, _, err = planner.Optimize(context.TODO(), ctx, stmt, is)
require.Error(t, err) // fail sql for timeout when pseudo=false
tk.MustExec("set global tidb_stats_load_pseudo_timeout=true")
require.NoError(t, failpoint.Enable("github.com/pingcap/executor/assertSyncStatsFailed", `return(true)`))
tk.MustExec(sql) // not fail sql for timeout when pseudo=true
failpoint.Disable("github.com/pingcap/executor/assertSyncStatsFailed")
plan, _, err := planner.Optimize(context.TODO(), ctx, stmt, is)
require.NoError(t, err) // not fail sql for timeout when pseudo=true
switch pp := plan.(type) {
case *plannercore.PhysicalTableReader:
stats := pp.StatsInfo().HistColl
require.Equal(t, 0, countFullStats(stats, tableInfo.Columns[0].ID))
require.Equal(t, 0, countFullStats(stats, tableInfo.Columns[2].ID)) // pseudo stats
default:
t.Error("unexpected plan:", pp)
}
}
func TestPlanStatsStatusRecord(t *testing.T) {
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.Performance.EnableStatsCacheMemQuota = true
})
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec(`set @@tidb_enable_non_prepared_plan_cache=0`) // affect this ut
tk.MustExec(`create table t (b int,key b(b))`)
tk.MustExec("insert into t (b) values (1)")
tk.MustExec("analyze table t")
tk.MustQuery("select * from t where b >= 1")
require.Equal(t, tk.Session().GetSessionVars().StmtCtx.RecordedStatsLoadStatusCnt(), 0)
// drop stats in order to change status
domain.GetDomain(tk.Session()).StatsHandle().SetStatsCacheCapacity(1)
tk.MustQuery("select * from t where b >= 1")
for _, usedStatsForTbl := range tk.Session().GetSessionVars().StmtCtx.GetUsedStatsInfo(false).Values() {
if usedStatsForTbl == nil {
continue
}
for _, status := range usedStatsForTbl.IndexStatsLoadStatus {
require.Equal(t, status, "allEvicted")
}
for _, status := range usedStatsForTbl.ColumnStatsLoadStatus {
require.Equal(t, status, "allEvicted")
}
}
}
func TestCollectDependingVirtualCols(t *testing.T) {
store, dom := testkit.CreateMockStoreAndDomain(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("create table t(a int, b int, c json," +
"index ic_char((cast(c->'$' as char(32) array)))," +
"index ic_unsigned((cast(c->'$.unsigned' as unsigned array)))," +
"index ic_signed((cast(c->'$.signed' as unsigned array)))" +
")")
tk.MustExec("create table t1(a int, b int, c int," +
"vab int as (a + b) virtual," +
"vc int as (c - 5) virtual," +
"vvc int as (b - vc) virtual," +
"vvabvvc int as (vab * vvc) virtual," +
"index ib((b + 1))," +
"index icvab((c + vab))," +
"index ivvcvab((vvc / vab))" +
")")
is := dom.InfoSchema()
tableNames := []string{"t", "t1"}
tblName2TblID := make(map[string]int64)
tblID2Tbl := make(map[int64]table.Table)
for _, tblName := range tableNames {
tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr(tblName))
require.NoError(t, err)
tblName2TblID[tblName] = tbl.Meta().ID
tblID2Tbl[tbl.Meta().ID] = tbl
}
var input []struct {
TableName string
InputColNames []string
}
var output []struct {
TableName string
InputColNames []string
OutputColNames []string
}
testData := GetPlanStatsData()
testData.LoadTestCases(t, &input, &output)
for i, testCase := range input {
// prepare the input
tbl := tblID2Tbl[tblName2TblID[testCase.TableName]]
require.NotNil(t, tbl)
neededItems := make([]model.TableItemID, 0, len(testCase.InputColNames))
for _, colName := range testCase.InputColNames {
col := tbl.Meta().FindPublicColumnByName(colName)
require.NotNil(t, col)
neededItems = append(neededItems, model.TableItemID{TableID: tbl.Meta().ID, ID: col.ID})
}
// call the function
res := plannercore.CollectDependingVirtualCols(tblID2Tbl, neededItems)
// record and check the output
cols := make([]string, 0, len(res))
for _, tblColID := range res {
colName := tbl.Meta().FindColumnNameByID(tblColID.ID)
require.NotEmpty(t, colName)
cols = append(cols, colName)
}
slices.Sort(cols)
testdata.OnRecord(func() {
output[i].TableName = testCase.TableName
output[i].InputColNames = testCase.InputColNames
output[i].OutputColNames = cols
})
require.Equal(t, output[i].OutputColNames, cols)
}
}
| pkg/planner/core/casetest/planstats/plan_stats_test.go | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.0012164429062977433,
0.00019624402921181172,
0.0001659434929024428,
0.00017004564870148897,
0.00016132793098222464
] |
{
"id": 14,
"code_window": [
"\twaitingTunningFinishFn()\n",
"\tcheckIfMemoryLimitIsModified()\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func TestSetMemoryLimit(t *testing.T) {\n",
"\tGlobalMemoryLimitTuner.DisableAdjustMemoryLimit()\n",
"\tmemory.ServerMemoryLimit.Store(1 << 30) // 1GB\n",
"\tGlobalMemoryLimitTuner.SetPercentage(0.8) // 1GB * 80% = 800MB\n",
"\tGlobalMemoryLimitTuner.UpdateMemoryLimit()\n",
"\trequire.Equal(t, initGOMemoryLimitValue, debug.SetMemoryLimit(-1))\n",
"\tGlobalMemoryLimitTuner.EnableAdjustMemoryLimit()\n",
"\tGlobalMemoryLimitTuner.UpdateMemoryLimit()\n",
"\trequire.Equal(t, int64(1<<30*80/100), debug.SetMemoryLimit(-1))\n",
"}"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner_test.go",
"type": "add",
"edit_start_line_idx": 232
} | // Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gctuner
import (
"math"
"runtime/debug"
"time"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/pkg/util"
"github.com/pingcap/tidb/pkg/util/intest"
"github.com/pingcap/tidb/pkg/util/memory"
atomicutil "go.uber.org/atomic"
)
// GlobalMemoryLimitTuner allows only one memory limit tuner per process
var GlobalMemoryLimitTuner = &memoryLimitTuner{}
// The Go runtime triggers GC when it hits the memory limit managed via runtime/debug.SetMemoryLimit,
// so we can change the memory limit dynamically to avoid frequent GC when memory usage exceeds the limit.
type memoryLimitTuner struct {
finalizer *finalizer
isValidValueSet atomicutil.Bool
percentage atomicutil.Float64
adjustPercentageInProgress atomicutil.Bool
serverMemLimitBeforeAdjust atomicutil.Uint64
percentageBeforeAdjust atomicutil.Float64
nextGCTriggeredByMemoryLimit atomicutil.Bool
}
// fallbackPercentage indicates the fallback memory limit percentage when tuning.
const fallbackPercentage float64 = 1.1
var memoryGoroutineCntInTest = *atomicutil.NewInt64(0)
// WaitMemoryLimitTunerExitInTest waits for the memory limit tuner to exit in tests.
func WaitMemoryLimitTunerExitInTest() {
if intest.InTest {
for memoryGoroutineCntInTest.Load() > 0 {
time.Sleep(100 * time.Millisecond)
}
}
}
// tuning checks the memory NextGC and judges whether this GC was triggered by the memory limit.
// The Go runtime ensures that it is called serially.
func (t *memoryLimitTuner) tuning() {
if !t.isValidValueSet.Load() {
return
}
r := memory.ForceReadMemStats()
gogc := util.GetGOGC()
ratio := float64(100+gogc) / 100
// This `if` checks whether the **last** GC was triggered by MemoryLimit as far as possible.
// If the **last** GC was triggered by MemoryLimit, we'll set MemoryLimit to MAXVALUE to return control back to GOGC
// to avoid frequent GC when memory usage fluctuates above and below MemoryLimit.
// The logic we judge whether the **last** GC was triggered by MemoryLimit is as follows:
// suppose `NextGC` = `HeapInUse * (100 + GOGC) / 100`,
// - If NextGC < MemoryLimit, the **next** GC will **not** be triggered by MemoryLimit thus we do not care about
// why the **last** GC is triggered. And MemoryLimit will not be reset this time.
// - Only if NextGC >= MemoryLimit, the **next** GC will be triggered by MemoryLimit. Thus, we need to reset
// MemoryLimit after the **next** GC happens if needed.
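// Illustrative numbers: with GOGC == 100 (ratio == 2.0) and HeapInuse == 600 MiB, the
// estimated NextGC is 1200 MiB; with a current memory limit of 1000 MiB the condition
// below holds, i.e. the next GC would again be triggered by the memory limit.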
if float64(r.HeapInuse)*ratio > float64(debug.SetMemoryLimit(-1)) {
if t.nextGCTriggeredByMemoryLimit.Load() && t.adjustPercentageInProgress.CompareAndSwap(false, true) {
// It's ok to update `adjustPercentageInProgress`, `serverMemLimitBeforeAdjust` and `percentageBeforeAdjust` not in a transaction.
// The update of memory limit is eventually consistent.
t.serverMemLimitBeforeAdjust.Store(memory.ServerMemoryLimit.Load())
t.percentageBeforeAdjust.Store(t.GetPercentage())
go func() {
if intest.InTest {
memoryGoroutineCntInTest.Inc()
defer memoryGoroutineCntInTest.Dec()
}
memory.MemoryLimitGCLast.Store(time.Now())
memory.MemoryLimitGCTotal.Add(1)
debug.SetMemoryLimit(t.calcMemoryLimit(fallbackPercentage))
resetInterval := 1 * time.Minute // Wait 1 minute and set back, to avoid frequent GC
if intest.InTest {
resetInterval = 3 * time.Second
}
failpoint.Inject("mockUpdateGlobalVarDuringAdjustPercentage", func(val failpoint.Value) {
if val, ok := val.(bool); val && ok {
time.Sleep(300 * time.Millisecond)
t.UpdateMemoryLimit()
}
})
failpoint.Inject("testMemoryLimitTuner", func(val failpoint.Value) {
if val, ok := val.(bool); val && ok {
resetInterval = 1 * time.Second
}
})
time.Sleep(resetInterval)
debug.SetMemoryLimit(t.calcMemoryLimit(t.GetPercentage()))
for !t.adjustPercentageInProgress.CompareAndSwap(true, false) {
continue
}
}()
memory.TriggerMemoryLimitGC.Store(true)
}
t.nextGCTriggeredByMemoryLimit.Store(true)
} else {
t.nextGCTriggeredByMemoryLimit.Store(false)
memory.TriggerMemoryLimitGC.Store(false)
}
}
// Start starts the memory limit tuner.
func (t *memoryLimitTuner) Start() {
t.finalizer = newFinalizer(t.tuning) // Start tuning
}
// Stop stops the memory limit tuner.
func (t *memoryLimitTuner) Stop() {
t.finalizer.stop()
}
// SetPercentage sets the percentage for the memory limit tuner.
func (t *memoryLimitTuner) SetPercentage(percentage float64) {
t.percentage.Store(percentage)
}
// GetPercentage gets the percentage from the memory limit tuner.
func (t *memoryLimitTuner) GetPercentage() float64 {
return t.percentage.Load()
}
// UpdateMemoryLimit updates the memory limit.
// This function should be called when `tidb_server_memory_limit` or `tidb_server_memory_limit_gc_trigger` is modified.
func (t *memoryLimitTuner) UpdateMemoryLimit() {
if t.adjustPercentageInProgress.Load() {
if t.serverMemLimitBeforeAdjust.Load() == memory.ServerMemoryLimit.Load() && t.percentageBeforeAdjust.Load() == t.GetPercentage() {
return
}
}
var memoryLimit = t.calcMemoryLimit(t.GetPercentage())
if memoryLimit == math.MaxInt64 {
t.isValidValueSet.Store(false)
memoryLimit = initGOMemoryLimitValue
} else {
t.isValidValueSet.Store(true)
}
debug.SetMemoryLimit(memoryLimit)
}
func (*memoryLimitTuner) calcMemoryLimit(percentage float64) int64 {
memoryLimit := int64(float64(memory.ServerMemoryLimit.Load()) * percentage) // `tidb_server_memory_limit` * `tidb_server_memory_limit_gc_trigger`
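// e.g. tidb_server_memory_limit = 32 GiB with gc_trigger = 0.7 yields roughly 22.4 GiB;
// a zero result (no server limit configured) is mapped to math.MaxInt64 below.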
if memoryLimit == 0 {
memoryLimit = math.MaxInt64
}
return memoryLimit
}
var initGOMemoryLimitValue int64
func init() {
initGOMemoryLimitValue = debug.SetMemoryLimit(-1)
GlobalMemoryLimitTuner.Start()
}
| pkg/util/gctuner/memory_limit_tuner.go | 1 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.04329896718263626,
0.0038302491884678602,
0.00017504734569229186,
0.001202701823785901,
0.009678086265921593
] |
{
"id": 14,
"code_window": [
"\twaitingTunningFinishFn()\n",
"\tcheckIfMemoryLimitIsModified()\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func TestSetMemoryLimit(t *testing.T) {\n",
"\tGlobalMemoryLimitTuner.DisableAdjustMemoryLimit()\n",
"\tmemory.ServerMemoryLimit.Store(1 << 30) // 1GB\n",
"\tGlobalMemoryLimitTuner.SetPercentage(0.8) // 1GB * 80% = 800MB\n",
"\tGlobalMemoryLimitTuner.UpdateMemoryLimit()\n",
"\trequire.Equal(t, initGOMemoryLimitValue, debug.SetMemoryLimit(-1))\n",
"\tGlobalMemoryLimitTuner.EnableAdjustMemoryLimit()\n",
"\tGlobalMemoryLimitTuner.UpdateMemoryLimit()\n",
"\trequire.Equal(t, int64(1<<30*80/100), debug.SetMemoryLimit(-1))\n",
"}"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner_test.go",
"type": "add",
"edit_start_line_idx": 232
} | create table t(a text, b text);
| br/tests/lightning_issue_519/data/issue519.t-schema.sql | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.00017154177476186305,
0.00017154177476186305,
0.00017154177476186305,
0.00017154177476186305,
0
] |
{
"id": 14,
"code_window": [
"\twaitingTunningFinishFn()\n",
"\tcheckIfMemoryLimitIsModified()\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func TestSetMemoryLimit(t *testing.T) {\n",
"\tGlobalMemoryLimitTuner.DisableAdjustMemoryLimit()\n",
"\tmemory.ServerMemoryLimit.Store(1 << 30) // 1GB\n",
"\tGlobalMemoryLimitTuner.SetPercentage(0.8) // 1GB * 80% = 800MB\n",
"\tGlobalMemoryLimitTuner.UpdateMemoryLimit()\n",
"\trequire.Equal(t, initGOMemoryLimitValue, debug.SetMemoryLimit(-1))\n",
"\tGlobalMemoryLimitTuner.EnableAdjustMemoryLimit()\n",
"\tGlobalMemoryLimitTuner.UpdateMemoryLimit()\n",
"\trequire.Equal(t, int64(1<<30*80/100), debug.SetMemoryLimit(-1))\n",
"}"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner_test.go",
"type": "add",
"edit_start_line_idx": 232
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestContainsAnyAsterisk(t *testing.T) {
var tests = []struct {
expression string
containsAsterisks bool
}{
{"$.a[1]", false},
{"$.a[*]", true},
{"$.*[1]", true},
{"$**.a[1]", true},
}
for _, test := range tests {
// copy iterator variable into a new variable, see issue #27779
test := test
t.Run(test.expression, func(t *testing.T) {
pe, err := ParseJSONPathExpr(test.expression)
require.NoError(t, err)
require.Equal(t, test.containsAsterisks, pe.flags.containsAnyAsterisk())
})
}
}
func TestValidatePathExpr(t *testing.T) {
var tests = []struct {
expression string
success bool
legs int
}{
{` $ `, true, 0},
{" $ . key1 [ 3 ]\t[*].*.key3", true, 5},
{" $ . key1 [ 3 ]**[*].*.key3", true, 6},
{`$."key1 string"[ 3 ][*].*.key3`, true, 5},
{`$."hello \"escaped quotes\" world\\n"[3][*].*.key3`, true, 5},
{`$[1 to 5]`, true, 1},
{`$[2 to 1]`, false, 1},
{`$[last]`, true, 1},
{`$[1 to last]`, true, 1},
{`$[1to3]`, false, 1},
{`$[last - 5 to last - 10]`, false, 1},
{`$.\"escaped quotes\"[3][*].*.key3`, false, 0},
{`$.hello \"escaped quotes\" world[3][*].*.key3`, false, 0},
{`$NoValidLegsHere`, false, 0},
{`$ No Valid Legs Here .a.b.c`, false, 0},
{`$.a[b]`, false, 0},
{`$.*[b]`, false, 0},
{`$**.a[b]`, false, 0},
{`$.b[ 1 ].`, false, 0},
{`$.performance.txn-entry-size-limit`, false, 0},
{`$."performance".txn-entry-size-limit`, false, 0},
{`$."performance."txn-entry-size-limit`, false, 0},
{`$."performance."txn-entry-size-limit"`, false, 0},
{`$[`, false, 0},
{`$a.***[3]`, false, 0},
{`$1a`, false, 0},
}
for _, test := range tests {
// copy iterator variable into a new variable, see issue #27779
test := test
t.Run(test.expression, func(t *testing.T) {
pe, err := ParseJSONPathExpr(test.expression)
if test.success {
require.NoError(t, err)
require.Len(t, pe.legs, test.legs)
} else {
require.Error(t, err)
}
})
}
}
func TestPathExprToString(t *testing.T) {
var tests = []struct {
expression string
}{
{"$.a[1]"},
{"$.a[*]"},
{"$.*[2]"},
{"$**.a[3]"},
{`$."\"hello\""`},
{`$."a b"`},
{`$."one potato"`},
}
for _, test := range tests {
// copy iterator variable into a new variable, see issue #27779
test := test
t.Run(test.expression, func(t *testing.T) {
pe, err := ParseJSONPathExpr(test.expression)
require.NoError(t, err)
require.Equal(t, test.expression, pe.String())
})
}
}
func TestPushBackOneIndexLeg(t *testing.T) {
var tests = []struct {
expression string
index int
expected string
couldReturnMultipleValues bool
}{
{"$", 1, "$[1]", false},
{"$.a[1]", 1, "$.a[1][1]", false},
{"$.a[*]", 10, "$.a[*][10]", true},
{"$.*[2]", 2, "$.*[2][2]", true},
{"$**.a[3]", 3, "$**.a[3][3]", true},
{"$.a[1 to 3]", 3, "$.a[1 to 3][3]", true},
{"$.a[last-3 to last-3]", 3, "$.a[last-3 to last-3][3]", true},
{"$**.a[3]", -3, "$**.a[3][last-2]", true},
}
for _, test := range tests {
// copy iterator variable into a new variable, see issue #27779
test := test
t.Run(test.expression, func(t *testing.T) {
pe, err := ParseJSONPathExpr(test.expression)
require.NoError(t, err)
pe = pe.pushBackOneArraySelectionLeg(jsonPathArraySelectionIndex{index: jsonPathArrayIndexFromStart(test.index)})
require.Equal(t, test.expected, pe.String())
require.Equal(t, test.couldReturnMultipleValues, pe.CouldMatchMultipleValues())
})
}
}
func TestPushBackOneKeyLeg(t *testing.T) {
var tests = []struct {
expression string
key string
expected string
couldReturnMultipleValues bool
}{
{"$", "aa", "$.aa", false},
{"$.a[1]", "aa", "$.a[1].aa", false},
{"$.a[1]", "*", "$.a[1].*", true},
{"$.a[*]", "k", "$.a[*].k", true},
{"$.*[2]", "bb", "$.*[2].bb", true},
{"$**.a[3]", "cc", "$**.a[3].cc", true},
}
for _, test := range tests {
t.Run(test.expression, func(t *testing.T) {
pe, err := ParseJSONPathExpr(test.expression)
require.NoError(t, err)
pe = pe.pushBackOneKeyLeg(test.key)
require.Equal(t, test.expected, pe.String())
require.Equal(t, test.couldReturnMultipleValues, pe.CouldMatchMultipleValues())
})
}
}
| pkg/types/json_path_expr_test.go | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.00018309139704797417,
0.00016996690828818828,
0.00016462533676531166,
0.00016826772480271757,
0.000004359854756330606
] |
{
"id": 14,
"code_window": [
"\twaitingTunningFinishFn()\n",
"\tcheckIfMemoryLimitIsModified()\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func TestSetMemoryLimit(t *testing.T) {\n",
"\tGlobalMemoryLimitTuner.DisableAdjustMemoryLimit()\n",
"\tmemory.ServerMemoryLimit.Store(1 << 30) // 1GB\n",
"\tGlobalMemoryLimitTuner.SetPercentage(0.8) // 1GB * 80% = 800MB\n",
"\tGlobalMemoryLimitTuner.UpdateMemoryLimit()\n",
"\trequire.Equal(t, initGOMemoryLimitValue, debug.SetMemoryLimit(-1))\n",
"\tGlobalMemoryLimitTuner.EnableAdjustMemoryLimit()\n",
"\tGlobalMemoryLimitTuner.UpdateMemoryLimit()\n",
"\trequire.Equal(t, int64(1<<30*80/100), debug.SetMemoryLimit(-1))\n",
"}"
],
"file_path": "pkg/util/gctuner/memory_limit_tuner_test.go",
"type": "add",
"edit_start_line_idx": 232
} | {
"compilerOptions": {
"target": "es5",
"module": "esnext",
"moduleResolution": "node",
"experimentalDecorators": true,
"strict": true,
"sourceMap": true,
"outDir": "./dist/",
"noImplicitAny": true,
"jsx": "react",
"baseUrl": ".",
"paths": {"@/*": ["src/*"]},
"lib": ["es2015", "dom"],
"newLine": "LF"
},
"include": ["src/**/*.ts", "src/**/*.tsx"],
"exclude": ["node_modules"]
} | br/web/tsconfig.json | 0 | https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4 | [
0.00017318938625976443,
0.00017222543829120696,
0.00017126147577073425,
0.00017222543829120696,
9.639552445150912e-7
] |
{
"id": 0,
"code_window": [
"\tlabelsutil \"k8s.io/kubernetes/pkg/util/labels\"\n",
"\ttestutils \"k8s.io/kubernetes/test/utils\"\n",
")\n",
"\n",
"func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*extensions.Deployment, error) {\n",
"\treturn testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf)\n",
"}\n",
"\n",
"// Waits for the deployment to clean up old rcs.\n",
"func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)\n"
],
"file_path": "test/e2e/framework/deployment_util.go",
"type": "replace",
"edit_start_line_idx": 38
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"net/http/httptest"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/deployment"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/test/integration/framework"
testutil "k8s.io/kubernetes/test/utils"
)
const (
pollInterval = 1 * time.Second
pollTimeout = 60 * time.Second
fakeImageName = "fake-name"
fakeImage = "fakeimage"
)
var pauseFn = func(update *v1beta1.Deployment) {
update.Spec.Paused = true
}
var resumeFn = func(update *v1beta1.Deployment) {
update.Spec.Paused = false
}
type deploymentTester struct {
t *testing.T
c clientset.Interface
deployment *v1beta1.Deployment
}
func testLabels() map[string]string {
return map[string]string{"name": "test"}
}
// newDeployment returns a RollingUpdate Deployment with a fake container image
func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment {
return &v1beta1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "extensions/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Spec: v1beta1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: testLabels()},
Strategy: v1beta1.DeploymentStrategy{
Type: v1beta1.RollingUpdateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: testLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: fakeImageName,
Image: fakeImage,
},
},
},
},
},
}
}
// dcSetup sets up necessities for Deployment integration test, including master, apiserver, informers, and clientset
func dcSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.ReplicaSetController, *deployment.DeploymentController, informers.SharedInformerFactory, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s, closeFn := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("error in create clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-informers")), resyncPeriod)
dc := deployment.NewDeploymentController(
informers.Extensions().V1beta1().Deployments(),
informers.Extensions().V1beta1().ReplicaSets(),
informers.Core().V1().Pods(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-controller")),
)
rm := replicaset.NewReplicaSetController(
informers.Extensions().V1beta1().ReplicaSets(),
informers.Core().V1().Pods(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
replicaset.BurstReplicas,
)
return s, closeFn, rm, dc, informers, clientSet
}
// dcSimpleSetup sets up necessities for Deployment integration test, including master, apiserver,
// and clientset, but not controllers and informers
func dcSimpleSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s, closeFn := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("error in create clientset: %v", err)
}
return s, closeFn, clientSet
}
// addPodConditionReady sets the given pod's status to ready at the given time
func addPodConditionReady(pod *v1.Pod, time metav1.Time) {
pod.Status = v1.PodStatus{
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
LastTransitionTime: time,
},
},
}
}
func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image string) error {
if err := testutil.WaitForDeploymentRevisionAndImage(d.c, d.deployment.Namespace, d.deployment.Name, revision, image, d.t.Logf, pollInterval, pollTimeout); err != nil {
return fmt.Errorf("failed to wait for Deployment revision %s: %v", d.deployment.Name, err)
}
return nil
}
// markAllPodsReady manually updates the status of all Deployment pods to ready
func (d *deploymentTester) markAllPodsReady() {
ns := d.deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(d.deployment.Spec.Selector)
if err != nil {
d.t.Fatalf("failed to parse Deployment selector: %v", err)
}
var readyPods int32
err = wait.Poll(100*time.Millisecond, pollTimeout, func() (bool, error) {
readyPods = 0
pods, err := d.c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
d.t.Logf("failed to list Deployment pods, will retry later: %v", err)
return false, nil
}
for i := range pods.Items {
pod := pods.Items[i]
if podutil.IsPodReady(&pod) {
readyPods++
continue
}
addPodConditionReady(&pod, metav1.Now())
if _, err = d.c.Core().Pods(ns).UpdateStatus(&pod); err != nil {
d.t.Logf("failed to update Deployment pod %s, will retry later: %v", pod.Name, err)
} else {
readyPods++
}
}
if readyPods >= *d.deployment.Spec.Replicas {
return true, nil
}
return false, nil
})
if err != nil {
d.t.Fatalf("failed to mark all Deployment pods to ready: %v", err)
}
}
func (d *deploymentTester) waitForDeploymentStatusValid() error {
return testutil.WaitForDeploymentStatusValid(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout)
}
// waitForDeploymentStatusValidAndMarkPodsReady waits for the Deployment status to become valid
// while marking all Deployment pods as ready at the same time.
func (d *deploymentTester) waitForDeploymentStatusValidAndMarkPodsReady() error {
// Manually mark all Deployment pods as ready in a separate goroutine
go d.markAllPodsReady()
// Make sure the Deployment status is valid while Deployment pods are becoming ready
err := d.waitForDeploymentStatusValid()
if err != nil {
return fmt.Errorf("failed to wait for Deployment status %s: %v", d.deployment.Name, err)
}
return nil
}
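// updateDeployment applies applyUpdate to the tester's Deployment with retries.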
func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*v1beta1.Deployment, error) {
return testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf)
}
func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) error {
if err := testutil.WaitForObservedDeployment(d.c, d.deployment.Namespace, d.deployment.Name, desiredGeneration); err != nil {
return fmt.Errorf("failed waiting for ObservedGeneration of deployment %s to become %d: %v", d.deployment.Name, desiredGeneration, err)
}
return nil
}
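// getNewReplicaSet returns the new ReplicaSet of the tester's Deployment, or nil if it has not been created yet.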
func (d *deploymentTester) getNewReplicaSet() (*v1beta1.ReplicaSet, error) {
rs, err := deploymentutil.GetNewReplicaSet(d.deployment, d.c.ExtensionsV1beta1())
if err != nil {
return nil, fmt.Errorf("failed retrieving new replicaset of deployment %s: %v", d.deployment.Name, err)
}
return rs, nil
}
func (d *deploymentTester) expectNoNewReplicaSet() error {
rs, err := d.getNewReplicaSet()
if err != nil {
return err
}
if rs != nil {
return fmt.Errorf("expected deployment %s not to create a new replicaset, got %v", d.deployment.Name, rs)
}
return nil
}
func (d *deploymentTester) expectNewReplicaSet() (*v1beta1.ReplicaSet, error) {
rs, err := d.getNewReplicaSet()
if err != nil {
return nil, err
}
if rs == nil {
return nil, fmt.Errorf("expected deployment %s to create a new replicaset, got nil", d.deployment.Name)
}
return rs, nil
}
func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {
return testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf)
}
| test/integration/deployment/util.go | 1 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.9991288781166077,
0.0774877518415451,
0.00016472049173898995,
0.00041927158599719405,
0.25760123133659363
] |
{
"id": 0,
"code_window": [
"\tlabelsutil \"k8s.io/kubernetes/pkg/util/labels\"\n",
"\ttestutils \"k8s.io/kubernetes/test/utils\"\n",
")\n",
"\n",
"func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*extensions.Deployment, error) {\n",
"\treturn testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf)\n",
"}\n",
"\n",
"// Waits for the deployment to clean up old rcs.\n",
"func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)\n"
],
"file_path": "test/e2e/framework/deployment_util.go",
"type": "replace",
"edit_start_line_idx": 38
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/generic"
etcdtesting "k8s.io/apiserver/pkg/storage/etcd/testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/registry/registrytest"
)
func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) {
etcdStorage, server := registrytest.NewEtcdStorage(t, "")
restOptions := generic.RESTOptions{
StorageConfig: etcdStorage,
Decorator: generic.UndecoratedStorage,
DeleteCollectionWorkers: 1,
ResourcePrefix: "podtemplates",
}
return NewREST(restOptions), server
}
func validNewPodTemplate(name string) *api.PodTemplate {
return &api.PodTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: metav1.NamespaceDefault,
},
Template: api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"test": "foo"},
},
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
Containers: []api.Container{
{
Name: "foo",
Image: "test",
ImagePullPolicy: api.PullAlways,
TerminationMessagePath: api.TerminationMessagePathDefault,
TerminationMessagePolicy: api.TerminationMessageReadFile,
},
},
},
},
}
}
func TestCreate(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := registrytest.New(t, storage.Store)
pod := validNewPodTemplate("foo")
pod.ObjectMeta = metav1.ObjectMeta{}
test.TestCreate(
// valid
pod,
// invalid
&api.PodTemplate{
Template: api.PodTemplateSpec{},
},
)
}
func TestUpdate(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := registrytest.New(t, storage.Store)
test.TestUpdate(
// valid
validNewPodTemplate("foo"),
// updateFunc
func(obj runtime.Object) runtime.Object {
object := obj.(*api.PodTemplate)
object.Template.Spec.NodeSelector = map[string]string{"a": "b"}
return object
},
)
}
func TestDelete(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := registrytest.New(t, storage.Store).ReturnDeletedObject()
test.TestDelete(validNewPodTemplate("foo"))
}
func TestGet(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := registrytest.New(t, storage.Store)
test.TestGet(validNewPodTemplate("foo"))
}
func TestList(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := registrytest.New(t, storage.Store)
test.TestList(validNewPodTemplate("foo"))
}
func TestWatch(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := registrytest.New(t, storage.Store)
test.TestWatch(
validNewPodTemplate("foo"),
// matching labels
[]labels.Set{},
// not matching labels
[]labels.Set{
{"foo": "bar"},
},
// matching fields
[]fields.Set{},
// not matching fields
[]fields.Set{
{"metadata.name": "bar"},
{"name": "foo"},
},
)
}
| pkg/registry/core/podtemplate/storage/storage_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.0010954024037346244,
0.00023735481954645365,
0.00016609103477094322,
0.00017073427443392575,
0.0002240939502371475
] |
{
"id": 0,
"code_window": [
"\tlabelsutil \"k8s.io/kubernetes/pkg/util/labels\"\n",
"\ttestutils \"k8s.io/kubernetes/test/utils\"\n",
")\n",
"\n",
"func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*extensions.Deployment, error) {\n",
"\treturn testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf)\n",
"}\n",
"\n",
"// Waits for the deployment to clean up old rcs.\n",
"func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)\n"
],
"file_path": "test/e2e/framework/deployment_util.go",
"type": "replace",
"edit_start_line_idx": 38
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package types contains go type information, packaged in a way that makes
// auto-generation convenient, whether by template or straight go functions.
package types
| vendor/k8s.io/gengo/types/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00017623456369619817,
0.0001755381963448599,
0.00017484182899352163,
0.0001755381963448599,
6.963673513382673e-7
] |
{
"id": 0,
"code_window": [
"\tlabelsutil \"k8s.io/kubernetes/pkg/util/labels\"\n",
"\ttestutils \"k8s.io/kubernetes/test/utils\"\n",
")\n",
"\n",
"func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*extensions.Deployment, error) {\n",
"\treturn testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf)\n",
"}\n",
"\n",
"// Waits for the deployment to clean up old rcs.\n",
"func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)\n"
],
"file_path": "test/e2e/framework/deployment_util.go",
"type": "replace",
"edit_start_line_idx": 38
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This package is generated by client-gen with custom arguments.
// This package has the automatically generated typed clients.
package internalversion
| staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00017638603458181024,
0.00017292029224336147,
0.0001680305867921561,
0.00017434422625228763,
0.000003556598585419124
] |
{
"id": 1,
"code_window": [
"type updateRsFunc func(d *extensions.ReplicaSet)\n",
"\n",
"func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*extensions.ReplicaSet, error) {\n",
"\treturn testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf)\n",
"}\n",
"\n",
"// CheckNewRSAnnotations check if the new RS's annotation is as expected\n",
"func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)\n"
],
"file_path": "test/e2e/framework/rs_util.go",
"type": "replace",
"edit_start_line_idx": 37
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
testutils "k8s.io/kubernetes/test/utils"
)
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*extensions.Deployment, error) {
return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf)
}
// Waits for the deployment to clean up old replica sets.
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
var oldRSs []*extensions.ReplicaSet
var d *extensions.Deployment
pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
d = deployment
_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
if err != nil {
return false, err
}
return len(oldRSs) == desiredRSNum, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
logReplicaSetsOfDeployment(d, oldRSs, nil)
}
return pollErr
}
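// logReplicaSetsOfDeployment dumps the deployment's old and new ReplicaSets through the framework logger for debugging.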
func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf)
}
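// WaitForObservedDeployment waits until the deployment controller has observed the given generation.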
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
return testutils.WaitForObservedDeployment(c, ns, deploymentName, desiredGeneration)
}
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error {
var deployment *extensions.Deployment
pollErr := wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
d, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
deployment = d
cond := deploymentutil.GetDeploymentCondition(deployment.Status, condType)
return cond != nil && cond.Reason == reason, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason: %v", deployment.Name, deployment.Status.Conditions)
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
if err == nil {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, append(allOldRSs, newRS))
}
}
return pollErr
}
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that the deployment's revision and its new RS's revision should be updated shortly in most cases, but an overwhelmed
// RS controller may take longer to relabel a RS.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, pollLongTimeout)
}
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType extensions.DeploymentStrategyType) *extensions.Deployment {
zero := int64(0)
return &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
},
Spec: extensions.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: podLabels},
Strategy: extensions.DeploymentStrategy{
Type: strategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: []v1.Container{
{
Name: imageName,
Image: image,
},
},
},
},
},
}
}
// Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore).
// Note that the status should stay valid at all times unless shortly after a scaling event or the deployment is just created.
// To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead.
func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment) error {
return testutils.WaitForDeploymentStatusValid(c, d, Logf, Poll, pollLongTimeout)
}
// Waits for the deployment to reach the desired state.
// Returns an error if the deployment's rolling update strategy (max unavailable or max surge) is broken at any time.
func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) error {
var (
oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
newRS *extensions.ReplicaSet
deployment *extensions.Deployment
)
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
if err != nil {
return false, err
}
if newRS == nil {
// New RS hasn't been created yet.
return false, nil
}
allRSs = append(oldRSs, newRS)
// The old/new ReplicaSets need to contain the pod-template-hash label
for i := range allRSs {
if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
return false, nil
}
}
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
if totalCreated > maxCreated {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, allRSs)
return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
}
minAvailable := deploymentutil.MinAvailable(deployment)
if deployment.Status.AvailableReplicas < minAvailable {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, allRSs)
return false, fmt.Errorf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
}
// When the deployment status and its underlying resources reach the desired state, we're done
return deploymentutil.DeploymentComplete(deployment, &deployment.Status), nil
})
if err == wait.ErrWaitTimeout {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, allRSs)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
}
return nil
}
// WaitForDeploymentUpdatedReplicasLTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updated replicas
func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
if deployment.Status.ObservedGeneration >= desiredGeneration && deployment.Status.UpdatedReplicas >= minUpdatedReplicas {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("error waiting for deployment %s to have at least %d updpatedReplicas: %v", deploymentName, minUpdatedReplicas, err)
}
return nil
}
// WaitForDeploymentRollbackCleared waits until the given deployment has either started rolling back or no longer needs to roll back.
// Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
// Rollback not set or is kicked off
if deployment.Spec.RollbackTo == nil {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, err)
}
return nil
}
// WatchRecreateDeployment watches Recreate deployments and ensures no new pods run at the same time as
// old pods.
func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) error {
if d.Spec.Strategy.Type != extensions.RecreateDeploymentStrategyType {
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
}
w, err := c.Extensions().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
if err != nil {
return err
}
status := d.Status
condition := func(event watch.Event) (bool, error) {
d := event.Object.(*extensions.Deployment)
status = d.Status
if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.ExtensionsV1beta1())
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.ExtensionsV1beta1())
if err == nil && nerr == nil {
Logf("%+v", d)
logReplicaSetsOfDeployment(d, allOldRSs, newRS)
logPodsOfDeployment(c, d, append(allOldRSs, newRS))
}
return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
}
return *(d.Spec.Replicas) == d.Status.Replicas &&
*(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
d.Generation <= d.Status.ObservedGeneration, nil
}
_, err = watch.Until(2*time.Minute, w, condition)
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
}
return err
}
func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, ns, name, size, wait, extensionsinternal.Kind("Deployment"))
}
func RunDeployment(config testutils.DeploymentConfig) error {
By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunDeployment(config)
}
func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) {
testutils.LogPodsOfDeployment(c, deployment, rsList, Logf)
}
| test/e2e/framework/deployment_util.go | 1 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.4302144944667816,
0.02110390178859234,
0.00017083562852349132,
0.0017498335801064968,
0.07720339298248291
] |
{
"id": 1,
"code_window": [
"type updateRsFunc func(d *extensions.ReplicaSet)\n",
"\n",
"func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*extensions.ReplicaSet, error) {\n",
"\treturn testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf)\n",
"}\n",
"\n",
"// CheckNewRSAnnotations check if the new RS's annotation is as expected\n",
"func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)\n"
],
"file_path": "test/e2e/framework/rs_util.go",
"type": "replace",
"edit_start_line_idx": 37
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by informer-gen
package v2beta1
import (
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer.
HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer
}
type version struct {
internalinterfaces.SharedInformerFactory
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory) Interface {
return &version{f}
}
// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer.
func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer {
return &horizontalPodAutoscalerInformer{factory: v.SharedInformerFactory}
}
| staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.0011282761115580797,
0.0004072768206242472,
0.00017177419795189053,
0.00017901515820994973,
0.0003688842698466033
] |
{
"id": 1,
"code_window": [
"type updateRsFunc func(d *extensions.ReplicaSet)\n",
"\n",
"func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*extensions.ReplicaSet, error) {\n",
"\treturn testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf)\n",
"}\n",
"\n",
"// CheckNewRSAnnotations check if the new RS's annotation is as expected\n",
"func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)\n"
],
"file_path": "test/e2e/framework/rs_util.go",
"type": "replace",
"edit_start_line_idx": 37
} | CoreOS Project
Copyright 2014 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
| vendor/github.com/coreos/go-oidc/NOTICE | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00017809630662668496,
0.00017809630662668496,
0.00017809630662668496,
0.00017809630662668496,
0
] |
{
"id": 1,
"code_window": [
"type updateRsFunc func(d *extensions.ReplicaSet)\n",
"\n",
"func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*extensions.ReplicaSet, error) {\n",
"\treturn testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf)\n",
"}\n",
"\n",
"// CheckNewRSAnnotations check if the new RS's annotation is as expected\n",
"func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)\n"
],
"file_path": "test/e2e/framework/rs_util.go",
"type": "replace",
"edit_start_line_idx": 37
} | package swarm
import "os"
// Config represents a config.
type Config struct {
ID string
Meta
Spec ConfigSpec
}
// ConfigSpec represents a config specification from a config in swarm
type ConfigSpec struct {
Annotations
Data []byte `json:",omitempty"`
}
// ConfigReferenceFileTarget is a file target in a config reference
type ConfigReferenceFileTarget struct {
Name string
UID string
GID string
Mode os.FileMode
}
// ConfigReference is a reference to a config in swarm
type ConfigReference struct {
File *ConfigReferenceFileTarget
ConfigID string
ConfigName string
}
| vendor/github.com/docker/docker/api/types/swarm/config.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00017574055527802557,
0.00017373930313624442,
0.00017145085439551622,
0.00017388290143571794,
0.0000017789128605727456
] |
{
"id": 2,
"code_window": [
"\n",
"import (\n",
"\t\"reflect\"\n",
"\t\"strings\"\n",
"\t\"testing\"\n",
"\t\"time\"\n",
"\n",
"\t\"k8s.io/api/core/v1\"\n",
"\t\"k8s.io/api/extensions/v1beta1\"\n",
"\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "test/integration/deployment/deployment_test.go",
"type": "replace",
"edit_start_line_idx": 22
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
testutils "k8s.io/kubernetes/test/utils"
)
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*extensions.Deployment, error) {
return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf)
}
// Waits for the deployment to clean up old replica sets.
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
var oldRSs []*extensions.ReplicaSet
var d *extensions.Deployment
pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
d = deployment
_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
if err != nil {
return false, err
}
return len(oldRSs) == desiredRSNum, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
logReplicaSetsOfDeployment(d, oldRSs, nil)
}
return pollErr
}
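// logReplicaSetsOfDeployment dumps the deployment's old and new ReplicaSets through the framework logger for debugging.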
func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf)
}
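// WaitForObservedDeployment waits until the deployment controller has observed the given generation.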
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
return testutils.WaitForObservedDeployment(c, ns, deploymentName, desiredGeneration)
}
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error {
var deployment *extensions.Deployment
pollErr := wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
d, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
deployment = d
cond := deploymentutil.GetDeploymentCondition(deployment.Status, condType)
return cond != nil && cond.Reason == reason, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason: %v", deployment.Name, deployment.Status.Conditions)
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
if err == nil {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, append(allOldRSs, newRS))
}
}
return pollErr
}
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that the deployment's revision and its new RS's revision should be updated shortly in most cases, but an overwhelmed
// RS controller may take longer to relabel a RS.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, pollLongTimeout)
}
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType extensions.DeploymentStrategyType) *extensions.Deployment {
zero := int64(0)
return &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
},
Spec: extensions.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: podLabels},
Strategy: extensions.DeploymentStrategy{
Type: strategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: []v1.Container{
{
Name: imageName,
Image: image,
},
},
},
},
},
}
}
// Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore).
// Note that the status should stay valid at all times unless shortly after a scaling event or the deployment is just created.
// To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead.
func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment) error {
return testutils.WaitForDeploymentStatusValid(c, d, Logf, Poll, pollLongTimeout)
}
// Waits for the deployment to reach the desired state.
// Returns an error if the deployment's rolling update strategy (max unavailable or max surge) is broken at any time.
func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) error {
var (
oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
newRS *extensions.ReplicaSet
deployment *extensions.Deployment
)
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
if err != nil {
return false, err
}
if newRS == nil {
// New RS hasn't been created yet.
return false, nil
}
allRSs = append(oldRSs, newRS)
// The old/new ReplicaSets need to contain the pod-template-hash label
for i := range allRSs {
if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
return false, nil
}
}
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
if totalCreated > maxCreated {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, allRSs)
return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
}
minAvailable := deploymentutil.MinAvailable(deployment)
if deployment.Status.AvailableReplicas < minAvailable {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, allRSs)
return false, fmt.Errorf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
}
// When the deployment status and its underlying resources reach the desired state, we're done
return deploymentutil.DeploymentComplete(deployment, &deployment.Status), nil
})
if err == wait.ErrWaitTimeout {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, allRSs)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
}
return nil
}
// WaitForDeploymentUpdatedReplicasLTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updated replicas
func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
if deployment.Status.ObservedGeneration >= desiredGeneration && deployment.Status.UpdatedReplicas >= minUpdatedReplicas {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("error waiting for deployment %s to have at least %d updpatedReplicas: %v", deploymentName, minUpdatedReplicas, err)
}
return nil
}
// WaitForDeploymentRollbackCleared waits until the given deployment has either started rolling back or no longer needs to roll back.
// Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
// Rollback not set or is kicked off
if deployment.Spec.RollbackTo == nil {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, err)
}
return nil
}
// WatchRecreateDeployment watches Recreate deployments and ensures no new pods run at the same time as
// old pods.
func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) error {
if d.Spec.Strategy.Type != extensions.RecreateDeploymentStrategyType {
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
}
w, err := c.Extensions().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
if err != nil {
return err
}
status := d.Status
condition := func(event watch.Event) (bool, error) {
d := event.Object.(*extensions.Deployment)
status = d.Status
if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.ExtensionsV1beta1())
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.ExtensionsV1beta1())
if err == nil && nerr == nil {
Logf("%+v", d)
logReplicaSetsOfDeployment(d, allOldRSs, newRS)
logPodsOfDeployment(c, d, append(allOldRSs, newRS))
}
return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
}
return *(d.Spec.Replicas) == d.Status.Replicas &&
*(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
d.Generation <= d.Status.ObservedGeneration, nil
}
_, err = watch.Until(2*time.Minute, w, condition)
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
}
return err
}
func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, ns, name, size, wait, extensionsinternal.Kind("Deployment"))
}
func RunDeployment(config testutils.DeploymentConfig) error {
By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunDeployment(config)
}
func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) {
testutils.LogPodsOfDeployment(c, deployment, rsList, Logf)
}
| test/e2e/framework/deployment_util.go | 1 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.021740367636084557,
0.0010096459882333875,
0.00016245267761405557,
0.0001754380646161735,
0.003887490602210164
] |
{
"id": 2,
"code_window": [
"\n",
"import (\n",
"\t\"reflect\"\n",
"\t\"strings\"\n",
"\t\"testing\"\n",
"\t\"time\"\n",
"\n",
"\t\"k8s.io/api/core/v1\"\n",
"\t\"k8s.io/api/extensions/v1beta1\"\n",
"\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "test/integration/deployment/deployment_test.go",
"type": "replace",
"edit_start_line_idx": 22
} | package pflag
import (
"time"
)
// -- time.Duration Value
type durationValue time.Duration
func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
*p = val
return (*durationValue)(p)
}
func (d *durationValue) Set(s string) error {
v, err := time.ParseDuration(s)
*d = durationValue(v)
return err
}
func (d *durationValue) Type() string {
return "duration"
}
func (d *durationValue) String() string { return (*time.Duration)(d).String() }
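// durationConv parses a raw string flag value into a time.Duration for generic flag lookups.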
func durationConv(sval string) (interface{}, error) {
return time.ParseDuration(sval)
}
// GetDuration returns the duration value of a flag with the given name
func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
val, err := f.getFlagType(name, "duration", durationConv)
if err != nil {
return 0, err
}
return val.(time.Duration), nil
}
// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
// The argument p points to a time.Duration variable in which to store the value of the flag.
func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
f.VarP(newDurationValue(value, p), name, "", usage)
}
// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
f.VarP(newDurationValue(value, p), name, shorthand, usage)
}
// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
// The argument p points to a time.Duration variable in which to store the value of the flag.
func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
CommandLine.VarP(newDurationValue(value, p), name, "", usage)
}
// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
}
// Duration defines a time.Duration flag with specified name, default value, and usage string.
// The return value is the address of a time.Duration variable that stores the value of the flag.
func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
p := new(time.Duration)
f.DurationVarP(p, name, "", value, usage)
return p
}
// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
p := new(time.Duration)
f.DurationVarP(p, name, shorthand, value, usage)
return p
}
// Duration defines a time.Duration flag with specified name, default value, and usage string.
// The return value is the address of a time.Duration variable that stores the value of the flag.
func Duration(name string, value time.Duration, usage string) *time.Duration {
return CommandLine.DurationP(name, "", value, usage)
}
// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
return CommandLine.DurationP(name, shorthand, value, usage)
}
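// Usage sketch (illustrative, not part of the upstream file), from a client
// of the package-level CommandLine flag set:
//
//	timeout := pflag.Duration("timeout", 30*time.Second, "request timeout")
//	pflag.Parse()
//	fmt.Printf("timeout=%s\n", *timeout) // --timeout=1m30s prints "timeout=1m30s"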
| vendor/github.com/spf13/pflag/duration.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.0021894313395023346,
0.00061737623764202,
0.0001717681298032403,
0.0003055187116842717,
0.0006289685261435807
] |
{
"id": 2,
"code_window": [
"\n",
"import (\n",
"\t\"reflect\"\n",
"\t\"strings\"\n",
"\t\"testing\"\n",
"\t\"time\"\n",
"\n",
"\t\"k8s.io/api/core/v1\"\n",
"\t\"k8s.io/api/extensions/v1beta1\"\n",
"\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "test/integration/deployment/deployment_test.go",
"type": "replace",
"edit_start_line_idx": 22
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package nestedpendingoperations is a modified implementation of
pkg/util/goroutinemap. It implements a data structure for managing go routines
by volume/pod name. It prevents the creation of new go routines if an existing
go routine for the volume already exists. It also allows multiple operations to
execute in parallel for the same volume as long as they are operating on
different pods.
*/
package nestedpendingoperations
import (
"fmt"
"sync"
"github.com/golang/glog"
"k8s.io/api/core/v1"
k8sRuntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
"k8s.io/kubernetes/pkg/volume/util/types"
)
const (
// EmptyUniquePodName is a UniquePodName for empty string.
EmptyUniquePodName types.UniquePodName = types.UniquePodName("")
// EmptyUniqueVolumeName is a UniqueVolumeName for empty string
EmptyUniqueVolumeName v1.UniqueVolumeName = v1.UniqueVolumeName("")
)
// NestedPendingOperations defines the supported set of operations.
type NestedPendingOperations interface {
// Run adds the concatenation of volumeName and podName to the list of
// running operations and spawns a new go routine to execute operationFunc.
// If an operation with the same volumeName and same or empty podName
// exists, an AlreadyExists or ExponentialBackoff error is returned.
// This enables multiple operations to execute in parallel for the same
// volumeName as long as they have different podName.
// Once the operation is complete, the go routine is terminated and the
// concatenation of volumeName and podName is removed from the list of
// executing operations allowing a new operation to be started with the
// volumeName without error.
Run(volumeName v1.UniqueVolumeName, podName types.UniquePodName, operationFunc func() error, operationCompleteFunc func(error)) error
// Wait blocks until all operations are completed. This is typically
// necessary during tests - the test should wait until all operations finish
// and evaluate results after that.
Wait()
// IsOperationPending returns true if an operation for the given volumeName and podName is pending,
// otherwise it returns false
IsOperationPending(volumeName v1.UniqueVolumeName, podName types.UniquePodName) bool
}
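// Illustrative usage (a sketch under assumptions, not part of the upstream
// file): operations are keyed by volume and, optionally, by pod, so mounts of
// the same volume for different pods may run in parallel, while a duplicate
// operation for the same volume/pod pair is rejected. mountVolume and
// recordResult are hypothetical helpers.
//
//	grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
//	err := grm.Run(volumeName, podName,
//	    func() error { return mountVolume(volumeName, podName) },
//	    func(opErr error) { recordResult(volumeName, podName, opErr) })
//	if IsAlreadyExists(err) {
//	    // An identical operation is already in flight; try again on the next sync.
//	}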
// NewNestedPendingOperations returns a new instance of NestedPendingOperations.
func NewNestedPendingOperations(exponentialBackOffOnError bool) NestedPendingOperations {
g := &nestedPendingOperations{
operations: []operation{},
exponentialBackOffOnError: exponentialBackOffOnError,
}
g.cond = sync.NewCond(&g.lock)
return g
}
type nestedPendingOperations struct {
operations []operation
exponentialBackOffOnError bool
cond *sync.Cond
lock sync.RWMutex
}
type operation struct {
volumeName v1.UniqueVolumeName
podName types.UniquePodName
operationPending bool
expBackoff exponentialbackoff.ExponentialBackoff
}
func (grm *nestedPendingOperations) Run(
volumeName v1.UniqueVolumeName,
podName types.UniquePodName,
operationFunc func() error,
operationCompleteFunc func(error)) error {
grm.lock.Lock()
defer grm.lock.Unlock()
opExists, previousOpIndex := grm.isOperationExists(volumeName, podName)
if opExists {
previousOp := grm.operations[previousOpIndex]
// Operation already exists
if previousOp.operationPending {
// Operation is pending
operationName := getOperationName(volumeName, podName)
return NewAlreadyExistsError(operationName)
}
operationName := getOperationName(volumeName, podName)
if err := previousOp.expBackoff.SafeToRetry(operationName); err != nil {
return err
}
// Update existing operation to mark as pending.
grm.operations[previousOpIndex].operationPending = true
grm.operations[previousOpIndex].volumeName = volumeName
grm.operations[previousOpIndex].podName = podName
} else {
// Create a new operation
grm.operations = append(grm.operations,
operation{
operationPending: true,
volumeName: volumeName,
podName: podName,
expBackoff: exponentialbackoff.ExponentialBackoff{},
})
}
go func() (err error) {
// Handle unhandled panics (very unlikely)
defer k8sRuntime.HandleCrash()
// Handle completion of operationFunc() and any error it returned
defer grm.operationComplete(volumeName, podName, &err)
defer operationCompleteFunc(err)
// Handle panic, if any, from operationFunc()
defer k8sRuntime.RecoverFromPanic(&err)
return operationFunc()
}()
return nil
}
func (grm *nestedPendingOperations) IsOperationPending(
volumeName v1.UniqueVolumeName,
podName types.UniquePodName) bool {
grm.lock.RLock()
defer grm.lock.RUnlock()
exist, previousOpIndex := grm.isOperationExists(volumeName, podName)
if exist && grm.operations[previousOpIndex].operationPending {
return true
}
return false
}
// This is an internal function and caller should acquire and release the lock
func (grm *nestedPendingOperations) isOperationExists(
volumeName v1.UniqueVolumeName,
podName types.UniquePodName) (bool, int) {
// If volumeName is empty, operation can be executed concurrently
if volumeName == EmptyUniqueVolumeName {
return false, -1
}
for previousOpIndex, previousOp := range grm.operations {
if previousOp.volumeName != volumeName {
// No match, keep searching
continue
}
if previousOp.podName != EmptyUniquePodName &&
podName != EmptyUniquePodName &&
previousOp.podName != podName {
// No match, keep searching
continue
}
// Match
return true, previousOpIndex
}
return false, -1
}
func (grm *nestedPendingOperations) getOperation(
volumeName v1.UniqueVolumeName,
podName types.UniquePodName) (uint, error) {
// Assumes lock has been acquired by caller.
for i, op := range grm.operations {
if op.volumeName == volumeName &&
op.podName == podName {
return uint(i), nil
}
}
logOperationName := getOperationName(volumeName, podName)
return 0, fmt.Errorf("Operation %q not found", logOperationName)
}
// Assumes lock has been acquired by caller.
func (grm *nestedPendingOperations) deleteOperation(
volumeName v1.UniqueVolumeName,
podName types.UniquePodName) {
opIndex := -1
for i, op := range grm.operations {
if op.volumeName == volumeName &&
op.podName == podName {
opIndex = i
break
}
}
// Delete index without preserving order
grm.operations[opIndex] = grm.operations[len(grm.operations)-1]
grm.operations = grm.operations[:len(grm.operations)-1]
}
func (grm *nestedPendingOperations) operationComplete(
volumeName v1.UniqueVolumeName, podName types.UniquePodName, err *error) {
// Deferred functions run in last-in, first-out order: the lock is acquired
// when operationComplete begins and released when the method finishes; only
// after the lock is released is cond signaled to wake any waiting goroutine.
defer grm.cond.Signal()
grm.lock.Lock()
defer grm.lock.Unlock()
if *err == nil || !grm.exponentialBackOffOnError {
// Operation completed without error, or exponentialBackOffOnError disabled
grm.deleteOperation(volumeName, podName)
if *err != nil {
// Log error
logOperationName := getOperationName(volumeName, podName)
glog.Errorf("operation %s failed with: %v",
logOperationName,
*err)
}
return
}
// Operation completed with error and exponentialBackOffOnError Enabled
existingOpIndex, getOpErr := grm.getOperation(volumeName, podName)
if getOpErr != nil {
// Failed to find existing operation
logOperationName := getOperationName(volumeName, podName)
glog.Errorf("Operation %s completed. error: %v. exponentialBackOffOnError is enabled, but failed to get operation to update.",
logOperationName,
*err)
return
}
grm.operations[existingOpIndex].expBackoff.Update(err)
grm.operations[existingOpIndex].operationPending = false
// Log error
operationName :=
getOperationName(volumeName, podName)
glog.Errorf("%v", grm.operations[existingOpIndex].expBackoff.
GenerateNoRetriesPermittedMsg(operationName))
}
func (grm *nestedPendingOperations) Wait() {
grm.lock.Lock()
defer grm.lock.Unlock()
for len(grm.operations) > 0 {
grm.cond.Wait()
}
}
func getOperationName(
volumeName v1.UniqueVolumeName, podName types.UniquePodName) string {
podNameStr := ""
if podName != EmptyUniquePodName {
podNameStr = fmt.Sprintf(" (%q)", podName)
}
return fmt.Sprintf("%q%s",
volumeName,
podNameStr)
}
// NewAlreadyExistsError returns a new instance of AlreadyExists error.
func NewAlreadyExistsError(operationName string) error {
return alreadyExistsError{operationName}
}
// IsAlreadyExists returns true if an error returned from
// NestedPendingOperations indicates a new operation can not be started because
// an operation with the same operation name is already executing.
func IsAlreadyExists(err error) bool {
switch err.(type) {
case alreadyExistsError:
return true
default:
return false
}
}
// alreadyExistsError is the error returned by NestedPendingOperations when a
// new operation can not be started because an operation with the same operation
// name is already executing.
type alreadyExistsError struct {
operationName string
}
var _ error = alreadyExistsError{}
func (err alreadyExistsError) Error() string {
return fmt.Sprintf(
"Failed to create operation with name %q. An operation with that name is already executing.",
err.operationName)
}
| pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.0009956662543118,
0.0002118253178196028,
0.0001631772320251912,
0.0001707191113382578,
0.00015940766024868935
] |
{
"id": 2,
"code_window": [
"\n",
"import (\n",
"\t\"reflect\"\n",
"\t\"strings\"\n",
"\t\"testing\"\n",
"\t\"time\"\n",
"\n",
"\t\"k8s.io/api/core/v1\"\n",
"\t\"k8s.io/api/extensions/v1beta1\"\n",
"\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "test/integration/deployment/deployment_test.go",
"type": "replace",
"edit_start_line_idx": 22
} | #!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script downloads and installs the Kubernetes client and server
# (and optionally test) binaries.
# It is intended to be called from an extracted Kubernetes release tarball.
#
# We automatically choose the correct client binaries to download.
#
# Options:
# Set KUBERNETES_SERVER_ARCH to choose the server (Kubernetes cluster)
# architecture to download:
# * amd64 [default]
# * arm
# * arm64
# * ppc64le
# * s390x
#
# Set KUBERNETES_SKIP_CONFIRM to skip the installation confirmation prompt.
# Set KUBERNETES_RELEASE_URL to choose where to download binaries from.
# (Defaults to https://dl.k8s.io, matching the KUBERNETES_RELEASE_URL default below).
# Set KUBERNETES_DOWNLOAD_TESTS to additionally download and extract the test
# binaries tarball.
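#
# Example (illustrative): fetch arm64 server binaries and the test tarball
# without the confirmation prompt:
#   KUBERNETES_SERVER_ARCH=arm64 KUBERNETES_DOWNLOAD_TESTS=y \
#     KUBERNETES_SKIP_CONFIRM=y ./cluster/get-kube-binaries.sh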
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(cd $(dirname "${BASH_SOURCE}")/.. && pwd)
KUBERNETES_RELEASE_URL="${KUBERNETES_RELEASE_URL:-https://dl.k8s.io}"
function detect_kube_release() {
if [[ -n "${KUBE_VERSION:-}" ]]; then
return 0 # Allow caller to explicitly set version
fi
if [[ ! -e "${KUBE_ROOT}/version" ]]; then
echo "Can't determine Kubernetes release." >&2
echo "${BASH_SOURCE} should only be run from a prebuilt Kubernetes release." >&2
echo "Did you mean to use get-kube.sh instead?" >&2
exit 1
fi
KUBE_VERSION=$(cat "${KUBE_ROOT}/version")
}
function detect_client_info() {
local kernel=$(uname -s)
case "${kernel}" in
Darwin)
CLIENT_PLATFORM="darwin"
;;
Linux)
CLIENT_PLATFORM="linux"
;;
*)
echo "Unknown, unsupported platform: ${kernel}." >&2
echo "Supported platforms: Linux, Darwin." >&2
echo "Bailing out." >&2
exit 2
esac
# TODO: migrate the kube::util::host_platform function out of hack/lib and
# use it here.
local machine=$(uname -m)
case "${machine}" in
x86_64*|i?86_64*|amd64*)
CLIENT_ARCH="amd64"
;;
aarch64*|arm64*)
CLIENT_ARCH="arm64"
;;
arm*)
CLIENT_ARCH="arm"
;;
i?86*)
CLIENT_ARCH="386"
;;
s390x*)
CLIENT_ARCH="s390x"
;;
*)
echo "Unknown, unsupported architecture (${machine})." >&2
echo "Supported architectures x86_64, i686, arm, arm64, s390x." >&2
echo "Bailing out." >&2
exit 3
;;
esac
}
function md5sum_file() {
if which md5 >/dev/null 2>&1; then
md5 -q "$1"
else
md5sum "$1" | awk '{ print $1 }'
fi
}
function sha1sum_file() {
if which sha1sum >/dev/null 2>&1; then
sha1sum "$1" | awk '{ print $1 }'
else
shasum -a1 "$1" | awk '{ print $1 }'
fi
}
function download_tarball() {
local -r download_path="$1"
local -r file="$2"
url="${DOWNLOAD_URL_PREFIX}/${file}"
mkdir -p "${download_path}"
if [[ $(which curl) ]]; then
curl -fL --retry 3 --keepalive-time 2 "${url}" -o "${download_path}/${file}"
elif [[ $(which wget) ]]; then
wget "${url}" -O "${download_path}/${file}"
else
echo "Couldn't find curl or wget. Bailing out." >&2
exit 4
fi
echo
local md5sum=$(md5sum_file "${download_path}/${file}")
echo "md5sum(${file})=${md5sum}"
local sha1sum=$(sha1sum_file "${download_path}/${file}")
echo "sha1sum(${file})=${sha1sum}"
echo
# TODO: add actual verification
}
function extract_arch_tarball() {
local -r tarfile="$1"
local -r platform="$2"
local -r arch="$3"
platforms_dir="${KUBE_ROOT}/platforms/${platform}/${arch}"
echo "Extracting ${tarfile} into ${platforms_dir}"
mkdir -p "${platforms_dir}"
# Tarball looks like kubernetes/{client,server}/bin/BINARY"
tar -xzf "${tarfile}" --strip-components 3 -C "${platforms_dir}"
# Create convenience symlink
ln -sf "${platforms_dir}" "$(dirname ${tarfile})/bin"
echo "Add '$(dirname ${tarfile})/bin' to your PATH to use newly-installed binaries."
}
detect_kube_release
DOWNLOAD_URL_PREFIX="${KUBERNETES_RELEASE_URL}/${KUBE_VERSION}"
SERVER_PLATFORM="linux"
SERVER_ARCH="${KUBERNETES_SERVER_ARCH:-amd64}"
SERVER_TAR="kubernetes-server-${SERVER_PLATFORM}-${SERVER_ARCH}.tar.gz"
detect_client_info
CLIENT_TAR="kubernetes-client-${CLIENT_PLATFORM}-${CLIENT_ARCH}.tar.gz"
echo "Kubernetes release: ${KUBE_VERSION}"
echo "Server: ${SERVER_PLATFORM}/${SERVER_ARCH} (to override, set KUBERNETES_SERVER_ARCH)"
echo "Client: ${CLIENT_PLATFORM}/${CLIENT_ARCH} (autodetected)"
echo
# TODO: remove this check and default to true when we stop shipping server
# tarballs in kubernetes.tar.gz
DOWNLOAD_SERVER_TAR=false
if [[ ! -e "${KUBE_ROOT}/server/${SERVER_TAR}" ]]; then
DOWNLOAD_SERVER_TAR=true
echo "Will download ${SERVER_TAR} from ${DOWNLOAD_URL_PREFIX}"
fi
# TODO: remove this check and default to true when we stop shipping kubectl
# in kubernetes.tar.gz
DOWNLOAD_CLIENT_TAR=false
if [[ ! -x "${KUBE_ROOT}/platforms/${CLIENT_PLATFORM}/${CLIENT_ARCH}/kubectl" ]]; then
DOWNLOAD_CLIENT_TAR=true
echo "Will download and extract ${CLIENT_TAR} from ${DOWNLOAD_URL_PREFIX}"
fi
TESTS_TAR="kubernetes-test.tar.gz"
DOWNLOAD_TESTS_TAR=false
if [[ -n "${KUBERNETES_DOWNLOAD_TESTS-}" ]]; then
DOWNLOAD_TESTS_TAR=true
echo "Will download and extract ${TESTS_TAR} from ${DOWNLOAD_URL_PREFIX}"
fi
if [[ "${DOWNLOAD_CLIENT_TAR}" == false && \
"${DOWNLOAD_SERVER_TAR}" == false && \
"${DOWNLOAD_TESTS_TAR}" == false ]]; then
echo "Nothing additional to download."
exit 0
fi
if [[ -z "${KUBERNETES_SKIP_CONFIRM-}" ]]; then
echo "Is this ok? [Y]/n"
read confirm
if [[ "${confirm}" =~ ^[nN]$ ]]; then
echo "Aborting."
exit 1
fi
fi
if "${DOWNLOAD_SERVER_TAR}"; then
download_tarball "${KUBE_ROOT}/server" "${SERVER_TAR}"
fi
if "${DOWNLOAD_CLIENT_TAR}"; then
download_tarball "${KUBE_ROOT}/client" "${CLIENT_TAR}"
extract_arch_tarball "${KUBE_ROOT}/client/${CLIENT_TAR}" "${CLIENT_PLATFORM}" "${CLIENT_ARCH}"
fi
if "${DOWNLOAD_TESTS_TAR}"; then
download_tarball "${KUBE_ROOT}/test" "${TESTS_TAR}"
echo "Extracting ${TESTS_TAR} into ${KUBE_ROOT}"
# Strip leading "kubernetes/"
tar -xzf "${KUBE_ROOT}/test/${TESTS_TAR}" --strip-components 1 -C "${KUBE_ROOT}"
fi
| cluster/get-kube-binaries.sh | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.0010811450192704797,
0.0002291302807861939,
0.00016229414904955775,
0.0001709948555799201,
0.00020321132615208626
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\n",
"\t// Expect deployment collision counter to increment\n",
"\tif err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {\n",
"\t\td, err := c.ExtensionsV1beta1().Deployments(ns.Name).Get(tester.deployment.Name, metav1.GetOptions{})\n",
"\t\tif err != nil {\n",
"\t\t\treturn false, nil\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/integration/deployment/deployment_test.go",
"type": "replace",
"edit_start_line_idx": 369
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"time"
"github.com/davecgh/go-spew/spew"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
)
type LogfFn func(format string, args ...interface{})
func LogReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, logf LogfFn) {
if newRS != nil {
logf(spew.Sprintf("New ReplicaSet %q of Deployment %q:\n%+v", newRS.Name, deployment.Name, *newRS))
} else {
logf("New ReplicaSet of Deployment %q is nil.", deployment.Name)
}
if len(allOldRSs) > 0 {
logf("All old ReplicaSets of Deployment %q:", deployment.Name)
}
for i := range allOldRSs {
logf(spew.Sprintf("%+v", *allOldRSs[i]))
}
}
func LogPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, logf LogfFn) {
minReadySeconds := deployment.Spec.MinReadySeconds
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
return c.Core().Pods(namespace).List(options)
}
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
if err != nil {
logf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)
return
}
for _, pod := range podList.Items {
availability := "not available"
if podutil.IsPodAvailable(&pod, minReadySeconds, metav1.Now()) {
availability = "available"
}
logf(spew.Sprintf("Pod %q is %s:\n%+v", pod.Name, availability, pod))
}
}
// Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore).
// Note that the status should stay valid at all times except shortly after a scaling event or right after the deployment is created.
// To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead.
func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
var (
oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
newRS *extensions.ReplicaSet
deployment *extensions.Deployment
reason string
)
err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
if err != nil {
return false, err
}
if newRS == nil {
// New ReplicaSet hasn't been created yet.
reason = "new replica set hasn't been created yet"
logf(reason)
return false, nil
}
allRSs = append(oldRSs, newRS)
// The old/new ReplicaSets need to contain the pod-template-hash label
for i := range allRSs {
if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
reason = "all replica sets need to contain the pod-template-hash label"
logf(reason)
return false, nil
}
}
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
if totalCreated > maxCreated {
reason = fmt.Sprintf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
logf(reason)
return false, nil
}
minAvailable := deploymentutil.MinAvailable(deployment)
if deployment.Status.AvailableReplicas < minAvailable {
reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
logf(reason)
return false, nil
}
// When the deployment status and its underlying resources reach the desired state, we're done
if deploymentutil.DeploymentComplete(deployment, &deployment.Status) {
return true, nil
}
reason = fmt.Sprintf("deployment status: %#v", deployment.Status)
logf(reason)
return false, nil
})
if err == wait.ErrWaitTimeout {
LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, logf)
LogPodsOfDeployment(c, deployment, allRSs, logf)
err = fmt.Errorf("%s", reason)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
}
return nil
}
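// Illustrative call site (a sketch, not part of the upstream file):
//
//	if err := WaitForDeploymentStatusValid(c, d, t.Logf, 1*time.Second, 1*time.Minute); err != nil {
//	    t.Fatal(err)
//	}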
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that the deployment revision and its new RS revision should be updated shortly, so callers typically pass a short pollTimeout (about a minute) to fail early.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
var deployment *extensions.Deployment
var newRS *extensions.ReplicaSet
var reason string
err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
// The new ReplicaSet needs to be non-nil and contain the pod-template-hash label
newRS, err = deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
if err != nil {
return false, err
}
if newRS == nil {
reason = fmt.Sprintf("New replica set for deployment %q is yet to be created", deployment.Name)
logf(reason)
return false, nil
}
if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
reason = fmt.Sprintf("New replica set %q doesn't have DefaultDeploymentUniqueLabelKey", newRS.Name)
logf(reason)
return false, nil
}
// Check revision of this deployment, and of the new replica set of this deployment
if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision {
reason = fmt.Sprintf("Deployment %q doesn't have the required revision set", deployment.Name)
logf(reason)
return false, nil
}
if !containsImage(deployment.Spec.Template.Spec.Containers, image) {
reason = fmt.Sprintf("Deployment %q doesn't have the required image %s set", deployment.Name, image)
logf(reason)
return false, nil
}
if newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision {
reason = fmt.Sprintf("New replica set %q doesn't have the required revision set", newRS.Name)
logf(reason)
return false, nil
}
if !containsImage(newRS.Spec.Template.Spec.Containers, image) {
reason = fmt.Sprintf("New replica set %q doesn't have the required image %s.", newRS.Name, image)
logf(reason)
return false, nil
}
return true, nil
})
if err == wait.ErrWaitTimeout {
LogReplicaSetsOfDeployment(deployment, nil, newRS, logf)
err = fmt.Errorf("%s", reason)
}
if newRS == nil {
return fmt.Errorf("deployment %q failed to create new replica set", deploymentName)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q (got %s / %s) and new replica set %q (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deployment.Spec.Template.Spec.Containers[0].Image, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRS.Spec.Template.Spec.Containers[0].Image, revision, image, err)
}
return nil
}
func containsImage(containers []v1.Container, imageName string) bool {
for _, container := range containers {
if container.Image == imageName {
return true
}
}
return false
}
type UpdateDeploymentFunc func(d *extensions.Deployment)
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn) (*extensions.Deployment, error) {
var deployment *extensions.Deployment
var updateErr error
pollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
var err error
if deployment, err = c.Extensions().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(deployment)
if deployment, err = c.Extensions().Deployments(namespace).Update(deployment); err == nil {
logf("Updating deployment %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to deployment %q: %v", name, updateErr)
}
return deployment, pollErr
}
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
return c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
}, desiredGeneration, 2*time.Second, 1*time.Minute)
}
| test/utils/deployment.go | 1 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.9971812963485718,
0.23029659688472748,
0.00016711265197955072,
0.0010883113136515021,
0.40045925974845886
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\n",
"\t// Expect deployment collision counter to increment\n",
"\tif err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {\n",
"\t\td, err := c.ExtensionsV1beta1().Deployments(ns.Name).Get(tester.deployment.Name, metav1.GetOptions{})\n",
"\t\tif err != nil {\n",
"\t\t\treturn false, nil\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/integration/deployment/deployment_test.go",
"type": "replace",
"edit_start_line_idx": 369
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["pkcs7.go"],
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/cloudflare/cfssl/errors:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| vendor/github.com/cloudflare/cfssl/crypto/pkcs7/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00017964441212825477,
0.00017762089555617422,
0.0001755318808136508,
0.0001776863500708714,
0.0000016795720512163825
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\n",
"\t// Expect deployment collision counter to increment\n",
"\tif err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {\n",
"\t\td, err := c.ExtensionsV1beta1().Deployments(ns.Name).Get(tester.deployment.Name, metav1.GetOptions{})\n",
"\t\tif err != nil {\n",
"\t\t\treturn false, nil\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/integration/deployment/deployment_test.go",
"type": "replace",
"edit_start_line_idx": 369
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["marshalto.go"],
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/gogo/protobuf/gogoproto:go_default_library",
"//vendor/github.com/gogo/protobuf/proto:go_default_library",
"//vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor:go_default_library",
"//vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator:go_default_library",
"//vendor/github.com/gogo/protobuf/vanity:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| vendor/github.com/gogo/protobuf/plugin/marshalto/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.0001784619380487129,
0.00017808808479458094,
0.0001776980352587998,
0.0001781042810762301,
3.12072216956949e-7
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\n",
"\t// Expect deployment collision counter to increment\n",
"\tif err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {\n",
"\t\td, err := c.ExtensionsV1beta1().Deployments(ns.Name).Get(tester.deployment.Name, metav1.GetOptions{})\n",
"\t\tif err != nil {\n",
"\t\t\treturn false, nil\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/integration/deployment/deployment_test.go",
"type": "replace",
"edit_start_line_idx": 369
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"accounts.go",
"client.go",
"models.go",
"usage.go",
"version.go",
],
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/date:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/validation:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| vendor/github.com/Azure/azure-sdk-for-go/arm/storage/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00017865178233478218,
0.00017818709602579474,
0.00017698494775686413,
0.00017855582700576633,
6.965719308027474e-7
] |
{
"id": 4,
"code_window": [
"\t\"k8s.io/kubernetes/test/integration/framework\"\n",
"\ttestutil \"k8s.io/kubernetes/test/utils\"\n",
")\n",
"\n",
"const (\n",
"\tpollInterval = 1 * time.Second\n",
"\tpollTimeout = 60 * time.Second\n",
"\n",
"\tfakeImageName = \"fake-name\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpollInterval = 100 * time.Millisecond\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 40
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"reflect"
"strings"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/test/integration/framework"
)
func TestNewDeployment(t *testing.T) {
s, closeFn, rm, dc, informers, c := dcSetup(t)
defer closeFn()
name := "test-new-deployment"
ns := framework.CreateTestingNamespace(name, s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
replicas := int32(20)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
tester.deployment.Spec.MinReadySeconds = 4
tester.deployment.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
// Start informer and controllers
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go rm.Run(5, stopCh)
go dc.Run(5, stopCh)
// Wait for the Deployment to be updated to revision 1
if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
t.Fatal(err)
}
// Make sure the Deployment status becomes valid while manually marking Deployment pods as ready at the same time
if err := tester.waitForDeploymentStatusValidAndMarkPodsReady(); err != nil {
t.Fatal(err)
}
// Check new RS annotations
newRS, err := tester.expectNewReplicaSet()
if err != nil {
t.Fatal(err)
}
if newRS.Annotations["test"] != "should-copy-to-replica-set" {
t.Errorf("expected new ReplicaSet annotations copied from Deployment %s, got: %v", tester.deployment.Name, newRS.Annotations)
}
if newRS.Annotations[v1.LastAppliedConfigAnnotation] != "" {
t.Errorf("expected new ReplicaSet last-applied annotation not copied from Deployment %s", tester.deployment.Name)
}
}
// selectors are IMMUTABLE for all API versions except apps/v1beta1 and extensions/v1beta1
func TestDeploymentSelectorImmutability(t *testing.T) {
s, closeFn, c := dcSimpleSetup(t)
defer closeFn()
name := "test-deployment-selector-immutability"
ns := framework.CreateTestingNamespace(name, s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, int32(20))}
deploymentExtensionsV1beta1, err := c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create extensions/v1beta1 deployment %s: %v", tester.deployment.Name, err)
}
// test to ensure extensions/v1beta1 selector is mutable
newSelectorLabels := map[string]string{"name_extensions_v1beta1": "test_extensions_v1beta1"}
deploymentExtensionsV1beta1.Spec.Selector.MatchLabels = newSelectorLabels
deploymentExtensionsV1beta1.Spec.Template.Labels = newSelectorLabels
updatedDeploymentExtensionsV1beta1, err := c.ExtensionsV1beta1().Deployments(ns.Name).Update(deploymentExtensionsV1beta1)
if err != nil {
t.Fatalf("failed to update extensions/v1beta1 deployment %s: %v", deploymentExtensionsV1beta1.Name, err)
}
if !reflect.DeepEqual(updatedDeploymentExtensionsV1beta1.Spec.Selector.MatchLabels, newSelectorLabels) {
t.Errorf("selector should be changed for extensions/v1beta1, expected: %v, got: %v", newSelectorLabels, updatedDeploymentExtensionsV1beta1.Spec.Selector.MatchLabels)
}
// test to ensure apps/v1beta1 selector is mutable
deploymentAppsV1beta1, err := c.AppsV1beta1().Deployments(ns.Name).Get(updatedDeploymentExtensionsV1beta1.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get apps/v1beta1 deployment %s: %v", updatedDeploymentExtensionsV1beta1.Name, err)
}
newSelectorLabels = map[string]string{"name_apps_v1beta1": "test_apps_v1beta1"}
deploymentAppsV1beta1.Spec.Selector.MatchLabels = newSelectorLabels
deploymentAppsV1beta1.Spec.Template.Labels = newSelectorLabels
updatedDeploymentAppsV1beta1, err := c.AppsV1beta1().Deployments(ns.Name).Update(deploymentAppsV1beta1)
if err != nil {
t.Fatalf("failed to update apps/v1beta1 deployment %s: %v", deploymentAppsV1beta1.Name, err)
}
if !reflect.DeepEqual(updatedDeploymentAppsV1beta1.Spec.Selector.MatchLabels, newSelectorLabels) {
t.Errorf("selector should be changed for apps/v1beta1, expected: %v, got: %v", newSelectorLabels, updatedDeploymentAppsV1beta1.Spec.Selector.MatchLabels)
}
// test to ensure apps/v1beta2 selector is immutable
deploymentAppsV1beta2, err := c.AppsV1beta2().Deployments(ns.Name).Get(updatedDeploymentAppsV1beta1.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get apps/v1beta2 deployment %s: %v", updatedDeploymentAppsV1beta1.Name, err)
}
newSelectorLabels = map[string]string{"name_apps_v1beta2": "test_apps_v1beta2"}
deploymentAppsV1beta2.Spec.Selector.MatchLabels = newSelectorLabels
deploymentAppsV1beta2.Spec.Template.Labels = newSelectorLabels
_, err = c.AppsV1beta2().Deployments(ns.Name).Update(deploymentAppsV1beta2)
if err == nil {
t.Fatalf("failed to provide validation error when changing immutable selector when updating apps/v1beta2 deployment %s", deploymentAppsV1beta2.Name)
}
expectedErrType := "Invalid value"
expectedErrDetail := "field is immutable"
if !strings.Contains(err.Error(), expectedErrType) || !strings.Contains(err.Error(), expectedErrDetail) {
t.Errorf("error message does not match, expected type: %s, expected detail: %s, got: %s", expectedErrType, expectedErrDetail, err.Error())
}
}
// Paused deployment should not start new rollout
func TestPausedDeployment(t *testing.T) {
s, closeFn, rm, dc, informers, c := dcSetup(t)
defer closeFn()
name := "test-paused-deployment"
ns := framework.CreateTestingNamespace(name, s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
tester.deployment.Spec.Paused = true
tgps := int64(1)
tester.deployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
// Start informer and controllers
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go rm.Run(5, stopCh)
go dc.Run(5, stopCh)
// Verify that the paused deployment won't create new replica set.
if err := tester.expectNoNewReplicaSet(); err != nil {
t.Fatal(err)
}
// Resume the deployment
tester.deployment, err = tester.updateDeployment(resumeFn)
if err != nil {
t.Fatalf("failed to resume deployment %s: %v", tester.deployment.Name, err)
}
// Wait for the controller to notice the resume.
if err := tester.waitForObservedDeployment(tester.deployment.Generation); err != nil {
t.Fatal(err)
}
// Wait for the Deployment to be updated to revision 1
if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
t.Fatal(err)
}
// Make sure the Deployment status becomes valid while manually marking Deployment pods as ready at the same time
if err := tester.waitForDeploymentStatusValidAndMarkPodsReady(); err != nil {
t.Fatal(err)
}
// A new replicaset should be created.
if _, err := tester.expectNewReplicaSet(); err != nil {
t.Fatal(err)
}
// Pause the deployment.
// The paused deployment shouldn't trigger a new rollout.
tester.deployment, err = tester.updateDeployment(pauseFn)
if err != nil {
t.Fatalf("failed to pause deployment %s: %v", tester.deployment.Name, err)
}
// Wait for the controller to notice the pause.
if err := tester.waitForObservedDeployment(tester.deployment.Generation); err != nil {
t.Fatal(err)
}
// Update the deployment template
newTGPS := int64(0)
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
update.Spec.Template.Spec.TerminationGracePeriodSeconds = &newTGPS
})
if err != nil {
t.Fatalf("failed updating template of deployment %s: %v", tester.deployment.Name, err)
}
// Wait for the controller to notice the rollout.
if err := tester.waitForObservedDeployment(tester.deployment.Generation); err != nil {
t.Fatal(err)
}
// Verify that the paused deployment won't create new replica set.
if err := tester.expectNoNewReplicaSet(); err != nil {
t.Fatal(err)
}
_, allOldRs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.ExtensionsV1beta1())
if err != nil {
t.Fatalf("failed retrieving old replicasets of deployment %s: %v", tester.deployment.Name, err)
}
if len(allOldRs) != 1 {
t.Errorf("expected an old replica set, got %v", allOldRs)
}
if *allOldRs[0].Spec.Template.Spec.TerminationGracePeriodSeconds == newTGPS {
t.Errorf("TerminationGracePeriodSeconds on the replica set should be %d, got %d", tgps, newTGPS)
}
}
// Paused deployment can be scaled
func TestScalePausedDeployment(t *testing.T) {
s, closeFn, rm, dc, informers, c := dcSetup(t)
defer closeFn()
name := "test-scale-paused-deployment"
ns := framework.CreateTestingNamespace(name, s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
tgps := int64(1)
tester.deployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
// Start informer and controllers
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go rm.Run(5, stopCh)
go dc.Run(5, stopCh)
// Wait for the Deployment to be updated to revision 1
if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
t.Fatal(err)
}
// Make sure the Deployment status becomes valid while manually marking Deployment pods as ready at the same time
if err := tester.waitForDeploymentStatusValidAndMarkPodsReady(); err != nil {
t.Fatal(err)
}
// A new replicaset should be created.
if _, err := tester.expectNewReplicaSet(); err != nil {
t.Fatal(err)
}
// Pause the deployment.
tester.deployment, err = tester.updateDeployment(pauseFn)
if err != nil {
t.Fatalf("failed to pause deployment %s: %v", tester.deployment.Name, err)
}
// Wait for the controller to notice the pause.
if err := tester.waitForObservedDeployment(tester.deployment.Generation); err != nil {
t.Fatal(err)
}
// Scale the paused deployment.
newReplicas := int32(10)
tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
update.Spec.Replicas = &newReplicas
})
if err != nil {
t.Fatalf("failed updating deployment %s: %v", tester.deployment.Name, err)
}
// Wait for the controller to notice the scale.
if err := tester.waitForObservedDeployment(tester.deployment.Generation); err != nil {
t.Fatal(err)
}
// Verify that the new replicaset is scaled.
rs, err := tester.expectNewReplicaSet()
if err != nil {
t.Fatal(err)
}
if *rs.Spec.Replicas != newReplicas {
t.Errorf("expected new replicaset replicas = %d, got %d", newReplicas, *rs.Spec.Replicas)
}
// Make sure the Deployment status becomes valid while manually marking Deployment pods as ready at the same time
if err := tester.waitForDeploymentStatusValidAndMarkPodsReady(); err != nil {
t.Fatal(err)
}
}
// Deployment rollout shouldn't be blocked on hash collisions
func TestDeploymentHashCollision(t *testing.T) {
s, closeFn, rm, dc, informers, c := dcSetup(t)
defer closeFn()
name := "test-hash-collision-deployment"
ns := framework.CreateTestingNamespace(name, s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
var err error
tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
// Start informer and controllers
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go rm.Run(5, stopCh)
go dc.Run(5, stopCh)
// Wait for the Deployment to be updated to revision 1
if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
t.Fatal(err)
}
// Mock a hash collision
newRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
if err != nil {
t.Fatalf("failed getting new replicaset of deployment %s: %v", tester.deployment.Name, err)
}
if newRS == nil {
t.Fatalf("unable to find new replicaset of deployment %s", tester.deployment.Name)
}
_, err = tester.updateReplicaSet(newRS.Name, func(update *v1beta1.ReplicaSet) {
*update.Spec.Template.Spec.TerminationGracePeriodSeconds = int64(5)
})
if err != nil {
t.Fatalf("failed updating replicaset %s template: %v", newRS.Name, err)
}
// Expect deployment collision counter to increment
if err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
d, err := c.ExtensionsV1beta1().Deployments(ns.Name).Get(tester.deployment.Name, metav1.GetOptions{})
if err != nil {
return false, nil
}
return d.Status.CollisionCount != nil && *d.Status.CollisionCount == int32(1), nil
}); err != nil {
t.Fatalf("Failed to increment collision counter for deployment %q: %v", tester.deployment.Name, err)
}
// Expect a new ReplicaSet to be created
if err := tester.waitForDeploymentRevisionAndImage("2", fakeImage); err != nil {
t.Fatal(err)
}
}
| test/integration/deployment/deployment_test.go | 1 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.013893433846533298,
0.0006319640087895095,
0.00016579263319727033,
0.00017084850696846843,
0.0021914539393037558
] |
{
"id": 4,
"code_window": [
"\t\"k8s.io/kubernetes/test/integration/framework\"\n",
"\ttestutil \"k8s.io/kubernetes/test/utils\"\n",
")\n",
"\n",
"const (\n",
"\tpollInterval = 1 * time.Second\n",
"\tpollTimeout = 60 * time.Second\n",
"\n",
"\tfakeImageName = \"fake-name\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpollInterval = 100 * time.Millisecond\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 40
} | # netns - network namespaces in go #
The netns package provides an ultra-simple interface for handling
network namespaces in go. Changing namespaces requires elevated
privileges, so in most cases this code needs to be run as root.
## Local Build and Test ##
You can use the go get command:
go get github.com/vishvananda/netns
Testing (requires root):
sudo -E go test github.com/vishvananda/netns
## Example ##
```go
package main
import (
"fmt"
"net"
"runtime"
"github.com/vishvananda/netns"
)
func main() {
// Lock the OS Thread so we don't accidentally switch namespaces
runtime.LockOSThread()
defer runtime.UnlockOSThread()
// Save the current network namespace
origns, _ := netns.Get()
defer origns.Close()
// Create a new network namespace
newns, _ := netns.New()
netns.Set(newns)
defer newns.Close()
// Do something with the network namespace
ifaces, _ := net.Interfaces()
fmt.Printf("Interfaces: %v\n", ifaces)
// Switch back to the original namespace
netns.Set(origns)
}
```
| vendor/github.com/vishvananda/netns/README.md | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00018420422566123307,
0.00017325136286672205,
0.0001640619884710759,
0.00017152362852357328,
0.000006215233497641748
] |
{
"id": 4,
"code_window": [
"\t\"k8s.io/kubernetes/test/integration/framework\"\n",
"\ttestutil \"k8s.io/kubernetes/test/utils\"\n",
")\n",
"\n",
"const (\n",
"\tpollInterval = 1 * time.Second\n",
"\tpollTimeout = 60 * time.Second\n",
"\n",
"\tfakeImageName = \"fake-name\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpollInterval = 100 * time.Millisecond\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 40
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"azure_file.go",
"azure_provision.go",
"azure_util.go",
"doc.go",
],
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["azure_file_test.go"],
library = ":go_default_library",
deps = [
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/cloudprovider/providers/fake:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| pkg/volume/azure_file/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00046445828047581017,
0.00021333075710572302,
0.0001691524958005175,
0.00017231669335160404,
0.00010253415530314669
] |
{
"id": 4,
"code_window": [
"\t\"k8s.io/kubernetes/test/integration/framework\"\n",
"\ttestutil \"k8s.io/kubernetes/test/utils\"\n",
")\n",
"\n",
"const (\n",
"\tpollInterval = 1 * time.Second\n",
"\tpollTimeout = 60 * time.Second\n",
"\n",
"\tfakeImageName = \"fake-name\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpollInterval = 100 * time.Millisecond\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 40
} | # Configuration options
These options can be set as environment variables, to customize how your cluster is created.
These options apply across providers. There are additional documents for options specific to providers:
* [AWS](aws/options.md)
This is a work-in-progress; not all options are documented yet!
**NUM_NODES**
The number of node instances to create. Most providers default this to 4.
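For example (a sketch assuming the standard `kube-up.sh` flow):

```sh
export NUM_NODES=6
cluster/kube-up.sh
```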
| cluster/options.md | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00018564796482678503,
0.0001744223991408944,
0.0001631968334550038,
0.0001744223991408944,
0.000011225565685890615
] |
{
"id": 5,
"code_window": [
"\tif err != nil {\n",
"\t\td.t.Fatalf(\"failed to parse Deployment selector: %v\", err)\n",
"\t}\n",
"\tvar readyPods int32\n",
"\terr = wait.Poll(100*time.Millisecond, pollTimeout, func() (bool, error) {\n",
"\t\treadyPods = 0\n",
"\t\tpods, err := d.c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})\n",
"\t\tif err != nil {\n",
"\t\t\td.t.Logf(\"failed to list Deployment pods, will retry later: %v\", err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\terr = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 170
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"net/http/httptest"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/deployment"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/test/integration/framework"
testutil "k8s.io/kubernetes/test/utils"
)
const (
pollInterval = 1 * time.Second
pollTimeout = 60 * time.Second
fakeImageName = "fake-name"
fakeImage = "fakeimage"
)
var pauseFn = func(update *v1beta1.Deployment) {
update.Spec.Paused = true
}
var resumeFn = func(update *v1beta1.Deployment) {
update.Spec.Paused = false
}
type deploymentTester struct {
t *testing.T
c clientset.Interface
deployment *v1beta1.Deployment
}
func testLabels() map[string]string {
return map[string]string{"name": "test"}
}
// newDeployment returns a RollingUpdate Deployment with a fake container image
func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment {
return &v1beta1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "extensions/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Spec: v1beta1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: testLabels()},
Strategy: v1beta1.DeploymentStrategy{
Type: v1beta1.RollingUpdateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: testLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: fakeImageName,
Image: fakeImage,
},
},
},
},
},
}
}
// dcSetup sets up necessities for a Deployment integration test, including master, apiserver, informers, and clientset
func dcSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.ReplicaSetController, *deployment.DeploymentController, informers.SharedInformerFactory, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s, closeFn := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("error creating clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-informers")), resyncPeriod)
dc := deployment.NewDeploymentController(
informers.Extensions().V1beta1().Deployments(),
informers.Extensions().V1beta1().ReplicaSets(),
informers.Core().V1().Pods(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-controller")),
)
rm := replicaset.NewReplicaSetController(
informers.Extensions().V1beta1().ReplicaSets(),
informers.Core().V1().Pods(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
replicaset.BurstReplicas,
)
return s, closeFn, rm, dc, informers, clientSet
}
// dcSimpleSetup sets up necessities for a Deployment integration test, including master, apiserver,
// and clientset, but not controllers and informers
func dcSimpleSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s, closeFn := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("error creating clientset: %v", err)
}
return s, closeFn, clientSet
}
// addPodConditionReady sets the given pod's status to ready at the given time
func addPodConditionReady(pod *v1.Pod, time metav1.Time) {
pod.Status = v1.PodStatus{
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
LastTransitionTime: time,
},
},
}
}
func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image string) error {
if err := testutil.WaitForDeploymentRevisionAndImage(d.c, d.deployment.Namespace, d.deployment.Name, revision, image, d.t.Logf, pollInterval, pollTimeout); err != nil {
return fmt.Errorf("failed to wait for Deployment revision %s: %v", d.deployment.Name, err)
}
return nil
}
// markAllPodsReady manually updates all Deployment pod statuses to ready
func (d *deploymentTester) markAllPodsReady() {
ns := d.deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(d.deployment.Spec.Selector)
if err != nil {
d.t.Fatalf("failed to parse Deployment selector: %v", err)
}
var readyPods int32
err = wait.Poll(100*time.Millisecond, pollTimeout, func() (bool, error) {
readyPods = 0
pods, err := d.c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
d.t.Logf("failed to list Deployment pods, will retry later: %v", err)
return false, nil
}
for i := range pods.Items {
pod := pods.Items[i]
if podutil.IsPodReady(&pod) {
readyPods++
continue
}
addPodConditionReady(&pod, metav1.Now())
if _, err = d.c.Core().Pods(ns).UpdateStatus(&pod); err != nil {
d.t.Logf("failed to update Deployment pod %s, will retry later: %v", pod.Name, err)
} else {
readyPods++
}
}
if readyPods >= *d.deployment.Spec.Replicas {
return true, nil
}
return false, nil
})
if err != nil {
d.t.Fatalf("failed to mark all Deployment pods to ready: %v", err)
}
}
func (d *deploymentTester) waitForDeploymentStatusValid() error {
return testutil.WaitForDeploymentStatusValid(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout)
}
// waitForDeploymentStatusValidAndMarkPodsReady waits for the Deployment status to become valid
// while marking all Deployment pods as ready at the same time.
func (d *deploymentTester) waitForDeploymentStatusValidAndMarkPodsReady() error {
// Manually mark all Deployment pods as ready in a separate goroutine
go d.markAllPodsReady()
// Make sure the Deployment status is valid while Deployment pods are becoming ready
err := d.waitForDeploymentStatusValid()
if err != nil {
return fmt.Errorf("failed to wait for Deployment status %s: %v", d.deployment.Name, err)
}
return nil
}
func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*v1beta1.Deployment, error) {
return testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf)
}
func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) error {
if err := testutil.WaitForObservedDeployment(d.c, d.deployment.Namespace, d.deployment.Name, desiredGeneration); err != nil {
return fmt.Errorf("failed waiting for ObservedGeneration of deployment %s to become %d: %v", d.deployment.Name, desiredGeneration, err)
}
return nil
}
func (d *deploymentTester) getNewReplicaSet() (*v1beta1.ReplicaSet, error) {
rs, err := deploymentutil.GetNewReplicaSet(d.deployment, d.c.ExtensionsV1beta1())
if err != nil {
return nil, fmt.Errorf("failed retrieving new replicaset of deployment %s: %v", d.deployment.Name, err)
}
return rs, nil
}
func (d *deploymentTester) expectNoNewReplicaSet() error {
rs, err := d.getNewReplicaSet()
if err != nil {
return err
}
if rs != nil {
return fmt.Errorf("expected deployment %s not to create a new replicaset, got %v", d.deployment.Name, rs)
}
return nil
}
func (d *deploymentTester) expectNewReplicaSet() (*v1beta1.ReplicaSet, error) {
rs, err := d.getNewReplicaSet()
if err != nil {
return nil, err
}
if rs == nil {
return nil, fmt.Errorf("expected deployment %s to create a new replicaset, got nil", d.deployment.Name)
}
return rs, nil
}
func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {
return testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf)
}
| test/integration/deployment/util.go | 1 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.9987053871154785,
0.1459476351737976,
0.00016036206216085702,
0.0025578434579074383,
0.3329276144504547
] |
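The hunk above replaces wait.Poll(100*time.Millisecond, ...) with wait.PollImmediate(pollInterval, pollTimeout, ...). The practical difference: Poll sleeps one full interval before the first probe, while PollImmediate evaluates the condition once up front, so fast-converging waits finish one interval sooner. A minimal sketch of the two calls, assuming the apimachinery wait package of this era; the condition body is a stand-in, not the test's real pod check:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	cond := func() (bool, error) {
		// Stand-in condition; the real test lists pods and counts ready ones.
		return time.Since(start) > 500*time.Millisecond, nil
	}
	// Poll sleeps one interval first: earliest success is ~interval in.
	_ = wait.Poll(100*time.Millisecond, 60*time.Second, cond)
	// PollImmediate probes right away, then every interval until done, error, or timeout.
	err := wait.PollImmediate(100*time.Millisecond, 60*time.Second, cond)
	fmt.Println(err) // <nil> once cond reports done
}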
{
"id": 5,
"code_window": [
"\tif err != nil {\n",
"\t\td.t.Fatalf(\"failed to parse Deployment selector: %v\", err)\n",
"\t}\n",
"\tvar readyPods int32\n",
"\terr = wait.Poll(100*time.Millisecond, pollTimeout, func() (bool, error) {\n",
"\t\treadyPods = 0\n",
"\t\tpods, err := d.c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})\n",
"\t\tif err != nil {\n",
"\t\t\td.t.Logf(\"failed to list Deployment pods, will retry later: %v\", err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\terr = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 170
} | package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"fileutil_test.go",
"lock_test.go",
"preallocate_test.go",
"purge_test.go",
],
library = ":go_default_library",
)
go_library(
name = "go_default_library",
srcs = [
"fileutil.go",
"lock.go",
"lock_unix.go",
"perallocate_unsupported.go",
"purge.go",
"sync.go",
] + select({
"@io_bazel_rules_go//go/platform:linux_amd64": [
"preallocate.go",
"sync_linux.go",
],
"@io_bazel_rules_go//go/platform:windows_amd64": [
"lock_windows.go",
],
"//conditions:default": [],
}),
deps = ["//vendor/github.com/coreos/pkg/capnslog:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| third_party/forked/etcd237/pkg/fileutil/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00017552469216752797,
0.00017285560898017138,
0.00016847450751811266,
0.00017291976837441325,
0.0000022317644834402017
] |
{
"id": 5,
"code_window": [
"\tif err != nil {\n",
"\t\td.t.Fatalf(\"failed to parse Deployment selector: %v\", err)\n",
"\t}\n",
"\tvar readyPods int32\n",
"\terr = wait.Poll(100*time.Millisecond, pollTimeout, func() (bool, error) {\n",
"\t\treadyPods = 0\n",
"\t\tpods, err := d.c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})\n",
"\t\tif err != nil {\n",
"\t\t\td.t.Logf(\"failed to list Deployment pods, will retry later: %v\", err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\terr = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 170
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["interface.go"],
deps = [
"//vendor/k8s.io/code-generator/test/informers/internalversion/internalinterfaces:go_default_library",
"//vendor/k8s.io/code-generator/test/informers/internalversion/testgroup/internalversion:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/code-generator/test/informers/internalversion/testgroup/internalversion:all-srcs",
],
tags = ["automanaged"],
)
| staging/src/k8s.io/code-generator/test/informers/internalversion/testgroup/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00017666156054474413,
0.0001728426868794486,
0.00016891217092052102,
0.00017289852257817984,
0.0000030696912745042937
] |
{
"id": 5,
"code_window": [
"\tif err != nil {\n",
"\t\td.t.Fatalf(\"failed to parse Deployment selector: %v\", err)\n",
"\t}\n",
"\tvar readyPods int32\n",
"\terr = wait.Poll(100*time.Millisecond, pollTimeout, func() (bool, error) {\n",
"\t\treadyPods = 0\n",
"\t\tpods, err := d.c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})\n",
"\t\tif err != nil {\n",
"\t\t\td.t.Logf(\"failed to list Deployment pods, will retry later: %v\", err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\terr = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 170
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"bytes"
"io/ioutil"
"os"
"testing"
"reflect"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
type setConfigTest struct {
description string
config clientcmdapi.Config
args []string
expected string
expectedConfig clientcmdapi.Config
}
func TestSetConfigCurrentContext(t *testing.T) {
conf := clientcmdapi.Config{
Kind: "Config",
APIVersion: "v1",
CurrentContext: "minikube",
}
expectedConfig := *clientcmdapi.NewConfig()
expectedConfig.CurrentContext = "my-cluster"
test := setConfigTest{
description: "Testing for kubectl config set current-context",
config: conf,
args: []string{"current-context", "my-cluster"},
expected: `Property "current-context" set.` + "\n",
expectedConfig: expectedConfig,
}
test.run(t)
}
func (test setConfigTest) run(t *testing.T) {
fakeKubeFile, err := ioutil.TempFile("", "")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
defer os.Remove(fakeKubeFile.Name())
err = clientcmd.WriteToFile(test.config, fakeKubeFile.Name())
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
pathOptions := clientcmd.NewDefaultPathOptions()
pathOptions.GlobalFile = fakeKubeFile.Name()
pathOptions.EnvVar = ""
buf := bytes.NewBuffer([]byte{})
cmd := NewCmdConfigSet(buf, pathOptions)
cmd.SetArgs(test.args)
if err := cmd.Execute(); err != nil {
t.Fatalf("unexpected error executing command: %v", err)
}
config, err := clientcmd.LoadFromFile(fakeKubeFile.Name())
if err != nil {
t.Fatalf("unexpected error loading kubeconfig file: %v", err)
}
if len(test.expected) != 0 {
if buf.String() != test.expected {
t.Errorf("Failed in: %q\n expected %v\n but got %v", test.description, test.expected, buf.String())
}
}
if !reflect.DeepEqual(*config, test.expectedConfig) {
t.Errorf("Failed in: %q\n expected %v\n but got %v", test.description, test.expectedConfig, *config)
}
}
| pkg/kubectl/cmd/config/set_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00017904333071783185,
0.00017331969866063446,
0.00016856673755683005,
0.00017386760737281293,
0.000003879730684275273
] |
{
"id": 6,
"code_window": [
"}\n",
"\n",
"func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*v1beta1.Deployment, error) {\n",
"\treturn testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf)\n",
"}\n",
"\n",
"func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) error {\n",
"\tif err := testutil.WaitForObservedDeployment(d.c, d.deployment.Namespace, d.deployment.Name, desiredGeneration); err != nil {\n",
"\t\treturn fmt.Errorf(\"failed waiting for ObservedGeneration of deployment %s to become %d: %v\", d.deployment.Name, desiredGeneration, err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 219
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
extensions "k8s.io/api/extensions/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
testutils "k8s.io/kubernetes/test/utils"
)
type updateRsFunc func(d *extensions.ReplicaSet)
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*extensions.ReplicaSet, error) {
return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf)
}
// CheckNewRSAnnotations check if the new RS's annotation is as expected
func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return err
}
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
if err != nil {
return err
}
for k, v := range expectedAnnotations {
// Skip checking revision annotations
if k != deploymentutil.RevisionAnnotation && v != newRS.Annotations[k] {
return fmt.Errorf("Expected new RS annotations = %+v, got %+v", expectedAnnotations, newRS.Annotations)
}
}
return nil
}
// Delete a ReplicaSet and all pods it spawned
func DeleteReplicaSet(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
rs, err := clientset.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
return err
}
startTime := time.Now()
err = clientset.ExtensionsV1beta1().ReplicaSets(ns).Delete(name, &metav1.DeleteOptions{})
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
deleteRSTime := time.Since(startTime)
Logf("Deleting RS %s took: %v", name, deleteRSTime)
if err == nil {
err = waitForReplicaSetPodsGone(clientset, rs)
}
terminatePodTime := time.Since(startTime) - deleteRSTime
Logf("Terminating ReplicaSet %s pods took: %v", name, terminatePodTime)
return err
}
// waitForReplicaSetPodsGone waits until there are no pods reported under a
// ReplicaSet selector (because the pods have completed termination).
func waitForReplicaSetPodsGone(c clientset.Interface, rs *extensions.ReplicaSet) error {
return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
ExpectNoError(err)
options := metav1.ListOptions{LabelSelector: selector.String()}
if pods, err := c.Core().Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 {
return true, nil
}
return false, nil
})
}
// WaitForReadyReplicaSet waits until the replica set has all of its replicas ready.
func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
err := wait.Poll(Poll, pollShortTimeout, func() (bool, error) {
rs, err := c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
return *(rs.Spec.Replicas) == rs.Status.Replicas && *(rs.Spec.Replicas) == rs.Status.ReadyReplicas, nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replica set %q never became ready", name)
}
return err
}
func RunReplicaSet(config testutils.ReplicaSetConfig) error {
By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunReplicaSet(config)
}
| test/e2e/framework/rs_util.go | 1 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.004191982094198465,
0.0007630774052813649,
0.00016430635878350586,
0.00025729526532813907,
0.001221010461449623
] |
{
"id": 6,
"code_window": [
"}\n",
"\n",
"func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*v1beta1.Deployment, error) {\n",
"\treturn testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf)\n",
"}\n",
"\n",
"func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) error {\n",
"\tif err := testutil.WaitForObservedDeployment(d.c, d.deployment.Namespace, d.deployment.Name, desiredGeneration); err != nil {\n",
"\t\treturn fmt.Errorf(\"failed waiting for ObservedGeneration of deployment %s to become %d: %v\", d.deployment.Name, desiredGeneration, err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 219
} | package bolt
import "errors"
// These errors can be returned when opening or calling methods on a DB.
var (
// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
// is opened or after it is closed.
ErrDatabaseNotOpen = errors.New("database not open")
// ErrDatabaseOpen is returned when opening a database that is
// already open.
ErrDatabaseOpen = errors.New("database already open")
// ErrInvalid is returned when both meta pages on a database are invalid.
// This typically occurs when a file is not a bolt database.
ErrInvalid = errors.New("invalid database")
// ErrVersionMismatch is returned when the data file was created with a
// different version of Bolt.
ErrVersionMismatch = errors.New("version mismatch")
// ErrChecksum is returned when either meta page checksum does not match.
ErrChecksum = errors.New("checksum error")
// ErrTimeout is returned when a database cannot obtain an exclusive lock
// on the data file after the timeout passed to Open().
ErrTimeout = errors.New("timeout")
)
// These errors can occur when beginning or committing a Tx.
var (
// ErrTxNotWritable is returned when performing a write operation on a
// read-only transaction.
ErrTxNotWritable = errors.New("tx not writable")
// ErrTxClosed is returned when committing or rolling back a transaction
// that has already been committed or rolled back.
ErrTxClosed = errors.New("tx closed")
// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
// read-only database.
ErrDatabaseReadOnly = errors.New("database is in read-only mode")
)
// These errors can occur when putting or deleting a value or a bucket.
var (
// ErrBucketNotFound is returned when trying to access a bucket that has
// not been created yet.
ErrBucketNotFound = errors.New("bucket not found")
// ErrBucketExists is returned when creating a bucket that already exists.
ErrBucketExists = errors.New("bucket already exists")
// ErrBucketNameRequired is returned when creating a bucket with a blank name.
ErrBucketNameRequired = errors.New("bucket name required")
// ErrKeyRequired is returned when inserting a zero-length key.
ErrKeyRequired = errors.New("key required")
// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
ErrKeyTooLarge = errors.New("key too large")
// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
ErrValueTooLarge = errors.New("value too large")
// ErrIncompatibleValue is returned when trying to create or delete a bucket
// on an existing non-bucket key or when trying to create or delete a
// non-bucket key on an existing bucket key.
ErrIncompatibleValue = errors.New("incompatible value")
)
| vendor/github.com/boltdb/bolt/errors.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.0005402301321737468,
0.0002124358870787546,
0.0001614273787708953,
0.00016692507779225707,
0.00012393620272632688
] |
{
"id": 6,
"code_window": [
"}\n",
"\n",
"func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*v1beta1.Deployment, error) {\n",
"\treturn testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf)\n",
"}\n",
"\n",
"func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) error {\n",
"\tif err := testutil.WaitForObservedDeployment(d.c, d.deployment.Namespace, d.deployment.Name, desiredGeneration); err != nil {\n",
"\t\treturn fmt.Errorf(\"failed waiting for ObservedGeneration of deployment %s to become %d: %v\", d.deployment.Name, desiredGeneration, err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 219
} | /*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
// Below is derived from Solaris source, so CDDL license is included.
package gopass
import (
"syscall"
"golang.org/x/sys/unix"
)
type terminalState struct {
state *unix.Termios
}
// isTerminal returns true if there is a terminal attached to the given
// file descriptor.
// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
func isTerminal(fd uintptr) bool {
var termio unix.Termio
err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
return err == nil
}
// makeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c
func makeRaw(fd uintptr) (*terminalState, error) {
oldTermiosPtr, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
if err != nil {
return nil, err
}
oldTermios := *oldTermiosPtr
newTermios := oldTermios
newTermios.Lflag &^= syscall.ECHO | syscall.ECHOE | syscall.ECHOK | syscall.ECHONL
if err := unix.IoctlSetTermios(int(fd), unix.TCSETS, &newTermios); err != nil {
return nil, err
}
return &terminalState{
state: oldTermiosPtr,
}, nil
}
func restore(fd uintptr, oldState *terminalState) error {
return unix.IoctlSetTermios(int(fd), unix.TCSETS, oldState.state)
}
| vendor/github.com/howeyc/gopass/terminal_solaris.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00017277439474128187,
0.00016950991994235665,
0.00016454796423204243,
0.00016993687313515693,
0.000002787243147395202
] |
{
"id": 6,
"code_window": [
"}\n",
"\n",
"func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*v1beta1.Deployment, error) {\n",
"\treturn testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf)\n",
"}\n",
"\n",
"func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) error {\n",
"\tif err := testutil.WaitForObservedDeployment(d.c, d.deployment.Namespace, d.deployment.Name, desiredGeneration); err != nil {\n",
"\t\treturn fmt.Errorf(\"failed waiting for ObservedGeneration of deployment %s to become %d: %v\", d.deployment.Name, desiredGeneration, err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 219
} | apiVersion: v1
kind: Pod
metadata:
name: cephfs2
spec:
containers:
- name: cephfs-rw
image: kubernetes/pause
volumeMounts:
- mountPath: "/mnt/cephfs"
name: cephfs
volumes:
- name: cephfs
cephfs:
monitors:
- 10.16.154.78:6789
- 10.16.154.82:6789
- 10.16.154.83:6789
user: admin
secretRef:
name: ceph-secret
readOnly: true
| examples/volumes/cephfs/cephfs-with-secret.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00017160296556539834,
0.00016876665176823735,
0.00016608057194389403,
0.0001686164177954197,
0.0000022570091005036375
] |
{
"id": 7,
"code_window": [
"\t}\n",
"\treturn rs, nil\n",
"}\n",
"\n",
"func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {\n",
"\treturn testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\treturn testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 260
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"net/http/httptest"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/deployment"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/test/integration/framework"
testutil "k8s.io/kubernetes/test/utils"
)
const (
pollInterval = 1 * time.Second
pollTimeout = 60 * time.Second
fakeImageName = "fake-name"
fakeImage = "fakeimage"
)
var pauseFn = func(update *v1beta1.Deployment) {
update.Spec.Paused = true
}
var resumeFn = func(update *v1beta1.Deployment) {
update.Spec.Paused = false
}
type deploymentTester struct {
t *testing.T
c clientset.Interface
deployment *v1beta1.Deployment
}
func testLabels() map[string]string {
return map[string]string{"name": "test"}
}
// newDeployment returns a RollingUpdate Deployment with a fake container image
func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment {
return &v1beta1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "extensions/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Spec: v1beta1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: testLabels()},
Strategy: v1beta1.DeploymentStrategy{
Type: v1beta1.RollingUpdateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: testLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: fakeImageName,
Image: fakeImage,
},
},
},
},
},
}
}
// dcSetup sets up necessities for a Deployment integration test, including master, apiserver, informers, and clientset
func dcSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.ReplicaSetController, *deployment.DeploymentController, informers.SharedInformerFactory, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s, closeFn := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("error creating clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-informers")), resyncPeriod)
dc := deployment.NewDeploymentController(
informers.Extensions().V1beta1().Deployments(),
informers.Extensions().V1beta1().ReplicaSets(),
informers.Core().V1().Pods(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-controller")),
)
rm := replicaset.NewReplicaSetController(
informers.Extensions().V1beta1().ReplicaSets(),
informers.Core().V1().Pods(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
replicaset.BurstReplicas,
)
return s, closeFn, rm, dc, informers, clientSet
}
// dcSimpleSetup sets up necessities for a Deployment integration test, including master, apiserver,
// and clientset, but not controllers and informers
func dcSimpleSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s, closeFn := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("error creating clientset: %v", err)
}
return s, closeFn, clientSet
}
// addPodConditionReady sets the given pod's status to ready at the given time
func addPodConditionReady(pod *v1.Pod, time metav1.Time) {
pod.Status = v1.PodStatus{
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
LastTransitionTime: time,
},
},
}
}
func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image string) error {
if err := testutil.WaitForDeploymentRevisionAndImage(d.c, d.deployment.Namespace, d.deployment.Name, revision, image, d.t.Logf, pollInterval, pollTimeout); err != nil {
return fmt.Errorf("failed to wait for Deployment revision %s: %v", d.deployment.Name, err)
}
return nil
}
// markAllPodsReady manually updates all Deployment pod statuses to ready
func (d *deploymentTester) markAllPodsReady() {
ns := d.deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(d.deployment.Spec.Selector)
if err != nil {
d.t.Fatalf("failed to parse Deployment selector: %v", err)
}
var readyPods int32
err = wait.Poll(100*time.Millisecond, pollTimeout, func() (bool, error) {
readyPods = 0
pods, err := d.c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
d.t.Logf("failed to list Deployment pods, will retry later: %v", err)
return false, nil
}
for i := range pods.Items {
pod := pods.Items[i]
if podutil.IsPodReady(&pod) {
readyPods++
continue
}
addPodConditionReady(&pod, metav1.Now())
if _, err = d.c.Core().Pods(ns).UpdateStatus(&pod); err != nil {
d.t.Logf("failed to update Deployment pod %s, will retry later: %v", pod.Name, err)
} else {
readyPods++
}
}
if readyPods >= *d.deployment.Spec.Replicas {
return true, nil
}
return false, nil
})
if err != nil {
d.t.Fatalf("failed to mark all Deployment pods to ready: %v", err)
}
}
func (d *deploymentTester) waitForDeploymentStatusValid() error {
return testutil.WaitForDeploymentStatusValid(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout)
}
// waitForDeploymentStatusValidAndMarkPodsReady waits for the Deployment status to become valid
// while marking all Deployment pods as ready at the same time.
func (d *deploymentTester) waitForDeploymentStatusValidAndMarkPodsReady() error {
// Manually mark all Deployment pods as ready in a separate goroutine
go d.markAllPodsReady()
// Make sure the Deployment status is valid while Deployment pods are becoming ready
err := d.waitForDeploymentStatusValid()
if err != nil {
return fmt.Errorf("failed to wait for Deployment status %s: %v", d.deployment.Name, err)
}
return nil
}
func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*v1beta1.Deployment, error) {
return testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf)
}
func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) error {
if err := testutil.WaitForObservedDeployment(d.c, d.deployment.Namespace, d.deployment.Name, desiredGeneration); err != nil {
return fmt.Errorf("failed waiting for ObservedGeneration of deployment %s to become %d: %v", d.deployment.Name, desiredGeneration, err)
}
return nil
}
func (d *deploymentTester) getNewReplicaSet() (*v1beta1.ReplicaSet, error) {
rs, err := deploymentutil.GetNewReplicaSet(d.deployment, d.c.ExtensionsV1beta1())
if err != nil {
return nil, fmt.Errorf("failed retrieving new replicaset of deployment %s: %v", d.deployment.Name, err)
}
return rs, nil
}
func (d *deploymentTester) expectNoNewReplicaSet() error {
rs, err := d.getNewReplicaSet()
if err != nil {
return err
}
if rs != nil {
return fmt.Errorf("expected deployment %s not to create a new replicaset, got %v", d.deployment.Name, rs)
}
return nil
}
func (d *deploymentTester) expectNewReplicaSet() (*v1beta1.ReplicaSet, error) {
rs, err := d.getNewReplicaSet()
if err != nil {
return nil, err
}
if rs == nil {
return nil, fmt.Errorf("expected deployment %s to create a new replicaset, got nil", d.deployment.Name)
}
return rs, nil
}
func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {
return testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf)
}
| test/integration/deployment/util.go | 1 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.9951110482215881,
0.29083189368247986,
0.0001690929930191487,
0.012043454684317112,
0.37897053360939026
] |
{
"id": 7,
"code_window": [
"\t}\n",
"\treturn rs, nil\n",
"}\n",
"\n",
"func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {\n",
"\treturn testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\treturn testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 260
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"client.go",
"models.go",
"operations.go",
"registries.go",
"version.go",
],
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/date:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/validation:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00017428406863473356,
0.00017276554717682302,
0.00017146016762126237,
0.00017265896894969046,
0.0000010042765552498167
] |
{
"id": 7,
"code_window": [
"\t}\n",
"\treturn rs, nil\n",
"}\n",
"\n",
"func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {\n",
"\treturn testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\treturn testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 260
} | // Package storage implements the Azure ARM Storage service API version
// 2016-12-01.
//
// The Azure Storage Management API.
package storage
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
import (
"github.com/Azure/go-autorest/autorest"
)
const (
// DefaultBaseURI is the default URI used for the service Storage
DefaultBaseURI = "https://management.azure.com"
)
// ManagementClient is the base client for Storage.
type ManagementClient struct {
autorest.Client
BaseURI string
SubscriptionID string
}
// New creates an instance of the ManagementClient client.
func New(subscriptionID string) ManagementClient {
return NewWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewWithBaseURI creates an instance of the ManagementClient client.
func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
return ManagementClient{
Client: autorest.NewClientWithUserAgent(UserAgent()),
BaseURI: baseURI,
SubscriptionID: subscriptionID,
}
}
| vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.0001779964950401336,
0.00017179828137159348,
0.0001649145851843059,
0.00017233620746992528,
0.000005206898549658945
] |
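The client.go snapshot above only shows the constructors. A hedged usage sketch follows; the subscription ID and alternate endpoint are placeholders, and a real caller must also set an Authorizer on the embedded autorest.Client before issuing requests:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/storage"
)

func main() {
	// Placeholder subscription ID for illustration only.
	client := storage.New("00000000-0000-0000-0000-000000000000")
	// NewWithBaseURI targets sovereign or otherwise non-default clouds.
	alt := storage.NewWithBaseURI("https://management.example.invalid", "00000000-0000-0000-0000-000000000000")
	fmt.Println(client.BaseURI, alt.BaseURI)
}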
{
"id": 7,
"code_window": [
"\t}\n",
"\treturn rs, nil\n",
"}\n",
"\n",
"func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {\n",
"\treturn testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\treturn testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)\n"
],
"file_path": "test/integration/deployment/util.go",
"type": "replace",
"edit_start_line_idx": 260
} | apiVersion: v1
kind: ReplicationController
metadata:
name: redis-sentinel
spec:
replicas: 1
selector:
redis-sentinel: "true"
template:
metadata:
labels:
name: redis-sentinel
redis-sentinel: "true"
role: sentinel
spec:
containers:
- name: sentinel
image: gcr.io/google_containers/redis:v1
env:
- name: SENTINEL
value: "true"
ports:
- containerPort: 26379
| examples/storage/redis/redis-sentinel-controller.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.0001729296491248533,
0.00017067654698621482,
0.00016664784925524145,
0.00017245214257854968,
0.0000028553818083310034
] |
{
"id": 8,
"code_window": [
"}\n",
"\n",
"type UpdateDeploymentFunc func(d *extensions.Deployment)\n",
"\n",
"func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn) (*extensions.Deployment, error) {\n",
"\tvar deployment *extensions.Deployment\n",
"\tvar updateErr error\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.Deployment, error) {\n"
],
"file_path": "test/utils/deployment.go",
"type": "replace",
"edit_start_line_idx": 218
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"net/http/httptest"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/deployment"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/test/integration/framework"
testutil "k8s.io/kubernetes/test/utils"
)
const (
pollInterval = 1 * time.Second
pollTimeout = 60 * time.Second
fakeImageName = "fake-name"
fakeImage = "fakeimage"
)
var pauseFn = func(update *v1beta1.Deployment) {
update.Spec.Paused = true
}
var resumeFn = func(update *v1beta1.Deployment) {
update.Spec.Paused = false
}
type deploymentTester struct {
t *testing.T
c clientset.Interface
deployment *v1beta1.Deployment
}
func testLabels() map[string]string {
return map[string]string{"name": "test"}
}
// newDeployment returns a RollingUpdate Deployment with a fake container image
func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment {
return &v1beta1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "extensions/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Spec: v1beta1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: testLabels()},
Strategy: v1beta1.DeploymentStrategy{
Type: v1beta1.RollingUpdateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: testLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: fakeImageName,
Image: fakeImage,
},
},
},
},
},
}
}
// dcSetup sets up necessities for a Deployment integration test, including master, apiserver, informers, and clientset
func dcSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.ReplicaSetController, *deployment.DeploymentController, informers.SharedInformerFactory, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s, closeFn := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("error creating clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-informers")), resyncPeriod)
dc := deployment.NewDeploymentController(
informers.Extensions().V1beta1().Deployments(),
informers.Extensions().V1beta1().ReplicaSets(),
informers.Core().V1().Pods(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-controller")),
)
rm := replicaset.NewReplicaSetController(
informers.Extensions().V1beta1().ReplicaSets(),
informers.Core().V1().Pods(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
replicaset.BurstReplicas,
)
return s, closeFn, rm, dc, informers, clientSet
}
// dcSimpleSetup sets up necessities for a Deployment integration test, including master, apiserver,
// and clientset, but not controllers and informers
func dcSimpleSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s, closeFn := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("error creating clientset: %v", err)
}
return s, closeFn, clientSet
}
// addPodConditionReady sets the given pod's status to ready at the given time
func addPodConditionReady(pod *v1.Pod, time metav1.Time) {
pod.Status = v1.PodStatus{
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
LastTransitionTime: time,
},
},
}
}
func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image string) error {
if err := testutil.WaitForDeploymentRevisionAndImage(d.c, d.deployment.Namespace, d.deployment.Name, revision, image, d.t.Logf, pollInterval, pollTimeout); err != nil {
return fmt.Errorf("failed to wait for Deployment revision %s: %v", d.deployment.Name, err)
}
return nil
}
// markAllPodsReady manually updates all Deployment pod statuses to ready
func (d *deploymentTester) markAllPodsReady() {
ns := d.deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(d.deployment.Spec.Selector)
if err != nil {
d.t.Fatalf("failed to parse Deployment selector: %v", err)
}
var readyPods int32
err = wait.Poll(100*time.Millisecond, pollTimeout, func() (bool, error) {
readyPods = 0
pods, err := d.c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
d.t.Logf("failed to list Deployment pods, will retry later: %v", err)
return false, nil
}
for i := range pods.Items {
pod := pods.Items[i]
if podutil.IsPodReady(&pod) {
readyPods++
continue
}
addPodConditionReady(&pod, metav1.Now())
if _, err = d.c.Core().Pods(ns).UpdateStatus(&pod); err != nil {
d.t.Logf("failed to update Deployment pod %s, will retry later: %v", pod.Name, err)
} else {
readyPods++
}
}
if readyPods >= *d.deployment.Spec.Replicas {
return true, nil
}
return false, nil
})
if err != nil {
d.t.Fatalf("failed to mark all Deployment pods to ready: %v", err)
}
}
func (d *deploymentTester) waitForDeploymentStatusValid() error {
return testutil.WaitForDeploymentStatusValid(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout)
}
// waitForDeploymentStatusValidAndMarkPodsReady waits for the Deployment status to become valid
// while marking all Deployment pods as ready at the same time.
func (d *deploymentTester) waitForDeploymentStatusValidAndMarkPodsReady() error {
// Manually mark all Deployment pods as ready in a separate goroutine
go d.markAllPodsReady()
// Make sure the Deployment status is valid while Deployment pods are becoming ready
err := d.waitForDeploymentStatusValid()
if err != nil {
return fmt.Errorf("failed to wait for Deployment status %s: %v", d.deployment.Name, err)
}
return nil
}
func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*v1beta1.Deployment, error) {
return testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf)
}
func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) error {
if err := testutil.WaitForObservedDeployment(d.c, d.deployment.Namespace, d.deployment.Name, desiredGeneration); err != nil {
return fmt.Errorf("failed waiting for ObservedGeneration of deployment %s to become %d: %v", d.deployment.Name, desiredGeneration, err)
}
return nil
}
func (d *deploymentTester) getNewReplicaSet() (*v1beta1.ReplicaSet, error) {
rs, err := deploymentutil.GetNewReplicaSet(d.deployment, d.c.ExtensionsV1beta1())
if err != nil {
return nil, fmt.Errorf("failed retrieving new replicaset of deployment %s: %v", d.deployment.Name, err)
}
return rs, nil
}
func (d *deploymentTester) expectNoNewReplicaSet() error {
rs, err := d.getNewReplicaSet()
if err != nil {
return err
}
if rs != nil {
return fmt.Errorf("expected deployment %s not to create a new replicaset, got %v", d.deployment.Name, rs)
}
return nil
}
func (d *deploymentTester) expectNewReplicaSet() (*v1beta1.ReplicaSet, error) {
rs, err := d.getNewReplicaSet()
if err != nil {
return nil, err
}
if rs == nil {
return nil, fmt.Errorf("expected deployment %s to create a new replicaset, got nil", d.deployment.Name)
}
return rs, nil
}
func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {
return testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf)
}
| test/integration/deployment/util.go | 1 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.9993507266044617,
0.26508885622024536,
0.000170086495927535,
0.030263638123869896,
0.3932124972343445
] |
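The hunks above thread pollInterval/pollTimeout through testutil.UpdateDeploymentWithRetries. A hedged sketch of the shape such a helper takes — fetch, mutate, attempt the write, retry on conflict until the timeout. This is simplified; the real testutil version differs in details such as logging and error wrapping:

package testsketch

import (
	"time"

	"k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

func updateWithRetries(c clientset.Interface, ns, name string,
	apply func(*v1beta1.Deployment), logf func(string, ...interface{}),
	pollInterval, pollTimeout time.Duration) (*v1beta1.Deployment, error) {
	var d *v1beta1.Deployment
	var updateErr error
	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
		var err error
		// Refetch so the mutation applies to the latest resourceVersion.
		if d, err = c.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{}); err != nil {
			return false, err
		}
		apply(d)
		if d, err = c.ExtensionsV1beta1().Deployments(ns).Update(d); err == nil {
			return true, nil
		}
		// Typically a conflict; remember it and retry after pollInterval.
		updateErr = err
		logf("retrying update of deployment %q: %v", name, err)
		return false, nil
	})
	if err == wait.ErrWaitTimeout {
		err = updateErr
	}
	return d, err
}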
{
"id": 8,
"code_window": [
"}\n",
"\n",
"type UpdateDeploymentFunc func(d *extensions.Deployment)\n",
"\n",
"func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn) (*extensions.Deployment, error) {\n",
"\tvar deployment *extensions.Deployment\n",
"\tvar updateErr error\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.Deployment, error) {\n"
],
"file_path": "test/utils/deployment.go",
"type": "replace",
"edit_start_line_idx": 218
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"encoding/json"
"net/http"
"net/url"
"golang.org/x/net/context"
)
type Role struct {
Role string `json:"role"`
Permissions Permissions `json:"permissions"`
Grant *Permissions `json:"grant,omitempty"`
Revoke *Permissions `json:"revoke,omitempty"`
}
type Permissions struct {
KV rwPermission `json:"kv"`
}
type rwPermission struct {
Read []string `json:"read"`
Write []string `json:"write"`
}
type PermissionType int
const (
ReadPermission PermissionType = iota
WritePermission
ReadWritePermission
)
// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
// interact with etcd's role creation and modification features.
func NewAuthRoleAPI(c Client) AuthRoleAPI {
return &httpAuthRoleAPI{
client: c,
}
}
type AuthRoleAPI interface {
// AddRole adds a role.
AddRole(ctx context.Context, role string) error
// RemoveRole removes a role.
RemoveRole(ctx context.Context, role string) error
// GetRole retrieves role details.
GetRole(ctx context.Context, role string) (*Role, error)
// GrantRoleKV grants a role some permission prefixes for the KV store.
GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
// RevokeRoleKV revokes some permission prefixes for a role on the KV store.
RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
// ListRoles lists roles.
ListRoles(ctx context.Context) ([]string, error)
}
type httpAuthRoleAPI struct {
client httpClient
}
type authRoleAPIAction struct {
verb string
name string
role *Role
}
type authRoleAPIList struct{}
func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "roles", "")
req, _ := http.NewRequest("GET", u.String(), nil)
req.Header.Set("Content-Type", "application/json")
return req
}
func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "roles", l.name)
if l.role == nil {
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
b, err := json.Marshal(l.role)
if err != nil {
panic(err)
}
body := bytes.NewReader(b)
req, _ := http.NewRequest(l.verb, u.String(), body)
req.Header.Set("Content-Type", "application/json")
return req
}
func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var roleList struct {
Roles []Role `json:"roles"`
}
if err = json.Unmarshal(body, &roleList); err != nil {
return nil, err
}
ret := make([]string, 0, len(roleList.Roles))
for _, r := range roleList.Roles {
ret = append(ret, r.Role)
}
return ret, nil
}
func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
role := &Role{
Role: rolename,
}
return r.addRemoveRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
return r.addRemoveRole(ctx, &authRoleAPIAction{
verb: "DELETE",
name: rolename,
})
}
func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
resp, body, err := r.client.Do(ctx, req)
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
return r.modRole(ctx, &authRoleAPIAction{
verb: "GET",
name: rolename,
})
}
func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
var out rwPermission
switch permType {
case ReadPermission:
out.Read = prefixes
case WritePermission:
out.Write = prefixes
case ReadWritePermission:
out.Read = prefixes
out.Write = prefixes
}
return out
}
func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
rwp := buildRWPermission(prefixes, permType)
role := &Role{
Role: rolename,
Grant: &Permissions{
KV: rwp,
},
}
return r.modRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
rwp := buildRWPermission(prefixes, permType)
role := &Role{
Role: rolename,
Revoke: &Permissions{
KV: rwp,
},
}
return r.modRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
resp, body, err := r.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var role Role
if err = json.Unmarshal(body, &role); err != nil {
return nil, err
}
return &role, nil
}
| vendor/github.com/coreos/etcd/client/auth_role.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.000343015359248966,
0.0001887279941001907,
0.00016572196909692138,
0.00016947905533015728,
0.000047529985749861225
] |
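A minimal usage sketch for the AuthRoleAPI in the etcd client file above; the endpoint, role name, and key prefix are illustrative assumptions, not values from the source.
package main
import (
"fmt"
"log"
"github.com/coreos/etcd/client"
"golang.org/x/net/context"
)
func main() {
// The endpoint is an assumption for illustration.
c, err := client.New(client.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
if err != nil {
log.Fatal(err)
}
roles := client.NewAuthRoleAPI(c)
ctx := context.Background()
// Create a role, then grant it read access to a hypothetical key prefix.
if err := roles.AddRole(ctx, "frontend"); err != nil {
log.Fatal(err)
}
role, err := roles.GrantRoleKV(ctx, "frontend", []string{"/app/*"}, client.ReadPermission)
if err != nil {
log.Fatal(err)
}
fmt.Printf("read prefixes now granted: %v\n", role.Permissions.KV.Read)
}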
{
"id": 8,
"code_window": [
"}\n",
"\n",
"type UpdateDeploymentFunc func(d *extensions.Deployment)\n",
"\n",
"func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn) (*extensions.Deployment, error) {\n",
"\tvar deployment *extensions.Deployment\n",
"\tvar updateErr error\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.Deployment, error) {\n"
],
"file_path": "test/utils/deployment.go",
"type": "replace",
"edit_start_line_idx": 218
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"k8s.io/kubernetes/pkg/api"
)
func getClaimRefNamespace(pv *api.PersistentVolume) string {
if pv.Spec.ClaimRef != nil {
return pv.Spec.ClaimRef.Namespace
}
return ""
}
// Visitor is called with each object's namespace and name, and returns true if visiting should continue
type Visitor func(namespace, name string) (shouldContinue bool)
// VisitPVSecretNames invokes the visitor function with the name of every secret
// referenced by the PV spec. If visitor returns false, visiting is short-circuited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPVSecretNames(pv *api.PersistentVolume, visitor Visitor) bool {
source := &pv.Spec.PersistentVolumeSource
switch {
case source.AzureFile != nil:
if source.AzureFile.SecretNamespace != nil && len(*source.AzureFile.SecretNamespace) > 0 {
if len(source.AzureFile.SecretName) > 0 && !visitor(*source.AzureFile.SecretNamespace, source.AzureFile.SecretName) {
return false
}
} else {
if len(source.AzureFile.SecretName) > 0 && !visitor(getClaimRefNamespace(pv), source.AzureFile.SecretName) {
return false
}
}
return true
case source.CephFS != nil:
if source.CephFS.SecretRef != nil {
// previously persisted PV objects use claimRef namespace
ns := getClaimRefNamespace(pv)
if len(source.CephFS.SecretRef.Namespace) > 0 {
// use the secret namespace if namespace is set
ns = source.CephFS.SecretRef.Namespace
}
if !visitor(ns, source.CephFS.SecretRef.Name) {
return false
}
}
case source.FlexVolume != nil:
if source.FlexVolume.SecretRef != nil && !visitor(getClaimRefNamespace(pv), source.FlexVolume.SecretRef.Name) {
return false
}
case source.RBD != nil:
if source.RBD.SecretRef != nil && !visitor(getClaimRefNamespace(pv), source.RBD.SecretRef.Name) {
return false
}
case source.ScaleIO != nil:
if source.ScaleIO.SecretRef != nil && !visitor(getClaimRefNamespace(pv), source.ScaleIO.SecretRef.Name) {
return false
}
case source.ISCSI != nil:
if source.ISCSI.SecretRef != nil && !visitor(getClaimRefNamespace(pv), source.ISCSI.SecretRef.Name) {
return false
}
case source.StorageOS != nil:
if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Namespace, source.StorageOS.SecretRef.Name) {
return false
}
}
return true
}
| pkg/api/persistentvolume/util.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.0006974402931518853,
0.0002330371062271297,
0.0001685140305198729,
0.00017575998208485544,
0.00016423936176579446
] |
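A short sketch of how the VisitPVSecretNames visitor above can be driven; the RBD volume and claim reference are hypothetical fixtures, assuming the internal API types of this era.
package main
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/persistentvolume"
)
// collectPVSecrets gathers every namespace/name pair the visitor reports.
func collectPVSecrets(pv *api.PersistentVolume) []string {
var refs []string
persistentvolume.VisitPVSecretNames(pv, func(namespace, name string) bool {
refs = append(refs, namespace+"/"+name)
return true // returning false would short-circuit the walk
})
return refs
}
func main() {
// RBD carries only a secret name, so the claimRef namespace is used.
pv := &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
ClaimRef: &api.ObjectReference{Namespace: "storage"},
PersistentVolumeSource: api.PersistentVolumeSource{
RBD: &api.RBDVolumeSource{
SecretRef: &api.LocalObjectReference{Name: "ceph-secret"},
},
},
},
}
fmt.Println(collectPVSecrets(pv)) // [storage/ceph-secret]
}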
{
"id": 8,
"code_window": [
"}\n",
"\n",
"type UpdateDeploymentFunc func(d *extensions.Deployment)\n",
"\n",
"func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn) (*extensions.Deployment, error) {\n",
"\tvar deployment *extensions.Deployment\n",
"\tvar updateErr error\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.Deployment, error) {\n"
],
"file_path": "test/utils/deployment.go",
"type": "replace",
"edit_start_line_idx": 218
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"io"
"os"
"path"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/util/i18n"
"k8s.io/kubernetes/pkg/printers"
)
// NewCmdCreateSecret groups subcommands to create various types of secrets
func NewCmdClusterInfoDump(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "dump",
Short: i18n.T("Dump lots of relevant info for debugging and diagnosis"),
Long: dumpLong,
Example: dumpExample,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(dumpClusterInfo(f, cmd, cmdOut))
},
}
cmd.Flags().String("output-directory", "", i18n.T("Where to output the files. If empty or '-' uses stdout, otherwise creates a directory hierarchy in that directory"))
cmd.Flags().StringSlice("namespaces", []string{}, "A comma-separated list of namespaces to dump.")
cmd.Flags().Bool("all-namespaces", false, "If true, dump all namespaces. If true, --namespaces is ignored.")
cmdutil.AddPodRunningTimeoutFlag(cmd, defaultPodLogsTimeout)
return cmd
}
var (
dumpLong = templates.LongDesc(i18n.T(`
Dumps cluster information suitable for debugging and diagnosing cluster problems. By default, dumps everything to
stdout. You can optionally specify a directory with --output-directory. If you specify a directory, Kubernetes will
build a set of files in that directory. By default only dumps things in the 'kube-system' namespace, but you can
switch to a different namespace with the --namespaces flag, or specify --all-namespaces to dump all namespaces.
The command also dumps the logs of all of the pods in the cluster; these logs are dumped into different directories
based on namespace and pod name.`))
dumpExample = templates.Examples(i18n.T(`
# Dump current cluster state to stdout
kubectl cluster-info dump
# Dump current cluster state to /path/to/cluster-state
kubectl cluster-info dump --output-directory=/path/to/cluster-state
# Dump all namespaces to stdout
kubectl cluster-info dump --all-namespaces
# Dump a set of namespaces to /path/to/cluster-state
kubectl cluster-info dump --namespaces default,kube-system --output-directory=/path/to/cluster-state`))
)
func setupOutputWriter(cmd *cobra.Command, defaultWriter io.Writer, filename string) io.Writer {
dir := cmdutil.GetFlagString(cmd, "output-directory")
if len(dir) == 0 || dir == "-" {
return defaultWriter
}
fullFile := path.Join(dir, filename)
parent := path.Dir(fullFile)
cmdutil.CheckErr(os.MkdirAll(parent, 0755))
file, err := os.Create(fullFile)
cmdutil.CheckErr(err)
return file
}
func dumpClusterInfo(f cmdutil.Factory, cmd *cobra.Command, out io.Writer) error {
timeout, err := cmdutil.GetPodRunningTimeoutFlag(cmd)
if err != nil {
return cmdutil.UsageErrorf(cmd, "%s", err.Error())
}
clientset, err := f.ClientSet()
if err != nil {
return err
}
printer := &printers.JSONPrinter{}
nodes, err := clientset.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
return err
}
if err := printer.PrintObj(nodes, setupOutputWriter(cmd, out, "nodes.json")); err != nil {
return err
}
var namespaces []string
if cmdutil.GetFlagBool(cmd, "all-namespaces") {
namespaceList, err := clientset.Core().Namespaces().List(metav1.ListOptions{})
if err != nil {
return err
}
for ix := range namespaceList.Items {
namespaces = append(namespaces, namespaceList.Items[ix].Name)
}
} else {
namespaces = cmdutil.GetFlagStringSlice(cmd, "namespaces")
if len(namespaces) == 0 {
cmdNamespace, _, err := f.DefaultNamespace()
if err != nil {
return err
}
namespaces = []string{
metav1.NamespaceSystem,
cmdNamespace,
}
}
}
for _, namespace := range namespaces {
// TODO: this is repetitive in the extreme. Use reflection or
// something to make this a for loop.
events, err := clientset.Core().Events(namespace).List(metav1.ListOptions{})
if err != nil {
return err
}
if err := printer.PrintObj(events, setupOutputWriter(cmd, out, path.Join(namespace, "events.json"))); err != nil {
return err
}
rcs, err := clientset.Core().ReplicationControllers(namespace).List(metav1.ListOptions{})
if err != nil {
return err
}
if err := printer.PrintObj(rcs, setupOutputWriter(cmd, out, path.Join(namespace, "replication-controllers.json"))); err != nil {
return err
}
svcs, err := clientset.Core().Services(namespace).List(metav1.ListOptions{})
if err != nil {
return err
}
if err := printer.PrintObj(svcs, setupOutputWriter(cmd, out, path.Join(namespace, "services.json"))); err != nil {
return err
}
sets, err := clientset.Extensions().DaemonSets(namespace).List(metav1.ListOptions{})
if err != nil {
return err
}
if err := printer.PrintObj(sets, setupOutputWriter(cmd, out, path.Join(namespace, "daemonsets.json"))); err != nil {
return err
}
deps, err := clientset.Extensions().Deployments(namespace).List(metav1.ListOptions{})
if err != nil {
return err
}
if err := printer.PrintObj(deps, setupOutputWriter(cmd, out, path.Join(namespace, "deployments.json"))); err != nil {
return err
}
rps, err := clientset.Extensions().ReplicaSets(namespace).List(metav1.ListOptions{})
if err != nil {
return err
}
if err := printer.PrintObj(rps, setupOutputWriter(cmd, out, path.Join(namespace, "replicasets.json"))); err != nil {
return err
}
pods, err := clientset.Core().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
return err
}
if err := printer.PrintObj(pods, setupOutputWriter(cmd, out, path.Join(namespace, "pods.json"))); err != nil {
return err
}
printContainer := func(writer io.Writer, container api.Container, pod *api.Pod) {
writer.Write([]byte(fmt.Sprintf("==== START logs for container %s of pod %s/%s ====\n", container.Name, pod.Namespace, pod.Name)))
defer writer.Write([]byte(fmt.Sprintf("==== END logs for container %s of pod %s/%s ====\n", container.Name, pod.Namespace, pod.Name)))
request, err := f.LogsForObject(pod, &api.PodLogOptions{Container: container.Name}, timeout)
if err != nil {
// Print error and return.
writer.Write([]byte(fmt.Sprintf("Create log request error: %s\n", err.Error())))
return
}
data, err := request.DoRaw()
if err != nil {
// Print error and return.
writer.Write([]byte(fmt.Sprintf("Request log error: %s\n", err.Error())))
return
}
writer.Write(data)
}
for ix := range pods.Items {
pod := &pods.Items[ix]
containers := pod.Spec.Containers
writer := setupOutputWriter(cmd, out, path.Join(namespace, pod.Name, "logs.txt"))
for i := range containers {
printContainer(writer, containers[i], pod)
}
}
}
dir := cmdutil.GetFlagString(cmd, "output-directory")
if len(dir) == 0 {
dir = "standard output"
}
if dir != "-" {
fmt.Fprintf(out, "Cluster info dumped to %s\n", dir)
}
return nil
}
| pkg/kubectl/cmd/clusterinfo_dump.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.057859260588884354,
0.00457768002524972,
0.00016548858548048884,
0.00019458812312223017,
0.012618658132851124
] |
{
"id": 9,
"code_window": [
"\tvar deployment *extensions.Deployment\n",
"\tvar updateErr error\n",
"\tpollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {\n",
"\t\tvar err error\n",
"\t\tif deployment, err = c.Extensions().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil {\n",
"\t\t\treturn false, err\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/utils/deployment.go",
"type": "replace",
"edit_start_line_idx": 221
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"time"
"github.com/davecgh/go-spew/spew"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
)
type LogfFn func(format string, args ...interface{})
func LogReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, logf LogfFn) {
if newRS != nil {
logf(spew.Sprintf("New ReplicaSet %q of Deployment %q:\n%+v", newRS.Name, deployment.Name, *newRS))
} else {
logf("New ReplicaSet of Deployment %q is nil.", deployment.Name)
}
if len(allOldRSs) > 0 {
logf("All old ReplicaSets of Deployment %q:", deployment.Name)
}
for i := range allOldRSs {
logf(spew.Sprintf("%+v", *allOldRSs[i]))
}
}
func LogPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, logf LogfFn) {
minReadySeconds := deployment.Spec.MinReadySeconds
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
return c.Core().Pods(namespace).List(options)
}
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
if err != nil {
logf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)
return
}
for _, pod := range podList.Items {
availability := "not available"
if podutil.IsPodAvailable(&pod, minReadySeconds, metav1.Now()) {
availability = "available"
}
logf(spew.Sprintf("Pod %q is %s:\n%+v", pod.Name, availability, pod))
}
}
// Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore).
// Note that the status should stay valid at all times except shortly after a scaling event or just after the deployment is created.
// To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead.
func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
var (
oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
newRS *extensions.ReplicaSet
deployment *extensions.Deployment
reason string
)
err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
if err != nil {
return false, err
}
if newRS == nil {
// New RS hasn't been created yet.
reason = "new replica set hasn't been created yet"
logf(reason)
return false, nil
}
allRSs = append(oldRSs, newRS)
// The old/new ReplicaSets need to contain the pod-template-hash label
for i := range allRSs {
if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
reason = "all replica sets need to contain the pod-template-hash label"
logf(reason)
return false, nil
}
}
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
if totalCreated > maxCreated {
reason = fmt.Sprintf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
logf(reason)
return false, nil
}
minAvailable := deploymentutil.MinAvailable(deployment)
if deployment.Status.AvailableReplicas < minAvailable {
reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
logf(reason)
return false, nil
}
// When the deployment status and its underlying resources reach the desired state, we're done
if deploymentutil.DeploymentComplete(deployment, &deployment.Status) {
return true, nil
}
reason = fmt.Sprintf("deployment status: %#v", deployment.Status)
logf(reason)
return false, nil
})
if err == wait.ErrWaitTimeout {
LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, logf)
LogPodsOfDeployment(c, deployment, allRSs, logf)
err = fmt.Errorf("%s", reason)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
}
return nil
}
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
var deployment *extensions.Deployment
var newRS *extensions.ReplicaSet
var reason string
err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
// The new ReplicaSet needs to be non-nil and contain the pod-template-hash label
newRS, err = deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
if err != nil {
return false, err
}
if newRS == nil {
reason = fmt.Sprintf("New replica set for deployment %q is yet to be created", deployment.Name)
logf(reason)
return false, nil
}
if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
reason = fmt.Sprintf("New replica set %q doesn't have DefaultDeploymentUniqueLabelKey", newRS.Name)
logf(reason)
return false, nil
}
// Check revision of this deployment, and of the new replica set of this deployment
if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision {
reason = fmt.Sprintf("Deployment %q doesn't have the required revision set", deployment.Name)
logf(reason)
return false, nil
}
if !containsImage(deployment.Spec.Template.Spec.Containers, image) {
reason = fmt.Sprintf("Deployment %q doesn't have the required image %s set", deployment.Name, image)
logf(reason)
return false, nil
}
if newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision {
reason = fmt.Sprintf("New replica set %q doesn't have the required revision set", newRS.Name)
logf(reason)
return false, nil
}
if !containsImage(newRS.Spec.Template.Spec.Containers, image) {
reason = fmt.Sprintf("New replica set %q doesn't have the required image %s.", newRS.Name, image)
logf(reason)
return false, nil
}
return true, nil
})
if err == wait.ErrWaitTimeout {
LogReplicaSetsOfDeployment(deployment, nil, newRS, logf)
err = fmt.Errorf("%s", reason)
}
if newRS == nil {
return fmt.Errorf("deployment %q failed to create new replica set", deploymentName)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q (got %s / %s) and new replica set %q (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deployment.Spec.Template.Spec.Containers[0].Image, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRS.Spec.Template.Spec.Containers[0].Image, revision, image, err)
}
return nil
}
func containsImage(containers []v1.Container, imageName string) bool {
for _, container := range containers {
if container.Image == imageName {
return true
}
}
return false
}
type UpdateDeploymentFunc func(d *extensions.Deployment)
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn) (*extensions.Deployment, error) {
var deployment *extensions.Deployment
var updateErr error
pollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
var err error
if deployment, err = c.Extensions().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(deployment)
if deployment, err = c.Extensions().Deployments(namespace).Update(deployment); err == nil {
logf("Updating deployment %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to deployment %q: %v", name, updateErr)
}
return deployment, pollErr
}
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
return c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
}, desiredGeneration, 2*time.Second, 1*time.Minute)
}
| test/utils/deployment.go | 1 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.9981641173362732,
0.40967434644699097,
0.0001657986140344292,
0.059828441590070724,
0.4653286039829254
] |
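A sketch of driving UpdateDeploymentWithRetries from the test/utils/deployment.go dump above, before the poll parameters were threaded through; the namespace, deployment name, and image are illustrative.
package example
import (
"testing"
extensions "k8s.io/api/extensions/v1beta1"
clientset "k8s.io/client-go/kubernetes"
testutil "k8s.io/kubernetes/test/utils"
)
// bumpImage shows the retry pattern: the mutation closure is re-applied to a
// freshly fetched object on every attempt, so update conflicts self-heal.
func bumpImage(t *testing.T, c clientset.Interface) {
updated, err := testutil.UpdateDeploymentWithRetries(c, "default", "nginx",
func(d *extensions.Deployment) {
d.Spec.Template.Spec.Containers[0].Image = "nginx:1.13"
}, t.Logf)
if err != nil {
t.Fatal(err)
}
t.Logf("deployment updated, generation %d", updated.Generation)
}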
{
"id": 9,
"code_window": [
"\tvar deployment *extensions.Deployment\n",
"\tvar updateErr error\n",
"\tpollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {\n",
"\t\tvar err error\n",
"\t\tif deployment, err = c.Extensions().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil {\n",
"\t\t\treturn false, err\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/utils/deployment.go",
"type": "replace",
"edit_start_line_idx": 221
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
fuzz "github.com/google/gofuzz"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/storage"
)
// Funcs returns the fuzzer functions for the storage api group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(obj *storage.StorageClass, c fuzz.Continue) {
c.FuzzNoCustom(obj) // fuzz self without calling this function again
reclamationPolicies := []api.PersistentVolumeReclaimPolicy{api.PersistentVolumeReclaimDelete, api.PersistentVolumeReclaimRetain}
obj.ReclaimPolicy = &reclamationPolicies[c.Rand.Intn(len(reclamationPolicies))]
},
}
}
| pkg/apis/storage/fuzzer/fuzzer.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.0001767796784406528,
0.0001726042537484318,
0.0001655888045206666,
0.00017402427329216152,
0.000004618087587004993
] |
{
"id": 9,
"code_window": [
"\tvar deployment *extensions.Deployment\n",
"\tvar updateErr error\n",
"\tpollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {\n",
"\t\tvar err error\n",
"\t\tif deployment, err = c.Extensions().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil {\n",
"\t\t\treturn false, err\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/utils/deployment.go",
"type": "replace",
"edit_start_line_idx": 221
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Simple struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
// +optional
Other string `json:"other,omitempty"`
// +optional
Labels map[string]string `json:"labels,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type SimpleRoot struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
// +optional
Other string `json:"other,omitempty"`
// +optional
Labels map[string]string `json:"labels,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type SimpleGetOptions struct {
metav1.TypeMeta `json:",inline"`
Param1 string `json:"param1"`
Param2 string `json:"param2"`
Path string `json:"atAPath"`
}
func (SimpleGetOptions) SwaggerDoc() map[string]string {
return map[string]string{
"param1": "description for param1",
"param2": "description for param2",
}
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type SimpleList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,inline"`
// +optional
Items []Simple `json:"items,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SimpleXGSubresource is a cross-group subresource, i.e. the subresource does not belong to the
// same group as its parent resource.
type SimpleXGSubresource struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
SubresourceInfo string `json:"subresourceInfo,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
}
| staging/src/k8s.io/apiserver/pkg/endpoints/testing/types.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.003970794845372438,
0.0006702602840960026,
0.00016300832794513553,
0.00017529551405459642,
0.0012484033359214664
] |
{
"id": 9,
"code_window": [
"\tvar deployment *extensions.Deployment\n",
"\tvar updateErr error\n",
"\tpollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {\n",
"\t\tvar err error\n",
"\t\tif deployment, err = c.Extensions().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil {\n",
"\t\t\treturn false, err\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/utils/deployment.go",
"type": "replace",
"edit_start_line_idx": 221
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["type.go"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| staging/src/k8s.io/code-generator/third_party/forked/golang/reflect/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00022803635511081666,
0.00019017723388969898,
0.00017046756693162024,
0.00017202776507474482,
0.000026778021492646076
] |
{
"id": 10,
"code_window": [
"\tclientset \"k8s.io/client-go/kubernetes\"\n",
")\n",
"\n",
"type UpdateReplicaSetFunc func(d *extensions.ReplicaSet)\n",
"\n",
"func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateReplicaSetFunc, logf LogfFn) (*extensions.ReplicaSet, error) {\n",
"\tvar rs *extensions.ReplicaSet\n",
"\tvar updateErr error\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateReplicaSetFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.ReplicaSet, error) {\n"
],
"file_path": "test/utils/replicaset.go",
"type": "replace",
"edit_start_line_idx": 30
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"time"
"github.com/davecgh/go-spew/spew"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
)
type LogfFn func(format string, args ...interface{})
func LogReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, logf LogfFn) {
if newRS != nil {
logf(spew.Sprintf("New ReplicaSet %q of Deployment %q:\n%+v", newRS.Name, deployment.Name, *newRS))
} else {
logf("New ReplicaSet of Deployment %q is nil.", deployment.Name)
}
if len(allOldRSs) > 0 {
logf("All old ReplicaSets of Deployment %q:", deployment.Name)
}
for i := range allOldRSs {
logf(spew.Sprintf("%+v", *allOldRSs[i]))
}
}
func LogPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, logf LogfFn) {
minReadySeconds := deployment.Spec.MinReadySeconds
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
return c.Core().Pods(namespace).List(options)
}
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
if err != nil {
logf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)
return
}
for _, pod := range podList.Items {
availability := "not available"
if podutil.IsPodAvailable(&pod, minReadySeconds, metav1.Now()) {
availability = "available"
}
logf(spew.Sprintf("Pod %q is %s:\n%+v", pod.Name, availability, pod))
}
}
// Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore).
// Note that the status should stay valid at all times unless shortly after a scaling event or the deployment is just created.
// To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead.
func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
var (
oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
newRS *extensions.ReplicaSet
deployment *extensions.Deployment
reason string
)
err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
if err != nil {
return false, err
}
if newRS == nil {
// New RS hasn't been created yet.
reason = "new replica set hasn't been created yet"
logf(reason)
return false, nil
}
allRSs = append(oldRSs, newRS)
// The old/new ReplicaSets need to contain the pod-template-hash label
for i := range allRSs {
if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
reason = "all replica sets need to contain the pod-template-hash label"
logf(reason)
return false, nil
}
}
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
if totalCreated > maxCreated {
reason = fmt.Sprintf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
logf(reason)
return false, nil
}
minAvailable := deploymentutil.MinAvailable(deployment)
if deployment.Status.AvailableReplicas < minAvailable {
reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
logf(reason)
return false, nil
}
// When the deployment status and its underlying resources reach the desired state, we're done
if deploymentutil.DeploymentComplete(deployment, &deployment.Status) {
return true, nil
}
reason = fmt.Sprintf("deployment status: %#v", deployment.Status)
logf(reason)
return false, nil
})
if err == wait.ErrWaitTimeout {
LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, logf)
LogPodsOfDeployment(c, deployment, allRSs, logf)
err = fmt.Errorf("%s", reason)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
}
return nil
}
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
var deployment *extensions.Deployment
var newRS *extensions.ReplicaSet
var reason string
err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
// The new ReplicaSet needs to be non-nil and contain the pod-template-hash label
newRS, err = deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
if err != nil {
return false, err
}
if newRS == nil {
reason = fmt.Sprintf("New replica set for deployment %q is yet to be created", deployment.Name)
logf(reason)
return false, nil
}
if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
reason = fmt.Sprintf("New replica set %q doesn't have DefaultDeploymentUniqueLabelKey", newRS.Name)
logf(reason)
return false, nil
}
// Check revision of this deployment, and of the new replica set of this deployment
if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision {
reason = fmt.Sprintf("Deployment %q doesn't have the required revision set", deployment.Name)
logf(reason)
return false, nil
}
if !containsImage(deployment.Spec.Template.Spec.Containers, image) {
reason = fmt.Sprintf("Deployment %q doesn't have the required image %s set", deployment.Name, image)
logf(reason)
return false, nil
}
if newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision {
reason = fmt.Sprintf("New replica set %q doesn't have the required revision set", newRS.Name)
logf(reason)
return false, nil
}
if !containsImage(newRS.Spec.Template.Spec.Containers, image) {
reason = fmt.Sprintf("New replica set %q doesn't have the required image %s.", newRS.Name, image)
logf(reason)
return false, nil
}
return true, nil
})
if err == wait.ErrWaitTimeout {
LogReplicaSetsOfDeployment(deployment, nil, newRS, logf)
err = fmt.Errorf("%s", reason)
}
if newRS == nil {
return fmt.Errorf("deployment %q failed to create new replica set", deploymentName)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q (got %s / %s) and new replica set %q (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deployment.Spec.Template.Spec.Containers[0].Image, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRS.Spec.Template.Spec.Containers[0].Image, revision, image, err)
}
return nil
}
func containsImage(containers []v1.Container, imageName string) bool {
for _, container := range containers {
if container.Image == imageName {
return true
}
}
return false
}
type UpdateDeploymentFunc func(d *extensions.Deployment)
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn) (*extensions.Deployment, error) {
var deployment *extensions.Deployment
var updateErr error
pollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
var err error
if deployment, err = c.Extensions().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(deployment)
if deployment, err = c.Extensions().Deployments(namespace).Update(deployment); err == nil {
logf("Updating deployment %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to deployment %q: %v", name, updateErr)
}
return deployment, pollErr
}
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
return c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
}, desiredGeneration, 2*time.Second, 1*time.Minute)
}
| test/utils/deployment.go | 1 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.9573231339454651,
0.06910732388496399,
0.00016769285139162093,
0.003128131851553917,
0.1952747404575348
] |
{
"id": 10,
"code_window": [
"\tclientset \"k8s.io/client-go/kubernetes\"\n",
")\n",
"\n",
"type UpdateReplicaSetFunc func(d *extensions.ReplicaSet)\n",
"\n",
"func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateReplicaSetFunc, logf LogfFn) (*extensions.ReplicaSet, error) {\n",
"\tvar rs *extensions.ReplicaSet\n",
"\tvar updateErr error\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateReplicaSetFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.ReplicaSet, error) {\n"
],
"file_path": "test/utils/replicaset.go",
"type": "replace",
"edit_start_line_idx": 30
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"fmt"
)
const (
// ErrCodeNotSupported code for NotSupported Errors.
ErrCodeNotSupported int = iota + 1
ErrCodeNoPathDefined
ErrCodeFsInfoFailed
)
// NewNotSupportedError creates a new MetricsError with code NotSupported.
func NewNotSupportedError() *MetricsError {
return &MetricsError{
Code: ErrCodeNotSupported,
Msg: "metrics are not supported for MetricsNil Volumes",
}
}
// NewNoPathDefinedError creates a new MetricsError with code NoPathDefined.
func NewNoPathDefinedError() *MetricsError {
return &MetricsError{
Code: ErrCodeNoPathDefined,
Msg: "no path defined for disk usage metrics.",
}
}
// NewFsInfoFailedError creates a new MetricsError with code FsInfoFailed.
func NewFsInfoFailedError(err error) *MetricsError {
return &MetricsError{
Code: ErrCodeFsInfoFailed,
Msg: fmt.Sprintf("Failed to get FsInfo due to error %v", err),
}
}
// MetricsError to distinguish different Metrics Errors.
type MetricsError struct {
Code int
Msg string
}
func (e *MetricsError) Error() string {
return fmt.Sprintf("%s", e.Msg)
}
// IsNotSupported returns true if and only if err is a NotSupported error.
func IsNotSupported(err error) bool {
return isErrCode(err, ErrCodeNotSupported)
}
func isErrCode(err error, code int) bool {
if err == nil {
return false
}
if e, ok := err.(*MetricsError); ok {
return e.Code == code
}
return false
}
| pkg/volume/metrics_errors.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.002713424852117896,
0.0006292769103311002,
0.00016329552454408258,
0.00018166989320889115,
0.0008572187507525086
] |
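A sketch of the coded-error pattern from metrics_errors.go above: callers branch on the error code through IsNotSupported rather than matching strings. This assumes pkg/volume's MetricsProvider interface and its MetricsNil stub, which always returns the NotSupported code.
package main
import (
"fmt"
"k8s.io/kubernetes/pkg/volume"
)
func printUsage(m volume.MetricsProvider) {
metrics, err := m.GetMetrics()
if err != nil {
if volume.IsNotSupported(err) {
// MetricsNil volumes land here; treat it as a soft skip.
fmt.Println("volume does not support metrics, skipping")
return
}
fmt.Println("unexpected metrics error:", err)
return
}
fmt.Println("used bytes:", metrics.Used)
}
func main() {
// MetricsNil exercises the NotSupported branch without any real volume.
printUsage(&volume.MetricsNil{})
}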
{
"id": 10,
"code_window": [
"\tclientset \"k8s.io/client-go/kubernetes\"\n",
")\n",
"\n",
"type UpdateReplicaSetFunc func(d *extensions.ReplicaSet)\n",
"\n",
"func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateReplicaSetFunc, logf LogfFn) (*extensions.ReplicaSet, error) {\n",
"\tvar rs *extensions.ReplicaSet\n",
"\tvar updateErr error\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateReplicaSetFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.ReplicaSet, error) {\n"
],
"file_path": "test/utils/replicaset.go",
"type": "replace",
"edit_start_line_idx": 30
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package interfaces
import (
"context"
"google.golang.org/api/googleapi"
"k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype"
)
// Interfaces to directly mirror the Google Cloud DNS API structures.
// See https://godoc.org/google.golang.org/api/dns/v1 for details
// This facilitates stubbing out Google Cloud DNS for unit testing.
// Only the parts of the API that we use are included.
// Others can be added as needed.
type (
Change interface {
Additions() []ResourceRecordSet
Deletions() []ResourceRecordSet
// Id() string // TODO: Add as needed
// Kind() string // TODO: Add as needed
// StartTime() string // TODO: Add as needed
// Status() string // TODO: Add as needed
}
ChangesCreateCall interface {
// Context(ctx context.Context) *ChangesCreateCall // TODO: Add as needed
Do(opts ...googleapi.CallOption) (Change, error)
// Fields(s ...googleapi.Field) *ChangesCreateCall // TODO: Add as needed
}
ChangesGetCall interface {
// Context(ctx context.Context) *ChangesGetCall // TODO: Add as needed
Do(opts ...googleapi.CallOption) (*Change, error)
// Fields(s ...googleapi.Field) *ChangesGetCall // TODO: Add as needed
// IfNoneMatch(entityTag string) *ChangesGetCall // TODO: Add as needed
}
ChangesListCall interface {
// Context(ctx context.Context) *ChangesListCall // TODO: Add as needed
Do(opts ...googleapi.CallOption) (*ChangesListResponse, error)
// Fields(s ...googleapi.Field) *ChangesListCall // TODO: Add as needed
// IfNoneMatch(entityTag string) *ChangesListCall // TODO: Add as needed
// MaxResults(maxResults int64) *ChangesListCall // TODO: Add as needed
// PageToken(pageToken string) *ChangesListCall // TODO: Add as needed
// Pages(ctx context.Context, f func(*ChangesListResponse) error) error // TODO: Add as needed
// SortBy(sortBy string) *ChangesListCall // TODO: Add as needed
// SortOrder(sortOrder string) *ChangesListCall // TODO: Add as needed
}
ChangesListResponse interface {
// Changes() []*Change // TODO: Add as needed
// Kind() string // TODO: Add as needed
// NextPageToken() string // TODO: Add as needed
// ServerResponse() googleapi.ServerResponse // TODO: Add as needed
// ForceSendFields() []string // TODO: Add as needed
}
ChangesService interface {
// Create(project string, managedZone string, change *Change) *ChangesCreateCall // TODO: Add as needed
Create(project string, managedZone string, change Change) ChangesCreateCall
NewChange(additions, deletions []ResourceRecordSet) Change
// Get(project string, managedZone string, changeId string) *ChangesGetCall // TODO: Add as needed
// List(project string, managedZone string) *ChangesListCall // TODO: Add as needed
}
ManagedZone interface {
// CreationTime() string // TODO: Add as needed
// Description() string // TODO: Add as needed
DnsName() string
Id() uint64
// Kind() string // TODO: Add as needed
Name() string
// NameServerSet() string // TODO: Add as needed
// NameServers() []string // TODO: Add as needed
// ServerResponse() googleapi.ServerResponse // TODO: Add as needed
// ForceSendFields() []string // TODO: Add as needed
}
ManagedZonesCreateCall interface {
// Context(ctx context.Context) *ManagedZonesCreateCall // TODO: Add as needed
Do(opts ...googleapi.CallOption) (ManagedZone, error)
// Fields(s ...googleapi.Field) *ManagedZonesCreateCall // TODO: Add as needed
}
ManagedZonesDeleteCall interface {
// Context(ctx context.Context) *ManagedZonesDeleteCall // TODO: Add as needed
Do(opts ...googleapi.CallOption) error
// Fields(s ...googleapi.Field) *ManagedZonesDeleteCall // TODO: Add as needed
}
ManagedZonesGetCall interface {
// Context(ctx context.Context) *ManagedZonesGetCall // TODO: Add as needed
Do(opts ...googleapi.CallOption) (ManagedZone, error)
// Fields(s ...googleapi.Field) *ManagedZonesGetCall // TODO: Add as needed
// IfNoneMatch(entityTag string) *ManagedZonesGetCall // TODO: Add as needed
}
ManagedZonesListCall interface {
// Context(ctx context.Context) *ManagedZonesListCall // TODO: Add as needed
DnsName(dnsName string) ManagedZonesListCall
Do(opts ...googleapi.CallOption) (ManagedZonesListResponse, error)
// Fields(s ...googleapi.Field) *ManagedZonesListCall // TODO: Add as needed
// IfNoneMatch(entityTag string) *ManagedZonesListCall // TODO: Add as needed
// MaxResults(maxResults int64) *ManagedZonesListCall // TODO: Add as needed
// PageToken(pageToken string) *ManagedZonesListCall // TODO: Add as needed
// Pages(ctx context.Context, f func(*ManagedZonesListResponse) error) error // TODO: Add as needed
}
ManagedZonesListResponse interface {
// Kind() string // TODO: Add as needed
// ManagedZones() []*ManagedZone // TODO: Add as needed
ManagedZones() []ManagedZone
// NextPageToken string // TODO: Add as needed
// ServerResponse() googleapi.ServerResponse // TODO: Add as needed
// ForceSendFields() []string // TODO: Add as needed
}
ManagedZonesService interface {
// NewManagedZonesService(s *Service) *ManagedZonesService // TODO: Add to service if needed
Create(project string, managedZone ManagedZone) ManagedZonesCreateCall
Delete(project string, managedZone string) ManagedZonesDeleteCall
Get(project string, managedZone string) ManagedZonesGetCall
List(project string) ManagedZonesListCall
NewManagedZone(dnsName string) ManagedZone
}
Project interface {
// Id() string // TODO: Add as needed
// Kind() string // TODO: Add as needed
// Number() uint64 // TODO: Add as needed
// Quota() *Quota // TODO: Add as needed
// ServerResponse() googleapi.ServerResponse // TODO: Add as needed
// ForceSendFields() []string // TODO: Add as needed
}
ProjectsGetCall interface {
// TODO: Add as needed
}
ProjectsService interface {
// TODO: Add as needed
}
Quota interface {
// TODO: Add as needed
}
ResourceRecordSet interface {
// Kind() string // TODO: Add as needed
Name() string
Rrdatas() []string
Ttl() int64
Type() string
// ForceSendFields []string // TODO: Add as needed
}
ResourceRecordSetsListCall interface {
// Context(ctx context.Context) *ResourceRecordSetsListCall // TODO: Add as needed
Do(opts ...googleapi.CallOption) (ResourceRecordSetsListResponse, error)
Pages(ctx context.Context, f func(ResourceRecordSetsListResponse) error) error
// Fields(s ...googleapi.Field) *ResourceRecordSetsListCall // TODO: Add as needed
// IfNoneMatch(entityTag string) *ResourceRecordSetsListCall // TODO: Add as needed
// MaxResults(maxResults int64) *ResourceRecordSetsListCall // TODO: Add as needed
Name(name string) ResourceRecordSetsListCall
// PageToken(pageToken string) *ResourceRecordSetsListCall // TODO: Add as needed
Type(type_ string) ResourceRecordSetsListCall
}
ResourceRecordSetsListResponse interface {
// Kind() string // TODO: Add as needed
// NextPageToken() string // TODO: Add as needed
Rrsets() []ResourceRecordSet
// ServerResponse() googleapi.ServerResponse // TODO: Add as needed
// ForceSendFields() []string // TODO: Add as needed
}
ResourceRecordSetsService interface {
List(project string, managedZone string) ResourceRecordSetsListCall
// Get returns a list of resources records with the matching name
Get(project, managedZone, name string) ResourceRecordSetsListCall
// NewResourceRecordSetsService(s *Service) *ResourceRecordSetsService // TODO: add to service as needed
NewResourceRecordSet(name string, rrdatas []string, ttl int64, type_ rrstype.RrsType) ResourceRecordSet
}
Service interface {
// BasePath() string // TODO: Add as needed
// UserAgent() string // TODO: Add as needed
Changes() ChangesService
ManagedZones() ManagedZonesService
Projects() ProjectsService
ResourceRecordSets() ResourceRecordSetsService
}
// New(client *http.Client) (*Service, error) // TODO: Add as needed
)
| federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces/interfaces.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.0008832403109408915,
0.00020504268468357623,
0.00016391514509450644,
0.0001680887071415782,
0.00014857039786875248
] |
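The interfaces file above exists so Google Cloud DNS can be stubbed in unit tests; a minimal hypothetical stub of its ResourceRecordSet interface might look like this.
package stubs
import "k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype"
// stubResourceRecordSet is an in-memory fake satisfying the ResourceRecordSet
// interface above; no Google Cloud DNS calls are involved.
type stubResourceRecordSet struct {
name    string
rrdatas []string
ttl     int64
rrsType rrstype.RrsType
}
func (r stubResourceRecordSet) Name() string      { return r.name }
func (r stubResourceRecordSet) Rrdatas() []string { return r.rrdatas }
func (r stubResourceRecordSet) Ttl() int64        { return r.ttl }
func (r stubResourceRecordSet) Type() string      { return string(r.rrsType) }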
{
"id": 10,
"code_window": [
"\tclientset \"k8s.io/client-go/kubernetes\"\n",
")\n",
"\n",
"type UpdateReplicaSetFunc func(d *extensions.ReplicaSet)\n",
"\n",
"func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateReplicaSetFunc, logf LogfFn) (*extensions.ReplicaSet, error) {\n",
"\tvar rs *extensions.ReplicaSet\n",
"\tvar updateErr error\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateReplicaSetFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.ReplicaSet, error) {\n"
],
"file_path": "test/utils/replicaset.go",
"type": "replace",
"edit_start_line_idx": 30
} | # Generate OpenAPI definitions
- To generate definition for a specific type or package add "+k8s:openapi-gen=true" tag to the type/package comment lines.
- To exclude a type or a member from a tagged package/type, add "+k8s:openapi-gen=false" tag to the comment lines.
# OpenAPI Extensions
OpenAPI spec can have extensions on types. To define one or more extensions on a type or its member
add "+k8s:openapi-gen=x-kubernetes-$NAME:$VALUE" to the comment lines before type/member. A type/member can
have multiple extensions. The rest of the line in the comment will be used as $VALUE so there is no need to
escape or quote the value string. Extensions can be use to pass more information to client generators or
documentation generators. For example a type my have a friendly name to be displayed in documentation or
being used in a client's fluent interface.
| staging/src/k8s.io/code-generator/cmd/openapi-gen/README | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.0002189118677051738,
0.0001961699454113841,
0.0001734280085656792,
0.0001961699454113841,
0.0000227419295697473
] |
{
"id": 11,
"code_window": [
"\tvar rs *extensions.ReplicaSet\n",
"\tvar updateErr error\n",
"\tpollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {\n",
"\t\tvar err error\n",
"\t\tif rs, err = c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {\n",
"\t\t\treturn false, err\n",
"\t\t}\n",
"\t\t// Apply the update, then attempt to push it to the apiserver.\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/utils/replicaset.go",
"type": "replace",
"edit_start_line_idx": 33
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"time"
"github.com/davecgh/go-spew/spew"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
)
type LogfFn func(format string, args ...interface{})
func LogReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, logf LogfFn) {
if newRS != nil {
logf(spew.Sprintf("New ReplicaSet %q of Deployment %q:\n%+v", newRS.Name, deployment.Name, *newRS))
} else {
logf("New ReplicaSet of Deployment %q is nil.", deployment.Name)
}
if len(allOldRSs) > 0 {
logf("All old ReplicaSets of Deployment %q:", deployment.Name)
}
for i := range allOldRSs {
logf(spew.Sprintf("%+v", *allOldRSs[i]))
}
}
func LogPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, logf LogfFn) {
minReadySeconds := deployment.Spec.MinReadySeconds
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
return c.Core().Pods(namespace).List(options)
}
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
if err != nil {
logf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)
return
}
for _, pod := range podList.Items {
availability := "not available"
if podutil.IsPodAvailable(&pod, minReadySeconds, metav1.Now()) {
availability = "available"
}
logf(spew.Sprintf("Pod %q is %s:\n%+v", pod.Name, availability, pod))
}
}
// Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore).
// Note that the status should stay valid at all times except shortly after a scaling event or right after the deployment is created.
// To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead.
func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
var (
oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
newRS *extensions.ReplicaSet
deployment *extensions.Deployment
reason string
)
err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
if err != nil {
return false, err
}
if newRS == nil {
// New RS hasn't been created yet.
reason = "new replica set hasn't been created yet"
logf(reason)
return false, nil
}
allRSs = append(oldRSs, newRS)
// The old/new ReplicaSets need to contain the pod-template-hash label
for i := range allRSs {
if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
reason = "all replica sets need to contain the pod-template-hash label"
logf(reason)
return false, nil
}
}
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
if totalCreated > maxCreated {
reason = fmt.Sprintf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
logf(reason)
return false, nil
}
minAvailable := deploymentutil.MinAvailable(deployment)
if deployment.Status.AvailableReplicas < minAvailable {
reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
logf(reason)
return false, nil
}
// When the deployment status and its underlying resources reach the desired state, we're done
if deploymentutil.DeploymentComplete(deployment, &deployment.Status) {
return true, nil
}
reason = fmt.Sprintf("deployment status: %#v", deployment.Status)
logf(reason)
return false, nil
})
if err == wait.ErrWaitTimeout {
LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, logf)
LogPodsOfDeployment(c, deployment, allRSs, logf)
err = fmt.Errorf("%s", reason)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
}
return nil
}
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that the deployment revision and its new RS revision should be updated shortly, so we only wait for a short poll timeout here to fail early.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
var deployment *extensions.Deployment
var newRS *extensions.ReplicaSet
var reason string
err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
// The new ReplicaSet needs to be non-nil and contain the pod-template-hash label
newRS, err = deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
if err != nil {
return false, err
}
if newRS == nil {
reason = fmt.Sprintf("New replica set for deployment %q is yet to be created", deployment.Name)
logf(reason)
return false, nil
}
if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
reason = fmt.Sprintf("New replica set %q doesn't have DefaultDeploymentUniqueLabelKey", newRS.Name)
logf(reason)
return false, nil
}
// Check revision of this deployment, and of the new replica set of this deployment
if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision {
reason = fmt.Sprintf("Deployment %q doesn't have the required revision set", deployment.Name)
logf(reason)
return false, nil
}
if !containsImage(deployment.Spec.Template.Spec.Containers, image) {
reason = fmt.Sprintf("Deployment %q doesn't have the required image %s set", deployment.Name, image)
logf(reason)
return false, nil
}
if newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision {
reason = fmt.Sprintf("New replica set %q doesn't have the required revision set", newRS.Name)
logf(reason)
return false, nil
}
if !containsImage(newRS.Spec.Template.Spec.Containers, image) {
reason = fmt.Sprintf("New replica set %q doesn't have the required image %s.", newRS.Name, image)
logf(reason)
return false, nil
}
return true, nil
})
if err == wait.ErrWaitTimeout {
LogReplicaSetsOfDeployment(deployment, nil, newRS, logf)
err = fmt.Errorf("%s", reason)
}
if newRS == nil {
return fmt.Errorf("deployment %q failed to create new replica set", deploymentName)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q (got %s / %s) and new replica set %q (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deployment.Spec.Template.Spec.Containers[0].Image, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRS.Spec.Template.Spec.Containers[0].Image, revision, image, err)
}
return nil
}
func containsImage(containers []v1.Container, imageName string) bool {
for _, container := range containers {
if container.Image == imageName {
return true
}
}
return false
}
type UpdateDeploymentFunc func(d *extensions.Deployment)
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn) (*extensions.Deployment, error) {
var deployment *extensions.Deployment
var updateErr error
pollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
var err error
if deployment, err = c.Extensions().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(deployment)
if deployment, err = c.Extensions().Deployments(namespace).Update(deployment); err == nil {
logf("Updating deployment %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided update to deployment %q: %v", name, updateErr)
}
return deployment, pollErr
}
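// A hypothetical sketch of how UpdateDeploymentWithRetries composes with
// WaitForObservedDeployment (defined below); c, ns, and the deployment name
// "webserver" are placeholders supplied by the caller, e.g. a test with
// t *testing.T:
//
//	d, err := UpdateDeploymentWithRetries(c, ns, "webserver", func(d *extensions.Deployment) {
//		d.Spec.Template.Spec.Containers[0].Image = "nginx:1.13"
//	}, t.Logf)
//	if err == nil {
//		err = WaitForObservedDeployment(c, ns, "webserver", d.Generation)
//	}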
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
return c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
}, desiredGeneration, 2*time.Second, 1*time.Minute)
}
| test/utils/deployment.go | 1 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.9943718910217285,
0.20541109144687653,
0.00016515154857188463,
0.0004076203622389585,
0.37909266352653503
] |
{
"id": 11,
"code_window": [
"\tvar rs *extensions.ReplicaSet\n",
"\tvar updateErr error\n",
"\tpollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {\n",
"\t\tvar err error\n",
"\t\tif rs, err = c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {\n",
"\t\t\treturn false, err\n",
"\t\t}\n",
"\t\t// Apply the update, then attempt to push it to the apiserver.\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/utils/replicaset.go",
"type": "replace",
"edit_start_line_idx": 33
} | // +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package v1beta1
import (
core_v1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
reflect "reflect"
)
func init() {
SchemeBuilder.Register(RegisterDeepCopies)
}
// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
// to allow building arbitrary schemes.
//
// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented.
func RegisterDeepCopies(scheme *runtime.Scheme) error {
return scheme.AddGeneratedDeepCopyFuncs(
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*APIVersion).DeepCopyInto(out.(*APIVersion))
return nil
}, InType: reflect.TypeOf(&APIVersion{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*AllowedHostPath).DeepCopyInto(out.(*AllowedHostPath))
return nil
}, InType: reflect.TypeOf(&AllowedHostPath{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*CustomMetricCurrentStatus).DeepCopyInto(out.(*CustomMetricCurrentStatus))
return nil
}, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*CustomMetricCurrentStatusList).DeepCopyInto(out.(*CustomMetricCurrentStatusList))
return nil
}, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*CustomMetricTarget).DeepCopyInto(out.(*CustomMetricTarget))
return nil
}, InType: reflect.TypeOf(&CustomMetricTarget{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*CustomMetricTargetList).DeepCopyInto(out.(*CustomMetricTargetList))
return nil
}, InType: reflect.TypeOf(&CustomMetricTargetList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet))
return nil
}, InType: reflect.TypeOf(&DaemonSet{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList))
return nil
}, InType: reflect.TypeOf(&DaemonSetList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec))
return nil
}, InType: reflect.TypeOf(&DaemonSetSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus))
return nil
}, InType: reflect.TypeOf(&DaemonSetStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy))
return nil
}, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Deployment).DeepCopyInto(out.(*Deployment))
return nil
}, InType: reflect.TypeOf(&Deployment{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition))
return nil
}, InType: reflect.TypeOf(&DeploymentCondition{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList))
return nil
}, InType: reflect.TypeOf(&DeploymentList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DeploymentRollback).DeepCopyInto(out.(*DeploymentRollback))
return nil
}, InType: reflect.TypeOf(&DeploymentRollback{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec))
return nil
}, InType: reflect.TypeOf(&DeploymentSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus))
return nil
}, InType: reflect.TypeOf(&DeploymentStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy))
return nil
}, InType: reflect.TypeOf(&DeploymentStrategy{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*FSGroupStrategyOptions).DeepCopyInto(out.(*FSGroupStrategyOptions))
return nil
}, InType: reflect.TypeOf(&FSGroupStrategyOptions{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*HTTPIngressPath).DeepCopyInto(out.(*HTTPIngressPath))
return nil
}, InType: reflect.TypeOf(&HTTPIngressPath{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*HTTPIngressRuleValue).DeepCopyInto(out.(*HTTPIngressRuleValue))
return nil
}, InType: reflect.TypeOf(&HTTPIngressRuleValue{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*HostPortRange).DeepCopyInto(out.(*HostPortRange))
return nil
}, InType: reflect.TypeOf(&HostPortRange{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IDRange).DeepCopyInto(out.(*IDRange))
return nil
}, InType: reflect.TypeOf(&IDRange{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IPBlock).DeepCopyInto(out.(*IPBlock))
return nil
}, InType: reflect.TypeOf(&IPBlock{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Ingress).DeepCopyInto(out.(*Ingress))
return nil
}, InType: reflect.TypeOf(&Ingress{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IngressBackend).DeepCopyInto(out.(*IngressBackend))
return nil
}, InType: reflect.TypeOf(&IngressBackend{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IngressList).DeepCopyInto(out.(*IngressList))
return nil
}, InType: reflect.TypeOf(&IngressList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IngressRule).DeepCopyInto(out.(*IngressRule))
return nil
}, InType: reflect.TypeOf(&IngressRule{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IngressRuleValue).DeepCopyInto(out.(*IngressRuleValue))
return nil
}, InType: reflect.TypeOf(&IngressRuleValue{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IngressSpec).DeepCopyInto(out.(*IngressSpec))
return nil
}, InType: reflect.TypeOf(&IngressSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IngressStatus).DeepCopyInto(out.(*IngressStatus))
return nil
}, InType: reflect.TypeOf(&IngressStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IngressTLS).DeepCopyInto(out.(*IngressTLS))
return nil
}, InType: reflect.TypeOf(&IngressTLS{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*NetworkPolicy).DeepCopyInto(out.(*NetworkPolicy))
return nil
}, InType: reflect.TypeOf(&NetworkPolicy{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*NetworkPolicyEgressRule).DeepCopyInto(out.(*NetworkPolicyEgressRule))
return nil
}, InType: reflect.TypeOf(&NetworkPolicyEgressRule{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*NetworkPolicyIngressRule).DeepCopyInto(out.(*NetworkPolicyIngressRule))
return nil
}, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*NetworkPolicyList).DeepCopyInto(out.(*NetworkPolicyList))
return nil
}, InType: reflect.TypeOf(&NetworkPolicyList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*NetworkPolicyPeer).DeepCopyInto(out.(*NetworkPolicyPeer))
return nil
}, InType: reflect.TypeOf(&NetworkPolicyPeer{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*NetworkPolicyPort).DeepCopyInto(out.(*NetworkPolicyPort))
return nil
}, InType: reflect.TypeOf(&NetworkPolicyPort{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*NetworkPolicySpec).DeepCopyInto(out.(*NetworkPolicySpec))
return nil
}, InType: reflect.TypeOf(&NetworkPolicySpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*PodSecurityPolicy).DeepCopyInto(out.(*PodSecurityPolicy))
return nil
}, InType: reflect.TypeOf(&PodSecurityPolicy{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*PodSecurityPolicyList).DeepCopyInto(out.(*PodSecurityPolicyList))
return nil
}, InType: reflect.TypeOf(&PodSecurityPolicyList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*PodSecurityPolicySpec).DeepCopyInto(out.(*PodSecurityPolicySpec))
return nil
}, InType: reflect.TypeOf(&PodSecurityPolicySpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ReplicaSet).DeepCopyInto(out.(*ReplicaSet))
return nil
}, InType: reflect.TypeOf(&ReplicaSet{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ReplicaSetCondition).DeepCopyInto(out.(*ReplicaSetCondition))
return nil
}, InType: reflect.TypeOf(&ReplicaSetCondition{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ReplicaSetList).DeepCopyInto(out.(*ReplicaSetList))
return nil
}, InType: reflect.TypeOf(&ReplicaSetList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ReplicaSetSpec).DeepCopyInto(out.(*ReplicaSetSpec))
return nil
}, InType: reflect.TypeOf(&ReplicaSetSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ReplicaSetStatus).DeepCopyInto(out.(*ReplicaSetStatus))
return nil
}, InType: reflect.TypeOf(&ReplicaSetStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ReplicationControllerDummy).DeepCopyInto(out.(*ReplicationControllerDummy))
return nil
}, InType: reflect.TypeOf(&ReplicationControllerDummy{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*RollbackConfig).DeepCopyInto(out.(*RollbackConfig))
return nil
}, InType: reflect.TypeOf(&RollbackConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet))
return nil
}, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment))
return nil
}, InType: reflect.TypeOf(&RollingUpdateDeployment{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*RunAsUserStrategyOptions).DeepCopyInto(out.(*RunAsUserStrategyOptions))
return nil
}, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*SELinuxStrategyOptions).DeepCopyInto(out.(*SELinuxStrategyOptions))
return nil
}, InType: reflect.TypeOf(&SELinuxStrategyOptions{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Scale).DeepCopyInto(out.(*Scale))
return nil
}, InType: reflect.TypeOf(&Scale{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec))
return nil
}, InType: reflect.TypeOf(&ScaleSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus))
return nil
}, InType: reflect.TypeOf(&ScaleStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*SupplementalGroupsStrategyOptions).DeepCopyInto(out.(*SupplementalGroupsStrategyOptions))
return nil
}, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ThirdPartyResource).DeepCopyInto(out.(*ThirdPartyResource))
return nil
}, InType: reflect.TypeOf(&ThirdPartyResource{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ThirdPartyResourceData).DeepCopyInto(out.(*ThirdPartyResourceData))
return nil
}, InType: reflect.TypeOf(&ThirdPartyResourceData{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ThirdPartyResourceDataList).DeepCopyInto(out.(*ThirdPartyResourceDataList))
return nil
}, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ThirdPartyResourceList).DeepCopyInto(out.(*ThirdPartyResourceList))
return nil
}, InType: reflect.TypeOf(&ThirdPartyResourceList{})},
)
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APIVersion) DeepCopyInto(out *APIVersion) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersion.
func (in *APIVersion) DeepCopy() *APIVersion {
if in == nil {
return nil
}
out := new(APIVersion)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath.
func (in *AllowedHostPath) DeepCopy() *AllowedHostPath {
if in == nil {
return nil
}
out := new(AllowedHostPath)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomMetricCurrentStatus) DeepCopyInto(out *CustomMetricCurrentStatus) {
*out = *in
out.CurrentValue = in.CurrentValue.DeepCopy()
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatus.
func (in *CustomMetricCurrentStatus) DeepCopy() *CustomMetricCurrentStatus {
if in == nil {
return nil
}
out := new(CustomMetricCurrentStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomMetricCurrentStatusList) DeepCopyInto(out *CustomMetricCurrentStatusList) {
*out = *in
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CustomMetricCurrentStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatusList.
func (in *CustomMetricCurrentStatusList) DeepCopy() *CustomMetricCurrentStatusList {
if in == nil {
return nil
}
out := new(CustomMetricCurrentStatusList)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomMetricTarget) DeepCopyInto(out *CustomMetricTarget) {
*out = *in
out.TargetValue = in.TargetValue.DeepCopy()
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTarget.
func (in *CustomMetricTarget) DeepCopy() *CustomMetricTarget {
if in == nil {
return nil
}
out := new(CustomMetricTarget)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomMetricTargetList) DeepCopyInto(out *CustomMetricTargetList) {
*out = *in
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CustomMetricTarget, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTargetList.
func (in *CustomMetricTargetList) DeepCopy() *CustomMetricTargetList {
if in == nil {
return nil
}
out := new(CustomMetricTargetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet.
func (in *DaemonSet) DeepCopy() *DaemonSet {
if in == nil {
return nil
}
out := new(DaemonSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DaemonSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DaemonSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList.
func (in *DaemonSetList) DeepCopy() *DaemonSetList {
if in == nil {
return nil
}
out := new(DaemonSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DaemonSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
in.Template.DeepCopyInto(&out.Template)
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec.
func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec {
if in == nil {
return nil
}
out := new(DaemonSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) {
*out = *in
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus.
func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus {
if in == nil {
return nil
}
out := new(DaemonSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
if *in == nil {
*out = nil
} else {
*out = new(RollingUpdateDaemonSet)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy.
func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy {
if in == nil {
return nil
}
out := new(DaemonSetUpdateStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Deployment) DeepCopyInto(out *Deployment) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment.
func (in *Deployment) DeepCopy() *Deployment {
if in == nil {
return nil
}
out := new(Deployment)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Deployment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
*out = *in
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
if in == nil {
return nil
}
out := new(DeploymentCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Deployment, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList.
func (in *DeploymentList) DeepCopy() *DeploymentList {
if in == nil {
return nil
}
out := new(DeploymentList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.UpdatedAnnotations != nil {
in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.RollbackTo = in.RollbackTo
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback.
func (in *DeploymentRollback) DeepCopy() *DeploymentRollback {
if in == nil {
return nil
}
out := new(DeploymentRollback)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentRollback) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
*out = *in
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
in.Template.DeepCopyInto(&out.Template)
in.Strategy.DeepCopyInto(&out.Strategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.RollbackTo != nil {
in, out := &in.RollbackTo, &out.RollbackTo
if *in == nil {
*out = nil
} else {
*out = new(RollbackConfig)
**out = **in
}
}
if in.ProgressDeadlineSeconds != nil {
in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
if in == nil {
return nil
}
out := new(DeploymentSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DeploymentCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus.
func (in *DeploymentStatus) DeepCopy() *DeploymentStatus {
if in == nil {
return nil
}
out := new(DeploymentStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
if *in == nil {
*out = nil
} else {
*out = new(RollingUpdateDeployment)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
if in == nil {
return nil
}
out := new(DeploymentStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions.
func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions {
if in == nil {
return nil
}
out := new(FSGroupStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) {
*out = *in
out.Backend = in.Backend
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressPath.
func (in *HTTPIngressPath) DeepCopy() *HTTPIngressPath {
if in == nil {
return nil
}
out := new(HTTPIngressPath)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) {
*out = *in
if in.Paths != nil {
in, out := &in.Paths, &out.Paths
*out = make([]HTTPIngressPath, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressRuleValue.
func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue {
if in == nil {
return nil
}
out := new(HTTPIngressRuleValue)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostPortRange) DeepCopyInto(out *HostPortRange) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange.
func (in *HostPortRange) DeepCopy() *HostPortRange {
if in == nil {
return nil
}
out := new(HostPortRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IDRange) DeepCopyInto(out *IDRange) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange.
func (in *IDRange) DeepCopy() *IDRange {
if in == nil {
return nil
}
out := new(IDRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPBlock) DeepCopyInto(out *IPBlock) {
*out = *in
if in.Except != nil {
in, out := &in.Except, &out.Except
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPBlock.
func (in *IPBlock) DeepCopy() *IPBlock {
if in == nil {
return nil
}
out := new(IPBlock)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Ingress) DeepCopyInto(out *Ingress) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress.
func (in *Ingress) DeepCopy() *Ingress {
if in == nil {
return nil
}
out := new(Ingress)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Ingress) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressBackend) DeepCopyInto(out *IngressBackend) {
*out = *in
out.ServicePort = in.ServicePort
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressBackend.
func (in *IngressBackend) DeepCopy() *IngressBackend {
if in == nil {
return nil
}
out := new(IngressBackend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressList) DeepCopyInto(out *IngressList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Ingress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList.
func (in *IngressList) DeepCopy() *IngressList {
if in == nil {
return nil
}
out := new(IngressList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IngressList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressRule) DeepCopyInto(out *IngressRule) {
*out = *in
in.IngressRuleValue.DeepCopyInto(&out.IngressRuleValue)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule.
func (in *IngressRule) DeepCopy() *IngressRule {
if in == nil {
return nil
}
out := new(IngressRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressRuleValue) DeepCopyInto(out *IngressRuleValue) {
*out = *in
if in.HTTP != nil {
in, out := &in.HTTP, &out.HTTP
if *in == nil {
*out = nil
} else {
*out = new(HTTPIngressRuleValue)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRuleValue.
func (in *IngressRuleValue) DeepCopy() *IngressRuleValue {
if in == nil {
return nil
}
out := new(IngressRuleValue)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
*out = *in
if in.Backend != nil {
in, out := &in.Backend, &out.Backend
if *in == nil {
*out = nil
} else {
*out = new(IngressBackend)
**out = **in
}
}
if in.TLS != nil {
in, out := &in.TLS, &out.TLS
*out = make([]IngressTLS, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]IngressRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec.
func (in *IngressSpec) DeepCopy() *IngressSpec {
if in == nil {
return nil
}
out := new(IngressSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressStatus) DeepCopyInto(out *IngressStatus) {
*out = *in
in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus.
func (in *IngressStatus) DeepCopy() *IngressStatus {
if in == nil {
return nil
}
out := new(IngressStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressTLS) DeepCopyInto(out *IngressTLS) {
*out = *in
if in.Hosts != nil {
in, out := &in.Hosts, &out.Hosts
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLS.
func (in *IngressTLS) DeepCopy() *IngressTLS {
if in == nil {
return nil
}
out := new(IngressTLS)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy.
func (in *NetworkPolicy) DeepCopy() *NetworkPolicy {
if in == nil {
return nil
}
out := new(NetworkPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NetworkPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyEgressRule) DeepCopyInto(out *NetworkPolicyEgressRule) {
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]NetworkPolicyPort, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.To != nil {
in, out := &in.To, &out.To
*out = make([]NetworkPolicyPeer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyEgressRule.
func (in *NetworkPolicyEgressRule) DeepCopy() *NetworkPolicyEgressRule {
if in == nil {
return nil
}
out := new(NetworkPolicyEgressRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyIngressRule) DeepCopyInto(out *NetworkPolicyIngressRule) {
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]NetworkPolicyPort, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.From != nil {
in, out := &in.From, &out.From
*out = make([]NetworkPolicyPeer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyIngressRule.
func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule {
if in == nil {
return nil
}
out := new(NetworkPolicyIngressRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]NetworkPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyList.
func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList {
if in == nil {
return nil
}
out := new(NetworkPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NetworkPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) {
*out = *in
if in.PodSelector != nil {
in, out := &in.PodSelector, &out.PodSelector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
if in.NamespaceSelector != nil {
in, out := &in.NamespaceSelector, &out.NamespaceSelector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
if in.IPBlock != nil {
in, out := &in.IPBlock, &out.IPBlock
if *in == nil {
*out = nil
} else {
*out = new(IPBlock)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPeer.
func (in *NetworkPolicyPeer) DeepCopy() *NetworkPolicyPeer {
if in == nil {
return nil
}
out := new(NetworkPolicyPeer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) {
*out = *in
if in.Protocol != nil {
in, out := &in.Protocol, &out.Protocol
if *in == nil {
*out = nil
} else {
*out = new(core_v1.Protocol)
**out = **in
}
}
if in.Port != nil {
in, out := &in.Port, &out.Port
if *in == nil {
*out = nil
} else {
*out = new(intstr.IntOrString)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPort.
func (in *NetworkPolicyPort) DeepCopy() *NetworkPolicyPort {
if in == nil {
return nil
}
out := new(NetworkPolicyPort)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) {
*out = *in
in.PodSelector.DeepCopyInto(&out.PodSelector)
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = make([]NetworkPolicyIngressRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Egress != nil {
in, out := &in.Egress, &out.Egress
*out = make([]NetworkPolicyEgressRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.PolicyTypes != nil {
in, out := &in.PolicyTypes, &out.PolicyTypes
*out = make([]PolicyType, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicySpec.
func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
if in == nil {
return nil
}
out := new(NetworkPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy.
func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy {
if in == nil {
return nil
}
out := new(PodSecurityPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodSecurityPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList.
func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList {
if in == nil {
return nil
}
out := new(PodSecurityPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) {
*out = *in
if in.DefaultAddCapabilities != nil {
in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities
*out = make([]core_v1.Capability, len(*in))
copy(*out, *in)
}
if in.RequiredDropCapabilities != nil {
in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities
*out = make([]core_v1.Capability, len(*in))
copy(*out, *in)
}
if in.AllowedCapabilities != nil {
in, out := &in.AllowedCapabilities, &out.AllowedCapabilities
*out = make([]core_v1.Capability, len(*in))
copy(*out, *in)
}
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]FSType, len(*in))
copy(*out, *in)
}
if in.HostPorts != nil {
in, out := &in.HostPorts, &out.HostPorts
*out = make([]HostPortRange, len(*in))
copy(*out, *in)
}
in.SELinux.DeepCopyInto(&out.SELinux)
in.RunAsUser.DeepCopyInto(&out.RunAsUser)
in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups)
in.FSGroup.DeepCopyInto(&out.FSGroup)
if in.DefaultAllowPrivilegeEscalation != nil {
in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation
if *in == nil {
*out = nil
} else {
*out = new(bool)
**out = **in
}
}
if in.AllowedHostPaths != nil {
in, out := &in.AllowedHostPaths, &out.AllowedHostPaths
*out = make([]AllowedHostPath, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec.
func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec {
if in == nil {
return nil
}
out := new(PodSecurityPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet.
func (in *ReplicaSet) DeepCopy() *ReplicaSet {
if in == nil {
return nil
}
out := new(ReplicaSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReplicaSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition.
func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition {
if in == nil {
return nil
}
out := new(ReplicaSetCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ReplicaSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList.
func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
if in == nil {
return nil
}
out := new(ReplicaSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) {
*out = *in
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
in.Template.DeepCopyInto(&out.Template)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec.
func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
if in == nil {
return nil
}
out := new(ReplicaSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]ReplicaSetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus.
func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus {
if in == nil {
return nil
}
out := new(ReplicaSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicationControllerDummy) DeepCopyInto(out *ReplicationControllerDummy) {
*out = *in
out.TypeMeta = in.TypeMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerDummy.
func (in *ReplicationControllerDummy) DeepCopy() *ReplicationControllerDummy {
if in == nil {
return nil
}
out := new(ReplicationControllerDummy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReplicationControllerDummy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig.
func (in *RollbackConfig) DeepCopy() *RollbackConfig {
if in == nil {
return nil
}
out := new(RollbackConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) {
*out = *in
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
if *in == nil {
*out = nil
} else {
*out = new(intstr.IntOrString)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet.
func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet {
if in == nil {
return nil
}
out := new(RollingUpdateDaemonSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) {
*out = *in
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
if *in == nil {
*out = nil
} else {
*out = new(intstr.IntOrString)
**out = **in
}
}
if in.MaxSurge != nil {
in, out := &in.MaxSurge, &out.MaxSurge
if *in == nil {
*out = nil
} else {
*out = new(intstr.IntOrString)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment.
func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment {
if in == nil {
return nil
}
out := new(RollingUpdateDeployment)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions.
func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions {
if in == nil {
return nil
}
out := new(RunAsUserStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) {
*out = *in
if in.SELinuxOptions != nil {
in, out := &in.SELinuxOptions, &out.SELinuxOptions
if *in == nil {
*out = nil
} else {
*out = new(core_v1.SELinuxOptions)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions.
func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions {
if in == nil {
return nil
}
out := new(SELinuxStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scale) DeepCopyInto(out *Scale) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale.
func (in *Scale) DeepCopy() *Scale {
if in == nil {
return nil
}
out := new(Scale)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Scale) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec.
func (in *ScaleSpec) DeepCopy() *ScaleSpec {
if in == nil {
return nil
}
out := new(ScaleSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus.
func (in *ScaleStatus) DeepCopy() *ScaleStatus {
if in == nil {
return nil
}
out := new(ScaleStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions.
func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions {
if in == nil {
return nil
}
out := new(SupplementalGroupsStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ThirdPartyResource) DeepCopyInto(out *ThirdPartyResource) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Versions != nil {
in, out := &in.Versions, &out.Versions
*out = make([]APIVersion, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResource.
func (in *ThirdPartyResource) DeepCopy() *ThirdPartyResource {
if in == nil {
return nil
}
out := new(ThirdPartyResource)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ThirdPartyResource) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ThirdPartyResourceData) DeepCopyInto(out *ThirdPartyResourceData) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Data != nil {
in, out := &in.Data, &out.Data
*out = make([]byte, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceData.
func (in *ThirdPartyResourceData) DeepCopy() *ThirdPartyResourceData {
if in == nil {
return nil
}
out := new(ThirdPartyResourceData)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ThirdPartyResourceData) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ThirdPartyResourceDataList) DeepCopyInto(out *ThirdPartyResourceDataList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ThirdPartyResourceData, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceDataList.
func (in *ThirdPartyResourceDataList) DeepCopy() *ThirdPartyResourceDataList {
if in == nil {
return nil
}
out := new(ThirdPartyResourceDataList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ThirdPartyResourceDataList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ThirdPartyResourceList) DeepCopyInto(out *ThirdPartyResourceList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ThirdPartyResource, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceList.
func (in *ThirdPartyResourceList) DeepCopy() *ThirdPartyResourceList {
if in == nil {
return nil
}
out := new(ThirdPartyResourceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ThirdPartyResourceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
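// exampleReplicaSetDeepCopy is an illustrative sketch, not part of the
// generated file: it shows what these generated helpers buy you. DeepCopy
// duplicates pointer fields such as Spec.Replicas, so mutating the copy
// never leaks into the original.
func exampleReplicaSetDeepCopy() (orig, copied int32) {
	replicas := int32(3)
	rs := &ReplicaSet{Spec: ReplicaSetSpec{Replicas: &replicas}}
	cp := rs.DeepCopy()
	*cp.Spec.Replicas = 5 // only the copy changes
	return *rs.Spec.Replicas, *cp.Spec.Replicas // returns 3, 5
}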
| staging/src/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.004693495575338602,
0.000333419069647789,
0.00016210514877457172,
0.00016963211237452924,
0.000587541377171874
] |
{
"id": 11,
"code_window": [
"\tvar rs *extensions.ReplicaSet\n",
"\tvar updateErr error\n",
"\tpollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {\n",
"\t\tvar err error\n",
"\t\tif rs, err = c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {\n",
"\t\t\treturn false, err\n",
"\t\t}\n",
"\t\t// Apply the update, then attempt to push it to the apiserver.\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/utils/replicaset.go",
"type": "replace",
"edit_start_line_idx": 33
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
package internalversion
| staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.00017791675054468215,
0.00017785823729354888,
0.00017779972404241562,
0.00017785823729354888,
5.851325113326311e-8
] |
{
"id": 11,
"code_window": [
"\tvar rs *extensions.ReplicaSet\n",
"\tvar updateErr error\n",
"\tpollErr := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {\n",
"\t\tvar err error\n",
"\t\tif rs, err = c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {\n",
"\t\t\treturn false, err\n",
"\t\t}\n",
"\t\t// Apply the update, then attempt to push it to the apiserver.\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n"
],
"file_path": "test/utils/replicaset.go",
"type": "replace",
"edit_start_line_idx": 33
} | // Copyright 2013 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package googleapi
import (
"encoding/json"
"errors"
"strconv"
)
// Int64s is a slice of int64s that marshal as quoted strings in JSON.
type Int64s []int64
func (q *Int64s) UnmarshalJSON(raw []byte) error {
*q = (*q)[:0]
var ss []string
if err := json.Unmarshal(raw, &ss); err != nil {
return err
}
for _, s := range ss {
v, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return err
}
*q = append(*q, int64(v))
}
return nil
}
// Int32s is a slice of int32s that marshal as quoted strings in JSON.
type Int32s []int32
func (q *Int32s) UnmarshalJSON(raw []byte) error {
*q = (*q)[:0]
var ss []string
if err := json.Unmarshal(raw, &ss); err != nil {
return err
}
for _, s := range ss {
v, err := strconv.ParseInt(s, 10, 32)
if err != nil {
return err
}
*q = append(*q, int32(v))
}
return nil
}
// Uint64s is a slice of uint64s that marshal as quoted strings in JSON.
type Uint64s []uint64
func (q *Uint64s) UnmarshalJSON(raw []byte) error {
*q = (*q)[:0]
var ss []string
if err := json.Unmarshal(raw, &ss); err != nil {
return err
}
for _, s := range ss {
v, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return err
}
*q = append(*q, uint64(v))
}
return nil
}
// Uint32s is a slice of uint32s that marshal as quoted strings in JSON.
type Uint32s []uint32
func (q *Uint32s) UnmarshalJSON(raw []byte) error {
*q = (*q)[:0]
var ss []string
if err := json.Unmarshal(raw, &ss); err != nil {
return err
}
for _, s := range ss {
v, err := strconv.ParseUint(s, 10, 32)
if err != nil {
return err
}
*q = append(*q, uint32(v))
}
return nil
}
// Float64s is a slice of float64s that marshal as quoted strings in JSON.
type Float64s []float64
func (q *Float64s) UnmarshalJSON(raw []byte) error {
*q = (*q)[:0]
var ss []string
if err := json.Unmarshal(raw, &ss); err != nil {
return err
}
for _, s := range ss {
v, err := strconv.ParseFloat(s, 64)
if err != nil {
return err
}
*q = append(*q, float64(v))
}
return nil
}
func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) {
dst := make([]byte, 0, 2+n*10) // somewhat arbitrary
dst = append(dst, '[')
for i := 0; i < n; i++ {
if i > 0 {
dst = append(dst, ',')
}
dst = append(dst, '"')
dst = fn(dst, i)
dst = append(dst, '"')
}
dst = append(dst, ']')
return dst, nil
}
func (s Int64s) MarshalJSON() ([]byte, error) {
return quotedList(len(s), func(dst []byte, i int) []byte {
return strconv.AppendInt(dst, s[i], 10)
})
}
func (s Int32s) MarshalJSON() ([]byte, error) {
return quotedList(len(s), func(dst []byte, i int) []byte {
return strconv.AppendInt(dst, int64(s[i]), 10)
})
}
func (s Uint64s) MarshalJSON() ([]byte, error) {
return quotedList(len(s), func(dst []byte, i int) []byte {
return strconv.AppendUint(dst, s[i], 10)
})
}
func (s Uint32s) MarshalJSON() ([]byte, error) {
return quotedList(len(s), func(dst []byte, i int) []byte {
return strconv.AppendUint(dst, uint64(s[i]), 10)
})
}
func (s Float64s) MarshalJSON() ([]byte, error) {
return quotedList(len(s), func(dst []byte, i int) []byte {
return strconv.AppendFloat(dst, s[i], 'g', -1, 64)
})
}
// RawMessage is a raw encoded JSON value.
// It is identical to json.RawMessage, except it does not suffer from
// https://golang.org/issue/14493.
type RawMessage []byte
// MarshalJSON returns m.
func (m RawMessage) MarshalJSON() ([]byte, error) {
return m, nil
}
// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
if m == nil {
return errors.New("googleapi.RawMessage: UnmarshalJSON on nil pointer")
}
*m = append((*m)[:0], data...)
return nil
}
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool { return &v }
// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 { return &v }
// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 { return &v }
// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 { return &v }
// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 { return &v }
// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 { return &v }
// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string { return &v }
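// exampleInt64sRoundTrip is an illustrative sketch, not part of the vendored
// file: Int64s marshals as quoted strings so full 64-bit values survive JSON
// consumers that parse numbers as float64.
func exampleInt64sRoundTrip() ([]byte, Int64s, error) {
	ids := Int64s{1, 9007199254740993}
	b, err := json.Marshal(ids) // produces ["1","9007199254740993"]
	if err != nil {
		return nil, nil, err
	}
	var out Int64s
	err = json.Unmarshal(b, &out) // round-trips back to the original int64 values
	return b, out, err
}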
| vendor/google.golang.org/api/googleapi/types.go | 0 | https://github.com/kubernetes/kubernetes/commit/24eb21e6cf36da896ae6a410fad8947c129b39b0 | [
0.001450016163289547,
0.00026296210126020014,
0.00016316228720825166,
0.0001695579703664407,
0.000286028312984854
] |
{
"id": 0,
"code_window": [
"import authPage from 'vault/tests/pages/auth';\n",
"import { click, visit, currentURL, fillIn } from '@ember/test-helpers';\n",
"import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';\n",
"\n",
"module('Acceptance | enterprise | sync | destination', function (hooks) {\n",
" setupApplicationTest(hooks);\n",
" setupMirage(hooks);\n",
"\n",
" hooks.beforeEach(async function () {\n",
" syncScenario(this.server);\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// sync is an enterprise feature but since mirage is used the enterprise label has been intentionally omitted from the module name\n",
"module('Acceptance | sync | destination', function (hooks) {\n"
],
"file_path": "ui/tests/acceptance/sync/secrets/destination-test.js",
"type": "replace",
"edit_start_line_idx": 14
} | /**
* Copyright (c) HashiCorp, Inc.
* SPDX-License-Identifier: BUSL-1.1
*/
import { module, test } from 'qunit';
import { setupApplicationTest } from 'ember-qunit';
import { setupMirage } from 'ember-cli-mirage/test-support';
import syncScenario from 'vault/mirage/scenarios/sync';
import syncHandlers from 'vault/mirage/handlers/sync';
import authPage from 'vault/tests/pages/auth';
import { click } from '@ember/test-helpers';
import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';
module('Acceptance | enterprise | sync | destination', function (hooks) {
setupApplicationTest(hooks);
setupMirage(hooks);
hooks.beforeEach(async function () {
syncScenario(this.server);
syncHandlers(this.server);
return authPage.login();
});
test('it should transition to correct routes when performing actions', async function (assert) {
await click(ts.navLink('Secrets Sync'));
await click(ts.destinations.list.create);
await click(ts.createCancel);
await click(ts.overviewCard.actionLink('Create new'));
await click(ts.createCancel);
await click(ts.overview.table.actionToggle(0));
await click(ts.overview.table.action('sync'));
await click(ts.destinations.sync.cancel);
await click(ts.breadcrumbLink('Secrets Sync'));
await click(ts.overview.table.actionToggle(0));
await click(ts.overview.table.action('details'));
assert.dom(ts.tab('Secrets')).hasClass('active', 'Navigates to secrets view for destination');
});
});
| ui/tests/acceptance/sync/secrets/overview-test.js | 1 | https://github.com/hashicorp/vault/commit/87857e61f547e58d8bfe24b72e4844ba675da1f3 | [
0.9954173564910889,
0.25007614493370056,
0.00017789418052416295,
0.002354671712964773,
0.4303240478038788
] |
{
"id": 0,
"code_window": [
"import authPage from 'vault/tests/pages/auth';\n",
"import { click, visit, currentURL, fillIn } from '@ember/test-helpers';\n",
"import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';\n",
"\n",
"module('Acceptance | enterprise | sync | destination', function (hooks) {\n",
" setupApplicationTest(hooks);\n",
" setupMirage(hooks);\n",
"\n",
" hooks.beforeEach(async function () {\n",
" syncScenario(this.server);\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// sync is an enterprise feature but since mirage is used the enterprise label has been intentionally omitted from the module name\n",
"module('Acceptance | sync | destination', function (hooks) {\n"
],
"file_path": "ui/tests/acceptance/sync/secrets/destination-test.js",
"type": "replace",
"edit_start_line_idx": 14
} | ---
layout: docs
page_title: Feature Deprecation FAQ
sidebar_title: FAQ
description: |-
An FAQ page to communicate frequently asked questions concerning feature deprecations.
---
# Feature deprecation FAQ
This page provides frequently asked questions concerning decisions made about Vault feature deprecations. If you are looking for information about Vault licensing, refer to the [Licensing FAQ](/vault/docs/enterprise/license/faq) page. Pleaser refer to the [Feature Deprecation Notice and Plans](/vault/docs/deprecation) document for up-to-date information on Vault feature deprecations and notice.
- [Q: What is the impact on anyone using the legacy MFA feature?](#q-what-is-the-impact-on-anyone-using-the-legacy-mfa-feature)
- [Q: I'm currently using the Etcd storage backend feature. How does the deprecation impact me?](#q-i-m-currently-using-the-etcd-storage-backend-feature-how-does-the-deprecation-impact-me)
- [Q: What should I do if I use Mount Filters, AppID, or any of the standalone DB engines?](#q-what-should-i-do-if-i-use-mount-filters-appid-or-any-of-the-standalone-db-engines)
- [Q: What is the impact of removing support for X.509 certificates with signatures that use SHA-1?](#q-what-is-the-impact-of-removing-support-for-x-509-certificates-with-signatures-that-use-sha-1)
- [Q: What are the phases of deprecation?](#q-what-are-the-phases-of-deprecation)
### Q: What is the impact on anyone using the legacy MFA feature?
If you are an Enterprise Vault user, there is no impact. There are no changes to the Enterprise MFA offering.
If you use Vault Community Edition and use the legacy MFA, this will impact you since we plan to deprecate the legacy MFA feature. However, while we will continue to provide support for MFA in Vault Community Edition in the upcoming Vault 1.10 release, our target is to remove the legacy MFA feature from the product in the following Vault 1.11 release. Therefore, you should plan to migrate to the new MFA feature when Vault Community Edition supports it.
### Q: I'm currently using the Etcd storage backend feature. How does the deprecation impact me?
Etcd v2 has been deprecated with the release of Etcd v3.5 and will be decommissioned in Etcd v3.6. The Etcd v2 API will be removed in Vault 1.10. Etcd storage backend users should migrate Vault storage to an Etcd v3 cluster before upgrading to Vault 1.10. We recommend that you back up your storage before performing the migration.
If you are an Enterprise user, we recommend that you consider migrating to HashiCorp supported storage backends: **Integrated Storage** or **Consul** (if your use case requires you to use this). Your HashiCorp sales or support representative can assist you with this decision.
### Q: What should I do if I use Mount Filters, AppID, or any of the standalone DB engines?
These features were deprecated in prior releases of Vault. We are targeting the removal of these features from the product in the Vault 1.12 release. Please plan to migrate to the alternative features before the release of Vault 1.12. Refer to the table below for the list of alternative features.
| Deprecated Feature | Alternative Feature |
| --------------------- | ------------------------------------------------------------------------------------------------------------------- |
| Mount Filters | [Path Filters](/vault/api-docs/system/replication/replication-performance#create-paths-filter) |
| AppID | [AppRole auth method](/vault/docs/auth/approle) |
| Standalone DB engines | [Combined DB engines](/vault/docs/secrets/databases) |
**Note:** After upgrading to 1.12, any attempt to unseal a core with one of the features listed above enabled will result in a core shutdown. This may temporarily be overridden using the `VAULT_ALLOW_PENDING_REMOVAL_MOUNTS` environment variable when launching the Vault server. These features will be officially removed from Vault in version 1.13, and this environment variable will no longer work. In order to upgrade to 1.13, you will have to completely disable all removed features.
### Q: What is the impact of removing support for X.509 certificates with signatures that use SHA-1?
Starting with Vault 1.12.0, Vault will be built with Go 1.18 or later.
The standard library in Go 1.18 and later [rejects X.509 signatures](https://go.dev/doc/go1.18#sha1) that use a SHA-1 hash.
If this issue impacts your usage of Vault, you can temporarily work around it by deploying Vault with the environment variable `GODEBUG=x509sha1=1` set.
This workaround will fail in a future version of Go; however, the Go team has not said when they will remove it.
If you want to check whether a certificate or CA contains a problematic signature, you can use the OpenSSL CLI:
```shell-session
$ openssl x509 -text -noout -in somecert.pem | grep sha1
Signature Algorithm: sha1WithRSAEncryption
Signature Algorithm: sha1WithRSAEncryption
```
Any signature algorithms that contain `sha1` will be potentially problematic.
Here are the use cases that may still use certificates with SHA-1:
#### Auth methods
- [AWS Auth Method](/vault/docs/auth/aws): [AWS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html) can use SHA-1-based PKCS7 signatures for DSA key pairs.
- [Cloud Foundry (CF) Auth Method ](/vault/docs/auth/cf)
- [Kerberos Auth Method](/vault/docs/auth/kerberos)
- [Kubernetes Auth Method](/vault/docs/auth/kubernetes)
- [LDAP Auth Method](/vault/docs/auth/ldap)
- [JWT/OIDC Auth Method](/vault/docs/auth/jwt/)
- [TLS Certificates Auth Method](/vault/docs/auth/cert)
#### Database secrets engines
- [Cassandra Database Secrets Engine](/vault/docs/secrets/databases/cassandra)
- [Couchbase Database Secrets Engine](/vault/docs/secrets/databases/couchbase)
- [Elasticsearch Database Secrets Engine](/vault/docs/secrets/databases/elasticdb)
- [InfluxDB Database Secrets Engine](/vault/docs/secrets/databases/influxdb)
- [MongoDB Database Secrets Engine](/vault/docs/secrets/databases/mongodb)
- [MySQL/MariaDB Database Secrets Engine](/vault/docs/secrets/databases/mysql-maria)
#### Secrets engines
- [Active Directory Secrets Engine](/vault/docs/secrets/ad)
- [Consul Secrets Engine](/vault/docs/secrets/consul)
- [Kubernetes Secrets Engine](/vault/docs/secrets/kubernetes)
- [Nomad Secrets Engine](/vault/docs/secrets/nomad)
- [LDAP Secrets Engine](/vault/docs/secrets/ldap)
- [PKI Secrets Engine](/vault/docs/secrets/pki/)
### Q: What are the phases of deprecation?
As of version 1.12, Vault implements a multi-phased approach to deprecation. The intent of this approach is to provide sufficient warning that a feature will be removed and to ensure safe handling of stored data once the associated feature has been removed.
The phases of deprecation are also known as "Deprecation Status". These statuses are currently reflected in builtin plugins and are exposed via the Vault `auth`, `secrets`, and `plugins` CLI/API endpoints. For more information, refer to the corresponding documentation.
The four phases of deprecation are: `Supported`, `Deprecated`, `Pending Removal`, and `Removed`.
**Note:** Deprecation Status currently only applies to builtin `auth` and `secrets` plugins. All external plugins will report a status of `n/a`. This is expected behavior.
#### Supported
This is the default status and reflects a feature which is still supported. There is no unique behavior or functionality associated with this status.
#### Deprecated
This status reflects a feature which has been marked for deprecation in a later release of Vault. This is the first phase of the deprecation process. A status of `Deprecated` has three effects:
1. After an upgrade, any existing `Deprecated` feature (builtin auth/secrets plugins enabled via CLI or API prior to upgrade) will log `Warn`-level messages on unseal.
2. All new usage of `Deprecated` features will log `Warn`-level messages.
3. All `POST/GET/LIST` endpoints associated with this feature will return `warnings` in response data.
#### Pending removal
This status reflects a feature which has been officially deprecated in this release of Vault. This is the first phase in the process that fundamentally alters the behavior of Vault. The effects are two-fold:
1. After an upgrade, any existing `Pending Removal` feature (builtin auth/secrets plugins enabled via CLI or API prior to upgrade) will log `Error`-level messages to the Vault log and cause an immediate shutdown of the Vault core.
2. Any newly enabled `Pending Removal` feature will fail and log `Error`-level messages to the Vault log and CLI/API.
##### VAULT_ALLOW_PENDING_REMOVAL_MOUNTS
The `Pending Removal` behavior may be overridden using a new environment variable: [`VAULT_ALLOW_PENDING_REMOVAL_MOUNTS`](/vault/docs/commands/server#vault_allow_pending_removal_mounts). This environment variable effectively allows all `Pending Removal` features to be treated as `Deprecated`.
#### Removed
This status reflects a feature which has been officially removed from Vault. `Removed` is the last phase of the deprecation process. During this phase, code for this feature no longer exists within Vault.
1. After an upgrade, any existing `Removed` feature will log `Error`-level messages to the Vault log and cause an immediate shutdown of the Vault core.
2. Any new `Removed` features will fail and log `Error`-level messages to the Vault log and CLI/API.
#### Migration path
In order to successfully upgrade, use of the `Removed` feature must be discontinued. To accomplish this:
1. Downgrade Vault to a previous version.
2. Replace any `Removed` or `Pending Removal` feature with the [preferred alternative feature](#q-what-should-i-do-if-i-use-mount-filters-appid-or-any-of-the-standalone-db-engines).
3. Upgrade to latest desired version.
| website/content/docs/deprecation/faq.mdx | 0 | https://github.com/hashicorp/vault/commit/87857e61f547e58d8bfe24b72e4844ba675da1f3 | [
0.0002577182895038277,
0.00017423764802515507,
0.0001618413662072271,
0.00016842439072206616,
0.000022950103812036105
] |
{
"id": 0,
"code_window": [
"import authPage from 'vault/tests/pages/auth';\n",
"import { click, visit, currentURL, fillIn } from '@ember/test-helpers';\n",
"import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';\n",
"\n",
"module('Acceptance | enterprise | sync | destination', function (hooks) {\n",
" setupApplicationTest(hooks);\n",
" setupMirage(hooks);\n",
"\n",
" hooks.beforeEach(async function () {\n",
" syncScenario(this.server);\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// sync is an enterprise feature but since mirage is used the enterprise label has been intentionally omitted from the module name\n",
"module('Acceptance | sync | destination', function (hooks) {\n"
],
"file_path": "ui/tests/acceptance/sync/secrets/destination-test.js",
"type": "replace",
"edit_start_line_idx": 14
} | ---
layout: docs
page_title: OCI KMS - Seals - Configuration
description: |-
The OCI KMS seal configures Vault to use OCI KMS as the seal wrapping
mechanism.
---
# `ocikms` seal
<Note title="Seal wrapping requires Vault Enterprise">
All Vault versions support **auto-unseal** for OCI KMS, but **seal wrapping**
requires Vault Enterprise.
Vault Enterprise enables seal wrapping by default, which means the KMS service
must be available at runtime and not just during the unseal process. Refer to
the [Seal wrap](/vault/docs/enterprise/sealwrap) overview for more
information.
</Note>
The OCI KMS seal configures Vault to use OCI KMS as the seal wrapping mechanism.
The OCI KMS seal is activated by one of the following:
- The presence of a `seal "ocikms"` block in Vault's configuration file
- The presence of the environment variable `VAULT_SEAL_TYPE` set to `ocikms`. If
enabling via environment variable, all other required values specific to OCI
  KMS (i.e. `VAULT_OCIKMS_SEAL_KEY_ID`, `VAULT_OCIKMS_CRYPTO_ENDPOINT`, `VAULT_OCIKMS_MANAGEMENT_ENDPOINT`) must also be supplied, as well as all
  other OCI-related [environment variables][oci-sdk] that lend to successful
authentication.
## `ocikms` example
This example shows configuring the OCI KMS seal through the Vault configuration file
by providing all the required values:
```hcl
seal "ocikms" {
key_id = "ocid1.key.oc1.iad.afnxza26aag4s.abzwkljsbapzb2nrha5nt3s7s7p42ctcrcj72vn3kq5qx"
crypto_endpoint = "https://afnxza26aag4s-crypto.kms.us-ashburn-1.oraclecloud.com"
management_endpoint = "https://afnxza26aag4s-management.kms.us-ashburn-1.oraclecloud.com"
auth_type_api_key = "true"
}
```
## `ocikms` parameters
These parameters apply to the `seal` stanza in the Vault configuration file:
- `key_id` `(string: <required>)`: The OCI KMS key ID to use. May also be
specified by the `VAULT_OCIKMS_SEAL_KEY_ID` environment variable.
- `crypto_endpoint` `(string: <required>)`: The OCI KMS cryptographic endpoint (or data plane endpoint)
to be used to make OCI KMS encryption/decryption requests. May also be specified by the `VAULT_OCIKMS_CRYPTO_ENDPOINT` environment
variable.
- `management_endpoint` `(string: <required>)`: The OCI KMS management endpoint (or control plane endpoint)
to be used to make OCI KMS key management requests. May also be specified by the `VAULT_OCIKMS_MANAGEMENT_ENDPOINT` environment
variable.
- `auth_type_api_key` `(boolean: false)`: Specifies whether an API key is used to authenticate to the OCI KMS service.
  If it is `false`, Vault authenticates using the instance principal of the compute instance. See the Authentication section below for details. Default is `false`.
- `disabled` `(string: "")`: Set this to `true` if Vault is migrating from an auto seal configuration. Otherwise, set to `false`.
Refer to the [Seal Migration](/vault/docs/concepts/seal#seal-migration) documentation for more information about the seal migration process.
## Authentication
Authentication-related values must be provided, either as environment
variables or as configuration parameters.
If you want to use Instance Principal, add the configuration section below, along with any further configuration settings as detailed in the [configuration docs](/vault/docs/configuration/).
```hcl
seal "ocikms" {
crypto_endpoint = "<kms-crypto-endpoint>"
management_endpoint = "<kms-management-endpoint>"
key_id = "<kms-key-id>"
}
# Notes:
# crypto_endpoint can be replaced by VAULT_OCIKMS_CRYPTO_ENDPOINT environment var
# management_endpoint can be replaced by VAULT_OCIKMS_MANAGEMENT_ENDPOINT environment var
# key_id can be replaced by VAULT_OCIKMS_SEAL_KEY_ID environment var
```
If you want to use User Principal, the plugin will take the API key you defined for OCI SDK, often under `~/.oci/config`.
```hcl
seal "ocikms" {
auth_type_api_key = true
crypto_endpoint = "<kms-crypto-endpoint>"
management_endpoint = "<kms-management-endpoint>"
key_id = "<kms-key-id>"
}
```
To grant a compute instance permission to use the OCI KMS service, write policies for KMS access.
- Create a [Dynamic Group][oci-dg] in your OCI tenancy.
- Create a policy that allows the Dynamic Group to use or manage keys from OCI KMS. There are multiple ways to write these policies. The [OCI Identity Policy][oci-id] can be used as a reference or starting point.
The most common policy allows a dynamic group of tenant A to use KMS's keys in tenant B:
```text
define tenancy tenantB as <tenantB-ocid>
endorse dynamic-group <dynamic-group-name> to use keys in tenancy tenantB
```
```text
define tenancy tenantA as <tenantA-ocid>
define dynamic-group <dynamic-group-name> as <dynamic-group-ocid>
admit dynamic-group <dynamic-group-name> of tenancy tenantA to use keys in compartment <key-compartment>
```
## `ocikms` rotate OCI KMS master key
For the [OCI KMS key rotation feature][oci-kms-rotation], OCI KMS will create a new version of the key internally. This process is independent of Vault, and Vault still uses the same `key_id` without any interruption.
If you want to change the `key_id`: migrate to Shamir, change `key_id`, and then migrate to OCI KMS with the new `key_id`.
[oci-sdk]: https://docs.cloud.oracle.com/iaas/Content/API/Concepts/sdkconfig.htm
[oci-dg]: https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingdynamicgroups.htm
[oci-id]: https://docs.cloud.oracle.com/iaas/Content/Identity/Concepts/policies.htm
[oci-kms-rotation]: https://docs.cloud.oracle.com/iaas/Content/KeyManagement/Tasks/managingkeys.htm
| website/content/docs/configuration/seal/ocikms.mdx | 0 | https://github.com/hashicorp/vault/commit/87857e61f547e58d8bfe24b72e4844ba675da1f3 | [
0.0001838539756136015,
0.0001689179625827819,
0.00016107082774396986,
0.00016751994553487748,
0.000005583936854236526
] |
{
"id": 0,
"code_window": [
"import authPage from 'vault/tests/pages/auth';\n",
"import { click, visit, currentURL, fillIn } from '@ember/test-helpers';\n",
"import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';\n",
"\n",
"module('Acceptance | enterprise | sync | destination', function (hooks) {\n",
" setupApplicationTest(hooks);\n",
" setupMirage(hooks);\n",
"\n",
" hooks.beforeEach(async function () {\n",
" syncScenario(this.server);\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// sync is an enterprise feature but since mirage is used the enterprise label has been intentionally omitted from the module name\n",
"module('Acceptance | sync | destination', function (hooks) {\n"
],
"file_path": "ui/tests/acceptance/sync/secrets/destination-test.js",
"type": "replace",
"edit_start_line_idx": 14
} | For enterprise customers, HashiCorp provides official support for Vault's Integrated Storage and Consul as storage backends. Vault Enterprise customers are strongly
recommended to use these supported storage backends for best outcomes. Version 1.12.0 of Vault Enterprise will not start if configured to use a storage backend
other than Integrated Storage or Consul. This was meant to protect against issues caused by using unsupported backends that do not support transactional storage.
Version 1.12.2 modified this behavior to instead log a warning when unsupported storage backends are used, while ensuring that Vault will start.
| website/content/partials/ent-supported-storage.mdx | 0 | https://github.com/hashicorp/vault/commit/87857e61f547e58d8bfe24b72e4844ba675da1f3 | [
0.00019240903202444315,
0.00019240903202444315,
0.00019240903202444315,
0.00019240903202444315,
0
] |
{
"id": 1,
"code_window": [
"import syncHandlers from 'vault/mirage/handlers/sync';\n",
"import authPage from 'vault/tests/pages/auth';\n",
"import { click, visit, fillIn, currentURL, currentRouteName } from '@ember/test-helpers';\n",
"import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';\n",
"\n",
"module('Acceptance | enterprise | sync | destinations', function (hooks) {\n",
" setupApplicationTest(hooks);\n",
" setupMirage(hooks);\n",
"\n",
" hooks.beforeEach(async function () {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// sync is an enterprise feature but since mirage is used the enterprise label has been intentionally omitted from the module name\n",
"module('Acceptance | sync | destinations', function (hooks) {\n"
],
"file_path": "ui/tests/acceptance/sync/secrets/destinations-test.js",
"type": "replace",
"edit_start_line_idx": 14
} | /**
* Copyright (c) HashiCorp, Inc.
* SPDX-License-Identifier: BUSL-1.1
*/
import { module, test } from 'qunit';
import { setupApplicationTest } from 'ember-qunit';
import { setupMirage } from 'ember-cli-mirage/test-support';
import syncScenario from 'vault/mirage/scenarios/sync';
import syncHandlers from 'vault/mirage/handlers/sync';
import authPage from 'vault/tests/pages/auth';
import { click } from '@ember/test-helpers';
import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';
module('Acceptance | enterprise | sync | destination', function (hooks) {
setupApplicationTest(hooks);
setupMirage(hooks);
hooks.beforeEach(async function () {
syncScenario(this.server);
syncHandlers(this.server);
return authPage.login();
});
test('it should transition to correct routes when performing actions', async function (assert) {
await click(ts.navLink('Secrets Sync'));
await click(ts.destinations.list.create);
await click(ts.createCancel);
await click(ts.overviewCard.actionLink('Create new'));
await click(ts.createCancel);
await click(ts.overview.table.actionToggle(0));
await click(ts.overview.table.action('sync'));
await click(ts.destinations.sync.cancel);
await click(ts.breadcrumbLink('Secrets Sync'));
await click(ts.overview.table.actionToggle(0));
await click(ts.overview.table.action('details'));
assert.dom(ts.tab('Secrets')).hasClass('active', 'Navigates to secrets view for destination');
});
});
| ui/tests/acceptance/sync/secrets/overview-test.js | 1 | https://github.com/hashicorp/vault/commit/87857e61f547e58d8bfe24b72e4844ba675da1f3 | [
0.985964298248291,
0.2483546882867813,
0.00024441705318167806,
0.0036050276830792427,
0.42586761713027954
] |
{
"id": 1,
"code_window": [
"import syncHandlers from 'vault/mirage/handlers/sync';\n",
"import authPage from 'vault/tests/pages/auth';\n",
"import { click, visit, fillIn, currentURL, currentRouteName } from '@ember/test-helpers';\n",
"import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';\n",
"\n",
"module('Acceptance | enterprise | sync | destinations', function (hooks) {\n",
" setupApplicationTest(hooks);\n",
" setupMirage(hooks);\n",
"\n",
" hooks.beforeEach(async function () {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// sync is an enterprise feature but since mirage is used the enterprise label has been intentionally omitted from the module name\n",
"module('Acceptance | sync | destinations', function (hooks) {\n"
],
"file_path": "ui/tests/acceptance/sync/secrets/destinations-test.js",
"type": "replace",
"edit_start_line_idx": 14
} | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package ssh
import (
"context"
"fmt"
"net"
"strings"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/logical"
)
type sshOTP struct {
Username string `json:"username" structs:"username" mapstructure:"username"`
IP string `json:"ip" structs:"ip" mapstructure:"ip"`
RoleName string `json:"role_name" structs:"role_name" mapstructure:"role_name"`
}
func pathCredsCreate(b *backend) *framework.Path {
return &framework.Path{
Pattern: "creds/" + framework.GenericNameWithAtRegex("role"),
DisplayAttrs: &framework.DisplayAttributes{
OperationPrefix: operationPrefixSSH,
OperationVerb: "generate",
OperationSuffix: "credentials",
},
Fields: map[string]*framework.FieldSchema{
"role": {
Type: framework.TypeString,
Description: "[Required] Name of the role",
},
"username": {
Type: framework.TypeString,
Description: "[Optional] Username in remote host",
},
"ip": {
Type: framework.TypeString,
Description: "[Required] IP of the remote host",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathCredsCreateWrite,
},
HelpSynopsis: pathCredsCreateHelpSyn,
HelpDescription: pathCredsCreateHelpDesc,
}
}
func (b *backend) pathCredsCreateWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
roleName := d.Get("role").(string)
if roleName == "" {
return logical.ErrorResponse("Missing role"), nil
}
ipRaw := d.Get("ip").(string)
if ipRaw == "" {
return logical.ErrorResponse("Missing ip"), nil
}
role, err := b.getRole(ctx, req.Storage, roleName)
if err != nil {
return nil, fmt.Errorf("error retrieving role: %w", err)
}
if role == nil {
return logical.ErrorResponse(fmt.Sprintf("Role %q not found", roleName)), nil
}
// username is an optional parameter.
username := d.Get("username").(string)
// Set the default username
if username == "" {
if role.DefaultUser == "" {
return logical.ErrorResponse("No default username registered. Use 'username' option"), nil
}
username = role.DefaultUser
}
if role.AllowedUsers != "" {
// Check if the username is present in allowed users list.
err := validateUsername(username, role.AllowedUsers)
// If username is not present in allowed users list, check if it
// is the default username in the role. If neither is true, then
// that username is not allowed to generate a credential.
if err != nil && username != role.DefaultUser {
return logical.ErrorResponse("Username is not present is allowed users list"), nil
}
} else if username != role.DefaultUser {
return logical.ErrorResponse("Username has to be either in allowed users list or has to be a default username"), nil
}
// Validate the IP address
ipAddr := net.ParseIP(ipRaw)
if ipAddr == nil {
return logical.ErrorResponse(fmt.Sprintf("Invalid IP %q", ipRaw)), nil
}
// Check if the IP belongs to the registered list of CIDR blocks under the role
ip := ipAddr.String()
zeroAddressEntry, err := b.getZeroAddressRoles(ctx, req.Storage)
if err != nil {
return nil, fmt.Errorf("error retrieving zero-address roles: %w", err)
}
var zeroAddressRoles []string
if zeroAddressEntry != nil {
zeroAddressRoles = zeroAddressEntry.Roles
}
err = validateIP(ip, roleName, role.CIDRList, role.ExcludeCIDRList, zeroAddressRoles)
if err != nil {
return logical.ErrorResponse(fmt.Sprintf("Error validating IP: %v", err)), nil
}
var result *logical.Response
if role.KeyType == KeyTypeOTP {
// Generate an OTP
otp, err := b.GenerateOTPCredential(ctx, req, &sshOTP{
Username: username,
IP: ip,
RoleName: roleName,
})
if err != nil {
return nil, err
}
// Return the information relevant to user of OTP type and save
// the data required for later use in the internal section of secret.
// In this case, saving just the OTP is sufficient since there is
// no need to establish connection with the remote host.
result = b.Secret(SecretOTPType).Response(map[string]interface{}{
"key_type": role.KeyType,
"key": otp,
"username": username,
"ip": ip,
"port": role.Port,
}, map[string]interface{}{
"otp": otp,
})
} else if role.KeyType == KeyTypeDynamic {
return nil, fmt.Errorf("dynamic key types have been removed")
} else {
return nil, fmt.Errorf("key type unknown")
}
return result, nil
}
// Generates a UUID OTP and its salted value based on the salt of the backend.
func (b *backend) GenerateSaltedOTP(ctx context.Context) (string, string, error) {
str, err := uuid.GenerateUUID()
if err != nil {
return "", "", err
}
salt, err := b.Salt(ctx)
if err != nil {
return "", "", err
}
return str, salt.SaltID(str), nil
}
// Generates a UUID OTP and creates an entry for the same in storage backend with its salted string.
func (b *backend) GenerateOTPCredential(ctx context.Context, req *logical.Request, sshOTPEntry *sshOTP) (string, error) {
otp, otpSalted, err := b.GenerateSaltedOTP(ctx)
if err != nil {
return "", err
}
// Check if there is an entry already created for the newly generated OTP.
entry, err := b.getOTP(ctx, req.Storage, otpSalted)
// If entry already exists for the OTP, make sure that new OTP is not
// replacing an existing one by recreating new ones until an unused
// OTP is generated. It is very unlikely that this is the case and this
// code is just for safety.
for err == nil && entry != nil {
otp, otpSalted, err = b.GenerateSaltedOTP(ctx)
if err != nil {
return "", err
}
entry, err = b.getOTP(ctx, req.Storage, otpSalted)
if err != nil {
return "", err
}
}
// Store an entry for the salt of OTP.
newEntry, err := logical.StorageEntryJSON("otp/"+otpSalted, sshOTPEntry)
if err != nil {
return "", err
}
if err := req.Storage.Put(ctx, newEntry); err != nil {
return "", err
}
return otp, nil
}
// validateIP first checks if the role belongs to the list of privileged
// roles that could allow any IP address and if there is a match, IP is
// accepted immediately. If not, IP is searched in the allowed CIDR blocks
// registered with the role. If there is a match, then it is searched in the
// excluded CIDR blocks and if IP is found there as well, an error is returned.
// IP is valid only if it is encompassed by allowed CIDR blocks and not by
// excluded CIDR blocks.
func validateIP(ip, roleName, cidrList, excludeCidrList string, zeroAddressRoles []string) error {
// Search IP in the zero-address list
for _, role := range zeroAddressRoles {
if roleName == role {
return nil
}
}
// Search IP in allowed CIDR blocks
ipMatched, err := cidrListContainsIP(ip, cidrList)
if err != nil {
return err
}
if !ipMatched {
return fmt.Errorf("IP does not belong to role")
}
if len(excludeCidrList) == 0 {
return nil
}
// Search IP in exclude list
ipMatched, err = cidrListContainsIP(ip, excludeCidrList)
if err != nil {
return err
}
if ipMatched {
return fmt.Errorf("IP does not belong to role")
}
return nil
}
// Checks if the username supplied by the user is present in the list of
// allowed users registered during creation of the role.
func validateUsername(username, allowedUsers string) error {
if allowedUsers == "" {
return fmt.Errorf("username not in allowed users list")
}
// Role was explicitly configured to allow any username.
if allowedUsers == "*" {
return nil
}
userList := strings.Split(allowedUsers, ",")
for _, user := range userList {
if strings.TrimSpace(user) == username {
return nil
}
}
return fmt.Errorf("username not in allowed users list")
}
const pathCredsCreateHelpSyn = `
Creates a credential for establishing SSH connection with the remote host.
`
const pathCredsCreateHelpDesc = `
This path will generate a new key for establishing SSH session with
target host. The key can be a One Time Password (OTP) using 'key_type'
being 'otp'.
Keys will have a lease associated with them. The access keys can be
revoked by using the lease ID.
`
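// exampleValidateUsernameUsage is an illustrative sketch, not part of the
// original file: it demonstrates the matching rules implemented above.
// "*" allows any user; otherwise the comma-separated list is checked entry
// by entry with surrounding whitespace trimmed.
func exampleValidateUsernameUsage() {
	_ = validateUsername("alice", "*")        // nil: wildcard allows any username
	_ = validateUsername("bob", "alice, bob") // nil: listed, whitespace is trimmed
	_ = validateUsername("eve", "alice,bob")  // non-nil: not in the allowed list
}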
| builtin/logical/ssh/path_creds_create.go | 0 | https://github.com/hashicorp/vault/commit/87857e61f547e58d8bfe24b72e4844ba675da1f3 | [
0.00017687090439721942,
0.00017250230303034186,
0.00016249530017375946,
0.00017292026313953102,
0.0000030550831979780924
] |
{
"id": 1,
"code_window": [
"import syncHandlers from 'vault/mirage/handlers/sync';\n",
"import authPage from 'vault/tests/pages/auth';\n",
"import { click, visit, fillIn, currentURL, currentRouteName } from '@ember/test-helpers';\n",
"import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';\n",
"\n",
"module('Acceptance | enterprise | sync | destinations', function (hooks) {\n",
" setupApplicationTest(hooks);\n",
" setupMirage(hooks);\n",
"\n",
" hooks.beforeEach(async function () {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// sync is an enterprise feature but since mirage is used the enterprise label has been intentionally omitted from the module name\n",
"module('Acceptance | sync | destinations', function (hooks) {\n"
],
"file_path": "ui/tests/acceptance/sync/secrets/destinations-test.js",
"type": "replace",
"edit_start_line_idx": 14
} | /**
* Copyright (c) HashiCorp, Inc.
* SPDX-License-Identifier: BUSL-1.1
*/
import AdapterError from '@ember-data/adapter/error';
import AuthConfigComponent from './config';
import { inject as service } from '@ember/service';
import { task } from 'ember-concurrency';
import { waitFor } from '@ember/test-waiters';
/**
* @module AuthConfigForm/Options
* The `AuthConfigForm/Options` is options portion of the auth config form.
*
* @example
* ```js
* {{auth-config-form/options model.model}}
* ```
*
* @property model=null {DS.Model} - The corresponding auth model that is being configured.
*
*/
export default AuthConfigComponent.extend({
flashMessages: service(),
router: service(),
saveModel: task(
waitFor(function* () {
const data = this.model.config.serialize();
data.description = this.model.description;
// token_type should not be tuneable for the token auth method.
if (this.model.methodType === 'token') {
delete data.token_type;
}
try {
yield this.model.tune(data);
} catch (err) {
// AdapterErrors are handled by the error-message component
// in the form
if (err instanceof AdapterError === false) {
throw err;
}
        // Because we're not calling model.save, the model never updates with
        // the error, so we force it by manually setting errorMessage.
try {
this.model.set('errorMessage', err.errors.firstObject);
} catch {
// do nothing
}
return;
}
this.router.transitionTo('vault.cluster.access.methods').followRedirects();
this.flashMessages.success('The configuration was saved successfully.');
})
),
});
| ui/app/components/auth-config-form/options.js | 0 | https://github.com/hashicorp/vault/commit/87857e61f547e58d8bfe24b72e4844ba675da1f3 | [
0.00020314534776844084,
0.00017398981435690075,
0.00016336388944182545,
0.00017095963994506747,
0.000012267031706869602
] |
{
"id": 1,
"code_window": [
"import syncHandlers from 'vault/mirage/handlers/sync';\n",
"import authPage from 'vault/tests/pages/auth';\n",
"import { click, visit, fillIn, currentURL, currentRouteName } from '@ember/test-helpers';\n",
"import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';\n",
"\n",
"module('Acceptance | enterprise | sync | destinations', function (hooks) {\n",
" setupApplicationTest(hooks);\n",
" setupMirage(hooks);\n",
"\n",
" hooks.beforeEach(async function () {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// sync is an enterprise feature but since mirage is used the enterprise label has been intentionally omitted from the module name\n",
"module('Acceptance | sync | destinations', function (hooks) {\n"
],
"file_path": "ui/tests/acceptance/sync/secrets/destinations-test.js",
"type": "replace",
"edit_start_line_idx": 14
} | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package credsutil
import (
"context"
"time"
"github.com/hashicorp/vault/sdk/database/dbplugin"
)
const (
NoneLength int = -1
)
// SQLCredentialsProducer implements CredentialsProducer and provides a generic credentials producer for most SQL database types.
type SQLCredentialsProducer struct {
DisplayNameLen int
RoleNameLen int
UsernameLen int
Separator string
LowercaseUsername bool
}
func (scp *SQLCredentialsProducer) GenerateCredentials(ctx context.Context) (string, error) {
password, err := scp.GeneratePassword()
if err != nil {
return "", err
}
return password, nil
}
func (scp *SQLCredentialsProducer) GenerateUsername(config dbplugin.UsernameConfig) (string, error) {
caseOp := KeepCase
if scp.LowercaseUsername {
caseOp = Lowercase
}
return GenerateUsername(
DisplayName(config.DisplayName, scp.DisplayNameLen),
RoleName(config.RoleName, scp.RoleNameLen),
Case(caseOp),
Separator(scp.Separator),
MaxLength(scp.UsernameLen),
)
}
func (scp *SQLCredentialsProducer) GeneratePassword() (string, error) {
password, err := RandomAlphaNumeric(20, true)
if err != nil {
return "", err
}
return password, nil
}
func (scp *SQLCredentialsProducer) GenerateExpiration(ttl time.Time) (string, error) {
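	// "2006-01-02 15:04:05-0700" below is Go's reference-time layout; the
	// result looks like e.g. "2021-07-15 13:45:05+0000"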
return ttl.Format("2006-01-02 15:04:05-0700"), nil
}
| sdk/database/helper/credsutil/sql.go | 0 | https://github.com/hashicorp/vault/commit/87857e61f547e58d8bfe24b72e4844ba675da1f3 | [
0.00017607197514735162,
0.0001715156831778586,
0.00016479323676321656,
0.00017188533092848957,
0.0000037753245578642236
] |
{
"id": 2,
"code_window": [
"import syncHandlers from 'vault/mirage/handlers/sync';\n",
"import authPage from 'vault/tests/pages/auth';\n",
"import { click } from '@ember/test-helpers';\n",
"import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';\n",
"\n",
"module('Acceptance | enterprise | sync | destination', function (hooks) {\n",
" setupApplicationTest(hooks);\n",
" setupMirage(hooks);\n",
"\n",
" hooks.beforeEach(async function () {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// sync is an enterprise feature but since mirage is used the enterprise label has been intentionally omitted from the module name\n",
"module('Acceptance | sync | destination', function (hooks) {\n"
],
"file_path": "ui/tests/acceptance/sync/secrets/overview-test.js",
"type": "replace",
"edit_start_line_idx": 14
} | /**
* Copyright (c) HashiCorp, Inc.
* SPDX-License-Identifier: BUSL-1.1
*/
import { module, test } from 'qunit';
import { setupApplicationTest } from 'ember-qunit';
import { setupMirage } from 'ember-cli-mirage/test-support';
import syncScenario from 'vault/mirage/scenarios/sync';
import syncHandlers from 'vault/mirage/handlers/sync';
import authPage from 'vault/tests/pages/auth';
import { click, visit, fillIn, currentURL, currentRouteName } from '@ember/test-helpers';
import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';
module('Acceptance | enterprise | sync | destinations', function (hooks) {
setupApplicationTest(hooks);
setupMirage(hooks);
hooks.beforeEach(async function () {
syncScenario(this.server);
syncHandlers(this.server);
return authPage.login();
});
test('it should create new destination', async function (assert) {
// remove destinations from mirage so cta shows when 404 is returned
this.server.db.syncDestinations.remove();
await click(ts.navLink('Secrets Sync'));
await click(ts.cta.button);
await click(ts.selectType('aws-sm'));
await fillIn(ts.inputByAttr('name'), 'foo');
await click(ts.saveButton);
assert.dom(ts.infoRowValue('Name')).hasText('foo', 'Destination details render after create success');
await click(ts.breadcrumbLink('Destinations'));
await click(ts.destinations.list.create);
assert.strictEqual(
currentURL(),
'/vault/sync/secrets/destinations/create',
'Toolbar action navigates to destinations create view'
);
});
test('it should filter destinations list', async function (assert) {
await visit('vault/sync/secrets/destinations');
assert.dom(ts.listItem).exists({ count: 6 }, 'All destinations render');
await click(`${ts.filter('type')} .ember-basic-dropdown-trigger`);
await click(ts.searchSelect.option());
assert.dom(ts.listItem).exists({ count: 2 }, 'Destinations are filtered by type');
await fillIn(ts.filter('name'), 'new');
assert.dom(ts.listItem).exists({ count: 1 }, 'Destinations are filtered by type and name');
await click(ts.searchSelect.removeSelected);
await fillIn(ts.filter('name'), 'gcp');
assert.dom(ts.listItem).exists({ count: 1 }, 'Destinations are filtered by name');
});
test('it should transition to correct routes when performing actions', async function (assert) {
const routeName = (route) => `vault.cluster.sync.secrets.destinations.destination.${route}`;
await visit('vault/sync/secrets/destinations');
await click(ts.menuTrigger);
await click(ts.destinations.list.menu.details);
assert.strictEqual(currentRouteName(), routeName('details'), 'Navigates to details route');
await click(ts.breadcrumbLink('Destinations'));
await click(ts.menuTrigger);
await click(ts.destinations.list.menu.edit);
assert.strictEqual(currentRouteName(), routeName('edit'), 'Navigates to edit route');
});
});
| ui/tests/acceptance/sync/secrets/destinations-test.js | 1 | https://github.com/hashicorp/vault/commit/87857e61f547e58d8bfe24b72e4844ba675da1f3 | [
0.9923335313796997,
0.14260534942150116,
0.00022147868003230542,
0.0005340678617358208,
0.346900999546051
] |
{
"id": 2,
"code_window": [
"import syncHandlers from 'vault/mirage/handlers/sync';\n",
"import authPage from 'vault/tests/pages/auth';\n",
"import { click } from '@ember/test-helpers';\n",
"import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';\n",
"\n",
"module('Acceptance | enterprise | sync | destination', function (hooks) {\n",
" setupApplicationTest(hooks);\n",
" setupMirage(hooks);\n",
"\n",
" hooks.beforeEach(async function () {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// sync is an enterprise feature but since mirage is used the enterprise label has been intentionally omitted from the module name\n",
"module('Acceptance | sync | destination', function (hooks) {\n"
],
"file_path": "ui/tests/acceptance/sync/secrets/overview-test.js",
"type": "replace",
"edit_start_line_idx": 14
} | ---
layout: api
page_title: /sys/plugins/reload - HTTP API
description: The `/sys/plugins/reload` endpoints are used to reload plugins.
---
# `/sys/plugins/reload`
## Reload plugin
The `/sys/plugins/reload/:type/:name` endpoint reloads a named plugin across all
namespaces. It is only available in the root namespace. All instances of the plugin
will be killed, and any newly pinned version of the plugin will be started in
their place.
| Method | Path |
| :----- | :-------------------------------- |
| `POST` | `/sys/plugins/reload/:type/:name` |
### Parameters
- `type` `(string: <required>)` – The type of the plugin, as registered in the
plugin catalog. One of "auth", "secret", "database", or "unknown". If "unknown",
all plugin types with the provided name will be reloaded.
- `name` `(string: <required>)` – The name of the plugin to reload, as registered
in the plugin catalog.
- `scope` `(string: "")` - The scope of the reload. If omitted, reloads the
plugin or mounts on this Vault instance. If 'global', will begin reloading the
plugin on all instances of a cluster.
### Sample payload
```json
{
"scope": "global"
}
```
### Sample request
```shell-session
$ curl \
--header "X-Vault-Token: ..." \
--request POST \
--data @payload.json \
http://127.0.0.1:8200/v1/sys/plugins/reload/auth/mock-plugin
```
### Sample response
```json
{
"data": {
"reload_id": "bdddb8df-ccb6-1b09-670d-efa9d3f2c11b"
},
...
}
```
-> Note: If no plugins are reloaded on the node that serviced the request, a
warning will also be returned in the response.
## Reload plugins within a namespace
The `/sys/plugins/reload/backend` endpoint is used to reload mounted plugin
backends. Either the plugin name (`plugin`) or the desired plugin backend mounts
(`mounts`) must be provided, but not both. In the case that the plugin name is
provided, all mounted paths that use that plugin backend will be reloaded.
This API is available in all namespaces, and is limited to reloading plugins in
use within the request's namespace.
| Method | Path                          |
| :----- | :---------------------------- |
| `POST` | `/sys/plugins/reload/backend` |
### Parameters
- `plugin` `(string: "")` – The name of the plugin to reload, as
registered in the plugin catalog.
- `mounts` `(array: [])` – Array or comma-separated string mount paths
of the plugin backends to reload.
- `scope` `(string: "")` - The scope of the reload. If omitted, reloads the
plugin or mounts on this Vault instance. If 'global', will begin reloading the
plugin on all instances of a cluster.
### Sample payload
```json
{
"plugin": "mock-plugin",
"scope": "global"
}
```
### Sample request
```shell-session
$ curl \
--header "X-Vault-Token: ..." \
--request POST \
--data @payload.json \
http://127.0.0.1:8200/v1/sys/plugins/reload/backend
```
### Sample response
```json
{
"data": {
"reload_id": "bdddb8df-ccb6-1b09-670d-efa9d3f2c11b"
},
...
}
```
-> Note: If no plugins are reloaded on the node that serviced the request, a
warning will also be returned in the response.
| website/content/api-docs/system/plugins-reload.mdx | 0 | https://github.com/hashicorp/vault/commit/87857e61f547e58d8bfe24b72e4844ba675da1f3 | [
0.00017265616043005139,
0.00016720437270123512,
0.00016113535093609244,
0.0001682799047557637,
0.0000033159044505737256
] |
{
"id": 2,
"code_window": [
"import syncHandlers from 'vault/mirage/handlers/sync';\n",
"import authPage from 'vault/tests/pages/auth';\n",
"import { click } from '@ember/test-helpers';\n",
"import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';\n",
"\n",
"module('Acceptance | enterprise | sync | destination', function (hooks) {\n",
" setupApplicationTest(hooks);\n",
" setupMirage(hooks);\n",
"\n",
" hooks.beforeEach(async function () {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// sync is an enterprise feature but since mirage is used the enterprise label has been intentionally omitted from the module name\n",
"module('Acceptance | sync | destination', function (hooks) {\n"
],
"file_path": "ui/tests/acceptance/sync/secrets/overview-test.js",
"type": "replace",
"edit_start_line_idx": 14
} | /**
* Copyright (c) HashiCorp, Inc.
* SPDX-License-Identifier: BUSL-1.1
*/
import { isPresent } from '@ember/utils';
import { inject as service } from '@ember/service';
import Controller from '@ember/controller';
const CONFIG_ATTRS = {
// ssh
configured: false,
// aws root config
iamEndpoint: null,
stsEndpoint: null,
accessKey: null,
secretKey: null,
region: '',
};
export default Controller.extend(CONFIG_ATTRS, {
queryParams: ['tab'],
tab: '',
flashMessages: service(),
loading: false,
reset() {
this.model.rollbackAttributes();
this.setProperties(CONFIG_ATTRS);
},
actions: {
saveConfig(options = { delete: false }) {
const isDelete = options.delete;
if (this.model.type === 'ssh') {
this.set('loading', true);
this.model
.saveCA({ isDelete })
.then(() => {
this.send('refreshRoute');
this.set('configured', !isDelete);
if (isDelete) {
this.flashMessages.success('SSH Certificate Authority Configuration deleted!');
} else {
this.flashMessages.success('SSH Certificate Authority Configuration saved!');
}
})
.catch((error) => {
const errorMessage = error.errors ? error.errors.join('. ') : error;
this.flashMessages.danger(errorMessage);
})
.finally(() => {
this.set('loading', false);
});
}
},
save(method, data) {
this.set('loading', true);
const hasData = Object.keys(data).some((key) => {
return isPresent(data[key]);
});
if (!hasData) {
return;
}
this.model
.save({
adapterOptions: {
adapterMethod: method,
data,
},
})
.then(() => {
this.reset();
this.flashMessages.success('The backend configuration saved successfully!');
})
.finally(() => {
this.set('loading', false);
});
},
},
});
| ui/app/controllers/vault/cluster/settings/configure-secret-backend.js | 0 | https://github.com/hashicorp/vault/commit/87857e61f547e58d8bfe24b72e4844ba675da1f3 | [
0.00017479565576650202,
0.00017221203597728163,
0.00017042936815414578,
0.00017155861132778227,
0.0000014128304428595584
] |
{
"id": 2,
"code_window": [
"import syncHandlers from 'vault/mirage/handlers/sync';\n",
"import authPage from 'vault/tests/pages/auth';\n",
"import { click } from '@ember/test-helpers';\n",
"import { PAGE as ts } from 'vault/tests/helpers/sync/sync-selectors';\n",
"\n",
"module('Acceptance | enterprise | sync | destination', function (hooks) {\n",
" setupApplicationTest(hooks);\n",
" setupMirage(hooks);\n",
"\n",
" hooks.beforeEach(async function () {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// sync is an enterprise feature but since mirage is used the enterprise label has been intentionally omitted from the module name\n",
"module('Acceptance | sync | destination', function (hooks) {\n"
],
"file_path": "ui/tests/acceptance/sync/secrets/overview-test.js",
"type": "replace",
"edit_start_line_idx": 14
} | /**
* Copyright (c) HashiCorp, Inc.
* SPDX-License-Identifier: BUSL-1.1
*/
import Model, { attr } from '@ember-data/model';
export default class MountConfigModel extends Model {
@attr({
label: 'Default Lease TTL',
editType: 'ttl',
})
defaultLeaseTtl;
@attr({
label: 'Max Lease TTL',
editType: 'ttl',
})
maxLeaseTtl;
@attr({
label: 'Request keys excluded from HMACing in audit',
editType: 'stringArray',
helpText: "Keys that will not be HMAC'd by audit devices in the request data object.",
})
auditNonHmacRequestKeys;
@attr({
label: 'Response keys excluded from HMACing in audit',
editType: 'stringArray',
helpText: "Keys that will not be HMAC'd by audit devices in the response data object.",
})
auditNonHmacResponseKeys;
@attr('mountVisibility', {
editType: 'boolean',
label: 'List method when unauthenticated',
defaultValue: false,
})
listingVisibility;
@attr({
label: 'Allowed passthrough request headers',
helpText: 'Headers to allow and pass from the request to the backend',
editType: 'stringArray',
})
passthroughRequestHeaders;
@attr({
label: 'Allowed response headers',
helpText: 'Headers to allow, allowing a plugin to include them in the response.',
editType: 'stringArray',
})
allowedResponseHeaders;
@attr('string', {
label: 'Token Type',
helpText:
'The type of token that should be generated via this role. For `default-service` and `default-batch` service and batch tokens will be issued respectively, unless the auth method explicitly requests a different type.',
possibleValues: ['default-service', 'default-batch', 'batch', 'service'],
noDefault: true,
})
tokenType;
@attr({
editType: 'stringArray',
})
allowedManagedKeys;
}
| ui/app/models/mount-config.js | 0 | https://github.com/hashicorp/vault/commit/87857e61f547e58d8bfe24b72e4844ba675da1f3 | [
0.00023083992709871382,
0.00018166830705013126,
0.00016784275067038834,
0.0001742350432323292,
0.000020280986063880846
] |
{
"id": 0,
"code_window": [
"ENV RCLONE_BASE_DIR=/mnt\n",
"ENV RCLONE_VERBOSE=0\n",
"\n",
"WORKDIR /data\n",
"ENTRYPOINT [\"/usr/local/bin/rclone\"]\n",
"CMD [\"serve\", \"docker\"]"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"ENV HTTP_PROXY=\n",
"ENV HTTPS_PROXY=\n",
"ENV NO_PROXY=\n",
"\n"
],
"file_path": "cmd/serve/docker/contrib/plugin/Dockerfile",
"type": "add",
"edit_start_line_idx": 28
} | ARG BASE_IMAGE=rclone/rclone:latest
ARG BUILD_PLATFORM=linux/amd64
ARG TARGET_PLATFORM=linux/amd64
# temporary build image
FROM --platform=${BUILD_PLATFORM} golang:alpine AS BUILD_ENV
# ARGs declared before the first FROM are not visible inside a build stage
# unless re-declared, so repeat TARGET_PLATFORM for use in the RUN below
ARG TARGET_PLATFORM
COPY . /src
WORKDIR /src
RUN apk add --no-cache make git bash && \
CGO_ENABLED=0 \
GOARCH=$(echo ${TARGET_PLATFORM} | cut -d '/' -f2) \
make rclone
# plugin image
FROM ${BASE_IMAGE}
COPY --from=BUILD_ENV /src/rclone /usr/local/bin/rclone
RUN mkdir -p /data/config /data/cache /mnt \
&& /usr/local/bin/rclone version
ENV RCLONE_CONFIG=/data/config/rclone.conf
ENV RCLONE_CACHE_DIR=/data/cache
ENV RCLONE_BASE_DIR=/mnt
ENV RCLONE_VERBOSE=0
WORKDIR /data
ENTRYPOINT ["/usr/local/bin/rclone"]
CMD ["serve", "docker"]
| cmd/serve/docker/contrib/plugin/Dockerfile | 1 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.9922130703926086,
0.43021082878112793,
0.017523441463708878,
0.35555341839790344,
0.36781060695648193
] |
{
"id": 0,
"code_window": [
"ENV RCLONE_BASE_DIR=/mnt\n",
"ENV RCLONE_VERBOSE=0\n",
"\n",
"WORKDIR /data\n",
"ENTRYPOINT [\"/usr/local/bin/rclone\"]\n",
"CMD [\"serve\", \"docker\"]"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"ENV HTTP_PROXY=\n",
"ENV HTTPS_PROXY=\n",
"ENV NO_PROXY=\n",
"\n"
],
"file_path": "cmd/serve/docker/contrib/plugin/Dockerfile",
"type": "add",
"edit_start_line_idx": 28
} | package vfstest
import (
"os"
"runtime"
"testing"
"time"
"github.com/rclone/rclone/vfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestFileModTime tests mod times on files
func TestFileModTime(t *testing.T) {
run.skipIfNoFUSE(t)
run.createFile(t, "file", "123")
mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)
err := run.os.Chtimes(run.path("file"), mtime, mtime)
require.NoError(t, err)
info, err := run.os.Stat(run.path("file"))
require.NoError(t, err)
// avoid errors because of timezone differences
assert.Equal(t, info.ModTime().Unix(), mtime.Unix())
run.rm(t, "file")
}
// run.os.Create without opening for write too
func osCreate(name string) (vfs.OsFiler, error) {
return run.os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
}
// run.os.Create with append
func osAppend(name string) (vfs.OsFiler, error) {
return run.os.OpenFile(name, os.O_WRONLY|os.O_APPEND, 0666)
}
// TestFileModTimeWithOpenWriters tests mod time on open files
func TestFileModTimeWithOpenWriters(t *testing.T) {
run.skipIfNoFUSE(t)
if runtime.GOOS == "windows" {
t.Skip("Skipping test on Windows")
}
mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)
filepath := run.path("cp-archive-test")
f, err := osCreate(filepath)
require.NoError(t, err)
_, err = f.Write([]byte{104, 105})
require.NoError(t, err)
err = run.os.Chtimes(filepath, mtime, mtime)
require.NoError(t, err)
err = f.Close()
require.NoError(t, err)
run.waitForWriters()
info, err := run.os.Stat(filepath)
require.NoError(t, err)
// avoid errors because of timezone differences
assert.Equal(t, info.ModTime().Unix(), mtime.Unix())
run.rm(t, "cp-archive-test")
}
| vfs/vfstest/file.go | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.0023143554572016,
0.0009995370637625456,
0.0001758179860189557,
0.0005338529008440673,
0.0008797788759693503
] |
{
"id": 0,
"code_window": [
"ENV RCLONE_BASE_DIR=/mnt\n",
"ENV RCLONE_VERBOSE=0\n",
"\n",
"WORKDIR /data\n",
"ENTRYPOINT [\"/usr/local/bin/rclone\"]\n",
"CMD [\"serve\", \"docker\"]"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"ENV HTTP_PROXY=\n",
"ENV HTTPS_PROXY=\n",
"ENV NO_PROXY=\n",
"\n"
],
"file_path": "cmd/serve/docker/contrib/plugin/Dockerfile",
"type": "add",
"edit_start_line_idx": 28
} | // Package fs is a generic file system interface for rclone object storage systems
package fs
import (
"context"
"io"
"math"
"time"
"github.com/pkg/errors"
)
// Constants
const (
// ModTimeNotSupported is a very large precision value to show
// mod time isn't supported on this Fs
ModTimeNotSupported = 100 * 365 * 24 * time.Hour
// MaxLevel is a sentinel representing an infinite depth for listings
MaxLevel = math.MaxInt32
)
// Globals
var (
// ErrorNotFoundInConfigFile is returned by NewFs if not found in config file
ErrorNotFoundInConfigFile = errors.New("didn't find section in config file")
ErrorCantPurge = errors.New("can't purge directory")
ErrorCantCopy = errors.New("can't copy object - incompatible remotes")
ErrorCantMove = errors.New("can't move object - incompatible remotes")
ErrorCantDirMove = errors.New("can't move directory - incompatible remotes")
ErrorCantUploadEmptyFiles = errors.New("can't upload empty files to this remote")
ErrorDirExists = errors.New("can't copy directory - destination already exists")
ErrorCantSetModTime = errors.New("can't set modified time")
ErrorCantSetModTimeWithoutDelete = errors.New("can't set modified time without deleting existing object")
ErrorDirNotFound = errors.New("directory not found")
ErrorObjectNotFound = errors.New("object not found")
ErrorLevelNotSupported = errors.New("level value not supported")
ErrorListAborted = errors.New("list aborted")
ErrorListBucketRequired = errors.New("bucket or container name is needed in remote")
ErrorIsFile = errors.New("is a file not a directory")
ErrorIsDir = errors.New("is a directory not a file")
ErrorNotAFile = errors.New("is not a regular file")
ErrorNotDeleting = errors.New("not deleting files as there were IO errors")
ErrorNotDeletingDirs = errors.New("not deleting directories as there were IO errors")
ErrorOverlapping = errors.New("can't sync or move files on overlapping remotes")
ErrorDirectoryNotEmpty = errors.New("directory not empty")
ErrorImmutableModified = errors.New("immutable file modified")
ErrorPermissionDenied = errors.New("permission denied")
ErrorCantShareDirectories = errors.New("this backend can't share directories with link")
ErrorNotImplemented = errors.New("optional feature not implemented")
ErrorCommandNotFound = errors.New("command not found")
ErrorFileNameTooLong = errors.New("file name too long")
)
// CheckClose is a utility function used to check the return from
// Close in a defer statement.
func CheckClose(c io.Closer, err *error) {
cerr := c.Close()
if *err == nil {
*err = cerr
}
}
// FileExists returns true if a file remote exists.
// If remote is a directory, FileExists returns false.
func FileExists(ctx context.Context, fs Fs, remote string) (bool, error) {
_, err := fs.NewObject(ctx, remote)
if err != nil {
if err == ErrorObjectNotFound || err == ErrorNotAFile || err == ErrorPermissionDenied {
return false, nil
}
return false, err
}
return true, nil
}
// GetModifyWindow calculates the maximum modify window between the given Fses
// and the Config.ModifyWindow parameter.
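// For example, if one Fs reports 1s precision and another 1ns, the window is
// 1s (or the configured --modify-window if that is larger); if any Fs cannot
// preserve mod times at all, ModTimeNotSupported is returned.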
func GetModifyWindow(ctx context.Context, fss ...Info) time.Duration {
window := GetConfig(ctx).ModifyWindow
for _, f := range fss {
if f != nil {
precision := f.Precision()
if precision == ModTimeNotSupported {
return ModTimeNotSupported
}
if precision > window {
window = precision
}
}
}
return window
}
| fs/fs.go | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.005314122885465622,
0.0014368741540238261,
0.00024763416149653494,
0.0011927693849429488,
0.0014345869421958923
] |
{
"id": 0,
"code_window": [
"ENV RCLONE_BASE_DIR=/mnt\n",
"ENV RCLONE_VERBOSE=0\n",
"\n",
"WORKDIR /data\n",
"ENTRYPOINT [\"/usr/local/bin/rclone\"]\n",
"CMD [\"serve\", \"docker\"]"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"ENV HTTP_PROXY=\n",
"ENV HTTPS_PROXY=\n",
"ENV NO_PROXY=\n",
"\n"
],
"file_path": "cmd/serve/docker/contrib/plugin/Dockerfile",
"type": "add",
"edit_start_line_idx": 28
} | // Package proxy implements a programmable proxy for rclone serve
package proxy
import (
"bytes"
"context"
"crypto/sha256"
"crypto/subtle"
"encoding/json"
"os/exec"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
libcache "github.com/rclone/rclone/lib/cache"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
)
// Help contains text describing how to use the proxy
var Help = strings.Replace(`
### Auth Proxy
If you supply the parameter |--auth-proxy /path/to/program| then
rclone will use that program to generate backends on the fly which
then are used to authenticate incoming requests. This uses a simple
JSON based protocol with input on STDIN and output on STDOUT.
**PLEASE NOTE:** |--auth-proxy| and |--authorized-keys| cannot be used
together, if |--auth-proxy| is set the authorized keys option will be
ignored.
There is an example program
[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/test_proxy.py)
in the rclone source code.
The program's job is to take a |user| and |pass| on the input and turn
those into the config for a backend on STDOUT in JSON format. This
config will have any default parameters for the backend added, but it
won't use configuration from environment variables or command line
options - it is the job of the proxy program to make a complete
config.
This config generated must have this extra parameter
- |_root| - root to use for the backend
And it may have this parameter
- |_obscure| - comma separated strings for parameters to obscure
If password authentication was used by the client, input to the proxy
process (on STDIN) would look similar to this:
|||
{
"user": "me",
"pass": "mypassword"
}
|||
If public-key authentication was used by the client, input to the
proxy process (on STDIN) would look similar to this:
|||
{
"user": "me",
"public_key": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDuwESFdAe14hVS6omeyX7edc...JQdf"
}
|||
And as an example return this on STDOUT
|||
{
"type": "sftp",
"_root": "",
"_obscure": "pass",
"user": "me",
"pass": "mypassword",
"host": "sftp.example.com"
}
|||
This would mean that an SFTP backend would be created on the fly for
the |user| and |pass|/|public_key| returned in the output to the host given. Note
that since |_obscure| is set to |pass|, rclone will obscure the |pass|
parameter before creating the backend (which is required for sftp
backends).
The program can manipulate the supplied |user| in any way, for example
to make proxy to many different sftp backends, you could make the
|user| be |[email protected]| and then set the |host| to |example.com|
in the output and the user to |user|. For security you'd probably want
to restrict the |host| to a limited list.
Note that an internal cache is keyed on |user| so only use that for
configuration, don't use |pass| or |public_key|. This also means that if a user's
password or public-key is changed the cache will need to expire (which takes 5 mins)
before it takes effect.
This can be used to build general purpose proxies to any kind of
backend that rclone supports.
`, "|", "`", -1)
// Options is options for creating the proxy
type Options struct {
AuthProxy string
}
// DefaultOpt is the default values used for Opt
var DefaultOpt = Options{
AuthProxy: "",
}
// Proxy represents a proxy to turn auth requests into a VFS
type Proxy struct {
cmdLine []string // broken down command line
vfsCache *libcache.Cache
ctx context.Context // for global config
Opt Options
}
// cacheEntry is what is stored in the vfsCache
type cacheEntry struct {
vfs *vfs.VFS // stored VFS
pwHash [sha256.Size]byte // sha256 hash of the password/publicKey
}
// New creates a new proxy with the Options passed in
func New(ctx context.Context, opt *Options) *Proxy {
return &Proxy{
ctx: ctx,
Opt: *opt,
cmdLine: strings.Fields(opt.AuthProxy),
vfsCache: libcache.New(),
}
}
// run the proxy command returning a config map
func (p *Proxy) run(in map[string]string) (config configmap.Simple, err error) {
cmd := exec.Command(p.cmdLine[0], p.cmdLine[1:]...)
inBytes, err := json.MarshalIndent(in, "", "\t")
if err != nil {
return nil, errors.Wrap(err, "Proxy.Call failed to marshal input: %v")
}
var stdout, stderr bytes.Buffer
cmd.Stdin = bytes.NewBuffer(inBytes)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
	fs.Debugf(nil, "Calling proxy %v", p.cmdLine)
	start := time.Now()
	err = cmd.Run()
duration := time.Since(start)
if err != nil {
return nil, errors.Wrapf(err, "proxy: failed on %v: %q", p.cmdLine, strings.TrimSpace(string(stderr.Bytes())))
}
err = json.Unmarshal(stdout.Bytes(), &config)
if err != nil {
return nil, errors.Wrapf(err, "proxy: failed to read output: %q", string(stdout.Bytes()))
}
fs.Debugf(nil, "Proxy returned in %v", duration)
// Obscure any values in the config map that need it
obscureFields, ok := config.Get("_obscure")
if ok {
for _, key := range strings.Split(obscureFields, ",") {
value, ok := config.Get(key)
if ok {
obscuredValue, err := obscure.Obscure(value)
if err != nil {
return nil, errors.Wrap(err, "proxy")
}
config.Set(key, obscuredValue)
}
}
}
return config, nil
}
// call runs the auth proxy and returns a cacheEntry and an error
func (p *Proxy) call(user, auth string, isPublicKey bool) (value interface{}, err error) {
var config configmap.Simple
// Contact the proxy
if isPublicKey {
config, err = p.run(map[string]string{
"user": user,
"public_key": auth,
})
} else {
config, err = p.run(map[string]string{
"user": user,
"pass": auth,
})
}
if err != nil {
return nil, err
}
// Look for required fields in the answer
fsName, ok := config.Get("type")
if !ok {
return nil, errors.New("proxy: type not set in result")
}
root, ok := config.Get("_root")
if !ok {
return nil, errors.New("proxy: _root not set in result")
}
// Find the backend
fsInfo, err := fs.Find(fsName)
if err != nil {
return nil, errors.Wrapf(err, "proxy: couldn't find backend for %q", fsName)
}
// base name of config on user name. This may appear in logs
name := "proxy-" + user
fsString := name + ":" + root
// Look for fs in the VFS cache
value, err = p.vfsCache.Get(user, func(key string) (value interface{}, ok bool, err error) {
// Create the Fs from the cache
f, err := cache.GetFn(p.ctx, fsString, func(ctx context.Context, fsString string) (fs.Fs, error) {
// Update the config with the default values
for i := range fsInfo.Options {
o := &fsInfo.Options[i]
if _, found := config.Get(o.Name); !found && o.Default != nil && o.String() != "" {
config.Set(o.Name, o.String())
}
}
return fsInfo.NewFs(ctx, name, root, config)
})
if err != nil {
return nil, false, err
}
// We hash the auth here so we don't copy the auth more than we
// need to in memory. An attacker would find it easier to go
// after the unencrypted password in memory most likely.
entry := cacheEntry{
vfs: vfs.New(f, &vfsflags.Opt),
pwHash: sha256.Sum256([]byte(auth)),
}
return entry, true, nil
})
if err != nil {
return nil, errors.Wrapf(err, "proxy: failed to create backend")
}
return value, nil
}
// Call runs the auth proxy with the username and password/public key provided
// returning a *vfs.VFS and the key used in the VFS cache.
func (p *Proxy) Call(user, auth string, isPublicKey bool) (VFS *vfs.VFS, vfsKey string, err error) {
// Look in the cache first
value, ok := p.vfsCache.GetMaybe(user)
// If not found then call the proxy for a fresh answer
if !ok {
value, err = p.call(user, auth, isPublicKey)
if err != nil {
return nil, "", err
}
}
// check we got what we were expecting
entry, ok := value.(cacheEntry)
if !ok {
return nil, "", errors.Errorf("proxy: value is not cache entry: %#v", value)
}
// Check the password / public key is correct in the cached entry. This
// prevents an attack where subsequent requests for the same
// user don't have their auth checked. It does mean that if
// the password is changed, the user will have to wait for
// cache expiry (5m) before trying again.
authHash := sha256.Sum256([]byte(auth))
if subtle.ConstantTimeCompare(authHash[:], entry.pwHash[:]) != 1 {
if isPublicKey {
return nil, "", errors.New("proxy: incorrect public key")
}
return nil, "", errors.New("proxy: incorrect password")
}
return entry.vfs, user, nil
}
// Get VFS from the cache using key - returns nil if not found
func (p *Proxy) Get(key string) *vfs.VFS {
value, ok := p.vfsCache.GetMaybe(key)
if !ok {
return nil
}
entry := value.(cacheEntry)
return entry.vfs
}
| cmd/serve/proxy/proxy.go | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.006192473694682121,
0.0008207463542930782,
0.00017400459910277277,
0.00026010884903371334,
0.0012833857908844948
] |
{
"id": 1,
"code_window": [
" },\n",
" {\n",
" \"name\": \"RCLONE_BASE_DIR\",\n",
" \"value\": \"/mnt\"\n",
" }\n",
" ],\n",
" \"mounts\": [\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" },\n",
" {\n",
" \"name\": \"HTTP_PROXY\",\n",
" \"value\": \"\",\n",
" \"settable\": [\"value\"]\n",
" },\n",
" {\n",
" \"name\": \"HTTPS_PROXY\",\n",
" \"value\": \"\",\n",
" \"settable\": [\"value\"]\n",
" },\n",
" {\n",
" \"name\": \"NO_PROXY\",\n",
" \"value\": \"\",\n",
" \"settable\": [\"value\"]\n"
],
"file_path": "cmd/serve/docker/contrib/plugin/config.json",
"type": "add",
"edit_start_line_idx": 44
} | ---
title: "Docker Volume Plugin"
description: "Docker Volume Plugin"
---
# Docker Volume Plugin
## Introduction
Docker 1.9 has added support for creating
[named volumes](https://docs.docker.com/storage/volumes/) via
[command-line interface](https://docs.docker.com/engine/reference/commandline/volume_create/)
and mounting them in containers as a way to share data between them.
Since Docker 1.10 you can create named volumes with
[Docker Compose](https://docs.docker.com/compose/) by descriptions in
[docker-compose.yml](https://docs.docker.com/compose/compose-file/compose-file-v2/#volume-configuration-reference)
files for use by container groups on a single host.
As of Docker 1.12 volumes are supported by
[Docker Swarm](https://docs.docker.com/engine/swarm/key-concepts/)
included with Docker Engine and created from descriptions in
[swarm compose v3](https://docs.docker.com/compose/compose-file/compose-file-v3/#volume-configuration-reference)
files for use with _swarm stacks_ across multiple cluster nodes.
[Docker Volume Plugins](https://docs.docker.com/engine/extend/plugins_volume/)
augment the default `local` volume driver included in Docker with stateful
volumes shared across containers and hosts. Unlike local volumes, your
data will _not_ be deleted when such volume is removed. Plugins can run
managed by the docker daemon, as a native system service
(under systemd, _sysv_ or _upstart_) or as a standalone executable.
Rclone can run as docker volume plugin in all these modes.
It interacts with the local docker daemon
via [plugin API](https://docs.docker.com/engine/extend/plugin_api/) and
handles mounting of remote file systems into docker containers so it must
run on the same host as the docker daemon or on every Swarm node.
## Getting started
In the first example we will use the [SFTP](/sftp/)
rclone volume with Docker engine on a standalone Ubuntu machine.
Start from [installing Docker](https://docs.docker.com/engine/install/)
on the host.
The _FUSE_ driver is a prerequisite for rclone mounting and should be
installed on the host:
```
sudo apt-get -y install fuse
```
Create two directories required by rclone docker plugin:
```
sudo mkdir -p /var/lib/docker-plugins/rclone/config
sudo mkdir -p /var/lib/docker-plugins/rclone/cache
```
Install the managed rclone docker plugin:
```
docker plugin install rclone/docker-volume-rclone args="-v" --alias rclone --grant-all-permissions
docker plugin list
```
Create your [SFTP volume](/sftp/#standard-options):
```
docker volume create firstvolume -d rclone -o type=sftp -o sftp-host=_hostname_ -o sftp-user=_username_ -o sftp-pass=_password_ -o allow-other=true
```
Note that since all options are static, you don't even have to run
`rclone config` or create the `rclone.conf` file (but the `config` directory
should still be present). In the simplest case you can use `localhost`
as _hostname_ and your SSH credentials as _username_ and _password_.
You can also change the remote path to your home directory on the host,
for example `-o path=/home/username`.
Time to create a test container and mount the volume into it:
```
docker run --rm -it -v firstvolume:/mnt --workdir /mnt ubuntu:latest bash
```
If all goes well, you will enter the new container and change right to
the mounted SFTP remote. You can type `ls` to list the mounted directory
or otherwise play with it. Type `exit` when you are done.
The container will stop but the volume will stay, ready to be reused.
When it's not needed anymore, remove it:
```
docker volume list
docker volume remove firstvolume
```
Now let us try **something more elaborate**:
[Google Drive](/drive/) volume on multi-node Docker Swarm.
You should start from installing Docker and FUSE, creating plugin
directories and installing rclone plugin on _every_ swarm node.
Then [setup the Swarm](https://docs.docker.com/engine/swarm/swarm-mode/).
Google Drive volumes need an access token which can be set up via a web
browser and will be periodically renewed by rclone. The managed
plugin cannot run a browser so we will use a technique similar to the
[rclone setup on a headless box](/remote_setup/).
Run [rclone config](/commands/rclone_config_create/)
on _another_ machine equipped with _web browser_ and graphical user interface.
Create the [Google Drive remote](/drive/#standard-options).
When done, transfer the resulting `rclone.conf` to the Swarm cluster
and save as `/var/lib/docker-plugins/rclone/config/rclone.conf`
on _every_ node. By default this location is accessible only to the
root user so you will need appropriate privileges. The resulting config
will look like this:
```
[gdrive]
type = drive
scope = drive
drive_id = 1234567...
root_folder_id = 0Abcd...
token = {"access_token":...}
```
Now create the file named `example.yml` with a swarm stack description
like this:
```
version: '3'
services:
heimdall:
image: linuxserver/heimdall:latest
ports: [8080:80]
volumes: [configdata:/config]
volumes:
configdata:
driver: rclone
driver_opts:
remote: 'gdrive:heimdall'
allow_other: 'true'
vfs_cache_mode: full
poll_interval: 0
```
and run the stack:
```
docker stack deploy example -c ./example.yml
```
After a few seconds docker will spread the parsed stack description
over cluster, create the `example_heimdall` service on port _8080_,
run service containers on one or more cluster nodes and request
the `example_configdata` volume from rclone plugins on the node hosts.
You can use the following commands to confirm results:
```
docker service ls
docker service ps example_heimdall
docker volume ls
```
Point your browser to `http://cluster.host.address:8080` and play with
the service. Stop it with `docker stack remove example` when you are done.
Note that the `example_configdata` volume(s) created on demand at the
cluster nodes will not be automatically removed together with the stack
but stay for future reuse. You can remove them manually by invoking
the `docker volume remove example_configdata` command on every node.
## Creating Volumes via CLI
Volumes can be created with [docker volume create](https://docs.docker.com/engine/reference/commandline/volume_create/).
Here are a few examples:
```
docker volume create vol1 -d rclone -o remote=storj: -o vfs-cache-mode=full
docker volume create vol2 -d rclone -o remote=:tardigrade,access_grant=xxx:heimdall
docker volume create vol3 -d rclone -o type=tardigrade -o path=heimdall -o tardigrade-access-grant=xxx -o poll-interval=0
```
Note the `-d rclone` flag that tells docker to request volume from the
rclone driver. This works even if you installed managed driver by its full
name `rclone/docker-volume-rclone` because you provided the `--alias rclone`
option.
Volumes can be inspected as follows:
```
docker volume list
docker volume inspect vol1
```
## Volume Configuration
Rclone flags and volume options are set via the `-o` flag to the
`docker volume create` command. They include backend-specific parameters
as well as mount and _VFS_ options. Also there are a few
special `-o` options:
`remote`, `fs`, `type`, `path`, `mount-type` and `persist`.
`remote` determines an existing remote name from the config file, with
trailing colon and optionally with a remote path. See the full syntax in
the [rclone documentation](/docs/#syntax-of-remote-paths).
This option can be aliased as `fs` to prevent confusion with the
_remote_ parameter of such backends as _crypt_ or _alias_.
The `remote=:backend:dir/subdir` syntax can be used to create
[on-the-fly (config-less) remotes](/docs/#backend-path-to-dir),
while the `type` and `path` options provide a simpler alternative for this.
Using two split options
```
-o type=backend -o path=dir/subdir
```
is equivalent to the combined syntax
```
-o remote=:backend:dir/subdir
```
but is arguably easier to parameterize in scripts.
The `path` part is optional.
[Mount and VFS options](/commands/rclone_serve_docker/#options)
as well as [backend parameters](/flags/#backend-flags) are named
like their twin command-line flags without the `--` CLI prefix.
Optionally you can use underscores instead of dashes in option names.
For example, `--vfs-cache-mode full` becomes
`-o vfs-cache-mode=full` or `-o vfs_cache_mode=full`.
Boolean CLI flags without value will gain the `true` value, e.g.
`--allow-other` becomes `-o allow-other=true` or `-o allow_other=true`.
Please note that you can provide parameters only for the backend immediately
referenced by the backend type of mounted `remote`.
If this is a wrapping backend like _alias, chunker or crypt_, you cannot
provide options for the referred to remote or backend. This limitation is
imposed by the rclone connection string parser. The only workaround is to
feed plugin with `rclone.conf` or configure plugin arguments (see below).
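For example, a crypt remote wrapping the `gdrive` remote from the Swarm
example could be declared in `rclone.conf` (the remote names here are
illustrative):
```
[gcrypt]
type = crypt
remote = gdrive:encrypted
password = ...
```
and then referenced by name, so the wrapped backend needs no inline options:
```
docker volume create cryptvol -d rclone -o remote=gcrypt: -o allow-other=true
```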
## Special Volume Options
`mount-type` determines the mount method and in general can be one of:
`mount`, `cmount`, or `mount2`. This can be aliased as `mount_type`.
It should be noted that the managed rclone docker plugin currently does
not support the `cmount` method and `mount2` is rarely needed.
This option defaults to the first found method, which is usually `mount`
so you generally won't need it.
`persist` is a reserved boolean (true/false) option.
In future it will allow to persist on-the-fly remotes in the plugin
`rclone.conf` file.
## Connection Strings
The `remote` value can be extended
with [connection strings](/docs/#connection-strings)
as an alternative way to supply backend parameters. This is equivalent
to the `-o` backend options with one _syntactic difference_.
Inside connection string the backend prefix must be dropped from parameter
names but in the `-o param=value` array it must be present.
For instance, compare the following option array
```
-o remote=:sftp:/home -o sftp-host=localhost
```
with equivalent connection string:
```
-o remote=:sftp,host=localhost:/home
```
This difference exists because flag options `-o key=val` include not only
backend parameters but also mount/VFS flags and possibly other settings.
Also it allows to discriminate the `remote` option from the `crypt-remote`
(or similarly named backend parameters) and arguably simplifies scripting
due to clearer value substitution.
## Using with Swarm or Compose
Both _Docker Swarm_ and _Docker Compose_ use
[YAML](http://yaml.org/spec/1.2/spec.html)-formatted text files to describe
groups (stacks) of containers, their properties, networks and volumes.
_Compose_ uses the [compose v2](https://docs.docker.com/compose/compose-file/compose-file-v2/#volume-configuration-reference) format,
_Swarm_ uses the [compose v3](https://docs.docker.com/compose/compose-file/compose-file-v3/#volume-configuration-reference) format.
They are mostly similar, differences are explained in the
[docker documentation](https://docs.docker.com/compose/compose-file/compose-versioning/#upgrading).
Volumes are described by the children of the top-level `volumes:` node.
Each of them should be named after its volume and have at least two
elements, the self-explanatory `driver: rclone` value and the
`driver_opts:` structure playing the same role as `-o key=val` CLI flags:
```
volumes:
volume_name_1:
driver: rclone
driver_opts:
remote: 'gdrive:'
allow_other: 'true'
vfs_cache_mode: full
token: '{"type": "borrower", "expires": "2021-12-31"}'
poll_interval: 0
```
Notice a few important details:
- YAML prefers `_` in option names instead of `-`.
- YAML treats single and double quotes interchangeably.
Simple strings and integers can be left unquoted.
- Boolean values must be quoted like `'true'` or `"false"` because
these two words are reserved by YAML.
- The filesystem string is keyed with `remote` (or with `fs`).
Normally you can omit quotes here, but if the string ends with colon,
you **must** quote it like `remote: "storage_box:"`.
- YAML is picky about surrounding braces in values as this is in fact
another [syntax for key/value mappings](http://yaml.org/spec/1.2/spec.html#id2790832).
For example, JSON access tokens usually contain double quotes and
surrounding braces, so you must put them in single quotes.
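Putting these rules together, a minimal entry for a remote whose string
ends with a colon might look like this (the names are illustrative):
```
volumes:
  box:
    driver: rclone
    driver_opts:
      remote: "storage_box:"
      allow_other: 'true'
```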
## Installing as Managed Plugin
Docker daemon can install plugins from an image registry and run them managed.
We maintain the
[docker-volume-rclone](https://hub.docker.com/p/rclone/docker-volume-rclone/)
plugin image on [Docker Hub](https://hub.docker.com).
Rclone volume plugin requires **Docker Engine >= 19.03.15**
The plugin requires presence of two directories on the host before it can
be installed. Note that plugin will **not** create them automatically.
By default they must exist on host at the following locations
(though you can tweak the paths):
- `/var/lib/docker-plugins/rclone/config`
is reserved for the `rclone.conf` config file and **must** exist
even if it's empty and the config file is not present.
- `/var/lib/docker-plugins/rclone/cache`
holds the plugin state file as well as optional VFS caches.
You can [install managed plugin](https://docs.docker.com/engine/reference/commandline/plugin_install/)
with default settings as follows:
```
docker plugin install rclone/docker-volume-rclone:latest --grant-all-permissions --alias rclone
```
Managed plugin is in fact a special container running in a namespace separate
from normal docker containers. Inside it runs the `rclone serve docker`
command. The config and cache directories are bind-mounted into the
container at start. The docker daemon connects to a unix socket created
by the command inside the container. The command creates on-demand remote
mounts right inside, then docker machinery propagates them through kernel
mount namespaces and bind-mounts into requesting user containers.
You can tweak a few plugin settings after installation when it's disabled
(not in use), for instance:
```
docker plugin disable rclone
docker plugin set rclone RCLONE_VERBOSE=2 config=/etc/rclone args="--vfs-cache-mode=writes --allow-other"
docker plugin enable rclone
docker plugin inspect rclone
```
Note that if docker refuses to disable the plugin, you should find and
remove all active volumes connected with it as well as containers and
swarm services that use them. This is rather tedious so please carefully
plan in advance.
You can tweak the following settings:
`args`, `config`, `cache`, and `RCLONE_VERBOSE`.
It's _your_ task to keep plugin settings in sync across swarm cluster nodes.
`args` sets command-line arguments for the `rclone serve docker` command
(_none_ by default). Arguments should be separated by space so you will
normally want to put them in quotes on the
[docker plugin set](https://docs.docker.com/engine/reference/commandline/plugin_set/)
command line. Both [serve docker flags](/commands/rclone_serve_docker/#options)
and [generic rclone flags](/flags/) are supported, including backend
parameters that will be used as defaults for volume creation.
Note that plugin will fail (due to [this docker bug](https://github.com/moby/moby/blob/v20.10.7/plugin/v2/plugin.go#L195))
if the `args` value is empty. Use e.g. `args="-v"` as a workaround.
`config=/host/dir` sets alternative host location for the config directory.
Plugin will look for `rclone.conf` here. It's not an error if the config
file is not present but the directory must exist. Please note that plugin
can periodically rewrite the config file, for example when it renews
storage access tokens. Keep this in mind and try to avoid races between
the plugin and other instances of rclone on the host that might try to
change the config simultaneously resulting in corrupted `rclone.conf`.
You can also put stuff like private key files for SFTP remotes in this
directory. Just note that it's bind-mounted inside the plugin container
at the predefined path `/data/config`. For example, if your key file is
named `sftp-box1.key` on the host, the corresponding volume config option
should read `-o sftp-key-file=/data/config/sftp-box1.key`.
`cache=/host/dir` sets alternative host location for the _cache_ directory.
The plugin will keep VFS caches here. Also it will create and maintain
the `docker-plugin.state` file in this directory. When the plugin is
restarted or reinstalled, it will look in this file to recreate any volumes
that existed previously. However, they will not be re-mounted into
consuming containers after restart. Usually this is not a problem as
the docker daemon normally will restart affected user containers after
failures, daemon restarts or host reboots.
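For example, to relocate the cache to a bigger disk (the path is
illustrative; the directory must exist on the host, and the plugin must be
disabled while you change settings):
```
docker plugin disable rclone
docker plugin set rclone cache=/mnt/bigdisk/rclone-cache
docker plugin enable rclone
```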
`RCLONE_VERBOSE` sets plugin verbosity from `0` (errors only, by default)
to `2` (debugging). Verbosity can be also tweaked via `args="-v [-v] ..."`.
Since arguments are more generic, you will rarely need this setting.
The plugin output by default feeds the docker daemon log on local host.
Log entries are reflected as _errors_ in the docker log but retain their
actual level assigned by rclone in the encapsulated message string.
You can set custom plugin options right when you install it, _in one go_:
```
docker plugin remove rclone
docker plugin install rclone/docker-volume-rclone:latest \
--alias rclone --grant-all-permissions \
args="-v --allow-other" config=/etc/rclone
docker plugin inspect rclone
```
## Healthchecks
The docker plugin volume protocol doesn't provide a way for plugins
to inform the docker daemon that a volume is (un-)available.
As a workaround you can set up a healthcheck to verify that the mount
is responding, for example:
```
services:
my_service:
image: my_image
healthcheck:
test: ls /path/to/rclone/mount || exit 1
interval: 1m
timeout: 15s
retries: 3
start_period: 15s
```
## Running Plugin under Systemd
In most cases you should prefer managed mode. Moreover, MacOS and Windows
do not support native Docker plugins. Please use managed mode on these
systems. Proceed further only if you are on Linux.
First, [install rclone](/install/).
You can just run it (type `rclone serve docker` and hit enter) for the test.
Install _FUSE_:
```
sudo apt-get -y install fuse
```
Download two systemd configuration files:
[docker-volume-rclone.service](https://raw.githubusercontent.com/rclone/rclone/master/cmd/serve/docker/contrib/systemd/docker-volume-rclone.service)
and [docker-volume-rclone.socket](https://raw.githubusercontent.com/rclone/rclone/master/cmd/serve/docker/contrib/systemd/docker-volume-rclone.socket).
Put them in the `/etc/systemd/system/` directory:
```
cp docker-volume-rclone.service /etc/systemd/system/
cp docker-volume-rclone.socket /etc/systemd/system/
```
Please note that all commands in this section must be run as _root_ but
we omit `sudo` prefix for brevity.
Now create directories required by the service:
```
mkdir -p /var/lib/docker-volumes/rclone
mkdir -p /var/lib/docker-plugins/rclone/config
mkdir -p /var/lib/docker-plugins/rclone/cache
```
Run the docker plugin service in the socket activated mode:
```
systemctl daemon-reload
systemctl start docker-volume-rclone.service
systemctl enable docker-volume-rclone.socket
systemctl start docker-volume-rclone.socket
systemctl restart docker
```
Or run the service directly:
- run `systemctl daemon-reload` to let systemd pick up new config
- run `systemctl enable docker-volume-rclone.service` to make the new
service start automatically when you power on your machine.
- run `systemctl start docker-volume-rclone.service`
to start the service now.
- run `systemctl restart docker` to restart docker daemon and let it
detect the new plugin socket. Note that this step is not needed in
managed mode where docker knows about plugin state changes.
The two methods are equivalent from the user perspective, but I personally
prefer socket activation.
## Troubleshooting
You can [see managed plugin settings](https://docs.docker.com/engine/extend/#debugging-plugins)
with
```
docker plugin list
docker plugin inspect rclone
```
Note that docker (including latest 20.10.7) will not show actual values
of `args`, just the defaults.
Use `journalctl --unit docker` to see managed plugin output as part of
the docker daemon log. Note that docker reflects plugin lines as _errors_
but their actual level can be seen from encapsulated message string.
You will usually install the latest version of managed plugin.
Use the following commands to print the actual installed version:
```
PLUGID=$(docker plugin list --no-trunc | awk '/rclone/{print$1}')
sudo runc --root /run/docker/runtime-runc/plugins.moby exec $PLUGID rclone version
```
You can even use `runc` to run shell inside the plugin container:
```
sudo runc --root /run/docker/runtime-runc/plugins.moby exec --tty $PLUGID bash
```
Also you can use curl to check the plugin socket connectivity:
```
docker plugin list --no-trunc
PLUGID=123abc...
sudo curl -H Content-Type:application/json -XPOST -d {} --unix-socket /run/docker/plugins/$PLUGID/rclone.sock http://localhost/Plugin.Activate
```
though this is rarely needed.
Finally I'd like to mention a _caveat with updating volume settings_.
Docker CLI does not have a dedicated command like `docker volume update`.
It may be tempting to invoke `docker volume create` with updated options
on existing volume, but there is a gotcha. The command will do nothing,
it won't even return an error. I hope that docker maintainers will fix
this some day. In the meantime be aware that you must remove your volume
before recreating it with new settings:
```
docker volume remove my_vol
docker volume create my_vol -d rclone -o opt1=new_val1 ...
```
and verify that settings did update:
```
docker volume list
docker volume inspect my_vol
```
If docker refuses to remove the volume, you should find containers
or swarm services that use it and stop them first.
| docs/content/docker.md | 1 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.0022357210982590914,
0.00023202269221656024,
0.00016295020759571344,
0.0001680831628618762,
0.00028679732349701226
] |
{
"id": 1,
"code_window": [
" },\n",
" {\n",
" \"name\": \"RCLONE_BASE_DIR\",\n",
" \"value\": \"/mnt\"\n",
" }\n",
" ],\n",
" \"mounts\": [\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" },\n",
" {\n",
" \"name\": \"HTTP_PROXY\",\n",
" \"value\": \"\",\n",
" \"settable\": [\"value\"]\n",
" },\n",
" {\n",
" \"name\": \"HTTPS_PROXY\",\n",
" \"value\": \"\",\n",
" \"settable\": [\"value\"]\n",
" },\n",
" {\n",
" \"name\": \"NO_PROXY\",\n",
" \"value\": \"\",\n",
" \"settable\": [\"value\"]\n"
],
"file_path": "cmd/serve/docker/contrib/plugin/config.json",
"type": "add",
"edit_start_line_idx": 44
} | package encoder
import (
"fmt"
"regexp"
"strconv"
"strings"
"testing"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
)
// Check it satisfies the interfaces
var (
_ pflag.Value = (*MultiEncoder)(nil)
_ fmt.Scanner = (*MultiEncoder)(nil)
)
func TestEncodeString(t *testing.T) {
for _, test := range []struct {
mask MultiEncoder
want string
}{
{0, "None"},
{EncodeZero, "None"},
{EncodeDoubleQuote, "DoubleQuote"},
{EncodeDot, "Dot"},
{EncodeWin, "LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe"},
{EncodeHashPercent, "Hash,Percent"},
{EncodeSlash | EncodeDollar | EncodeColon, "Slash,Dollar,Colon"},
{EncodeSlash | (1 << 31), "Slash,0x80000000"},
} {
got := test.mask.String()
assert.Equal(t, test.want, got)
}
}
func TestEncodeSet(t *testing.T) {
for _, test := range []struct {
in string
want MultiEncoder
wantErr bool
}{
{"", 0, true},
{"None", 0, false},
{"None", EncodeZero, false},
{"DoubleQuote", EncodeDoubleQuote, false},
{"Dot", EncodeDot, false},
{"LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe", EncodeWin, false},
{"Hash,Percent", EncodeHashPercent, false},
{"Slash,Dollar,Colon", EncodeSlash | EncodeDollar | EncodeColon, false},
{"Slash,0x80000000", EncodeSlash | (1 << 31), false},
{"Blerp", 0, true},
{"0xFGFFF", 0, true},
} {
var got MultiEncoder
err := got.Set(test.in)
assert.Equal(t, test.wantErr, err != nil, err)
assert.Equal(t, test.want, got, test.in)
}
}
type testCase struct {
mask MultiEncoder
in string
out string
}
func TestEncodeSingleMask(t *testing.T) {
for i, tc := range testCasesSingle {
e := tc.mask
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
got := e.Encode(tc.in)
if got != tc.out {
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
}
got2 := e.Decode(got)
if got2 != tc.in {
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
}
})
}
}
func TestEncodeSingleMaskEdge(t *testing.T) {
for i, tc := range testCasesSingleEdge {
e := tc.mask
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
got := e.Encode(tc.in)
if got != tc.out {
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
}
got2 := e.Decode(got)
if got2 != tc.in {
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
}
})
}
}
func TestEncodeDoubleMaskEdge(t *testing.T) {
for i, tc := range testCasesDoubleEdge {
e := tc.mask
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
got := e.Encode(tc.in)
if got != tc.out {
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
}
got2 := e.Decode(got)
if got2 != tc.in {
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
}
})
}
}
func TestEncodeInvalidUnicode(t *testing.T) {
for i, tc := range []testCase{
{
mask: EncodeInvalidUtf8,
in: "\xBF",
out: "‛BF",
}, {
mask: EncodeInvalidUtf8,
in: "\xBF\xFE",
out: "‛BF‛FE",
}, {
mask: EncodeInvalidUtf8,
in: "a\xBF\xFEb",
out: "a‛BF‛FEb",
}, {
mask: EncodeInvalidUtf8,
in: "a\xBFξ\xFEb",
out: "a‛BFξ‛FEb",
}, {
mask: EncodeInvalidUtf8 | EncodeBackSlash,
in: "a\xBF\\\xFEb",
out: "a‛BF\‛FEb",
}, {
mask: 0,
in: "\xBF",
out: "\xBF",
}, {
mask: 0,
in: "\xBF\xFE",
out: "\xBF\xFE",
}, {
mask: 0,
in: "a\xBF\xFEb",
out: "a\xBF\xFEb",
}, {
mask: 0,
in: "a\xBFξ\xFEb",
out: "a\xBFξ\xFEb",
}, {
mask: EncodeBackSlash,
in: "a\xBF\\\xFEb",
out: "a\xBF\\xFEb",
},
} {
e := tc.mask
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
got := e.Encode(tc.in)
if got != tc.out {
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
}
got2 := e.Decode(got)
if got2 != tc.in {
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
}
})
}
}
func TestEncodeDot(t *testing.T) {
for i, tc := range []testCase{
{
mask: 0,
in: ".",
out: ".",
}, {
mask: EncodeDot,
in: ".",
out: ".",
}, {
mask: 0,
in: "..",
out: "..",
}, {
mask: EncodeDot,
in: "..",
out: "..",
}, {
mask: EncodeDot,
in: "...",
out: "...",
}, {
mask: EncodeDot,
in: ". .",
out: ". .",
},
} {
e := tc.mask
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
got := e.Encode(tc.in)
if got != tc.out {
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
}
got2 := e.Decode(got)
if got2 != tc.in {
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
}
})
}
}
func TestDecodeHalf(t *testing.T) {
for i, tc := range []testCase{
{
mask: 0,
in: "‛",
out: "‛",
}, {
mask: 0,
in: "‛‛",
out: "‛",
}, {
mask: 0,
in: "‛a‛",
out: "‛a‛",
}, {
mask: EncodeInvalidUtf8,
in: "a‛B‛Eg",
out: "a‛B‛Eg",
}, {
mask: EncodeInvalidUtf8,
in: "a‛B\‛Eg",
out: "a‛B\‛Eg",
}, {
mask: EncodeInvalidUtf8 | EncodeBackSlash,
in: "a‛B\‛Eg",
out: "a‛B\\‛Eg",
},
} {
e := tc.mask
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
got := e.Decode(tc.in)
if got != tc.out {
t.Errorf("Decode(%q) want %q got %q", tc.in, tc.out, got)
}
})
}
}
const oneDrive = (Standard |
EncodeWin |
EncodeBackSlash |
EncodeHashPercent |
EncodeDel |
EncodeCtl |
EncodeLeftTilde |
EncodeRightSpace |
EncodeRightPeriod)
var benchTests = []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:|#%".~`, `\*<>?:|#%".~`},
{`\*<>?:|#%".~/\*<>?:|#%".~`, `\*<>?:|#%".~/\*<>?:|#%".~`},
{" leading space", " leading space"},
{"~leading tilde", "~leading tilde"},
{"trailing dot.", "trailing dot."},
{" leading space/ leading space/ leading space", " leading space/ leading space/ leading space"},
{"~leading tilde/~leading tilde/~leading tilde", "~leading tilde/~leading tilde/~leading tilde"},
{"leading tilde/~leading tilde", "leading tilde/~leading tilde"},
{"trailing dot./trailing dot./trailing dot.", "trailing dot./trailing dot./trailing dot."},
}
func benchReplace(b *testing.B, f func(string) string) {
for range make([]struct{}, b.N) {
for _, test := range benchTests {
got := f(test.in)
if got != test.out {
b.Errorf("Encode(%q) want %q got %q", test.in, test.out, got)
}
}
}
}
func benchRestore(b *testing.B, f func(string) string) {
for range make([]struct{}, b.N) {
for _, test := range benchTests {
got := f(test.out)
if got != test.in {
b.Errorf("Decode(%q) want %q got %q", got, test.in, got)
}
}
}
}
func BenchmarkOneDriveReplaceNew(b *testing.B) {
benchReplace(b, oneDrive.Encode)
}
func BenchmarkOneDriveReplaceOld(b *testing.B) {
benchReplace(b, replaceReservedChars)
}
func BenchmarkOneDriveRestoreNew(b *testing.B) {
benchRestore(b, oneDrive.Decode)
}
func BenchmarkOneDriveRestoreOld(b *testing.B) {
benchRestore(b, restoreReservedChars)
}
var (
charMap = map[rune]rune{
'\\': '\', // FULLWIDTH REVERSE SOLIDUS
'*': '*', // FULLWIDTH ASTERISK
'<': '<', // FULLWIDTH LESS-THAN SIGN
'>': '>', // FULLWIDTH GREATER-THAN SIGN
'?': '?', // FULLWIDTH QUESTION MARK
':': ':', // FULLWIDTH COLON
'|': '|', // FULLWIDTH VERTICAL LINE
'#': '#', // FULLWIDTH NUMBER SIGN
'%': '%', // FULLWIDTH PERCENT SIGN
'"': '"', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'.': '.', // FULLWIDTH FULL STOP
'~': '~', // FULLWIDTH TILDE
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixEndingInPeriod = regexp.MustCompile(`\.(/|$)`)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
fixStartingWithTilde = regexp.MustCompile(`(/|^)~`)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Folder names can't end with a period '.'
in = fixEndingInPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
// OneDrive for Business file or folder names cannot begin with a tilde '~'
in = fixStartingWithTilde.ReplaceAllString(in, "$1"+string(charMap['~']))
// Apparently file names can't start with space either
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
// Encode reserved characters
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}
| lib/encoder/encoder_test.go | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.0024021603167057037,
0.00024184466747101396,
0.00016337759734597057,
0.00016886908269952983,
0.00036013650242239237
] |
{
"id": 1,
"code_window": [
" },\n",
" {\n",
" \"name\": \"RCLONE_BASE_DIR\",\n",
" \"value\": \"/mnt\"\n",
" }\n",
" ],\n",
" \"mounts\": [\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" },\n",
" {\n",
" \"name\": \"HTTP_PROXY\",\n",
" \"value\": \"\",\n",
" \"settable\": [\"value\"]\n",
" },\n",
" {\n",
" \"name\": \"HTTPS_PROXY\",\n",
" \"value\": \"\",\n",
" \"settable\": [\"value\"]\n",
" },\n",
" {\n",
" \"name\": \"NO_PROXY\",\n",
" \"value\": \"\",\n",
" \"settable\": [\"value\"]\n"
],
"file_path": "cmd/serve/docker/contrib/plugin/config.json",
"type": "add",
"edit_start_line_idx": 44
} | // Package vfs provides a virtual filing system layer over rclone's
// native objects.
//
// It attempts to behave in a similar way to Go's filing system
// manipulation code in the os package. The same named function
// should behave in an identical fashion. The objects also obey Go's
// standard interfaces.
//
// Note that paths don't start or end with /, so the root directory
// may be referred to as "". However Stat strips slashes so you can
// use paths with slashes in.
//
// It also includes directory caching
//
// The vfs package returns Error values to signal precisely which
// error conditions have occurred. It may also return general errors
// it receives. It tries to use os Error values (e.g. os.ErrExist)
// where possible.
//go:generate sh -c "go run make_open_tests.go | gofmt > open_test.go"
package vfs
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/vfs/vfscache"
"github.com/rclone/rclone/vfs/vfscommon"
)
// Node represents either a directory (*Dir) or a file (*File)
type Node interface {
os.FileInfo
IsFile() bool
Inode() uint64
SetModTime(modTime time.Time) error
Sync() error
Remove() error
RemoveAll() error
DirEntry() fs.DirEntry
VFS() *VFS
Open(flags int) (Handle, error)
Truncate(size int64) error
Path() string
SetSys(interface{})
}
// Check interfaces
var (
_ Node = (*File)(nil)
_ Node = (*Dir)(nil)
)
// Nodes is a slice of Node
type Nodes []Node
// Sort functions
func (ns Nodes) Len() int { return len(ns) }
func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
func (ns Nodes) Less(i, j int) bool { return ns[i].Path() < ns[j].Path() }
// Noder represents something which can return a node
type Noder interface {
fmt.Stringer
Node() Node
}
// Check interfaces
var (
_ Noder = (*File)(nil)
_ Noder = (*Dir)(nil)
_ Noder = (*ReadFileHandle)(nil)
_ Noder = (*WriteFileHandle)(nil)
_ Noder = (*RWFileHandle)(nil)
_ Noder = (*DirHandle)(nil)
)
// OsFiler is the methods on *os.File
type OsFiler interface {
Chdir() error
Chmod(mode os.FileMode) error
Chown(uid, gid int) error
Close() error
Fd() uintptr
Name() string
Read(b []byte) (n int, err error)
ReadAt(b []byte, off int64) (n int, err error)
Readdir(n int) ([]os.FileInfo, error)
Readdirnames(n int) (names []string, err error)
Seek(offset int64, whence int) (ret int64, err error)
Stat() (os.FileInfo, error)
Sync() error
Truncate(size int64) error
Write(b []byte) (n int, err error)
WriteAt(b []byte, off int64) (n int, err error)
WriteString(s string) (n int, err error)
}
// Handle is the interface satisfied by open files or directories.
// It is the methods on *os.File, plus a few more useful for FUSE
// filing systems. Not all of them are supported.
type Handle interface {
OsFiler
// Additional methods useful for FUSE filesystems
Flush() error
Release() error
Node() Node
// Size() int64
}
// baseHandle implements all the missing methods
type baseHandle struct{}
func (h baseHandle) Chdir() error { return ENOSYS }
func (h baseHandle) Chmod(mode os.FileMode) error { return ENOSYS }
func (h baseHandle) Chown(uid, gid int) error { return ENOSYS }
func (h baseHandle) Close() error { return ENOSYS }
func (h baseHandle) Fd() uintptr { return 0 }
func (h baseHandle) Name() string { return "" }
func (h baseHandle) Read(b []byte) (n int, err error) { return 0, ENOSYS }
func (h baseHandle) ReadAt(b []byte, off int64) (n int, err error) { return 0, ENOSYS }
func (h baseHandle) Readdir(n int) ([]os.FileInfo, error) { return nil, ENOSYS }
func (h baseHandle) Readdirnames(n int) (names []string, err error) { return nil, ENOSYS }
func (h baseHandle) Seek(offset int64, whence int) (ret int64, err error) { return 0, ENOSYS }
func (h baseHandle) Stat() (os.FileInfo, error) { return nil, ENOSYS }
func (h baseHandle) Sync() error { return nil }
func (h baseHandle) Truncate(size int64) error { return ENOSYS }
func (h baseHandle) Write(b []byte) (n int, err error) { return 0, ENOSYS }
func (h baseHandle) WriteAt(b []byte, off int64) (n int, err error) { return 0, ENOSYS }
func (h baseHandle) WriteString(s string) (n int, err error) { return 0, ENOSYS }
func (h baseHandle) Flush() (err error) { return ENOSYS }
func (h baseHandle) Release() (err error) { return ENOSYS }
func (h baseHandle) Node() Node { return nil }
//func (h baseHandle) Size() int64 { return 0 }
// Check interfaces
var (
_ OsFiler = (*os.File)(nil)
_ Handle = (*baseHandle)(nil)
_ Handle = (*ReadFileHandle)(nil)
_ Handle = (*WriteFileHandle)(nil)
_ Handle = (*DirHandle)(nil)
)
// VFS represents the top level filing system
type VFS struct {
f fs.Fs
root *Dir
Opt vfscommon.Options
cache *vfscache.Cache
cancelCache context.CancelFunc
usageMu sync.Mutex
usageTime time.Time
usage *fs.Usage
pollChan chan time.Duration
inUse int32 // count of number of opens accessed with atomic
}
// Keep track of active VFS keyed on fs.ConfigString(f)
var (
activeMu sync.Mutex
active = map[string][]*VFS{}
)
// New creates a new VFS and root directory. If opt is nil, then
// DefaultOpt will be used
func New(f fs.Fs, opt *vfscommon.Options) *VFS {
fsDir := fs.NewDir("", time.Now())
vfs := &VFS{
f: f,
inUse: int32(1),
}
// Make a copy of the options
if opt != nil {
vfs.Opt = *opt
} else {
vfs.Opt = vfscommon.DefaultOpt
}
// Mask the permissions with the umask
vfs.Opt.DirPerms &= ^os.FileMode(vfs.Opt.Umask)
vfs.Opt.FilePerms &= ^os.FileMode(vfs.Opt.Umask)
// Make sure directories are returned as directories
vfs.Opt.DirPerms |= os.ModeDir
// Find a VFS with the same name and options and return it if possible
activeMu.Lock()
defer activeMu.Unlock()
configName := fs.ConfigString(f)
for _, activeVFS := range active[configName] {
if vfs.Opt == activeVFS.Opt {
fs.Debugf(f, "Re-using VFS from active cache")
atomic.AddInt32(&activeVFS.inUse, 1)
return activeVFS
}
}
// Put the VFS into the active cache
active[configName] = append(active[configName], vfs)
// Create root directory
vfs.root = newDir(vfs, f, nil, fsDir)
// Start polling function
features := vfs.f.Features()
if do := features.ChangeNotify; do != nil {
vfs.pollChan = make(chan time.Duration)
do(context.TODO(), vfs.root.changeNotify, vfs.pollChan)
vfs.pollChan <- vfs.Opt.PollInterval
} else if vfs.Opt.PollInterval > 0 {
fs.Infof(f, "poll-interval is not supported by this remote")
}
// Warn if can't stream
if !vfs.Opt.ReadOnly && vfs.Opt.CacheMode < vfscommon.CacheModeWrites && features.PutStream == nil {
fs.Logf(f, "--vfs-cache-mode writes or full is recommended for this remote as it can't stream")
}
vfs.SetCacheMode(vfs.Opt.CacheMode)
// Pin the Fs into the cache so that when we use cache.NewFs
// with the same remote string we get this one. The Pin is
// removed when the vfs is finalized
cache.PinUntilFinalized(f, vfs)
return vfs
}
// Return the number of active cache entries and a VFS if any are in
// the cache.
func activeCacheEntries() (vfs *VFS, count int) {
activeMu.Lock()
for _, vfses := range active {
count += len(vfses)
if len(vfses) > 0 {
vfs = vfses[0]
}
}
activeMu.Unlock()
return vfs, count
}
// Fs returns the Fs passed into the New call
func (vfs *VFS) Fs() fs.Fs {
return vfs.f
}
// SetCacheMode change the cache mode
func (vfs *VFS) SetCacheMode(cacheMode vfscommon.CacheMode) {
vfs.shutdownCache()
vfs.cache = nil
if cacheMode > vfscommon.CacheModeOff {
ctx, cancel := context.WithCancel(context.Background())
cache, err := vfscache.New(ctx, vfs.f, &vfs.Opt, vfs.AddVirtual) // FIXME pass on context or get from Opt?
if err != nil {
fs.Errorf(nil, "Failed to create vfs cache - disabling: %v", err)
vfs.Opt.CacheMode = vfscommon.CacheModeOff
cancel()
return
}
vfs.Opt.CacheMode = cacheMode
vfs.cancelCache = cancel
vfs.cache = cache
}
}
// shutdown the cache if it was running
func (vfs *VFS) shutdownCache() {
if vfs.cancelCache != nil {
vfs.cancelCache()
vfs.cancelCache = nil
}
}
// Shutdown stops any background go-routines and removes the VFS from
// the active cache.
func (vfs *VFS) Shutdown() {
if atomic.AddInt32(&vfs.inUse, -1) > 0 {
return
}
// Remove from active cache
activeMu.Lock()
configName := fs.ConfigString(vfs.f)
activeVFSes := active[configName]
for i, activeVFS := range activeVFSes {
if activeVFS == vfs {
activeVFSes[i] = nil
active[configName] = append(activeVFSes[:i], activeVFSes[i+1:]...)
break
}
}
activeMu.Unlock()
vfs.shutdownCache()
}
// CleanUp deletes the contents of the on disk cache
func (vfs *VFS) CleanUp() error {
if vfs.Opt.CacheMode == vfscommon.CacheModeOff {
return nil
}
return vfs.cache.CleanUp()
}
// FlushDirCache empties the directory cache
func (vfs *VFS) FlushDirCache() {
vfs.root.ForgetAll()
}
// WaitForWriters sleeps until all writers have finished or
// time.Duration has elapsed
func (vfs *VFS) WaitForWriters(timeout time.Duration) {
defer log.Trace(nil, "timeout=%v", timeout)("")
tickTime := 10 * time.Millisecond
deadline := time.NewTimer(timeout)
defer deadline.Stop()
tick := time.NewTimer(tickTime)
defer tick.Stop()
tick.Stop()
for {
writers := vfs.root.countActiveWriters()
cacheInUse := 0
if vfs.cache != nil {
cacheInUse = vfs.cache.TotalInUse()
}
if writers == 0 && cacheInUse == 0 {
return
}
fs.Debugf(nil, "Still %d writers active and %d cache items in use, waiting %v", writers, cacheInUse, tickTime)
tick.Reset(tickTime)
select {
case <-tick.C:
break
case <-deadline.C:
fs.Errorf(nil, "Exiting even though %d writers active and %d cache items in use after %v\n%s", writers, cacheInUse, timeout, vfs.cache.Dump())
return
}
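// Back off the polling interval exponentially, capping it at one second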
tickTime *= 2
if tickTime > time.Second {
tickTime = time.Second
}
}
}
// Root returns the root node
func (vfs *VFS) Root() (*Dir, error) {
// fs.Debugf(vfs.f, "Root()")
return vfs.root, nil
}
var inodeCount uint64
// newInode creates a new unique inode number
func newInode() (inode uint64) {
return atomic.AddUint64(&inodeCount, 1)
}
// Stat finds the Node by path starting from the root
//
// It is the equivalent of os.Stat - Node contains the os.FileInfo
// interface.
func (vfs *VFS) Stat(path string) (node Node, err error) {
path = strings.Trim(path, "/")
node = vfs.root
for path != "" {
i := strings.IndexRune(path, '/')
var name string
if i < 0 {
name, path = path, ""
} else {
name, path = path[:i], path[i+1:]
}
if name == "" {
continue
}
dir, ok := node.(*Dir)
if !ok {
// We need to look in a directory, but found a file
return nil, ENOENT
}
node, err = dir.Stat(name)
if err != nil {
return nil, err
}
}
return
}
// StatParent finds the parent directory and the leaf name of a path
func (vfs *VFS) StatParent(name string) (dir *Dir, leaf string, err error) {
name = strings.Trim(name, "/")
parent, leaf := path.Split(name)
node, err := vfs.Stat(parent)
if err != nil {
return nil, "", err
}
if node.IsFile() {
return nil, "", os.ErrExist
}
dir = node.(*Dir)
return dir, leaf, nil
}
// decodeOpenFlags returns a string representing the open flags
func decodeOpenFlags(flags int) string {
var out []string
rdwrMode := flags & accessModeMask
switch rdwrMode {
case os.O_RDONLY:
out = append(out, "O_RDONLY")
case os.O_WRONLY:
out = append(out, "O_WRONLY")
case os.O_RDWR:
out = append(out, "O_RDWR")
default:
out = append(out, fmt.Sprintf("0x%X", rdwrMode))
}
if flags&os.O_APPEND != 0 {
out = append(out, "O_APPEND")
}
if flags&os.O_CREATE != 0 {
out = append(out, "O_CREATE")
}
if flags&os.O_EXCL != 0 {
out = append(out, "O_EXCL")
}
if flags&os.O_SYNC != 0 {
out = append(out, "O_SYNC")
}
if flags&os.O_TRUNC != 0 {
out = append(out, "O_TRUNC")
}
flags &^= accessModeMask | os.O_APPEND | os.O_CREATE | os.O_EXCL | os.O_SYNC | os.O_TRUNC
if flags != 0 {
out = append(out, fmt.Sprintf("0x%X", flags))
}
return strings.Join(out, "|")
}
// OpenFile a file according to the flags and perm provided
func (vfs *VFS) OpenFile(name string, flags int, perm os.FileMode) (fd Handle, err error) {
defer log.Trace(name, "flags=%s, perm=%v", decodeOpenFlags(flags), perm)("fd=%v, err=%v", &fd, &err)
// http://pubs.opengroup.org/onlinepubs/7908799/xsh/open.html
// The result of using O_TRUNC with O_RDONLY is undefined.
// Linux seems to truncate the file, but we prefer to return EINVAL
if flags&accessModeMask == os.O_RDONLY && flags&os.O_TRUNC != 0 {
return nil, EINVAL
}
node, err := vfs.Stat(name)
if err != nil {
if err != ENOENT || flags&os.O_CREATE == 0 {
return nil, err
}
// If not found and O_CREATE then create the file
dir, leaf, err := vfs.StatParent(name)
if err != nil {
return nil, err
}
node, err = dir.Create(leaf, flags)
if err != nil {
return nil, err
}
}
return node.Open(flags)
}
// Open opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
func (vfs *VFS) Open(name string) (Handle, error) {
return vfs.OpenFile(name, os.O_RDONLY, 0)
}
// Create creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned
// File can be used for I/O; the associated file descriptor has mode
// O_RDWR.
func (vfs *VFS) Create(name string) (Handle, error) {
return vfs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
}
// Rename oldName to newName
func (vfs *VFS) Rename(oldName, newName string) error {
// find the parent directories
oldDir, oldLeaf, err := vfs.StatParent(oldName)
if err != nil {
return err
}
newDir, newLeaf, err := vfs.StatParent(newName)
if err != nil {
return err
}
err = oldDir.Rename(oldLeaf, newLeaf, newDir)
if err != nil {
return err
}
return nil
}
// This works out the missing values from (total, used, free) using
// unknownFree as the intended free space
func fillInMissingSizes(total, used, free, unknownFree int64) (newTotal, newUsed, newFree int64) {
if total < 0 {
if free >= 0 {
total = free
} else {
total = unknownFree
}
if used >= 0 {
total += used
}
}
// total is now defined
if used < 0 {
if free >= 0 {
used = total - free
} else {
used = 0
}
}
// used is now defined
if free < 0 {
free = total - used
}
return total, used, free
}
// If the total size isn't known then we will aim for this many bytes free (1 PiB)
const unknownFreeBytes = 1 << 50
// Statfs returns info about the filing system if known
//
// The values will be -1 if they aren't known
//
// This information is cached for the DirCacheTime interval
func (vfs *VFS) Statfs() (total, used, free int64) {
// defer log.Trace("/", "")("total=%d, used=%d, free=%d", &total, &used, &free)
vfs.usageMu.Lock()
defer vfs.usageMu.Unlock()
total, used, free = -1, -1, -1
doAbout := vfs.f.Features().About
if (doAbout != nil || vfs.Opt.UsedIsSize) && (vfs.usageTime.IsZero() || time.Since(vfs.usageTime) >= vfs.Opt.DirCacheTime) {
var err error
ctx := context.TODO()
if doAbout == nil {
vfs.usage = &fs.Usage{}
} else {
vfs.usage, err = doAbout(ctx)
}
if vfs.Opt.UsedIsSize {
var usedBySizeAlgorithm int64
// Algorithm from `rclone size`
err = walk.ListR(ctx, vfs.f, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(func(o fs.Object) {
usedBySizeAlgorithm += o.Size()
})
return nil
})
vfs.usage.Used = &usedBySizeAlgorithm
}
vfs.usageTime = time.Now()
if err != nil {
fs.Errorf(vfs.f, "Statfs failed: %v", err)
return
}
}
if u := vfs.usage; u != nil {
if u.Total != nil {
total = *u.Total
}
if u.Free != nil {
free = *u.Free
}
if u.Used != nil {
used = *u.Used
}
}
total, used, free = fillInMissingSizes(total, used, free, unknownFreeBytes)
return
}
// Remove removes the named file or (empty) directory.
func (vfs *VFS) Remove(name string) error {
node, err := vfs.Stat(name)
if err != nil {
return err
}
err = node.Remove()
if err != nil {
return err
}
return nil
}
// Chtimes changes the access and modification times of the named file, similar
// to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a less precise
// time unit.
func (vfs *VFS) Chtimes(name string, atime time.Time, mtime time.Time) error {
node, err := vfs.Stat(name)
if err != nil {
return err
}
err = node.SetModTime(mtime)
if err != nil {
return err
}
return nil
}
// Mkdir creates a new directory with the specified name and permission bits
// (before umask).
func (vfs *VFS) Mkdir(name string, perm os.FileMode) error {
dir, leaf, err := vfs.StatParent(name)
if err != nil {
return err
}
_, err = dir.Mkdir(leaf)
if err != nil {
return err
}
return nil
}
// ReadDir reads the directory named by dirname and returns
// a list of directory entries sorted by filename.
func (vfs *VFS) ReadDir(dirname string) ([]os.FileInfo, error) {
f, err := vfs.Open(dirname)
if err != nil {
return nil, err
}
list, err := f.Readdir(-1)
closeErr := f.Close()
if err != nil {
return nil, err
}
if closeErr != nil {
return nil, closeErr
}
sort.Slice(list, func(i, j int) bool { return list[i].Name() < list[j].Name() })
return list, nil
}
// ReadFile reads the file named by filename and returns the contents.
// A successful call returns err == nil, not err == EOF. Because ReadFile
// reads the whole file, it does not treat an EOF from Read as an error
// to be reported.
func (vfs *VFS) ReadFile(filename string) (b []byte, err error) {
f, err := vfs.Open(filename)
if err != nil {
return nil, err
}
defer fs.CheckClose(f, &err)
return ioutil.ReadAll(f)
}
// AddVirtual adds the object (file or dir) to the directory cache
func (vfs *VFS) AddVirtual(remote string, size int64, isDir bool) error {
dir, leaf, err := vfs.StatParent(remote)
if err != nil {
return err
}
dir.AddVirtual(leaf, size, isDir)
return nil
}
| vfs/vfs.go | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.0018825693987309933,
0.0002280433545820415,
0.0001636861707083881,
0.0001698792038951069,
0.0002722613571677357
] |
{
"id": 1,
"code_window": [
" },\n",
" {\n",
" \"name\": \"RCLONE_BASE_DIR\",\n",
" \"value\": \"/mnt\"\n",
" }\n",
" ],\n",
" \"mounts\": [\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" },\n",
" {\n",
" \"name\": \"HTTP_PROXY\",\n",
" \"value\": \"\",\n",
" \"settable\": [\"value\"]\n",
" },\n",
" {\n",
" \"name\": \"HTTPS_PROXY\",\n",
" \"value\": \"\",\n",
" \"settable\": [\"value\"]\n",
" },\n",
" {\n",
" \"name\": \"NO_PROXY\",\n",
" \"value\": \"\",\n",
" \"settable\": [\"value\"]\n"
],
"file_path": "cmd/serve/docker/contrib/plugin/config.json",
"type": "add",
"edit_start_line_idx": 44
} | package accounting
import (
"context"
"fmt"
"sort"
"strings"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
)
// transferMap holds name to transfer map
type transferMap struct {
mu sync.RWMutex
items map[string]*Transfer
name string
}
// newTransferMap creates a new empty transfer map of capacity size
func newTransferMap(size int, name string) *transferMap {
return &transferMap{
items: make(map[string]*Transfer, size),
name: name,
}
}
// add adds a new transfer to the map
func (tm *transferMap) add(tr *Transfer) {
tm.mu.Lock()
tm.items[tr.remote] = tr
tm.mu.Unlock()
}
// del removes a transfer from the map by name
func (tm *transferMap) del(remote string) {
tm.mu.Lock()
delete(tm.items, remote)
tm.mu.Unlock()
}
// merge adds items from another map
func (tm *transferMap) merge(m *transferMap) {
tm.mu.Lock()
m.mu.Lock()
for name, tr := range m.items {
tm.items[name] = tr
}
m.mu.Unlock()
tm.mu.Unlock()
}
// empty returns whether the map has any items
func (tm *transferMap) empty() bool {
tm.mu.RLock()
defer tm.mu.RUnlock()
return len(tm.items) == 0
}
// count returns the number of items in the map
func (tm *transferMap) count() int {
tm.mu.RLock()
defer tm.mu.RUnlock()
return len(tm.items)
}
// _sortedSlice returns all transfers sorted by start time
//
// Call with mu.Rlock held
func (tm *transferMap) _sortedSlice() []*Transfer {
s := make([]*Transfer, 0, len(tm.items))
for _, tr := range tm.items {
s = append(s, tr)
}
// sort by time first and if equal by name. Note that the relatively
// low time resolution on Windows can cause equal times.
sort.Slice(s, func(i, j int) bool {
a, b := s[i], s[j]
if a.startedAt.Before(b.startedAt) {
return true
} else if !a.startedAt.Equal(b.startedAt) {
return false
}
return a.remote < b.remote
})
return s
}
// String returns string representation of map items excluding any in
// exclude (if set).
func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude *transferMap) string {
tm.mu.RLock()
defer tm.mu.RUnlock()
ci := fs.GetConfig(ctx)
stringList := make([]string, 0, len(tm.items))
for _, tr := range tm._sortedSlice() {
if exclude != nil {
exclude.mu.RLock()
_, found := exclude.items[tr.remote]
exclude.mu.RUnlock()
if found {
continue
}
}
var out string
if acc := progress.get(tr.remote); acc != nil {
out = acc.String()
} else {
out = fmt.Sprintf("%*s: %s",
ci.StatsFileNameLength,
shortenName(tr.remote, ci.StatsFileNameLength),
tm.name,
)
}
stringList = append(stringList, " * "+out)
}
return strings.Join(stringList, "\n")
}
// progress returns total bytes read as well as the size.
func (tm *transferMap) progress(stats *StatsInfo) (totalBytes, totalSize int64) {
tm.mu.RLock()
defer tm.mu.RUnlock()
for name := range tm.items {
if acc := stats.inProgress.get(name); acc != nil {
bytes, size := acc.progress()
if size >= 0 && bytes >= 0 {
totalBytes += bytes
totalSize += size
}
}
}
return totalBytes, totalSize
}
// remotes returns a []string of the remote names for the transferMap
func (tm *transferMap) remotes() (c []string) {
tm.mu.RLock()
defer tm.mu.RUnlock()
for _, tr := range tm._sortedSlice() {
c = append(c, tr.remote)
}
return c
}
// rcStats returns a []rc.Params of the stats for the transferMap
func (tm *transferMap) rcStats(progress *inProgress) (t []rc.Params) {
tm.mu.RLock()
defer tm.mu.RUnlock()
for _, tr := range tm._sortedSlice() {
if acc := progress.get(tr.remote); acc != nil {
t = append(t, acc.rcStats())
} else {
t = append(t, tr.rcStats())
}
}
return t
}
| fs/accounting/transfermap.go | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.0002975173410959542,
0.00018961673777084798,
0.00016206126019824296,
0.00016844310448504984,
0.00004536201959126629
] |
{
"id": 2,
"code_window": [
"remove all active volumes connected with it as well as containers and\n",
"swarm services that use them. This is rather tedious so please carefully\n",
"plan in advance.\n",
"\n",
"You can tweak the following settings:\n",
"`args`, `config`, `cache`, and `RCLONE_VERBOSE`.\n",
"It's _your_ task to keep plugin settings in sync across swarm cluster nodes.\n",
"\n",
"`args` sets command-line arguments for the `rclone serve docker` command\n",
"(_none_ by default). Arguments should be separated by space so you will\n",
"normally want to put them in quotes on the\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"`args`, `config`, `cache`, `HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY`\n",
"and `RCLONE_VERBOSE`.\n"
],
"file_path": "docs/content/docker.md",
"type": "replace",
"edit_start_line_idx": 349
} | ---
title: "Docker Volume Plugin"
description: "Docker Volume Plugin"
---
# Docker Volume Plugin
## Introduction
Docker 1.9 has added support for creating
[named volumes](https://docs.docker.com/storage/volumes/) via
[command-line interface](https://docs.docker.com/engine/reference/commandline/volume_create/)
and mounting them in containers as a way to share data between them.
Since Docker 1.10 you can create named volumes with
[Docker Compose](https://docs.docker.com/compose/) by descriptions in
[docker-compose.yml](https://docs.docker.com/compose/compose-file/compose-file-v2/#volume-configuration-reference)
files for use by container groups on a single host.
As of Docker 1.12 volumes are supported by
[Docker Swarm](https://docs.docker.com/engine/swarm/key-concepts/)
included with Docker Engine and created from descriptions in
[swarm compose v3](https://docs.docker.com/compose/compose-file/compose-file-v3/#volume-configuration-reference)
files for use with _swarm stacks_ across multiple cluster nodes.
[Docker Volume Plugins](https://docs.docker.com/engine/extend/plugins_volume/)
augment the default `local` volume driver included in Docker with stateful
volumes shared across containers and hosts. Unlike local volumes, your
data will _not_ be deleted when such a volume is removed. Plugins can run
managed by the docker daemon, as a native system service
(under systemd, _sysv_ or _upstart_) or as a standalone executable.
Rclone can run as docker volume plugin in all these modes.
It interacts with the local docker daemon
via [plugin API](https://docs.docker.com/engine/extend/plugin_api/) and
handles mounting of remote file systems into docker containers so it must
run on the same host as the docker daemon or on every Swarm node.
## Getting started
In the first example we will use the [SFTP](/sftp/)
rclone volume with Docker engine on a standalone Ubuntu machine.
Start by [installing Docker](https://docs.docker.com/engine/install/)
on the host.
The _FUSE_ driver is a prerequisite for rclone mounting and should be
installed on the host:
```
sudo apt-get -y install fuse
```
Create two directories required by the rclone docker plugin:
```
sudo mkdir -p /var/lib/docker-plugins/rclone/config
sudo mkdir -p /var/lib/docker-plugins/rclone/cache
```
Install the managed rclone docker plugin:
```
docker plugin install rclone/docker-volume-rclone args="-v" --alias rclone --grant-all-permissions
docker plugin list
```
Create your [SFTP volume](/sftp/#standard-options):
```
docker volume create firstvolume -d rclone -o type=sftp -o sftp-host=_hostname_ -o sftp-user=_username_ -o sftp-pass=_password_ -o allow-other=true
```
Note that since all options are static, you don't even have to run
`rclone config` or create the `rclone.conf` file (but the `config` directory
should still be present). In the simplest case you can use `localhost`
as _hostname_ and your SSH credentials as _username_ and _password_.
You can also change the remote path to your home directory on the host,
for example `-o path=/home/username`.
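For instance, assuming a local user `joe` (the user name and password
here are just placeholders):
```
docker volume create homevol -d rclone -o type=sftp -o sftp-host=localhost -o sftp-user=joe -o sftp-pass=secret -o path=/home/joe -o allow-other=true
```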
Time to create a test container and mount the volume into it:
```
docker run --rm -it -v firstvolume:/mnt --workdir /mnt ubuntu:latest bash
```
If all goes well, you will enter the new container and change right to
the mounted SFTP remote. You can type `ls` to list the mounted directory
or otherwise play with it. Type `exit` when you are done.
The container will stop but the volume will stay, ready to be reused.
When it's not needed anymore, remove it:
```
docker volume list
docker volume remove firstvolume
```
Now let us try **something more elaborate**:
[Google Drive](/drive/) volume on multi-node Docker Swarm.
You should start by installing Docker and FUSE, creating the plugin
directories and installing the rclone plugin on _every_ swarm node.
Then [setup the Swarm](https://docs.docker.com/engine/swarm/swarm-mode/).
Google Drive volumes need an access token which can be set up via a web
browser and will be periodically renewed by rclone. The managed
plugin cannot run a browser so we will use a technique similar to the
[rclone setup on a headless box](/remote_setup/).
Run [rclone config](/commands/rclone_config_create/)
on _another_ machine equipped with _web browser_ and graphical user interface.
Create the [Google Drive remote](/drive/#standard-options).
When done, transfer the resulting `rclone.conf` to the Swarm cluster
and save as `/var/lib/docker-plugins/rclone/config/rclone.conf`
on _every_ node. By default this location is accessible only to the
root user so you will need appropriate privileges. The resulting config
will look like this:
```
[gdrive]
type = drive
scope = drive
drive_id = 1234567...
root_folder_id = 0Abcd...
token = {"access_token":...}
```
Now create the file named `example.yml` with a swarm stack description
like this:
```
version: '3'
services:
heimdall:
image: linuxserver/heimdall:latest
ports: [8080:80]
volumes: [configdata:/config]
volumes:
configdata:
driver: rclone
driver_opts:
remote: 'gdrive:heimdall'
allow_other: 'true'
vfs_cache_mode: full
poll_interval: 0
```
and run the stack:
```
docker stack deploy example -c ./example.yml
```
After a few seconds docker will spread the parsed stack description
over the cluster, create the `example_heimdall` service on port _8080_,
run service containers on one or more cluster nodes and request
the `example_configdata` volume from rclone plugins on the node hosts.
You can use the following commands to confirm results:
```
docker service ls
docker service ps example_heimdall
docker volume ls
```
Point your browser to `http://cluster.host.address:8080` and play with
the service. Stop it with `docker stack remove example` when you are done.
Note that the `example_configdata` volume(s) created on demand at the
cluster nodes will not be automatically removed together with the stack
but stay for future reuse. You can remove them manually by invoking
the `docker volume remove example_configdata` command on every node.
## Creating Volumes via CLI
Volumes can be created with [docker volume create](https://docs.docker.com/engine/reference/commandline/volume_create/).
Here are a few examples:
```
docker volume create vol1 -d rclone -o remote=storj: -o vfs-cache-mode=full
docker volume create vol2 -d rclone -o remote=:tardigrade,access_grant=xxx:heimdall
docker volume create vol3 -d rclone -o type=tardigrade -o path=heimdall -o tardigrade-access-grant=xxx -o poll-interval=0
```
Note the `-d rclone` flag that tells docker to request the volume from the
rclone driver. This works even if you installed the managed driver by its full
name `rclone/docker-volume-rclone` because you provided the `--alias rclone`
option.
Volumes can be inspected as follows:
```
docker volume list
docker volume inspect vol1
```
## Volume Configuration
Rclone flags and volume options are set via the `-o` flag to the
`docker volume create` command. They include backend-specific parameters
as well as mount and _VFS_ options. Also there are a few
special `-o` options:
`remote`, `fs`, `type`, `path`, `mount-type` and `persist`.
`remote` determines an existing remote name from the config file, with
trailing colon and optionally with a remote path. See the full syntax in
the [rclone documentation](/docs/#syntax-of-remote-paths).
This option can be aliased as `fs` to prevent confusion with the
_remote_ parameter of such backends as _crypt_ or _alias_.
The `remote=:backend:dir/subdir` syntax can be used to create
[on-the-fly (config-less) remotes](/docs/#backend-path-to-dir),
while the `type` and `path` options provide a simpler alternative for this.
Using two split options
```
-o type=backend -o path=dir/subdir
```
is equivalent to the combined syntax
```
-o remote=:backend:dir/subdir
```
but is arguably easier to parameterize in scripts.
The `path` part is optional.
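As a sketch of such parameterization, a tiny wrapper script could accept
the backend type and path as arguments (everything here is illustrative):
```
#!/bin/sh
# usage: mkvol.sh <backend-type> <path>
docker volume create "vol-$1" -d rclone -o "type=$1" -o "path=$2"
```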
[Mount and VFS options](/commands/rclone_serve_docker/#options)
as well as [backend parameters](/flags/#backend-flags) are named
like their twin command-line flags without the `--` CLI prefix.
Optionally you can use underscores instead of dashes in option names.
For example, `--vfs-cache-mode full` becomes
`-o vfs-cache-mode=full` or `-o vfs_cache_mode=full`.
Boolean CLI flags without value will gain the `true` value, e.g.
`--allow-other` becomes `-o allow-other=true` or `-o allow_other=true`.
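For instance, the following two commands are equivalent ways to create
the same volume (the volume and remote names are placeholders):
```
docker volume create vol4 -d rclone -o remote=gdrive: -o vfs-cache-mode=full -o allow-other=true
docker volume create vol4 -d rclone -o remote=gdrive: -o vfs_cache_mode=full -o allow_other=true
```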
Please note that you can provide parameters only for the backend immediately
referenced by the backend type of the mounted `remote`.
If this is a wrapping backend like _alias, chunker or crypt_, you cannot
provide options for the remote or backend it refers to. This limitation is
imposed by the rclone connection string parser. The only workaround is to
feed the plugin with `rclone.conf` or configure plugin arguments (see below).
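As a sketch of the config file workaround, the wrapped chain could be
declared in `rclone.conf` (all names and values here are illustrative):
```
[gdrive]
type = drive
scope = drive
token = {"access_token":...}
[secret]
type = crypt
remote = gdrive:encrypted
password = ...
```
after which the volume simply references the top-level remote, e.g.
`docker volume create secretvol -d rclone -o remote=secret: -o allow-other=true`.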
## Special Volume Options
`mount-type` determines the mount method and in general can be one of:
`mount`, `cmount`, or `mount2`. This can be aliased as `mount_type`.
It should be noted that the managed rclone docker plugin currently does
not support the `cmount` method and `mount2` is rarely needed.
This option defaults to the first found method, which is usually `mount`
so you generally won't need it.
`persist` is a reserved boolean (true/false) option.
In the future it will allow persisting on-the-fly remotes in the plugin
`rclone.conf` file.
## Connection Strings
The `remote` value can be extended
with [connection strings](/docs/#connection-strings)
as an alternative way to supply backend parameters. This is equivalent
to the `-o` backend options with one _syntactic difference_.
Inside a connection string the backend prefix must be dropped from parameter
names, but in the `-o param=value` array it must be present.
For instance, compare the following option array
```
-o remote=:sftp:/home -o sftp-host=localhost
```
with equivalent connection string:
```
-o remote=:sftp,host=localhost:/home
```
This difference exists because flag options `-o key=val` include not only
backend parameters but also mount/VFS flags and possibly other settings.
Also it allows discriminating the `remote` option from the `crypt-remote`
(or similarly named backend parameters) and arguably simplifies scripting
due to clearer value substitution.
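To illustrate (the remote names are hypothetical), with split options the
volume filesystem and the crypt backend's own `remote` parameter remain
clearly distinct:
```
-o remote=:crypt:subdir -o crypt-remote=gdrive:encrypted
```
while inside a connection string both are spelled `remote`, with the inner
value quoted because it contains a colon:
```
-o remote=':crypt,remote="gdrive:encrypted":subdir'
```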
## Using with Swarm or Compose
Both _Docker Swarm_ and _Docker Compose_ use
[YAML](http://yaml.org/spec/1.2/spec.html)-formatted text files to describe
groups (stacks) of containers, their properties, networks and volumes.
_Compose_ uses the [compose v2](https://docs.docker.com/compose/compose-file/compose-file-v2/#volume-configuration-reference) format,
_Swarm_ uses the [compose v3](https://docs.docker.com/compose/compose-file/compose-file-v3/#volume-configuration-reference) format.
They are mostly similar, differences are explained in the
[docker documentation](https://docs.docker.com/compose/compose-file/compose-versioning/#upgrading).
Volumes are described by the children of the top-level `volumes:` node.
Each of them should be named after its volume and have at least two
elements, the self-explanatory `driver: rclone` value and the
`driver_opts:` structure playing the same role as `-o key=val` CLI flags:
```
volumes:
volume_name_1:
driver: rclone
driver_opts:
remote: 'gdrive:'
allow_other: 'true'
vfs_cache_mode: full
token: '{"type": "borrower", "expires": "2021-12-31"}'
poll_interval: 0
```
Notice a few important details:
- YAML prefers `_` in option names instead of `-`.
- YAML treats single and double quotes interchangeably.
Simple strings and integers can be left unquoted.
- Boolean values must be quoted like `'true'` or `"false"` because
these two words are reserved by YAML.
- The filesystem string is keyed with `remote` (or with `fs`).
  Normally you can omit quotes here, but if the string ends with a colon,
you **must** quote it like `remote: "storage_box:"`.
- YAML is picky about surrounding braces in values as this is in fact
another [syntax for key/value mappings](http://yaml.org/spec/1.2/spec.html#id2790832).
For example, JSON access tokens usually contain double quotes and
surrounding braces, so you must put them in single quotes.
## Installing as Managed Plugin
The Docker daemon can install plugins from an image registry and run them managed.
We maintain the
[docker-volume-rclone](https://hub.docker.com/p/rclone/docker-volume-rclone/)
plugin image on [Docker Hub](https://hub.docker.com).
The rclone volume plugin requires **Docker Engine >= 19.03.15**.
The plugin requires the presence of two directories on the host before it can
be installed. Note that the plugin will **not** create them automatically.
By default they must exist on the host at the following locations
(though you can tweak the paths):
- `/var/lib/docker-plugins/rclone/config`
is reserved for the `rclone.conf` config file and **must** exist
even if it's empty and the config file is not present.
- `/var/lib/docker-plugins/rclone/cache`
holds the plugin state file as well as optional VFS caches.
You can [install the managed plugin](https://docs.docker.com/engine/reference/commandline/plugin_install/)
with default settings as follows:
```
docker plugin install rclone/docker-volume-rclone:latest --grant-all-permissions --alias rclone
```
The managed plugin is in fact a special container running in a namespace separate
from normal docker containers. Inside it runs the `rclone serve docker`
command. The config and cache directories are bind-mounted into the
container at start. The docker daemon connects to a unix socket created
by the command inside the container. The command creates on-demand remote
mounts right inside, then the docker machinery propagates them through kernel
mount namespaces and bind-mounts into requesting user containers.
You can tweak a few plugin settings after installation when it's disabled
(not in use), for instance:
```
docker plugin disable rclone
docker plugin set rclone RCLONE_VERBOSE=2 config=/etc/rclone args="--vfs-cache-mode=writes --allow-other"
docker plugin enable rclone
docker plugin inspect rclone
```
Note that if docker refuses to disable the plugin, you should find and
remove all active volumes connected with it as well as containers and
swarm services that use them. This is rather tedious so please carefully
plan in advance.
You can tweak the following settings:
`args`, `config`, `cache`, and `RCLONE_VERBOSE`.
It's _your_ task to keep plugin settings in sync across swarm cluster nodes.
`args` sets command-line arguments for the `rclone serve docker` command
(_none_ by default). Arguments should be separated by space so you will
normally want to put them in quotes on the
[docker plugin set](https://docs.docker.com/engine/reference/commandline/plugin_set/)
command line. Both [serve docker flags](/commands/rclone_serve_docker/#options)
and [generic rclone flags](/flags/) are supported, including backend
parameters that will be used as defaults for volume creation.
Note that the plugin will fail (due to [this docker bug](https://github.com/moby/moby/blob/v20.10.7/plugin/v2/plugin.go#L195))
if the `args` value is empty. Use e.g. `args="-v"` as a workaround.
`config=/host/dir` sets an alternative host location for the config directory.
The plugin will look for `rclone.conf` here. It's not an error if the config
file is not present, but the directory must exist. Please note that the plugin
can periodically rewrite the config file, for example when it renews
storage access tokens. Keep this in mind and try to avoid races between
the plugin and other instances of rclone on the host that might try to
change the config simultaneously resulting in corrupted `rclone.conf`.
You can also put stuff like private key files for SFTP remotes in this
directory. Just note that it's bind-mounted inside the plugin container
at the predefined path `/data/config`. For example, if your key file is
named `sftp-box1.key` on the host, the corresponding volume config option
should read `-o sftp-key-file=/data/config/sftp-box1.key`.
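For example (host names and paths here are illustrative):
```
docker plugin disable rclone
docker plugin set rclone config=/etc/rclone
docker plugin enable rclone
docker volume create boxvol -d rclone -o remote=:sftp,host=box1,user=sftpuser:/data -o sftp-key-file=/data/config/sftp-box1.key
```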
`cache=/host/dir` sets an alternative host location for the _cache_ directory.
The plugin will keep VFS caches here. Also it will create and maintain
the `docker-plugin.state` file in this directory. When the plugin is
restarted or reinstalled, it will look in this file to recreate any volumes
that existed previously. However, they will not be re-mounted into
consuming containers after restart. Usually this is not a problem as
the docker daemon normally will restart affected user containers after
failures, daemon restarts or host reboots.
`RCLONE_VERBOSE` sets plugin verbosity from `0` (errors only, by default)
to `2` (debugging). Verbosity can also be tweaked via `args="-v [-v] ..."`.
Since arguments are more generic, you will rarely need this setting.
By default the plugin output feeds the docker daemon log on the local host.
Log entries are reflected as _errors_ in the docker log but retain their
actual level assigned by rclone in the encapsulated message string.
You can set custom plugin options right when you install it, _in one go_:
```
docker plugin remove rclone
docker plugin install rclone/docker-volume-rclone:latest \
--alias rclone --grant-all-permissions \
args="-v --allow-other" config=/etc/rclone
docker plugin inspect rclone
```
## Healthchecks
The docker plugin volume protocol doesn't provide a way for plugins
to inform the docker daemon that a volume is (un-)available.
As a workaround you can setup a healthcheck to verify that the mount
is responding, for example:
```
services:
my_service:
image: my_image
healthcheck:
test: ls /path/to/rclone/mount || exit 1
interval: 1m
timeout: 15s
retries: 3
start_period: 15s
```
## Running Plugin under Systemd
In most cases you should prefer managed mode. Moreover, macOS and Windows
do not support native Docker plugins. Please use managed mode on these
systems. Proceed further only if you are on Linux.
First, [install rclone](/install/).
You can just run it (type `rclone serve docker` and hit enter) for the test.
Install _FUSE_:
```
sudo apt-get -y install fuse
```
Download two systemd configuration files:
[docker-volume-rclone.service](https://raw.githubusercontent.com/rclone/rclone/master/cmd/serve/docker/contrib/systemd/docker-volume-rclone.service)
and [docker-volume-rclone.socket](https://raw.githubusercontent.com/rclone/rclone/master/cmd/serve/docker/contrib/systemd/docker-volume-rclone.socket).
Put them to the `/etc/systemd/system/` directory:
```
cp docker-volume-rclone.service /etc/systemd/system/
cp docker-volume-rclone.socket /etc/systemd/system/
```
Please note that all commands in this section must be run as _root_, but
we omit the `sudo` prefix for brevity.
Now create directories required by the service:
```
mkdir -p /var/lib/docker-volumes/rclone
mkdir -p /var/lib/docker-plugins/rclone/config
mkdir -p /var/lib/docker-plugins/rclone/cache
```
Run the docker plugin service in the socket activated mode:
```
systemctl daemon-reload
systemctl start docker-volume-rclone.service
systemctl enable docker-volume-rclone.socket
systemctl start docker-volume-rclone.socket
systemctl restart docker
```
Or run the service directly:
- run `systemctl daemon-reload` to let systemd pick up new config
- run `systemctl enable docker-volume-rclone.service` to make the new
service start automatically when you power on your machine.
- run `systemctl start docker-volume-rclone.service`
to start the service now.
- run `systemctl restart docker` to restart docker daemon and let it
detect the new plugin socket. Note that this step is not needed in
managed mode where docker knows about plugin state changes.
The two methods are equivalent from the user perspective, but I personally
prefer socket activation.
## Troubleshooting
You can [see managed plugin settings](https://docs.docker.com/engine/extend/#debugging-plugins)
with
```
docker plugin list
docker plugin inspect rclone
```
Note that docker (including latest 20.10.7) will not show actual values
of `args`, just the defaults.
Use `journalctl --unit docker` to see managed plugin output as part of
the docker daemon log. Note that docker reflects plugin lines as _errors_
but their actual level can be seen from the encapsulated message string.
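For example, to follow plugin messages in real time while reproducing a
problem (the `grep` filter is only a suggestion, adjust it to what your
daemon log actually contains):
```
journalctl --unit docker --follow | grep -i rclone
```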
You will usually install the latest version of the managed plugin.
Use the following commands to print the actual installed version:
```
PLUGID=$(docker plugin list --no-trunc | awk '/rclone/{print$1}')
sudo runc --root /run/docker/runtime-runc/plugins.moby exec $PLUGID rclone version
```
You can even use `runc` to run shell inside the plugin container:
```
sudo runc --root /run/docker/runtime-runc/plugins.moby exec --tty $PLUGID bash
```
Also you can use curl to check the plugin socket connectivity:
```
docker plugin list --no-trunc
PLUGID=123abc...
sudo curl -H Content-Type:application/json -XPOST -d {} --unix-socket /run/docker/plugins/$PLUGID/rclone.sock http://localhost/Plugin.Activate
```
though this is rarely needed.
Finally I'd like to mention a _caveat with updating volume settings_.
Docker CLI does not have a dedicated command like `docker volume update`.
It may be tempting to invoke `docker volume create` with updated options
on an existing volume, but there is a gotcha: the command will do nothing
and won't even return an error. I hope that the docker maintainers will fix
this some day. In the meantime be aware that you must remove your volume
before recreating it with new settings:
```
docker volume remove my_vol
docker volume create my_vol -d rclone -o opt1=new_val1 ...
```
and verify that settings did update:
```
docker volume list
docker volume inspect my_vol
```
If docker refuses to remove the volume, you should find containers
or swarm services that use it and stop them first.
| docs/content/docker.md | 1 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.991272509098053,
0.02361547015607357,
0.00016851851250976324,
0.0008698719320818782,
0.1350552886724472
] |
{
"id": 2,
"code_window": [
"remove all active volumes connected with it as well as containers and\n",
"swarm services that use them. This is rather tedious so please carefully\n",
"plan in advance.\n",
"\n",
"You can tweak the following settings:\n",
"`args`, `config`, `cache`, and `RCLONE_VERBOSE`.\n",
"It's _your_ task to keep plugin settings in sync across swarm cluster nodes.\n",
"\n",
"`args` sets command-line arguments for the `rclone serve docker` command\n",
"(_none_ by default). Arguments should be separated by space so you will\n",
"normally want to put them in quotes on the\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"`args`, `config`, `cache`, `HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY`\n",
"and `RCLONE_VERBOSE`.\n"
],
"file_path": "docs/content/docker.md",
"type": "replace",
"edit_start_line_idx": 349
} | package policy
import (
"context"
"path"
"sync"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("epall", &EpAll{})
}
// EpAll stands for existing path, all
// Action category: apply to all found.
// Create category: apply to all found.
// Search category: same as epff.
type EpAll struct {
EpFF
}
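// epall concurrently checks every upstream and returns those where filePath exists.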
func (p *EpAll) epall(ctx context.Context, upstreams []*upstream.Fs, filePath string) ([]*upstream.Fs, error) {
var wg sync.WaitGroup
ufs := make([]*upstream.Fs, len(upstreams))
for i, u := range upstreams {
wg.Add(1)
i, u := i, u // Closure
go func() {
rfs := u.RootFs
remote := path.Join(u.RootPath, filePath)
if findEntry(ctx, rfs, remote) != nil {
ufs[i] = u
}
wg.Done()
}()
}
wg.Wait()
var results []*upstream.Fs
for _, f := range ufs {
if f != nil {
results = append(results, f)
}
}
if len(results) == 0 {
return nil, fs.ErrorObjectNotFound
}
return results, nil
}
// Action category policy, governing the modification of files and directories
func (p *EpAll) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterRO(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
return p.epall(ctx, upstreams, path)
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpAll) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterROEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries, nil
}
// Create category policy, governing the creation of files and directories
func (p *EpAll) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
upstreams, err := p.epall(ctx, upstreams, path+"/..")
return upstreams, err
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpAll) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterNCEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries, nil
}
| backend/union/policy/epall.go | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.00016893183055799454,
0.0001670493365963921,
0.00016404117923229933,
0.00016734919336158782,
0.0000014837426078884164
] |
{
"id": 2,
"code_window": [
"remove all active volumes connected with it as well as containers and\n",
"swarm services that use them. This is rather tedious so please carefully\n",
"plan in advance.\n",
"\n",
"You can tweak the following settings:\n",
"`args`, `config`, `cache`, and `RCLONE_VERBOSE`.\n",
"It's _your_ task to keep plugin settings in sync across swarm cluster nodes.\n",
"\n",
"`args` sets command-line arguments for the `rclone serve docker` command\n",
"(_none_ by default). Arguments should be separated by space so you will\n",
"normally want to put them in quotes on the\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"`args`, `config`, `cache`, `HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY`\n",
"and `RCLONE_VERBOSE`.\n"
],
"file_path": "docs/content/docker.md",
"type": "replace",
"edit_start_line_idx": 349
} | package ls
import (
"fmt"
"sort"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/cobra"
)
// Globals
var (
listLong bool
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &listLong, "long", "", listLong, "Show the type as well as names.")
}
var commandDefinition = &cobra.Command{
Use: "listremotes",
Short: `List all the remotes in the config file.`,
Long: `
rclone listremotes lists all the available remotes from the config file.
When used with the -l flag it lists the types too.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 0, command, args)
remotes := config.FileSections()
sort.Strings(remotes)
maxlen := 1
for _, remote := range remotes {
if len(remote) > maxlen {
maxlen = len(remote)
}
}
for _, remote := range remotes {
if listLong {
remoteType := config.FileGet(remote, "type")
fmt.Printf("%-*s %s\n", maxlen+1, remote+":", remoteType)
} else {
fmt.Printf("%s:\n", remote)
}
}
},
}
| cmd/listremotes/listremotes.go | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.0012385407462716103,
0.00039689618279226124,
0.0001652259088587016,
0.00023070271709002554,
0.00038287995266728103
] |
{
"id": 2,
"code_window": [
"remove all active volumes connected with it as well as containers and\n",
"swarm services that use them. This is rather tedious so please carefully\n",
"plan in advance.\n",
"\n",
"You can tweak the following settings:\n",
"`args`, `config`, `cache`, and `RCLONE_VERBOSE`.\n",
"It's _your_ task to keep plugin settings in sync across swarm cluster nodes.\n",
"\n",
"`args` sets command-line arguments for the `rclone serve docker` command\n",
"(_none_ by default). Arguments should be separated by space so you will\n",
"normally want to put them in quotes on the\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"`args`, `config`, `cache`, `HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY`\n",
"and `RCLONE_VERBOSE`.\n"
],
"file_path": "docs/content/docker.md",
"type": "replace",
"edit_start_line_idx": 349
} | package gendocs
import (
"bytes"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"text/template"
"time"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/lib/file"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
"github.com/spf13/pflag"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
// define things which go into the frontmatter
type frontmatter struct {
Date string
Title string
Description string
Slug string
URL string
Source string
}
var frontmatterTemplate = template.Must(template.New("frontmatter").Parse(`---
title: "{{ .Title }}"
description: "{{ .Description }}"
slug: {{ .Slug }}
url: {{ .URL }}
# autogenerated - DO NOT EDIT, instead edit the source code in {{ .Source }} and as part of making a release run "make commanddocs"
---
`))
var commandDefinition = &cobra.Command{
Use: "gendocs output_directory",
Short: `Output markdown docs for rclone to the directory supplied.`,
Long: `
This produces markdown docs for the rclone commands to the directory
supplied. These are in a format suitable for hugo to render into the
rclone.org website.`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
now := time.Now().Format(time.RFC3339)
// Create the directory structure
root := args[0]
out := filepath.Join(root, "commands")
err := file.MkdirAll(out, 0777)
if err != nil {
return err
}
// Write the flags page
var buf bytes.Buffer
cmd.Root.SetOutput(&buf)
cmd.Root.SetArgs([]string{"help", "flags"})
cmd.GeneratingDocs = true
err = cmd.Root.Execute()
if err != nil {
return err
}
err = ioutil.WriteFile(filepath.Join(root, "flags.md"), buf.Bytes(), 0777)
if err != nil {
return err
}
// Look up name => description for prepender
var description = map[string]string{}
var addDescription func(root *cobra.Command)
addDescription = func(root *cobra.Command) {
name := strings.Replace(root.CommandPath(), " ", "_", -1) + ".md"
description[name] = root.Short
for _, c := range root.Commands() {
addDescription(c)
}
}
addDescription(cmd.Root)
// markup for the docs files
prepender := func(filename string) string {
name := filepath.Base(filename)
base := strings.TrimSuffix(name, path.Ext(name))
data := frontmatter{
Date: now,
Title: strings.Replace(base, "_", " ", -1),
Description: description[name],
Slug: base,
URL: "/commands/" + strings.ToLower(base) + "/",
Source: strings.Replace(strings.Replace(base, "rclone", "cmd", -1), "_", "/", -1) + "/",
}
var buf bytes.Buffer
err := frontmatterTemplate.Execute(&buf, data)
if err != nil {
log.Fatalf("Failed to render frontmatter template: %v", err)
}
return buf.String()
}
linkHandler := func(name string) string {
base := strings.TrimSuffix(name, path.Ext(name))
return "/commands/" + strings.ToLower(base) + "/"
}
		// Hide all of the root command's flags
cmd.Root.Flags().VisitAll(func(flag *pflag.Flag) {
flag.Hidden = true
})
err = doc.GenMarkdownTreeCustom(cmd.Root, out, prepender, linkHandler)
if err != nil {
return err
}
var outdentTitle = regexp.MustCompile(`(?m)^#(#+)`)
// Munge the files to add a link to the global flags page
err = filepath.Walk(out, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
b, err := ioutil.ReadFile(path)
if err != nil {
return err
}
doc := string(b)
doc = strings.Replace(doc, "\n### SEE ALSO", `
See the [global flags page](/flags/) for global options not listed here.
### SEE ALSO`, 1)
// outdent all the titles by one
doc = outdentTitle.ReplaceAllString(doc, `$1`)
err = ioutil.WriteFile(path, []byte(doc), 0777)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
return nil
},
}
| cmd/gendocs/gendocs.go | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.0010155829368159175,
0.00030438770772889256,
0.00016552978195250034,
0.00017256339197047055,
0.00028109579579904675
] |
{
"id": 3,
"code_window": [
"Log entries are reflected as _errors_ in the docker log but retain their\n",
"actual level assigned by rclone in the encapsulated message string.\n",
"\n",
"You can set custom plugin options right when you install it, _in one go_:\n",
"```\n",
"docker plugin remove rclone\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"`HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY` customize the plugin proxy settings.\n",
"\n"
],
"file_path": "docs/content/docker.md",
"type": "add",
"edit_start_line_idx": 391
} | ---
title: "Docker Volume Plugin"
description: "Docker Volume Plugin"
---
# Docker Volume Plugin
## Introduction
Docker 1.9 added support for creating
[named volumes](https://docs.docker.com/storage/volumes/) via
[command-line interface](https://docs.docker.com/engine/reference/commandline/volume_create/)
and mounting them in containers as a way to share data between them.
Since Docker 1.10 you can create named volumes with
[Docker Compose](https://docs.docker.com/compose/) by descriptions in
[docker-compose.yml](https://docs.docker.com/compose/compose-file/compose-file-v2/#volume-configuration-reference)
files for use by container groups on a single host.
As of Docker 1.12 volumes are supported by
[Docker Swarm](https://docs.docker.com/engine/swarm/key-concepts/)
included with Docker Engine and created from descriptions in
[swarm compose v3](https://docs.docker.com/compose/compose-file/compose-file-v3/#volume-configuration-reference)
files for use with _swarm stacks_ across multiple cluster nodes.
[Docker Volume Plugins](https://docs.docker.com/engine/extend/plugins_volume/)
augment the default `local` volume driver included in Docker with stateful
volumes shared across containers and hosts. Unlike local volumes, your
data will _not_ be deleted when such a volume is removed. Plugins can run
managed by the docker daemon, as a native system service
(under systemd, _sysv_ or _upstart_) or as a standalone executable.
Rclone can run as a docker volume plugin in all these modes.
It interacts with the local docker daemon
via [plugin API](https://docs.docker.com/engine/extend/plugin_api/) and
handles mounting of remote file systems into docker containers so it must
run on the same host as the docker daemon or on every Swarm node.
## Getting started
In the first example we will use the [SFTP](/sftp/)
rclone volume with Docker engine on a standalone Ubuntu machine.
Start from [installing Docker](https://docs.docker.com/engine/install/)
on the host.
The _FUSE_ driver is a prerequisite for rclone mounting and should be
installed on the host:
```
sudo apt-get -y install fuse
```
Create two directories required by rclone docker plugin:
```
sudo mkdir -p /var/lib/docker-plugins/rclone/config
sudo mkdir -p /var/lib/docker-plugins/rclone/cache
```
Install the managed rclone docker plugin:
```
docker plugin install rclone/docker-volume-rclone args="-v" --alias rclone --grant-all-permissions
docker plugin list
```
Create your [SFTP volume](/sftp/#standard-options):
```
docker volume create firstvolume -d rclone -o type=sftp -o sftp-host=_hostname_ -o sftp-user=_username_ -o sftp-pass=_password_ -o allow-other=true
```
Note that since all options are static, you don't even have to run
`rclone config` or create the `rclone.conf` file (but the `config` directory
should still be present). In the simplest case you can use `localhost`
as _hostname_ and your SSH credentials as _username_ and _password_.
You can also change the remote path to your home directory on the host,
for example `-o path=/home/username`.
Time to create a test container and mount the volume into it:
```
docker run --rm -it -v firstvolume:/mnt --workdir /mnt ubuntu:latest bash
```
If all goes well, you will enter the new container and land right in
the mounted SFTP remote. You can type `ls` to list the mounted directory
or otherwise play with it. Type `exit` when you are done.
The container will stop but the volume will stay, ready to be reused.
When it's not needed anymore, remove it:
```
docker volume list
docker volume remove firstvolume
```
Now let us try **something more elaborate**:
[Google Drive](/drive/) volume on multi-node Docker Swarm.
You should start from installing Docker and FUSE, creating plugin
directories and installing rclone plugin on _every_ swarm node.
Then [setup the Swarm](https://docs.docker.com/engine/swarm/swarm-mode/).
Google Drive volumes need an access token which can be setup via web
browser and will be periodically renewed by rclone. The managed
plugin cannot run a browser so we will use a technique similar to the
[rclone setup on a headless box](/remote_setup/).
Run [rclone config](/commands/rclone_config_create/)
on _another_ machine equipped with _web browser_ and graphical user interface.
Create the [Google Drive remote](/drive/#standard-options).
When done, transfer the resulting `rclone.conf` to the Swarm cluster
and save as `/var/lib/docker-plugins/rclone/config/rclone.conf`
on _every_ node. By default this location is accessible only to the
root user so you will need appropriate privileges. The resulting config
will look like this:
```
[gdrive]
type = drive
scope = drive
drive_id = 1234567...
root_folder_id = 0Abcd...
token = {"access_token":...}
```
Now create the file named `example.yml` with a swarm stack description
like this:
```
version: '3'
services:
heimdall:
image: linuxserver/heimdall:latest
ports: [8080:80]
volumes: [configdata:/config]
volumes:
configdata:
driver: rclone
driver_opts:
remote: 'gdrive:heimdall'
allow_other: 'true'
vfs_cache_mode: full
poll_interval: 0
```
and run the stack:
```
docker stack deploy example -c ./example.yml
```
After a few seconds docker will spread the parsed stack description
over the cluster, create the `example_heimdall` service on port _8080_,
run service containers on one or more cluster nodes and request
the `example_configdata` volume from rclone plugins on the node hosts.
You can use the following commands to confirm results:
```
docker service ls
docker service ps example_heimdall
docker volume ls
```
Point your browser to `http://cluster.host.address:8080` and play with
the service. Stop it with `docker stack remove example` when you are done.
Note that the `example_configdata` volume(s) created on demand at the
cluster nodes will not be automatically removed together with the stack
but will stay for future reuse. You can remove them manually by invoking
the `docker volume remove example_configdata` command on every node.
## Creating Volumes via CLI
Volumes can be created with [docker volume create](https://docs.docker.com/engine/reference/commandline/volume_create/).
Here are a few examples:
```
docker volume create vol1 -d rclone -o remote=storj: -o vfs-cache-mode=full
docker volume create vol2 -d rclone -o remote=:tardigrade,access_grant=xxx:heimdall
docker volume create vol3 -d rclone -o type=tardigrade -o path=heimdall -o tardigrade-access-grant=xxx -o poll-interval=0
```
Note the `-d rclone` flag that tells docker to request the volume from the
rclone driver. This works even if you installed the managed driver by its full
name `rclone/docker-volume-rclone` because you provided the `--alias rclone`
option.
Volumes can be inspected as follows:
```
docker volume list
docker volume inspect vol1
```
## Volume Configuration
Rclone flags and volume options are set via the `-o` flag to the
`docker volume create` command. They include backend-specific parameters
as well as mount and _VFS_ options. There are also a few
special `-o` options:
`remote`, `fs`, `type`, `path`, `mount-type` and `persist`.
`remote` determines an existing remote name from the config file, with
trailing colon and optionally with a remote path. See the full syntax in
the [rclone documentation](/docs/#syntax-of-remote-paths).
This option can be aliased as `fs` to prevent confusion with the
_remote_ parameter of such backends as _crypt_ or _alias_.
The `remote=:backend:dir/subdir` syntax can be used to create
[on-the-fly (config-less) remotes](/docs/#backend-path-to-dir),
while the `type` and `path` options provide a simpler alternative for this.
Using two split options
```
-o type=backend -o path=dir/subdir
```
is equivalent to the combined syntax
```
-o remote=:backend:dir/subdir
```
but is arguably easier to parameterize in scripts.
The `path` part is optional.
[Mount and VFS options](/commands/rclone_serve_docker/#options)
as well as [backend parameters](/flags/#backend-flags) are named
like their twin command-line flags without the `--` CLI prefix.
Optionally you can use underscores instead of dashes in option names.
For example, `--vfs-cache-mode full` becomes
`-o vfs-cache-mode=full` or `-o vfs_cache_mode=full`.
Boolean CLI flags without value will gain the `true` value, e.g.
`--allow-other` becomes `-o allow-other=true` or `-o allow_other=true`.
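Putting these rules together, a volume using several such options could be
created like this sketch (the `gdrive:` remote and the cache size limit are
illustrative):
```
docker volume create media -d rclone \
    -o remote=gdrive:media \
    -o vfs_cache_mode=full \
    -o vfs-cache-max-size=10G \
    -o allow_other=true
```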
Please note that you can provide parameters only for the backend immediately
referenced by the backend type of the mounted `remote`.
If this is a wrapping backend like _alias, chunker or crypt_, you cannot
provide options for the referred-to remote or backend. This limitation is
imposed by the rclone connection string parser. The only workaround is to
feed the plugin with `rclone.conf` or configure plugin arguments (see below).
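For example, to mount a crypt remote you would describe it in the config
file first (the names and the inner remote are illustrative):
```
[secret]
type = crypt
remote = gdrive:encrypted
password = ...
```
and then reference it by name, passing only mount and VFS options on the
command line:
```
docker volume create safe -d rclone -o remote=secret: -o allow-other=true
```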
## Special Volume Options
`mount-type` determines the mount method and in general can be one of:
`mount`, `cmount`, or `mount2`. This can be aliased as `mount_type`.
It should be noted that the managed rclone docker plugin currently does
not support the `cmount` method and `mount2` is rarely needed.
This option defaults to the first found method, which is usually `mount`
so you generally won't need it.
`persist` is a reserved boolean (true/false) option.
In the future it will allow persisting on-the-fly remotes in the plugin
`rclone.conf` file.
## Connection Strings
The `remote` value can be extended
with [connection strings](/docs/#connection-strings)
as an alternative way to supply backend parameters. This is equivalent
to the `-o` backend options with one _syntactic difference_.
Inside a connection string the backend prefix must be dropped from parameter
names, but in the `-o param=value` array it must be present.
For instance, compare the following option array
```
-o remote=:sftp:/home -o sftp-host=localhost
```
with equivalent connection string:
```
-o remote=:sftp,host=localhost:/home
```
This difference exists because flag options `-o key=val` include not only
backend parameters but also mount/VFS flags and possibly other settings.
It also allows discriminating the `remote` option from the `crypt-remote`
(or similarly named backend parameters) and arguably simplifies scripting
due to clearer value substitution.
## Using with Swarm or Compose
Both _Docker Swarm_ and _Docker Compose_ use
[YAML](http://yaml.org/spec/1.2/spec.html)-formatted text files to describe
groups (stacks) of containers, their properties, networks and volumes.
_Compose_ uses the [compose v2](https://docs.docker.com/compose/compose-file/compose-file-v2/#volume-configuration-reference) format,
_Swarm_ uses the [compose v3](https://docs.docker.com/compose/compose-file/compose-file-v3/#volume-configuration-reference) format.
They are mostly similar, differences are explained in the
[docker documentation](https://docs.docker.com/compose/compose-file/compose-versioning/#upgrading).
Volumes are described by the children of the top-level `volumes:` node.
Each of them should be named after its volume and have at least two
elements, the self-explanatory `driver: rclone` value and the
`driver_opts:` structure playing the same role as `-o key=val` CLI flags:
```
volumes:
volume_name_1:
driver: rclone
driver_opts:
remote: 'gdrive:'
allow_other: 'true'
vfs_cache_mode: full
token: '{"type": "borrower", "expires": "2021-12-31"}'
poll_interval: 0
```
Notice a few important details:
- YAML prefers `_` in option names instead of `-`.
- YAML treats single and double quotes interchangeably.
Simple strings and integers can be left unquoted.
- Boolean values must be quoted like `'true'` or `"false"` because
these two words are reserved by YAML.
- The filesystem string is keyed with `remote` (or with `fs`).
  Normally you can omit quotes here, but if the string ends with a colon,
you **must** quote it like `remote: "storage_box:"`.
- YAML is picky about surrounding braces in values as this is in fact
another [syntax for key/value mappings](http://yaml.org/spec/1.2/spec.html#id2790832).
For example, JSON access tokens usually contain double quotes and
surrounding braces, so you must put them in single quotes.
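Putting it together, a minimal compose file consuming such a volume might
look like this sketch (the image and the remote are illustrative):
```
version: '2'
services:
  backup:
    image: alpine
    command: ls /data
    volumes: [data:/data]
volumes:
  data:
    driver: rclone
    driver_opts:
      remote: 'gdrive:backup'
      allow_other: 'true'
```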
## Installing as Managed Plugin
Docker daemon can install plugins from an image registry and run them managed.
We maintain the
[docker-volume-rclone](https://hub.docker.com/p/rclone/docker-volume-rclone/)
plugin image on [Docker Hub](https://hub.docker.com).
The rclone volume plugin requires **Docker Engine >= 19.03.15**.
The plugin requires the presence of two directories on the host before it can
be installed. Note that the plugin will **not** create them automatically.
By default they must exist on host at the following locations
(though you can tweak the paths):
- `/var/lib/docker-plugins/rclone/config`
is reserved for the `rclone.conf` config file and **must** exist
even if it's empty and the config file is not present.
- `/var/lib/docker-plugins/rclone/cache`
holds the plugin state file as well as optional VFS caches.
You can [install managed plugin](https://docs.docker.com/engine/reference/commandline/plugin_install/)
with default settings as follows:
```
docker plugin install rclone/docker-volume-rclone:latest --grant-all-permissions --alias rclone
```
The managed plugin is in fact a special container running in a namespace separate
from normal docker containers. Inside it runs the `rclone serve docker`
command. The config and cache directories are bind-mounted into the
container at start. The docker daemon connects to a unix socket created
by the command inside the container. The command creates on-demand remote
mounts right inside, then the docker machinery propagates them through kernel
mount namespaces and bind-mounts them into requesting user containers.
You can tweak a few plugin settings after installation when it's disabled
(not in use), for instance:
```
docker plugin disable rclone
docker plugin set rclone RCLONE_VERBOSE=2 config=/etc/rclone args="--vfs-cache-mode=writes --allow-other"
docker plugin enable rclone
docker plugin inspect rclone
```
Note that if docker refuses to disable the plugin, you should find and
remove all active volumes connected with it as well as containers and
swarm services that use them. This is rather tedious so please carefully
plan in advance.
You can tweak the following settings:
`args`, `config`, `cache`, and `RCLONE_VERBOSE`.
It's _your_ task to keep plugin settings in sync across swarm cluster nodes.
`args` sets command-line arguments for the `rclone serve docker` command
(_none_ by default). Arguments should be separated by spaces so you will
normally want to put them in quotes on the
[docker plugin set](https://docs.docker.com/engine/reference/commandline/plugin_set/)
command line. Both [serve docker flags](/commands/rclone_serve_docker/#options)
and [generic rclone flags](/flags/) are supported, including backend
parameters that will be used as defaults for volume creation.
Note that the plugin will fail (due to [this docker bug](https://github.com/moby/moby/blob/v20.10.7/plugin/v2/plugin.go#L195))
if the `args` value is empty. Use e.g. `args="-v"` as a workaround.
`config=/host/dir` sets an alternative host location for the config directory.
Plugin will look for `rclone.conf` here. It's not an error if the config
file is not present but the directory must exist. Please note that plugin
can periodically rewrite the config file, for example when it renews
storage access tokens. Keep this in mind and try to avoid races between
the plugin and other instances of rclone on the host that might try to
change the config simultaneously, resulting in a corrupted `rclone.conf`.
You can also put stuff like private key files for SFTP remotes in this
directory. Just note that it's bind-mounted inside the plugin container
at the predefined path `/data/config`. For example, if your key file is
named `sftp-box1.key` on the host, the corresponding volume config option
should read `-o sftp-key-file=/data/config/sftp-box1.key`.
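For instance, after copying the key into the config directory you could
create the volume like this sketch (host, user and key name are illustrative):
```
docker volume create box1 -d rclone \
    -o type=sftp -o path=backup \
    -o sftp-host=box1.example.com -o sftp-user=backup \
    -o sftp-key-file=/data/config/sftp-box1.key
```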
`cache=/host/dir` sets an alternative host location for the _cache_ directory.
The plugin will keep VFS caches here. It will also create and maintain
the `docker-plugin.state` file in this directory. When the plugin is
restarted or reinstalled, it will look in this file to recreate any volumes
that existed previously. However, they will not be re-mounted into
consuming containers after restart. Usually this is not a problem as
the docker daemon will normally restart affected user containers after
failures, daemon restarts or host reboots.
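For example, to move the cache to a roomier disk you could run (the path is
illustrative):
```
docker plugin disable rclone
docker plugin set rclone cache=/mnt/ssd/docker-plugins/rclone/cache
docker plugin enable rclone
```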
`RCLONE_VERBOSE` sets plugin verbosity from `0` (errors only, by default)
to `2` (debugging). Verbosity can also be tweaked via `args="-v [-v] ..."`.
Since arguments are more generic, you will rarely need this setting.
The plugin output by default feeds the docker daemon log on local host.
Log entries are reflected as _errors_ in the docker log but retain their
actual level assigned by rclone in the encapsulated message string.
You can set custom plugin options right when you install it, _in one go_:
```
docker plugin remove rclone
docker plugin install rclone/docker-volume-rclone:latest \
--alias rclone --grant-all-permissions \
args="-v --allow-other" config=/etc/rclone
docker plugin inspect rclone
```
## Healthchecks
The docker plugin volume protocol doesn't provide a way for plugins
to inform the docker daemon that a volume is (un-)available.
As a workaround you can setup a healthcheck to verify that the mount
is responding, for example:
```
services:
my_service:
image: my_image
healthcheck:
test: ls /path/to/rclone/mount || exit 1
interval: 1m
timeout: 15s
retries: 3
start_period: 15s
```
## Running Plugin under Systemd
In most cases you should prefer managed mode. Moreover, macOS and Windows
do not support native Docker plugins. Please use managed mode on these
systems. Proceed further only if you are on Linux.
First, [install rclone](/install/).
You can just run it (type `rclone serve docker` and hit enter) to test it.
Install _FUSE_:
```
sudo apt-get -y install fuse
```
Download two systemd configuration files:
[docker-volume-rclone.service](https://raw.githubusercontent.com/rclone/rclone/master/cmd/serve/docker/contrib/systemd/docker-volume-rclone.service)
and [docker-volume-rclone.socket](https://raw.githubusercontent.com/rclone/rclone/master/cmd/serve/docker/contrib/systemd/docker-volume-rclone.socket).
Put them into the `/etc/systemd/system/` directory:
```
cp docker-volume-rclone.service /etc/systemd/system/
cp docker-volume-rclone.socket /etc/systemd/system/
```
Please note that all commands in this section must be run as _root_ but
we omit `sudo` prefix for brevity.
Now create directories required by the service:
```
mkdir -p /var/lib/docker-volumes/rclone
mkdir -p /var/lib/docker-plugins/rclone/config
mkdir -p /var/lib/docker-plugins/rclone/cache
```
Run the docker plugin service in the socket activated mode:
```
systemctl daemon-reload
systemctl start docker-volume-rclone.service
systemctl enable docker-volume-rclone.socket
systemctl start docker-volume-rclone.socket
systemctl restart docker
```
Or run the service directly:
- run `systemctl daemon-reload` to let systemd pick up new config
- run `systemctl enable docker-volume-rclone.service` to make the new
service start automatically when you power on your machine.
- run `systemctl start docker-volume-rclone.service`
to start the service now.
- run `systemctl restart docker` to restart docker daemon and let it
detect the new plugin socket. Note that this step is not needed in
managed mode where docker knows about plugin state changes.
The two methods are equivalent from the user perspective, but I personally
prefer socket activation.
## Troubleshooting
You can [see managed plugin settings](https://docs.docker.com/engine/extend/#debugging-plugins)
with
```
docker plugin list
docker plugin inspect rclone
```
Note that docker (including latest 20.10.7) will not show actual values
of `args`, just the defaults.
Use `journalctl --unit docker` to see managed plugin output as part of
the docker daemon log. Note that docker reflects plugin lines as _errors_
but their actual level can be seen from the encapsulated message string.
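For example, to narrow the daemon log down to plugin-related lines (the
grep pattern is only a heuristic):
```
journalctl --unit docker | grep rclone
```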
You will usually install the latest version of the managed plugin.
Use the following commands to print the actual installed version:
```
PLUGID=$(docker plugin list --no-trunc | awk '/rclone/{print$1}')
sudo runc --root /run/docker/runtime-runc/plugins.moby exec $PLUGID rclone version
```
You can even use `runc` to run shell inside the plugin container:
```
sudo runc --root /run/docker/runtime-runc/plugins.moby exec --tty $PLUGID bash
```
You can also use curl to check the plugin socket connectivity:
```
docker plugin list --no-trunc
PLUGID=123abc...
sudo curl -H Content-Type:application/json -XPOST -d {} --unix-socket /run/docker/plugins/$PLUGID/rclone.sock http://localhost/Plugin.Activate
```
though this is rarely needed.
Finally I'd like to mention a _caveat with updating volume settings_.
Docker CLI does not have a dedicated command like `docker volume update`.
It may be tempting to invoke `docker volume create` with updated options
on an existing volume, but there is a gotcha. The command will do nothing,
it won't even return an error. I hope that docker maintainers will fix
this some day. In the meantime be aware that you must remove your volume
before recreating it with new settings:
```
docker volume remove my_vol
docker volume create my_vol -d rclone -o opt1=new_val1 ...
```
and verify that the settings did update:
```
docker volume list
docker volume inspect my_vol
```
If docker refuses to remove the volume, you should find containers
or swarm services that use it and stop them first.
| docs/content/docker.md | 1 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.9936227202415466,
0.0530938059091568,
0.00016579213843215257,
0.0040908996015787125,
0.1718917340040207
] |
{
"id": 3,
"code_window": [
"Log entries are reflected as _errors_ in the docker log but retain their\n",
"actual level assigned by rclone in the encapsulated message string.\n",
"\n",
"You can set custom plugin options right when you install it, _in one go_:\n",
"```\n",
"docker plugin remove rclone\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"`HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY` customize the plugin proxy settings.\n",
"\n"
],
"file_path": "docs/content/docker.md",
"type": "add",
"edit_start_line_idx": 391
} | package policy
import (
"context"
"math/rand"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("rand", &Rand{})
}
// Rand stands for random
// Calls all and then randomizes. Returns one candidate.
type Rand struct {
All
}
func (p *Rand) rand(upstreams []*upstream.Fs) *upstream.Fs {
return upstreams[rand.Intn(len(upstreams))]
}
func (p *Rand) randEntries(entries []upstream.Entry) upstream.Entry {
return entries[rand.Intn(len(entries))]
}
// Action category policy, governing the modification of files and directories
func (p *Rand) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.All.Action(ctx, upstreams, path)
if err != nil {
return nil, err
}
return []*upstream.Fs{p.rand(upstreams)}, nil
}
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *Rand) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.All.ActionEntries(entries...)
if err != nil {
return nil, err
}
return []upstream.Entry{p.randEntries(entries)}, nil
}
// Create category policy, governing the creation of files and directories
func (p *Rand) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
upstreams, err := p.All.Create(ctx, upstreams, path)
if err != nil {
return nil, err
}
return []*upstream.Fs{p.rand(upstreams)}, nil
}
// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *Rand) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
entries, err := p.All.CreateEntries(entries...)
if err != nil {
return nil, err
}
return []upstream.Entry{p.randEntries(entries)}, nil
}
// Search category policy, governing the access to files and directories
func (p *Rand) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams, err := p.epall(ctx, upstreams, path)
if err != nil {
return nil, err
}
return p.rand(upstreams), nil
}
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *Rand) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
return p.randEntries(entries), nil
}
| backend/union/policy/rand.go | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.0004984736442565918,
0.000223719616769813,
0.000164501674589701,
0.00016722151485737413,
0.00010697820107452571
] |
{
"id": 3,
"code_window": [
"Log entries are reflected as _errors_ in the docker log but retain their\n",
"actual level assigned by rclone in the encapsulated message string.\n",
"\n",
"You can set custom plugin options right when you install it, _in one go_:\n",
"```\n",
"docker plugin remove rclone\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"`HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY` customize the plugin proxy settings.\n",
"\n"
],
"file_path": "docs/content/docker.md",
"type": "add",
"edit_start_line_idx": 391
} | package mountlib
import (
"context"
"log"
"sort"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
)
var (
// mutex to protect all the variables in this block
mountMu sync.Mutex
// Mount functions available
mountFns = map[string]MountFn{}
// Map of mounted path => MountInfo
liveMounts = map[string]*MountPoint{}
// Supported mount types
supportedMountTypes = []string{"mount", "cmount", "mount2"}
)
// ResolveMountMethod returns mount function by name
func ResolveMountMethod(mountType string) (string, MountFn) {
if mountType != "" {
return mountType, mountFns[mountType]
}
for _, mountType := range supportedMountTypes {
if mountFns[mountType] != nil {
return mountType, mountFns[mountType]
}
}
return "", nil
}
// AddRc adds mount and unmount functionality to rc
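// A mount implementation would typically register itself at startup,
// e.g. (hypothetical call site): AddRc("mount", mountFn).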
func AddRc(mountUtilName string, mountFunction MountFn) {
mountMu.Lock()
defer mountMu.Unlock()
// rcMount allows the mount command to be run from rc
mountFns[mountUtilName] = mountFunction
}
func init() {
rc.Add(rc.Call{
Path: "mount/mount",
AuthRequired: true,
Fn: mountRc,
Title: "Create a new mount point",
Help: `rclone allows Linux, FreeBSD, macOS and Windows to mount any of
Rclone's cloud storage systems as a file system with FUSE.
If no mountType is provided, the priority is given as follows: 1. mount 2. cmount 3. mount2
This takes the following parameters
- fs - a remote path to be mounted (required)
- mountPoint: valid path on the local machine (required)
- mountType: One of the values (mount, cmount, mount2) specifies the mount implementation to use
- mountOpt: a JSON object with Mount options in.
- vfsOpt: a JSON object with VFS options in.
Eg
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=mount
rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
The vfsOpt are as described in options/get and can be seen in the
"vfs" section when running and the mountOpt can be seen in the "mount" section.
rclone rc options/get
`,
})
}
// mountRc allows the mount command to be run from rc
func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
mountPoint, err := in.GetString("mountPoint")
if err != nil {
return nil, err
}
vfsOpt := vfsflags.Opt
err = in.GetStructMissingOK("vfsOpt", &vfsOpt)
if err != nil {
return nil, err
}
mountOpt := Opt
err = in.GetStructMissingOK("mountOpt", &mountOpt)
if err != nil {
return nil, err
}
mountType, err := in.GetString("mountType")
mountMu.Lock()
defer mountMu.Unlock()
if err != nil {
mountType = ""
}
mountType, mountFn := ResolveMountMethod(mountType)
if mountFn == nil {
return nil, errors.New("Mount Option specified is not registered, or is invalid")
}
// Get Fs.fs to be mounted from fs parameter in the params
fdst, err := rc.GetFs(ctx, in)
if err != nil {
return nil, err
}
VFS := vfs.New(fdst, &vfsOpt)
_, unmountFn, err := mountFn(VFS, mountPoint, &mountOpt)
if err != nil {
log.Printf("mount FAILED: %v", err)
return nil, err
}
// Add mount to list if mount point was successfully created
liveMounts[mountPoint] = &MountPoint{
MountPoint: mountPoint,
MountedOn: time.Now(),
MountFn: mountFn,
UnmountFn: unmountFn,
MountOpt: mountOpt,
VFSOpt: vfsOpt,
Fs: fdst,
}
fs.Debugf(nil, "Mount for %s created at %s using %s", fdst.String(), mountPoint, mountType)
return nil, nil
}
func init() {
rc.Add(rc.Call{
Path: "mount/unmount",
AuthRequired: true,
Fn: unMountRc,
Title: "Unmount selected active mount",
Help: `
rclone allows Linux, FreeBSD, macOS and Windows to
mount any of Rclone's cloud storage systems as a file system with
FUSE.
This takes the following parameters
- mountPoint: valid path on the local machine where the mount was created (required)
Eg
rclone rc mount/unmount mountPoint=/home/<user>/mountPoint
`,
})
}
// unMountRc allows the umount command to be run from rc
func unMountRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
mountPoint, err := in.GetString("mountPoint")
if err != nil {
return nil, err
}
mountMu.Lock()
defer mountMu.Unlock()
mountInfo, found := liveMounts[mountPoint]
if !found {
return nil, errors.New("mount not found")
}
if err = mountInfo.Unmount(); err != nil {
return nil, err
}
delete(liveMounts, mountPoint)
return nil, nil
}
func init() {
rc.Add(rc.Call{
Path: "mount/types",
AuthRequired: true,
Fn: mountTypesRc,
Title: "Show all possible mount types",
Help: `This shows all possible mount types and returns them as a list.
This takes no parameters and returns
- mountTypes: list of mount types
The mount types are strings like "mount", "mount2", "cmount" and can
be passed to mount/mount as the mountType parameter.
Eg
rclone rc mount/types
`,
})
}
// mountTypesRc returns a list of available mount types.
func mountTypesRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
var mountTypes = []string{}
mountMu.Lock()
defer mountMu.Unlock()
for mountType := range mountFns {
mountTypes = append(mountTypes, mountType)
}
sort.Strings(mountTypes)
return rc.Params{
"mountTypes": mountTypes,
}, nil
}
func init() {
rc.Add(rc.Call{
Path: "mount/listmounts",
AuthRequired: true,
Fn: listMountsRc,
Title: "Show current mount points",
Help: `This shows currently mounted points, which can be used for performing an unmount
This takes no parameters and returns
- mountPoints: list of current mount points
Eg
rclone rc mount/listmounts
`,
})
}
// MountInfo is a transitional structure for json marshaling
type MountInfo struct {
Fs string `json:"Fs"`
MountPoint string `json:"MountPoint"`
MountedOn time.Time `json:"MountedOn"`
}
// listMountsRc returns a list of current mounts sorted by mount path
func listMountsRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
mountMu.Lock()
defer mountMu.Unlock()
var keys []string
for key := range liveMounts {
keys = append(keys, key)
}
sort.Strings(keys)
mountPoints := []MountInfo{}
for _, k := range keys {
m := liveMounts[k]
info := MountInfo{
Fs: m.Fs.Name(),
MountPoint: m.MountPoint,
MountedOn: m.MountedOn,
}
mountPoints = append(mountPoints, info)
}
return rc.Params{
"mountPoints": mountPoints,
}, nil
}
func init() {
rc.Add(rc.Call{
Path: "mount/unmountall",
AuthRequired: true,
Fn: unmountAll,
		Title:        "Unmount all active mounts",
		Help: `This unmounts all currently created mount points
This takes no parameters and returns error if unmount does not succeed.
Eg
rclone rc mount/unmountall
`,
})
}
// unmountAll unmounts all the created mounts
func unmountAll(_ context.Context, in rc.Params) (out rc.Params, err error) {
mountMu.Lock()
defer mountMu.Unlock()
for mountPoint, mountInfo := range liveMounts {
if err = mountInfo.Unmount(); err != nil {
fs.Debugf(nil, "Couldn't unmount : %s", mountPoint)
return nil, err
}
delete(liveMounts, mountPoint)
}
return nil, nil
}
| cmd/mountlib/rc.go | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.003571209730580449,
0.0005944884032942355,
0.00016702980792615563,
0.0001747620990499854,
0.0009159393375739455
] |
{
"id": 3,
"code_window": [
"Log entries are reflected as _errors_ in the docker log but retain their\n",
"actual level assigned by rclone in the encapsulated message string.\n",
"\n",
"You can set custom plugin options right when you install it, _in one go_:\n",
"```\n",
"docker plugin remove rclone\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"`HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY` customize the plugin proxy settings.\n",
"\n"
],
"file_path": "docs/content/docker.md",
"type": "add",
"edit_start_line_idx": 391
} | package readers
import "io"
// noClose is used to wrap an io.Reader to stop it being upgraded
type noClose struct {
in io.Reader
}
// Read implements io.Closer by passing it straight on
func (nc noClose) Read(p []byte) (n int, err error) {
return nc.in.Read(p)
}
// NoCloser makes sure that the io.Reader passed in can't upgraded to
// an io.Closer.
//
// This is for use with http.NewRequest to make sure the body doesn't
// get upgraded to an io.Closer and the body closed unexpectedly.
func NoCloser(in io.Reader) io.Reader {
if in == nil {
return in
}
// if in doesn't implement io.Closer, just return it
if _, canClose := in.(io.Closer); !canClose {
return in
}
return noClose{in: in}
}
| lib/readers/noclose.go | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.0001692539663054049,
0.00016743796004448086,
0.0001663866569288075,
0.00016667328600306064,
0.0000012894240626337705
] |
{
"id": 4,
"code_window": [
"PLUGID=123abc...\n",
"sudo curl -H Content-Type:application/json -XPOST -d {} --unix-socket /run/docker/plugins/$PLUGID/rclone.sock http://localhost/Plugin.Activate\n",
"```\n",
"though this is rarely needed.\n",
"\n",
"Finally I'd like to mention a _caveat with updating volume settings_.\n",
"Docker CLI does not have a dedicated command like `docker volume update`.\n",
"It may be tempting to invoke `docker volume create` with updated options\n",
"on existing volume, but there is a gotcha. The command will do nothing,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"## Caveats\n",
"\n"
],
"file_path": "docs/content/docker.md",
"type": "add",
"edit_start_line_idx": 508
} | {
"description": "Rclone volume plugin for Docker",
"documentation": "https://rclone.org/",
"interface": {
"socket": "rclone.sock",
"types": ["docker.volumedriver/1.0"]
},
"linux": {
"capabilities": [
"CAP_SYS_ADMIN"
],
"devices": [
{
"path": "/dev/fuse"
}
]
},
"network": {
"type": "host"
},
"entrypoint": ["/usr/local/bin/rclone", "serve", "docker"],
"workdir": "/data",
"args": {
"name": "args",
"value": [],
"settable": ["value"]
},
"env": [
{
"name": "RCLONE_VERBOSE",
"value": "0",
"settable": ["value"]
},
{
"name": "RCLONE_CONFIG",
"value": "/data/config/rclone.conf"
},
{
"name": "RCLONE_CACHE_DIR",
"value": "/data/cache"
},
{
"name": "RCLONE_BASE_DIR",
"value": "/mnt"
}
],
"mounts": [
{
"name": "config",
"source": "/var/lib/docker-plugins/rclone/config",
"destination": "/data/config",
"type": "bind",
"options": ["rbind"],
"settable": ["source"]
},
{
"name": "cache",
"source": "/var/lib/docker-plugins/rclone/cache",
"destination": "/data/cache",
"type": "bind",
"options": ["rbind"],
"settable": ["source"]
}
],
"propagatedMount": "/mnt"
}
| cmd/serve/docker/contrib/plugin/config.json | 1 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.00023721605248283595,
0.0001777385623427108,
0.00016592323663644493,
0.00016811778186820447,
0.00002430539097986184
] |
{
"id": 4,
"code_window": [
"PLUGID=123abc...\n",
"sudo curl -H Content-Type:application/json -XPOST -d {} --unix-socket /run/docker/plugins/$PLUGID/rclone.sock http://localhost/Plugin.Activate\n",
"```\n",
"though this is rarely needed.\n",
"\n",
"Finally I'd like to mention a _caveat with updating volume settings_.\n",
"Docker CLI does not have a dedicated command like `docker volume update`.\n",
"It may be tempting to invoke `docker volume create` with updated options\n",
"on existing volume, but there is a gotcha. The command will do nothing,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"## Caveats\n",
"\n"
],
"file_path": "docs/content/docker.md",
"type": "add",
"edit_start_line_idx": 508
} | package pool
import (
"errors"
"fmt"
"math/rand"
"testing"
"time"
"github.com/rclone/rclone/fstest/testy"
"github.com/stretchr/testify/assert"
)
// makes the allocations be unreliable
func makeUnreliable(bp *Pool) {
bp.alloc = func(size int) ([]byte, error) {
if rand.Intn(3) != 0 {
return nil, errors.New("failed to allocate memory")
}
return make([]byte, size), nil
}
bp.free = func(b []byte) error {
if rand.Intn(3) != 0 {
return errors.New("failed to free memory")
}
return nil
}
}
func testGetPut(t *testing.T, useMmap bool, unreliable bool) {
bp := New(60*time.Second, 4096, 2, useMmap)
if unreliable {
makeUnreliable(bp)
}
assert.Equal(t, 0, bp.InUse())
b1 := bp.Get()
assert.Equal(t, 1, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 1, bp.Alloced())
b2 := bp.Get()
assert.Equal(t, 2, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
b3 := bp.Get()
assert.Equal(t, 3, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 3, bp.Alloced())
bp.Put(b1)
assert.Equal(t, 2, bp.InUse())
assert.Equal(t, 1, bp.InPool())
assert.Equal(t, 3, bp.Alloced())
bp.Put(b2)
assert.Equal(t, 1, bp.InUse())
assert.Equal(t, 2, bp.InPool())
assert.Equal(t, 3, bp.Alloced())
bp.Put(b3)
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 2, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
addr := func(b []byte) string {
return fmt.Sprintf("%p", &b[0])
}
b1a := bp.Get()
assert.Equal(t, addr(b2), addr(b1a))
assert.Equal(t, 1, bp.InUse())
assert.Equal(t, 1, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
b2a := bp.Get()
assert.Equal(t, addr(b1), addr(b2a))
assert.Equal(t, 2, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
bp.Put(b1a)
bp.Put(b2a)
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 2, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
assert.Panics(t, func() {
bp.Put(make([]byte, 1))
})
bp.Flush()
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 0, bp.Alloced())
}
func testFlusher(t *testing.T, useMmap bool, unreliable bool) {
bp := New(50*time.Millisecond, 4096, 2, useMmap)
if unreliable {
makeUnreliable(bp)
}
b1 := bp.Get()
b2 := bp.Get()
b3 := bp.Get()
bp.Put(b1)
bp.Put(b2)
bp.Put(b3)
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 2, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
bp.mu.Lock()
assert.Equal(t, 0, bp.minFill)
assert.Equal(t, true, bp.flushPending)
bp.mu.Unlock()
checkFlushHasHappened := func(desired int) {
var n int
for i := 0; i < 10; i++ {
time.Sleep(100 * time.Millisecond)
n = bp.InPool()
if n <= desired {
break
}
}
assert.Equal(t, desired, n)
}
checkFlushHasHappened(0)
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 0, bp.Alloced())
bp.mu.Lock()
assert.Equal(t, 0, bp.minFill)
assert.Equal(t, false, bp.flushPending)
bp.mu.Unlock()
// Now do manual aging to check it is working properly
bp = New(100*time.Second, 4096, 2, useMmap)
// Check the new one doesn't get flushed
b1 = bp.Get()
b2 = bp.Get()
bp.Put(b1)
bp.Put(b2)
bp.mu.Lock()
assert.Equal(t, 0, bp.minFill)
assert.Equal(t, true, bp.flushPending)
bp.mu.Unlock()
bp.flushAged()
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 2, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
bp.mu.Lock()
assert.Equal(t, 2, bp.minFill)
assert.Equal(t, true, bp.flushPending)
bp.mu.Unlock()
bp.Put(bp.Get())
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 2, bp.InPool())
assert.Equal(t, 2, bp.Alloced())
bp.mu.Lock()
assert.Equal(t, 1, bp.minFill)
assert.Equal(t, true, bp.flushPending)
bp.mu.Unlock()
bp.flushAged()
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 1, bp.InPool())
assert.Equal(t, 1, bp.Alloced())
bp.mu.Lock()
assert.Equal(t, 1, bp.minFill)
assert.Equal(t, true, bp.flushPending)
bp.mu.Unlock()
bp.flushAged()
assert.Equal(t, 0, bp.InUse())
assert.Equal(t, 0, bp.InPool())
assert.Equal(t, 0, bp.Alloced())
bp.mu.Lock()
assert.Equal(t, 0, bp.minFill)
assert.Equal(t, false, bp.flushPending)
bp.mu.Unlock()
}
func TestPool(t *testing.T) {
for _, test := range []struct {
name string
useMmap bool
unreliable bool
}{
{
name: "make",
useMmap: false,
unreliable: false,
},
{
name: "mmap",
useMmap: true,
unreliable: false,
},
{
name: "canFail",
useMmap: false,
unreliable: true,
},
} {
t.Run(test.name, func(t *testing.T) {
t.Run("GetPut", func(t *testing.T) { testGetPut(t, test.useMmap, test.unreliable) })
t.Run("Flusher", func(t *testing.T) {
if test.name == "canFail" {
testy.SkipUnreliable(t) // fails regularly on macOS
}
testFlusher(t, test.useMmap, test.unreliable)
})
})
}
}
| lib/pool/pool_test.go | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.0001734553079586476,
0.00017107785970438272,
0.00016724510351195931,
0.0001711901422822848,
0.0000017437164387956727
] |
{
"id": 4,
"code_window": [
"PLUGID=123abc...\n",
"sudo curl -H Content-Type:application/json -XPOST -d {} --unix-socket /run/docker/plugins/$PLUGID/rclone.sock http://localhost/Plugin.Activate\n",
"```\n",
"though this is rarely needed.\n",
"\n",
"Finally I'd like to mention a _caveat with updating volume settings_.\n",
"Docker CLI does not have a dedicated command like `docker volume update`.\n",
"It may be tempting to invoke `docker volume create` with updated options\n",
"on existing volume, but there is a gotcha. The command will do nothing,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"## Caveats\n",
"\n"
],
"file_path": "docs/content/docker.md",
"type": "add",
"edit_start_line_idx": 508
} | <?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg width="100%" height="100%" viewBox="0 0 226 52" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" xmlns:serif="http://www.serif.com/" style="fill-rule:evenodd;clip-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:1.41421;">
<g transform="matrix(1,0,0,1,-556,-579)">
<g id="logo_on_light__horizontal_mono" transform="matrix(0.879377,0,0,0.619048,543.689,220.571)">
<rect x="14" y="579" width="257" height="84" style="fill:none;"/>
<clipPath id="_clip1">
<rect x="14" y="579" width="257" height="84"/>
</clipPath>
<g clip-path="url(#_clip1)">
<g id="Layer-1" serif:id="Layer 1" transform="matrix(2.87734,0,0,4.08735,1538.9,344.215)">
<g transform="matrix(1,0,0,1,-15.8992,-32.1872)">
<g transform="matrix(0.306497,0,0,0.306497,-493.562,108.224)">
<path d="M0,-26.524C-2.206,-30.345 -5.416,-33.225 -9.105,-35.023C-9.577,-32.503 -10.47,-30.019 -11.823,-27.675L-13.958,-23.97C-12.536,-23.18 -11.298,-22.017 -10.425,-20.505C-7.86,-16.063 -9.383,-10.381 -13.826,-7.816C-18.268,-5.251 -23.95,-6.773 -26.515,-11.216L-30.823,-18.666L-37.775,-18.666L-41.251,-12.646L-36.94,-5.197C-31.05,5.004 -18.007,8.499 -7.806,2.609C2.394,-3.28 5.889,-16.323 0,-26.524" style="fill-rule:nonzero;"/>
</g>
<g transform="matrix(0.306497,0,0,0.306497,-500.11,99.9163)">
<path d="M0,-30.703C-10.201,-36.592 -23.244,-33.097 -29.133,-22.897C-31.34,-19.076 -32.228,-14.856 -31.941,-10.762C-29.523,-11.613 -26.925,-12.082 -24.218,-12.082L-19.943,-12.086C-19.97,-13.712 -19.581,-15.366 -18.709,-16.877C-16.143,-21.32 -10.462,-22.843 -6.019,-20.277C-1.576,-17.712 -0.054,-12.031 -2.619,-7.588L-6.916,-0.132L-3.441,5.889L3.511,5.888L7.806,-1.57C13.696,-11.77 10.201,-24.814 0,-30.703" style="fill-rule:nonzero;"/>
</g>
<g transform="matrix(0.306497,0,0,0.306497,-505.415,99.7857)">
<path d="M0,23.335L-2.142,19.634C-3.537,20.471 -5.163,20.961 -6.908,20.961C-12.039,20.961 -16.198,16.802 -16.198,11.671C-16.198,6.541 -12.039,2.382 -6.908,2.382L1.697,2.376L5.174,-3.644L1.697,-9.664L-6.909,-9.656C-18.688,-9.656 -28.236,-0.107 -28.236,11.671C-28.236,23.45 -18.688,32.999 -6.909,32.999C-2.498,32.999 1.599,31.659 5,29.366C3.054,27.697 1.353,25.678 0,23.335" style="fill-rule:nonzero;"/>
</g>
<g transform="matrix(0.306497,0,0,0.306497,-482.708,99.4102)">
<path d="M0,1.359C6.116,1.359 10.664,-1.725 10.664,-7.005L10.664,-7.109C10.664,-12.023 6.795,-15.107 0.157,-15.107L-12.337,-15.107L-12.337,1.359L0,1.359ZM-15.055,-17.617L0.314,-17.617C4.809,-17.617 8.416,-16.205 10.612,-14.009C12.337,-12.284 13.382,-9.828 13.382,-7.214L13.382,-7.109C13.382,-0.993 8.835,2.509 2.666,3.346L14.741,18.976L11.291,18.976L-0.366,3.816L-0.471,3.816L-12.337,3.816L-12.337,18.976L-15.055,18.976L-15.055,-17.617Z" style="fill-rule:nonzero;"/>
</g>
<g transform="matrix(0.306497,0,0,0.306497,-477.518,99.5864)">
<path d="M0,0.209L0,0.105C0,-10.298 7.841,-18.819 19.08,-18.819C25.98,-18.819 30.11,-16.519 33.508,-13.173L28.385,-7.266C25.562,-9.828 22.687,-11.396 19.028,-11.396C12.859,-11.396 8.416,-6.273 8.416,0L8.416,0.105C8.416,6.378 12.755,11.605 19.028,11.605C23.21,11.605 25.771,9.932 28.646,7.318L33.77,12.493C30.005,16.519 25.823,19.028 18.766,19.028C7.998,19.028 0,10.716 0,0.209" style="fill-rule:nonzero;"/>
</g>
<g transform="matrix(0.306497,0,0,0.306497,-466.159,105.226)">
<path d="M0,-36.592L8.05,-36.592L8.05,-7.318L26.294,-7.318L26.294,0L0,0L0,-36.592Z" style="fill-rule:nonzero;"/>
</g>
<g transform="matrix(0.306497,0,0,0.306497,-449.001,99.5864)">
<path d="M0,0.209L0,0.105C0,-6.168 -4.599,-11.396 -11.082,-11.396C-17.563,-11.396 -22.06,-6.273 -22.06,0L-22.06,0.105C-22.06,6.378 -17.459,11.605 -10.977,11.605C-4.495,11.605 0,6.482 0,0.209M-30.475,0.209L-30.475,0.105C-30.475,-10.298 -22.268,-18.819 -10.977,-18.819C0.314,-18.819 8.416,-10.403 8.416,0L8.416,0.105C8.416,10.507 0.21,19.028 -11.082,19.028C-22.373,19.028 -30.475,10.612 -30.475,0.209" style="fill-rule:nonzero;"/>
</g>
<g transform="matrix(0.306497,0,0,0.306497,-445.189,105.226)">
<path d="M0,-36.592L7.424,-36.592L24.57,-14.062L24.57,-36.592L32.515,-36.592L32.515,0L25.667,0L7.947,-23.262L7.947,0L0,0L0,-36.592Z" style="fill-rule:nonzero;"/>
</g>
<g transform="matrix(0.306497,0,0,0.306497,-433.494,105.226)">
<path d="M0,-36.592L27.601,-36.592L27.601,-29.43L7.997,-29.43L7.997,-22.008L25.248,-22.008L25.248,-14.846L7.997,-14.846L7.997,-7.162L27.862,-7.162L27.862,0L0,0L0,-36.592Z" style="fill-rule:nonzero;"/>
</g>
</g>
</g>
</g>
</g>
</g>
</svg>
| graphics/logo/svg/logo_on_light__horizontal_mono.svg | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.00017268778174184263,
0.00016769944340921938,
0.0001635061635170132,
0.00016642919217702,
0.000003591529093682766
] |
{
"id": 4,
"code_window": [
"PLUGID=123abc...\n",
"sudo curl -H Content-Type:application/json -XPOST -d {} --unix-socket /run/docker/plugins/$PLUGID/rclone.sock http://localhost/Plugin.Activate\n",
"```\n",
"though this is rarely needed.\n",
"\n",
"Finally I'd like to mention a _caveat with updating volume settings_.\n",
"Docker CLI does not have a dedicated command like `docker volume update`.\n",
"It may be tempting to invoke `docker volume create` with updated options\n",
"on existing volume, but there is a gotcha. The command will do nothing,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"## Caveats\n",
"\n"
],
"file_path": "docs/content/docker.md",
"type": "add",
"edit_start_line_idx": 508
} | package vfscache
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/ranges"
"github.com/rclone/rclone/vfs/vfscache/downloaders"
"github.com/rclone/rclone/vfs/vfscache/writeback"
)
// NB as Cache and Item are tightly linked it is necessary to have a
// total lock ordering between them. So Cache.mu must always be
// taken before Item.mu to avoid deadlocks.
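//
// A sketch of the required order when both locks are needed (illustrative,
// not a real call site):
//
//	c.mu.Lock()
//	item.mu.Lock()
//	// ... work on both structures ...
//	item.mu.Unlock()
//	c.mu.Unlock()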
//
// Cache may call into Item but care is needed if Item calls Cache
//
// A lot of the Cache methods do not require locking, these include
//
// - Cache.toOSPath
// - Cache.toOSPathMeta
// - Cache.createItemDir
// - Cache.objectFingerprint
// - Cache.AddVirtual
// NB Item and downloader are tightly linked so it is necessary to
// have a total lock ordering between them. downloader.mu must always
// be taken before Item.mu. downloader may call into Item but Item may
// **not** call downloader methods with Item.mu held
// NB Item and writeback are tightly linked so it is necessary to
// have a total lock ordering between them. writeback.mu must always
// be taken before Item.mu. writeback may call into Item but Item may
// **not** call writeback methods with Item.mu held
// NB Item reset is invoked by cache cleaner for synchronous recovery
// from ENOSPC errors. The reset operation removes the cache file and
// closes/reopens the downloaders. Although most parts of reset and
// other item operations are done with the item mutex held, the mutex
// is released during fd.WriteAt and downloaders calls. We use preAccess
// and postAccess calls to serialize reset and other item operations.
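//
// An illustrative sketch (not part of the original source; exampleAccess
// is a hypothetical method): accessors are expected to bracket their work
// with preAccess/postAccess before taking the item mutex:
//
//	func (item *Item) exampleAccess() {
//		item.preAccess()        // waits out any in-flight reset
//		defer item.postAccess() // allows resets again when done
//		item.mu.Lock()
//		defer item.mu.Unlock()
//		// ... read or write item.fd / item.info here ...
//	}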
// Item is stored in the item map
//
// The Info field is written to the backing store to store status
type Item struct {
// read only
c *Cache // cache this is part of
mu sync.Mutex // protect the variables
cond *sync.Cond // synchronize with cache cleaner
name string // name in the VFS
opens int // number of times file is open
downloaders *downloaders.Downloaders // a record of the downloaders in action - may be nil
o fs.Object // object we are caching - may be nil
fd *os.File // handle we are using to read and write to the file
modified bool // set if the file has been modified since the last Open
info Info // info about the file to persist to backing store
writeBackID writeback.Handle // id of any writebacks in progress
pendingAccesses int // number of threads - cache reset not allowed if not zero
beingReset bool // cache cleaner is resetting the cache file, access not allowed
}
// Info is persisted to backing store
type Info struct {
ModTime time.Time // last time file was modified
ATime time.Time // last time file was accessed
Size int64 // size of the file
Rs ranges.Ranges // which parts of the file are present
Fingerprint string // fingerprint of remote object
Dirty bool // set if the backing file has been modified
}
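// For illustration only (the values and the Fingerprint string are
// invented; the Rs encoding follows the Pos/Size fields used throughout
// this file): _save persists Info as tab-indented JSON roughly like
//
//	{
//		"ModTime": "2021-05-01T12:00:00Z",
//		"ATime": "2021-05-01T12:34:56Z",
//		"Size": 1048576,
//		"Rs": [{"Pos": 0, "Size": 65536}],
//		"Fingerprint": "<remote fingerprint>",
//		"Dirty": false
//	}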
// Items are a slice of *Item ordered by ATime
type Items []*Item
// ResetResult reports the actual action taken in the Reset function and reason
type ResetResult int
// Constants used to report actual action taken in the Reset function and reason
const (
SkippedDirty ResetResult = iota // Dirty item cannot be reset
SkippedPendingAccess // Reset pending access can lead to deadlock
SkippedEmpty // Reset empty item does not save space
RemovedNotInUse // Item not used. Remove instead of reset
ResetFailed // Reset failed with an error
ResetComplete // Reset completed successfully
)
func (rr ResetResult) String() string {
return [...]string{"Dirty item skipped", "In-access item skipped", "Empty item skipped",
"Not-in-use item removed", "Item reset failed", "Item reset completed"}[rr]
}
func (v Items) Len() int { return len(v) }
func (v Items) Swap(i, j int) { v[i], v[j] = v[j], v[i] }
func (v Items) Less(i, j int) bool {
if i == j {
return false
}
iItem := v[i]
jItem := v[j]
iItem.mu.Lock()
defer iItem.mu.Unlock()
jItem.mu.Lock()
defer jItem.mu.Unlock()
return iItem.info.ATime.Before(jItem.info.ATime)
}
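// Assumed usage (not in the original file): since Items implements
// sort.Interface, callers can order items oldest-first with the standard
// library "sort" package:
//
//	items := Items{itemA, itemB, itemC}
//	sort.Sort(items) // items[0] now has the earliest ATime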
// clean the item after its cache file has been deleted
func (info *Info) clean() {
*info = Info{}
info.ModTime = time.Now()
info.ATime = info.ModTime
}
// StoreFn is called back with an object after it has been uploaded
type StoreFn func(fs.Object)
// newItem returns an item for the cache
func newItem(c *Cache, name string) (item *Item) {
now := time.Now()
item = &Item{
c: c,
name: name,
info: Info{
ModTime: now,
ATime: now,
},
}
item.cond = sync.NewCond(&item.mu)
// check the cache file exists
osPath := c.toOSPath(name)
fi, statErr := os.Stat(osPath)
if statErr != nil {
if os.IsNotExist(statErr) {
item._removeMeta("cache file doesn't exist")
} else {
item.remove(fmt.Sprintf("failed to stat cache file: %v", statErr))
}
}
// Try to load the metadata
exists, err := item.load()
if !exists {
item._removeFile("metadata doesn't exist")
} else if err != nil {
item.remove(fmt.Sprintf("failed to load metadata: %v", err))
}
// Get size estimate (which is the best we can do until Open() is called)
if statErr == nil {
item.info.Size = fi.Size()
}
return item
}
// inUse returns true if the item is open or dirty
func (item *Item) inUse() bool {
item.mu.Lock()
defer item.mu.Unlock()
return item.opens != 0 || item.info.Dirty
}
// getATime returns the ATime of the item
func (item *Item) getATime() time.Time {
item.mu.Lock()
defer item.mu.Unlock()
return item.info.ATime
}
// getDiskSize returns the size on disk (approximately) of the item
//
// We return the sizes of the chunks we have fetched, however there is
// likely to be some overhead which we are not taking into account.
func (item *Item) getDiskSize() int64 {
item.mu.Lock()
defer item.mu.Unlock()
return item.info.Rs.Size()
}
// load reads an item's metadata from disk, returning exists=false if not found
func (item *Item) load() (exists bool, err error) {
item.mu.Lock()
defer item.mu.Unlock()
osPathMeta := item.c.toOSPathMeta(item.name) // No locking in Cache
in, err := os.Open(osPathMeta)
if err != nil {
if os.IsNotExist(err) {
return false, err
}
return true, errors.Wrap(err, "vfs cache item: failed to read metadata")
}
defer fs.CheckClose(in, &err)
decoder := json.NewDecoder(in)
err = decoder.Decode(&item.info)
if err != nil {
return true, errors.Wrap(err, "vfs cache item: corrupt metadata")
}
return true, nil
}
// save writes an item to the disk
//
// call with the lock held
func (item *Item) _save() (err error) {
osPathMeta := item.c.toOSPathMeta(item.name) // No locking in Cache
out, err := os.Create(osPathMeta)
if err != nil {
return errors.Wrap(err, "vfs cache item: failed to write metadata")
}
defer fs.CheckClose(out, &err)
encoder := json.NewEncoder(out)
encoder.SetIndent("", "\t")
err = encoder.Encode(item.info)
if err != nil {
return errors.Wrap(err, "vfs cache item: failed to encode metadata")
}
return nil
}
// truncate the item to the given size, creating it if necessary
//
// this does not mark the object as dirty
//
// call with the lock held
func (item *Item) _truncate(size int64) (err error) {
if size < 0 {
// FIXME ignore unknown length files
return nil
}
// Use open handle if available
fd := item.fd
if fd == nil {
// If the metadata says we have some blocks cached then the
// file should exist, so open without O_CREATE
oFlags := os.O_WRONLY
if item.info.Rs.Size() == 0 {
oFlags |= os.O_CREATE
}
osPath := item.c.toOSPath(item.name) // No locking in Cache
fd, err = file.OpenFile(osPath, oFlags, 0600)
if err != nil && os.IsNotExist(err) {
// If the metadata has info but the file doesn't
// exist then it has been externally removed
fs.Errorf(item.name, "vfs cache: detected external removal of cache file")
item.info.Rs = nil // show we have no blocks cached
item.info.Dirty = false // file can't be dirty if it doesn't exist
item._removeMeta("cache file externally deleted")
fd, err = file.OpenFile(osPath, os.O_CREATE|os.O_WRONLY, 0600)
}
if err != nil {
return errors.Wrap(err, "vfs cache: truncate: failed to open cache file")
}
defer fs.CheckClose(fd, &err)
err = file.SetSparse(fd)
if err != nil {
fs.Errorf(item.name, "vfs cache: truncate: failed to set as a sparse file: %v", err)
}
}
fs.Debugf(item.name, "vfs cache: truncate to size=%d", size)
err = fd.Truncate(size)
if err != nil {
return errors.Wrap(err, "vfs cache: truncate")
}
item.info.Size = size
return nil
}
// Truncate the item to the current size, creating if necessary
//
// This does not mark the object as dirty
//
// call with the lock held
func (item *Item) _truncateToCurrentSize() (err error) {
size, err := item._getSize()
if err != nil && !os.IsNotExist(errors.Cause(err)) {
return errors.Wrap(err, "truncate to current size")
}
if size < 0 {
// FIXME ignore unknown length files
return nil
}
err = item._truncate(size)
if err != nil {
return err
}
return nil
}
// Truncate the item to the given size, creating it if necessary
//
// If the new size is shorter than the existing size then the object
// will be shortened and marked as dirty.
//
// If the new size is longer than the old size then the object will be
// extended and the extended data will be filled with zeros. The
// object will be marked as dirty in this case also.
func (item *Item) Truncate(size int64) (err error) {
item.preAccess()
defer item.postAccess()
item.mu.Lock()
defer item.mu.Unlock()
if item.fd == nil {
return errors.New("vfs cache item truncate: internal error: didn't Open file")
}
// Read old size
oldSize, err := item._getSize()
if err != nil {
if !os.IsNotExist(errors.Cause(err)) {
return errors.Wrap(err, "truncate failed to read size")
}
oldSize = 0
}
err = item._truncate(size)
if err != nil {
return err
}
changed := true
if size > oldSize {
// Truncate extends the file in which case all new bytes are
// read as zeros. In this case we must show we have written to
// the new parts of the file.
item._written(oldSize, size)
} else if size < oldSize {
// Truncate shrinks the file so clip the downloaded ranges
item.info.Rs = item.info.Rs.Intersection(ranges.Range{Pos: 0, Size: size})
} else {
changed = item.o == nil
}
if changed {
item._dirty()
}
return nil
}
// _stat gets the current stat of the backing file
//
// Call with mutex held
func (item *Item) _stat() (fi os.FileInfo, err error) {
if item.fd != nil {
return item.fd.Stat()
}
osPath := item.c.toOSPath(item.name) // No locking in Cache
return os.Stat(osPath)
}
// _getSize gets the current size of the item and updates item.info.Size
//
// Call with mutex held
func (item *Item) _getSize() (size int64, err error) {
fi, err := item._stat()
if err != nil {
if os.IsNotExist(err) && item.o != nil {
size = item.o.Size()
err = nil
}
} else {
size = fi.Size()
}
if err == nil {
item.info.Size = size
}
return size, err
}
// GetName gets the vfs name of the item
func (item *Item) GetName() (name string) {
item.mu.Lock()
defer item.mu.Unlock()
return item.name
}
// GetSize gets the current size of the item
func (item *Item) GetSize() (size int64, err error) {
item.mu.Lock()
defer item.mu.Unlock()
return item._getSize()
}
// _exists returns whether the backing file for the item exists or not
//
// call with mutex held
func (item *Item) _exists() bool {
osPath := item.c.toOSPath(item.name) // No locking in Cache
_, err := os.Stat(osPath)
return err == nil
}
// Exists returns whether the backing file for the item exists or not
func (item *Item) Exists() bool {
item.mu.Lock()
defer item.mu.Unlock()
return item._exists()
}
// _dirty marks the item as changed and needing writeback
//
// call with lock held
func (item *Item) _dirty() {
item.info.ModTime = time.Now()
item.info.ATime = item.info.ModTime
if !item.modified {
item.modified = true
item.mu.Unlock()
item.c.writeback.Remove(item.writeBackID)
item.mu.Lock()
}
if !item.info.Dirty {
item.info.Dirty = true
err := item._save()
if err != nil {
fs.Errorf(item.name, "vfs cache: failed to save item info: %v", err)
}
}
}
// Dirty marks the item as changed and needing writeback
func (item *Item) Dirty() {
item.preAccess()
defer item.postAccess()
item.mu.Lock()
item._dirty()
item.mu.Unlock()
}
// IsDirty returns true if the item data is dirty
func (item *Item) IsDirty() bool {
item.mu.Lock()
defer item.mu.Unlock()
return item.info.Dirty
}
// Create the cache file and store the metadata on disk
// Called with item.mu locked
func (item *Item) _createFile(osPath string) (err error) {
if item.fd != nil {
return errors.New("vfs cache item: internal error: didn't Close file")
}
item.modified = false
fd, err := file.OpenFile(osPath, os.O_RDWR, 0600)
if err != nil {
return errors.Wrap(err, "vfs cache item: open failed")
}
err = file.SetSparse(fd)
if err != nil {
fs.Errorf(item.name, "vfs cache: failed to set as a sparse file: %v", err)
}
item.fd = fd
err = item._save()
if err != nil {
closeErr := item.fd.Close()
if closeErr != nil {
fs.Errorf(item.name, "vfs cache: item.fd.Close: closeErr: %v", err)
}
item.fd = nil
return errors.Wrap(err, "vfs cache item: _save failed")
}
return err
}
// Open the local file from the object passed in. Wraps open()
// to provide recovery from out of space error.
func (item *Item) Open(o fs.Object) (err error) {
for retries := 0; retries < fs.GetConfig(context.TODO()).LowLevelRetries; retries++ {
item.preAccess()
err = item.open(o)
item.postAccess()
if err == nil {
break
}
fs.Errorf(item.name, "vfs cache: failed to open item: %v", err)
if !fserrors.IsErrNoSpace(err) && err.Error() != "no space left on device" {
fs.Errorf(item.name, "Non-out-of-space error encountered during open")
break
}
item.c.KickCleaner()
}
return err
}
// Open the local file from the object passed in (which may be nil)
// which implies we are about to create the file
func (item *Item) open(o fs.Object) (err error) {
// defer log.Trace(o, "item=%p", item)("err=%v", &err)
item.mu.Lock()
defer item.mu.Unlock()
item.info.ATime = time.Now()
osPath, err := item.c.createItemDir(item.name) // No locking in Cache
if err != nil {
return errors.Wrap(err, "vfs cache item: createItemDir failed")
}
err = item._checkObject(o)
if err != nil {
return errors.Wrap(err, "vfs cache item: check object failed")
}
item.opens++
if item.opens != 1 {
return nil
}
err = item._createFile(osPath)
if err != nil {
item._remove("item.open failed on _createFile, remove cache data/metadata files")
item.fd = nil
item.opens--
return errors.Wrap(err, "vfs cache item: create cache file failed")
}
// Unlock the Item.mu so we can call some methods which take Cache.mu
item.mu.Unlock()
// Ensure this item is in the cache. It is possible a cache
// expiry has run and removed the item if it had no opens so
// we put it back here. If there was an item with opens
// already then return an error. This shouldn't happen because
// there should only be one vfs.File with a pointer to this
// item at a time.
oldItem := item.c.put(item.name, item) // LOCKING in Cache method
if oldItem != nil {
oldItem.mu.Lock()
if oldItem.opens != 0 {
// Put the item back and return an error
item.c.put(item.name, oldItem) // LOCKING in Cache method
err = errors.Errorf("internal error: item %q already open in the cache", item.name)
}
oldItem.mu.Unlock()
}
// Relock the Item.mu for the return
item.mu.Lock()
// Create the downloaders
if item.o != nil {
item.downloaders = downloaders.New(item, item.c.opt, item.name, item.o)
}
return err
}
// _store stores the local cache file to the remote object, updating
// item.o with the new remote object on success.
//
// Call with lock held
func (item *Item) _store(ctx context.Context, storeFn StoreFn) (err error) {
// defer log.Trace(item.name, "item=%p", item)("err=%v", &err)
// Transfer the temp file to the remote
cacheObj, err := item.c.fcache.NewObject(ctx, item.name)
if err != nil && err != fs.ErrorObjectNotFound {
return errors.Wrap(err, "vfs cache: failed to find cache file")
}
// Object has disappeared if cacheObj == nil
if cacheObj != nil {
o, name := item.o, item.name
item.mu.Unlock()
o, err := operations.Copy(ctx, item.c.fremote, o, name, cacheObj)
item.mu.Lock()
if err != nil {
return errors.Wrap(err, "vfs cache: failed to transfer file from cache to remote")
}
item.o = o
item._updateFingerprint()
}
item.info.Dirty = false
err = item._save()
if err != nil {
fs.Errorf(item.name, "vfs cache: failed to write metadata file: %v", err)
}
if storeFn != nil && item.o != nil {
fs.Debugf(item.name, "vfs cache: writeback object to VFS layer")
// Write the object back to the VFS layer as last
// thing we do with mutex unlocked
o := item.o
item.mu.Unlock()
storeFn(o)
item.mu.Lock()
}
return nil
}
// store stores the local cache file to the remote object, updating
// item.o with the new remote object on success.
func (item *Item) store(ctx context.Context, storeFn StoreFn) (err error) {
item.mu.Lock()
defer item.mu.Unlock()
return item._store(ctx, storeFn)
}
// Close the cache file
func (item *Item) Close(storeFn StoreFn) (err error) {
// defer log.Trace(item.o, "Item.Close")("err=%v", &err)
item.preAccess()
defer item.postAccess()
var (
downloaders *downloaders.Downloaders
syncWriteBack = item.c.opt.WriteBack <= 0
)
item.mu.Lock()
defer item.mu.Unlock()
item.info.ATime = time.Now()
item.opens--
if item.opens < 0 {
return os.ErrClosed
} else if item.opens > 0 {
return nil
}
// Update the size on close
_, _ = item._getSize()
// If the file is dirty ensure any segments not transferred
// are brought in first.
//
// FIXME It would be nice to do this asynchronously however it
// would require keeping the downloaders alive after the item
// has been closed
if item.info.Dirty && item.o != nil {
err = item._ensure(0, item.info.Size)
if err != nil {
return errors.Wrap(err, "vfs cache: failed to download missing parts of cache file")
}
}
// Accumulate and log errors
checkErr := func(e error) {
if e != nil {
fs.Errorf(item.o, "vfs cache: item close failed: %v", e)
if err == nil {
err = e
}
}
}
// Close the downloaders
if downloaders = item.downloaders; downloaders != nil {
item.downloaders = nil
// FIXME need to unlock to kill downloader - should we
// re-arrange locking so this isn't necessary? maybe
// downloader should use the item mutex for locking? or put a
// finer lock on Rs?
//
// downloader.Write calls ensure which needs the lock
// close downloader with mutex unlocked
item.mu.Unlock()
checkErr(downloaders.Close(nil))
item.mu.Lock()
}
// close the file handle
if item.fd == nil {
checkErr(errors.New("vfs cache item: internal error: didn't Open file"))
} else {
checkErr(item.fd.Close())
item.fd = nil
}
// save the metadata once more since it may be dirty
// after the downloader
checkErr(item._save())
// if the item hasn't been changed but has been completed then
// set the modtime from the object otherwise set it from the info
if item._exists() {
if !item.info.Dirty && item.o != nil {
item._setModTime(item.o.ModTime(context.Background()))
} else {
item._setModTime(item.info.ModTime)
}
}
// upload the file to backing store if changed
if item.info.Dirty {
fs.Infof(item.name, "vfs cache: queuing for upload in %v", item.c.opt.WriteBack)
if syncWriteBack {
// do synchronous writeback
checkErr(item._store(context.Background(), storeFn))
} else {
// asynchronous writeback
item.c.writeback.SetID(&item.writeBackID)
id := item.writeBackID
item.mu.Unlock()
item.c.writeback.Add(id, item.name, item.modified, func(ctx context.Context) error {
return item.store(ctx, storeFn)
})
item.mu.Lock()
}
}
// mark as not modified now we have uploaded or queued for upload
item.modified = false
return err
}
// reload is called with valid items recovered from a cache reload.
//
// If they are dirty then it makes sure they get uploaded
//
// it is called before the cache has started so opens will be 0 and
// metaDirty will be false.
func (item *Item) reload(ctx context.Context) error {
item.mu.Lock()
dirty := item.info.Dirty
item.mu.Unlock()
if !dirty {
return nil
}
// see if the object still exists
obj, _ := item.c.fremote.NewObject(ctx, item.name)
// open the file with the object (or nil)
err := item.Open(obj)
if err != nil {
return err
}
// close the file to execute the writeback if needed
err = item.Close(nil)
if err != nil {
return err
}
// put the file into the directory listings
size, err := item._getSize()
if err != nil {
return errors.Wrap(err, "reload: failed to read size")
}
err = item.c.AddVirtual(item.name, size, false)
if err != nil {
return errors.Wrap(err, "reload: failed to add virtual dir entry")
}
return nil
}
// check the fingerprint of an object and update the item or delete
// the cached file accordingly
//
// If we have local modifications then they take precedence
// over a change in the remote
//
// It ensures the file is the correct size for the object
//
// call with lock held
func (item *Item) _checkObject(o fs.Object) error {
if o == nil {
if item.info.Fingerprint != "" {
// no remote object && local object
// remove local object unless dirty
if !item.info.Dirty {
item._remove("stale (remote deleted)")
} else {
fs.Debugf(item.name, "vfs cache: remote object has gone but local object modified - keeping it")
}
} else {
// no remote object && no local object
// OK
}
} else {
remoteFingerprint := fs.Fingerprint(context.TODO(), o, false)
fs.Debugf(item.name, "vfs cache: checking remote fingerprint %q against cached fingerprint %q", remoteFingerprint, item.info.Fingerprint)
if item.info.Fingerprint != "" {
// remote object && local object
if remoteFingerprint != item.info.Fingerprint {
if !item.info.Dirty {
fs.Debugf(item.name, "vfs cache: removing cached entry as stale (remote fingerprint %q != cached fingerprint %q)", remoteFingerprint, item.info.Fingerprint)
item._remove("stale (remote is different)")
} else {
fs.Debugf(item.name, "vfs cache: remote object has changed but local object modified - keeping it (remote fingerprint %q != cached fingerprint %q)", remoteFingerprint, item.info.Fingerprint)
}
}
} else {
// remote object && no local object
// Set fingerprint
item.info.Fingerprint = remoteFingerprint
}
item.info.Size = o.Size()
}
item.o = o
err := item._truncateToCurrentSize()
if err != nil {
return errors.Wrap(err, "vfs cache item: open truncate failed")
}
return nil
}
// WrittenBack checks to see if the item has been written back or not
func (item *Item) WrittenBack() bool {
item.mu.Lock()
defer item.mu.Unlock()
return item.info.Fingerprint != ""
}
// remove the cached file
//
// call with lock held
func (item *Item) _removeFile(reason string) {
osPath := item.c.toOSPath(item.name) // No locking in Cache
err := os.Remove(osPath)
if err != nil {
if !os.IsNotExist(err) {
fs.Errorf(item.name, "vfs cache: failed to remove cache file as %s: %v", reason, err)
}
} else {
fs.Infof(item.name, "vfs cache: removed cache file as %s", reason)
}
}
// remove the metadata
//
// call with lock held
func (item *Item) _removeMeta(reason string) {
osPathMeta := item.c.toOSPathMeta(item.name) // No locking in Cache
err := os.Remove(osPathMeta)
if err != nil {
if !os.IsNotExist(err) {
fs.Errorf(item.name, "vfs cache: failed to remove metadata from cache as %s: %v", reason, err)
}
} else {
fs.Debugf(item.name, "vfs cache: removed metadata from cache as %s", reason)
}
}
// remove the cached file and empty the metadata
//
// This returns true if the file was in the transfer queue so it may
// not have been completely uploaded yet.
//
// call with lock held
func (item *Item) _remove(reason string) (wasWriting bool) {
// Cancel writeback, if any
item.mu.Unlock()
wasWriting = item.c.writeback.Remove(item.writeBackID)
item.mu.Lock()
item.info.clean()
item._removeFile(reason)
item._removeMeta(reason)
return wasWriting
}
// remove the cached file and empty the metadata
//
// This returns true if the file was in the transfer queue so it may
// not have been completely uploaded yet.
func (item *Item) remove(reason string) (wasWriting bool) {
item.mu.Lock()
defer item.mu.Unlock()
return item._remove(reason)
}
// RemoveNotInUse is called to remove a cache file that has not been accessed recently.
// It may also be called to remove empty cache files when the quota has already been reached.
func (item *Item) RemoveNotInUse(maxAge time.Duration, emptyOnly bool) (removed bool, spaceFreed int64) {
item.mu.Lock()
defer item.mu.Unlock()
spaceFreed = 0
removed = false
if item.opens != 0 || item.info.Dirty {
return
}
removeIt := false
if maxAge == 0 {
removeIt = true // quota-driven removal
}
if maxAge != 0 {
cutoff := time.Now().Add(-maxAge)
// If not locked and access time too long ago - delete the file
accessTime := item.info.ATime
if accessTime.Sub(cutoff) <= 0 {
removeIt = true
}
}
if removeIt {
spaceUsed := item.info.Rs.Size()
if !emptyOnly || spaceUsed == 0 {
spaceFreed = spaceUsed
removed = true
if item._remove("Removing old cache file not in use") {
fs.Errorf(item.name, "item removed when it was writing/uploaded")
}
}
}
return
}
// Reset is called by the cache purge functions only to reset (empty the contents of)
// cache files that are not dirty. It is used when cache space runs out and we see an ENOSPC error.
func (item *Item) Reset() (rr ResetResult, spaceFreed int64, err error) {
item.mu.Lock()
defer item.mu.Unlock()
// The item is not being used now. Just remove it instead of resetting it.
if item.opens == 0 && !item.info.Dirty {
spaceFreed = item.info.Rs.Size()
if item._remove("Removing old cache file not in use") {
fs.Errorf(item.name, "item removed when it was writing/uploaded")
}
return RemovedNotInUse, spaceFreed, nil
}
// do not reset dirty file
if item.info.Dirty {
return SkippedDirty, 0, nil
}
/* A wait on pendingAccesses to become 0 can lead to deadlock when an item.Open bumps
up the pendingAccesses count, calls item.open, which calls cache.put. The cache.put
operation needs the cache mutex, which is held here. We skip this file now. The
caller (the cache cleaner thread) may retry resetting this item if the cache size does
not reduce below quota. */
if item.pendingAccesses > 0 {
return SkippedPendingAccess, 0, nil
}
/* There is no need to reset an empty cache file unless it was being reset and the reset failed.
Some thread(s) may be waiting on the reset's successful completion in that case. */
if item.info.Rs.Size() == 0 && !item.beingReset {
return SkippedEmpty, 0, nil
}
item.beingReset = true
/* Error handling from this point on (setting item.fd and item.beingReset):
Since Reset is called by the cache cleaner thread, there is no direct way to return
the error to the io threads. Set item.fd to nil upon internal errors, so that the
io threads will return internal errors seeing a nil fd. In the case when the error
is ENOSPC, keep the item in the beingReset state and that will keep the item.ReadAt
waiting at its beginning. The cache purge loop will try to redo the reset after cache
space is made available again. This recovery design should allow most io threads to
eventually go through, unless large files are written/overwritten concurrently and
the total size of these files exceeds the cache storage limit. */
// Close the downloaders
// Accumulate and log errors
checkErr := func(e error) {
if e != nil {
fs.Errorf(item.o, "vfs cache: item reset failed: %v", e)
if err == nil {
err = e
}
}
}
if downloaders := item.downloaders; downloaders != nil {
item.downloaders = nil
// FIXME need to unlock to kill downloader - should we
// re-arrange locking so this isn't necessary? maybe
// downloader should use the item mutex for locking? or put a
// finer lock on Rs?
//
// downloader.Write calls ensure which needs the lock
// close downloader with mutex unlocked
item.mu.Unlock()
checkErr(downloaders.Close(nil))
item.mu.Lock()
}
// close the file handle
// fd can be nil if we tried Reset and failed before because of ENOSPC during reset
if item.fd != nil {
checkErr(item.fd.Close())
if err != nil {
// Could not close the cache file
item.beingReset = false
item.cond.Broadcast()
return ResetFailed, 0, err
}
item.fd = nil
}
spaceFreed = item.info.Rs.Size()
// This should not be possible. We get here only if cache data is not dirty.
if item._remove("cache out of space, item is clean") {
fs.Errorf(item.o, "vfs cache item removed when it was writing/uploaded")
}
// can we have an item with no dirty data (so that we can get here) and nil item.o at the same time?
fso := item.o
checkErr(item._checkObject(fso))
if err != nil {
item.beingReset = false
item.cond.Broadcast()
return ResetFailed, spaceFreed, err
}
osPath := item.c.toOSPath(item.name)
checkErr(item._createFile(osPath))
if err != nil {
item._remove("cache reset failed on _createFile, removed cache data file")
item.fd = nil // This allows a new Reset redo to have a clean state to deal with
if !fserrors.IsErrNoSpace(err) {
item.beingReset = false
item.cond.Broadcast()
}
return ResetFailed, spaceFreed, err
}
// Create the downloaders
if item.o != nil {
item.downloaders = downloaders.New(item, item.c.opt, item.name, item.o)
}
/* The item will stay in the beingReset state if we get an error that prevents us from
reaching this point. The cache purge loop will redo the failed Reset. */
item.beingReset = false
item.cond.Broadcast()
return ResetComplete, spaceFreed, err
}
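// Assumed caller pattern (not part of the original source): the cache
// cleaner can dispatch on the ResetResult, e.g.
//
//	rr, freed, err := item.Reset()
//	switch rr {
//	case RemovedNotInUse, ResetComplete:
//		// account for `freed` bytes of reclaimed space
//	case SkippedPendingAccess, SkippedDirty, SkippedEmpty:
//		// retry later if the cache is still over quota
//	case ResetFailed:
//		// log err and let the purge loop redo the reset
//	}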
// preAccess either waits for an ongoing cache reset to finish or increases pendingAccesses
// to protect against a cache reset on this item while the thread potentially uses the cache file.
// The cache cleaner waits until pendingAccesses is zero before resetting the cache.
func (item *Item) preAccess() {
item.mu.Lock()
defer item.mu.Unlock()
for item.beingReset {
item.cond.Wait()
}
item.pendingAccesses++
}
// postAccess reduces the pendingAccesses count, enabling cache reset upon ENOSPC
func (item *Item) postAccess() {
item.mu.Lock()
defer item.mu.Unlock()
item.pendingAccesses--
item.cond.Broadcast()
}
// _present returns true if the whole file has been downloaded
//
// call with the lock held
func (item *Item) _present() bool {
return item.info.Rs.Present(ranges.Range{Pos: 0, Size: item.info.Size})
}
// present returns true if the whole file has been downloaded
func (item *Item) present() bool {
item.mu.Lock()
defer item.mu.Unlock()
return item._present()
}
// HasRange returns true if the current ranges entirely include range
func (item *Item) HasRange(r ranges.Range) bool {
item.mu.Lock()
defer item.mu.Unlock()
return item.info.Rs.Present(r)
}
// FindMissing adjusts r returning a new ranges.Range which only
// contains the range which needs to be downloaded. This could be
// empty - check with IsEmpty. It also adjusts this to make sure it is
// not larger than the file.
func (item *Item) FindMissing(r ranges.Range) (outr ranges.Range) {
item.mu.Lock()
defer item.mu.Unlock()
outr = item.info.Rs.FindMissing(r)
// Clip returned block to size of file
outr.Clip(item.info.Size)
return outr
}
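// Hypothetical usage (not in the original source): a caller can check
// what still needs fetching before scheduling a download:
//
//	missing := item.FindMissing(ranges.Range{Pos: 0, Size: 4096})
//	if !missing.IsEmpty() {
//		// ask the downloaders to fetch `missing`
//	}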
// ensure the range from offset, size is present in the backing file
//
// call with the item lock held
func (item *Item) _ensure(offset, size int64) (err error) {
// defer log.Trace(item.name, "offset=%d, size=%d", offset, size)("err=%v", &err)
if offset+size > item.info.Size {
size = item.info.Size - offset
}
r := ranges.Range{Pos: offset, Size: size}
present := item.info.Rs.Present(r)
/* This statement simulates a cache space error for test purpose */
/* if present != true && item.info.Rs.Size() > 32*1024*1024 {
return errors.New("no space left on device")
} */
fs.Debugf(nil, "vfs cache: looking for range=%+v in %+v - present %v", r, item.info.Rs, present)
item.mu.Unlock()
defer item.mu.Lock()
if present {
// This is a file we are writing so no downloaders needed
if item.downloaders == nil {
return nil
}
// Otherwise start the downloader for the future if required
return item.downloaders.EnsureDownloader(r)
}
if item.downloaders == nil {
return errors.New("internal error: downloaders is nil")
}
return item.downloaders.Download(r)
}
// _written marks the (offset, size) as present in the backing file
//
// This is called by the downloader downloading file segments and the
// vfs layer writing to the file.
//
// This doesn't mark the item as Dirty - that is the responsibility
// of the caller as we don't know here whether we are adding reads or
// writes to the cache file.
//
// call with lock held
func (item *Item) _written(offset, size int64) {
// defer log.Trace(item.name, "offset=%d, size=%d", offset, size)("")
item.info.Rs.Insert(ranges.Range{Pos: offset, Size: size})
}
// update the fingerprint of the object if any
//
// call with lock held
func (item *Item) _updateFingerprint() {
if item.o == nil {
return
}
oldFingerprint := item.info.Fingerprint
item.info.Fingerprint = fs.Fingerprint(context.TODO(), item.o, false)
if oldFingerprint != item.info.Fingerprint {
fs.Debugf(item.o, "vfs cache: fingerprint now %q", item.info.Fingerprint)
}
}
// setModTime of the cache file
//
// call with lock held
func (item *Item) _setModTime(modTime time.Time) {
fs.Debugf(item.name, "vfs cache: setting modification time to %v", modTime)
osPath := item.c.toOSPath(item.name) // No locking in Cache
err := os.Chtimes(osPath, modTime, modTime)
if err != nil {
fs.Errorf(item.name, "vfs cache: failed to set modification time of cached file: %v", err)
}
}
// setModTime of the cache file and in the Item
func (item *Item) setModTime(modTime time.Time) {
// defer log.Trace(item.name, "modTime=%v", modTime)("")
item.mu.Lock()
item._updateFingerprint()
item._setModTime(modTime)
item.info.ModTime = modTime
err := item._save()
if err != nil {
fs.Errorf(item.name, "vfs cache: setModTime: failed to save item info: %v", err)
}
item.mu.Unlock()
}
// GetModTime of the cache file
func (item *Item) GetModTime() (modTime time.Time, err error) {
// defer log.Trace(item.name, "modTime=%v", modTime)("")
item.mu.Lock()
defer item.mu.Unlock()
fi, err := item._stat()
if err == nil {
modTime = fi.ModTime()
}
return modTime, nil
}
// ReadAt bytes from the file at off
func (item *Item) ReadAt(b []byte, off int64) (n int, err error) {
n = 0
var expBackOff int
for retries := 0; retries < fs.GetConfig(context.TODO()).LowLevelRetries; retries++ {
item.preAccess()
n, err = item.readAt(b, off)
item.postAccess()
if err == nil || err == io.EOF {
break
}
fs.Errorf(item.name, "vfs cache: failed to _ensure cache %v", err)
if !fserrors.IsErrNoSpace(err) && err.Error() != "no space left on device" {
fs.Debugf(item.name, "vfs cache: failed to _ensure cache %v is not out of space", err)
break
}
item.c.KickCleaner()
expBackOff = 2 << uint(retries)
time.Sleep(time.Duration(expBackOff) * time.Millisecond) // Exponential back-off the retries
}
if fserrors.IsErrNoSpace(err) {
fs.Errorf(item.name, "vfs cache: failed to _ensure cache after retries %v", err)
}
return n, err
}
// readAt reads bytes from the file at off - the internal implementation of ReadAt
func (item *Item) readAt(b []byte, off int64) (n int, err error) {
item.mu.Lock()
if item.fd == nil {
item.mu.Unlock()
return 0, errors.New("vfs cache item ReadAt: internal error: didn't Open file")
}
if off < 0 {
item.mu.Unlock()
return 0, io.EOF
}
defer item.mu.Unlock()
err = item._ensure(off, int64(len(b)))
if err != nil {
return 0, err
}
item.info.ATime = time.Now()
// Do the reading with Item.mu unlocked and cache protected by preAccess
n, err = item.fd.ReadAt(b, off)
return n, err
}
// WriteAt bytes to the file at off
func (item *Item) WriteAt(b []byte, off int64) (n int, err error) {
item.preAccess()
defer item.postAccess()
item.mu.Lock()
if item.fd == nil {
item.mu.Unlock()
return 0, errors.New("vfs cache item WriteAt: internal error: didn't Open file")
}
item.mu.Unlock()
// Do the writing with Item.mu unlocked
n, err = item.fd.WriteAt(b, off)
if err == nil && n != len(b) {
err = errors.Errorf("short write: tried to write %d but only %d written", len(b), n)
}
item.mu.Lock()
item._written(off, int64(n))
if n > 0 {
item._dirty()
}
end := off + int64(n)
// Writing off the end of the file so need to make some
// zeroes. We do this by showing that we have written to the
// new parts of the file.
if off > item.info.Size {
item._written(item.info.Size, off-item.info.Size)
item._dirty()
}
// Update size
if end > item.info.Size {
item.info.Size = end
}
item.mu.Unlock()
return n, err
}
// WriteAtNoOverwrite writes b to the file, but will not overwrite
// already present ranges.
//
// This is used by the downloader to write bytes to the file
//
// It returns n, the total number of bytes processed, and skipped, the
// number of bytes which were processed but not actually written to the file.
func (item *Item) WriteAtNoOverwrite(b []byte, off int64) (n int, skipped int, err error) {
item.mu.Lock()
var (
// Range we wish to write
r = ranges.Range{Pos: off, Size: int64(len(b))}
// Ranges that we need to write
foundRanges = item.info.Rs.FindAll(r)
// Length of each write
nn int
)
// Write the range out ignoring already written chunks
// fs.Debugf(item.name, "Ranges = %v", item.info.Rs)
for i := range foundRanges {
foundRange := &foundRanges[i]
// fs.Debugf(item.name, "foundRange[%d] = %v", i, foundRange)
if foundRange.R.Pos != off {
err = errors.New("internal error: offset of range is wrong")
break
}
size := int(foundRange.R.Size)
if foundRange.Present {
// if present want to skip this range
// fs.Debugf(item.name, "skip chunk offset=%d size=%d", off, size)
nn = size
skipped += size
} else {
// if range not present then we want to write it
// fs.Debugf(item.name, "write chunk offset=%d size=%d", off, size)
nn, err = item.fd.WriteAt(b[:size], off)
if err == nil && nn != size {
err = errors.Errorf("downloader: short write: tried to write %d but only %d written", size, nn)
}
item._written(off, int64(nn))
}
off += int64(nn)
b = b[nn:]
n += nn
if err != nil {
break
}
}
item.mu.Unlock()
return n, skipped, err
}
// Sync commits the current contents of the file to stable storage. Typically,
// this means flushing the file system's in-memory copy of recently written
// data to disk.
func (item *Item) Sync() (err error) {
item.preAccess()
defer item.postAccess()
item.mu.Lock()
defer item.mu.Unlock()
if item.fd == nil {
return errors.New("vfs cache item sync: internal error: didn't Open file")
}
// sync the file and the metadata to disk
err = item.fd.Sync()
if err != nil {
return errors.Wrap(err, "vfs cache item sync: failed to sync file")
}
err = item._save()
if err != nil {
return errors.Wrap(err, "vfs cache item sync: failed to sync metadata")
}
return nil
}
// rename the item
func (item *Item) rename(name string, newName string, newObj fs.Object) (err error) {
item.preAccess()
defer item.postAccess()
item.mu.Lock()
// stop downloader
downloaders := item.downloaders
item.downloaders = nil
// id for writeback cancel
id := item.writeBackID
// Set internal state
item.name = newName
item.o = newObj
// Rename cache file if it exists
err = rename(item.c.toOSPath(name), item.c.toOSPath(newName)) // No locking in Cache
// Rename meta file if it exists
err2 := rename(item.c.toOSPathMeta(name), item.c.toOSPathMeta(newName)) // No locking in Cache
if err2 != nil {
err = err2
}
item.mu.Unlock()
// close downloader and cancel writebacks with mutex unlocked
if downloaders != nil {
_ = downloaders.Close(nil)
}
item.c.writeback.Rename(id, newName)
return err
}
| vfs/vfscache/item.go | 0 | https://github.com/rclone/rclone/commit/bbcc9a45fe534c5ad585ec2daaf2a79a6aaf8708 | [
0.00021293667668942362,
0.00016914727166295052,
0.0001602729462319985,
0.0001694412640063092,
0.000004622109372576233
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.