| hunk (dict) | file (string, 0–11.8M chars) | file_path (string, 2–234 chars) | label (int64, 0–1) | commit_url (string, 74–103 chars) | dependency_score (sequence of length 5) |
|---|---|---|---|---|---|
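For orientation, here is a rough sketch in Go of how one row of this dump could be modeled. Only the column names and length bounds come from the schema above; the per-field comments (what `label`, `dependency_score`, and the hunk's label values mean) are assumptions drawn from the example rows below, not documented semantics.

```go
// Sketch only: shapes inferred from the schema header and the example rows;
// the comments on individual fields are assumptions, not documented semantics.
package dump

// Hunk mirrors the JSON object in the first ("hunk") column.
type Hunk struct {
	ID               int      `json:"id"`
	CodeWindow       []string `json:"code_window"`         // lines surrounding the edit site
	Labels           []string `json:"labels"`              // one "keep"/"add" marker per window line
	AfterEdit        []string `json:"after_edit"`          // the lines introduced by the edit
	FilePath         string   `json:"file_path"`           // file the hunk belongs to
	Type             string   `json:"type"`                // e.g. "add"
	EditStartLineIdx int      `json:"edit_start_line_idx"` // anchor line in that file
}

// Row mirrors one pipe-delimited row of the table.
type Row struct {
	Hunk            Hunk       // edit record (dict column)
	File            string     // full text of a candidate file, 0–11.8M chars
	FilePath        string     // path of that candidate file, 2–234 chars
	Label           int        // 0 or 1; meaning not documented in this excerpt
	CommitURL       string     // commit the hunk was mined from, 74–103 chars
	DependencyScore [5]float64 // five scores per row; semantics not documented here
}
```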
{
"id": 6,
"code_window": [
" required: false\n",
" default: go-test-reports\n",
" type: string\n",
"\n",
"env: ${{ fromJSON(inputs.env-vars) }}\n",
"\n",
"jobs:\n",
" test-matrix:\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" checkout-ref:\n",
" description: The ref to use for checkout.\n",
" required: false\n",
" default: ${{ github.base_ref }}\n",
" type: string\n"
],
"file_path": ".github/workflows/test-go.yml",
"type": "add",
"edit_start_line_idx": 69
} | name: CI
on:
pull_request:
# The default types for pull_request are [ opened, synchronize, reopened ].
# This is insufficient for our needs, since we're skipping stuff on PRs in
# draft mode. By adding the ready_for_review type, when a draft pr is marked
# ready, we run everything, including the stuff we'd have skipped up until now.
types: [opened, synchronize, reopened, ready_for_review]
push:
branches:
- main
- release/**
workflow_dispatch:
concurrency:
group: ${{ github.head_ref || github.run_id }}-ci
cancel-in-progress: true
jobs:
setup:
name: Setup
runs-on: ubuntu-latest
outputs:
compute-small: ${{ steps.setup-outputs.outputs.compute-small }}
compute-medium: ${{ steps.setup-outputs.outputs.compute-medium }}
compute-large: ${{ steps.setup-outputs.outputs.compute-large }}
compute-largem: ${{ steps.setup-outputs.outputs.compute-largem }}
compute-xlarge: ${{ steps.setup-outputs.outputs.compute-xlarge }}
enterprise: ${{ steps.setup-outputs.outputs.enterprise }}
go-tags: ${{ steps.setup-outputs.outputs.go-tags }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- id: setup-outputs
name: Setup outputs
run: |
github_repository="${{ github.repository }}"
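# Note: each compute-* output below is a JSON-encoded runs-on value (a label array for
# self-hosted runners, a plain string for GitHub-hosted ones); jobs that use it directly
# in runs-on wrap it in fromJSON() (see the test-ui and tests-completed jobs below).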
if [ "${github_repository##*/}" == "vault-enterprise" ] ; then
# shellcheck disable=SC2129
echo 'compute-small=["self-hosted","ondemand","linux","type=c6a.large"]' >> "$GITHUB_OUTPUT" # 2x vCPUs, 4 GiB RAM,
echo 'compute-medium=["self-hosted","ondemand","linux","type=c6a.xlarge"]' >> "$GITHUB_OUTPUT" # 4x vCPUs, 8 GiB RAM,
echo 'compute-large=["self-hosted","ondemand","linux","type=c6a.2xlarge","disk_gb=64"]' >> "$GITHUB_OUTPUT" # 8x vCPUs, 16 GiB RAM,
echo 'compute-largem=["self-hosted","ondemand","linux","type=m6a.2xlarge"]' >> "$GITHUB_OUTPUT" # 8x vCPUs, 32 GiB RAM,
echo 'compute-xlarge=["self-hosted","ondemand","linux","type=c6a.4xlarge"]' >> "$GITHUB_OUTPUT" # 16x vCPUs, 32 GiB RAM,
echo 'enterprise=1' >> "$GITHUB_OUTPUT"
echo 'go-tags=ent,enterprise' >> "$GITHUB_OUTPUT"
else
# shellcheck disable=SC2129
echo 'compute-small="ubuntu-latest"' >> "$GITHUB_OUTPUT" # 2x vCPUs, 7 GiB RAM, 14 GB SSD
echo 'compute-medium="custom-linux-small-vault-latest"' >> "$GITHUB_OUTPUT" # 8x vCPUs, 32 GiB RAM, 300 GB SSD
echo 'compute-large="custom-linux-medium-vault-latest"' >> "$GITHUB_OUTPUT" # 16x vCPUs, 64 GiB RAM, 600 GB SSD
echo 'compute-largem="custom-linux-medium-vault-latest"' >> "$GITHUB_OUTPUT" # 16x vCPUs, 64 GiB RAM, 600 GB SSD
echo 'compute-xlarge="custom-linux-xl-vault-latest"' >> "$GITHUB_OUTPUT" # 32x vCPUs, 128 GiB RAM, 1200 GB SSD
echo 'enterprise=' >> "$GITHUB_OUTPUT"
echo 'go-tags=' >> "$GITHUB_OUTPUT"
fi
- name: Ensure Go modules are cached
uses: ./.github/actions/set-up-go
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
no-restore: true # don't download them on a cache hit
verify-changes:
name: Verify doc-ui only PRs
uses: ./.github/workflows/verify_changes.yml
test-go:
name: Run Go tests
needs:
- setup
- verify-changes
# Don't run this job for docs/ui only PRs
if: |
needs.verify-changes.outputs.is_docs_change == 'false' &&
needs.verify-changes.outputs.is_ui_change == 'false'
uses: ./.github/workflows/test-go.yml
with:
# The regular Go tests use an extra runner to execute the
# binary-dependent tests. We isolate them there so that the
# other tests aren't slowed down waiting for a binary build.
binary-tests: true
total-runners: 16
go-arch: amd64
go-tags: '${{ needs.setup.outputs.go-tags }},deadlock'
runs-on: ${{ needs.setup.outputs.compute-large }}
enterprise: ${{ needs.setup.outputs.enterprise }}
test-timing-cache-key: go-test-timing-standard
secrets: inherit
test-go-testonly:
name: Run Go tests tagged with testonly
needs:
- setup
- verify-changes
# Don't run this job for docs/ui only PRs
if: |
needs.verify-changes.outputs.is_docs_change == 'false' &&
needs.verify-changes.outputs.is_ui_change == 'false'
uses: ./.github/workflows/test-go.yml
with:
testonly: true
total-runners: 2 # test runners cannot be less than 2
go-arch: amd64
go-tags: '${{ needs.setup.outputs.go-tags }},deadlock,testonly'
runs-on: ${{ needs.setup.outputs.compute-large }}
enterprise: ${{ needs.setup.outputs.enterprise }}
test-timing-cache-enabled: false
secrets: inherit
test-go-race:
name: Run Go tests with data race detection
needs:
- setup
- verify-changes
# Don't run this job for docs/ui only PRs
if: |
github.event.pull_request.draft == false &&
needs.verify-changes.outputs.is_docs_change == 'false' &&
needs.verify-changes.outputs.is_ui_change == 'false'
uses: ./.github/workflows/test-go.yml
with:
total-runners: 16
env-vars: |
{
"VAULT_CI_GO_TEST_RACE": 1
}
extra-flags: '-race'
go-arch: amd64
go-tags: ${{ needs.setup.outputs.go-tags }}
runs-on: ${{ needs.setup.outputs.compute-large }}
enterprise: ${{ needs.setup.outputs.enterprise }}
name: "race"
test-timing-cache-key: go-test-timing-race
secrets: inherit
test-go-fips:
name: Run Go tests with FIPS configuration
# Only run this job for the enterprise repo if the PR is not docs/ui only
if: |
github.event.pull_request.draft == false &&
needs.setup.outputs.enterprise == 1 &&
needs.verify-changes.outputs.is_docs_change == 'false' &&
needs.verify-changes.outputs.is_ui_change == 'false'
needs:
- setup
- verify-changes
uses: ./.github/workflows/test-go.yml
with:
total-runners: 16
env-vars: |
{
"GOEXPERIMENT": "boringcrypto"
}
go-arch: amd64
go-tags: '${{ needs.setup.outputs.go-tags }},deadlock,cgo,fips,fips_140_2'
runs-on: ${{ needs.setup.outputs.compute-large }}
enterprise: ${{ needs.setup.outputs.enterprise }}
name: "fips"
test-timing-cache-key: go-test-timing-fips
secrets: inherit
test-ui:
name: Test UI
# The test-ui job is only run on:
# - pushes to main and branches starting with "release/"
# - PRs where the branch starts with "ui/", "backport/ui/", "merge", or when base branch starts with "release/"
# - PRs with the "ui" label on GitHub
if: |
github.ref_name == 'main' ||
startsWith(github.ref_name, 'release/') ||
startsWith(github.head_ref, 'ui/') ||
startsWith(github.head_ref, 'backport/ui/') ||
startsWith(github.head_ref, 'merge') ||
contains(github.event.pull_request.labels.*.name, 'ui')
needs:
- setup
permissions:
id-token: write
contents: read
runs-on: ${{ fromJSON(needs.setup.outputs.compute-largem) }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- uses: ./.github/actions/set-up-go
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
# Set up node.js without caching to allow running npm install -g yarn (next step)
- uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0
with:
node-version-file: './ui/package.json'
- id: install-yarn
run: |
npm install -g yarn
# Set up node.js with caching, using the yarn.lock file
- uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0
with:
node-version-file: './ui/package.json'
cache: yarn
cache-dependency-path: ui/yarn.lock
- id: install-browser
uses: browser-actions/setup-chrome@c485fa3bab6be59dce18dbc18ef6ab7cbc8ff5f1 # v1.2.0
- id: ui-dependencies
name: ui-dependencies
working-directory: ./ui
run: |
yarn install --frozen-lockfile
npm rebuild node-sass
- id: vault-auth
name: Authenticate to Vault
if: github.repository == 'hashicorp/vault-enterprise'
run: vault-auth
- id: secrets
name: Fetch secrets
if: github.repository == 'hashicorp/vault-enterprise'
uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e
with:
url: ${{ steps.vault-auth.outputs.addr }}
caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }}
token: ${{ steps.vault-auth.outputs.token }}
secrets: |
kv/data/github/hashicorp/vault-enterprise/github-token username-and-token | PRIVATE_REPO_GITHUB_TOKEN;
kv/data/github/hashicorp/vault-enterprise/license license_1 | VAULT_LICENSE;
- id: setup-git
name: Setup Git
if: github.repository == 'hashicorp/vault-enterprise'
run: |
git config --global url."https://${{ steps.secrets.outputs.PRIVATE_REPO_GITHUB_TOKEN }}@github.com".insteadOf https://github.com
- id: build-go-dev
name: build-go-dev
run: |
rm -rf ./pkg
mkdir ./pkg
make ci-bootstrap dev
- id: test-ui
name: test-ui
env:
VAULT_LICENSE: ${{ steps.secrets.outputs.VAULT_LICENSE }}
run: |
export PATH="${PWD}/bin:${PATH}"
if [ "${{ github.repository }}" == 'hashicorp/vault' ] ; then
export VAULT_LICENSE="${{ secrets.VAULT_LICENSE }}"
fi
# Run Ember tests
cd ui
mkdir -p test-results/qunit
yarn test:oss
- uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: test-results-ui
path: ui/test-results
if: success() || failure()
- uses: test-summary/action@62bc5c68de2a6a0d02039763b8c754569df99e3f # v2.1
with:
paths: "ui/test-results/qunit/results.xml"
show: "fail"
if: always()
tests-completed:
needs:
- setup
- test-go
- test-ui
if: always()
runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }}
steps:
- run: |
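# Fail this step (and thus the job) if any upstream job result contains "failure" or
# "cancelled": the inverted grep (-v) then selects no line and exits non-zero.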
tr -d '\n' <<< '${{ toJSON(needs.*.result) }}' | grep -q -v -E '(failure|cancelled)'
notify-tests-completed-failures-ce:
if: |
always() &&
github.repository == 'hashicorp/vault' &&
(needs.test-go.result == 'failure' ||
needs.test-go-testonly.result == 'failure' ||
needs.test-go-race.result == 'failure' ||
needs.test-ui.result == 'failure'
) && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/'))
runs-on: ubuntu-latest
permissions:
id-token: write
contents: read
strategy:
fail-fast: false
needs:
- test-go
- test-go-testonly
- test-go-race
- test-ui
steps:
- name: send-notification
uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0
# We intentionally aren't using the following here since it's from an internal repo
# uses: hashicorp/cloud-gha-slack-notifier@730a033037b8e603adf99ebd3085f0fdfe75e2f4 #v1
env:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
with:
channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official, use "C05Q4D5V89W"/test-vault-ci-slack-integration for testing
payload: |
{
"text": "CE test failures on ${{ github.ref_name }}",
"blocks": [
{
"type": "header",
"text": {
"type": "plain_text",
"text": ":rotating_light: CE test failures on ${{ github.ref_name }} :rotating_light:",
"emoji": true
}
},
{
"type": "divider"
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "${{ needs.test-go.result != 'failure' && ':white_check_mark:' || ':x:' }} Go tests\n${{ needs.test-go-race.result != 'failure' && ':white_check_mark:' || ':x:' }} Go race tests\n${{ needs.test-go-testonly.result != 'failure' && ':white_check_mark:' || ':x:' }} Go testonly tests\n${{ needs.test-ui.result != 'failure' && ':white_check_mark:' || ':x:' }} UI tests"
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "View Failing Workflow",
"emoji": true
},
"url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
}
}
]
}
notify-tests-completed-failures-ent:
if: |
always() &&
github.repository == 'hashicorp/vault-enterprise' &&
(needs.test-go.result == 'failure' ||
needs.test-go-testonly.result == 'failure' ||
needs.test-go-fips.result == 'failure' ||
needs.test-go-race.result == 'failure' ||
needs.test-ui.result == 'failure'
) && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/'))
runs-on: ['self-hosted', 'linux', 'small']
permissions:
id-token: write
contents: read
strategy:
fail-fast: false
needs:
- test-go
- test-go-testonly
- test-go-race
- test-go-fips
- test-ui
steps:
- id: vault-auth
name: Vault Authenticate
run: vault-auth
- id: secrets
name: Fetch Vault Secrets
uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e
with:
url: ${{ steps.vault-auth.outputs.addr }}
caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }}
token: ${{ steps.vault-auth.outputs.token }}
secrets: |
kv/data/github/${{ github.repository }}/github_actions_notifications_bot token | SLACK_BOT_TOKEN;
- name: send-notification
uses: hashicorp/cloud-gha-slack-notifier@730a033037b8e603adf99ebd3085f0fdfe75e2f4 #v1
with:
channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official, use "C05Q4D5V89W"/test-vault-ci-slack-integration for testing
slack-bot-token: ${{ steps.secrets.outputs.SLACK_BOT_TOKEN }}
payload: |
{
"text": "Enterprise test failures on ${{ github.ref_name }}",
"blocks": [
{
"type": "header",
"text": {
"type": "plain_text",
"text": ":rotating_light: Enterprise test failures on ${{ github.ref_name }} :rotating_light:",
"emoji": true
}
},
{
"type": "divider"
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "${{ needs.test-go.result != 'failure' && ':white_check_mark:' || ':x:' }} Go tests\n${{ needs.test-go-fips.result != 'failure' && ':white_check_mark:' || ':x:' }} Go FIPS tests\n${{ needs.test-go-race.result != 'failure' && ':white_check_mark:' || ':x:' }} Go race tests\n${{ needs.test-go-testonly.result != 'failure' && ':white_check_mark:' || ':x:' }} Go testonly tests\n${{ needs.test-ui.result != 'failure' && ':white_check_mark:' || ':x:' }} UI tests"
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "View Failing Workflow",
"emoji": true
},
"url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
}
}
]
}
test-summary:
name: Go test failures
runs-on: ubuntu-latest
if: |
always() &&
(needs.test-go.result == 'success' ||
needs.test-go.result == 'failure' ||
needs.test-go-fips.result == 'success' ||
needs.test-go-fips.result == 'failure' ||
needs.test-go-race.result == 'success' ||
needs.test-go-race.result == 'failure') &&
(github.repository != 'hashicorp/vault' ||
(github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name))
# The last check ensures this doesn't run on community-contributed PRs, which
# won't have the permissions to run this job.
needs:
- test-go
- test-go-fips
- test-go-race
steps:
- name: Download failure summary
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
with:
name: failure-summary
- name: Prepare failure summary
run: |
# Sort all of the summary table rows and push them to a temp file.
temp_file_name=temp-$(date +%s)
cat failure-summary-*.md | sort >> "$temp_file_name"
# If there are test failures, present them as a GitHub Markdown table.
if [ -s "$temp_file_name" ]; then
# shellcheck disable=SC2129
# Here we create the headings for the summary table
echo "| Test Type | Package | Test | Elapsed | Runner Index | Logs |" >> "$GITHUB_STEP_SUMMARY"
echo "| --------- | ------- | ---- | ------- | ------------ | ---- |" >> "$GITHUB_STEP_SUMMARY"
# shellcheck disable=SC2002
cat "$temp_file_name" >> "$GITHUB_STEP_SUMMARY"
else
echo "### All Go tests passed! :white_check_mark:" >> "$GITHUB_STEP_SUMMARY"
fi
# the random EOF is needed for a multiline environment variable
EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
# shellcheck disable=SC2129
echo "TABLE_TEST_RESULTS<<$EOF" >> "$GITHUB_ENV"
cat "$temp_file_name" >> "$GITHUB_ENV"
echo "$EOF" >> "$GITHUB_ENV"
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- name: Create comment
if: github.head_ref != ''
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
RUN_ID: ${{ github.run_id }}
REPO: ${{ github.event.repository.name }}
TABLE_DATA: ${{ env.TABLE_TEST_RESULTS }}
run: ./.github/scripts/report_failed_tests.sh
| .github/workflows/ci.yml | 1 | https://github.com/hashicorp/vault/commit/375c2be624cfd9b19b2f21cbb62a3e453d605f58 | [
0.986078143119812,
0.021229106932878494,
0.0001625794538995251,
0.00017069153545890003,
0.14225949347019196
] |
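The hunk record (repeated in the next row) pairs each code_window line with a keep/add label and carries the inserted lines in after_edit. Below is a minimal sketch of one plausible way to materialize such a hunk; the rule of splicing after_edit immediately after the line labeled "add" is my inference from this single example, not a documented convention, and the window strings are abridged copies of the record above.

```go
// Sketch only: applies a hunk by splicing after_edit right after the window
// line labeled "add". This insertion rule is an assumption inferred from the
// example record, not from any documented spec.
package main

import "fmt"

func applyHunk(window, labels, afterEdit []string) []string {
	out := make([]string, 0, len(window)+len(afterEdit))
	for i, line := range window {
		out = append(out, line)
		if i < len(labels) && labels[i] == "add" {
			out = append(out, afterEdit...) // new lines follow the anchor line
		}
	}
	return out
}

func main() {
	// Window and labels abridged to the first five entries of the record above.
	window := []string{
		" required: false\n",
		" default: go-test-reports\n",
		" type: string\n",
		"\n",
		"env: ${{ fromJSON(inputs.env-vars) }}\n",
	}
	labels := []string{"keep", "keep", "add", "keep", "keep"}
	afterEdit := []string{
		" checkout-ref:\n",
		" description: The ref to use for checkout.\n",
		" required: false\n",
		" default: ${{ github.base_ref }}\n",
		" type: string\n",
	}
	for _, line := range applyHunk(window, labels, afterEdit) {
		fmt.Print(line)
	}
}
```

Run as-is, this prints the reconstructed window with the checkout-ref block slotted in ahead of the env: line, which is consistent with the after_edit contents shown in the record.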
{
"id": 6,
"code_window": [
" required: false\n",
" default: go-test-reports\n",
" type: string\n",
"\n",
"env: ${{ fromJSON(inputs.env-vars) }}\n",
"\n",
"jobs:\n",
" test-matrix:\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" checkout-ref:\n",
" description: The ref to use for checkout.\n",
" required: false\n",
" default: ${{ github.base_ref }}\n",
" type: string\n"
],
"file_path": ".github/workflows/test-go.yml",
"type": "add",
"edit_start_line_idx": 69
} | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package pki
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"os"
"path"
"strings"
"testing"
"time"
"github.com/hashicorp/vault/sdk/helper/certutil"
"github.com/go-test/deep"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/acme"
"golang.org/x/net/http2"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/builtin/logical/pki/dnstest"
"github.com/hashicorp/vault/helper/constants"
"github.com/hashicorp/vault/helper/testhelpers"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
)
// TestAcmeBasicWorkflow is a test that validates a basic ACME workflow using the Golang ACME client.
func TestAcmeBasicWorkflow(t *testing.T) {
t.Parallel()
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
cases := []struct {
name string
prefixUrl string
}{
{"root", "acme/"},
{"role", "roles/test-role/acme/"},
{"issuer", "issuer/int-ca/acme/"},
{"issuer_role", "issuer/int-ca/roles/test-role/acme/"},
}
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
baseAcmeURL := "/v1/pki/" + tc.prefixUrl
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
t.Logf("Testing discover on %s", baseAcmeURL)
discovery, err := acmeClient.Discover(testCtx)
require.NoError(t, err, "failed acme discovery call")
discoveryBaseUrl := client.Address() + baseAcmeURL
require.Equal(t, discoveryBaseUrl+"new-nonce", discovery.NonceURL)
require.Equal(t, discoveryBaseUrl+"new-account", discovery.RegURL)
require.Equal(t, discoveryBaseUrl+"new-order", discovery.OrderURL)
require.Equal(t, discoveryBaseUrl+"revoke-cert", discovery.RevokeURL)
require.Equal(t, discoveryBaseUrl+"key-change", discovery.KeyChangeURL)
require.False(t, discovery.ExternalAccountRequired, "bad value for external account required in directory")
// Attempt to update prior to creating an account
t.Logf("Testing updates with no proper account fail on %s", baseAcmeURL)
_, err = acmeClient.UpdateReg(testCtx, &acme.Account{Contact: []string{"mailto:[email protected]"}})
require.ErrorIs(t, err, acme.ErrNoAccount, "expected failure attempting to update prior to account registration")
// Create new account
t.Logf("Testing register on %s", baseAcmeURL)
acct, err := acmeClient.Register(testCtx, &acme.Account{
Contact: []string{"mailto:[email protected]", "mailto:[email protected]"},
}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering account")
require.Equal(t, acme.StatusValid, acct.Status)
require.Contains(t, acct.Contact, "mailto:[email protected]")
require.Contains(t, acct.Contact, "mailto:[email protected]")
require.Len(t, acct.Contact, 2)
// Call register again; we should get the existing account back
t.Logf("Testing duplicate register returns existing account on %s", baseAcmeURL)
_, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true })
require.ErrorIs(t, err, acme.ErrAccountAlreadyExists,
"We should have returned a 200 status code which would have triggered an error in the golang acme"+
" library")
// Update contact
t.Logf("Testing Update account contacts on %s", baseAcmeURL)
acct.Contact = []string{"mailto:[email protected]"}
acct2, err := acmeClient.UpdateReg(testCtx, acct)
require.NoError(t, err, "failed updating account")
require.Equal(t, acme.StatusValid, acct2.Status)
// We should get this back, not the original values.
require.Contains(t, acct2.Contact, "mailto:[email protected]")
require.Len(t, acct2.Contact, 1)
// Make sure orders do not accept NotBefore/NotAfter dates
_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "localhost"}},
acme.WithOrderNotBefore(time.Now().Add(10*time.Minute)))
require.Error(t, err, "should have rejected a new order with NotBefore set")
_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "localhost"}},
acme.WithOrderNotAfter(time.Now().Add(10*time.Minute)))
require.Error(t, err, "should have rejected a new order with NotAfter set")
// Make sure DNS identifiers cannot include IP addresses
_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "127.0.0.1"}},
acme.WithOrderNotAfter(time.Now().Add(10*time.Minute)))
require.Error(t, err, "should have rejected a new order with IP-like DNS-type identifier")
_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "*.127.0.0.1"}},
acme.WithOrderNotAfter(time.Now().Add(10*time.Minute)))
require.Error(t, err, "should have rejected a new order with IP-like DNS-type identifier")
// Create an order
t.Logf("Testing Authorize Order on %s", baseAcmeURL)
identifiers := []string{"localhost.localdomain", "*.localdomain"}
createOrder, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
{Type: "dns", Value: identifiers[0]},
{Type: "dns", Value: identifiers[1]},
})
require.NoError(t, err, "failed creating order")
require.Equal(t, acme.StatusPending, createOrder.Status)
require.Empty(t, createOrder.CertURL)
require.Equal(t, createOrder.URI+"/finalize", createOrder.FinalizeURL)
require.Len(t, createOrder.AuthzURLs, 2, "expected two authzurls")
// Get order
t.Logf("Testing GetOrder on %s", baseAcmeURL)
getOrder, err := acmeClient.GetOrder(testCtx, createOrder.URI)
require.NoError(t, err, "failed fetching order")
require.Equal(t, acme.StatusPending, createOrder.Status)
if diffs := deep.Equal(createOrder, getOrder); diffs != nil {
t.Fatalf("Differences exist between create and get order: \n%v", strings.Join(diffs, "\n"))
}
// Make sure the identifiers returned in the order contain the original values
var ids []string
for _, id := range getOrder.Identifiers {
require.Equal(t, "dns", id.Type)
ids = append(ids, id.Value)
}
require.ElementsMatch(t, identifiers, ids, "order responses should have all original identifiers")
// Load authorizations
var authorizations []*acme.Authorization
for _, authUrl := range getOrder.AuthzURLs {
auth, err := acmeClient.GetAuthorization(testCtx, authUrl)
require.NoError(t, err, "failed fetching authorization: %s", authUrl)
authorizations = append(authorizations, auth)
}
// We should have 2 separate authorizations as we have two separate identifiers
require.Len(t, authorizations, 2, "expected 2 authorizations in order")
var wildcardAuth *acme.Authorization
var domainAuth *acme.Authorization
for _, auth := range authorizations {
if auth.Wildcard {
wildcardAuth = auth
} else {
domainAuth = auth
}
}
// Test the values for the domain authorization
require.Equal(t, acme.StatusPending, domainAuth.Status)
require.Equal(t, "dns", domainAuth.Identifier.Type)
require.Equal(t, "localhost.localdomain", domainAuth.Identifier.Value)
require.False(t, domainAuth.Wildcard, "should not be a wildcard")
require.True(t, domainAuth.Expires.IsZero(), "authorization should only have expiry set on valid status")
require.Len(t, domainAuth.Challenges, 3, "expected three challenges")
require.Equal(t, acme.StatusPending, domainAuth.Challenges[0].Status)
require.True(t, domainAuth.Challenges[0].Validated.IsZero(), "validated time should be 0 on challenge")
require.Equal(t, "http-01", domainAuth.Challenges[0].Type)
require.NotEmpty(t, domainAuth.Challenges[0].Token, "missing challenge token")
require.Equal(t, acme.StatusPending, domainAuth.Challenges[1].Status)
require.True(t, domainAuth.Challenges[1].Validated.IsZero(), "validated time should be 0 on challenge")
require.Equal(t, "dns-01", domainAuth.Challenges[1].Type)
require.NotEmpty(t, domainAuth.Challenges[1].Token, "missing challenge token")
require.Equal(t, acme.StatusPending, domainAuth.Challenges[2].Status)
require.True(t, domainAuth.Challenges[2].Validated.IsZero(), "validated time should be 0 on challenge")
require.Equal(t, "tls-alpn-01", domainAuth.Challenges[2].Type)
require.NotEmpty(t, domainAuth.Challenges[2].Token, "missing challenge token")
// Test the values for the wildcard authorization
require.Equal(t, acme.StatusPending, wildcardAuth.Status)
require.Equal(t, "dns", wildcardAuth.Identifier.Type)
require.Equal(t, "localdomain", wildcardAuth.Identifier.Value) // Make sure we strip the *. in auth responses
require.True(t, wildcardAuth.Wildcard, "should be a wildcard")
require.True(t, wildcardAuth.Expires.IsZero(), "authorization should only have expiry set on valid status")
require.Len(t, wildcardAuth.Challenges, 1, "expected one challenge")
require.Equal(t, acme.StatusPending, domainAuth.Challenges[0].Status)
require.True(t, wildcardAuth.Challenges[0].Validated.IsZero(), "validated time should be 0 on challenge")
require.Equal(t, "dns-01", wildcardAuth.Challenges[0].Type)
require.NotEmpty(t, domainAuth.Challenges[0].Token, "missing challenge token")
// Make sure that getting a challenge does not start it.
challenge, err := acmeClient.GetChallenge(testCtx, domainAuth.Challenges[0].URI)
require.NoError(t, err, "failed to load challenge")
require.Equal(t, acme.StatusPending, challenge.Status)
require.True(t, challenge.Validated.IsZero(), "validated time should be 0 on challenge")
require.Equal(t, "http-01", challenge.Type)
// Accept a challenge; this triggers validation to start.
challenge, err = acmeClient.Accept(testCtx, domainAuth.Challenges[0])
require.NoError(t, err, "failed to load challenge")
require.Equal(t, acme.StatusProcessing, challenge.Status)
require.True(t, challenge.Validated.IsZero(), "validated time should be 0 on challenge")
require.Equal(t, "http-01", challenge.Type)
require.NotEmpty(t, challenge.Token, "missing challenge token")
// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow
// test.
markAuthorizationSuccess(t, client, acmeClient, acct, getOrder)
// Make sure sending a CSR with the account key gets rejected.
goodCr := &x509.CertificateRequest{
Subject: pkix.Name{CommonName: identifiers[1]},
DNSNames: []string{identifiers[0], identifiers[1]},
}
t.Logf("csr: %v", goodCr)
// We want to make sure people are not using the same keys for CSR/Certs and their ACME account.
csrSignedWithAccountKey, err := x509.CreateCertificateRequest(rand.Reader, goodCr, accountKey)
require.NoError(t, err, "failed generating csr")
_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrSignedWithAccountKey, true)
require.Error(t, err, "should not be allowed to use the account key for a CSR")
csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed generated key for CSR")
// Validate we reject CSRs that contain CN that aren't in the original order
badCr := &x509.CertificateRequest{
Subject: pkix.Name{CommonName: "not-in-original-order.com"},
DNSNames: []string{identifiers[0], identifiers[1]},
}
t.Logf("csr: %v", badCr)
csrWithBadCName, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
require.NoError(t, err, "failed generating csr with bad common name")
_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadCName, true)
require.Error(t, err, "should not be allowed to csr with different common names than order")
// Validate we reject CSRs that contain DNS names that aren't in the original order
badCr = &x509.CertificateRequest{
Subject: pkix.Name{CommonName: createOrder.Identifiers[0].Value},
DNSNames: []string{"www.notinorder.com"},
}
csrWithBadName, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
require.NoError(t, err, "failed generating csr with bad name")
_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadName, true)
require.Error(t, err, "should not be allowed to csr with different names than order")
// Validate we reject CSRs that contain IP addresses that weren't in the original order
badCr = &x509.CertificateRequest{
Subject: pkix.Name{CommonName: createOrder.Identifiers[0].Value},
IPAddresses: []net.IP{{127, 0, 0, 1}},
}
csrWithBadIP, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
require.NoError(t, err, "failed generating csr with bad name")
_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadIP, true)
require.Error(t, err, "should not be allowed to csr with different ip address than order")
// Validate we reject CSRs that contain fewer names than in the original order.
badCr = &x509.CertificateRequest{
Subject: pkix.Name{CommonName: identifiers[0]},
}
csrWithBadName, err = x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
require.NoError(t, err, "failed generating csr with bad name")
_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadName, true)
require.Error(t, err, "should not be allowed to csr with different names than order")
// Finally, test that a proper CSR with the correct name, signed with a different key, works.
csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
require.NoError(t, err, "failed generating csr")
certs, _, err := acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csr, true)
require.NoError(t, err, "failed finalizing order")
require.Len(t, certs, 3, "expected three items within the returned certs")
testAcmeCertSignedByCa(t, client, certs, "int-ca")
// Make sure the certificate has a NotAfter date of a maximum of 90 days
acmeCert, err := x509.ParseCertificate(certs[0])
require.NoError(t, err, "failed parsing acme cert bytes")
maxAcmeNotAfter := time.Now().Add(maxAcmeCertTTL)
if maxAcmeNotAfter.Before(acmeCert.NotAfter) {
require.Fail(t, fmt.Sprintf("certificate has a NotAfter value %v greater than ACME max ttl %v", acmeCert.NotAfter, maxAcmeNotAfter))
}
// Make sure we can revoke it using account key revocation
err = acmeClient.RevokeCert(ctx, nil, certs[0], acme.CRLReasonUnspecified)
require.NoError(t, err, "failed to revoke certificate through account key")
// Make sure it was actually revoked
certResp, err := client.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert))
require.NoError(t, err, "failed to read certificate status")
require.NotNil(t, certResp, "certificate status response was nil")
revocationTime := certResp.Data["revocation_time"].(json.Number)
revocationTimeInt, err := revocationTime.Int64()
require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
require.Greater(t, revocationTimeInt, int64(0),
"revocation time was not greater than 0, revocation did not work value was: %v", revocationTimeInt)
// Make sure we can revoke an authorization as a client
err = acmeClient.RevokeAuthorization(ctx, authorizations[0].URI)
require.NoError(t, err, "failed revoking authorization status")
revokedAuth, err := acmeClient.GetAuthorization(ctx, authorizations[0].URI)
require.NoError(t, err, "failed fetching authorization")
require.Equal(t, acme.StatusDeactivated, revokedAuth.Status)
// Deactivate account
t.Logf("Testing deactivate account on %s", baseAcmeURL)
err = acmeClient.DeactivateReg(testCtx)
require.NoError(t, err, "failed deactivating account")
// Make sure we get an unauthorized error trying to update the account again.
t.Logf("Testing update on deactivated account fails on %s", baseAcmeURL)
_, err = acmeClient.UpdateReg(testCtx, acct)
require.Error(t, err, "expected account to be deactivated")
require.IsType(t, &acme.Error{}, err, "expected acme error type")
acmeErr := err.(*acme.Error)
require.Equal(t, "urn:ietf:params:acme:error:unauthorized", acmeErr.ProblemType)
})
}
}
// TestAcmeBasicWorkflowWithEab verifies that new accounts require EABs if enforced by configuration.
func TestAcmeBasicWorkflowWithEab(t *testing.T) {
t.Parallel()
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
// Enable EAB
_, err := client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
"enabled": true,
"eab_policy": "always-required",
})
require.NoError(t, err)
cases := []struct {
name string
prefixUrl string
}{
{"root", "acme/"},
{"role", "roles/test-role/acme/"},
{"issuer", "issuer/int-ca/acme/"},
{"issuer_role", "issuer/int-ca/roles/test-role/acme/"},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
baseAcmeURL := "/v1/pki/" + tc.prefixUrl
accountKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed creating ec key")
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
t.Logf("Testing discover on %s", baseAcmeURL)
discovery, err := acmeClient.Discover(testCtx)
require.NoError(t, err, "failed acme discovery call")
require.True(t, discovery.ExternalAccountRequired, "bad value for external account required in directory")
// Create new account without EAB, should fail
t.Logf("Testing register on %s", baseAcmeURL)
_, err = acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
require.ErrorContains(t, err, "urn:ietf:params:acme:error:externalAccountRequired",
"expected failure creating an account without eab")
// Test fetch, list, delete workflow
kid, _ := getEABKey(t, client, tc.prefixUrl)
resp, err := client.Logical().ListWithContext(testCtx, "pki/eab")
require.NoError(t, err, "failed to list eab tokens")
require.NotNil(t, resp, "list response for eab tokens should not be nil")
require.Contains(t, resp.Data, "keys")
require.Contains(t, resp.Data, "key_info")
require.Len(t, resp.Data["keys"], 1)
require.Contains(t, resp.Data["keys"], kid)
_, err = client.Logical().DeleteWithContext(testCtx, "pki/eab/"+kid)
require.NoError(t, err, "failed to delete eab")
// List eabs should return zero results
resp, err = client.Logical().ListWithContext(testCtx, "pki/eab")
require.NoError(t, err, "failed to list eab tokens")
require.Nil(t, resp, "list response for eab tokens should have been nil")
// fetch a new EAB
kid, eabKeyBytes := getEABKey(t, client, tc.prefixUrl)
acct := &acme.Account{
ExternalAccountBinding: &acme.ExternalAccountBinding{
KID: kid,
Key: eabKeyBytes,
},
}
// Make sure we can list our key
resp, err = client.Logical().ListWithContext(testCtx, "pki/eab")
require.NoError(t, err, "failed to list eab tokens")
require.NotNil(t, resp, "list response for eab tokens should not be nil")
require.Contains(t, resp.Data, "keys")
require.Contains(t, resp.Data, "key_info")
require.Len(t, resp.Data["keys"], 1)
require.Contains(t, resp.Data["keys"], kid)
keyInfo := resp.Data["key_info"].(map[string]interface{})
require.Contains(t, keyInfo, kid)
infoForKid := keyInfo[kid].(map[string]interface{})
require.Equal(t, "hs", infoForKid["key_type"])
require.Equal(t, tc.prefixUrl+"directory", infoForKid["acme_directory"])
// Create new account with EAB
t.Logf("Testing register on %s", baseAcmeURL)
_, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering new account with eab")
// Make sure our EAB is no longer available
resp, err = client.Logical().ListWithContext(context.Background(), "pki/eab")
require.NoError(t, err, "failed to list eab tokens")
require.Nil(t, resp, "list response for eab tokens should have been nil due to empty list")
// Attempt to create another account with the same EAB as before -- should fail
accountKey2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed creating ec key")
acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey2)
acct2 := &acme.Account{
ExternalAccountBinding: &acme.ExternalAccountBinding{
KID: kid,
Key: eabKeyBytes,
},
}
_, err = acmeClient2.Register(testCtx, acct2, func(tosURL string) bool { return true })
require.ErrorContains(t, err, "urn:ietf:params:acme:error:unauthorized", "should fail due to EAB re-use")
// We can lookup/find an existing account without EAB if we have the account key
_, err = acmeClient.GetReg(testCtx /* unused url */, "")
require.NoError(t, err, "expected to lookup existing account without eab")
})
}
}
// TestAcmeNonce is a basic test that validates we get back a nonce with the proper status codes
// based on the HTTP method used (GET vs. HEAD)
func TestAcmeNonce(t *testing.T) {
t.Parallel()
cluster, client, pathConfig := setupAcmeBackend(t)
defer cluster.Cleanup()
cases := []struct {
name string
prefixUrl string
directoryUrl string
}{
{"root", "", "pki/acme/new-nonce"},
{"role", "/roles/test-role", "pki/roles/test-role/acme/new-nonce"},
{"issuer", "/issuer/default", "pki/issuer/default/acme/new-nonce"},
{"issuer_role", "/issuer/default/roles/test-role", "pki/issuer/default/roles/test-role/acme/new-nonce"},
}
for _, tc := range cases {
for _, httpOp := range []string{"get", "header"} {
t.Run(fmt.Sprintf("%s-%s", tc.name, httpOp), func(t *testing.T) {
var req *api.Request
switch httpOp {
case "get":
req = client.NewRequest(http.MethodGet, "/v1/"+tc.directoryUrl)
case "header":
req = client.NewRequest(http.MethodHead, "/v1/"+tc.directoryUrl)
}
res, err := client.RawRequestWithContext(ctx, req)
require.NoError(t, err, "failed sending raw request")
_ = res.Body.Close()
// Proper Status Code
switch httpOp {
case "get":
require.Equal(t, http.StatusNoContent, res.StatusCode)
case "header":
require.Equal(t, http.StatusOK, res.StatusCode)
}
// Make sure we don't have a Content-Type header.
require.Equal(t, "", res.Header.Get("Content-Type"))
// Make sure we return the Cache-Control header
require.Contains(t, res.Header.Get("Cache-Control"), "no-store",
"missing Cache-Control header with no-store header value")
// Test for our nonce header value
require.NotEmpty(t, res.Header.Get("Replay-Nonce"), "missing Replay-Nonce header with an actual value")
// Test Link header value
expectedLinkHeader := fmt.Sprintf("<%s>;rel=\"index\"", pathConfig+tc.prefixUrl+"/acme/directory")
require.Contains(t, res.Header.Get("Link"), expectedLinkHeader,
"different value for link header than expected")
})
}
}
}
// TestAcmeClusterPathNotConfigured performs basic testing of the ACME error handler.
func TestAcmeClusterPathNotConfigured(t *testing.T) {
t.Parallel()
cluster, client := setupTestPkiCluster(t)
defer cluster.Cleanup()
// Go sneaky, sneaky and update the acme configuration through sys/raw to bypass config/cluster path checks
pkiMount := findStorageMountUuid(t, client, "pki")
rawPath := path.Join("/sys/raw/logical/", pkiMount, storageAcmeConfig)
_, err := client.Logical().WriteWithContext(context.Background(), rawPath, map[string]interface{}{
"value": "{\"enabled\": true, \"eab_policy_name\": \"not-required\"}",
})
require.NoError(t, err, "failed updating acme config through sys/raw")
// Force reload the plugin so we read the new config we slipped in.
_, err = client.Sys().ReloadPluginWithContext(context.Background(), &api.ReloadPluginInput{Mounts: []string{"pki"}})
require.NoError(t, err, "failed reloading plugin")
// Do not fill in the path option within the local cluster configuration
cases := []struct {
name string
directoryUrl string
}{
{"root", "pki/acme/directory"},
{"role", "pki/roles/test-role/acme/directory"},
{"issuer", "pki/issuer/default/acme/directory"},
{"issuer_role", "pki/issuer/default/roles/test-role/acme/directory"},
}
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
dirResp, err := client.Logical().ReadRawWithContext(testCtx, tc.directoryUrl)
require.Error(t, err, "expected failure reading ACME directory configuration got none")
require.Equal(t, "application/problem+json", dirResp.Header.Get("Content-Type"))
require.Equal(t, http.StatusInternalServerError, dirResp.StatusCode)
rawBodyBytes, err := io.ReadAll(dirResp.Body)
require.NoError(t, err, "failed reading from directory response body")
_ = dirResp.Body.Close()
respType := map[string]interface{}{}
err = json.Unmarshal(rawBodyBytes, &respType)
require.NoError(t, err, "failed unmarshalling ACME directory response body")
require.Equal(t, "urn:ietf:params:acme:error:serverInternal", respType["type"])
require.NotEmpty(t, respType["detail"])
})
}
}
// TestAcmeAccountsCrossingDirectoryPath makes sure that if an account attempts to use a different ACME
// directory path, we get an error.
func TestAcmeAccountsCrossingDirectoryPath(t *testing.T) {
t.Parallel()
cluster, _, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
baseAcmeURL := "/v1/pki/acme/"
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
// Create new account
acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering account")
// Try to update the account under another ACME directory
baseAcmeURL2 := "/v1/pki/roles/test-role/acme/"
acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL2, accountKey)
acct.Contact = []string{"mailto:[email protected]"}
_, err = acmeClient2.UpdateReg(testCtx, acct)
require.Error(t, err, "successfully updated account when we should have failed due to different directory")
// We don't test for the specific error about using the wrong directory, as the golang library
// swallows the error we send back into a generic no-account error
}
// TestAcmeEabCrossingDirectoryPath makes sure that if an account attempts to use an EAB created within
// a different ACME directory path, we get an error.
func TestAcmeEabCrossingDirectoryPath(t *testing.T) {
t.Parallel()
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
// Enable EAB
_, err := client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
"enabled": true,
"eab_policy": "always-required",
})
require.NoError(t, err)
baseAcmeURL := "/v1/pki/acme/"
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
// fetch a new EAB
kid, eabKeyBytes := getEABKey(t, client, "roles/test-role/acme/")
acct := &acme.Account{
ExternalAccountBinding: &acme.ExternalAccountBinding{
KID: kid,
Key: eabKeyBytes,
},
}
// Create new account
_, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true })
require.ErrorContains(t, err, "failed to verify eab", "should have failed as EAB is for a different directory")
}
// TestAcmeDisabledWithEnvVar verifies that if VAULT_DISABLE_PUBLIC_ACME is set, we completely
// disable the ACME service
func TestAcmeDisabledWithEnvVar(t *testing.T) {
// Set up a cluster with the configuration initially set to not-required, as the
// configuration validation checks whether the environment variable is set
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
// Seal, set the environment variable, and unseal, which now means we have a cluster
// with an ACME configuration saying it is enabled with a bad EAB policy.
cluster.EnsureCoresSealed(t)
t.Setenv("VAULT_DISABLE_PUBLIC_ACME", "true")
cluster.UnsealCores(t)
// Make sure that ACME is disabled now.
for _, method := range []string{http.MethodHead, http.MethodGet} {
t.Run(fmt.Sprintf("%s", method), func(t *testing.T) {
req := client.NewRequest(method, "/v1/pki/acme/new-nonce")
_, err := client.RawRequestWithContext(ctx, req)
require.Error(t, err, "should have received an error as ACME should have been disabled")
if apiError, ok := err.(*api.ResponseError); ok {
require.Equal(t, 404, apiError.StatusCode)
}
})
}
}
// TestAcmeConfigChecksPublicAcmeEnv verifies that certain EAB policy values cannot be set when the VAULT_DISABLE_PUBLIC_ACME env var is enabled
func TestAcmeConfigChecksPublicAcmeEnv(t *testing.T) {
t.Setenv("VAULT_DISABLE_PUBLIC_ACME", "true")
cluster, client := setupTestPkiCluster(t)
defer cluster.Cleanup()
_, err := client.Logical().WriteWithContext(context.Background(), "pki/config/cluster", map[string]interface{}{
"path": "https://dadgarcorp.com/v1/pki",
})
require.NoError(t, err)
_, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
"enabled": true,
"eab_policy": string(eabPolicyAlwaysRequired),
})
require.NoError(t, err)
for _, policyName := range []EabPolicyName{eabPolicyNewAccountRequired, eabPolicyNotRequired} {
_, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
"enabled": true,
"eab_policy": string(policyName),
})
require.Error(t, err, "eab policy %s should have not been allowed to be set")
}
// Make sure we can disable ACME and the eab policy is not checked
_, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
"enabled": false,
"eab_policy": string(eabPolicyNotRequired),
})
require.NoError(t, err)
}
// TestAcmeTruncatesToIssuerExpiry makes sure that if the selected issuer's expiry is shorter than the
// TTL requested via ACME and the issuer's leaf_not_after_behavior setting is set to Err,
// we override the configured behavior and truncate to the issuer's NotAfter
func TestAcmeTruncatesToIssuerExpiry(t *testing.T) {
t.Parallel()
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
mount := "pki"
resp, err := client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/intermediate/internal",
map[string]interface{}{
"key_name": "short-key",
"key_type": "ec",
"common_name": "test.com",
})
require.NoError(t, err, "failed creating intermediary CSR")
intermediateCSR := resp.Data["csr"].(string)
// Sign the intermediate CSR using /pki
resp, err = client.Logical().Write(mount+"/issuer/root-ca/sign-intermediate", map[string]interface{}{
"csr": intermediateCSR,
"ttl": "10m",
"max_ttl": "1h",
})
require.NoError(t, err, "failed signing intermediary CSR")
intermediateCertPEM := resp.Data["certificate"].(string)
shortCa := parseCert(t, intermediateCertPEM)
// Import the signed intermediate cert as an issuer into the pki mount
resp, err = client.Logical().Write(mount+"/issuers/import/cert", map[string]interface{}{
"pem_bundle": intermediateCertPEM,
})
require.NoError(t, err, "failed importing intermediary cert")
importedIssuersRaw := resp.Data["imported_issuers"].([]interface{})
require.Len(t, importedIssuersRaw, 1)
shortCaUuid := importedIssuersRaw[0].(string)
_, err = client.Logical().Write(mount+"/issuer/"+shortCaUuid, map[string]interface{}{
"leaf_not_after_behavior": "err",
"issuer_name": "short-ca",
})
require.NoError(t, err, "failed updating issuer name")
baseAcmeURL := "/v1/pki/issuer/short-ca/acme/"
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
// Create new account
t.Logf("Testing register on %s", baseAcmeURL)
acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering account")
// Create an order
t.Logf("Testing Authorize Order on %s", baseAcmeURL)
identifiers := []string{"*.localdomain"}
order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
{Type: "dns", Value: identifiers[0]},
})
require.NoError(t, err, "failed creating order")
// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow
// test.
markAuthorizationSuccess(t, client, acmeClient, acct, order)
// Build a proper CSR with the correct name, signed with a different key.
goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}}
csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed generated key for CSR")
csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
require.NoError(t, err, "failed generating csr")
certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
require.NoError(t, err, "failed finalizing order")
require.Len(t, certs, 3, "expected full acme chain")
testAcmeCertSignedByCa(t, client, certs, "short-ca")
acmeCert, err := x509.ParseCertificate(certs[0])
require.NoError(t, err, "failed parsing acme cert")
require.Equal(t, shortCa.NotAfter, acmeCert.NotAfter, "certificate times aren't the same")
}
// TestAcmeRoleExtKeyUsage verifies that ACME by default ignores the role's various ExtKeyUsage flags,
// but if the ACME configuration override allow_role_ext_key_usage is set, we then honor
// the role's flags.
func TestAcmeRoleExtKeyUsage(t *testing.T) {
t.Parallel()
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
roleName := "test-role"
roleOpt := map[string]interface{}{
"ttl": "365h",
"max_ttl": "720h",
"key_type": "any",
"allowed_domains": "localdomain",
"allow_subdomains": "true",
"allow_wildcard_certificates": "true",
"require_cn": "true", /* explicit default */
"server_flag": "true",
"client_flag": "true",
"code_signing_flag": "true",
"email_protection_flag": "true",
}
_, err := client.Logical().Write("pki/roles/"+roleName, roleOpt)
baseAcmeURL := "/v1/pki/roles/" + roleName + "/acme/"
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
require.NoError(t, err, "failed creating role test-role")
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
// Create new account
t.Logf("Testing register on %s", baseAcmeURL)
acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering account")
// Create an order
t.Logf("Testing Authorize Order on %s", baseAcmeURL)
identifiers := []string{"*.localdomain"}
order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
{Type: "dns", Value: identifiers[0]},
})
require.NoError(t, err, "failed creating order")
// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test.
markAuthorizationSuccess(t, client, acmeClient, acct, order)
// Build a proper CSR with the correct name, signed with a different key.
goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}}
csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed generated key for CSR")
csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
require.NoError(t, err, "failed generating csr")
certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
require.NoError(t, err, "order finalization failed")
require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle")
acmeCert, err := x509.ParseCertificate(certs[0])
require.NoError(t, err, "failed parsing acme cert")
require.Equal(t, 1, len(acmeCert.ExtKeyUsage), "mis-match on expected ExtKeyUsages")
require.ElementsMatch(t, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, acmeCert.ExtKeyUsage,
"mismatch of ExtKeyUsage flags")
// Now turn on the ACME configuration option allow_role_ext_key_usage and retest to make sure we get a certificate
// with all of them
_, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
"enabled": true,
"eab_policy": "not-required",
"allow_role_ext_key_usage": true,
})
require.NoError(t, err, "failed updating ACME configuration")
t.Logf("Testing Authorize Order on %s", baseAcmeURL)
order, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
{Type: "dns", Value: identifiers[0]},
})
require.NoError(t, err, "failed creating order")
// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test.
markAuthorizationSuccess(t, client, acmeClient, acct, order)
certs, _, err = acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
require.NoError(t, err, "order finalization failed")
require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle")
acmeCert, err = x509.ParseCertificate(certs[0])
require.NoError(t, err, "failed parsing acme cert")
require.Equal(t, 4, len(acmeCert.ExtKeyUsage), "mis-match on expected ExtKeyUsages")
require.ElementsMatch(t, []x509.ExtKeyUsage{
x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth,
x509.ExtKeyUsageCodeSigning, x509.ExtKeyUsageEmailProtection,
},
acmeCert.ExtKeyUsage, "mismatch of ExtKeyUsage flags")
}
func TestIssuerRoleDirectoryAssociations(t *testing.T) {
t.Parallel()
// This creates two issuers for us (root-ca, int-ca) and two
// roles (test-role, acme) that we can use with various directory
// configurations.
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
// Setup DNS for validations.
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
dns := dnstest.SetupResolver(t, "dadgarcorp.com")
defer dns.Cleanup()
_, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{
"dns_resolver": dns.GetLocalAddr(),
})
require.NoError(t, err, "failed to specify dns resolver")
// 1. Using a forbidden role should fail.
resp, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{
"enabled": true,
"allowed_roles": []string{"acme"},
})
require.NoError(t, err, "failed to write config")
require.NotNil(t, resp)
_, err = client.Logical().ReadWithContext(testCtx, "pki/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role under default issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role under int-ca issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role under root-ca issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/roles/acme/acme/directory")
require.NoError(t, err, "failed to allow usage of acme")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/acme/acme/directory")
require.NoError(t, err, "failed to allow usage of acme under default issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/acme/acme/directory")
require.NoError(t, err, "failed to allow usage of acme under int-ca issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/acme/acme/directory")
require.NoError(t, err, "failed to allow usage of acme under root-ca issuer")
// 2. Using a forbidden issuer should fail.
resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{
"allowed_roles": []string{"acme"},
"allowed_issuers": []string{"int-ca"},
})
require.NoError(t, err, "failed to write config")
require.NotNil(t, resp)
_, err = client.Logical().ReadWithContext(testCtx, "pki/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role under default issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role under int-ca issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role under root-ca issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/acme/acme/directory")
require.Error(t, err, "failed to forbid usage of acme under root-ca issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/roles/acme/acme/directory")
require.NoError(t, err, "failed to allow usage of acme")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/acme/acme/directory")
require.NoError(t, err, "failed to allow usage of acme under default issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/acme/acme/directory")
require.NoError(t, err, "failed to allow usage of acme under int-ca issuer")
// 3. Setting the default directory to be a sign-verbatim policy and
// using two different CAs should result in certs signed by each CA.
resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{
"allowed_roles": []string{"*"},
"allowed_issuers": []string{"*"},
"default_directory_policy": "sign-verbatim",
})
require.NoError(t, err, "failed to write config")
require.NotNil(t, resp)
// default == int-ca
acmeClientDefault := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/default/acme/", nil)
defaultLeafCert := doACMEForDomainWithDNS(t, dns, acmeClientDefault, []string{"default-ca.dadgarcorp.com"})
requireSignedByAtPath(t, client, defaultLeafCert, "pki/issuer/int-ca")
acmeClientIntCA := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/int-ca/acme/", nil)
intCALeafCert := doACMEForDomainWithDNS(t, dns, acmeClientIntCA, []string{"int-ca.dadgarcorp.com"})
requireSignedByAtPath(t, client, intCALeafCert, "pki/issuer/int-ca")
acmeClientRootCA := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/root-ca/acme/", nil)
rootCALeafCert := doACMEForDomainWithDNS(t, dns, acmeClientRootCA, []string{"root-ca.dadgarcorp.com"})
requireSignedByAtPath(t, client, rootCALeafCert, "pki/issuer/root-ca")
// 4. Using a role-based default directory should allow us to control leaf
// issuance on the base and issuer-specific directories.
resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{
"allowed_roles": []string{"*"},
"allowed_issuers": []string{"*"},
"default_directory_policy": "role:acme",
})
require.NoError(t, err, "failed to write config")
require.NotNil(t, resp)
resp, err = client.Logical().JSONMergePatch(testCtx, "pki/roles/acme", map[string]interface{}{
"ou": "IT Security",
"organization": []string{"Dadgar Corporation, Limited"},
"allow_any_name": true,
})
require.NoError(t, err, "failed to write role differentiator")
require.NotNil(t, resp)
for _, issuer := range []string{"", "default", "int-ca", "root-ca"} {
// Path should override role.
directory := "/v1/pki/issuer/" + issuer + "/acme/"
issuerPath := "/pki/issuer/" + issuer
if issuer == "" {
directory = "/v1/pki/acme/"
issuerPath = "/pki/issuer/int-ca"
} else if issuer == "default" {
issuerPath = "/pki/issuer/int-ca"
}
t.Logf("using directory: %v / issuer: %v", directory, issuerPath)
acmeClient := getAcmeClientForCluster(t, cluster, directory, nil)
leafCert := doACMEForDomainWithDNS(t, dns, acmeClient, []string{"role-restricted.dadgarcorp.com"})
require.Contains(t, leafCert.Subject.Organization, "Dadgar Corporation, Limited", "on directory: %v", directory)
require.Contains(t, leafCert.Subject.OrganizationalUnit, "IT Security", "on directory: %v", directory)
requireSignedByAtPath(t, client, leafCert, issuerPath)
}
}
func TestACMESubjectFieldsAndExtensionsIgnored(t *testing.T) {
t.Parallel()
// This creates two issuers for us (root-ca, int-ca) and two
// roles (test-role, acme) that we can use with various directory
// configurations.
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
// Setup DNS for validations.
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
dns := dnstest.SetupResolver(t, "dadgarcorp.com")
defer dns.Cleanup()
_, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{
"dns_resolver": dns.GetLocalAddr(),
})
require.NoError(t, err, "failed to specify dns resolver")
// Use the default sign-verbatim policy and ensure OU does not get set.
directory := "/v1/pki/acme/"
domains := []string{"no-ou.dadgarcorp.com"}
acmeClient := getAcmeClientForCluster(t, cluster, directory, nil)
cr := &x509.CertificateRequest{
Subject: pkix.Name{CommonName: domains[0], OrganizationalUnit: []string{"DadgarCorp IT"}},
DNSNames: domains,
}
cert := doACMEForCSRWithDNS(t, dns, acmeClient, domains, cr)
t.Logf("Got certificate: %v", cert)
require.Empty(t, cert.Subject.OrganizationalUnit)
// Use the default sign-verbatim policy and ensure extension does not get set.
domains = []string{"no-ext.dadgarcorp.com"}
extension, err := certutil.CreateDeltaCRLIndicatorExt(12345)
require.NoError(t, err)
cr = &x509.CertificateRequest{
Subject: pkix.Name{CommonName: domains[0]},
DNSNames: domains,
ExtraExtensions: []pkix.Extension{extension},
}
cert = doACMEForCSRWithDNS(t, dns, acmeClient, domains, cr)
t.Logf("Got certificate: %v", cert)
for _, ext := range cert.Extensions {
require.False(t, ext.Id.Equal(certutil.DeltaCRLIndicatorOID))
}
require.NotEmpty(t, cert.Extensions)
}
// TestAcmeWithCsrIncludingBasicConstraintExtension verifies that we error out for a CSR that requests a
// certificate with IsCA set to true within the basic constraints extension (IsCA set to false is acceptable),
// and that in either case the extension is not present on the returned certificate.
func TestAcmeWithCsrIncludingBasicConstraintExtension(t *testing.T) {
t.Parallel()
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
baseAcmeURL := "/v1/pki/acme/"
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
// Create new account
t.Logf("Testing register on %s", baseAcmeURL)
acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering account")
// Create an order
t.Logf("Testing Authorize Order on %s", baseAcmeURL)
identifiers := []string{"*.localdomain"}
order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
{Type: "dns", Value: identifiers[0]},
})
require.NoError(t, err, "failed creating order")
// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test.
markAuthorizationSuccess(t, client, acmeClient, acct, order)
// Build a CSR with IsCA set to true, making sure we reject it
extension, err := certutil.CreateBasicConstraintExtension(true, -1)
require.NoError(t, err, "failed generating basic constraint extension")
isCATrueCSR := &x509.CertificateRequest{
DNSNames: []string{identifiers[0]},
ExtraExtensions: []pkix.Extension{extension},
}
csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed generated key for CSR")
csr, err := x509.CreateCertificateRequest(rand.Reader, isCATrueCSR, csrKey)
require.NoError(t, err, "failed generating csr")
_, _, err = acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
require.Error(t, err, "order finalization should have failed with IsCA set to true")
extension, err = certutil.CreateBasicConstraintExtension(false, -1)
require.NoError(t, err, "failed generating basic constraint extension")
isCAFalseCSR := &x509.CertificateRequest{
		DNSNames:        []string{identifiers[0]},
		ExtraExtensions: []pkix.Extension{extension},
}
csr, err = x509.CreateCertificateRequest(rand.Reader, isCAFalseCSR, csrKey)
require.NoError(t, err, "failed generating csr")
certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
require.NoError(t, err, "order finalization should have failed with IsCA set to false")
require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle")
acmeCert, err := x509.ParseCertificate(certs[0])
require.NoError(t, err, "failed parsing acme cert")
// Make sure we don't have any basic constraint extension within the returned cert
for _, ext := range acmeCert.Extensions {
if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) {
// We shouldn't have this extension in our cert
t.Fatalf("acme csr contained a basic constraints extension")
}
}
}
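// markAuthorizationSuccess forces an ACME order's authorizations and challenges into the valid state by editing
// the backend's storage directly through sys/raw, repeatedly deleting any in-flight challenge validation entries
// so the engine does not overwrite the change. This lets workflow tests finalize orders without performing real
// ACME challenge validation.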
func markAuthorizationSuccess(t *testing.T, client *api.Client, acmeClient *acme.Client, acct *acme.Account, order *acme.Order) {
testCtx := context.Background()
pkiMount := findStorageMountUuid(t, client, "pki")
// Delete any and all challenge validation entries to stop the engine from overwriting our hack here
i := 0
for {
deleteCvEntries(t, client, pkiMount)
accountId := acct.URI[strings.LastIndex(acct.URI, "/"):]
for _, authURI := range order.AuthzURLs {
authId := authURI[strings.LastIndex(authURI, "/"):]
// sys/raw does not work with namespaces
baseClient := client.WithNamespace("")
values, err := baseClient.Logical().ListWithContext(testCtx, "sys/raw/logical/")
require.NoError(t, err)
			t.Logf("values: %v", values)
rawPath := path.Join("sys/raw/logical/", pkiMount, getAuthorizationPath(accountId, authId))
resp, err := baseClient.Logical().ReadWithContext(testCtx, rawPath)
require.NoError(t, err, "failed looking up authorization storage")
require.NotNil(t, resp, "sys raw response was nil")
require.NotEmpty(t, resp.Data["value"], "no value field in sys raw response")
var authz ACMEAuthorization
err = jsonutil.DecodeJSON([]byte(resp.Data["value"].(string)), &authz)
require.NoError(t, err, "error decoding authorization: %w", err)
authz.Status = ACMEAuthorizationValid
for _, challenge := range authz.Challenges {
challenge.Status = ACMEChallengeValid
}
encodeJSON, err := jsonutil.EncodeJSON(authz)
require.NoError(t, err, "failed encoding authz json")
_, err = baseClient.Logical().WriteWithContext(testCtx, rawPath, map[string]interface{}{
"value": base64.StdEncoding.EncodeToString(encodeJSON),
"encoding": "base64",
})
require.NoError(t, err, "failed writing authorization storage")
}
// Give some time
time.Sleep(200 * time.Millisecond)
// Check to see if we have fixed up the status and no new entries have appeared.
if !deleteCvEntries(t, client, pkiMount) {
// No entries found
// Look to see if we raced against the engine
orderLookup, err := acmeClient.GetOrder(testCtx, order.URI)
require.NoError(t, err, "failed loading order status after manually ")
if orderLookup.Status == string(ACMEOrderReady) {
// Our order seems to be in the proper status, should be safe-ish to go ahead now
break
} else {
t.Logf("order status was not ready, retrying")
}
} else {
t.Logf("new challenge entries appeared after deletion, retrying")
}
if i > 5 {
t.Fatalf("We are constantly deleting cv entries or order status is not changing, something is wrong")
}
i++
}
}
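// deleteCvEntries removes all pending ACME challenge validation entries for the given PKI mount via sys/raw,
// returning true if any entries were found and deleted.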
func deleteCvEntries(t *testing.T, client *api.Client, pkiMount string) bool {
testCtx := context.Background()
baseClient := client.WithNamespace("")
cvPath := path.Join("sys/raw/logical/", pkiMount, acmeValidationPrefix)
resp, err := baseClient.Logical().ListWithContext(testCtx, cvPath)
require.NoError(t, err, "failed listing cv path items")
deletedEntries := false
if resp != nil {
cvEntries := resp.Data["keys"].([]interface{})
for _, cvEntry := range cvEntries {
cvEntryPath := path.Join(cvPath, cvEntry.(string))
_, err = baseClient.Logical().DeleteWithContext(testCtx, cvEntryPath)
require.NoError(t, err, "failed to delete cv entry")
deletedEntries = true
}
}
return deletedEntries
}
func setupAcmeBackend(t *testing.T) (*vault.TestCluster, *api.Client, string) {
cluster, client := setupTestPkiCluster(t)
return setupAcmeBackendOnClusterAtPath(t, cluster, client, "pki")
}
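// setupAcmeBackendOnClusterAtPath mounts a PKI secrets engine at the given path (creating any intermediate
// namespaces when running enterprise), enables ACME with the not-required EAB policy, provisions a
// root-ca/int-ca issuer hierarchy, and creates the test-role and acme roles used throughout these tests.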
func setupAcmeBackendOnClusterAtPath(t *testing.T, cluster *vault.TestCluster, client *api.Client, mount string) (*vault.TestCluster, *api.Client, string) {
mount = strings.Trim(mount, "/")
// Setting templated AIAs should succeed.
pathConfig := client.Address() + "/v1/" + mount
namespace := ""
mountName := mount
if mount != "pki" {
if strings.Contains(mount, "/") && constants.IsEnterprise {
ns_pieces := strings.Split(mount, "/")
c := len(ns_pieces)
// mount is c-1
ns_name := ns_pieces[c-2]
if len(ns_pieces) > 2 {
// Parent's namespaces
parent := strings.Join(ns_pieces[0:c-2], "/")
_, err := client.WithNamespace(parent).Logical().Write("/sys/namespaces/"+ns_name, nil)
require.NoError(t, err, "failed to create nested namespaces "+parent+" -> "+ns_name)
} else {
_, err := client.Logical().Write("/sys/namespaces/"+ns_name, nil)
require.NoError(t, err, "failed to create nested namespace "+ns_name)
}
namespace = strings.Join(ns_pieces[0:c-1], "/")
mountName = ns_pieces[c-1]
}
err := client.WithNamespace(namespace).Sys().Mount(mountName, &api.MountInput{
Type: "pki",
Config: api.MountConfigInput{
DefaultLeaseTTL: "3000h",
MaxLeaseTTL: "600000h",
},
})
require.NoError(t, err, "failed to mount new PKI instance at "+mount)
}
err := client.Sys().TuneMountWithContext(ctx, mount, api.MountConfigInput{
DefaultLeaseTTL: "3000h",
MaxLeaseTTL: "600000h",
})
require.NoError(t, err, "failed updating mount lease times "+mount)
_, err = client.Logical().WriteWithContext(context.Background(), mount+"/config/cluster", map[string]interface{}{
"path": pathConfig,
"aia_path": "http://localhost:8200/cdn/" + mount,
})
require.NoError(t, err)
_, err = client.Logical().WriteWithContext(context.Background(), mount+"/config/acme", map[string]interface{}{
"enabled": true,
"eab_policy": "not-required",
})
require.NoError(t, err)
// Allow certain headers to pass through for ACME support
_, err = client.WithNamespace(namespace).Logical().WriteWithContext(context.Background(), "sys/mounts/"+mountName+"/tune", map[string]interface{}{
"allowed_response_headers": []string{"Last-Modified", "Replay-Nonce", "Link", "Location"},
"max_lease_ttl": "920000h",
})
require.NoError(t, err, "failed tuning mount response headers")
resp, err := client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/root/internal",
map[string]interface{}{
"issuer_name": "root-ca",
"key_name": "root-key",
"key_type": "ec",
"common_name": "Test Root R1 " + mount,
"ttl": "7200h",
"max_ttl": "920000h",
})
require.NoError(t, err, "failed creating root CA")
resp, err = client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/intermediate/internal",
map[string]interface{}{
"key_name": "int-key",
"key_type": "ec",
"common_name": "Test Int X1 " + mount,
})
require.NoError(t, err, "failed creating intermediary CSR")
intermediateCSR := resp.Data["csr"].(string)
	// Sign the intermediate CSR using the root-ca issuer
resp, err = client.Logical().Write(mount+"/issuer/root-ca/sign-intermediate", map[string]interface{}{
"csr": intermediateCSR,
"ttl": "7100h",
"max_ttl": "910000h",
})
require.NoError(t, err, "failed signing intermediary CSR")
intermediateCertPEM := resp.Data["certificate"].(string)
	// Import the signed intermediate certificate back into the mount
resp, err = client.Logical().Write(mount+"/issuers/import/cert", map[string]interface{}{
"pem_bundle": intermediateCertPEM,
})
require.NoError(t, err, "failed importing intermediary cert")
importedIssuersRaw := resp.Data["imported_issuers"].([]interface{})
require.Len(t, importedIssuersRaw, 1)
intCaUuid := importedIssuersRaw[0].(string)
_, err = client.Logical().Write(mount+"/issuer/"+intCaUuid, map[string]interface{}{
"issuer_name": "int-ca",
})
require.NoError(t, err, "failed updating issuer name")
_, err = client.Logical().Write(mount+"/config/issuers", map[string]interface{}{
"default": "int-ca",
})
require.NoError(t, err, "failed updating default issuer")
_, err = client.Logical().Write(mount+"/roles/test-role", map[string]interface{}{
"ttl": "168h",
"max_ttl": "168h",
"key_type": "any",
"allowed_domains": "localdomain",
"allow_subdomains": "true",
"allow_wildcard_certificates": "true",
})
require.NoError(t, err, "failed creating role test-role")
_, err = client.Logical().Write(mount+"/roles/acme", map[string]interface{}{
"ttl": "3650h",
"max_ttl": "7200h",
"key_type": "any",
})
require.NoError(t, err, "failed creating role acme")
return cluster, client, pathConfig
}
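// testAcmeCertSignedByCa verifies that the leaf certificate returned by an ACME finalization was signed by the
// referenced issuer and that the returned chain matches the issuer's ca_chain exactly.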
func testAcmeCertSignedByCa(t *testing.T, client *api.Client, derCerts [][]byte, issuerRef string) {
t.Helper()
require.NotEmpty(t, derCerts)
acmeCert, err := x509.ParseCertificate(derCerts[0])
require.NoError(t, err, "failed parsing acme cert bytes")
resp, err := client.Logical().ReadWithContext(context.Background(), "pki/issuer/"+issuerRef)
require.NoError(t, err, "failed reading issuer with name %s", issuerRef)
issuerCert := parseCert(t, resp.Data["certificate"].(string))
issuerChainRaw := resp.Data["ca_chain"].([]interface{})
err = acmeCert.CheckSignatureFrom(issuerCert)
require.NoError(t, err, "issuer %s did not sign provided cert", issuerRef)
expectedCerts := [][]byte{derCerts[0]}
for _, entry := range issuerChainRaw {
chainCert := parseCert(t, entry.(string))
expectedCerts = append(expectedCerts, chainCert.Raw)
}
if diffs := deep.Equal(expectedCerts, derCerts); diffs != nil {
t.Fatalf("diffs were found between the acme chain returned and the expected value: \n%v", diffs)
}
}
// TestAcmeValidationError makes sure that we properly return errors on validation failures.
func TestAcmeValidationError(t *testing.T) {
t.Parallel()
cluster, _, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
baseAcmeURL := "/v1/pki/acme/"
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
// Create new account
t.Logf("Testing register on %s", baseAcmeURL)
_, err = acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering account")
// Create an order
t.Logf("Testing Authorize Order on %s", baseAcmeURL)
identifiers := []string{"www.dadgarcorp.com"}
order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
{Type: "dns", Value: identifiers[0]},
})
require.NoError(t, err, "failed creating order")
// Load authorizations
var authorizations []*acme.Authorization
for _, authUrl := range order.AuthzURLs {
auth, err := acmeClient.GetAuthorization(testCtx, authUrl)
require.NoError(t, err, "failed fetching authorization: %s", authUrl)
authorizations = append(authorizations, auth)
}
	require.Len(t, authorizations, 1, "expected exactly one authorization")
	require.Len(t, authorizations[0].Challenges, 3, "expected exactly three challenges on the authorization")
acceptedAuth, err := acmeClient.Accept(testCtx, authorizations[0].Challenges[0])
require.NoError(t, err, "Should have been allowed to accept challenge 1")
require.Equal(t, string(ACMEChallengeProcessing), acceptedAuth.Status)
_, err = acmeClient.Accept(testCtx, authorizations[0].Challenges[1])
require.Error(t, err, "Should have been prevented to accept challenge 2")
// Make sure our challenge returns errors
testhelpers.RetryUntil(t, 30*time.Second, func() error {
challenge, err := acmeClient.GetChallenge(testCtx, authorizations[0].Challenges[0].URI)
if err != nil {
return err
}
if challenge.Error == nil {
return fmt.Errorf("no error set in challenge yet")
}
acmeError, ok := challenge.Error.(*acme.Error)
if !ok {
return fmt.Errorf("unexpected error back: %v", err)
}
if acmeError.ProblemType != "urn:ietf:params:acme:error:incorrectResponse" {
return fmt.Errorf("unexpected ACME error back: %v", acmeError)
}
return nil
})
// Make sure our challenge,auth and order status change.
// This takes a little too long to run in CI properly, we need the ability to influence
// how long the validations take before CI can go wild on this.
if os.Getenv("CI") == "" {
testhelpers.RetryUntil(t, 10*time.Minute, func() error {
challenge, err := acmeClient.GetChallenge(testCtx, authorizations[0].Challenges[0].URI)
if err != nil {
return fmt.Errorf("failed to load challenge: %w", err)
}
if challenge.Status != string(ACMEChallengeInvalid) {
return fmt.Errorf("challenge state was not changed to invalid: %v", challenge)
}
authz, err := acmeClient.GetAuthorization(testCtx, authorizations[0].URI)
if err != nil {
return fmt.Errorf("failed to load authorization: %w", err)
}
if authz.Status != string(ACMEAuthorizationInvalid) {
return fmt.Errorf("authz state was not changed to invalid: %v", authz)
}
myOrder, err := acmeClient.GetOrder(testCtx, order.URI)
if err != nil {
return fmt.Errorf("failed to load order: %w", err)
}
if myOrder.Status != string(ACMEOrderInvalid) {
return fmt.Errorf("order state was not changed to invalid: %v", order)
}
return nil
})
}
}
// TestAcmeRevocationAcrossAccounts makes sure we can revoke a certificate from a different ACME account, or with
// no ACME account at all, as long as the request is signed with the certificate's private key. It also verifies
// that we cannot revoke certificates across accounts using only another account's key.
func TestAcmeRevocationAcrossAccounts(t *testing.T) {
t.Parallel()
cluster, vaultClient, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
baseAcmeURL := "/v1/pki/acme/"
accountKey1, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
acmeClient1 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey1)
leafKey, certs := doACMEWorkflow(t, vaultClient, acmeClient1)
acmeCert, err := x509.ParseCertificate(certs[0])
require.NoError(t, err, "failed parsing acme cert bytes")
// Make sure our cert is not revoked
certResp, err := vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert))
require.NoError(t, err, "failed to read certificate status")
require.NotNil(t, certResp, "certificate status response was nil")
revocationTime := certResp.Data["revocation_time"].(json.Number)
revocationTimeInt, err := revocationTime.Int64()
require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
require.Equal(t, revocationTimeInt, int64(0),
"revocation time was not 0, cert was already revoked: %v", revocationTimeInt)
// Test that we can't revoke the certificate with another account's key
accountKey2, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
require.NoError(t, err, "failed creating rsa key")
acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey2)
_, err = acmeClient2.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering second account")
err = acmeClient2.RevokeCert(ctx, nil, certs[0], acme.CRLReasonUnspecified)
require.Error(t, err, "should have failed revoking the certificate with a different account")
// Make sure our cert is not revoked
certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert))
require.NoError(t, err, "failed to read certificate status")
require.NotNil(t, certResp, "certificate status response was nil")
revocationTime = certResp.Data["revocation_time"].(json.Number)
revocationTimeInt, err = revocationTime.Int64()
require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
require.Equal(t, revocationTimeInt, int64(0),
"revocation time was not 0, cert was already revoked: %v", revocationTimeInt)
// But we can revoke if we sign the request with the certificate's key and a different account
err = acmeClient2.RevokeCert(ctx, leafKey, certs[0], acme.CRLReasonUnspecified)
require.NoError(t, err, "should have been allowed to revoke certificate with csr key across accounts")
// Make sure our cert is now revoked
certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert))
require.NoError(t, err, "failed to read certificate status")
require.NotNil(t, certResp, "certificate status response was nil")
revocationTime = certResp.Data["revocation_time"].(json.Number)
revocationTimeInt, err = revocationTime.Int64()
require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
require.Greater(t, revocationTimeInt, int64(0),
"revocation time was not greater than 0, cert was not revoked: %v", revocationTimeInt)
// Make sure we can revoke a certificate without a registered ACME account
leafKey2, certs2 := doACMEWorkflow(t, vaultClient, acmeClient1)
acmeClient3 := getAcmeClientForCluster(t, cluster, baseAcmeURL, nil)
err = acmeClient3.RevokeCert(ctx, leafKey2, certs2[0], acme.CRLReasonUnspecified)
require.NoError(t, err, "should be allowed to revoke a cert with no ACME account but with cert key")
// Make sure our cert is now revoked
acmeCert2, err := x509.ParseCertificate(certs2[0])
require.NoError(t, err, "failed parsing acme cert 2 bytes")
certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert2))
require.NoError(t, err, "failed to read certificate status")
require.NotNil(t, certResp, "certificate status response was nil")
revocationTime = certResp.Data["revocation_time"].(json.Number)
revocationTimeInt, err = revocationTime.Int64()
require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
require.Greater(t, revocationTimeInt, int64(0),
"revocation time was not greater than 0, cert was not revoked: %v", revocationTimeInt)
}
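// doACMEWorkflow runs a complete ACME issuance against the given client's directory: it registers (or reuses) an
// account, creates an order for *.localdomain, forcibly marks the authorizations valid, and finalizes the order,
// returning the leaf's private key and the full DER certificate chain.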
func doACMEWorkflow(t *testing.T, vaultClient *api.Client, acmeClient *acme.Client) (*ecdsa.PrivateKey, [][]byte) {
testCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// Create new account
acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
if err != nil {
if strings.Contains(err.Error(), "acme: account already exists") {
acct, err = acmeClient.GetReg(testCtx, "")
require.NoError(t, err, "failed looking up account after account exists error?")
} else {
require.NoError(t, err, "failed registering account")
}
}
// Create an order
identifiers := []string{"*.localdomain"}
order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
{Type: "dns", Value: identifiers[0]},
})
require.NoError(t, err, "failed creating order")
// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow
// test.
markAuthorizationSuccess(t, vaultClient, acmeClient, acct, order)
	// Build a proper CSR with the correct name, signed with a different key than the account key.
goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}}
csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed generated key for CSR")
csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
require.NoError(t, err, "failed generating csr")
certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
require.NoError(t, err, "failed finalizing order")
require.Len(t, certs, 3, "expected full acme chain")
return csrKey, certs
}
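// setupTestPkiCluster starts a Vault test cluster with the PKI backend registered and sys/raw access enabled,
// mounts a PKI engine at pki/, and returns the cluster along with a client for its first core.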
func setupTestPkiCluster(t *testing.T) (*vault.TestCluster, *api.Client) {
coreConfig := &vault.CoreConfig{
LogicalBackends: map[string]logical.Factory{
"pki": Factory,
},
EnableRaw: true,
}
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
})
cluster.Start()
client := cluster.Cores[0].Client
mountPKIEndpoint(t, client, "pki")
return cluster, client
}
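// getAcmeClientForCluster builds an acme.Client whose directory URL points at the given ACME mount path on the
// cluster's first core, using the cluster's TLS configuration and the provided account key (which may be nil,
// e.g. for revocation requests signed with the certificate key).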
func getAcmeClientForCluster(t *testing.T, cluster *vault.TestCluster, baseUrl string, key crypto.Signer) *acme.Client {
coreAddr := cluster.Cores[0].Listeners[0].Address
tlsConfig := cluster.Cores[0].TLSConfig()
transport := cleanhttp.DefaultPooledTransport()
transport.TLSClientConfig = tlsConfig.Clone()
if err := http2.ConfigureTransport(transport); err != nil {
t.Fatal(err)
}
httpClient := &http.Client{Transport: transport}
if baseUrl[0] == '/' {
baseUrl = baseUrl[1:]
}
if !strings.HasPrefix(baseUrl, "v1/") {
baseUrl = "v1/" + baseUrl
}
if !strings.HasSuffix(baseUrl, "/") {
baseUrl = baseUrl + "/"
}
baseAcmeURL := fmt.Sprintf("https://%s/%s", coreAddr.String(), baseUrl)
return &acme.Client{
Key: key,
HTTPClient: httpClient,
DirectoryURL: baseAcmeURL + "directory",
}
}
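// getEABKey requests a new external account binding (EAB) key from the given ACME directory path and returns the
// key identifier along with the decoded HMAC key bytes, validating the response fields along the way.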
func getEABKey(t *testing.T, client *api.Client, baseUrl string) (string, []byte) {
t.Helper()
resp, err := client.Logical().WriteWithContext(ctx, path.Join("pki/", baseUrl, "/new-eab"), map[string]interface{}{})
require.NoError(t, err, "failed getting eab key")
require.NotNil(t, resp, "eab key returned nil response")
require.NotEmpty(t, resp.Data["id"], "eab key response missing id field")
kid := resp.Data["id"].(string)
require.NotEmpty(t, resp.Data["key"], "eab key response missing private_key field")
base64Key := resp.Data["key"].(string)
require.True(t, strings.HasPrefix(base64Key, "vault-eab-0-"), "%s should have had a prefix of vault-eab-0-", base64Key)
privateKeyBytes, err := base64.RawURLEncoding.DecodeString(base64Key)
require.NoError(t, err, "failed base 64 decoding eab key response")
require.Equal(t, "hs", resp.Data["key_type"], "eab key_type field mis-match")
require.Equal(t, baseUrl+"directory", resp.Data["acme_directory"], "eab acme_directory field mis-match")
require.NotEmpty(t, resp.Data["created_on"], "empty created_on field")
_, err = time.Parse(time.RFC3339, resp.Data["created_on"].(string))
require.NoError(t, err, "failed parsing eab created_on field")
return kid, privateKeyBytes
}
func TestACMEClientRequestLimits(t *testing.T) {
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
cases := []struct {
name string
authorizations []acme.AuthzID
requestCSR x509.CertificateRequest
valid bool
}{
{
"validate-only-cn",
[]acme.AuthzID{
{"dns", "localhost"},
},
x509.CertificateRequest{
Subject: pkix.Name{CommonName: "localhost"},
},
true,
},
{
"validate-only-san",
[]acme.AuthzID{
{"dns", "localhost"},
},
x509.CertificateRequest{
DNSNames: []string{"localhost"},
},
true,
},
{
"validate-only-ip-address",
[]acme.AuthzID{
{"ip", "127.0.0.1"},
},
x509.CertificateRequest{
IPAddresses: []net.IP{{127, 0, 0, 1}},
},
true,
},
}
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
acmeConfig := map[string]interface{}{
"enabled": true,
"allowed_issuers": "*",
"allowed_roles": "*",
"default_directory_policy": "sign-verbatim",
"dns_resolver": "",
"eab_policy_name": "",
}
_, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", acmeConfig)
require.NoError(t, err, "error configuring acme")
for _, tc := range cases {
// First Create Our Client
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
acmeClient := getAcmeClientForCluster(t, cluster, "/v1/pki/acme/", accountKey)
discovery, err := acmeClient.Discover(testCtx)
require.NoError(t, err, "failed acme discovery call")
t.Logf("%v", discovery)
acct, err := acmeClient.Register(testCtx, &acme.Account{
Contact: []string{"mailto:[email protected]"},
}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering account")
require.Equal(t, acme.StatusValid, acct.Status)
require.Contains(t, acct.Contact, "mailto:[email protected]")
require.Len(t, acct.Contact, 1)
// Create an order
t.Logf("Testing Authorize Order on %s", "pki/acme")
identifiers := make([]string, len(tc.authorizations))
for index, auth := range tc.authorizations {
identifiers[index] = auth.Value
}
createOrder, err := acmeClient.AuthorizeOrder(testCtx, tc.authorizations)
require.NoError(t, err, "failed creating order")
require.Equal(t, acme.StatusPending, createOrder.Status)
require.Empty(t, createOrder.CertURL)
require.Equal(t, createOrder.URI+"/finalize", createOrder.FinalizeURL)
require.Len(t, createOrder.AuthzURLs, len(tc.authorizations), "expected same number of authzurls as identifiers")
// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow
// test.
markAuthorizationSuccess(t, client, acmeClient, acct, createOrder)
// Submit the CSR
csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed generated key for CSR")
csr, err := x509.CreateCertificateRequest(rand.Reader, &tc.requestCSR, csrKey)
require.NoError(t, err, "failed generating csr")
certs, _, err := acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csr, true)
if tc.valid {
require.NoError(t, err, "failed finalizing order")
// Validate we get a signed cert back
testAcmeCertSignedByCa(t, client, certs, "int-ca")
} else {
require.Error(t, err, "Not a valid CSR, should err")
}
}
}
| builtin/logical/pki/path_acme_test.go | 0 | https://github.com/hashicorp/vault/commit/375c2be624cfd9b19b2f21cbb62a3e453d605f58 | [
0.001854238100349903,
0.00018575349531602114,
0.0001628122990950942,
0.00017229172226507217,
0.00013245103764347732
] |
{
"id": 6,
"code_window": [
" required: false\n",
" default: go-test-reports\n",
" type: string\n",
"\n",
"env: ${{ fromJSON(inputs.env-vars) }}\n",
"\n",
"jobs:\n",
" test-matrix:\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" checkout-ref:\n",
" description: The ref to use for checkout.\n",
" required: false\n",
" default: ${{ github.base_ref }}\n",
" type: string\n"
],
"file_path": ".github/workflows/test-go.yml",
"type": "add",
"edit_start_line_idx": 69
} | /**
* Copyright (c) HashiCorp, Inc.
* SPDX-License-Identifier: BUSL-1.1
*/
import Route from '@ember/routing/route';
import { inject as service } from '@ember/service';
export default class VaultClusterAccessMethodsRoute extends Route {
@service store;
queryParams = {
page: {
refreshModel: true,
},
pageFilter: {
refreshModel: true,
},
};
model() {
return this.store.findAll('auth-method');
}
}
| ui/app/routes/vault/cluster/access/methods.js | 0 | https://github.com/hashicorp/vault/commit/375c2be624cfd9b19b2f21cbb62a3e453d605f58 | [
0.0001736538833938539,
0.00017223689064849168,
0.00017026954446919262,
0.00017278725863434374,
0.0000014354121731230407
] |
{
"id": 6,
"code_window": [
" required: false\n",
" default: go-test-reports\n",
" type: string\n",
"\n",
"env: ${{ fromJSON(inputs.env-vars) }}\n",
"\n",
"jobs:\n",
" test-matrix:\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" checkout-ref:\n",
" description: The ref to use for checkout.\n",
" required: false\n",
" default: ${{ github.base_ref }}\n",
" type: string\n"
],
"file_path": ".github/workflows/test-go.yml",
"type": "add",
"edit_start_line_idx": 69
} | ---
layout: docs
page_title: Google Cloud Platform Secret Manager - Secrets Sync Destination
description: The Google Cloud Platform Secret Manager destination syncs secrets from Vault to GCP.
---
# Google Cloud Platform Secret Manager
The Google Cloud Platform (GCP) Secret Manager sync destination allows Vault to safely synchronize secrets to your GCP projects.
This is a low footprint option that enables your applications to benefit from Vault-managed secrets without requiring them
to connect directly with Vault. This guide walks you through the configuration process.
Prerequisites:
* Ability to read or create KVv2 secrets
* Ability to create GCP Service Account credentials with access to the Secret Manager
* Ability to create sync destinations and associations on your Vault server
## Setup
1. If you do not already have a Service Account, navigate to the IAM & Admin page in the Google Cloud console to
[create a new Service Account](https://cloud.google.com/iam/docs/service-accounts-create) with the
[necessary permissions](/vault/docs/sync/gcpsm#permissions). [Instructions](/vault/docs/sync/gcpsm#provision-service-account)
to provision this Service Account via Terraform can be found below.
1. Configure a sync destination with the Service Account JSON credentials created in the previous step. See docs for
[alternative ways](/vault/docs/secrets/gcp#authentication) to pass in the `credentials` parameter.
```shell-session
$ vault write sys/sync/destinations/gcp-sm/my-dest \
credentials='@path/to/credentials.json'
```
**Output:**
<CodeBlockConfig hideClipboard>
```plaintext
Key Value
--- -----
connection_details map[credentials:*****]
name my-dest
type gcp-sm
```
</CodeBlockConfig>
## Usage
1. If you do not already have a KVv2 secret to sync, mount a new KVv2 secrets engine.
```shell-session
$ vault secrets enable -path=my-kv kv-v2
```
**Output**:
<CodeBlockConfig hideClipboard>
```plaintext
Success! Enabled the kv-v2 secrets engine at: my-kv/
```
</CodeBlockConfig>
1. Create secrets you wish to sync with a target GCP Secret Manager.
```shell-session
$ vault kv put -mount=my-kv my-secret foo='bar'
```
**Output**:
<CodeBlockConfig hideClipboard>
```plaintext
==== Secret Path ====
my-kv/data/my-secret
======= Metadata =======
Key Value
--- -----
created_time <timestamp>
custom_metadata <nil>
deletion_time n/a
destroyed false
version 1
```
</CodeBlockConfig>
1. Create an association between the destination and a secret to synchronize.
```shell-session
$ vault write sys/sync/destinations/gcp-sm/my-dest/associations/set \
mount='my-kv' \
secret_name='my-secret'
```
**Output:**
<CodeBlockConfig hideClipboard>
```plaintext
Key Value
--- -----
associated_secrets map[kv_1234/my-secret:map[accessor:kv_1234 secret_name:my-secret sync_status:SYNCED updated_at:<timestamp>]]
store_name my-dest
store_type gcp-sm
```
</CodeBlockConfig>
1. Navigate to the [Secret Manager](https://console.cloud.google.com/security/secret-manager) in the Google Cloud console
to confirm your secret was successfully created in your GCP project.
Moving forward, any modification to the Vault secret is propagated in near real time to its GCP Secret Manager
counterpart. Creating a new secret version in Vault creates a new version in GCP Secret Manager, and deleting the
secret or the association in Vault deletes the secret in your GCP project as well.
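As a quick illustration (a sketch reusing the `my-kv` mount and `my-dest` destination from the steps above), writing a new version of the secret in Vault is all it takes to produce a new synced version on the GCP side:

```shell-session
$ vault kv put -mount=my-kv my-secret foo='baz'
```

Likewise, removing the association (via the `associations/remove` endpoint documented in the secrets sync API) stops the synchronization and deletes the GCP secret:

```shell-session
$ vault write sys/sync/destinations/gcp-sm/my-dest/associations/remove \
    mount='my-kv' \
    secret_name='my-secret'
```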
## Permissions
The credentials given to Vault must have the following permissions to synchronize secrets:
```shell-session
secretmanager.secrets.create
secretmanager.secrets.delete
secretmanager.secrets.get
secretmanager.secrets.list
secretmanager.secrets.update
secretmanager.versions.access
secretmanager.versions.add
secretmanager.versions.destroy
secretmanager.versions.get
secretmanager.versions.list
```
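One way to grant exactly this set of permissions is through a custom role bound to the Service Account. The commands below are a sketch using the `gcloud` CLI; the project ID (`my-project`) and the Service Account email are placeholders to substitute with your own values:

```shell-session
$ gcloud iam roles create vaultSecretsSync \
    --project=my-project \
    --title="Vault Secrets Sync" \
    --permissions=secretmanager.secrets.create,secretmanager.secrets.delete,secretmanager.secrets.get,secretmanager.secrets.list,secretmanager.secrets.update,secretmanager.versions.access,secretmanager.versions.add,secretmanager.versions.destroy,secretmanager.versions.get,secretmanager.versions.list

$ gcloud projects add-iam-policy-binding my-project \
    --member="serviceAccount:[email protected]" \
    --role="projects/my-project/roles/vaultSecretsSync"
```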
## Provision service account
Vault needs to be configured with credentials to establish a trust relationship with your GCP project so it can manage
Secret Manager secrets on your behalf. The IAM & Admin page in the Google Cloud console can be used to
[create a new Service Account](https://cloud.google.com/iam/docs/service-accounts-create) with access to the Secret Manager.
You can equally use the [Terraform Google provider](https://registry.terraform.io/providers/hashicorp/google/latest/docs#authentication-and-configuration)
to provision a GCP Service Account with the appropriate policies.
1. Copy-paste this HCL snippet into a `secrets-sync-setup.tf` file.
```hcl
provider "google" {
// See https://registry.terraform.io/providers/hashicorp/google/latest/docs#authentication-and-configuration to setup the Google Provider
// for options on how to configure this provider. The following parameters or environment
// variables are typically used.
// Parameters
// region = "" (Optional)
// project = ""
// credentials = ""
// Environment Variables
// GOOGLE_REGION (optional)
// GOOGLE_PROJECT
// GOOGLE_CREDENTIALS (The path to a service account key file with the
// "Service Account Admin", "Service Account Key
// Admin", and "Security Admin" roles attached)
}
data "google_client_config" "config" {}
resource "google_service_account" "vault_secrets_sync_account" {
account_id = "gcp-sm-vault-secrets-sync"
description = "service account for Vault Secrets Sync feature"
}
// Production environments should use a more restricted role.
// The built-in secret manager admin role is used as an example for simplicity.
data "google_iam_policy" "vault_secrets_sync_iam_policy" {
binding {
role = "roles/secretmanager.admin"
members = [
google_service_account.vault_secrets_sync_account.email,
]
}
}
resource "google_project_iam_member" "vault_secrets_sync_iam_member" {
project = data.google_client_config.config.project
role = "roles/secretmanager.admin"
member = google_service_account.vault_secrets_sync_account.member
}
resource "google_service_account_key" "vault_secrets_sync_account_key" {
service_account_id = google_service_account.vault_secrets_sync_account.name
public_key_type = "TYPE_X509_PEM_FILE"
}
resource "local_file" "vault_secrets_sync_credentials_file" {
content = base64decode(google_service_account_key.vault_secrets_sync_account_key.private_key)
filename = "gcp-sm-sync-service-account-credentials.json"
}
output "vault_secrets_sync_credentials_file_path" {
     value = abspath("${path.module}/${local_file.vault_secrets_sync_credentials_file.filename}")
}
```
1. Execute a plan to validate the Terraform Google provider is properly configured.
```shell-session
$ terraform init && terraform plan
```
**Output:**
<CodeBlockConfig hideClipboard>
```plaintext
(...)
Plan: 4 to add, 0 to change, 0 to destroy.
```
</CodeBlockConfig>
1. Execute an apply to provision the Service Account.
```shell-session
$ terraform apply
```
**Output:**
<CodeBlockConfig hideClipboard>
```plaintext
(...)
Apply complete! Resources: 4 added, 0 changed, 0 destroyed.
Outputs:
   vault_secrets_sync_credentials_file_path = "/path/to/credentials/file/gcp-sm-sync-service-account-credentials.json"
```
</CodeBlockConfig>
The generated Service Account credentials file can then be used to configure the Vault GCP Secret Manager destination
following the [setup](/vault/docs/sync/gcpsm#setup) steps.
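For example, assuming you run Vault commands from the Terraform working directory where the credentials file was written, the destination from the [setup](/vault/docs/sync/gcpsm#setup) section can be configured with it directly:

```shell-session
$ vault write sys/sync/destinations/gcp-sm/my-dest \
    credentials='@gcp-sm-sync-service-account-credentials.json'
```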
## API
Please see the [secrets sync API](/vault/api-docs/system/secrets-sync) for more details.
| website/content/docs/sync/gcpsm.mdx | 0 | https://github.com/hashicorp/vault/commit/375c2be624cfd9b19b2f21cbb62a3e453d605f58 | [
0.00028209752053953707,
0.00017200854199472815,
0.0001634039217606187,
0.00016677501844242215,
0.000022586640625377186
] |
{
"id": 7,
"code_window": [
" id-token: write # Note: this permission is explicitly required for Vault auth\n",
" contents: read\n",
" runs-on: ${{ fromJSON(inputs.runs-on) }}\n",
" steps:\n",
" - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3\n",
" - uses: ./.github/actions/set-up-go\n",
" with:\n",
" github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}\n",
" - name: Authenticate to Vault\n",
" id: vault-auth\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" with:\n",
" ref: ${{ inputs.checkout-ref }}\n"
],
"file_path": ".github/workflows/test-go.yml",
"type": "add",
"edit_start_line_idx": 80
} | on:
workflow_call:
inputs:
go-arch:
description: The execution architecture (arm, amd64, etc.)
required: true
type: string
enterprise:
description: A flag indicating if this workflow is executing for the enterprise repository.
required: true
type: string
total-runners:
description: Number of runners to use for executing non-binary tests.
required: true
type: string
binary-tests:
description: Whether to run the binary tests.
required: false
default: false
type: boolean
env-vars:
description: A map of environment variables as JSON.
required: false
type: string
default: '{}'
extra-flags:
description: A space-separated list of additional build flags.
required: false
type: string
default: ''
runs-on:
description: An expression indicating which kind of runners to use.
required: false
type: string
default: ubuntu-latest
go-tags:
description: A comma-separated list of additional build tags to consider satisfied during the build.
required: false
type: string
name:
description: A suffix to append to archived test results
required: false
default: ''
type: string
go-test-parallelism:
description: The parallelism parameter for Go tests
required: false
default: 20
type: number
timeout-minutes:
description: The maximum number of minutes that this workflow should run
required: false
default: 60
type: number
testonly:
description: Whether to run the tests tagged with testonly.
required: false
default: false
type: boolean
test-timing-cache-enabled:
description: Cache the gotestsum test timing data.
required: false
default: true
type: boolean
test-timing-cache-key:
description: The cache key to use for gotestsum test timing data.
required: false
default: go-test-reports
type: string
env: ${{ fromJSON(inputs.env-vars) }}
jobs:
test-matrix:
permissions:
id-token: write # Note: this permission is explicitly required for Vault auth
contents: read
runs-on: ${{ fromJSON(inputs.runs-on) }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- uses: ./.github/actions/set-up-go
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
- name: Authenticate to Vault
id: vault-auth
if: github.repository == 'hashicorp/vault-enterprise'
run: vault-auth
- name: Fetch Secrets
id: secrets
if: github.repository == 'hashicorp/vault-enterprise'
uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e
with:
url: ${{ steps.vault-auth.outputs.addr }}
caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }}
token: ${{ steps.vault-auth.outputs.token }}
secrets: |
kv/data/github/${{ github.repository }}/datadog-ci DATADOG_API_KEY;
kv/data/github/${{ github.repository }}/github-token username-and-token | github-token;
kv/data/github/${{ github.repository }}/license license_1 | VAULT_LICENSE_CI;
kv/data/github/${{ github.repository }}/license license_2 | VAULT_LICENSE_2;
kv/data/github/${{ github.repository }}/hcp-link HCP_API_ADDRESS;
kv/data/github/${{ github.repository }}/hcp-link HCP_AUTH_URL;
kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_ID;
kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_SECRET;
kv/data/github/${{ github.repository }}/hcp-link HCP_RESOURCE_ID;
- id: setup-git-private
name: Setup Git configuration (private)
if: github.repository == 'hashicorp/vault-enterprise'
run: |
git config --global url."https://${{ steps.secrets.outputs.github-token }}@github.com".insteadOf https://github.com
- id: setup-git-public
name: Setup Git configuration (public)
if: github.repository != 'hashicorp/vault-enterprise'
run: |
git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com
- uses: ./.github/actions/set-up-gotestsum
- run: mkdir -p test-results/go-test
- uses: actions/cache/restore@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
if: inputs.test-timing-cache-enabled
with:
path: test-results/go-test
key: ${{ inputs.test-timing-cache-key }}-${{ github.run_number }}
restore-keys: |
${{ inputs.test-timing-cache-key }}-
go-test-reports-
- name: Sanitize timing files
id: sanitize-timing-files
run: |
# Prune invalid timing files
find test-results/go-test -mindepth 1 -type f -name "*.json" -exec sh -c '
file="$1";
jq . "$file" || rm "$file"
' shell {} \; > /dev/null 2>&1
- name: Build matrix excluding binary, integration, and testonly tests
id: build-non-binary
if: ${{ !inputs.testonly }}
env:
GOPRIVATE: github.com/hashicorp/*
run: |
          # testonly tests need an additional build tag, but let's exclude them anyway for clarity
(
go list ./... | grep -v "_binary" | grep -v "vault/integ" | grep -v "testonly" | gotestsum tool ci-matrix --debug \
--partitions "${{ inputs.total-runners }}" \
--timing-files 'test-results/go-test/*.json' > matrix.json
)
- name: Build matrix for tests tagged with testonly
if: ${{ inputs.testonly }}
env:
GOPRIVATE: github.com/hashicorp/*
run: |
set -exo pipefail
# enable glob expansion
shopt -s nullglob
# testonly tagged tests need an additional tag to be included
# also running some extra tests for sanity checking with the testonly build tag
(
go list -tags=testonly ./vault/external_tests/{kv,token,*replication-perf*,*testonly*} ./vault/ | gotestsum tool ci-matrix --debug \
--partitions "${{ inputs.total-runners }}" \
--timing-files 'test-results/go-test/*.json' > matrix.json
)
# disable glob expansion
shopt -u nullglob
- name: Capture list of binary tests
if: inputs.binary-tests
id: list-binary-tests
run: |
LIST="$(go list ./... | grep "_binary" | xargs)"
echo "list=$LIST" >> "$GITHUB_OUTPUT"
- name: Build complete matrix
id: build
run: |
set -exo pipefail
matrix_file="matrix.json"
if [ "${{ inputs.binary-tests}}" == "true" ] && [ -n "${{ steps.list-binary-tests.outputs.list }}" ]; then
export BINARY_TESTS="${{ steps.list-binary-tests.outputs.list }}"
jq --arg BINARY "${BINARY_TESTS}" --arg BINARY_INDEX "${{ inputs.total-runners }}" \
'.include += [{
"id": $BINARY_INDEX,
"estimatedRuntime": "N/A",
"packages": $BINARY,
"description": "partition $BINARY_INDEX - binary test packages"
}]' matrix.json > new-matrix.json
matrix_file="new-matrix.json"
fi
# convert the json to a map keyed by id
(
echo -n "matrix="
jq -c \
'.include | map( { (.id|tostring): . } ) | add' "$matrix_file"
) >> "$GITHUB_OUTPUT"
# extract an array of ids from the json
(
echo -n "matrix_ids="
jq -c \
'[ .include[].id | tostring ]' "$matrix_file"
) >> "$GITHUB_OUTPUT"
outputs:
matrix: ${{ steps.build.outputs.matrix }}
matrix_ids: ${{ steps.build.outputs.matrix_ids }}
test-go:
needs: test-matrix
permissions:
actions: read
contents: read
id-token: write # Note: this permission is explicitly required for Vault auth
runs-on: ${{ fromJSON(inputs.runs-on) }}
strategy:
fail-fast: false
matrix:
id: ${{ fromJSON(needs.test-matrix.outputs.matrix_ids) }}
env:
GOPRIVATE: github.com/hashicorp/*
TIMEOUT_IN_MINUTES: ${{ inputs.timeout-minutes }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- uses: ./.github/actions/set-up-go
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
- name: Authenticate to Vault
id: vault-auth
if: github.repository == 'hashicorp/vault-enterprise'
run: vault-auth
- name: Fetch Secrets
id: secrets
if: github.repository == 'hashicorp/vault-enterprise'
uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e
with:
url: ${{ steps.vault-auth.outputs.addr }}
caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }}
token: ${{ steps.vault-auth.outputs.token }}
secrets: |
kv/data/github/${{ github.repository }}/datadog-ci DATADOG_API_KEY;
kv/data/github/${{ github.repository }}/github-token username-and-token | github-token;
kv/data/github/${{ github.repository }}/license license_1 | VAULT_LICENSE_CI;
kv/data/github/${{ github.repository }}/license license_2 | VAULT_LICENSE_2;
kv/data/github/${{ github.repository }}/hcp-link HCP_API_ADDRESS;
kv/data/github/${{ github.repository }}/hcp-link HCP_AUTH_URL;
kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_ID;
kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_SECRET;
kv/data/github/${{ github.repository }}/hcp-link HCP_RESOURCE_ID;
- id: setup-git-private
name: Setup Git configuration (private)
if: github.repository == 'hashicorp/vault-enterprise'
run: |
git config --global url."https://${{ steps.secrets.outputs.github-token }}@github.com".insteadOf https://github.com
- id: setup-git-public
name: Setup Git configuration (public)
if: github.repository != 'hashicorp/vault-enterprise'
run: |
git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com
- id: build
if: inputs.binary-tests && matrix.id == inputs.total-runners
env:
GOPRIVATE: github.com/hashicorp/*
run: time make ci-bootstrap dev
- uses: ./.github/actions/set-up-gotestsum
- name: Install gVisor
# Enterprise repo runners do not allow sudo, so can't install gVisor there yet.
if: ${{ !inputs.enterprise }}
run: |
(
set -e
ARCH="$(uname -m)"
URL="https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}"
wget --quiet "${URL}/runsc" "${URL}/runsc.sha512" \
"${URL}/containerd-shim-runsc-v1" "${URL}/containerd-shim-runsc-v1.sha512"
sha512sum -c runsc.sha512 \
-c containerd-shim-runsc-v1.sha512
rm -f -- *.sha512
chmod a+rx runsc containerd-shim-runsc-v1
sudo mv runsc containerd-shim-runsc-v1 /usr/local/bin
)
sudo tee /etc/docker/daemon.json <<EOF
{
"runtimes": {
"runsc": {
"path": "/usr/local/bin/runsc",
"runtimeArgs": [
"--host-uds=all",
"--host-fifo=open"
]
}
}
}
EOF
sudo systemctl reload docker
- id: run-go-tests
name: Run Go tests
timeout-minutes: ${{ fromJSON(env.TIMEOUT_IN_MINUTES) }}
env:
COMMIT_SHA: ${{ github.sha }}
run: |
set -exo pipefail
# Build the dynamically generated source files.
make prep
packages=$(echo "${{ toJSON(needs.test-matrix.outputs.matrix) }}" | jq -c -r --arg id "${{ matrix.id }}" '.[$id] | .packages')
if [ -z "$packages" ]; then
echo "no test packages to run"
exit 1
fi
# We don't want VAULT_LICENSE set when running Go tests, because that's
# not what developers have in their environments and it could break some
# tests; it would be like setting VAULT_TOKEN. However some non-Go
# CI commands, like the UI tests, shouldn't have to worry about licensing.
# So we provide the tests which want an externally supplied license with licenses
# via the VAULT_LICENSE_CI and VAULT_LICENSE_2 environment variables, and here we unset it.
# shellcheck disable=SC2034
VAULT_LICENSE=
# Assign test licenses to relevant variables if they aren't already
if [[ ${{ github.repository }} == 'hashicorp/vault' ]]; then
export VAULT_LICENSE_CI=${{ secrets.ci_license }}
export VAULT_LICENSE_2=${{ secrets.ci_license_2 }}
export HCP_API_ADDRESS=${{ secrets.HCP_API_ADDRESS }}
export HCP_AUTH_URL=${{ secrets.HCP_AUTH_URL }}
export HCP_CLIENT_ID=${{ secrets.HCP_CLIENT_ID }}
export HCP_CLIENT_SECRET=${{ secrets.HCP_CLIENT_SECRET }}
export HCP_RESOURCE_ID=${{ secrets.HCP_RESOURCE_ID }}
# Temporarily removing this variable to cause HCP Link tests
# to be skipped.
#export HCP_SCADA_ADDRESS=${{ secrets.HCP_SCADA_ADDRESS }}
fi
if [ -f bin/vault ]; then
VAULT_BINARY="$(pwd)/bin/vault"
export VAULT_BINARY
fi
# On a release branch, add a flag to rerun failed tests
          # shellcheck disable=SC2193 # can get false positive for this comparison
if [[ "${{ github.base_ref }}" == release/* ]] || [[ -z "${{ github.base_ref }}" && "${{ github.ref_name }}" == release/* ]]
then
# TODO remove this extra condition once 1.15 is about to released GA
if [[ "${{ github.base_ref }}" != release/1.15* ]] || [[ -z "${{ github.base_ref }}" && "${{ github.ref_name }}" != release/1.15* ]]
then
RERUN_FAILS="--rerun-fails"
fi
fi
# shellcheck disable=SC2086 # can't quote RERUN_FAILS
GOARCH=${{ inputs.go-arch }} \
gotestsum --format=short-verbose \
--junitfile test-results/go-test/results-${{ matrix.id }}.xml \
--jsonfile test-results/go-test/results-${{ matrix.id }}.json \
--jsonfile-timing-events failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{ inputs.name }}.json \
$RERUN_FAILS \
--packages "$packages" \
-- \
-tags "${{ inputs.go-tags }}" \
-timeout=${{ env.TIMEOUT_IN_MINUTES }}m \
-parallel=${{ inputs.go-test-parallelism }} \
${{ inputs.extra-flags }} \
- name: Prepare datadog-ci
if: github.repository == 'hashicorp/vault' && (success() || failure())
continue-on-error: true
run: |
curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "/usr/local/bin/datadog-ci"
chmod +x /usr/local/bin/datadog-ci
- name: Upload test results to DataDog
continue-on-error: true
env:
DD_ENV: ci
run: |
if [[ ${{ github.repository }} == 'hashicorp/vault' ]]; then
export DATADOG_API_KEY=${{ secrets.DATADOG_API_KEY }}
fi
datadog-ci junit upload --service "$GITHUB_REPOSITORY" test-results/go-test/results-${{ matrix.id }}.xml
if: success() || failure()
- name: Archive test results
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: test-results${{ inputs.name != '' && '-' || '' }}${{ inputs.name }}
path: test-results/go-test
if: success() || failure()
# GitHub Actions doesn't expose the job ID or the URL to the job execution,
# so we have to fetch it from the API
- name: Fetch job logs URL
uses: actions/github-script@d7906e4ad0b1822421a7e6a35d5ca353c962f410 # v6.4.1
if: success() || failure()
continue-on-error: true
with:
retries: 3
script: |
// We surround the whole script with a try-catch block, to avoid each of the matrix jobs
// displaying an error in the GHA workflow run annotations, which gets very noisy.
// If an error occurs, it will be logged so that we don't lose any information about the reason for failure.
try {
const fs = require("fs");
const result = await github.rest.actions.listJobsForWorkflowRun({
owner: context.repo.owner,
per_page: 100,
repo: context.repo.repo,
run_id: context.runId,
});
// Determine what job name to use for the query. These values are hardcoded, because GHA doesn't
// expose them in any of the contexts available within a workflow run.
let prefixToSearchFor;
switch ("${{ inputs.name }}") {
case "race":
prefixToSearchFor = 'Run Go tests with data race detection / test-go (${{ matrix.id }})'
break
case "fips":
prefixToSearchFor = 'Run Go tests with FIPS configuration / test-go (${{ matrix.id }})'
break
default:
prefixToSearchFor = 'Run Go tests / test-go (${{ matrix.id }})'
}
const jobData = result.data.jobs.filter(
(job) => job.name.startsWith(prefixToSearchFor)
);
const url = jobData[0].html_url;
const envVarName = "GH_JOB_URL";
const envVar = envVarName + "=" + url;
const envFile = process.env.GITHUB_ENV;
fs.appendFile(envFile, envVar, (err) => {
if (err) throw err;
console.log("Successfully set " + envVarName + " to: " + url);
});
} catch (error) {
console.log("Error: " + error);
return
}
- name: Prepare failure summary
if: success() || failure()
continue-on-error: true
run: |
# This jq query filters out successful tests, leaving only the failures.
# Then, it formats the results into rows of a Markdown table.
# An example row will resemble this:
# | github.com/hashicorp/vault/package | TestName | fips | 0 | 2 | [view results](github.com/link-to-logs) |
jq -r -n 'inputs
| select(.Action == "fail")
| "| ${{inputs.name}} | \(.Package) | \(.Test // "-") | \(.Elapsed) | ${{ matrix.id }} | [view test results :scroll:](${{ env.GH_JOB_URL }}) |"' \
failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{inputs.name}}.json \
>> failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{inputs.name}}.md
- name: Upload failure summary
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
if: success() || failure()
with:
name: failure-summary
path: failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{inputs.name}}.md
test-collect-reports:
if: ${{ ! cancelled() && needs.test-go.result == 'success' && inputs.test-timing-cache-enabled }}
needs: test-go
runs-on: ${{ fromJSON(inputs.runs-on) }}
steps:
- uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
with:
path: test-results/go-test
key: ${{ inputs.test-timing-cache-key }}-${{ github.run_number }}
restore-keys: |
${{ inputs.test-timing-cache-key }}-
go-test-reports-
- uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
with:
name: test-results
path: test-results/go-test
- run: |
ls -lhR test-results/go-test
find test-results/go-test -mindepth 1 -mtime +3 -delete
# Prune invalid timing files
find test-results/go-test -mindepth 1 -type f -name "*.json" -exec sh -c '
file="$1";
jq . "$file" || rm "$file"
' shell {} \; > /dev/null 2>&1
ls -lhR test-results/go-test
| .github/workflows/test-go.yml | 1 | https://github.com/hashicorp/vault/commit/375c2be624cfd9b19b2f21cbb62a3e453d605f58 | [
0.9703990817070007,
0.02593967318534851,
0.00016447172674816102,
0.0002878453815355897,
0.13886317610740662
] |
{
"id": 7,
"code_window": [
" id-token: write # Note: this permission is explicitly required for Vault auth\n",
" contents: read\n",
" runs-on: ${{ fromJSON(inputs.runs-on) }}\n",
" steps:\n",
" - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3\n",
" - uses: ./.github/actions/set-up-go\n",
" with:\n",
" github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}\n",
" - name: Authenticate to Vault\n",
" id: vault-auth\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" with:\n",
" ref: ${{ inputs.checkout-ref }}\n"
],
"file_path": ".github/workflows/test-go.yml",
"type": "add",
"edit_start_line_idx": 80
} | <svg viewBox="0 0 48 48" xmlns="http://www.w3.org/2000/svg"><path d="M4 9.68l16.347-2.225.007 15.768-16.34.092L4 9.681zm16.34 15.36l.012 15.78-16.34-2.246v-13.64l16.327.105zm1.98-17.877L43.996 4v19.022l-21.674.172V7.164zM44 25.187l-.005 18.937-21.675-3.06-.03-15.912 21.71.035z" fill="#00ADEF" fill-rule="nonzero"/></svg>
| ui/public/eco/ad.svg | 0 | https://github.com/hashicorp/vault/commit/375c2be624cfd9b19b2f21cbb62a3e453d605f58 | [
0.00017257139552384615,
0.00017257139552384615,
0.00017257139552384615,
0.00017257139552384615,
0
] |
{
"id": 7,
"code_window": [
" id-token: write # Note: this permission is explicitly required for Vault auth\n",
" contents: read\n",
" runs-on: ${{ fromJSON(inputs.runs-on) }}\n",
" steps:\n",
" - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3\n",
" - uses: ./.github/actions/set-up-go\n",
" with:\n",
" github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}\n",
" - name: Authenticate to Vault\n",
" id: vault-auth\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" with:\n",
" ref: ${{ inputs.checkout-ref }}\n"
],
"file_path": ".github/workflows/test-go.yml",
"type": "add",
"edit_start_line_idx": 80
} | # Routing
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Guidelines](#guidelines)
- [File structure](#file-structure)
- [Shared functionality](#shared-functionality)
- [Decorators](#decorators)
- [@withConfirmLeave()](#withconfirmleave)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Guidelines
- Parent route typically serves to group related child resources
- Parent index route typically displays empty state placeholder with call to action or redirects to default child resource
- Child resource names are pluralized
- Child index route represents the list view (see the router sketch after this list)
- Child singularized name + /details is the read view
- _Avoid_ extending routes. This can lead to unnecessary inheritance which gets messy quickly. For [shared functionality](#shared-functionality), consider a decorator.
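These conventions map onto Ember's router. A minimal `router.js` sketch of the nesting described above (`parent` and `resource-foo` are placeholder names, not routes from this app):
```js
// app/router.js (illustrative nesting only; route names are placeholders)
import EmberRouter from '@ember/routing/router';

export default class Router extends EmberRouter {}

Router.map(function () {
  this.route('parent', function () {
    // parent index groups the related child resources
    this.route('resource-foos', function () {
      this.route('create');
      this.route('resource-foo', { path: '/:name' }, function () {
        this.route('details'); // read view for a single resource
        this.route('edit');
      });
    });
  });
});
```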
## File structure
The file structure can be leveraged to simplify CRUD actions and passing data. The singular resource route should live at the same level as its folder, which automatically passes its model to any route nested within the folder.
Below, `details.js` and `edit.js` will automatically receive the model returned by the model hook in `resource-foo.js`. Alternatively, if defining a custom model hook in those routes, we can use methods like `this.modelFor` instead of re-querying records (see the sketch after the OIDC example below).
```
├── routes/vault/cluster/access
│ ├── parent/
│ │ ├── index.js
│ │ ├── resource-foos /
│ │ │ ├── resource-foo.js
│ │ │ ├── create.js
│ │ │ ├── index.js
│ │ │ ├── resource-foo/
│ │ │ │ ├── details.js
│ │ │ │ ├── edit.js
```
> For example, [OIDC](../app/routes/vault/cluster/access/oidc/) route structure [_original PR_](https://github.com/hashicorp/vault/pull/16028):
```
├── routes/vault/cluster/access
│ ├── oidc/
│ │ ├── index.js
│ │ ├── clients/
│ │ │ ├── client.js
│ │ │ ├── create.js
│ │ │ ├── index.js
│ │ │ ├── client/
│ │ │ │ ├── details.js
│ │ │ │ ├── edit.js
│ │ │ │ ├── providers.js <- uses the modelFor method to get the parent's clientId
```
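The annotated `providers.js` above relies on this pattern. A minimal sketch of what such a route can look like; the route name string, model attribute, and query parameter below are illustrative assumptions rather than code copied from the app:
```js
// providers.js (hypothetical sketch of reading the parent route's model)
import Route from '@ember/routing/route';
import { inject as service } from '@ember/service';

export default class OidcClientProvidersRoute extends Route {
  @service store;

  model() {
    // Reuse the record resolved by the parent `client` route instead of re-querying it.
    const client = this.modelFor('vault.cluster.access.oidc.clients.client');
    // Illustrative query: fetch the providers associated with this client.
    return this.store.query('oidc/provider', { allowed_client_id: client.clientId });
  }
}
```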
## Shared functionality
To guide users, we sometimes have a call to action that depends on a resource's state. For example, if a secret engine hasn't been configured we route to the first configuration step, and otherwise we navigate to its overview page.
Instead of extending route classes to share this `isConfigured` state, consider a decorator! [withConfig()](../../ui/lib/kubernetes/addon/decorators/fetch-config.js) is a great example.
## Decorators
### [@withConfirmLeave()](../lib/core/addon/decorators/confirm-leave.js)
- Renders a `window.confirm()` alert that the user has unsaved changes when navigating away from a route with the decorator
- Unloads or rolls back the Ember Data model record (usage sketch below)
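A minimal usage sketch; the import path, route, and model names here are assumptions based on the linked file rather than verified app code:
```js
// Hypothetical edit route guarded by the decorator
import Route from '@ember/routing/route';
import { inject as service } from '@ember/service';
import { withConfirmLeave } from 'core/decorators/confirm-leave';

@withConfirmLeave()
export default class SomeEditRoute extends Route {
  @service store;

  model(params) {
    // If this record has unsaved changes when the user navigates away, the decorator
    // prompts with window.confirm() and then unloads or rolls back the record.
    return this.store.findRecord('some-model', params.id);
  }
}
```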
<!-- TODO add withConfig() if we refactor for more general use -->
<!-- ### [withConfig()](../../ui/lib/kubernetes/addon/decorators/fetch-config.js)
We sometimes have a call to action guiding users that depends on a resource's state. For example, if a secret engine hasn't been configured the UI renders an empty state linking to the first configuration step. Otherwise, it routes to the overview page of that engine.
Sample use:
```js
import { withConfig } from '../decorators/fetch-config';
@withConfig()
export default class SomeRouter extends Route {
model() {
// in case of any error other than 404 we want to display that to the user
if (this.configError) {
throw this.configError;
}
return {
config: this.configModel, // configuration data to determine UI state
};
}
}
``` -->
| ui/docs/routing.md | 0 | https://github.com/hashicorp/vault/commit/375c2be624cfd9b19b2f21cbb62a3e453d605f58 | [
0.0002111913199769333,
0.00017063635459635407,
0.00015955536218825728,
0.00016685380251146853,
0.000013999980183143634
] |
{
"id": 7,
"code_window": [
" id-token: write # Note: this permission is explicitly required for Vault auth\n",
" contents: read\n",
" runs-on: ${{ fromJSON(inputs.runs-on) }}\n",
" steps:\n",
" - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3\n",
" - uses: ./.github/actions/set-up-go\n",
" with:\n",
" github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}\n",
" - name: Authenticate to Vault\n",
" id: vault-auth\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" with:\n",
" ref: ${{ inputs.checkout-ref }}\n"
],
"file_path": ".github/workflows/test-go.yml",
"type": "add",
"edit_start_line_idx": 80
} | /**
* Copyright (c) HashiCorp, Inc.
* SPDX-License-Identifier: BUSL-1.1
*/
//https://github.com/jgthms/bulma/blob/6ad2e3df0589e5d6ff7a9c03ee1c78a546bedeaf/sass/utilities/initial-variables.sass#L48-L59
//https://github.com/jgthms/bulma/blob/6ad2e3df0589e5d6ff7a9c03ee1c78a546bedeaf/sass/utilities/mixins.sass#L71-L130
export default {
mobile: '(max-width: 768px)',
tablet: '(min-width: 769px)',
desktop: '(min-width: 1088px)',
};
| ui/app/breakpoints.js | 0 | https://github.com/hashicorp/vault/commit/375c2be624cfd9b19b2f21cbb62a3e453d605f58 | [
0.0001651509228395298,
0.00016410814714618027,
0.00016306538600474596,
0.00016410814714618027,
0.000001042768417391926
] |
{
"id": 8,
"code_window": [
" GOPRIVATE: github.com/hashicorp/*\n",
" TIMEOUT_IN_MINUTES: ${{ inputs.timeout-minutes }}\n",
" steps:\n",
" - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3\n",
" - uses: ./.github/actions/set-up-go\n",
" with:\n",
" github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}\n",
" - name: Authenticate to Vault\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" with:\n",
" ref: ${{ inputs.checkout-ref }}\n"
],
"file_path": ".github/workflows/test-go.yml",
"type": "add",
"edit_start_line_idx": 216
} | on:
workflow_call:
inputs:
go-arch:
description: The execution architecture (arm, amd64, etc.)
required: true
type: string
enterprise:
description: A flag indicating if this workflow is executing for the enterprise repository.
required: true
type: string
total-runners:
description: Number of runners to use for executing non-binary tests.
required: true
type: string
binary-tests:
description: Whether to run the binary tests.
required: false
default: false
type: boolean
env-vars:
description: A map of environment variables as JSON.
required: false
type: string
default: '{}'
extra-flags:
description: A space-separated list of additional build flags.
required: false
type: string
default: ''
runs-on:
description: An expression indicating which kind of runners to use.
required: false
type: string
default: ubuntu-latest
go-tags:
description: A comma-separated list of additional build tags to consider satisfied during the build.
required: false
type: string
name:
description: A suffix to append to archived test results
required: false
default: ''
type: string
go-test-parallelism:
description: The parallelism parameter for Go tests
required: false
default: 20
type: number
timeout-minutes:
description: The maximum number of minutes that this workflow should run
required: false
default: 60
type: number
testonly:
description: Whether to run the tests tagged with testonly.
required: false
default: false
type: boolean
test-timing-cache-enabled:
description: Cache the gotestsum test timing data.
required: false
default: true
type: boolean
test-timing-cache-key:
description: The cache key to use for gotestsum test timing data.
required: false
default: go-test-reports
type: string
env: ${{ fromJSON(inputs.env-vars) }}
jobs:
test-matrix:
permissions:
id-token: write # Note: this permission is explicitly required for Vault auth
contents: read
runs-on: ${{ fromJSON(inputs.runs-on) }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- uses: ./.github/actions/set-up-go
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
- name: Authenticate to Vault
id: vault-auth
if: github.repository == 'hashicorp/vault-enterprise'
run: vault-auth
- name: Fetch Secrets
id: secrets
if: github.repository == 'hashicorp/vault-enterprise'
uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e
with:
url: ${{ steps.vault-auth.outputs.addr }}
caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }}
token: ${{ steps.vault-auth.outputs.token }}
secrets: |
kv/data/github/${{ github.repository }}/datadog-ci DATADOG_API_KEY;
kv/data/github/${{ github.repository }}/github-token username-and-token | github-token;
kv/data/github/${{ github.repository }}/license license_1 | VAULT_LICENSE_CI;
kv/data/github/${{ github.repository }}/license license_2 | VAULT_LICENSE_2;
kv/data/github/${{ github.repository }}/hcp-link HCP_API_ADDRESS;
kv/data/github/${{ github.repository }}/hcp-link HCP_AUTH_URL;
kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_ID;
kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_SECRET;
kv/data/github/${{ github.repository }}/hcp-link HCP_RESOURCE_ID;
- id: setup-git-private
name: Setup Git configuration (private)
if: github.repository == 'hashicorp/vault-enterprise'
run: |
git config --global url."https://${{ steps.secrets.outputs.github-token }}@github.com".insteadOf https://github.com
- id: setup-git-public
name: Setup Git configuration (public)
if: github.repository != 'hashicorp/vault-enterprise'
run: |
git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com
- uses: ./.github/actions/set-up-gotestsum
- run: mkdir -p test-results/go-test
- uses: actions/cache/restore@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
if: inputs.test-timing-cache-enabled
with:
path: test-results/go-test
key: ${{ inputs.test-timing-cache-key }}-${{ github.run_number }}
restore-keys: |
${{ inputs.test-timing-cache-key }}-
go-test-reports-
- name: Sanitize timing files
id: sanitize-timing-files
run: |
# Prune invalid timing files
find test-results/go-test -mindepth 1 -type f -name "*.json" -exec sh -c '
file="$1";
jq . "$file" || rm "$file"
' shell {} \; > /dev/null 2>&1
- name: Build matrix excluding binary, integration, and testonly tests
id: build-non-binary
if: ${{ !inputs.testonly }}
env:
GOPRIVATE: github.com/hashicorp/*
run: |
          # testonly tests need an additional build tag, though let's exclude them anyway for clarity
(
go list ./... | grep -v "_binary" | grep -v "vault/integ" | grep -v "testonly" | gotestsum tool ci-matrix --debug \
--partitions "${{ inputs.total-runners }}" \
--timing-files 'test-results/go-test/*.json' > matrix.json
)
- name: Build matrix for tests tagged with testonly
if: ${{ inputs.testonly }}
env:
GOPRIVATE: github.com/hashicorp/*
run: |
set -exo pipefail
# enable glob expansion
shopt -s nullglob
# testonly tagged tests need an additional tag to be included
# also running some extra tests for sanity checking with the testonly build tag
(
go list -tags=testonly ./vault/external_tests/{kv,token,*replication-perf*,*testonly*} ./vault/ | gotestsum tool ci-matrix --debug \
--partitions "${{ inputs.total-runners }}" \
--timing-files 'test-results/go-test/*.json' > matrix.json
)
# disable glob expansion
shopt -u nullglob
- name: Capture list of binary tests
if: inputs.binary-tests
id: list-binary-tests
run: |
LIST="$(go list ./... | grep "_binary" | xargs)"
echo "list=$LIST" >> "$GITHUB_OUTPUT"
- name: Build complete matrix
id: build
run: |
set -exo pipefail
matrix_file="matrix.json"
if [ "${{ inputs.binary-tests}}" == "true" ] && [ -n "${{ steps.list-binary-tests.outputs.list }}" ]; then
export BINARY_TESTS="${{ steps.list-binary-tests.outputs.list }}"
jq --arg BINARY "${BINARY_TESTS}" --arg BINARY_INDEX "${{ inputs.total-runners }}" \
'.include += [{
"id": $BINARY_INDEX,
"estimatedRuntime": "N/A",
"packages": $BINARY,
"description": "partition $BINARY_INDEX - binary test packages"
}]' matrix.json > new-matrix.json
matrix_file="new-matrix.json"
fi
# convert the json to a map keyed by id
(
echo -n "matrix="
jq -c \
'.include | map( { (.id|tostring): . } ) | add' "$matrix_file"
) >> "$GITHUB_OUTPUT"
# extract an array of ids from the json
(
echo -n "matrix_ids="
jq -c \
'[ .include[].id | tostring ]' "$matrix_file"
) >> "$GITHUB_OUTPUT"
outputs:
matrix: ${{ steps.build.outputs.matrix }}
matrix_ids: ${{ steps.build.outputs.matrix_ids }}
test-go:
needs: test-matrix
permissions:
actions: read
contents: read
id-token: write # Note: this permission is explicitly required for Vault auth
runs-on: ${{ fromJSON(inputs.runs-on) }}
strategy:
fail-fast: false
matrix:
id: ${{ fromJSON(needs.test-matrix.outputs.matrix_ids) }}
env:
GOPRIVATE: github.com/hashicorp/*
TIMEOUT_IN_MINUTES: ${{ inputs.timeout-minutes }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- uses: ./.github/actions/set-up-go
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
- name: Authenticate to Vault
id: vault-auth
if: github.repository == 'hashicorp/vault-enterprise'
run: vault-auth
- name: Fetch Secrets
id: secrets
if: github.repository == 'hashicorp/vault-enterprise'
uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e
with:
url: ${{ steps.vault-auth.outputs.addr }}
caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }}
token: ${{ steps.vault-auth.outputs.token }}
secrets: |
kv/data/github/${{ github.repository }}/datadog-ci DATADOG_API_KEY;
kv/data/github/${{ github.repository }}/github-token username-and-token | github-token;
kv/data/github/${{ github.repository }}/license license_1 | VAULT_LICENSE_CI;
kv/data/github/${{ github.repository }}/license license_2 | VAULT_LICENSE_2;
kv/data/github/${{ github.repository }}/hcp-link HCP_API_ADDRESS;
kv/data/github/${{ github.repository }}/hcp-link HCP_AUTH_URL;
kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_ID;
kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_SECRET;
kv/data/github/${{ github.repository }}/hcp-link HCP_RESOURCE_ID;
- id: setup-git-private
name: Setup Git configuration (private)
if: github.repository == 'hashicorp/vault-enterprise'
run: |
git config --global url."https://${{ steps.secrets.outputs.github-token }}@github.com".insteadOf https://github.com
- id: setup-git-public
name: Setup Git configuration (public)
if: github.repository != 'hashicorp/vault-enterprise'
run: |
git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com
- id: build
if: inputs.binary-tests && matrix.id == inputs.total-runners
env:
GOPRIVATE: github.com/hashicorp/*
run: time make ci-bootstrap dev
- uses: ./.github/actions/set-up-gotestsum
- name: Install gVisor
# Enterprise repo runners do not allow sudo, so can't install gVisor there yet.
if: ${{ !inputs.enterprise }}
run: |
(
set -e
ARCH="$(uname -m)"
URL="https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}"
wget --quiet "${URL}/runsc" "${URL}/runsc.sha512" \
"${URL}/containerd-shim-runsc-v1" "${URL}/containerd-shim-runsc-v1.sha512"
sha512sum -c runsc.sha512 \
-c containerd-shim-runsc-v1.sha512
rm -f -- *.sha512
chmod a+rx runsc containerd-shim-runsc-v1
sudo mv runsc containerd-shim-runsc-v1 /usr/local/bin
)
sudo tee /etc/docker/daemon.json <<EOF
{
"runtimes": {
"runsc": {
"path": "/usr/local/bin/runsc",
"runtimeArgs": [
"--host-uds=all",
"--host-fifo=open"
]
}
}
}
EOF
sudo systemctl reload docker
- id: run-go-tests
name: Run Go tests
timeout-minutes: ${{ fromJSON(env.TIMEOUT_IN_MINUTES) }}
env:
COMMIT_SHA: ${{ github.sha }}
run: |
set -exo pipefail
# Build the dynamically generated source files.
make prep
packages=$(echo "${{ toJSON(needs.test-matrix.outputs.matrix) }}" | jq -c -r --arg id "${{ matrix.id }}" '.[$id] | .packages')
if [ -z "$packages" ]; then
echo "no test packages to run"
exit 1
fi
# We don't want VAULT_LICENSE set when running Go tests, because that's
# not what developers have in their environments and it could break some
# tests; it would be like setting VAULT_TOKEN. However some non-Go
# CI commands, like the UI tests, shouldn't have to worry about licensing.
# So we provide the tests which want an externally supplied license with licenses
# via the VAULT_LICENSE_CI and VAULT_LICENSE_2 environment variables, and here we unset it.
# shellcheck disable=SC2034
VAULT_LICENSE=
# Assign test licenses to relevant variables if they aren't already
if [[ ${{ github.repository }} == 'hashicorp/vault' ]]; then
export VAULT_LICENSE_CI=${{ secrets.ci_license }}
export VAULT_LICENSE_2=${{ secrets.ci_license_2 }}
export HCP_API_ADDRESS=${{ secrets.HCP_API_ADDRESS }}
export HCP_AUTH_URL=${{ secrets.HCP_AUTH_URL }}
export HCP_CLIENT_ID=${{ secrets.HCP_CLIENT_ID }}
export HCP_CLIENT_SECRET=${{ secrets.HCP_CLIENT_SECRET }}
export HCP_RESOURCE_ID=${{ secrets.HCP_RESOURCE_ID }}
# Temporarily removing this variable to cause HCP Link tests
# to be skipped.
#export HCP_SCADA_ADDRESS=${{ secrets.HCP_SCADA_ADDRESS }}
fi
if [ -f bin/vault ]; then
VAULT_BINARY="$(pwd)/bin/vault"
export VAULT_BINARY
fi
# On a release branch, add a flag to rerun failed tests
          # shellcheck disable=SC2193 # can get false positive for this comparison
if [[ "${{ github.base_ref }}" == release/* ]] || [[ -z "${{ github.base_ref }}" && "${{ github.ref_name }}" == release/* ]]
then
            # TODO remove this extra condition once 1.15 is about to be released GA
if [[ "${{ github.base_ref }}" != release/1.15* ]] || [[ -z "${{ github.base_ref }}" && "${{ github.ref_name }}" != release/1.15* ]]
then
RERUN_FAILS="--rerun-fails"
fi
fi
# shellcheck disable=SC2086 # can't quote RERUN_FAILS
GOARCH=${{ inputs.go-arch }} \
gotestsum --format=short-verbose \
--junitfile test-results/go-test/results-${{ matrix.id }}.xml \
--jsonfile test-results/go-test/results-${{ matrix.id }}.json \
--jsonfile-timing-events failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{ inputs.name }}.json \
$RERUN_FAILS \
--packages "$packages" \
-- \
-tags "${{ inputs.go-tags }}" \
-timeout=${{ env.TIMEOUT_IN_MINUTES }}m \
-parallel=${{ inputs.go-test-parallelism }} \
${{ inputs.extra-flags }} \
- name: Prepare datadog-ci
if: github.repository == 'hashicorp/vault' && (success() || failure())
continue-on-error: true
run: |
curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "/usr/local/bin/datadog-ci"
chmod +x /usr/local/bin/datadog-ci
- name: Upload test results to DataDog
continue-on-error: true
env:
DD_ENV: ci
run: |
if [[ ${{ github.repository }} == 'hashicorp/vault' ]]; then
export DATADOG_API_KEY=${{ secrets.DATADOG_API_KEY }}
fi
datadog-ci junit upload --service "$GITHUB_REPOSITORY" test-results/go-test/results-${{ matrix.id }}.xml
if: success() || failure()
- name: Archive test results
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: test-results${{ inputs.name != '' && '-' || '' }}${{ inputs.name }}
path: test-results/go-test
if: success() || failure()
# GitHub Actions doesn't expose the job ID or the URL to the job execution,
# so we have to fetch it from the API
- name: Fetch job logs URL
uses: actions/github-script@d7906e4ad0b1822421a7e6a35d5ca353c962f410 # v6.4.1
if: success() || failure()
continue-on-error: true
with:
retries: 3
script: |
// We surround the whole script with a try-catch block, to avoid each of the matrix jobs
// displaying an error in the GHA workflow run annotations, which gets very noisy.
// If an error occurs, it will be logged so that we don't lose any information about the reason for failure.
try {
const fs = require("fs");
const result = await github.rest.actions.listJobsForWorkflowRun({
owner: context.repo.owner,
per_page: 100,
repo: context.repo.repo,
run_id: context.runId,
});
// Determine what job name to use for the query. These values are hardcoded, because GHA doesn't
// expose them in any of the contexts available within a workflow run.
let prefixToSearchFor;
switch ("${{ inputs.name }}") {
case "race":
prefixToSearchFor = 'Run Go tests with data race detection / test-go (${{ matrix.id }})'
break
case "fips":
prefixToSearchFor = 'Run Go tests with FIPS configuration / test-go (${{ matrix.id }})'
break
default:
prefixToSearchFor = 'Run Go tests / test-go (${{ matrix.id }})'
}
const jobData = result.data.jobs.filter(
(job) => job.name.startsWith(prefixToSearchFor)
);
const url = jobData[0].html_url;
const envVarName = "GH_JOB_URL";
const envVar = envVarName + "=" + url;
const envFile = process.env.GITHUB_ENV;
fs.appendFile(envFile, envVar, (err) => {
if (err) throw err;
console.log("Successfully set " + envVarName + " to: " + url);
});
} catch (error) {
console.log("Error: " + error);
return
}
- name: Prepare failure summary
if: success() || failure()
continue-on-error: true
run: |
# This jq query filters out successful tests, leaving only the failures.
# Then, it formats the results into rows of a Markdown table.
# An example row will resemble this:
# | github.com/hashicorp/vault/package | TestName | fips | 0 | 2 | [view results](github.com/link-to-logs) |
jq -r -n 'inputs
| select(.Action == "fail")
| "| ${{inputs.name}} | \(.Package) | \(.Test // "-") | \(.Elapsed) | ${{ matrix.id }} | [view test results :scroll:](${{ env.GH_JOB_URL }}) |"' \
failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{inputs.name}}.json \
>> failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{inputs.name}}.md
- name: Upload failure summary
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
if: success() || failure()
with:
name: failure-summary
path: failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{inputs.name}}.md
test-collect-reports:
if: ${{ ! cancelled() && needs.test-go.result == 'success' && inputs.test-timing-cache-enabled }}
needs: test-go
runs-on: ${{ fromJSON(inputs.runs-on) }}
steps:
- uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
with:
path: test-results/go-test
key: ${{ inputs.test-timing-cache-key }}-${{ github.run_number }}
restore-keys: |
${{ inputs.test-timing-cache-key }}-
go-test-reports-
- uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
with:
name: test-results
path: test-results/go-test
- run: |
ls -lhR test-results/go-test
find test-results/go-test -mindepth 1 -mtime +3 -delete
# Prune invalid timing files
find test-results/go-test -mindepth 1 -type f -name "*.json" -exec sh -c '
file="$1";
jq . "$file" || rm "$file"
' shell {} \; > /dev/null 2>&1
ls -lhR test-results/go-test
| .github/workflows/test-go.yml | 1 | https://github.com/hashicorp/vault/commit/375c2be624cfd9b19b2f21cbb62a3e453d605f58 | [
0.9915355443954468,
0.024052776396274567,
0.00016232540656346828,
0.0004897300386801362,
0.14141564071178436
] |
{
"id": 8,
"code_window": [
" GOPRIVATE: github.com/hashicorp/*\n",
" TIMEOUT_IN_MINUTES: ${{ inputs.timeout-minutes }}\n",
" steps:\n",
" - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3\n",
" - uses: ./.github/actions/set-up-go\n",
" with:\n",
" github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}\n",
" - name: Authenticate to Vault\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" with:\n",
" ref: ${{ inputs.checkout-ref }}\n"
],
"file_path": ".github/workflows/test-go.yml",
"type": "add",
"edit_start_line_idx": 216
} | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package pki
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"os"
"path"
"strings"
"testing"
"time"
"github.com/hashicorp/vault/sdk/helper/certutil"
"github.com/go-test/deep"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/acme"
"golang.org/x/net/http2"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/builtin/logical/pki/dnstest"
"github.com/hashicorp/vault/helper/constants"
"github.com/hashicorp/vault/helper/testhelpers"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
)
// TestAcmeBasicWorkflow a test that will validate a basic ACME workflow using the Golang ACME client.
func TestAcmeBasicWorkflow(t *testing.T) {
t.Parallel()
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
cases := []struct {
name string
prefixUrl string
}{
{"root", "acme/"},
{"role", "roles/test-role/acme/"},
{"issuer", "issuer/int-ca/acme/"},
{"issuer_role", "issuer/int-ca/roles/test-role/acme/"},
}
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
baseAcmeURL := "/v1/pki/" + tc.prefixUrl
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
t.Logf("Testing discover on %s", baseAcmeURL)
discovery, err := acmeClient.Discover(testCtx)
require.NoError(t, err, "failed acme discovery call")
discoveryBaseUrl := client.Address() + baseAcmeURL
require.Equal(t, discoveryBaseUrl+"new-nonce", discovery.NonceURL)
require.Equal(t, discoveryBaseUrl+"new-account", discovery.RegURL)
require.Equal(t, discoveryBaseUrl+"new-order", discovery.OrderURL)
require.Equal(t, discoveryBaseUrl+"revoke-cert", discovery.RevokeURL)
require.Equal(t, discoveryBaseUrl+"key-change", discovery.KeyChangeURL)
require.False(t, discovery.ExternalAccountRequired, "bad value for external account required in directory")
// Attempt to update prior to creating an account
t.Logf("Testing updates with no proper account fail on %s", baseAcmeURL)
_, err = acmeClient.UpdateReg(testCtx, &acme.Account{Contact: []string{"mailto:[email protected]"}})
require.ErrorIs(t, err, acme.ErrNoAccount, "expected failure attempting to update prior to account registration")
// Create new account
t.Logf("Testing register on %s", baseAcmeURL)
acct, err := acmeClient.Register(testCtx, &acme.Account{
Contact: []string{"mailto:[email protected]", "mailto:[email protected]"},
}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering account")
require.Equal(t, acme.StatusValid, acct.Status)
require.Contains(t, acct.Contact, "mailto:[email protected]")
require.Contains(t, acct.Contact, "mailto:[email protected]")
require.Len(t, acct.Contact, 2)
			// Call register again; we should get the existing account
t.Logf("Testing duplicate register returns existing account on %s", baseAcmeURL)
_, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true })
require.ErrorIs(t, err, acme.ErrAccountAlreadyExists,
"We should have returned a 200 status code which would have triggered an error in the golang acme"+
" library")
// Update contact
t.Logf("Testing Update account contacts on %s", baseAcmeURL)
acct.Contact = []string{"mailto:[email protected]"}
acct2, err := acmeClient.UpdateReg(testCtx, acct)
require.NoError(t, err, "failed updating account")
require.Equal(t, acme.StatusValid, acct2.Status)
// We should get this back, not the original values.
require.Contains(t, acct2.Contact, "mailto:[email protected]")
require.Len(t, acct2.Contact, 1)
			// Make sure orders do not accept dates
_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "localhost"}},
acme.WithOrderNotBefore(time.Now().Add(10*time.Minute)))
require.Error(t, err, "should have rejected a new order with NotBefore set")
_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "localhost"}},
acme.WithOrderNotAfter(time.Now().Add(10*time.Minute)))
require.Error(t, err, "should have rejected a new order with NotAfter set")
// Make sure DNS identifiers cannot include IP addresses
_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "127.0.0.1"}},
acme.WithOrderNotAfter(time.Now().Add(10*time.Minute)))
require.Error(t, err, "should have rejected a new order with IP-like DNS-type identifier")
_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "*.127.0.0.1"}},
acme.WithOrderNotAfter(time.Now().Add(10*time.Minute)))
require.Error(t, err, "should have rejected a new order with IP-like DNS-type identifier")
// Create an order
t.Logf("Testing Authorize Order on %s", baseAcmeURL)
identifiers := []string{"localhost.localdomain", "*.localdomain"}
createOrder, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
{Type: "dns", Value: identifiers[0]},
{Type: "dns", Value: identifiers[1]},
})
require.NoError(t, err, "failed creating order")
require.Equal(t, acme.StatusPending, createOrder.Status)
require.Empty(t, createOrder.CertURL)
require.Equal(t, createOrder.URI+"/finalize", createOrder.FinalizeURL)
require.Len(t, createOrder.AuthzURLs, 2, "expected two authzurls")
// Get order
t.Logf("Testing GetOrder on %s", baseAcmeURL)
getOrder, err := acmeClient.GetOrder(testCtx, createOrder.URI)
require.NoError(t, err, "failed fetching order")
require.Equal(t, acme.StatusPending, createOrder.Status)
if diffs := deep.Equal(createOrder, getOrder); diffs != nil {
t.Fatalf("Differences exist between create and get order: \n%v", strings.Join(diffs, "\n"))
}
// Make sure the identifiers returned in the order contain the original values
var ids []string
for _, id := range getOrder.Identifiers {
require.Equal(t, "dns", id.Type)
ids = append(ids, id.Value)
}
require.ElementsMatch(t, identifiers, ids, "order responses should have all original identifiers")
// Load authorizations
var authorizations []*acme.Authorization
for _, authUrl := range getOrder.AuthzURLs {
auth, err := acmeClient.GetAuthorization(testCtx, authUrl)
require.NoError(t, err, "failed fetching authorization: %s", authUrl)
authorizations = append(authorizations, auth)
}
			// We should have 2 separate auth challenges as we have two separate identifiers
require.Len(t, authorizations, 2, "expected 2 authorizations in order")
var wildcardAuth *acme.Authorization
var domainAuth *acme.Authorization
for _, auth := range authorizations {
if auth.Wildcard {
wildcardAuth = auth
} else {
domainAuth = auth
}
}
// Test the values for the domain authentication
require.Equal(t, acme.StatusPending, domainAuth.Status)
require.Equal(t, "dns", domainAuth.Identifier.Type)
require.Equal(t, "localhost.localdomain", domainAuth.Identifier.Value)
require.False(t, domainAuth.Wildcard, "should not be a wildcard")
require.True(t, domainAuth.Expires.IsZero(), "authorization should only have expiry set on valid status")
require.Len(t, domainAuth.Challenges, 3, "expected three challenges")
require.Equal(t, acme.StatusPending, domainAuth.Challenges[0].Status)
require.True(t, domainAuth.Challenges[0].Validated.IsZero(), "validated time should be 0 on challenge")
require.Equal(t, "http-01", domainAuth.Challenges[0].Type)
require.NotEmpty(t, domainAuth.Challenges[0].Token, "missing challenge token")
require.Equal(t, acme.StatusPending, domainAuth.Challenges[1].Status)
require.True(t, domainAuth.Challenges[1].Validated.IsZero(), "validated time should be 0 on challenge")
require.Equal(t, "dns-01", domainAuth.Challenges[1].Type)
require.NotEmpty(t, domainAuth.Challenges[1].Token, "missing challenge token")
require.Equal(t, acme.StatusPending, domainAuth.Challenges[2].Status)
require.True(t, domainAuth.Challenges[2].Validated.IsZero(), "validated time should be 0 on challenge")
require.Equal(t, "tls-alpn-01", domainAuth.Challenges[2].Type)
require.NotEmpty(t, domainAuth.Challenges[2].Token, "missing challenge token")
// Test the values for the wildcard authentication
require.Equal(t, acme.StatusPending, wildcardAuth.Status)
require.Equal(t, "dns", wildcardAuth.Identifier.Type)
require.Equal(t, "localdomain", wildcardAuth.Identifier.Value) // Make sure we strip the *. in auth responses
require.True(t, wildcardAuth.Wildcard, "should be a wildcard")
require.True(t, wildcardAuth.Expires.IsZero(), "authorization should only have expiry set on valid status")
require.Len(t, wildcardAuth.Challenges, 1, "expected one challenge")
			require.Equal(t, acme.StatusPending, wildcardAuth.Challenges[0].Status)
require.True(t, wildcardAuth.Challenges[0].Validated.IsZero(), "validated time should be 0 on challenge")
require.Equal(t, "dns-01", wildcardAuth.Challenges[0].Type)
			require.NotEmpty(t, wildcardAuth.Challenges[0].Token, "missing challenge token")
// Make sure that getting a challenge does not start it.
challenge, err := acmeClient.GetChallenge(testCtx, domainAuth.Challenges[0].URI)
require.NoError(t, err, "failed to load challenge")
require.Equal(t, acme.StatusPending, challenge.Status)
require.True(t, challenge.Validated.IsZero(), "validated time should be 0 on challenge")
require.Equal(t, "http-01", challenge.Type)
// Accept a challenge; this triggers validation to start.
challenge, err = acmeClient.Accept(testCtx, domainAuth.Challenges[0])
require.NoError(t, err, "failed to load challenge")
require.Equal(t, acme.StatusProcessing, challenge.Status)
require.True(t, challenge.Validated.IsZero(), "validated time should be 0 on challenge")
require.Equal(t, "http-01", challenge.Type)
require.NotEmpty(t, challenge.Token, "missing challenge token")
// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow
// test.
markAuthorizationSuccess(t, client, acmeClient, acct, getOrder)
// Make sure sending a CSR with the account key gets rejected.
goodCr := &x509.CertificateRequest{
Subject: pkix.Name{CommonName: identifiers[1]},
DNSNames: []string{identifiers[0], identifiers[1]},
}
t.Logf("csr: %v", goodCr)
// We want to make sure people are not using the same keys for CSR/Certs and their ACME account.
csrSignedWithAccountKey, err := x509.CreateCertificateRequest(rand.Reader, goodCr, accountKey)
require.NoError(t, err, "failed generating csr")
_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrSignedWithAccountKey, true)
require.Error(t, err, "should not be allowed to use the account key for a CSR")
csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed generated key for CSR")
// Validate we reject CSRs that contain CN that aren't in the original order
badCr := &x509.CertificateRequest{
Subject: pkix.Name{CommonName: "not-in-original-order.com"},
DNSNames: []string{identifiers[0], identifiers[1]},
}
t.Logf("csr: %v", badCr)
csrWithBadCName, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
require.NoError(t, err, "failed generating csr with bad common name")
_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadCName, true)
require.Error(t, err, "should not be allowed to csr with different common names than order")
// Validate we reject CSRs that contain DNS names that aren't in the original order
badCr = &x509.CertificateRequest{
Subject: pkix.Name{CommonName: createOrder.Identifiers[0].Value},
DNSNames: []string{"www.notinorder.com"},
}
csrWithBadName, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
require.NoError(t, err, "failed generating csr with bad name")
_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadName, true)
require.Error(t, err, "should not be allowed to csr with different names than order")
// Validate we reject CSRs that contain IP addresses that weren't in the original order
badCr = &x509.CertificateRequest{
Subject: pkix.Name{CommonName: createOrder.Identifiers[0].Value},
IPAddresses: []net.IP{{127, 0, 0, 1}},
}
csrWithBadIP, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
require.NoError(t, err, "failed generating csr with bad name")
_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadIP, true)
require.Error(t, err, "should not be allowed to csr with different ip address than order")
			// Validate we reject CSRs that contain fewer names than in the original order.
badCr = &x509.CertificateRequest{
Subject: pkix.Name{CommonName: identifiers[0]},
}
csrWithBadName, err = x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
require.NoError(t, err, "failed generating csr with bad name")
_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadName, true)
require.Error(t, err, "should not be allowed to csr with different names than order")
// Finally test a proper CSR, with the correct name and signed with a different key works.
csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
require.NoError(t, err, "failed generating csr")
certs, _, err := acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csr, true)
require.NoError(t, err, "failed finalizing order")
require.Len(t, certs, 3, "expected three items within the returned certs")
testAcmeCertSignedByCa(t, client, certs, "int-ca")
// Make sure the certificate has a NotAfter date of a maximum of 90 days
acmeCert, err := x509.ParseCertificate(certs[0])
require.NoError(t, err, "failed parsing acme cert bytes")
maxAcmeNotAfter := time.Now().Add(maxAcmeCertTTL)
if maxAcmeNotAfter.Before(acmeCert.NotAfter) {
require.Fail(t, fmt.Sprintf("certificate has a NotAfter value %v greater than ACME max ttl %v", acmeCert.NotAfter, maxAcmeNotAfter))
}
// Can we revoke it using the account key revocation
err = acmeClient.RevokeCert(ctx, nil, certs[0], acme.CRLReasonUnspecified)
require.NoError(t, err, "failed to revoke certificate through account key")
// Make sure it was actually revoked
certResp, err := client.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert))
require.NoError(t, err, "failed to read certificate status")
require.NotNil(t, certResp, "certificate status response was nil")
revocationTime := certResp.Data["revocation_time"].(json.Number)
revocationTimeInt, err := revocationTime.Int64()
require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
require.Greater(t, revocationTimeInt, int64(0),
"revocation time was not greater than 0, revocation did not work value was: %v", revocationTimeInt)
// Make sure we can revoke an authorization as a client
err = acmeClient.RevokeAuthorization(ctx, authorizations[0].URI)
require.NoError(t, err, "failed revoking authorization status")
revokedAuth, err := acmeClient.GetAuthorization(ctx, authorizations[0].URI)
require.NoError(t, err, "failed fetching authorization")
require.Equal(t, acme.StatusDeactivated, revokedAuth.Status)
// Deactivate account
t.Logf("Testing deactivate account on %s", baseAcmeURL)
err = acmeClient.DeactivateReg(testCtx)
require.NoError(t, err, "failed deactivating account")
// Make sure we get an unauthorized error trying to update the account again.
t.Logf("Testing update on deactivated account fails on %s", baseAcmeURL)
_, err = acmeClient.UpdateReg(testCtx, acct)
require.Error(t, err, "expected account to be deactivated")
require.IsType(t, &acme.Error{}, err, "expected acme error type")
acmeErr := err.(*acme.Error)
require.Equal(t, "urn:ietf:params:acme:error:unauthorized", acmeErr.ProblemType)
})
}
}
// TestAcmeBasicWorkflowWithEab verify that new accounts require EAB's if enforced by configuration.
func TestAcmeBasicWorkflowWithEab(t *testing.T) {
t.Parallel()
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
// Enable EAB
_, err := client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
"enabled": true,
"eab_policy": "always-required",
})
require.NoError(t, err)
cases := []struct {
name string
prefixUrl string
}{
{"root", "acme/"},
{"role", "roles/test-role/acme/"},
{"issuer", "issuer/int-ca/acme/"},
{"issuer_role", "issuer/int-ca/roles/test-role/acme/"},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
baseAcmeURL := "/v1/pki/" + tc.prefixUrl
accountKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed creating ec key")
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
t.Logf("Testing discover on %s", baseAcmeURL)
discovery, err := acmeClient.Discover(testCtx)
require.NoError(t, err, "failed acme discovery call")
require.True(t, discovery.ExternalAccountRequired, "bad value for external account required in directory")
// Create new account without EAB, should fail
t.Logf("Testing register on %s", baseAcmeURL)
_, err = acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
require.ErrorContains(t, err, "urn:ietf:params:acme:error:externalAccountRequired",
"expected failure creating an account without eab")
// Test fetch, list, delete workflow
kid, _ := getEABKey(t, client, tc.prefixUrl)
resp, err := client.Logical().ListWithContext(testCtx, "pki/eab")
require.NoError(t, err, "failed to list eab tokens")
require.NotNil(t, resp, "list response for eab tokens should not be nil")
require.Contains(t, resp.Data, "keys")
require.Contains(t, resp.Data, "key_info")
require.Len(t, resp.Data["keys"], 1)
require.Contains(t, resp.Data["keys"], kid)
_, err = client.Logical().DeleteWithContext(testCtx, "pki/eab/"+kid)
require.NoError(t, err, "failed to delete eab")
// List eabs should return zero results
resp, err = client.Logical().ListWithContext(testCtx, "pki/eab")
require.NoError(t, err, "failed to list eab tokens")
require.Nil(t, resp, "list response for eab tokens should have been nil")
// fetch a new EAB
kid, eabKeyBytes := getEABKey(t, client, tc.prefixUrl)
acct := &acme.Account{
ExternalAccountBinding: &acme.ExternalAccountBinding{
KID: kid,
Key: eabKeyBytes,
},
}
// Make sure we can list our key
resp, err = client.Logical().ListWithContext(testCtx, "pki/eab")
require.NoError(t, err, "failed to list eab tokens")
require.NotNil(t, resp, "list response for eab tokens should not be nil")
require.Contains(t, resp.Data, "keys")
require.Contains(t, resp.Data, "key_info")
require.Len(t, resp.Data["keys"], 1)
require.Contains(t, resp.Data["keys"], kid)
keyInfo := resp.Data["key_info"].(map[string]interface{})
require.Contains(t, keyInfo, kid)
infoForKid := keyInfo[kid].(map[string]interface{})
require.Equal(t, "hs", infoForKid["key_type"])
require.Equal(t, tc.prefixUrl+"directory", infoForKid["acme_directory"])
// Create new account with EAB
t.Logf("Testing register on %s", baseAcmeURL)
_, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering new account with eab")
// Make sure our EAB is no longer available
resp, err = client.Logical().ListWithContext(context.Background(), "pki/eab")
require.NoError(t, err, "failed to list eab tokens")
require.Nil(t, resp, "list response for eab tokens should have been nil due to empty list")
// Attempt to create another account with the same EAB as before -- should fail
accountKey2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed creating ec key")
acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey2)
acct2 := &acme.Account{
ExternalAccountBinding: &acme.ExternalAccountBinding{
KID: kid,
Key: eabKeyBytes,
},
}
_, err = acmeClient2.Register(testCtx, acct2, func(tosURL string) bool { return true })
require.ErrorContains(t, err, "urn:ietf:params:acme:error:unauthorized", "should fail due to EAB re-use")
// We can lookup/find an existing account without EAB if we have the account key
_, err = acmeClient.GetReg(testCtx /* unused url */, "")
require.NoError(t, err, "expected to lookup existing account without eab")
})
}
}
// TestAcmeNonce a basic test that will validate we get back a nonce with the proper status codes
// based on the
func TestAcmeNonce(t *testing.T) {
t.Parallel()
cluster, client, pathConfig := setupAcmeBackend(t)
defer cluster.Cleanup()
cases := []struct {
name string
prefixUrl string
directoryUrl string
}{
{"root", "", "pki/acme/new-nonce"},
{"role", "/roles/test-role", "pki/roles/test-role/acme/new-nonce"},
{"issuer", "/issuer/default", "pki/issuer/default/acme/new-nonce"},
{"issuer_role", "/issuer/default/roles/test-role", "pki/issuer/default/roles/test-role/acme/new-nonce"},
}
for _, tc := range cases {
for _, httpOp := range []string{"get", "header"} {
t.Run(fmt.Sprintf("%s-%s", tc.name, httpOp), func(t *testing.T) {
var req *api.Request
switch httpOp {
case "get":
req = client.NewRequest(http.MethodGet, "/v1/"+tc.directoryUrl)
case "header":
req = client.NewRequest(http.MethodHead, "/v1/"+tc.directoryUrl)
}
res, err := client.RawRequestWithContext(ctx, req)
require.NoError(t, err, "failed sending raw request")
_ = res.Body.Close()
// Proper Status Code
switch httpOp {
case "get":
require.Equal(t, http.StatusNoContent, res.StatusCode)
case "header":
require.Equal(t, http.StatusOK, res.StatusCode)
}
// Make sure we don't have a Content-Type header.
require.Equal(t, "", res.Header.Get("Content-Type"))
// Make sure we return the Cache-Control header
require.Contains(t, res.Header.Get("Cache-Control"), "no-store",
"missing Cache-Control header with no-store header value")
// Test for our nonce header value
require.NotEmpty(t, res.Header.Get("Replay-Nonce"), "missing Replay-Nonce header with an actual value")
// Test Link header value
expectedLinkHeader := fmt.Sprintf("<%s>;rel=\"index\"", pathConfig+tc.prefixUrl+"/acme/directory")
require.Contains(t, res.Header.Get("Link"), expectedLinkHeader,
"different value for link header than expected")
})
}
}
}
// TestAcmeClusterPathNotConfigured basic testing of the ACME error handler.
func TestAcmeClusterPathNotConfigured(t *testing.T) {
t.Parallel()
cluster, client := setupTestPkiCluster(t)
defer cluster.Cleanup()
// Go sneaky, sneaky and update the acme configuration through sys/raw to bypass config/cluster path checks
pkiMount := findStorageMountUuid(t, client, "pki")
rawPath := path.Join("/sys/raw/logical/", pkiMount, storageAcmeConfig)
_, err := client.Logical().WriteWithContext(context.Background(), rawPath, map[string]interface{}{
"value": "{\"enabled\": true, \"eab_policy_name\": \"not-required\"}",
})
require.NoError(t, err, "failed updating acme config through sys/raw")
// Force reload the plugin so we read the new config we slipped in.
_, err = client.Sys().ReloadPluginWithContext(context.Background(), &api.ReloadPluginInput{Mounts: []string{"pki"}})
require.NoError(t, err, "failed reloading plugin")
// Do not fill in the path option within the local cluster configuration
cases := []struct {
name string
directoryUrl string
}{
{"root", "pki/acme/directory"},
{"role", "pki/roles/test-role/acme/directory"},
{"issuer", "pki/issuer/default/acme/directory"},
{"issuer_role", "pki/issuer/default/roles/test-role/acme/directory"},
}
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
dirResp, err := client.Logical().ReadRawWithContext(testCtx, tc.directoryUrl)
require.Error(t, err, "expected failure reading ACME directory configuration got none")
require.Equal(t, "application/problem+json", dirResp.Header.Get("Content-Type"))
require.Equal(t, http.StatusInternalServerError, dirResp.StatusCode)
rawBodyBytes, err := io.ReadAll(dirResp.Body)
require.NoError(t, err, "failed reading from directory response body")
_ = dirResp.Body.Close()
respType := map[string]interface{}{}
err = json.Unmarshal(rawBodyBytes, &respType)
require.NoError(t, err, "failed unmarshalling ACME directory response body")
require.Equal(t, "urn:ietf:params:acme:error:serverInternal", respType["type"])
require.NotEmpty(t, respType["detail"])
})
}
}
// TestAcmeAccountsCrossingDirectoryPath makes sure that if an account attempts to use a different ACME
// directory path we get an error.
func TestAcmeAccountsCrossingDirectoryPath(t *testing.T) {
t.Parallel()
cluster, _, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
baseAcmeURL := "/v1/pki/acme/"
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
// Create new account
acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering account")
// Try to update the account under another ACME directory
baseAcmeURL2 := "/v1/pki/roles/test-role/acme/"
acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL2, accountKey)
acct.Contact = []string{"mailto:[email protected]"}
_, err = acmeClient2.UpdateReg(testCtx, acct)
require.Error(t, err, "successfully updated account when we should have failed due to different directory")
// We don't test for the specific error about using the wrong directory, as the golang library
// swallows the error we are sending back to a no account error
}
// TestAcmeEabCrossingDirectoryPath makes sure that if an account attempts to use an EAB under a different
// ACME directory path than the one it was created within, we get an error.
func TestAcmeEabCrossingDirectoryPath(t *testing.T) {
t.Parallel()
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
// Enable EAB
_, err := client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
"enabled": true,
"eab_policy": "always-required",
})
require.NoError(t, err)
baseAcmeURL := "/v1/pki/acme/"
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
// fetch a new EAB
kid, eabKeyBytes := getEABKey(t, client, "roles/test-role/acme/")
acct := &acme.Account{
ExternalAccountBinding: &acme.ExternalAccountBinding{
KID: kid,
Key: eabKeyBytes,
},
}
// Create new account
_, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true })
require.ErrorContains(t, err, "failed to verify eab", "should have failed as EAB is for a different directory")
}
// TestAcmeDisabledWithEnvVar verifies that if VAULT_DISABLE_PUBLIC_ACME is set we completely
// disable the ACME service
func TestAcmeDisabledWithEnvVar(t *testing.T) {
	// Set up a cluster with the configuration set to not-required initially, as the
	// configuration write will validate whether the environment var is set
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
	// Seal, set the environment variable, and unseal, which now means we have a cluster
	// with an ACME configuration saying it is enabled with a bad EAB policy.
cluster.EnsureCoresSealed(t)
t.Setenv("VAULT_DISABLE_PUBLIC_ACME", "true")
cluster.UnsealCores(t)
// Make sure that ACME is disabled now.
for _, method := range []string{http.MethodHead, http.MethodGet} {
t.Run(fmt.Sprintf("%s", method), func(t *testing.T) {
req := client.NewRequest(method, "/v1/pki/acme/new-nonce")
_, err := client.RawRequestWithContext(ctx, req)
require.Error(t, err, "should have received an error as ACME should have been disabled")
if apiError, ok := err.(*api.ResponseError); ok {
require.Equal(t, 404, apiError.StatusCode)
}
})
}
}
// TestAcmeConfigChecksPublicAcmeEnv verifies certain EAB policy values cannot be set if the ENV var is enabled
func TestAcmeConfigChecksPublicAcmeEnv(t *testing.T) {
t.Setenv("VAULT_DISABLE_PUBLIC_ACME", "true")
cluster, client := setupTestPkiCluster(t)
defer cluster.Cleanup()
_, err := client.Logical().WriteWithContext(context.Background(), "pki/config/cluster", map[string]interface{}{
"path": "https://dadgarcorp.com/v1/pki",
})
require.NoError(t, err)
_, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
"enabled": true,
"eab_policy": string(eabPolicyAlwaysRequired),
})
require.NoError(t, err)
for _, policyName := range []EabPolicyName{eabPolicyNewAccountRequired, eabPolicyNotRequired} {
_, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
"enabled": true,
"eab_policy": string(policyName),
})
require.Error(t, err, "eab policy %s should have not been allowed to be set")
}
// Make sure we can disable ACME and the eab policy is not checked
_, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
"enabled": false,
"eab_policy": string(eabPolicyNotRequired),
})
require.NoError(t, err)
}
// TestAcmeTruncatesToIssuerExpiry makes sure that if the selected issuer's expiry is shorter than the
// CSR's selected TTL value in ACME and the issuer's leaf_not_after_behavior setting is set to Err,
// we will override the configured behavior and truncate to the issuer's NotAfter
func TestAcmeTruncatesToIssuerExpiry(t *testing.T) {
t.Parallel()
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
mount := "pki"
resp, err := client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/intermediate/internal",
map[string]interface{}{
"key_name": "short-key",
"key_type": "ec",
"common_name": "test.com",
})
require.NoError(t, err, "failed creating intermediary CSR")
intermediateCSR := resp.Data["csr"].(string)
// Sign the intermediate CSR using /pki
resp, err = client.Logical().Write(mount+"/issuer/root-ca/sign-intermediate", map[string]interface{}{
"csr": intermediateCSR,
"ttl": "10m",
"max_ttl": "1h",
})
require.NoError(t, err, "failed signing intermediary CSR")
intermediateCertPEM := resp.Data["certificate"].(string)
shortCa := parseCert(t, intermediateCertPEM)
// Import the signed intermediate cert back into the mount as a new issuer
resp, err = client.Logical().Write(mount+"/issuers/import/cert", map[string]interface{}{
"pem_bundle": intermediateCertPEM,
})
require.NoError(t, err, "failed importing intermediary cert")
importedIssuersRaw := resp.Data["imported_issuers"].([]interface{})
require.Len(t, importedIssuersRaw, 1)
shortCaUuid := importedIssuersRaw[0].(string)
_, err = client.Logical().Write(mount+"/issuer/"+shortCaUuid, map[string]interface{}{
"leaf_not_after_behavior": "err",
"issuer_name": "short-ca",
})
require.NoError(t, err, "failed updating issuer name")
baseAcmeURL := "/v1/pki/issuer/short-ca/acme/"
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
// Create new account
t.Logf("Testing register on %s", baseAcmeURL)
acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering account")
// Create an order
t.Logf("Testing Authorize Order on %s", baseAcmeURL)
identifiers := []string{"*.localdomain"}
order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
{Type: "dns", Value: identifiers[0]},
})
require.NoError(t, err, "failed creating order")
// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow
// test.
markAuthorizationSuccess(t, client, acmeClient, acct, order)
// Build a proper CSR with the correct name, signed with a key different from the account key.
goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}}
csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed generated key for CSR")
csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
require.NoError(t, err, "failed generating csr")
certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
require.NoError(t, err, "failed finalizing order")
require.Len(t, certs, 3, "expected full acme chain")
testAcmeCertSignedByCa(t, client, certs, "short-ca")
acmeCert, err := x509.ParseCertificate(certs[0])
require.NoError(t, err, "failed parsing acme cert")
require.Equal(t, shortCa.NotAfter, acmeCert.NotAfter, "certificate times aren't the same")
}
// TestAcmeRoleExtKeyUsage verifies that ACME by default ignores the role's various ExtKeyUsage flags,
// but that if the ACME configuration override allow_role_ext_key_usage is set we then honor
// the role's flags.
func TestAcmeRoleExtKeyUsage(t *testing.T) {
t.Parallel()
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
roleName := "test-role"
roleOpt := map[string]interface{}{
"ttl": "365h",
"max_ttl": "720h",
"key_type": "any",
"allowed_domains": "localdomain",
"allow_subdomains": "true",
"allow_wildcard_certificates": "true",
"require_cn": "true", /* explicit default */
"server_flag": "true",
"client_flag": "true",
"code_signing_flag": "true",
"email_protection_flag": "true",
}
_, err := client.Logical().Write("pki/roles/"+roleName, roleOpt)
baseAcmeURL := "/v1/pki/roles/" + roleName + "/acme/"
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
require.NoError(t, err, "failed creating role test-role")
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
// Create new account
t.Logf("Testing register on %s", baseAcmeURL)
acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering account")
// Create an order
t.Logf("Testing Authorize Order on %s", baseAcmeURL)
identifiers := []string{"*.localdomain"}
order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
{Type: "dns", Value: identifiers[0]},
})
require.NoError(t, err, "failed creating order")
// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test.
markAuthorizationSuccess(t, client, acmeClient, acct, order)
// Build a proper CSR with the correct name, signed with a key different from the account key.
goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}}
csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed generated key for CSR")
csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
require.NoError(t, err, "failed generating csr")
certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
require.NoError(t, err, "order finalization failed")
require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle")
acmeCert, err := x509.ParseCertificate(certs[0])
require.NoError(t, err, "failed parsing acme cert")
require.Equal(t, 1, len(acmeCert.ExtKeyUsage), "mis-match on expected ExtKeyUsages")
require.ElementsMatch(t, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, acmeCert.ExtKeyUsage,
"mismatch of ExtKeyUsage flags")
// Now turn the ACME configuration allow_role_ext_key_usage and retest to make sure we get a certificate
// with them all
_, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
"enabled": true,
"eab_policy": "not-required",
"allow_role_ext_key_usage": true,
})
require.NoError(t, err, "failed updating ACME configuration")
t.Logf("Testing Authorize Order on %s", baseAcmeURL)
order, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
{Type: "dns", Value: identifiers[0]},
})
require.NoError(t, err, "failed creating order")
// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test.
markAuthorizationSuccess(t, client, acmeClient, acct, order)
certs, _, err = acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
require.NoError(t, err, "order finalization failed")
require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle")
acmeCert, err = x509.ParseCertificate(certs[0])
require.NoError(t, err, "failed parsing acme cert")
require.Equal(t, 4, len(acmeCert.ExtKeyUsage), "mis-match on expected ExtKeyUsages")
require.ElementsMatch(t, []x509.ExtKeyUsage{
x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth,
x509.ExtKeyUsageCodeSigning, x509.ExtKeyUsageEmailProtection,
},
acmeCert.ExtKeyUsage, "mismatch of ExtKeyUsage flags")
}
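// TestIssuerRoleDirectoryAssociations verifies that the allowed_roles, allowed_issuers, and
// default_directory_policy ACME configuration options properly restrict which directories are
// usable and control which issuer and role serve each directory.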
func TestIssuerRoleDirectoryAssociations(t *testing.T) {
t.Parallel()
// This creates two issuers for us (root-ca, int-ca) and two
// roles (test-role, acme) that we can use with various directory
// configurations.
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
// Setup DNS for validations.
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
dns := dnstest.SetupResolver(t, "dadgarcorp.com")
defer dns.Cleanup()
_, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{
"dns_resolver": dns.GetLocalAddr(),
})
require.NoError(t, err, "failed to specify dns resolver")
// 1. Use a forbidden role should fail.
resp, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{
"enabled": true,
"allowed_roles": []string{"acme"},
})
require.NoError(t, err, "failed to write config")
require.NotNil(t, resp)
_, err = client.Logical().ReadWithContext(testCtx, "pki/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role under default issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role under int-ca issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role under root-ca issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/roles/acme/acme/directory")
require.NoError(t, err, "failed to allow usage of acme")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/acme/acme/directory")
require.NoError(t, err, "failed to allow usage of acme under default issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/acme/acme/directory")
require.NoError(t, err, "failed to allow usage of acme under int-ca issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/acme/acme/directory")
require.NoError(t, err, "failed to allow usage of acme under root-ca issuer")
// 2. Use a forbidden issuer should fail.
resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{
"allowed_roles": []string{"acme"},
"allowed_issuers": []string{"int-ca"},
})
require.NoError(t, err, "failed to write config")
require.NotNil(t, resp)
_, err = client.Logical().ReadWithContext(testCtx, "pki/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role under default issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role under int-ca issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/test-role/acme/directory")
require.Error(t, err, "failed to forbid usage of test-role under root-ca issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/acme/acme/directory")
require.Error(t, err, "failed to forbid usage of acme under root-ca issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/roles/acme/acme/directory")
require.NoError(t, err, "failed to allow usage of acme")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/acme/acme/directory")
require.NoError(t, err, "failed to allow usage of acme under default issuer")
_, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/acme/acme/directory")
require.NoError(t, err, "failed to allow usage of acme under int-ca issuer")
// 3. Setting the default directory to be a sign-verbatim policy and
// using two different CAs should result in certs signed by each CA.
resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{
"allowed_roles": []string{"*"},
"allowed_issuers": []string{"*"},
"default_directory_policy": "sign-verbatim",
})
require.NoError(t, err, "failed to write config")
require.NotNil(t, resp)
// default == int-ca
acmeClientDefault := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/default/acme/", nil)
defaultLeafCert := doACMEForDomainWithDNS(t, dns, acmeClientDefault, []string{"default-ca.dadgarcorp.com"})
requireSignedByAtPath(t, client, defaultLeafCert, "pki/issuer/int-ca")
acmeClientIntCA := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/int-ca/acme/", nil)
intCALeafCert := doACMEForDomainWithDNS(t, dns, acmeClientIntCA, []string{"int-ca.dadgarcorp.com"})
requireSignedByAtPath(t, client, intCALeafCert, "pki/issuer/int-ca")
acmeClientRootCA := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/root-ca/acme/", nil)
rootCALeafCert := doACMEForDomainWithDNS(t, dns, acmeClientRootCA, []string{"root-ca.dadgarcorp.com"})
requireSignedByAtPath(t, client, rootCALeafCert, "pki/issuer/root-ca")
// 4. Using a role-based default directory should allow us to control leaf
// issuance on the base and issuer-specific directories.
resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{
"allowed_roles": []string{"*"},
"allowed_issuers": []string{"*"},
"default_directory_policy": "role:acme",
})
require.NoError(t, err, "failed to write config")
require.NotNil(t, resp)
resp, err = client.Logical().JSONMergePatch(testCtx, "pki/roles/acme", map[string]interface{}{
"ou": "IT Security",
"organization": []string{"Dadgar Corporation, Limited"},
"allow_any_name": true,
})
require.NoError(t, err, "failed to write role differentiator")
require.NotNil(t, resp)
for _, issuer := range []string{"", "default", "int-ca", "root-ca"} {
// Path should override role.
directory := "/v1/pki/issuer/" + issuer + "/acme/"
issuerPath := "/pki/issuer/" + issuer
if issuer == "" {
directory = "/v1/pki/acme/"
issuerPath = "/pki/issuer/int-ca"
} else if issuer == "default" {
issuerPath = "/pki/issuer/int-ca"
}
t.Logf("using directory: %v / issuer: %v", directory, issuerPath)
acmeClient := getAcmeClientForCluster(t, cluster, directory, nil)
leafCert := doACMEForDomainWithDNS(t, dns, acmeClient, []string{"role-restricted.dadgarcorp.com"})
require.Contains(t, leafCert.Subject.Organization, "Dadgar Corporation, Limited", "on directory: %v", directory)
require.Contains(t, leafCert.Subject.OrganizationalUnit, "IT Security", "on directory: %v", directory)
requireSignedByAtPath(t, client, leafCert, issuerPath)
}
}
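// TestACMESubjectFieldsAndExtensionsIgnored verifies that CSR subject fields other than CN (such as OU)
// and extra CSR extensions are ignored when issuing through the default sign-verbatim ACME directory policy.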
func TestACMESubjectFieldsAndExtensionsIgnored(t *testing.T) {
t.Parallel()
// This creates two issuers for us (root-ca, int-ca) and two
// roles (test-role, acme) that we can use with various directory
// configurations.
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
// Setup DNS for validations.
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
dns := dnstest.SetupResolver(t, "dadgarcorp.com")
defer dns.Cleanup()
_, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{
"dns_resolver": dns.GetLocalAddr(),
})
require.NoError(t, err, "failed to specify dns resolver")
// Use the default sign-verbatim policy and ensure OU does not get set.
directory := "/v1/pki/acme/"
domains := []string{"no-ou.dadgarcorp.com"}
acmeClient := getAcmeClientForCluster(t, cluster, directory, nil)
cr := &x509.CertificateRequest{
Subject: pkix.Name{CommonName: domains[0], OrganizationalUnit: []string{"DadgarCorp IT"}},
DNSNames: domains,
}
cert := doACMEForCSRWithDNS(t, dns, acmeClient, domains, cr)
t.Logf("Got certificate: %v", cert)
require.Empty(t, cert.Subject.OrganizationalUnit)
// Use the default sign-verbatim policy and ensure extension does not get set.
domains = []string{"no-ext.dadgarcorp.com"}
extension, err := certutil.CreateDeltaCRLIndicatorExt(12345)
require.NoError(t, err)
cr = &x509.CertificateRequest{
Subject: pkix.Name{CommonName: domains[0]},
DNSNames: domains,
ExtraExtensions: []pkix.Extension{extension},
}
cert = doACMEForCSRWithDNS(t, dns, acmeClient, domains, cr)
t.Logf("Got certificate: %v", cert)
for _, ext := range cert.Extensions {
require.False(t, ext.Id.Equal(certutil.DeltaCRLIndicatorOID))
}
require.NotEmpty(t, cert.Extensions)
}
// TestAcmeWithCsrIncludingBasicConstraintExtension verifies that we error out for a CSR requesting a
// certificate with IsCA set to true within the basic constraints extension (IsCA set to false is okay), and
// that in either case the extension is not present on the returned certificate.
func TestAcmeWithCsrIncludingBasicConstraintExtension(t *testing.T) {
t.Parallel()
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
baseAcmeURL := "/v1/pki/acme/"
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
// Create new account
t.Logf("Testing register on %s", baseAcmeURL)
acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering account")
// Create an order
t.Logf("Testing Authorize Order on %s", baseAcmeURL)
identifiers := []string{"*.localdomain"}
order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
{Type: "dns", Value: identifiers[0]},
})
require.NoError(t, err, "failed creating order")
// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test.
markAuthorizationSuccess(t, client, acmeClient, acct, order)
// Build a CSR with IsCA set to true, making sure we reject it
extension, err := certutil.CreateBasicConstraintExtension(true, -1)
require.NoError(t, err, "failed generating basic constraint extension")
isCATrueCSR := &x509.CertificateRequest{
DNSNames: []string{identifiers[0]},
ExtraExtensions: []pkix.Extension{extension},
}
csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed generated key for CSR")
csr, err := x509.CreateCertificateRequest(rand.Reader, isCATrueCSR, csrKey)
require.NoError(t, err, "failed generating csr")
_, _, err = acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
require.Error(t, err, "order finalization should have failed with IsCA set to true")
extension, err = certutil.CreateBasicConstraintExtension(false, -1)
require.NoError(t, err, "failed generating basic constraint extension")
isCAFalseCSR := &x509.CertificateRequest{
DNSNames: []string{identifiers[0]},
Extensions: []pkix.Extension{extension},
}
csr, err = x509.CreateCertificateRequest(rand.Reader, isCAFalseCSR, csrKey)
require.NoError(t, err, "failed generating csr")
certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
require.NoError(t, err, "order finalization should have failed with IsCA set to false")
require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle")
acmeCert, err := x509.ParseCertificate(certs[0])
require.NoError(t, err, "failed parsing acme cert")
// Make sure we don't have any basic constraint extension within the returned cert
for _, ext := range acmeCert.Extensions {
if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) {
// We shouldn't have this extension in our cert
t.Fatalf("acme csr contained a basic constraints extension")
}
}
}
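// markAuthorizationSuccess forcibly marks the order's authorizations and challenges as valid by rewriting
// their storage entries through sys/raw, since this test harness cannot complete a real ACME challenge
// validation. It retries until the engine reports the order as ready.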
func markAuthorizationSuccess(t *testing.T, client *api.Client, acmeClient *acme.Client, acct *acme.Account, order *acme.Order) {
testCtx := context.Background()
pkiMount := findStorageMountUuid(t, client, "pki")
// Delete any and all challenge validation entries to stop the engine from overwriting our hack here
i := 0
for {
deleteCvEntries(t, client, pkiMount)
accountId := acct.URI[strings.LastIndex(acct.URI, "/"):]
for _, authURI := range order.AuthzURLs {
authId := authURI[strings.LastIndex(authURI, "/"):]
// sys/raw does not work with namespaces
baseClient := client.WithNamespace("")
values, err := baseClient.Logical().ListWithContext(testCtx, "sys/raw/logical/")
require.NoError(t, err)
require.NotNil(t, values, "expected a non-nil listing of sys/raw/logical/ entries: %v", values)
rawPath := path.Join("sys/raw/logical/", pkiMount, getAuthorizationPath(accountId, authId))
resp, err := baseClient.Logical().ReadWithContext(testCtx, rawPath)
require.NoError(t, err, "failed looking up authorization storage")
require.NotNil(t, resp, "sys raw response was nil")
require.NotEmpty(t, resp.Data["value"], "no value field in sys raw response")
var authz ACMEAuthorization
err = jsonutil.DecodeJSON([]byte(resp.Data["value"].(string)), &authz)
require.NoError(t, err, "error decoding authorization: %w", err)
authz.Status = ACMEAuthorizationValid
for _, challenge := range authz.Challenges {
challenge.Status = ACMEChallengeValid
}
encodeJSON, err := jsonutil.EncodeJSON(authz)
require.NoError(t, err, "failed encoding authz json")
_, err = baseClient.Logical().WriteWithContext(testCtx, rawPath, map[string]interface{}{
"value": base64.StdEncoding.EncodeToString(encodeJSON),
"encoding": "base64",
})
require.NoError(t, err, "failed writing authorization storage")
}
// Give some time
time.Sleep(200 * time.Millisecond)
// Check to see if we have fixed up the status and no new entries have appeared.
if !deleteCvEntries(t, client, pkiMount) {
// No entries found
// Look to see if we raced against the engine
orderLookup, err := acmeClient.GetOrder(testCtx, order.URI)
require.NoError(t, err, "failed loading order status after manually ")
if orderLookup.Status == string(ACMEOrderReady) {
// Our order seems to be in the proper status, should be safe-ish to go ahead now
break
} else {
t.Logf("order status was not ready, retrying")
}
} else {
t.Logf("new challenge entries appeared after deletion, retrying")
}
if i > 5 {
t.Fatalf("We are constantly deleting cv entries or order status is not changing, something is wrong")
}
i++
}
}
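// deleteCvEntries deletes any pending ACME challenge validation entries from raw storage so the validation
// engine does not overwrite our manually patched authorization state; it returns true if any entries were deleted.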
func deleteCvEntries(t *testing.T, client *api.Client, pkiMount string) bool {
testCtx := context.Background()
baseClient := client.WithNamespace("")
cvPath := path.Join("sys/raw/logical/", pkiMount, acmeValidationPrefix)
resp, err := baseClient.Logical().ListWithContext(testCtx, cvPath)
require.NoError(t, err, "failed listing cv path items")
deletedEntries := false
if resp != nil {
cvEntries := resp.Data["keys"].([]interface{})
for _, cvEntry := range cvEntries {
cvEntryPath := path.Join(cvPath, cvEntry.(string))
_, err = baseClient.Logical().DeleteWithContext(testCtx, cvEntryPath)
require.NoError(t, err, "failed to delete cv entry")
deletedEntries = true
}
}
return deletedEntries
}
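// setupAcmeBackend creates a test cluster with a PKI mount at pki/ fully configured for ACME testing.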
func setupAcmeBackend(t *testing.T) (*vault.TestCluster, *api.Client, string) {
cluster, client := setupTestPkiCluster(t)
return setupAcmeBackendOnClusterAtPath(t, cluster, client, "pki")
}
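// setupAcmeBackendOnClusterAtPath mounts a PKI backend at the given path (creating any namespaces in the
// path on enterprise builds), enables ACME with eab_policy=not-required, sets up a root-ca and int-ca issuer
// pair, and creates the test-role and acme roles used by the tests above.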
func setupAcmeBackendOnClusterAtPath(t *testing.T, cluster *vault.TestCluster, client *api.Client, mount string) (*vault.TestCluster, *api.Client, string) {
mount = strings.Trim(mount, "/")
// Setting templated AIAs should succeed.
pathConfig := client.Address() + "/v1/" + mount
namespace := ""
mountName := mount
if mount != "pki" {
if strings.Contains(mount, "/") && constants.IsEnterprise {
ns_pieces := strings.Split(mount, "/")
c := len(ns_pieces)
// the last path segment (index c-1) is the mount name; everything before it is the namespace path
ns_name := ns_pieces[c-2]
if len(ns_pieces) > 2 {
// Parent's namespaces
parent := strings.Join(ns_pieces[0:c-2], "/")
_, err := client.WithNamespace(parent).Logical().Write("/sys/namespaces/"+ns_name, nil)
require.NoError(t, err, "failed to create nested namespaces "+parent+" -> "+ns_name)
} else {
_, err := client.Logical().Write("/sys/namespaces/"+ns_name, nil)
require.NoError(t, err, "failed to create nested namespace "+ns_name)
}
namespace = strings.Join(ns_pieces[0:c-1], "/")
mountName = ns_pieces[c-1]
}
err := client.WithNamespace(namespace).Sys().Mount(mountName, &api.MountInput{
Type: "pki",
Config: api.MountConfigInput{
DefaultLeaseTTL: "3000h",
MaxLeaseTTL: "600000h",
},
})
require.NoError(t, err, "failed to mount new PKI instance at "+mount)
}
err := client.Sys().TuneMountWithContext(ctx, mount, api.MountConfigInput{
DefaultLeaseTTL: "3000h",
MaxLeaseTTL: "600000h",
})
require.NoError(t, err, "failed updating mount lease times "+mount)
_, err = client.Logical().WriteWithContext(context.Background(), mount+"/config/cluster", map[string]interface{}{
"path": pathConfig,
"aia_path": "http://localhost:8200/cdn/" + mount,
})
require.NoError(t, err)
_, err = client.Logical().WriteWithContext(context.Background(), mount+"/config/acme", map[string]interface{}{
"enabled": true,
"eab_policy": "not-required",
})
require.NoError(t, err)
// Allow certain headers to pass through for ACME support
_, err = client.WithNamespace(namespace).Logical().WriteWithContext(context.Background(), "sys/mounts/"+mountName+"/tune", map[string]interface{}{
"allowed_response_headers": []string{"Last-Modified", "Replay-Nonce", "Link", "Location"},
"max_lease_ttl": "920000h",
})
require.NoError(t, err, "failed tuning mount response headers")
resp, err := client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/root/internal",
map[string]interface{}{
"issuer_name": "root-ca",
"key_name": "root-key",
"key_type": "ec",
"common_name": "Test Root R1 " + mount,
"ttl": "7200h",
"max_ttl": "920000h",
})
require.NoError(t, err, "failed creating root CA")
resp, err = client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/intermediate/internal",
map[string]interface{}{
"key_name": "int-key",
"key_type": "ec",
"common_name": "Test Int X1 " + mount,
})
require.NoError(t, err, "failed creating intermediary CSR")
intermediateCSR := resp.Data["csr"].(string)
// Sign the intermediate CSR using the root-ca issuer
resp, err = client.Logical().Write(mount+"/issuer/root-ca/sign-intermediate", map[string]interface{}{
"csr": intermediateCSR,
"ttl": "7100h",
"max_ttl": "910000h",
})
require.NoError(t, err, "failed signing intermediary CSR")
intermediateCertPEM := resp.Data["certificate"].(string)
// Import the signed intermediate cert back into the mount as a new issuer
resp, err = client.Logical().Write(mount+"/issuers/import/cert", map[string]interface{}{
"pem_bundle": intermediateCertPEM,
})
require.NoError(t, err, "failed importing intermediary cert")
importedIssuersRaw := resp.Data["imported_issuers"].([]interface{})
require.Len(t, importedIssuersRaw, 1)
intCaUuid := importedIssuersRaw[0].(string)
_, err = client.Logical().Write(mount+"/issuer/"+intCaUuid, map[string]interface{}{
"issuer_name": "int-ca",
})
require.NoError(t, err, "failed updating issuer name")
_, err = client.Logical().Write(mount+"/config/issuers", map[string]interface{}{
"default": "int-ca",
})
require.NoError(t, err, "failed updating default issuer")
_, err = client.Logical().Write(mount+"/roles/test-role", map[string]interface{}{
"ttl": "168h",
"max_ttl": "168h",
"key_type": "any",
"allowed_domains": "localdomain",
"allow_subdomains": "true",
"allow_wildcard_certificates": "true",
})
require.NoError(t, err, "failed creating role test-role")
_, err = client.Logical().Write(mount+"/roles/acme", map[string]interface{}{
"ttl": "3650h",
"max_ttl": "7200h",
"key_type": "any",
})
require.NoError(t, err, "failed creating role acme")
return cluster, client, pathConfig
}
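// testAcmeCertSignedByCa verifies that the leaf in the returned ACME chain was signed by the referenced
// issuer and that the rest of the chain matches the issuer's ca_chain exactly.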
func testAcmeCertSignedByCa(t *testing.T, client *api.Client, derCerts [][]byte, issuerRef string) {
t.Helper()
require.NotEmpty(t, derCerts)
acmeCert, err := x509.ParseCertificate(derCerts[0])
require.NoError(t, err, "failed parsing acme cert bytes")
resp, err := client.Logical().ReadWithContext(context.Background(), "pki/issuer/"+issuerRef)
require.NoError(t, err, "failed reading issuer with name %s", issuerRef)
issuerCert := parseCert(t, resp.Data["certificate"].(string))
issuerChainRaw := resp.Data["ca_chain"].([]interface{})
err = acmeCert.CheckSignatureFrom(issuerCert)
require.NoError(t, err, "issuer %s did not sign provided cert", issuerRef)
expectedCerts := [][]byte{derCerts[0]}
for _, entry := range issuerChainRaw {
chainCert := parseCert(t, entry.(string))
expectedCerts = append(expectedCerts, chainCert.Raw)
}
if diffs := deep.Equal(expectedCerts, derCerts); diffs != nil {
t.Fatalf("diffs were found between the acme chain returned and the expected value: \n%v", diffs)
}
}
// TestAcmeValidationError makes sure that we properly return errors on validation failures.
func TestAcmeValidationError(t *testing.T) {
t.Parallel()
cluster, _, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
baseAcmeURL := "/v1/pki/acme/"
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
// Create new account
t.Logf("Testing register on %s", baseAcmeURL)
_, err = acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering account")
// Create an order
t.Logf("Testing Authorize Order on %s", baseAcmeURL)
identifiers := []string{"www.dadgarcorp.com"}
order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
{Type: "dns", Value: identifiers[0]},
})
require.NoError(t, err, "failed creating order")
// Load authorizations
var authorizations []*acme.Authorization
for _, authUrl := range order.AuthzURLs {
auth, err := acmeClient.GetAuthorization(testCtx, authUrl)
require.NoError(t, err, "failed fetching authorization: %s", authUrl)
authorizations = append(authorizations, auth)
}
require.Len(t, authorizations, 1, "expected exactly one authorization")
require.Len(t, authorizations[0].Challenges, 3, "expected three challenges associated with the authorization")
acceptedAuth, err := acmeClient.Accept(testCtx, authorizations[0].Challenges[0])
require.NoError(t, err, "Should have been allowed to accept challenge 1")
require.Equal(t, string(ACMEChallengeProcessing), acceptedAuth.Status)
_, err = acmeClient.Accept(testCtx, authorizations[0].Challenges[1])
require.Error(t, err, "Should have been prevented to accept challenge 2")
// Make sure our challenge returns errors
testhelpers.RetryUntil(t, 30*time.Second, func() error {
challenge, err := acmeClient.GetChallenge(testCtx, authorizations[0].Challenges[0].URI)
if err != nil {
return err
}
if challenge.Error == nil {
return fmt.Errorf("no error set in challenge yet")
}
acmeError, ok := challenge.Error.(*acme.Error)
if !ok {
return fmt.Errorf("unexpected error back: %v", err)
}
if acmeError.ProblemType != "urn:ietf:params:acme:error:incorrectResponse" {
return fmt.Errorf("unexpected ACME error back: %v", acmeError)
}
return nil
})
// Make sure our challenge,auth and order status change.
// This takes a little too long to run in CI properly, we need the ability to influence
// how long the validations take before CI can go wild on this.
if os.Getenv("CI") == "" {
testhelpers.RetryUntil(t, 10*time.Minute, func() error {
challenge, err := acmeClient.GetChallenge(testCtx, authorizations[0].Challenges[0].URI)
if err != nil {
return fmt.Errorf("failed to load challenge: %w", err)
}
if challenge.Status != string(ACMEChallengeInvalid) {
return fmt.Errorf("challenge state was not changed to invalid: %v", challenge)
}
authz, err := acmeClient.GetAuthorization(testCtx, authorizations[0].URI)
if err != nil {
return fmt.Errorf("failed to load authorization: %w", err)
}
if authz.Status != string(ACMEAuthorizationInvalid) {
return fmt.Errorf("authz state was not changed to invalid: %v", authz)
}
myOrder, err := acmeClient.GetOrder(testCtx, order.URI)
if err != nil {
return fmt.Errorf("failed to load order: %w", err)
}
if myOrder.Status != string(ACMEOrderInvalid) {
return fmt.Errorf("order state was not changed to invalid: %v", order)
}
return nil
})
}
}
// TestAcmeRevocationAcrossAccounts makes sure that we can revoke a certificate from a different ACME
// account (or with no account at all) as long as we have access to the certificate's private key, and
// verifies that we can't revoke certificates using only a different account's key.
func TestAcmeRevocationAcrossAccounts(t *testing.T) {
t.Parallel()
cluster, vaultClient, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
baseAcmeURL := "/v1/pki/acme/"
accountKey1, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
acmeClient1 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey1)
leafKey, certs := doACMEWorkflow(t, vaultClient, acmeClient1)
acmeCert, err := x509.ParseCertificate(certs[0])
require.NoError(t, err, "failed parsing acme cert bytes")
// Make sure our cert is not revoked
certResp, err := vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert))
require.NoError(t, err, "failed to read certificate status")
require.NotNil(t, certResp, "certificate status response was nil")
revocationTime := certResp.Data["revocation_time"].(json.Number)
revocationTimeInt, err := revocationTime.Int64()
require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
require.Equal(t, revocationTimeInt, int64(0),
"revocation time was not 0, cert was already revoked: %v", revocationTimeInt)
// Test that we can't revoke the certificate with another account's key
accountKey2, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
require.NoError(t, err, "failed creating rsa key")
acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey2)
_, err = acmeClient2.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering second account")
err = acmeClient2.RevokeCert(ctx, nil, certs[0], acme.CRLReasonUnspecified)
require.Error(t, err, "should have failed revoking the certificate with a different account")
// Make sure our cert is not revoked
certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert))
require.NoError(t, err, "failed to read certificate status")
require.NotNil(t, certResp, "certificate status response was nil")
revocationTime = certResp.Data["revocation_time"].(json.Number)
revocationTimeInt, err = revocationTime.Int64()
require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
require.Equal(t, revocationTimeInt, int64(0),
"revocation time was not 0, cert was already revoked: %v", revocationTimeInt)
// But we can revoke if we sign the request with the certificate's key and a different account
err = acmeClient2.RevokeCert(ctx, leafKey, certs[0], acme.CRLReasonUnspecified)
require.NoError(t, err, "should have been allowed to revoke certificate with csr key across accounts")
// Make sure our cert is now revoked
certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert))
require.NoError(t, err, "failed to read certificate status")
require.NotNil(t, certResp, "certificate status response was nil")
revocationTime = certResp.Data["revocation_time"].(json.Number)
revocationTimeInt, err = revocationTime.Int64()
require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
require.Greater(t, revocationTimeInt, int64(0),
"revocation time was not greater than 0, cert was not revoked: %v", revocationTimeInt)
// Make sure we can revoke a certificate without a registered ACME account
leafKey2, certs2 := doACMEWorkflow(t, vaultClient, acmeClient1)
acmeClient3 := getAcmeClientForCluster(t, cluster, baseAcmeURL, nil)
err = acmeClient3.RevokeCert(ctx, leafKey2, certs2[0], acme.CRLReasonUnspecified)
require.NoError(t, err, "should be allowed to revoke a cert with no ACME account but with cert key")
// Make sure our cert is now revoked
acmeCert2, err := x509.ParseCertificate(certs2[0])
require.NoError(t, err, "failed parsing acme cert 2 bytes")
certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert2))
require.NoError(t, err, "failed to read certificate status")
require.NotNil(t, certResp, "certificate status response was nil")
revocationTime = certResp.Data["revocation_time"].(json.Number)
revocationTimeInt, err = revocationTime.Int64()
require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
require.Greater(t, revocationTimeInt, int64(0),
"revocation time was not greater than 0, cert was not revoked: %v", revocationTimeInt)
}
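// doACMEWorkflow registers (or re-uses) an ACME account, orders a certificate for *.localdomain,
// force-marks the authorizations valid, and finalizes the order, returning the CSR private key and the issued chain.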
func doACMEWorkflow(t *testing.T, vaultClient *api.Client, acmeClient *acme.Client) (*ecdsa.PrivateKey, [][]byte) {
testCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// Create new account
acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
if err != nil {
if strings.Contains(err.Error(), "acme: account already exists") {
acct, err = acmeClient.GetReg(testCtx, "")
require.NoError(t, err, "failed looking up account after account exists error?")
} else {
require.NoError(t, err, "failed registering account")
}
}
// Create an order
identifiers := []string{"*.localdomain"}
order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
{Type: "dns", Value: identifiers[0]},
})
require.NoError(t, err, "failed creating order")
// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow
// test.
markAuthorizationSuccess(t, vaultClient, acmeClient, acct, order)
// Build a proper CSR with the correct name, signed with a key different from the account key.
goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}}
csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed generated key for CSR")
csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
require.NoError(t, err, "failed generating csr")
certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
require.NoError(t, err, "failed finalizing order")
require.Len(t, certs, 3, "expected full acme chain")
return csrKey, certs
}
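// setupTestPkiCluster starts a single-node test cluster with the PKI backend and raw storage access enabled,
// and mounts a PKI instance at pki/.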
func setupTestPkiCluster(t *testing.T) (*vault.TestCluster, *api.Client) {
coreConfig := &vault.CoreConfig{
LogicalBackends: map[string]logical.Factory{
"pki": Factory,
},
EnableRaw: true,
}
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
})
cluster.Start()
client := cluster.Cores[0].Client
mountPKIEndpoint(t, client, "pki")
return cluster, client
}
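// getAcmeClientForCluster builds an acme.Client that talks to the cluster's active node over TLS,
// normalizing baseUrl into a v1-prefixed directory URL.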
func getAcmeClientForCluster(t *testing.T, cluster *vault.TestCluster, baseUrl string, key crypto.Signer) *acme.Client {
coreAddr := cluster.Cores[0].Listeners[0].Address
tlsConfig := cluster.Cores[0].TLSConfig()
transport := cleanhttp.DefaultPooledTransport()
transport.TLSClientConfig = tlsConfig.Clone()
if err := http2.ConfigureTransport(transport); err != nil {
t.Fatal(err)
}
httpClient := &http.Client{Transport: transport}
if baseUrl[0] == '/' {
baseUrl = baseUrl[1:]
}
if !strings.HasPrefix(baseUrl, "v1/") {
baseUrl = "v1/" + baseUrl
}
if !strings.HasSuffix(baseUrl, "/") {
baseUrl = baseUrl + "/"
}
baseAcmeURL := fmt.Sprintf("https://%s/%s", coreAddr.String(), baseUrl)
return &acme.Client{
Key: key,
HTTPClient: httpClient,
DirectoryURL: baseAcmeURL + "directory",
}
}
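// getEABKey requests a new external account binding from the given ACME directory path and returns its
// key ID along with the decoded HMAC key bytes, validating the response fields along the way.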
func getEABKey(t *testing.T, client *api.Client, baseUrl string) (string, []byte) {
t.Helper()
resp, err := client.Logical().WriteWithContext(ctx, path.Join("pki/", baseUrl, "/new-eab"), map[string]interface{}{})
require.NoError(t, err, "failed getting eab key")
require.NotNil(t, resp, "eab key returned nil response")
require.NotEmpty(t, resp.Data["id"], "eab key response missing id field")
kid := resp.Data["id"].(string)
require.NotEmpty(t, resp.Data["key"], "eab key response missing private_key field")
base64Key := resp.Data["key"].(string)
require.True(t, strings.HasPrefix(base64Key, "vault-eab-0-"), "%s should have had a prefix of vault-eab-0-", base64Key)
privateKeyBytes, err := base64.RawURLEncoding.DecodeString(base64Key)
require.NoError(t, err, "failed base 64 decoding eab key response")
require.Equal(t, "hs", resp.Data["key_type"], "eab key_type field mis-match")
require.Equal(t, baseUrl+"directory", resp.Data["acme_directory"], "eab acme_directory field mis-match")
require.NotEmpty(t, resp.Data["created_on"], "empty created_on field")
_, err = time.Parse(time.RFC3339, resp.Data["created_on"].(string))
require.NoError(t, err, "failed parsing eab created_on field")
return kid, privateKeyBytes
}
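// TestACMEClientRequestLimits verifies that orders can be finalized when the CSR's identifiers
// (CN only, SAN only, or IP SAN only) line up with the authorized identifiers under a
// sign-verbatim default directory policy.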
func TestACMEClientRequestLimits(t *testing.T) {
cluster, client, _ := setupAcmeBackend(t)
defer cluster.Cleanup()
cases := []struct {
name string
authorizations []acme.AuthzID
requestCSR x509.CertificateRequest
valid bool
}{
{
"validate-only-cn",
[]acme.AuthzID{
{"dns", "localhost"},
},
x509.CertificateRequest{
Subject: pkix.Name{CommonName: "localhost"},
},
true,
},
{
"validate-only-san",
[]acme.AuthzID{
{"dns", "localhost"},
},
x509.CertificateRequest{
DNSNames: []string{"localhost"},
},
true,
},
{
"validate-only-ip-address",
[]acme.AuthzID{
{"ip", "127.0.0.1"},
},
x509.CertificateRequest{
IPAddresses: []net.IP{{127, 0, 0, 1}},
},
true,
},
}
testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
acmeConfig := map[string]interface{}{
"enabled": true,
"allowed_issuers": "*",
"allowed_roles": "*",
"default_directory_policy": "sign-verbatim",
"dns_resolver": "",
"eab_policy_name": "",
}
_, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", acmeConfig)
require.NoError(t, err, "error configuring acme")
for _, tc := range cases {
// First, create our ACME client
accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err, "failed creating rsa key")
acmeClient := getAcmeClientForCluster(t, cluster, "/v1/pki/acme/", accountKey)
discovery, err := acmeClient.Discover(testCtx)
require.NoError(t, err, "failed acme discovery call")
t.Logf("%v", discovery)
acct, err := acmeClient.Register(testCtx, &acme.Account{
Contact: []string{"mailto:[email protected]"},
}, func(tosURL string) bool { return true })
require.NoError(t, err, "failed registering account")
require.Equal(t, acme.StatusValid, acct.Status)
require.Contains(t, acct.Contact, "mailto:[email protected]")
require.Len(t, acct.Contact, 1)
// Create an order
t.Logf("Testing Authorize Order on %s", "pki/acme")
identifiers := make([]string, len(tc.authorizations))
for index, auth := range tc.authorizations {
identifiers[index] = auth.Value
}
createOrder, err := acmeClient.AuthorizeOrder(testCtx, tc.authorizations)
require.NoError(t, err, "failed creating order")
require.Equal(t, acme.StatusPending, createOrder.Status)
require.Empty(t, createOrder.CertURL)
require.Equal(t, createOrder.URI+"/finalize", createOrder.FinalizeURL)
require.Len(t, createOrder.AuthzURLs, len(tc.authorizations), "expected same number of authzurls as identifiers")
// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow
// test.
markAuthorizationSuccess(t, client, acmeClient, acct, createOrder)
// Submit the CSR
csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err, "failed generated key for CSR")
csr, err := x509.CreateCertificateRequest(rand.Reader, &tc.requestCSR, csrKey)
require.NoError(t, err, "failed generating csr")
certs, _, err := acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csr, true)
if tc.valid {
require.NoError(t, err, "failed finalizing order")
// Validate we get a signed cert back
testAcmeCertSignedByCa(t, client, certs, "int-ca")
} else {
require.Error(t, err, "Not a valid CSR, should err")
}
}
}
| builtin/logical/pki/path_acme_test.go | 0 | https://github.com/hashicorp/vault/commit/375c2be624cfd9b19b2f21cbb62a3e453d605f58 | [
0.0007762463064864278,
0.00017751003906596452,
0.00016092161240521818,
0.00016993776080198586,
0.00005319343108567409
] |
{
"id": 8,
"code_window": [
" GOPRIVATE: github.com/hashicorp/*\n",
" TIMEOUT_IN_MINUTES: ${{ inputs.timeout-minutes }}\n",
" steps:\n",
" - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3\n",
" - uses: ./.github/actions/set-up-go\n",
" with:\n",
" github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}\n",
" - name: Authenticate to Vault\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" with:\n",
" ref: ${{ inputs.checkout-ref }}\n"
],
"file_path": ".github/workflows/test-go.yml",
"type": "add",
"edit_start_line_idx": 216
} | /**
* Copyright (c) HashiCorp, Inc.
* SPDX-License-Identifier: MPL-2.0
*/
import type { WithFormFieldsAndValidationsModel } from 'vault/app-types';
import type { FormField } from 'vault/app-types';
import CapabilitiesModel from '../capabilities';
import { LdapDynamicRoleCredentials, LdapStaticRoleCredentials } from 'ldap/routes/roles/role/credentials';
export default interface LdapRoleModel extends WithFormFieldsAndValidationsModel {
type: string;
backend: string;
name: string;
dn: string;
username: string;
rotation_period: string;
default_ttl: string;
max_ttl: string;
username_template: string;
creation_ldif: string;
rollback_ldif: string;
get isStatic(): string;
get isDynamic(): string;
get fieldsForType(): Array<string>;
get displayFields(): Array<FormField>;
get roleUri(): string;
get credsUri(): string;
rolePath: CapabilitiesModel;
credsPath: CapabilitiesModel;
staticRotateCredsPath: CapabilitiesModel;
get canCreate(): boolean;
get canDelete(): boolean;
get canEdit(): boolean;
get canRead(): boolean;
get canList(): boolean;
get canReadCreds(): boolean;
get canRotateStaticCreds(): boolean;
fetchCredentials(): Promise<LdapDynamicRoleCredentials | LdapStaticRoleCredentials>;
rotateStaticPassword(): Promise<void>;
}
| ui/types/vault/models/ldap/role.d.ts | 0 | https://github.com/hashicorp/vault/commit/375c2be624cfd9b19b2f21cbb62a3e453d605f58 | [
0.00017686095088720322,
0.00017043812840711325,
0.0001655581727391109,
0.00016966668772511184,
0.000004117666321690194
] |
{
"id": 8,
"code_window": [
" GOPRIVATE: github.com/hashicorp/*\n",
" TIMEOUT_IN_MINUTES: ${{ inputs.timeout-minutes }}\n",
" steps:\n",
" - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3\n",
" - uses: ./.github/actions/set-up-go\n",
" with:\n",
" github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}\n",
" - name: Authenticate to Vault\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" with:\n",
" ref: ${{ inputs.checkout-ref }}\n"
],
"file_path": ".github/workflows/test-go.yml",
"type": "add",
"edit_start_line_idx": 216
} | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/cloudsqlconn v1.4.3 h1:/WYFbB1NtMtoMxCbqpzzTFPDkxxlLTPme390KEGaEPc=
cloud.google.com/go/cloudsqlconn v1.4.3/go.mod h1:QL3tuStVOO70txb3rs4G8j5uMfo5ztZii8K3oGD3VYA=
cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg=
cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ntlmssp v0.0.0-20220621081337-cb9428e4ac1e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.36.29/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg=
github.com/containerd/containerd v1.7.0/go.mod h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY=
github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A=
github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
github.com/go-ldap/ldap/v3 v3.4.4 h1:qPjipEpt+qDa6SI/h1fzuGWoRUY+qqQ9sOZq67/PYUs=
github.com/go-ldap/ldap/v3 v3.4.4/go.mod h1:fe1MsuN5eJJ1FeLT/LEBVdWfNWKh459R7aXgXtJC+aI=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/tink/go v1.6.1 h1:t7JHqO8Ath2w2ig5vjwQYJzhGEZymedQc90lQXUBa4I=
github.com/google/tink/go v1.6.1/go.mod h1:IGW53kTgag+st5yPhKKwJ6u2l+SSp5/v9XF7spovjlY=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/cap/ldap v0.0.0-20230914221201-c4eecc7e31f7 h1:jgVdtp5YMn++PxnYhAFfrURfLf+nlqzBeddbvRG+tTg=
github.com/hashicorp/cap/ldap v0.0.0-20230914221201-c4eecc7e31f7/go.mod h1:q+c9XV1VqloZFZMu+zdvfb0cm7UrvKbvtmTF5wX5Q9o=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8=
github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk=
github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 h1:9Q2lu1YbbmiAgvYZ7Pr31RdlVonUpX+mmDL7Z7qTA2U=
github.com/hashicorp/go-kms-wrapping/v2 v2.0.8/go.mod h1:qTCjxGig/kjuj3hk1z8pOUrzbse/GxB1tGfbrq8tGJg=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y=
github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ=
github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng=
github.com/hashicorp/go-secure-stdlib/base62 v0.1.2/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw=
github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ=
github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60=
github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo=
github.com/hashicorp/go-secure-stdlib/plugincontainer v0.2.2 h1:lNWQ5KVsLmzjvN11LYqaTXtMrCP7CyxfmTeR3h0l3s8=
github.com/hashicorp/go-secure-stdlib/plugincontainer v0.2.2/go.mod h1:7xQt0+IfRmzYBLpFx+4MYfLpBdd1PT1VatGKRswf7xE=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 h1:phcbL8urUzF/kxA/Oj6awENaRwfWsjP59GW7u2qlDyY=
github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM=
github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q=
github.com/hashicorp/vault/api v1.9.1 h1:LtY/I16+5jVGU8rufyyAkwopgq/HpUnxFBg+QLOAV38=
github.com/hashicorp/vault/api v1.9.1/go.mod h1:78kktNcQYbBGSrOjQfHjXN32OhhxXnbYl3zxpd2uPUs=
github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
github.com/jackc/pgconn v1.14.0 h1:vrbA9Ud87g6JdFWkHTJXppVce58qPIdP7N8y0Ml/A7Q=
github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E=
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0=
github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw=
github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0=
github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE=
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c=
github.com/jimlambrt/gldap v0.1.4 h1:PoB5u4ND0E+6W99JtQJvcjGFw+iKi3Gx3M60oOJBOqE=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microsoft/go-mssqldb v1.5.0 h1:CgENxkwtOBNj3Jg6T1X209y2blCfTTcwuOlznd2k9fk=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8=
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
github.com/opencontainers/runc v1.1.6 h1:XbhB8IfG/EsnhNvZtNdLB0GBw92GYEFvKlhaJk9jUgA=
github.com/opencontainers/runc v1.1.6/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw=
google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e h1:xIXmWJ303kJCuogpj0bHq+dcjcZHU+XFyc1I0Yl9cRg=
google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5 h1:eSaPbMR4T7WfH9FvABk36NBMacoTUKdWCvV0dx+KfOg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go.mod h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
| sdk/go.sum | 0 | https://github.com/hashicorp/vault/commit/375c2be624cfd9b19b2f21cbb62a3e453d605f58 | [
0.00503372261300683,
0.0007637011585757136,
0.00016278574184980243,
0.0003211698494851589,
0.0010117888450622559
] |
{
"id": 0,
"code_window": [
"\tif len(config.HealthzBindAddress) > 0 {\n",
"\t\ts.HealthzServer = healthcheck.NewProxierHealthServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, s.Recorder, s.NodeRef)\n",
"\t}\n",
"\n",
"\ts.Proxier, err = s.createProxier(config)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\terr = s.platformSetup()\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n",
"\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 569
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
goflag "flag"
"fmt"
"net"
"net/http"
"os"
"strings"
"time"
utilnode "k8s.io/kubernetes/pkg/util/node"
"github.com/fsnotify/fsnotify"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/apiserver/pkg/server/mux"
"k8s.io/apiserver/pkg/server/routes"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/tools/events"
cliflag "k8s.io/component-base/cli/flag"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/component-base/configz"
"k8s.io/component-base/logs"
logsapi "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/metrics"
metricsfeatures "k8s.io/component-base/metrics/features"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/component-base/metrics/prometheus/slis"
"k8s.io/component-base/version"
"k8s.io/component-base/version/verflag"
nodeutil "k8s.io/component-helpers/node/util"
"k8s.io/klog/v2"
"k8s.io/kube-proxy/config/v1alpha1"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/cluster/ports"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/apis"
kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
proxyconfigscheme "k8s.io/kubernetes/pkg/proxy/apis/config/scheme"
kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1"
"k8s.io/kubernetes/pkg/proxy/apis/config/validation"
"k8s.io/kubernetes/pkg/proxy/config"
"k8s.io/kubernetes/pkg/proxy/healthcheck"
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
"k8s.io/kubernetes/pkg/util/filesystem"
utilflag "k8s.io/kubernetes/pkg/util/flag"
"k8s.io/kubernetes/pkg/util/oom"
netutils "k8s.io/utils/net"
"k8s.io/utils/pointer"
)
func init() {
utilruntime.Must(metricsfeatures.AddFeatureGates(utilfeature.DefaultMutableFeatureGate))
logsapi.AddFeatureGates(utilfeature.DefaultMutableFeatureGate)
}
// proxyRun defines the interface to run a specified ProxyServer
type proxyRun interface {
Run() error
}
// Options contains everything necessary to create and run a proxy server.
type Options struct {
// ConfigFile is the location of the proxy server's configuration file.
ConfigFile string
// WriteConfigTo is the path where the default configuration will be written.
WriteConfigTo string
// CleanupAndExit, when true, makes the proxy server clean up iptables and ipvs rules, then exit.
CleanupAndExit bool
// WindowsService should be set to true if kube-proxy is running as a service on Windows.
// Its corresponding flag only gets registered in Windows builds
WindowsService bool
// config is the proxy server's configuration object.
config *kubeproxyconfig.KubeProxyConfiguration
// watcher is used to watch on the update change of ConfigFile
watcher filesystem.FSWatcher
// proxyServer is the interface to run the proxy server
proxyServer proxyRun
// errCh is the channel that errors will be sent
errCh chan error
// The fields below here are placeholders for flags that can't be directly mapped into
// config.KubeProxyConfiguration.
//
// TODO remove these fields once the deprecated flags are removed.
// master is used to override the kubeconfig's URL to the apiserver.
master string
// healthzPort is the port to be used by the healthz server.
healthzPort int32
// metricsPort is the port to be used by the metrics server.
metricsPort int32
// hostnameOverride, if set from the command line flag, takes precedence over the `HostnameOverride` value from the config file
hostnameOverride string
}
// AddFlags adds flags to fs and binds them to options.
func (o *Options) AddFlags(fs *pflag.FlagSet) {
o.addOSFlags(fs)
fs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "The path to the configuration file.")
fs.StringVar(&o.WriteConfigTo, "write-config-to", o.WriteConfigTo, "If set, write the default configuration values to this file and exit.")
fs.StringVar(&o.config.ClientConnection.Kubeconfig, "kubeconfig", o.config.ClientConnection.Kubeconfig, "Path to kubeconfig file with authorization information (the master location can be overridden by the master flag).")
fs.StringVar(&o.config.ClusterCIDR, "cluster-cidr", o.config.ClusterCIDR, "The CIDR range of pods in the cluster. When configured, traffic sent to a Service cluster IP from outside this range will be masqueraded and traffic sent from pods to an external LoadBalancer IP will be directed to the respective cluster IP instead. "+
"For dual-stack clusters, a comma-separated list is accepted with at least one CIDR per IP family (IPv4 and IPv6). "+
"This parameter is ignored if a config file is specified by --config.")
fs.StringVar(&o.config.ClientConnection.ContentType, "kube-api-content-type", o.config.ClientConnection.ContentType, "Content type of requests sent to apiserver.")
fs.StringVar(&o.master, "master", o.master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.StringVar(&o.hostnameOverride, "hostname-override", o.hostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.")
fs.StringVar(&o.config.IPVS.Scheduler, "ipvs-scheduler", o.config.IPVS.Scheduler, "The ipvs scheduler type when proxy mode is ipvs")
fs.StringVar(&o.config.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version", o.config.ShowHiddenMetricsForVersion,
"The previous version for which you want to show hidden metrics. "+
"Only the previous minor version is meaningful, other values will not be allowed. "+
"The format is <major>.<minor>, e.g.: '1.16'. "+
"The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, "+
"rather than being surprised when they are permanently removed in the release after that. "+
"This parameter is ignored if a config file is specified by --config.")
fs.StringSliceVar(&o.config.IPVS.ExcludeCIDRs, "ipvs-exclude-cidrs", o.config.IPVS.ExcludeCIDRs, "A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules.")
fs.StringSliceVar(&o.config.NodePortAddresses, "nodeport-addresses", o.config.NodePortAddresses,
"A string slice of values which specify the addresses to use for NodePorts. Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. This parameter is ignored if a config file is specified by --config.")
fs.BoolVar(&o.CleanupAndExit, "cleanup", o.CleanupAndExit, "If true cleanup iptables and ipvs rules and exit.")
fs.Var(&utilflag.IPVar{Val: &o.config.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces). This parameter is ignored if a config file is specified by --config.")
fs.Var(&utilflag.IPPortVar{Val: &o.config.HealthzBindAddress}, "healthz-bind-address", "The IP address with port for the health check server to serve on (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. This parameter is ignored if a config file is specified by --config.")
fs.Var(&utilflag.IPPortVar{Val: &o.config.MetricsBindAddress}, "metrics-bind-address", "The IP address with port for the metrics server to serve on (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. This parameter is ignored if a config file is specified by --config.")
fs.BoolVar(&o.config.BindAddressHardFail, "bind-address-hard-fail", o.config.BindAddressHardFail, "If true kube-proxy will treat failure to bind to a port as fatal and exit")
fs.Var(utilflag.PortRangeVar{Val: &o.config.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) that may be consumed in order to proxy service traffic. If (unspecified, 0, or 0-0) then ports will be randomly chosen.")
fs.Var(&o.config.Mode, "proxy-mode", "Which proxy mode to use: on Linux this can be 'iptables' (default) or 'ipvs'. On Windows the only supported value is 'kernelspace'."+
"This parameter is ignored if a config file is specified by --config.")
fs.Var(cliflag.NewMapStringBool(&o.config.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+
"Options are:\n"+strings.Join(utilfeature.DefaultFeatureGate.KnownFeatures(), "\n")+"\n"+
"This parameter is ignored if a config file is specified by --config.")
fs.Int32Var(&o.healthzPort, "healthz-port", o.healthzPort, "The port to bind the health check server. Use 0 to disable.")
fs.MarkDeprecated("healthz-port", "This flag is deprecated and will be removed in a future release. Please use --healthz-bind-address instead.")
fs.Int32Var(&o.metricsPort, "metrics-port", o.metricsPort, "The port to bind the metrics server. Use 0 to disable.")
fs.MarkDeprecated("metrics-port", "This flag is deprecated and will be removed in a future release. Please use --metrics-bind-address instead.")
fs.Int32Var(o.config.OOMScoreAdj, "oom-score-adj", pointer.Int32Deref(o.config.OOMScoreAdj, int32(qos.KubeProxyOOMScoreAdj)), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]. This parameter is ignored if a config file is specified by --config.")
fs.Int32Var(o.config.IPTables.MasqueradeBit, "iptables-masquerade-bit", pointer.Int32Deref(o.config.IPTables.MasqueradeBit, 14), "If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].")
fs.Int32Var(o.config.Conntrack.MaxPerCore, "conntrack-max-per-core", *o.config.Conntrack.MaxPerCore,
"Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min).")
fs.Int32Var(o.config.Conntrack.Min, "conntrack-min", *o.config.Conntrack.Min,
"Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is).")
fs.Int32Var(&o.config.ClientConnection.Burst, "kube-api-burst", o.config.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver")
fs.DurationVar(&o.config.IPTables.SyncPeriod.Duration, "iptables-sync-period", o.config.IPTables.SyncPeriod.Duration, "The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
fs.DurationVar(&o.config.IPTables.MinSyncPeriod.Duration, "iptables-min-sync-period", o.config.IPTables.MinSyncPeriod.Duration, "The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m').")
fs.DurationVar(&o.config.IPVS.SyncPeriod.Duration, "ipvs-sync-period", o.config.IPVS.SyncPeriod.Duration, "The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
fs.DurationVar(&o.config.IPVS.MinSyncPeriod.Duration, "ipvs-min-sync-period", o.config.IPVS.MinSyncPeriod.Duration, "The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m').")
fs.DurationVar(&o.config.IPVS.TCPTimeout.Duration, "ipvs-tcp-timeout", o.config.IPVS.TCPTimeout.Duration, "The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').")
fs.DurationVar(&o.config.IPVS.TCPFinTimeout.Duration, "ipvs-tcpfin-timeout", o.config.IPVS.TCPFinTimeout.Duration, "The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').")
fs.DurationVar(&o.config.IPVS.UDPTimeout.Duration, "ipvs-udp-timeout", o.config.IPVS.UDPTimeout.Duration, "The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').")
fs.DurationVar(&o.config.Conntrack.TCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", o.config.Conntrack.TCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)")
fs.DurationVar(
&o.config.Conntrack.TCPCloseWaitTimeout.Duration, "conntrack-tcp-timeout-close-wait",
o.config.Conntrack.TCPCloseWaitTimeout.Duration,
"NAT timeout for TCP connections in the CLOSE_WAIT state")
fs.DurationVar(&o.config.ConfigSyncPeriod.Duration, "config-sync-period", o.config.ConfigSyncPeriod.Duration, "How often configuration from the apiserver is refreshed. Must be greater than 0.")
fs.BoolVar(&o.config.IPVS.StrictARP, "ipvs-strict-arp", o.config.IPVS.StrictARP, "Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2")
	fs.BoolVar(&o.config.IPTables.MasqueradeAll, "masquerade-all", o.config.IPTables.MasqueradeAll, "If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this is not commonly needed)")
	fs.BoolVar(o.config.IPTables.LocalhostNodePorts, "iptables-localhost-nodeports", pointer.BoolDeref(o.config.IPTables.LocalhostNodePorts, true), "If false, Kube-proxy will disable the legacy behavior of allowing NodePort services to be accessed via localhost. This only applies to iptables mode and ipv4.")
fs.BoolVar(&o.config.EnableProfiling, "profiling", o.config.EnableProfiling, "If true enables profiling via web interface on /debug/pprof handler. This parameter is ignored if a config file is specified by --config.")
fs.Float32Var(&o.config.ClientConnection.QPS, "kube-api-qps", o.config.ClientConnection.QPS, "QPS to use while talking with kubernetes apiserver")
fs.Var(&o.config.DetectLocalMode, "detect-local-mode", "Mode to use to detect local traffic. This parameter is ignored if a config file is specified by --config.")
fs.StringVar(&o.config.DetectLocal.BridgeInterface, "pod-bridge-interface", o.config.DetectLocal.BridgeInterface, "A bridge interface name in the cluster. Kube-proxy considers traffic as local if originating from an interface which matches the value. This argument should be set if DetectLocalMode is set to BridgeInterface.")
fs.StringVar(&o.config.DetectLocal.InterfaceNamePrefix, "pod-interface-name-prefix", o.config.DetectLocal.InterfaceNamePrefix, "An interface prefix in the cluster. Kube-proxy considers traffic as local if originating from interfaces that match the given prefix. This argument should be set if DetectLocalMode is set to InterfaceNamePrefix.")
}
// newKubeProxyConfiguration returns a KubeProxyConfiguration with default values
func newKubeProxyConfiguration() *kubeproxyconfig.KubeProxyConfiguration {
versionedConfig := &v1alpha1.KubeProxyConfiguration{}
proxyconfigscheme.Scheme.Default(versionedConfig)
internalConfig, err := proxyconfigscheme.Scheme.ConvertToVersion(versionedConfig, kubeproxyconfig.SchemeGroupVersion)
if err != nil {
panic(fmt.Sprintf("Unable to create default config: %v", err))
}
return internalConfig.(*kubeproxyconfig.KubeProxyConfiguration)
}
// NewOptions returns initialized Options
func NewOptions() *Options {
return &Options{
config: newKubeProxyConfiguration(),
healthzPort: ports.ProxyHealthzPort,
metricsPort: ports.ProxyStatusPort,
errCh: make(chan error),
}
}
// Complete completes all the required options.
func (o *Options) Complete() error {
if len(o.ConfigFile) == 0 && len(o.WriteConfigTo) == 0 {
o.config.HealthzBindAddress = addressFromDeprecatedFlags(o.config.HealthzBindAddress, o.healthzPort)
o.config.MetricsBindAddress = addressFromDeprecatedFlags(o.config.MetricsBindAddress, o.metricsPort)
}
// Load the config file here in Complete, so that Validate validates the fully-resolved config.
if len(o.ConfigFile) > 0 {
c, err := o.loadConfigFromFile(o.ConfigFile)
if err != nil {
return err
}
o.config = c
if err := o.initWatcher(); err != nil {
return err
}
}
o.platformApplyDefaults(o.config)
if err := o.processHostnameOverrideFlag(); err != nil {
return err
}
return utilfeature.DefaultMutableFeatureGate.SetFromMap(o.config.FeatureGates)
}
// Creates a new filesystem watcher and adds watches for the config file.
func (o *Options) initWatcher() error {
fswatcher := filesystem.NewFsnotifyWatcher()
err := fswatcher.Init(o.eventHandler, o.errorHandler)
if err != nil {
return err
}
err = fswatcher.AddWatch(o.ConfigFile)
if err != nil {
return err
}
o.watcher = fswatcher
return nil
}
func (o *Options) eventHandler(ent fsnotify.Event) {
if ent.Has(fsnotify.Write) || ent.Has(fsnotify.Rename) {
// error out when ConfigFile is updated
o.errCh <- fmt.Errorf("content of the proxy server's configuration file was updated")
return
}
o.errCh <- nil
}
func (o *Options) errorHandler(err error) {
o.errCh <- err
}
// processHostnameOverrideFlag processes hostname-override flag
func (o *Options) processHostnameOverrideFlag() error {
// Check if hostname-override flag is set and use value since configFile always overrides
if len(o.hostnameOverride) > 0 {
hostName := strings.TrimSpace(o.hostnameOverride)
if len(hostName) == 0 {
return fmt.Errorf("empty hostname-override is invalid")
}
o.config.HostnameOverride = strings.ToLower(hostName)
}
return nil
}
// Validate validates all the required options.
func (o *Options) Validate() error {
if errs := validation.Validate(o.config); len(errs) != 0 {
return errs.ToAggregate()
}
return nil
}
// Run runs the specified ProxyServer.
func (o *Options) Run() error {
defer close(o.errCh)
if len(o.WriteConfigTo) > 0 {
return o.writeConfigFile()
}
if o.CleanupAndExit {
return cleanupAndExit()
}
proxyServer, err := newProxyServer(o.config, o.master)
if err != nil {
return err
}
o.proxyServer = proxyServer
return o.runLoop()
}
// runLoop will watch on the update change of the proxy server's configuration file.
// Return an error when updated
func (o *Options) runLoop() error {
if o.watcher != nil {
o.watcher.Run()
}
// run the proxy in goroutine
go func() {
err := o.proxyServer.Run()
o.errCh <- err
}()
for {
err := <-o.errCh
if err != nil {
return err
}
}
}
func (o *Options) writeConfigFile() (err error) {
const mediaType = runtime.ContentTypeYAML
info, ok := runtime.SerializerInfoForMediaType(proxyconfigscheme.Codecs.SupportedMediaTypes(), mediaType)
if !ok {
return fmt.Errorf("unable to locate encoder -- %q is not a supported media type", mediaType)
}
encoder := proxyconfigscheme.Codecs.EncoderForVersion(info.Serializer, v1alpha1.SchemeGroupVersion)
configFile, err := os.Create(o.WriteConfigTo)
if err != nil {
return err
}
defer func() {
ferr := configFile.Close()
if ferr != nil && err == nil {
err = ferr
}
}()
if err = encoder.Encode(o.config, configFile); err != nil {
return err
}
klog.InfoS("Wrote configuration", "file", o.WriteConfigTo)
return nil
}
// addressFromDeprecatedFlags returns server address from flags
// passed on the command line based on the following rules:
// 1. If port is 0, disable the server (e.g. set address to empty).
// 2. Otherwise, set the port portion of the config accordingly.
func addressFromDeprecatedFlags(addr string, port int32) string {
if port == 0 {
return ""
}
return proxyutil.AppendPortIfNeeded(addr, port)
}
// newLenientSchemeAndCodecs returns a scheme that has only v1alpha1 registered into
// it and a CodecFactory with strict decoding disabled.
func newLenientSchemeAndCodecs() (*runtime.Scheme, *serializer.CodecFactory, error) {
lenientScheme := runtime.NewScheme()
if err := kubeproxyconfig.AddToScheme(lenientScheme); err != nil {
return nil, nil, fmt.Errorf("failed to add kube-proxy config API to lenient scheme: %v", err)
}
if err := kubeproxyconfigv1alpha1.AddToScheme(lenientScheme); err != nil {
return nil, nil, fmt.Errorf("failed to add kube-proxy config v1alpha1 API to lenient scheme: %v", err)
}
lenientCodecs := serializer.NewCodecFactory(lenientScheme, serializer.DisableStrict)
return lenientScheme, &lenientCodecs, nil
}
// loadConfigFromFile loads the contents of file and decodes it as a
// KubeProxyConfiguration object.
func (o *Options) loadConfigFromFile(file string) (*kubeproxyconfig.KubeProxyConfiguration, error) {
data, err := os.ReadFile(file)
if err != nil {
return nil, err
}
return o.loadConfig(data)
}
// loadConfig decodes a serialized KubeProxyConfiguration to the internal type.
func (o *Options) loadConfig(data []byte) (*kubeproxyconfig.KubeProxyConfiguration, error) {
configObj, gvk, err := proxyconfigscheme.Codecs.UniversalDecoder().Decode(data, nil, nil)
if err != nil {
// Try strict decoding first. If that fails decode with a lenient
// decoder, which has only v1alpha1 registered, and log a warning.
// The lenient path is to be dropped when support for v1alpha1 is dropped.
if !runtime.IsStrictDecodingError(err) {
return nil, fmt.Errorf("failed to decode: %w", err)
}
_, lenientCodecs, lenientErr := newLenientSchemeAndCodecs()
if lenientErr != nil {
return nil, lenientErr
}
configObj, gvk, lenientErr = lenientCodecs.UniversalDecoder().Decode(data, nil, nil)
if lenientErr != nil {
// Lenient decoding failed with the current version, return the
// original strict error.
return nil, fmt.Errorf("failed lenient decoding: %v", err)
}
// Continue with the v1alpha1 object that was decoded leniently, but emit a warning.
klog.InfoS("Using lenient decoding as strict decoding failed", "err", err)
}
proxyConfig, ok := configObj.(*kubeproxyconfig.KubeProxyConfiguration)
if !ok {
return nil, fmt.Errorf("got unexpected config type: %v", gvk)
}
return proxyConfig, nil
}
// NewProxyCommand creates a *cobra.Command object with default parameters
func NewProxyCommand() *cobra.Command {
opts := NewOptions()
cmd := &cobra.Command{
Use: "kube-proxy",
Long: `The Kubernetes network proxy runs on each node. This
reflects services as defined in the Kubernetes API on each node and can do simple
TCP, UDP, and SCTP stream forwarding or round robin TCP, UDP, and SCTP forwarding across a set of backends.
Service cluster IPs and ports are currently found through Docker-links-compatible
environment variables specifying ports opened by the service proxy. There is an optional
addon that provides cluster DNS for these cluster IPs. The user must create a service
with the apiserver API to configure the proxy.`,
RunE: func(cmd *cobra.Command, args []string) error {
verflag.PrintAndExitIfRequested()
cliflag.PrintFlags(cmd.Flags())
if err := initForOS(opts.WindowsService); err != nil {
return fmt.Errorf("failed os init: %w", err)
}
if err := opts.Complete(); err != nil {
return fmt.Errorf("failed complete: %w", err)
}
if err := opts.Validate(); err != nil {
return fmt.Errorf("failed validate: %w", err)
}
// add feature enablement metrics
utilfeature.DefaultMutableFeatureGate.AddMetrics()
if err := opts.Run(); err != nil {
klog.ErrorS(err, "Error running ProxyServer")
return err
}
return nil
},
Args: func(cmd *cobra.Command, args []string) error {
for _, arg := range args {
if len(arg) > 0 {
return fmt.Errorf("%q does not take any arguments, got %q", cmd.CommandPath(), args)
}
}
return nil
},
}
fs := cmd.Flags()
opts.AddFlags(fs)
fs.AddGoFlagSet(goflag.CommandLine) // for --boot-id-file and --machine-id-file
_ = cmd.MarkFlagFilename("config", "yaml", "yml", "json")
return cmd
}
// ProxyServer represents all the parameters required to start the Kubernetes proxy server. All
// fields are required.
type ProxyServer struct {
Config *kubeproxyconfig.KubeProxyConfiguration
Client clientset.Interface
Broadcaster events.EventBroadcaster
Recorder events.EventRecorder
NodeRef *v1.ObjectReference
HealthzServer healthcheck.ProxierHealthUpdater
Hostname string
PrimaryIPFamily v1.IPFamily
NodeIPs map[v1.IPFamily]net.IP
podCIDRs []string // only used for LocalModeNodeCIDR
Proxier proxy.Provider
}
// newProxyServer creates a ProxyServer based on the given config
func newProxyServer(config *kubeproxyconfig.KubeProxyConfiguration, master string) (*ProxyServer, error) {
s := &ProxyServer{Config: config}
cz, err := configz.New(kubeproxyconfig.GroupName)
if err != nil {
return nil, fmt.Errorf("unable to register configz: %s", err)
}
cz.Set(config)
if len(config.ShowHiddenMetricsForVersion) > 0 {
metrics.SetShowHidden()
}
s.Hostname, err = nodeutil.GetHostname(config.HostnameOverride)
if err != nil {
return nil, err
}
s.Client, err = createClient(config.ClientConnection, master)
if err != nil {
return nil, err
}
s.PrimaryIPFamily, s.NodeIPs = detectNodeIPs(s.Client, s.Hostname, config.BindAddress)
s.Broadcaster = events.NewBroadcaster(&events.EventSinkImpl{Interface: s.Client.EventsV1()})
s.Recorder = s.Broadcaster.NewRecorder(proxyconfigscheme.Scheme, "kube-proxy")
s.NodeRef = &v1.ObjectReference{
Kind: "Node",
Name: s.Hostname,
UID: types.UID(s.Hostname),
Namespace: "",
}
if len(config.HealthzBindAddress) > 0 {
s.HealthzServer = healthcheck.NewProxierHealthServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, s.Recorder, s.NodeRef)
}
s.Proxier, err = s.createProxier(config)
if err != nil {
return nil, err
}
return s, nil
}
// createClient creates a kube client from the given config and masterOverride.
// TODO remove masterOverride when CLI flags are removed.
func createClient(config componentbaseconfig.ClientConnectionConfiguration, masterOverride string) (clientset.Interface, error) {
var kubeConfig *rest.Config
var err error
if len(config.Kubeconfig) == 0 && len(masterOverride) == 0 {
klog.InfoS("Neither kubeconfig file nor master URL was specified, falling back to in-cluster config")
kubeConfig, err = rest.InClusterConfig()
} else {
// This creates a client, first loading any specified kubeconfig
// file, and then overriding the Master flag, if non-empty.
kubeConfig, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterOverride}}).ClientConfig()
}
if err != nil {
return nil, err
}
kubeConfig.AcceptContentTypes = config.AcceptContentTypes
kubeConfig.ContentType = config.ContentType
kubeConfig.QPS = config.QPS
kubeConfig.Burst = int(config.Burst)
client, err := clientset.NewForConfig(kubeConfig)
if err != nil {
return nil, err
}
return client, nil
}
func serveHealthz(hz healthcheck.ProxierHealthUpdater, errCh chan error) {
if hz == nil {
return
}
fn := func() {
err := hz.Run()
if err != nil {
klog.ErrorS(err, "Healthz server failed")
if errCh != nil {
errCh <- fmt.Errorf("healthz server failed: %v", err)
// if in hardfail mode, never retry again
blockCh := make(chan error)
<-blockCh
}
} else {
klog.ErrorS(nil, "Healthz server returned without error")
}
}
go wait.Until(fn, 5*time.Second, wait.NeverStop)
}
func serveMetrics(bindAddress string, proxyMode kubeproxyconfig.ProxyMode, enableProfiling bool, errCh chan error) {
if len(bindAddress) == 0 {
return
}
proxyMux := mux.NewPathRecorderMux("kube-proxy")
healthz.InstallHandler(proxyMux)
if utilfeature.DefaultFeatureGate.Enabled(metricsfeatures.ComponentSLIs) {
slis.SLIMetricsWithReset{}.Install(proxyMux)
}
proxyMux.HandleFunc("/proxyMode", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Content-Type-Options", "nosniff")
fmt.Fprintf(w, "%s", proxyMode)
})
proxyMux.Handle("/metrics", legacyregistry.Handler())
if enableProfiling {
routes.Profiling{}.Install(proxyMux)
routes.DebugFlags{}.Install(proxyMux, "v", routes.StringFlagPutHandler(logs.GlogSetter))
}
configz.InstallHandler(proxyMux)
fn := func() {
err := http.ListenAndServe(bindAddress, proxyMux)
if err != nil {
err = fmt.Errorf("starting metrics server failed: %v", err)
utilruntime.HandleError(err)
if errCh != nil {
errCh <- err
// if in hardfail mode, never retry again
blockCh := make(chan error)
<-blockCh
}
}
}
go wait.Until(fn, 5*time.Second, wait.NeverStop)
}
// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).
// TODO: At the moment, Run() cannot return a nil error, otherwise its caller will never exit. Update callers of Run to handle nil errors.
func (s *ProxyServer) Run() error {
// To help debugging, immediately log version
klog.InfoS("Version info", "version", version.Get())
klog.InfoS("Golang settings", "GOGC", os.Getenv("GOGC"), "GOMAXPROCS", os.Getenv("GOMAXPROCS"), "GOTRACEBACK", os.Getenv("GOTRACEBACK"))
// TODO(vmarmol): Use container config for this.
var oomAdjuster *oom.OOMAdjuster
if s.Config.OOMScoreAdj != nil {
oomAdjuster = oom.NewOOMAdjuster()
if err := oomAdjuster.ApplyOOMScoreAdj(0, int(*s.Config.OOMScoreAdj)); err != nil {
klog.V(2).InfoS("Failed to apply OOMScore", "err", err)
}
}
if s.Broadcaster != nil {
stopCh := make(chan struct{})
s.Broadcaster.StartRecordingToSink(stopCh)
}
// TODO(thockin): make it possible for healthz and metrics to be on the same port.
var errCh chan error
if s.Config.BindAddressHardFail {
errCh = make(chan error)
}
// Start up a healthz server if requested
serveHealthz(s.HealthzServer, errCh)
// Start up a metrics server if requested
serveMetrics(s.Config.MetricsBindAddress, s.Config.Mode, s.Config.EnableProfiling, errCh)
// Do platform-specific setup
err := s.platformSetup()
if err != nil {
return err
}
noProxyName, err := labels.NewRequirement(apis.LabelServiceProxyName, selection.DoesNotExist, nil)
if err != nil {
return err
}
noHeadlessEndpoints, err := labels.NewRequirement(v1.IsHeadlessService, selection.DoesNotExist, nil)
if err != nil {
return err
}
labelSelector := labels.NewSelector()
labelSelector = labelSelector.Add(*noProxyName, *noHeadlessEndpoints)
// Make informers that filter out objects that want a non-default service proxy.
informerFactory := informers.NewSharedInformerFactoryWithOptions(s.Client, s.Config.ConfigSyncPeriod.Duration,
informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.LabelSelector = labelSelector.String()
}))
// Create configs (i.e. Watches for Services and EndpointSlices)
// Note: RegisterHandler() calls need to happen before creation of Sources because sources
// only notify on changes, and the initial update (on process start) may be lost if no handlers
// are registered yet.
serviceConfig := config.NewServiceConfig(informerFactory.Core().V1().Services(), s.Config.ConfigSyncPeriod.Duration)
serviceConfig.RegisterEventHandler(s.Proxier)
go serviceConfig.Run(wait.NeverStop)
endpointSliceConfig := config.NewEndpointSliceConfig(informerFactory.Discovery().V1().EndpointSlices(), s.Config.ConfigSyncPeriod.Duration)
endpointSliceConfig.RegisterEventHandler(s.Proxier)
go endpointSliceConfig.Run(wait.NeverStop)
// This has to start after the calls to NewServiceConfig because that
// function must configure its shared informer event handlers first.
informerFactory.Start(wait.NeverStop)
// Make an informer that selects for our nodename.
currentNodeInformerFactory := informers.NewSharedInformerFactoryWithOptions(s.Client, s.Config.ConfigSyncPeriod.Duration,
informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fields.OneTermEqualSelector("metadata.name", s.NodeRef.Name).String()
}))
nodeConfig := config.NewNodeConfig(currentNodeInformerFactory.Core().V1().Nodes(), s.Config.ConfigSyncPeriod.Duration)
// https://issues.k8s.io/111321
if s.Config.DetectLocalMode == kubeproxyconfig.LocalModeNodeCIDR {
nodeConfig.RegisterEventHandler(proxy.NewNodePodCIDRHandler(s.podCIDRs))
}
nodeConfig.RegisterEventHandler(s.Proxier)
go nodeConfig.Run(wait.NeverStop)
// This has to start after the calls to NewNodeConfig because that must
// configure the shared informer event handler first.
currentNodeInformerFactory.Start(wait.NeverStop)
// Birth Cry after the birth is successful
s.birthCry()
go s.Proxier.SyncLoop()
return <-errCh
}
func (s *ProxyServer) birthCry() {
s.Recorder.Eventf(s.NodeRef, nil, api.EventTypeNormal, "Starting", "StartKubeProxy", "")
}
// detectNodeIPs returns the proxier's "node IP" or IPs, and the IP family to use if the
// node turns out to be incapable of dual-stack. (Note that kube-proxy normally runs as
// dual-stack if the backend is capable of supporting both IP families, regardless of
// whether the node is *actually* configured as dual-stack or not.)
// (Note that on Linux, the node IPs are used only to determine whether a given
// LoadBalancerSourceRanges value matches the node or not. In particular, they are *not*
// used for NodePort handling.)
//
// The order of precedence is:
// 1. if bindAddress is not 0.0.0.0 or ::, then it is used as the primary IP.
// 2. if the Node object can be fetched, then its primary IP is used as the primary IP
// (and its secondary IP, if any, is just ignored).
// 3. otherwise the primary node IP is 127.0.0.1.
//
// In all cases, the secondary IP is the zero IP of the other IP family.
func detectNodeIPs(client clientset.Interface, hostname, bindAddress string) (v1.IPFamily, map[v1.IPFamily]net.IP) {
nodeIP := netutils.ParseIPSloppy(bindAddress)
if nodeIP.IsUnspecified() {
nodeIP = utilnode.GetNodeIP(client, hostname)
}
if nodeIP == nil {
klog.InfoS("Can't determine this node's IP, assuming 127.0.0.1; if this is incorrect, please set the --bind-address flag")
nodeIP = netutils.ParseIPSloppy("127.0.0.1")
}
if netutils.IsIPv4(nodeIP) {
return v1.IPv4Protocol, map[v1.IPFamily]net.IP{
v1.IPv4Protocol: nodeIP,
v1.IPv6Protocol: net.IPv6zero,
}
} else {
return v1.IPv6Protocol, map[v1.IPFamily]net.IP{
v1.IPv4Protocol: net.IPv4zero,
v1.IPv6Protocol: nodeIP,
}
}
}
| cmd/kube-proxy/app/server.go | 1 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.9979909658432007,
0.02762879990041256,
0.00016293344378937036,
0.00018594003631733358,
0.14255696535110474
] |
{
"id": 0,
"code_window": [
"\tif len(config.HealthzBindAddress) > 0 {\n",
"\t\ts.HealthzServer = healthcheck.NewProxierHealthServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, s.Recorder, s.NodeRef)\n",
"\t}\n",
"\n",
"\ts.Proxier, err = s.createProxier(config)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\terr = s.platformSetup()\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n",
"\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 569
} | apiVersion: v1
kind: Pod
metadata:
name: procmount1
spec:
containers:
- image: registry.k8s.io/pause
name: container1
securityContext: {}
initContainers:
- image: registry.k8s.io/pause
name: initcontainer1
securityContext:
procMount: Unmasked
securityContext: {}
| staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.5/fail/procmount1.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00017860255320556462,
0.00017765184747986495,
0.00017670115630608052,
0.00017765184747986495,
9.50698449742049e-7
] |
{
"id": 0,
"code_window": [
"\tif len(config.HealthzBindAddress) > 0 {\n",
"\t\ts.HealthzServer = healthcheck.NewProxierHealthServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, s.Recorder, s.NodeRef)\n",
"\t}\n",
"\n",
"\ts.Proxier, err = s.createProxier(config)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\terr = s.platformSetup()\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n",
"\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 569
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"sort"
"strings"
"github.com/pkg/errors"
"k8s.io/klog/v2"
)
// BuildArgumentListFromMap takes two string-string maps, one with the base arguments and one
// with optional override arguments. Override arguments take precedence over base arguments
// with the same key, and the resulting list is sorted by flag name.
func BuildArgumentListFromMap(baseArguments map[string]string, overrideArguments map[string]string) []string {
var command []string
var keys []string
argsMap := make(map[string]string)
for k, v := range baseArguments {
argsMap[k] = v
}
for k, v := range overrideArguments {
argsMap[k] = v
}
for k := range argsMap {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
command = append(command, fmt.Sprintf("--%s=%s", k, argsMap[k]))
}
return command
}
// ParseArgumentListToMap parses a CLI argument list in the form "--foo=bar" to a string-string map
func ParseArgumentListToMap(arguments []string) map[string]string {
resultingMap := map[string]string{}
for i, arg := range arguments {
key, val, err := parseArgument(arg)
// Ignore the first argument if it doesn't satisfy the criteria; it's most often the binary name.
// Warn in all other cases, but don't error out. This can happen only if the user has edited the argument list by hand, so they might know what they are doing
if err != nil {
if i != 0 {
klog.Warningf("[kubeadm] WARNING: The component argument %q could not be parsed correctly. The argument must be of the form %q. Skipping...\n", arg, "--")
}
continue
}
resultingMap[key] = val
}
return resultingMap
}
// ReplaceArgument gets a command list; converts it to a map for easier modification, runs the provided function that
// returns a new modified map, and then converts the map back to a command string slice
func ReplaceArgument(command []string, argMutateFunc func(map[string]string) map[string]string) []string {
argMap := ParseArgumentListToMap(command)
// Save the first command (the executable) if we're sure it's not an argument (i.e. no --)
var newCommand []string
if len(command) > 0 && !strings.HasPrefix(command[0], "--") {
newCommand = append(newCommand, command[0])
}
newArgMap := argMutateFunc(argMap)
newCommand = append(newCommand, BuildArgumentListFromMap(newArgMap, map[string]string{})...)
return newCommand
}
// parseArgument parses the argument "--foo=bar" to "foo" and "bar"
func parseArgument(arg string) (string, string, error) {
if !strings.HasPrefix(arg, "--") {
return "", "", errors.New("the argument should start with '--'")
}
if !strings.Contains(arg, "=") {
return "", "", errors.New("the argument should have a '=' between the flag and the value")
}
// Remove the starting --
arg = strings.TrimPrefix(arg, "--")
// Split the string on =. Return only two substrings, since we want only key/value, but the value can include '=' as well
keyvalSlice := strings.SplitN(arg, "=", 2)
// Make sure both a key and a value are present
if len(keyvalSlice) != 2 {
return "", "", errors.New("the argument must have both a key and a value")
}
if len(keyvalSlice[0]) == 0 {
return "", "", errors.New("the argument must have a key")
}
return keyvalSlice[0], keyvalSlice[1], nil
}
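// Illustrative usage (a sketch, assumed to live in a _test.go file of this package):
// round-trips a command line through the helpers above and bumps one flag.
func ExampleReplaceArgument() {
	cmd := []string{"kube-apiserver", "--v=2", "--secure-port=6443"}
	newCmd := ReplaceArgument(cmd, func(args map[string]string) map[string]string {
		args["v"] = "4" // raise verbosity, keep everything else as-is
		return args
	})
	fmt.Println(newCmd)
	// Output: [kube-apiserver --secure-port=6443 --v=4]
}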
| cmd/kubeadm/app/util/arguments.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00019888328097295016,
0.00017456042405683547,
0.00016217333904933184,
0.00017386043327860534,
0.000009486320777796209
] |
{
"id": 0,
"code_window": [
"\tif len(config.HealthzBindAddress) > 0 {\n",
"\t\ts.HealthzServer = healthcheck.NewProxierHealthServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, s.Recorder, s.NodeRef)\n",
"\t}\n",
"\n",
"\ts.Proxier, err = s.createProxier(config)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\terr = s.platformSetup()\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n",
"\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 569
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1beta1
import (
v1beta1 "k8s.io/api/policy/v1beta1"
)
// RunAsGroupStrategyOptionsApplyConfiguration represents a declarative configuration of the RunAsGroupStrategyOptions type for use
// with apply.
type RunAsGroupStrategyOptionsApplyConfiguration struct {
Rule *v1beta1.RunAsGroupStrategy `json:"rule,omitempty"`
Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"`
}
// RunAsGroupStrategyOptionsApplyConfiguration constructs a declarative configuration of the RunAsGroupStrategyOptions type for use with
// apply.
func RunAsGroupStrategyOptions() *RunAsGroupStrategyOptionsApplyConfiguration {
return &RunAsGroupStrategyOptionsApplyConfiguration{}
}
// WithRule sets the Rule field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Rule field is set to the value of the last call.
func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsGroupStrategy) *RunAsGroupStrategyOptionsApplyConfiguration {
b.Rule = &value
return b
}
// WithRanges adds the given value to the Ranges field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Ranges field.
func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsGroupStrategyOptionsApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithRanges")
}
b.Ranges = append(b.Ranges, *values[i])
}
return b
}
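// Illustrative usage (a sketch, assuming the sibling IDRange() constructor and its
// WithMin/WithMax helpers generated in this package):
//
//	opts := RunAsGroupStrategyOptions().
//		WithRule(v1beta1.RunAsGroupStrategyMustRunAs).
//		WithRanges(IDRange().WithMin(1000).WithMax(2000))
//
// The pointer-based, omitempty fields make the result suitable for server-side apply.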
| staging/src/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.0003871115332003683,
0.00021184324577916414,
0.000167603517184034,
0.000177829060703516,
0.00007858409662730992
] |
{
"id": 1,
"code_window": [
"\t// Start up a metrics server if requested\n",
"\tserveMetrics(s.Config.MetricsBindAddress, s.Config.Mode, s.Config.EnableProfiling, errCh)\n",
"\n",
"\t// Do platform-specific setup\n",
"\terr := s.platformSetup()\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\tnoProxyName, err := labels.NewRequirement(apis.LabelServiceProxyName, selection.DoesNotExist, nil)\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "replace",
"edit_start_line_idx": 708
} | //go:build !windows
// +build !windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"context"
"errors"
"fmt"
goruntime "runtime"
"strings"
"time"
"github.com/google/cadvisor/machine"
"github.com/google/cadvisor/utils/sysfs"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/apimachinery/pkg/fields"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
toolswatch "k8s.io/client-go/tools/watch"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
"k8s.io/kubernetes/pkg/proxy"
proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/proxy/ipvs"
utilipset "k8s.io/kubernetes/pkg/proxy/ipvs/ipset"
utilipvs "k8s.io/kubernetes/pkg/proxy/ipvs/util"
proxymetrics "k8s.io/kubernetes/pkg/proxy/metrics"
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
"k8s.io/utils/exec"
netutils "k8s.io/utils/net"
"k8s.io/klog/v2"
)
// timeoutForNodePodCIDR is the time to wait for allocators to assign a PodCIDR to the
// node after it is registered.
var timeoutForNodePodCIDR = 5 * time.Minute
func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {
if config.Mode == "" {
klog.InfoS("Using iptables proxy")
config.Mode = proxyconfigapi.ProxyModeIPTables
}
if config.DetectLocalMode == "" {
klog.V(4).InfoS("Defaulting detect-local-mode", "localModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR))
config.DetectLocalMode = proxyconfigapi.LocalModeClusterCIDR
}
klog.V(2).InfoS("DetectLocalMode", "localMode", string(config.DetectLocalMode))
}
// createProxier creates the proxy.Provider
func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {
var proxier proxy.Provider
var err error
if config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {
klog.InfoS("Watching for node, awaiting podCIDR allocation", "hostname", s.Hostname)
node, err := waitForPodCIDR(s.Client, s.Hostname)
if err != nil {
return nil, err
}
s.podCIDRs = node.Spec.PodCIDRs
klog.InfoS("NodeInfo", "podCIDRs", node.Spec.PodCIDRs)
}
primaryProtocol := utiliptables.ProtocolIPv4
if s.PrimaryIPFamily == v1.IPv6Protocol {
primaryProtocol = utiliptables.ProtocolIPv6
}
execer := exec.New()
iptInterface := utiliptables.New(execer, primaryProtocol)
var ipt [2]utiliptables.Interface
dualStack := true // While we assume that the node supports dual-stack, we do further checks below
// Create iptables handlers for both families, one is already created
// Always ordered as IPv4, IPv6
if primaryProtocol == utiliptables.ProtocolIPv4 {
ipt[0] = iptInterface
ipt[1] = utiliptables.New(execer, utiliptables.ProtocolIPv6)
} else {
ipt[0] = utiliptables.New(execer, utiliptables.ProtocolIPv4)
ipt[1] = iptInterface
}
if !ipt[0].Present() {
return nil, fmt.Errorf("iptables is not supported for primary IP family %q", primaryProtocol)
} else if !ipt[1].Present() {
klog.InfoS("kube-proxy running in single-stack mode: secondary ipFamily is not supported", "ipFamily", ipt[1].Protocol())
dualStack = false
// Validate NodePortAddresses is single-stack
npaByFamily := proxyutil.MapCIDRsByIPFamily(config.NodePortAddresses)
secondaryFamily := proxyutil.OtherIPFamily(s.PrimaryIPFamily)
badAddrs := npaByFamily[secondaryFamily]
if len(badAddrs) > 0 {
klog.InfoS("Ignoring --nodeport-addresses of the wrong family", "ipFamily", secondaryFamily, "addresses", badAddrs)
}
}
if config.Mode == proxyconfigapi.ProxyModeIPTables {
klog.InfoS("Using iptables Proxier")
if dualStack {
klog.InfoS("kube-proxy running in dual-stack mode", "ipFamily", iptInterface.Protocol())
klog.InfoS("Creating dualStackProxier for iptables")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewDualStackProxier(
ipt,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetectors,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.NodePortAddresses,
)
} else {
// Create a single-stack proxier if and only if the node does not support dual-stack (i.e., no iptables support for the secondary IP family).
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewProxier(
s.PrimaryIPFamily,
iptInterface,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetector,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.NodePortAddresses,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
} else if config.Mode == proxyconfigapi.ProxyModeIPVS {
kernelHandler := ipvs.NewLinuxKernelHandler()
ipsetInterface := utilipset.New(execer)
ipvsInterface := utilipvs.New()
if err := ipvs.CanUseIPVSProxier(ipvsInterface, ipsetInterface, config.IPVS.Scheduler); err != nil {
return nil, fmt.Errorf("can't use the IPVS proxier: %v", err)
}
klog.InfoS("Using ipvs Proxier")
if dualStack {
klog.InfoS("Creating dualStackProxier for ipvs")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxier, err = ipvs.NewDualStackProxier(
ipt,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetectors,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.IPVS.Scheduler,
config.NodePortAddresses,
kernelHandler,
)
} else {
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxier, err = ipvs.NewProxier(
s.PrimaryIPFamily,
iptInterface,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetector,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.IPVS.Scheduler,
config.NodePortAddresses,
kernelHandler,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
}
return proxier, nil
}
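// For example (illustrative): with --proxy-mode=iptables on a node where the IPv6
// iptables binaries are missing, createProxier above logs that kube-proxy is running
// single-stack and builds one iptables.Proxier for the primary family; when both
// families are present it builds a dual-stack proxier instead.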
func (s *ProxyServer) platformSetup() error {
ct := &realConntracker{}
max, err := getConntrackMax(s.Config.Conntrack)
if err != nil {
return err
}
if max > 0 {
err := ct.SetMax(max)
if err != nil {
if err != errReadOnlySysFS {
return err
}
// errReadOnlySysFS is caused by a known docker issue (https://github.com/docker/docker/issues/24000),
// the only remediation we know is to restart the docker daemon.
// Here we'll send a node event with a specific reason and message; the
// administrator should decide whether and how to handle this issue,
// e.g. whether to drain the node and restart docker. This occurs in other
// container runtimes as well.
// TODO(random-liu): Remove this when the docker bug is fixed.
const message = "CRI error: /sys is read-only: " +
"cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)"
s.Recorder.Eventf(s.NodeRef, nil, v1.EventTypeWarning, err.Error(), "StartKubeProxy", message)
}
}
if s.Config.Conntrack.TCPEstablishedTimeout != nil && s.Config.Conntrack.TCPEstablishedTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPEstablishedTimeout.Duration / time.Second)
if err := ct.SetTCPEstablishedTimeout(timeout); err != nil {
return err
}
}
if s.Config.Conntrack.TCPCloseWaitTimeout != nil && s.Config.Conntrack.TCPCloseWaitTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPCloseWaitTimeout.Duration / time.Second)
if err := ct.SetTCPCloseWaitTimeout(timeout); err != nil {
return err
}
}
proxymetrics.RegisterMetrics()
return nil
}
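// Illustrative configuration (assumed values, not defaults): with
// Conntrack.MaxPerCore=32768, Min=131072, TCPEstablishedTimeout=24h and
// TCPCloseWaitTimeout=1h, platformSetup above raises the conntrack table size via
// ct.SetMax and sets the TCP established / close-wait conntrack timeouts to
// 86400s and 3600s respectively.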
func getConntrackMax(config proxyconfigapi.KubeProxyConntrackConfiguration) (int, error) {
if config.MaxPerCore != nil && *config.MaxPerCore > 0 {
floor := 0
if config.Min != nil {
floor = int(*config.Min)
}
scaled := int(*config.MaxPerCore) * detectNumCPU()
if scaled > floor {
klog.V(3).InfoS("GetConntrackMax: using scaled conntrack-max-per-core")
return scaled, nil
}
klog.V(3).InfoS("GetConntrackMax: using conntrack-min")
return floor, nil
}
return 0, nil
}
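// Worked example (assumed values): with MaxPerCore=32768, Min=131072 and 4 detected
// CPUs, scaled = 32768*4 = 131072, which is not greater than the floor, so the floor
// 131072 is returned; with 8 CPUs, scaled = 262144 and wins. If MaxPerCore is nil or
// 0, getConntrackMax returns 0 and the kernel setting is left untouched.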
func waitForPodCIDR(client clientset.Interface, nodeName string) (*v1.Node, error) {
// since allocators can assign the podCIDR after the node registers, we do a watch here to wait
// for podCIDR to be assigned, instead of assuming that the Get() on startup will have it.
ctx, cancelFunc := context.WithTimeout(context.TODO(), timeoutForNodePodCIDR)
defer cancelFunc()
fieldSelector := fields.OneTermEqualSelector("metadata.name", nodeName).String()
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
options.FieldSelector = fieldSelector
return client.CoreV1().Nodes().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return client.CoreV1().Nodes().Watch(ctx, options)
},
}
condition := func(event watch.Event) (bool, error) {
// don't process delete events
if event.Type != watch.Modified && event.Type != watch.Added {
return false, nil
}
n, ok := event.Object.(*v1.Node)
if !ok {
return false, fmt.Errorf("event object not of type Node")
}
// don't consider the node if it is going to be deleted; keep waiting
if !n.DeletionTimestamp.IsZero() {
return false, nil
}
return n.Spec.PodCIDR != "" && len(n.Spec.PodCIDRs) > 0, nil
}
evt, err := toolswatch.UntilWithSync(ctx, lw, &v1.Node{}, nil, condition)
if err != nil {
return nil, fmt.Errorf("timeout waiting for PodCIDR allocation to configure detect-local-mode %v: %v", proxyconfigapi.LocalModeNodeCIDR, err)
}
if n, ok := evt.Object.(*v1.Node); ok {
return n, nil
}
return nil, fmt.Errorf("event object not of type node")
}
func detectNumCPU() int {
// try to get numCPU from /sys first due to a known issue (https://github.com/kubernetes/kubernetes/issues/99225)
_, numCPU, err := machine.GetTopology(sysfs.NewRealSysFs())
if err != nil || numCPU < 1 {
return goruntime.NumCPU()
}
return numCPU
}
func getLocalDetector(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt utiliptables.Interface, nodePodCIDRs []string) (proxyutiliptables.LocalTrafficDetector, error) {
switch mode {
case proxyconfigapi.LocalModeClusterCIDR:
// LocalModeClusterCIDR is the default if --detect-local-mode wasn't passed,
// but --cluster-cidr is optional.
if len(strings.TrimSpace(config.ClusterCIDR)) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined")
break
}
return proxyutiliptables.NewDetectLocalByCIDR(config.ClusterCIDR, ipt)
case proxyconfigapi.LocalModeNodeCIDR:
if len(nodePodCIDRs) == 0 {
klog.InfoS("Detect-local-mode set to NodeCIDR, but no PodCIDR defined at node")
break
}
return proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt)
case proxyconfigapi.LocalModeBridgeInterface:
return proxyutiliptables.NewDetectLocalByBridgeInterface(config.DetectLocal.BridgeInterface)
case proxyconfigapi.LocalModeInterfaceNamePrefix:
return proxyutiliptables.NewDetectLocalByInterfaceNamePrefix(config.DetectLocal.InterfaceNamePrefix)
}
klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
return proxyutiliptables.NewNoOpLocalDetector(), nil
}
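// For example (illustrative): with DetectLocalMode=ClusterCIDR and
// --cluster-cidr=10.244.0.0/16, getLocalDetector above returns a detector that treats
// traffic sourced from 10.244.0.0/16 as local; if ClusterCIDR is selected but no
// cluster CIDR is configured, it falls through to the no-op detector.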
func getDualStackLocalDetectorTuple(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt [2]utiliptables.Interface, nodePodCIDRs []string) ([2]proxyutiliptables.LocalTrafficDetector, error) {
var err error
localDetectors := [2]proxyutiliptables.LocalTrafficDetector{proxyutiliptables.NewNoOpLocalDetector(), proxyutiliptables.NewNoOpLocalDetector()}
switch mode {
case proxyconfigapi.LocalModeClusterCIDR:
// LocalModeClusterCIDR is the default if --detect-local-mode wasn't passed,
// but --cluster-cidr is optional.
if len(strings.TrimSpace(config.ClusterCIDR)) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined")
break
}
clusterCIDRs := cidrTuple(config.ClusterCIDR)
if len(strings.TrimSpace(clusterCIDRs[0])) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv4 cluster CIDR defined, defaulting to no-op detect-local for IPv4")
} else {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[0], ipt[0])
if err != nil { // don't lose the original error
return localDetectors, err
}
}
if len(strings.TrimSpace(clusterCIDRs[1])) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, defaulting to no-op detect-local for IPv6")
} else {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[1], ipt[1])
}
return localDetectors, err
case proxyconfigapi.LocalModeNodeCIDR:
if len(nodePodCIDRs) == 0 {
klog.InfoS("No node info available to configure detect-local-mode NodeCIDR")
break
}
// localDetectors, like ipt, need to be in the order [IPv4, IPv6], but PodCIDRs is set up so that PodCIDRs[0] == PodCIDR,
// so we have to handle the case where PodCIDR can be IPv6 and set that to localDetectors[1]
if netutils.IsIPv6CIDRString(nodePodCIDRs[0]) {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt[1])
if err != nil {
return localDetectors, err
}
if len(nodePodCIDRs) > 1 {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[1], ipt[0])
}
} else {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt[0])
if err != nil {
return localDetectors, err
}
if len(nodePodCIDRs) > 1 {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[1], ipt[1])
}
}
return localDetectors, err
case proxyconfigapi.LocalModeBridgeInterface, proxyconfigapi.LocalModeInterfaceNamePrefix:
localDetector, err := getLocalDetector(mode, config, ipt[0], nodePodCIDRs)
if err == nil {
localDetectors[0] = localDetector
localDetectors[1] = localDetector
}
return localDetectors, err
default:
klog.InfoS("Unknown detect-local-mode", "detectLocalMode", mode)
}
klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
return localDetectors, nil
}
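// For example (illustrative): a node with PodCIDRs ["fd00:10::/64", "10.244.1.0/24"]
// yields localDetectors[1] built from fd00:10::/64 and localDetectors[0] built from
// 10.244.1.0/24, because the detectors must always be ordered (IPv4, IPv6) even when
// the node's primary PodCIDR is IPv6.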
// cidrTuple takes a comma-separated list of CIDRs and returns a tuple (ipv4cidr,ipv6cidr).
// The returned tuple is guaranteed to have the order (ipv4,ipv6); if no CIDR from a family
// is found, an empty string "" is inserted.
func cidrTuple(cidrList string) [2]string {
cidrs := [2]string{"", ""}
foundIPv4 := false
foundIPv6 := false
for _, cidr := range strings.Split(cidrList, ",") {
if netutils.IsIPv6CIDRString(cidr) && !foundIPv6 {
cidrs[1] = cidr
foundIPv6 = true
} else if !foundIPv4 {
cidrs[0] = cidr
foundIPv4 = true
}
if foundIPv6 && foundIPv4 {
break
}
}
return cidrs
}
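// For example (illustrative): cidrTuple("fd00::/64,10.0.0.0/16") returns
// [2]string{"10.0.0.0/16", "fd00::/64"}: the result is always ordered (IPv4, IPv6)
// regardless of input order, and a missing family is left as "".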
// cleanupAndExit removes iptables rules and ipset/ipvs rules
func cleanupAndExit() error {
execer := exec.New()
// cleanup IPv6 and IPv4 iptables rules, regardless of current configuration
ipts := []utiliptables.Interface{
utiliptables.New(execer, utiliptables.ProtocolIPv4),
utiliptables.New(execer, utiliptables.ProtocolIPv6),
}
ipsetInterface := utilipset.New(execer)
ipvsInterface := utilipvs.New()
var encounteredError bool
for _, ipt := range ipts {
encounteredError = iptables.CleanupLeftovers(ipt) || encounteredError
encounteredError = ipvs.CleanupLeftovers(ipvsInterface, ipt, ipsetInterface) || encounteredError
}
if encounteredError {
return errors.New("encountered an error while tearing down rules")
}
return nil
}
| cmd/kube-proxy/app/server_others.go | 1 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.0014731495175510645,
0.0002627090725582093,
0.0001635379740037024,
0.00017191363440360874,
0.00025590983568690717
] |
{
"id": 1,
"code_window": [
"\t// Start up a metrics server if requested\n",
"\tserveMetrics(s.Config.MetricsBindAddress, s.Config.Mode, s.Config.EnableProfiling, errCh)\n",
"\n",
"\t// Do platform-specific setup\n",
"\terr := s.platformSetup()\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\tnoProxyName, err := labels.NewRequirement(apis.LabelServiceProxyName, selection.DoesNotExist, nil)\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "replace",
"edit_start_line_idx": 708
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package disruption
import (
"context"
"flag"
"fmt"
"os"
"runtime/debug"
"strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
apps "k8s.io/api/apps/v1"
autoscalingapi "k8s.io/api/autoscaling/v1"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
discoveryfake "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
scalefake "k8s.io/client-go/scale/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/controller"
clocktesting "k8s.io/utils/clock/testing"
"k8s.io/utils/pointer"
)
type pdbStates map[string]policy.PodDisruptionBudget
var alwaysReady = func() bool { return true }
func (ps *pdbStates) Set(ctx context.Context, pdb *policy.PodDisruptionBudget) error {
key, err := controller.KeyFunc(pdb)
if err != nil {
return err
}
(*ps)[key] = *pdb.DeepCopy()
return nil
}
func (ps *pdbStates) Get(key string) policy.PodDisruptionBudget {
return (*ps)[key]
}
func (ps *pdbStates) VerifyPdbStatus(t *testing.T, key string, disruptionsAllowed, currentHealthy, desiredHealthy, expectedPods int32, disruptedPodMap map[string]metav1.Time) {
t.Helper()
actualPDB := ps.Get(key)
actualConditions := actualPDB.Status.Conditions
actualPDB.Status.Conditions = nil
expectedStatus := policy.PodDisruptionBudgetStatus{
DisruptionsAllowed: disruptionsAllowed,
CurrentHealthy: currentHealthy,
DesiredHealthy: desiredHealthy,
ExpectedPods: expectedPods,
DisruptedPods: disruptedPodMap,
ObservedGeneration: actualPDB.Generation,
}
actualStatus := actualPDB.Status
if diff := cmp.Diff(expectedStatus, actualStatus, cmpopts.EquateEmpty()); diff != "" {
t.Fatalf("PDB %q status mismatch (-want,+got):\n%s", key, diff)
}
cond := apimeta.FindStatusCondition(actualConditions, policy.DisruptionAllowedCondition)
if cond == nil {
t.Fatalf("Expected condition %q, but didn't find it", policy.DisruptionAllowedCondition)
}
if disruptionsAllowed > 0 {
if cond.Status != metav1.ConditionTrue {
t.Fatalf("Expected condition %q to have status %q, but was %q",
policy.DisruptionAllowedCondition, metav1.ConditionTrue, cond.Status)
}
} else {
if cond.Status != metav1.ConditionFalse {
t.Fatalf("Expected condition %q to have status %q, but was %q",
policy.DisruptionAllowedCondition, metav1.ConditionFalse, cond.Status)
}
}
}
func (ps *pdbStates) VerifyDisruptionAllowed(t *testing.T, key string, disruptionsAllowed int32) {
pdb := ps.Get(key)
if pdb.Status.DisruptionsAllowed != disruptionsAllowed {
debug.PrintStack()
t.Fatalf("PodDisruptionAllowed mismatch for PDB %q. Expected %v but got %v.", key, disruptionsAllowed, pdb.Status.DisruptionsAllowed)
}
}
func (ps *pdbStates) VerifyNoStatusError(t *testing.T, key string) {
pdb := ps.Get(key)
for _, condition := range pdb.Status.Conditions {
if strings.Contains(condition.Message, "found no controller ref") && condition.Reason == policy.SyncFailedReason {
t.Fatalf("PodDisruption Controller should not error when unmanaged pods are found but it failed for %q", key)
}
}
}
type disruptionController struct {
*DisruptionController
podStore cache.Store
pdbStore cache.Store
rcStore cache.Store
rsStore cache.Store
dStore cache.Store
ssStore cache.Store
coreClient *fake.Clientset
scaleClient *scalefake.FakeScaleClient
discoveryClient *discoveryfake.FakeDiscovery
informerFactory informers.SharedInformerFactory
}
var customGVK = schema.GroupVersionKind{
Group: "custom.k8s.io",
Version: "v1",
Kind: "customresource",
}
func newFakeDisruptionController() (*disruptionController, *pdbStates) {
return newFakeDisruptionControllerWithTime(context.TODO(), time.Now())
}
func newFakeDisruptionControllerWithTime(ctx context.Context, now time.Time) (*disruptionController, *pdbStates) {
ps := &pdbStates{}
coreClient := fake.NewSimpleClientset()
informerFactory := informers.NewSharedInformerFactory(coreClient, controller.NoResyncPeriodFunc())
scheme := runtime.NewScheme()
scheme.AddKnownTypeWithName(customGVK, &v1.Service{})
fakeScaleClient := &scalefake.FakeScaleClient{}
fakeDiscovery := &discoveryfake.FakeDiscovery{
Fake: &core.Fake{},
}
fakeClock := clocktesting.NewFakeClock(now)
dc := NewDisruptionControllerInternal(
informerFactory.Core().V1().Pods(),
informerFactory.Policy().V1().PodDisruptionBudgets(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Apps().V1().ReplicaSets(),
informerFactory.Apps().V1().Deployments(),
informerFactory.Apps().V1().StatefulSets(),
coreClient,
testrestmapper.TestOnlyStaticRESTMapper(scheme),
fakeScaleClient,
fakeDiscovery,
fakeClock,
stalePodDisruptionTimeout,
)
dc.getUpdater = func() updater { return ps.Set }
dc.podListerSynced = alwaysReady
dc.pdbListerSynced = alwaysReady
dc.rcListerSynced = alwaysReady
dc.rsListerSynced = alwaysReady
dc.dListerSynced = alwaysReady
dc.ssListerSynced = alwaysReady
dc.recorder = record.NewFakeRecorder(100)
informerFactory.Start(ctx.Done())
informerFactory.WaitForCacheSync(ctx.Done())
return &disruptionController{
dc,
informerFactory.Core().V1().Pods().Informer().GetStore(),
informerFactory.Policy().V1().PodDisruptionBudgets().Informer().GetStore(),
informerFactory.Core().V1().ReplicationControllers().Informer().GetStore(),
informerFactory.Apps().V1().ReplicaSets().Informer().GetStore(),
informerFactory.Apps().V1().Deployments().Informer().GetStore(),
informerFactory.Apps().V1().StatefulSets().Informer().GetStore(),
coreClient,
fakeScaleClient,
fakeDiscovery,
informerFactory,
}, ps
}
func fooBar() map[string]string {
return map[string]string{"foo": "bar"}
}
func newSel(labels map[string]string) *metav1.LabelSelector {
return &metav1.LabelSelector{MatchLabels: labels}
}
func newSelFooBar() *metav1.LabelSelector {
return newSel(map[string]string{"foo": "bar"})
}
func newMinAvailablePodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {
pdb := &policy.PodDisruptionBudget{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: policy.PodDisruptionBudgetSpec{
MinAvailable: &minAvailable,
Selector: newSelFooBar(),
},
}
pdbName, err := controller.KeyFunc(pdb)
if err != nil {
t.Fatalf("Unexpected error naming pdb %q: %v", pdb.Name, err)
}
return pdb, pdbName
}
func newMaxUnavailablePodDisruptionBudget(t *testing.T, maxUnavailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {
pdb := &policy.PodDisruptionBudget{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: policy.PodDisruptionBudgetSpec{
MaxUnavailable: &maxUnavailable,
Selector: newSelFooBar(),
},
}
pdbName, err := controller.KeyFunc(pdb)
if err != nil {
t.Fatalf("Unexpected error naming pdb %q: %v", pdb.Name, err)
}
return pdb, pdbName
}
func updatePodOwnerToRc(t *testing.T, pod *v1.Pod, rc *v1.ReplicationController) {
var controllerReference metav1.OwnerReference
var trueVar = true
controllerReference = metav1.OwnerReference{UID: rc.UID, APIVersion: controllerKindRC.GroupVersion().String(), Kind: controllerKindRC.Kind, Name: rc.Name, Controller: &trueVar}
pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}
func updatePodOwnerToRs(t *testing.T, pod *v1.Pod, rs *apps.ReplicaSet) {
var controllerReference metav1.OwnerReference
var trueVar = true
controllerReference = metav1.OwnerReference{UID: rs.UID, APIVersion: controllerKindRS.GroupVersion().String(), Kind: controllerKindRS.Kind, Name: rs.Name, Controller: &trueVar}
pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}
func updatePodOwnerToSs(t *testing.T, pod *v1.Pod, ss *apps.StatefulSet) {
var controllerReference metav1.OwnerReference
var trueVar = true
controllerReference = metav1.OwnerReference{UID: ss.UID, APIVersion: controllerKindSS.GroupVersion().String(), Kind: controllerKindSS.Kind, Name: ss.Name, Controller: &trueVar}
pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}
func newPod(t *testing.T, name string) (*v1.Pod, string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Annotations: make(map[string]string),
Name: name,
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
Labels: fooBar(),
},
Spec: v1.PodSpec{},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{Type: v1.PodReady, Status: v1.ConditionTrue},
},
},
}
podName, err := controller.KeyFunc(pod)
if err != nil {
t.Fatalf("Unexpected error naming pod %q: %v", pod.Name, err)
}
return pod, podName
}
func newReplicationController(t *testing.T, size int32) (*v1.ReplicationController, string) {
rc := &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
Labels: fooBar(),
},
Spec: v1.ReplicationControllerSpec{
Replicas: &size,
Selector: fooBar(),
},
}
rcName, err := controller.KeyFunc(rc)
if err != nil {
t.Fatalf("Unexpected error naming RC %q", rc.Name)
}
return rc, rcName
}
func newDeployment(t *testing.T, size int32) (*apps.Deployment, string) {
d := &apps.Deployment{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
Labels: fooBar(),
},
Spec: apps.DeploymentSpec{
Replicas: &size,
Selector: newSelFooBar(),
},
}
dName, err := controller.KeyFunc(d)
if err != nil {
t.Fatalf("Unexpected error naming Deployment %q: %v", d.Name, err)
}
return d, dName
}
func newReplicaSet(t *testing.T, size int32) (*apps.ReplicaSet, string) {
rs := &apps.ReplicaSet{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
Labels: fooBar(),
},
Spec: apps.ReplicaSetSpec{
Replicas: &size,
Selector: newSelFooBar(),
},
}
rsName, err := controller.KeyFunc(rs)
if err != nil {
t.Fatalf("Unexpected error naming ReplicaSet %q: %v", rs.Name, err)
}
return rs, rsName
}
func newStatefulSet(t *testing.T, size int32) (*apps.StatefulSet, string) {
ss := &apps.StatefulSet{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
Labels: fooBar(),
},
Spec: apps.StatefulSetSpec{
Replicas: &size,
Selector: newSelFooBar(),
},
}
ssName, err := controller.KeyFunc(ss)
if err != nil {
t.Fatalf("Unexpected error naming StatefulSet %q: %v", ss.Name, err)
}
return ss, ssName
}
func update(t *testing.T, store cache.Store, obj interface{}) {
if err := store.Update(obj); err != nil {
t.Fatalf("Could not add %+v to %+v: %v", obj, store, err)
}
}
func add(t *testing.T, store cache.Store, obj interface{}) {
if err := store.Add(obj); err != nil {
t.Fatalf("Could not add %+v to %+v: %v", obj, store, err)
}
}
// Create one with no selector. Verify it matches all pods
func TestNoSelector(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(3))
pdb.Spec.Selector = &metav1.LabelSelector{}
pod, _ := newPod(t, "yo-yo-yo")
add(t, dc.pdbStore, pdb)
ctx := context.TODO()
dc.sync(ctx, pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0, map[string]metav1.Time{})
add(t, dc.podStore, pod)
dc.sync(ctx, pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 1, 3, 1, map[string]metav1.Time{})
}
// Verify that available/expected counts go up as we add pods, then verify that
// available count goes down when we make a pod unavailable.
func TestUnavailable(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(3))
ctx := context.TODO()
add(t, dc.pdbStore, pdb)
dc.sync(ctx, pdbName)
// Add three pods, verifying that the counts go up at each step.
pods := []*v1.Pod{}
for i := int32(0); i < 4; i++ {
ps.VerifyPdbStatus(t, pdbName, 0, i, 3, i, map[string]metav1.Time{})
pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
pods = append(pods, pod)
add(t, dc.podStore, pod)
dc.sync(ctx, pdbName)
}
ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4, map[string]metav1.Time{})
// Now set one pod as unavailable
pods[0].Status.Conditions = []v1.PodCondition{}
update(t, dc.podStore, pods[0])
dc.sync(ctx, pdbName)
// Verify expected update
ps.VerifyPdbStatus(t, pdbName, 0, 3, 3, 4, map[string]metav1.Time{})
}
// Verify that an integer MaxUnavailable won't
// allow a disruption for pods with no controller.
func TestIntegerMaxUnavailable(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt32(1))
add(t, dc.pdbStore, pdb)
ctx := context.TODO()
dc.sync(ctx, pdbName)
// This verifies that when a PDB has 0 pods, disruptions are not allowed.
ps.VerifyDisruptionAllowed(t, pdbName, 0)
pod, _ := newPod(t, "naked")
add(t, dc.podStore, pod)
dc.sync(ctx, pdbName)
ps.VerifyDisruptionAllowed(t, pdbName, 0)
verifyEventEmitted(t, dc, "UnmanagedPods")
}
// Verify that an integer MaxUnavailable will recompute allowed disruptions when the scale of
// the selected pod's controller is modified.
func TestIntegerMaxUnavailableWithScaling(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt32(2))
add(t, dc.pdbStore, pdb)
rs, _ := newReplicaSet(t, 7)
add(t, dc.rsStore, rs)
pod, _ := newPod(t, "pod")
updatePodOwnerToRs(t, pod, rs)
ctx := context.TODO()
add(t, dc.podStore, pod)
dc.sync(ctx, pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 1, 5, 7, map[string]metav1.Time{})
// Update scale of ReplicaSet and check PDB
rs.Spec.Replicas = pointer.Int32(5)
update(t, dc.rsStore, rs)
dc.sync(ctx, pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 1, 3, 5, map[string]metav1.Time{})
}
// Verify that a percentage MaxUnavailable will recompute allowed disruptions when the scale of
// the selected pod's controller is modified.
func TestPercentageMaxUnavailableWithScaling(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromString("30%"))
add(t, dc.pdbStore, pdb)
rs, _ := newReplicaSet(t, 7)
add(t, dc.rsStore, rs)
pod, _ := newPod(t, "pod")
updatePodOwnerToRs(t, pod, rs)
add(t, dc.podStore, pod)
ctx := context.TODO()
dc.sync(ctx, pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 1, 4, 7, map[string]metav1.Time{})
// Update scale of ReplicaSet and check PDB
rs.Spec.Replicas = pointer.Int32(3)
update(t, dc.rsStore, rs)
dc.sync(ctx, pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 3, map[string]metav1.Time{})
}
// Create a pod with no controller, and verify that a PDB with a percentage
// specified won't allow a disruption.
func TestNakedPod(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
add(t, dc.pdbStore, pdb)
ctx := context.TODO()
dc.sync(ctx, pdbName)
// This verifies that when a PDB has 0 pods, disruptions are not allowed.
ps.VerifyDisruptionAllowed(t, pdbName, 0)
pod, _ := newPod(t, "naked")
add(t, dc.podStore, pod)
dc.sync(ctx, pdbName)
ps.VerifyDisruptionAllowed(t, pdbName, 0)
verifyEventEmitted(t, dc, "UnmanagedPods")
}
// Create a pod with unsupported controller, and verify that a PDB with a percentage
// specified won't allow a disruption.
func TestUnsupportedControllerPod(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
add(t, dc.pdbStore, pdb)
ctx := context.TODO()
dc.sync(ctx, pdbName)
// This verifies that when a PDB has 0 pods, disruptions are not allowed.
ps.VerifyDisruptionAllowed(t, pdbName, 0)
pod, _ := newPod(t, "naked")
isController := true
pod.OwnerReferences = append(pod.OwnerReferences, metav1.OwnerReference{
APIVersion: "apps.test.io/v1",
Kind: "TestWorkload",
Name: "fake-controller",
UID: "b7329742-8daa-493a-8881-6ca07139172b",
Controller: &isController,
})
add(t, dc.podStore, pod)
dc.sync(ctx, pdbName)
ps.VerifyDisruptionAllowed(t, pdbName, 0)
verifyEventEmitted(t, dc, "CalculateExpectedPodCountFailed")
}
// Verify that disruption controller is not erroring when unmanaged pods are found
func TestStatusForUnmanagedPod(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
add(t, dc.pdbStore, pdb)
ctx := context.TODO()
dc.sync(ctx, pdbName)
// This verifies that when a PDB has 0 pods, disruptions are not allowed.
ps.VerifyDisruptionAllowed(t, pdbName, 0)
pod, _ := newPod(t, "unmanaged")
add(t, dc.podStore, pod)
dc.sync(ctx, pdbName)
ps.VerifyNoStatusError(t, pdbName)
verifyEventEmitted(t, dc, "UnmanagedPods")
}
// Check if the unmanaged pods are correctly collected or not
func TestTotalUnmanagedPods(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
add(t, dc.pdbStore, pdb)
ctx := context.TODO()
dc.sync(ctx, pdbName)
// This verifies that when a PDB has 0 pods, disruptions are not allowed.
ps.VerifyDisruptionAllowed(t, pdbName, 0)
pod, _ := newPod(t, "unmanaged")
add(t, dc.podStore, pod)
dc.sync(ctx, pdbName)
var pods []*v1.Pod
pods = append(pods, pod)
_, unmanagedPods, _ := dc.getExpectedScale(ctx, pdb, pods)
if len(unmanagedPods) != 1 {
t.Fatalf("expected one pod to be unmanaged pod but found %d", len(unmanagedPods))
}
ps.VerifyNoStatusError(t, pdbName)
verifyEventEmitted(t, dc, "UnmanagedPods")
}
// Verify that we count the scale of a ReplicaSet even when it has no Deployment.
func TestReplicaSet(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("20%"))
add(t, dc.pdbStore, pdb)
rs, _ := newReplicaSet(t, 10)
add(t, dc.rsStore, rs)
ctx := context.TODO()
pod, _ := newPod(t, "pod")
updatePodOwnerToRs(t, pod, rs)
add(t, dc.podStore, pod)
dc.sync(ctx, pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 10, map[string]metav1.Time{})
}
func TestScaleResource(t *testing.T) {
customResourceUID := uuid.NewUUID()
replicas := int32(10)
pods := int32(4)
maxUnavailable := int32(5)
dc, ps := newFakeDisruptionController()
dc.scaleClient.AddReactor("get", "customresources", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &autoscalingapi.Scale{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
UID: customResourceUID,
},
Spec: autoscalingapi.ScaleSpec{
Replicas: replicas,
},
}
return true, obj, nil
})
pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt32(maxUnavailable))
add(t, dc.pdbStore, pdb)
trueVal := true
for i := 0; i < int(pods); i++ {
pod, _ := newPod(t, fmt.Sprintf("pod-%d", i))
pod.SetOwnerReferences([]metav1.OwnerReference{
{
Kind: customGVK.Kind,
APIVersion: customGVK.GroupVersion().String(),
Controller: &trueVal,
UID: customResourceUID,
},
})
add(t, dc.podStore, pod)
}
ctx := context.TODO()
dc.sync(ctx, pdbName)
disruptionsAllowed := int32(0)
if replicas-pods < maxUnavailable {
disruptionsAllowed = maxUnavailable - (replicas - pods)
}
ps.VerifyPdbStatus(t, pdbName, disruptionsAllowed, pods, replicas-maxUnavailable, replicas, map[string]metav1.Time{})
}
func TestScaleFinderNoResource(t *testing.T) {
resourceName := "customresources"
testCases := map[string]struct {
apiResources []metav1.APIResource
expectError bool
}{
"resource implements scale": {
apiResources: []metav1.APIResource{
{
Kind: customGVK.Kind,
Name: resourceName + "/status",
},
{
Kind: "Scale",
Group: autoscalingapi.GroupName,
Version: "v1",
Name: resourceName + "/scale",
},
{
Kind: customGVK.Kind,
Name: resourceName,
},
},
expectError: false,
},
"resource implements unsupported data format for scale subresource": {
apiResources: []metav1.APIResource{
{
Kind: customGVK.Kind,
Name: resourceName,
},
{
Kind: customGVK.Kind,
Name: resourceName + "/scale",
},
},
expectError: true,
},
"resource does not implement scale": {
apiResources: []metav1.APIResource{
{
Kind: customGVK.Kind,
Name: resourceName,
},
},
expectError: true,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
customResourceUID := uuid.NewUUID()
dc, _ := newFakeDisruptionController()
dc.scaleClient.AddReactor("get", resourceName, func(action core.Action) (handled bool, ret runtime.Object, err error) {
gr := schema.GroupResource{
Group: customGVK.Group,
Resource: resourceName,
}
return true, nil, errors.NewNotFound(gr, "name")
})
dc.discoveryClient.Resources = []*metav1.APIResourceList{
{
GroupVersion: customGVK.GroupVersion().String(),
APIResources: tc.apiResources,
},
}
trueVal := true
ownerRef := &metav1.OwnerReference{
Kind: customGVK.Kind,
APIVersion: customGVK.GroupVersion().String(),
Controller: &trueVal,
UID: customResourceUID,
}
_, err := dc.getScaleController(context.TODO(), ownerRef, "default")
if tc.expectError && err == nil {
t.Error("expected error, but didn't get one")
}
if !tc.expectError && err != nil {
t.Errorf("did not expect error, but got %v", err)
}
})
}
}
// Verify that multiple controllers don't allow the PDB to be set true.
func TestMultipleControllers(t *testing.T) {
const podCount = 2
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("1%"))
add(t, dc.pdbStore, pdb)
pods := []*v1.Pod{}
for i := 0; i < podCount; i++ {
pod, _ := newPod(t, fmt.Sprintf("pod %d", i))
pods = append(pods, pod)
add(t, dc.podStore, pod)
}
ctx := context.TODO()
dc.sync(ctx, pdbName)
// No controllers yet => no disruption allowed
ps.VerifyDisruptionAllowed(t, pdbName, 0)
rc, _ := newReplicationController(t, 1)
rc.Name = "rc 1"
for i := 0; i < podCount; i++ {
updatePodOwnerToRc(t, pods[i], rc)
}
add(t, dc.rcStore, rc)
dc.sync(ctx, pdbName)
// One RC and 200%>1% healthy => disruption allowed
ps.VerifyDisruptionAllowed(t, pdbName, 1)
rc, _ = newReplicationController(t, 1)
rc.Name = "rc 2"
for i := 0; i < podCount; i++ {
updatePodOwnerToRc(t, pods[i], rc)
}
add(t, dc.rcStore, rc)
dc.sync(ctx, pdbName)
// 100%>1% healthy BUT two RCs => no disruption allowed
// TODO: Find out if this assert is still needed
//ps.VerifyDisruptionAllowed(t, pdbName, 0)
}
func TestReplicationController(t *testing.T) {
// The budget in this test matches foo=bar, but the RC and its pods match
// {foo=bar, baz=quux}. Later, when we add a rogue pod with only a foo=bar
// label, it will match the budget but have no controllers, which should
// trigger the controller to set PodDisruptionAllowed to false.
labels := map[string]string{
"foo": "bar",
"baz": "quux",
}
dc, ps := newFakeDisruptionController()
// 34% should round up to 2
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("34%"))
add(t, dc.pdbStore, pdb)
rc, _ := newReplicationController(t, 3)
rc.Spec.Selector = labels
add(t, dc.rcStore, rc)
ctx := context.TODO()
dc.sync(ctx, pdbName)
// It starts out at 0 expected because, with no pods, the PDB doesn't know
// about the RC. This is a known bug. TODO(mml): file issue
ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})
for i := int32(0); i < 3; i++ {
pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
updatePodOwnerToRc(t, pod, rc)
pod.Labels = labels
add(t, dc.podStore, pod)
dc.sync(ctx, pdbName)
if i < 2 {
ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]metav1.Time{})
} else {
ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]metav1.Time{})
}
}
rogue, _ := newPod(t, "rogue")
add(t, dc.podStore, rogue)
dc.sync(ctx, pdbName)
ps.VerifyDisruptionAllowed(t, pdbName, 2)
}
func TestStatefulSetController(t *testing.T) {
labels := map[string]string{
"foo": "bar",
"baz": "quux",
}
dc, ps := newFakeDisruptionController()
// 34% should round up to 2
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("34%"))
add(t, dc.pdbStore, pdb)
ss, _ := newStatefulSet(t, 3)
add(t, dc.ssStore, ss)
ctx := context.TODO()
dc.sync(ctx, pdbName)
// It starts out at 0 expected because, with no pods, the PDB doesn't know
// about the SS. This is a known bug. TODO(mml): file issue
ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})
for i := int32(0); i < 3; i++ {
pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
updatePodOwnerToSs(t, pod, ss)
pod.Labels = labels
add(t, dc.podStore, pod)
dc.sync(ctx, pdbName)
if i < 2 {
ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]metav1.Time{})
} else {
ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]metav1.Time{})
}
}
}
func TestTwoControllers(t *testing.T) {
// Most of this test is in verifying intermediate cases as we define the
// three controllers and create the pods.
rcLabels := map[string]string{
"foo": "bar",
"baz": "quux",
}
dLabels := map[string]string{
"foo": "bar",
"baz": "quuux",
}
dc, ps := newFakeDisruptionController()
// These constants are related, but I avoid calculating the correct values in
// code. If you update a parameter here, recalculate the correct values for
// all of them. Further down in the test, we use these to control loops, and
// that level of logic is enough complexity for me.
const collectionSize int32 = 11 // How big each collection is
const minimumOne int32 = 4 // integer minimum with one controller
const minimumTwo int32 = 7 // integer minimum with two controllers
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
add(t, dc.pdbStore, pdb)
rc, _ := newReplicationController(t, collectionSize)
rc.Spec.Selector = rcLabels
add(t, dc.rcStore, rc)
ctx := context.TODO()
dc.sync(ctx, pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})
pods := []*v1.Pod{}
unavailablePods := collectionSize - minimumOne - 1
for i := int32(1); i <= collectionSize; i++ {
pod, _ := newPod(t, fmt.Sprintf("quux %d", i))
updatePodOwnerToRc(t, pod, rc)
pods = append(pods, pod)
pod.Labels = rcLabels
if i <= unavailablePods {
pod.Status.Conditions = []v1.PodCondition{}
}
add(t, dc.podStore, pod)
dc.sync(ctx, pdbName)
if i <= unavailablePods {
ps.VerifyPdbStatus(t, pdbName, 0, 0, minimumOne, collectionSize, map[string]metav1.Time{})
} else if i-unavailablePods <= minimumOne {
ps.VerifyPdbStatus(t, pdbName, 0, i-unavailablePods, minimumOne, collectionSize, map[string]metav1.Time{})
} else {
ps.VerifyPdbStatus(t, pdbName, 1, i-unavailablePods, minimumOne, collectionSize, map[string]metav1.Time{})
}
}
d, _ := newDeployment(t, collectionSize)
d.Spec.Selector = newSel(dLabels)
add(t, dc.dStore, d)
dc.sync(ctx, pdbName)
ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize, map[string]metav1.Time{})
rs, _ := newReplicaSet(t, collectionSize)
rs.Spec.Selector = newSel(dLabels)
rs.Labels = dLabels
add(t, dc.rsStore, rs)
dc.sync(ctx, pdbName)
ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize, map[string]metav1.Time{})
// By the end of this loop, the number of ready pods should be N+2 (hence minimumTwo+2).
unavailablePods = 2*collectionSize - (minimumTwo + 2) - unavailablePods
for i := int32(1); i <= collectionSize; i++ {
pod, _ := newPod(t, fmt.Sprintf("quuux %d", i))
updatePodOwnerToRs(t, pod, rs)
pods = append(pods, pod)
pod.Labels = dLabels
if i <= unavailablePods {
pod.Status.Conditions = []v1.PodCondition{}
}
add(t, dc.podStore, pod)
dc.sync(ctx, pdbName)
if i <= unavailablePods {
ps.VerifyPdbStatus(t, pdbName, 0, minimumOne+1, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
} else if i-unavailablePods <= minimumTwo-(minimumOne+1) {
ps.VerifyPdbStatus(t, pdbName, 0, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize, map[string]metav1.Time{})
} else {
ps.VerifyPdbStatus(t, pdbName, i-unavailablePods-(minimumTwo-(minimumOne+1)),
(minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize, map[string]metav1.Time{})
}
}
// Now we verify we can bring down 1 pod and a disruption is still permitted,
// but if we bring down two, it's not. Then we make the pod ready again and
// verify that a disruption is permitted again.
ps.VerifyPdbStatus(t, pdbName, 2, 2+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
pods[collectionSize-1].Status.Conditions = []v1.PodCondition{}
update(t, dc.podStore, pods[collectionSize-1])
dc.sync(ctx, pdbName)
ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
pods[collectionSize-2].Status.Conditions = []v1.PodCondition{}
update(t, dc.podStore, pods[collectionSize-2])
dc.sync(ctx, pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
pods[collectionSize-1].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
update(t, dc.podStore, pods[collectionSize-1])
dc.sync(ctx, pdbName)
ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
}
// Test pdb doesn't exist
func TestPDBNotExist(t *testing.T) {
dc, _ := newFakeDisruptionController()
pdb, _ := newMinAvailablePodDisruptionBudget(t, intstr.FromString("67%"))
add(t, dc.pdbStore, pdb)
if err := dc.sync(context.TODO(), "notExist"); err != nil {
t.Errorf("Unexpected error: %v, expect nil", err)
}
}
func TestUpdateDisruptedPods(t *testing.T) {
dc, ps := newFakeDisruptionController()
dc.recheckQueue = workqueue.NewNamedDelayingQueue("pdb_queue")
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(1))
currentTime := dc.clock.Now()
pdb.Status.DisruptedPods = map[string]metav1.Time{
"p1": {Time: currentTime}, // Should be removed, pod deletion started.
"p2": {Time: currentTime.Add(-3 * time.Minute)}, // Should be removed, expired.
"p3": {Time: currentTime.Add(-time.Minute)}, // Should remain, pod untouched.
"notthere": {Time: currentTime}, // Should be removed, pod deleted.
}
add(t, dc.pdbStore, pdb)
pod1, _ := newPod(t, "p1")
pod1.DeletionTimestamp = &metav1.Time{Time: dc.clock.Now()}
pod2, _ := newPod(t, "p2")
pod3, _ := newPod(t, "p3")
add(t, dc.podStore, pod1)
add(t, dc.podStore, pod2)
add(t, dc.podStore, pod3)
dc.sync(context.TODO(), pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 1, 1, 3, map[string]metav1.Time{"p3": {Time: currentTime.Add(-time.Minute)}})
}
func TestBasicFinderFunctions(t *testing.T) {
dc, _ := newFakeDisruptionController()
rs, _ := newReplicaSet(t, 10)
add(t, dc.rsStore, rs)
rc, _ := newReplicationController(t, 12)
add(t, dc.rcStore, rc)
ss, _ := newStatefulSet(t, 14)
add(t, dc.ssStore, ss)
testCases := map[string]struct {
finderFunc podControllerFinder
apiVersion string
kind string
name string
uid types.UID
findsScale bool
expectedScale int32
}{
"replicaset controller with apps group": {
finderFunc: dc.getPodReplicaSet,
apiVersion: "apps/v1",
kind: controllerKindRS.Kind,
name: rs.Name,
uid: rs.UID,
findsScale: true,
expectedScale: 10,
},
"replicaset controller with invalid group": {
finderFunc: dc.getPodReplicaSet,
apiVersion: "invalid/v1",
kind: controllerKindRS.Kind,
name: rs.Name,
uid: rs.UID,
findsScale: false,
},
"replicationcontroller with empty group": {
finderFunc: dc.getPodReplicationController,
apiVersion: "/v1",
kind: controllerKindRC.Kind,
name: rc.Name,
uid: rc.UID,
findsScale: true,
expectedScale: 12,
},
"replicationcontroller with invalid group": {
finderFunc: dc.getPodReplicationController,
apiVersion: "apps/v1",
kind: controllerKindRC.Kind,
name: rc.Name,
uid: rc.UID,
findsScale: false,
},
"statefulset controller with extensions group": {
finderFunc: dc.getPodStatefulSet,
apiVersion: "apps/v1",
kind: controllerKindSS.Kind,
name: ss.Name,
uid: ss.UID,
findsScale: true,
expectedScale: 14,
},
"statefulset controller with invalid kind": {
finderFunc: dc.getPodStatefulSet,
apiVersion: "apps/v1",
kind: controllerKindRS.Kind,
name: ss.Name,
uid: ss.UID,
findsScale: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
controllerRef := &metav1.OwnerReference{
APIVersion: tc.apiVersion,
Kind: tc.kind,
Name: tc.name,
UID: tc.uid,
}
controllerAndScale, _ := tc.finderFunc(context.TODO(), controllerRef, metav1.NamespaceDefault)
if controllerAndScale == nil {
if tc.findsScale {
t.Error("Expected scale, but got nil")
}
return
}
if got, want := controllerAndScale.scale, tc.expectedScale; got != want {
t.Errorf("Expected scale %d, but got %d", want, got)
}
if got, want := controllerAndScale.UID, tc.uid; got != want {
t.Errorf("Expected uid %s, but got %s", want, got)
}
})
}
}
func TestDeploymentFinderFunction(t *testing.T) {
labels := map[string]string{
"foo": "bar",
}
testCases := map[string]struct {
rsApiVersion string
rsKind string
depApiVersion string
depKind string
findsScale bool
expectedScale int32
}{
"happy path": {
rsApiVersion: "apps/v1",
rsKind: controllerKindRS.Kind,
depApiVersion: "extensions/v1",
depKind: controllerKindDep.Kind,
findsScale: true,
expectedScale: 10,
},
"invalid rs apiVersion": {
rsApiVersion: "invalid/v1",
rsKind: controllerKindRS.Kind,
depApiVersion: "apps/v1",
depKind: controllerKindDep.Kind,
findsScale: false,
},
"invalid rs kind": {
rsApiVersion: "apps/v1",
rsKind: "InvalidKind",
depApiVersion: "apps/v1",
depKind: controllerKindDep.Kind,
findsScale: false,
},
"invalid deployment apiVersion": {
rsApiVersion: "extensions/v1",
rsKind: controllerKindRS.Kind,
depApiVersion: "deployment/v1",
depKind: controllerKindDep.Kind,
findsScale: false,
},
"invalid deployment kind": {
rsApiVersion: "apps/v1",
rsKind: controllerKindRS.Kind,
depApiVersion: "extensions/v1",
depKind: "InvalidKind",
findsScale: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
dc, _ := newFakeDisruptionController()
dep, _ := newDeployment(t, 10)
dep.Spec.Selector = newSel(labels)
add(t, dc.dStore, dep)
rs, _ := newReplicaSet(t, 5)
rs.Labels = labels
trueVal := true
rs.OwnerReferences = append(rs.OwnerReferences, metav1.OwnerReference{
APIVersion: tc.depApiVersion,
Kind: tc.depKind,
Name: dep.Name,
UID: dep.UID,
Controller: &trueVal,
})
add(t, dc.rsStore, rs)
controllerRef := &metav1.OwnerReference{
APIVersion: tc.rsApiVersion,
Kind: tc.rsKind,
Name: rs.Name,
UID: rs.UID,
}
controllerAndScale, _ := dc.getPodDeployment(context.TODO(), controllerRef, metav1.NamespaceDefault)
if controllerAndScale == nil {
if tc.findsScale {
t.Error("Expected scale, but got nil")
}
return
}
if got, want := controllerAndScale.scale, tc.expectedScale; got != want {
t.Errorf("Expected scale %d, but got %d", want, got)
}
if got, want := controllerAndScale.UID, dep.UID; got != want {
t.Errorf("Expected uid %s, but got %s", want, got)
}
})
}
}
// This test checks that the disruption controller does not write stale data to
// a PDB status during race conditions with the eviction handler. Specifically,
// failed updates due to ResourceVersion conflict should not cause a stale value
// of DisruptionsAllowed to be written.
//
// In this test, DisruptionsAllowed starts at 2.
// (A) We will delete 1 pod and trigger DisruptionController to set
// DisruptionsAllowed to 1.
// (B) As the DisruptionController attempts this write, we will evict the
// remaining 2 pods and update DisruptionsAllowed to 0. (The real eviction
// handler would allow this because it still sees DisruptionsAllowed=2.)
// (C) If the DisruptionController writes DisruptionsAllowed=1 despite the
// resource conflict error, then there is a bug.
func TestUpdatePDBStatusRetries(t *testing.T) {
dc, _ := newFakeDisruptionController()
// Inject the production code over our fake impl
dc.getUpdater = func() updater { return dc.writePdbStatus }
ctx := context.TODO()
// Create a PDB and 3 pods that match it.
pdb, pdbKey := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(1))
pdb, err := dc.coreClient.PolicyV1().PodDisruptionBudgets(pdb.Namespace).Create(ctx, pdb, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Failed to create PDB: %v", err)
}
podNames := []string{"moe", "larry", "curly"}
for _, name := range podNames {
pod, _ := newPod(t, name)
_, err := dc.coreClient.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
}
// Block until the fake clientset writes are observable in the informer caches.
// FUN FACT: This guarantees that the informer caches have updated, but it does
// not guarantee that informer event handlers have completed. Fortunately,
// DisruptionController does most of its logic by reading from informer
// listers, so this guarantee is sufficient.
if err := waitForCacheCount(dc.pdbStore, 1); err != nil {
t.Fatalf("Failed to verify PDB in informer cache: %v", err)
}
if err := waitForCacheCount(dc.podStore, len(podNames)); err != nil {
t.Fatalf("Failed to verify pods in informer cache: %v", err)
}
// Sync DisruptionController once to update PDB status.
if err := dc.sync(ctx, pdbKey); err != nil {
t.Fatalf("Failed initial sync: %v", err)
}
// Evict simulates the visible effects of eviction in our fake client.
evict := func(podNames ...string) {
// These GVRs are copied from the generated fake code because they are not exported.
var (
podsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
poddisruptionbudgetsResource = schema.GroupVersionResource{Group: "policy", Version: "v1", Resource: "poddisruptionbudgets"}
)
// Bypass the coreClient.Fake and write directly to the ObjectTracker, because
// this helper will be called while the Fake is holding a lock.
obj, err := dc.coreClient.Tracker().Get(poddisruptionbudgetsResource, pdb.Namespace, pdb.Name)
if err != nil {
t.Fatalf("Failed to get PDB: %v", err)
}
updatedPDB := obj.(*policy.PodDisruptionBudget)
// Each eviction,
// - decrements DisruptionsAllowed
// - adds the pod to DisruptedPods
// - deletes the pod
updatedPDB.Status.DisruptionsAllowed -= int32(len(podNames))
updatedPDB.Status.DisruptedPods = make(map[string]metav1.Time)
for _, name := range podNames {
updatedPDB.Status.DisruptedPods[name] = metav1.NewTime(dc.clock.Now())
}
if err := dc.coreClient.Tracker().Update(poddisruptionbudgetsResource, updatedPDB, updatedPDB.Namespace); err != nil {
t.Fatalf("Eviction (PDB update) failed: %v", err)
}
for _, name := range podNames {
if err := dc.coreClient.Tracker().Delete(podsResource, "default", name); err != nil {
t.Fatalf("Eviction (pod delete) failed: %v", err)
}
}
}
// The fake kube client does not update ResourceVersion or check for conflicts.
// Instead, we add a reactor that returns a conflict error on the first PDB
// update and success after that.
var failOnce sync.Once
dc.coreClient.Fake.PrependReactor("update", "poddisruptionbudgets", func(a core.Action) (handled bool, obj runtime.Object, err error) {
failOnce.Do(func() {
// (B) Evict two pods and fail this update.
evict(podNames[1], podNames[2])
handled = true
err = errors.NewConflict(a.GetResource().GroupResource(), pdb.Name, fmt.Errorf("conflict"))
})
return handled, obj, err
})
// (A) Delete one pod
if err := dc.coreClient.CoreV1().Pods("default").Delete(ctx, podNames[0], metav1.DeleteOptions{}); err != nil {
t.Fatal(err)
}
if err := waitForCacheCount(dc.podStore, len(podNames)-1); err != nil {
t.Fatalf("Failed to verify pods in informer cache: %v", err)
}
// The sync() function should either write a correct status which takes the
// evictions into account, or re-queue the PDB for another sync (by returning
// an error)
if err := dc.sync(ctx, pdbKey); err != nil {
t.Logf("sync() returned with error: %v", err)
} else {
t.Logf("sync() returned with no error")
}
// (C) Whether or not sync() returned an error, the PDB status should reflect
// the evictions that took place.
finalPDB, err := dc.coreClient.PolicyV1().PodDisruptionBudgets("default").Get(ctx, pdb.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get PDB: %v", err)
}
if expected, actual := int32(0), finalPDB.Status.DisruptionsAllowed; expected != actual {
t.Errorf("DisruptionsAllowed should be %d, got %d", expected, actual)
}
}
func TestInvalidSelectors(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
testCases := map[string]struct {
labelSelector *metav1.LabelSelector
}{
"illegal value key": {
labelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"k8s.io/too/many/slashes": "value",
},
},
},
"illegal operator": {
labelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "foo",
Operator: metav1.LabelSelectorOperator("illegal"),
Values: []string{"bar"},
},
},
},
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(3))
pdb.Spec.Selector = tc.labelSelector
add(t, dc.pdbStore, pdb)
dc.sync(ctx, pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})
})
}
}
func TestStalePodDisruption(t *testing.T) {
now := time.Now()
cases := map[string]struct {
pod *v1.Pod
timePassed time.Duration
wantConditions []v1.PodCondition
}{
"stale pod disruption": {
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: metav1.NamespaceDefault,
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
LastTransitionTime: metav1.Time{Time: now},
},
},
},
},
timePassed: 2*time.Minute + time.Second,
wantConditions: []v1.PodCondition{
{
Type: v1.DisruptionTarget,
Status: v1.ConditionFalse,
},
},
},
"pod disruption in progress": {
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: metav1.NamespaceDefault,
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
LastTransitionTime: metav1.Time{Time: now},
},
},
},
},
timePassed: 2*time.Minute - time.Second,
wantConditions: []v1.PodCondition{
{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
},
},
},
"pod disruption actuated": {
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: metav1.NamespaceDefault,
DeletionTimestamp: &metav1.Time{Time: now},
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
LastTransitionTime: metav1.Time{Time: now},
},
},
},
},
timePassed: 2*time.Minute + time.Second,
wantConditions: []v1.PodCondition{
{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
},
},
},
"no pod disruption": {
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: metav1.NamespaceDefault,
DeletionTimestamp: &metav1.Time{Time: now},
},
},
timePassed: 2*time.Minute + time.Second,
},
"pod disruption cleared": {
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: metav1.NamespaceDefault,
DeletionTimestamp: &metav1.Time{Time: now},
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.DisruptionTarget,
Status: v1.ConditionFalse,
},
},
},
},
timePassed: 2*time.Minute + time.Second,
wantConditions: []v1.PodCondition{
{
Type: v1.DisruptionTarget,
Status: v1.ConditionFalse,
},
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dc, _ := newFakeDisruptionControllerWithTime(ctx, now)
go dc.Run(ctx)
if _, err := dc.coreClient.CoreV1().Pods(tc.pod.Namespace).Create(ctx, tc.pod, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
dc.clock.Sleep(tc.timePassed)
if err := dc.informerFactory.Core().V1().Pods().Informer().GetIndexer().Add(tc.pod); err != nil {
t.Fatalf("Failed adding pod to indexer: %v", err)
}
diff := ""
if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
pod, err := dc.kubeClient.CoreV1().Pods(tc.pod.Namespace).Get(ctx, tc.pod.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed getting updated pod: %v", err)
}
diff = cmp.Diff(tc.wantConditions, pod.Status.Conditions, cmpopts.IgnoreFields(v1.PodCondition{}, "LastTransitionTime"))
return diff == "", nil
}); err != nil {
t.Fatalf("Failed waiting for worker to sync: %v, (-want,+got):\n%s", err, diff)
}
})
}
}
// waitForCacheCount blocks until the given cache store has the desired number
// of items in it. This will return an error if the condition is not met after a
// 10 second timeout.
func waitForCacheCount(store cache.Store, n int) error {
return wait.Poll(10*time.Millisecond, 10*time.Second, func() (bool, error) {
return len(store.List()) == n, nil
})
}
func verifyEventEmitted(t *testing.T, dc *disruptionController, expectedEvent string) {
ticker := time.NewTicker(500 * time.Millisecond)
for {
select {
case e := <-dc.recorder.(*record.FakeRecorder).Events:
if strings.Contains(e, expectedEvent) {
return
}
case <-ticker.C:
t.Fatalf("Timed out: expected event not generated: %v", expectedEvent)
}
}
}
// TestMain adds klog flags to make debugging tests easier.
func TestMain(m *testing.M) {
klog.InitFlags(flag.CommandLine)
os.Exit(m.Run())
}
| pkg/controller/disruption/disruption_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00021962520258966833,
0.00017292411939706653,
0.0001612267951713875,
0.00017325289081782103,
0.000005709925062546972
] |
{
"id": 1,
"code_window": [
"\t// Start up a metrics server if requested\n",
"\tserveMetrics(s.Config.MetricsBindAddress, s.Config.Mode, s.Config.EnableProfiling, errCh)\n",
"\n",
"\t// Do platform-specific setup\n",
"\terr := s.platformSetup()\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\tnoProxyName, err := labels.NewRequirement(apis.LabelServiceProxyName, selection.DoesNotExist, nil)\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "replace",
"edit_start_line_idx": 708
} | // Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
type DFA struct {
// atnStartState is the ATN state in which this was created
atnStartState DecisionState
decision int
// states is all the DFA states. Use Map to get the old state back; Set can only
// indicate whether it is there. Go maps implement key hash collisions and so on and are very
	// good, but the DFAState is an object and can't be used directly as the key as it can in, say, Java
	// and C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
// to see if they really are the same object.
//
//
states *JStore[*DFAState, *ObjEqComparator[*DFAState]]
numstates int
s0 *DFAState
// precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
// True if the DFA is for a precedence decision and false otherwise.
precedenceDfa bool
}
func NewDFA(atnStartState DecisionState, decision int) *DFA {
dfa := &DFA{
atnStartState: atnStartState,
decision: decision,
states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst),
}
if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
dfa.precedenceDfa = true
dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false))
dfa.s0.isAcceptState = false
dfa.s0.requiresFullContext = false
}
return dfa
}
// getPrecedenceStartState gets the start state for the current precedence and
// returns the start state corresponding to the specified precedence if a start
// state exists for the specified precedence and nil otherwise. d must be a
// precedence DFA. See also isPrecedenceDfa.
func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
if !d.getPrecedenceDfa() {
panic("only precedence DFAs may contain a precedence start state")
}
// s0.edges is never nil for a precedence DFA
if precedence < 0 || precedence >= len(d.getS0().getEdges()) {
return nil
}
return d.getS0().getIthEdge(precedence)
}
// setPrecedenceStartState sets the start state for the current precedence. d
// must be a precedence DFA. See also isPrecedenceDfa.
func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
if !d.getPrecedenceDfa() {
panic("only precedence DFAs may contain a precedence start state")
}
if precedence < 0 {
return
}
// Synchronization on s0 here is ok. When the DFA is turned into a
// precedence DFA, s0 will be initialized once and not updated again. s0.edges
// is never nil for a precedence DFA.
s0 := d.getS0()
if precedence >= s0.numEdges() {
edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...)
s0.setEdges(edges)
d.setS0(s0)
}
s0.setIthEdge(precedence, startState)
}
func (d *DFA) getPrecedenceDfa() bool {
return d.precedenceDfa
}
// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
// from the current DFA configuration, then d.states is cleared, the initial
// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
// store the start states for individual precedence values if precedenceDfa is
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
if d.getPrecedenceDfa() != precedenceDfa {
d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst)
d.numstates = 0
if precedenceDfa {
precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
precedenceState.setEdges(make([]*DFAState, 0))
precedenceState.isAcceptState = false
precedenceState.requiresFullContext = false
d.setS0(precedenceState)
} else {
d.setS0(nil)
}
d.precedenceDfa = precedenceDfa
}
}
func (d *DFA) getS0() *DFAState {
return d.s0
}
func (d *DFA) setS0(s *DFAState) {
d.s0 = s
}
// sortedStates returns the states in d sorted by their state number.
func (d *DFA) sortedStates() []*DFAState {
vs := d.states.SortedSlice(func(i, j *DFAState) bool {
return i.stateNumber < j.stateNumber
})
return vs
}
func (d *DFA) String(literalNames []string, symbolicNames []string) string {
if d.getS0() == nil {
return ""
}
return NewDFASerializer(d, literalNames, symbolicNames).String()
}
func (d *DFA) ToLexerString() string {
if d.getS0() == nil {
return ""
}
return NewLexerDFASerializer(d).String()
}
| vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00028943640063516796,
0.00018207376706413925,
0.00015951733803376555,
0.0001754020486259833,
0.000029254060791572556
] |
{
"id": 1,
"code_window": [
"\t// Start up a metrics server if requested\n",
"\tserveMetrics(s.Config.MetricsBindAddress, s.Config.Mode, s.Config.EnableProfiling, errCh)\n",
"\n",
"\t// Do platform-specific setup\n",
"\terr := s.platformSetup()\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\tnoProxyName, err := labels.NewRequirement(apis.LabelServiceProxyName, selection.DoesNotExist, nil)\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "replace",
"edit_start_line_idx": 708
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1beta1
import (
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use
// with apply.
type RollingUpdateDeploymentApplyConfiguration struct {
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
}
// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with
// apply.
func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration {
return &RollingUpdateDeploymentApplyConfiguration{}
}
// WithMaxUnavailable sets the MaxUnavailable field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the MaxUnavailable field is set to the value of the last call.
func (b *RollingUpdateDeploymentApplyConfiguration) WithMaxUnavailable(value intstr.IntOrString) *RollingUpdateDeploymentApplyConfiguration {
b.MaxUnavailable = &value
return b
}
// WithMaxSurge sets the MaxSurge field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the MaxSurge field is set to the value of the last call.
func (b *RollingUpdateDeploymentApplyConfiguration) WithMaxSurge(value intstr.IntOrString) *RollingUpdateDeploymentApplyConfiguration {
b.MaxSurge = &value
return b
}
| staging/src/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00020682337344624102,
0.00017763498181011528,
0.00016447000962216407,
0.00017535976076032966,
0.000014291863408288918
] |
{
"id": 2,
"code_window": [
"// timeoutForNodePodCIDR is the time to wait for allocators to assign a PodCIDR to the\n",
"// node after it is registered.\n",
"var timeoutForNodePodCIDR = 5 * time.Minute\n",
"\n",
"func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {\n",
"\tif config.Mode == \"\" {\n",
"\t\tklog.InfoS(\"Using iptables proxy\")\n",
"\t\tconfig.Mode = proxyconfigapi.ProxyModeIPTables\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformApplyDefaults is called after parsing command-line flags and/or reading the\n",
"// config file, to apply platform-specific default values to config.\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "add",
"edit_start_line_idx": 65
} | //go:build !windows
// +build !windows
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"fmt"
"net"
"os"
"path/filepath"
"reflect"
goruntime "runtime"
"strings"
"testing"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
clientsetfake "k8s.io/client-go/kubernetes/fake"
clientgotesting "k8s.io/client-go/testing"
proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utiliptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
netutils "k8s.io/utils/net"
"k8s.io/utils/pointer"
)
func Test_platformApplyDefaults(t *testing.T) {
testCases := []struct {
name string
mode proxyconfigapi.ProxyMode
expectedMode proxyconfigapi.ProxyMode
detectLocal proxyconfigapi.LocalMode
expectedDetectLocal proxyconfigapi.LocalMode
}{
{
name: "defaults",
mode: "",
expectedMode: proxyconfigapi.ProxyModeIPTables,
detectLocal: "",
expectedDetectLocal: proxyconfigapi.LocalModeClusterCIDR,
},
{
name: "explicit",
mode: proxyconfigapi.ProxyModeIPTables,
expectedMode: proxyconfigapi.ProxyModeIPTables,
detectLocal: proxyconfigapi.LocalModeClusterCIDR,
expectedDetectLocal: proxyconfigapi.LocalModeClusterCIDR,
},
{
name: "override mode",
mode: "ipvs",
expectedMode: proxyconfigapi.ProxyModeIPVS,
detectLocal: "",
expectedDetectLocal: proxyconfigapi.LocalModeClusterCIDR,
},
{
name: "override detect-local",
mode: "",
expectedMode: proxyconfigapi.ProxyModeIPTables,
detectLocal: "NodeCIDR",
expectedDetectLocal: proxyconfigapi.LocalModeNodeCIDR,
},
{
name: "override both",
mode: "ipvs",
expectedMode: proxyconfigapi.ProxyModeIPVS,
detectLocal: "NodeCIDR",
expectedDetectLocal: proxyconfigapi.LocalModeNodeCIDR,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
options := NewOptions()
config := &proxyconfigapi.KubeProxyConfiguration{
Mode: tc.mode,
DetectLocalMode: tc.detectLocal,
}
options.platformApplyDefaults(config)
if config.Mode != tc.expectedMode {
t.Fatalf("expected mode: %s, but got: %s", tc.expectedMode, config.Mode)
}
if config.DetectLocalMode != tc.expectedDetectLocal {
t.Fatalf("expected detect-local: %s, but got: %s", tc.expectedDetectLocal, config.DetectLocalMode)
}
})
}
}
func Test_getLocalDetector(t *testing.T) {
cases := []struct {
mode proxyconfigapi.LocalMode
config *proxyconfigapi.KubeProxyConfiguration
ipt utiliptables.Interface
expected proxyutiliptables.LocalTrafficDetector
nodePodCIDRs []string
errExpected bool
}{
// LocalModeClusterCIDR
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14"},
ipt: utiliptablestest.NewFake(),
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/14", utiliptablestest.NewFake())),
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64"},
ipt: utiliptablestest.NewIPv6Fake(),
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/64", utiliptablestest.NewIPv6Fake())),
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14"},
ipt: utiliptablestest.NewIPv6Fake(),
expected: nil,
errExpected: true,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64"},
ipt: utiliptablestest.NewFake(),
expected: nil,
errExpected: true,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: ""},
ipt: utiliptablestest.NewFake(),
expected: proxyutiliptables.NewNoOpLocalDetector(),
errExpected: false,
},
// LocalModeNodeCIDR
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14"},
ipt: utiliptablestest.NewFake(),
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/24", utiliptablestest.NewFake())),
nodePodCIDRs: []string{"10.0.0.0/24"},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64"},
ipt: utiliptablestest.NewIPv6Fake(),
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/96", utiliptablestest.NewIPv6Fake())),
nodePodCIDRs: []string{"2002::1234:abcd:ffff:c0a8:101/96"},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14"},
ipt: utiliptablestest.NewIPv6Fake(),
expected: nil,
nodePodCIDRs: []string{"10.0.0.0/24"},
errExpected: true,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64"},
ipt: utiliptablestest.NewFake(),
expected: nil,
nodePodCIDRs: []string{"2002::1234:abcd:ffff:c0a8:101/96"},
errExpected: true,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: ""},
ipt: utiliptablestest.NewFake(),
expected: proxyutiliptables.NewNoOpLocalDetector(),
nodePodCIDRs: []string{},
errExpected: false,
},
// unknown mode
{
mode: proxyconfigapi.LocalMode("abcd"),
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14"},
ipt: utiliptablestest.NewFake(),
expected: proxyutiliptables.NewNoOpLocalDetector(),
errExpected: false,
},
// LocalModeBridgeInterface
{
mode: proxyconfigapi.LocalModeBridgeInterface,
config: &proxyconfigapi.KubeProxyConfiguration{
DetectLocal: proxyconfigapi.DetectLocalConfiguration{BridgeInterface: "eth"},
},
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByBridgeInterface("eth")),
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeBridgeInterface,
config: &proxyconfigapi.KubeProxyConfiguration{
DetectLocal: proxyconfigapi.DetectLocalConfiguration{BridgeInterface: "1234567890123456789"},
},
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByBridgeInterface("1234567890123456789")),
errExpected: false,
},
// LocalModeInterfaceNamePrefix
{
mode: proxyconfigapi.LocalModeInterfaceNamePrefix,
config: &proxyconfigapi.KubeProxyConfiguration{
DetectLocal: proxyconfigapi.DetectLocalConfiguration{InterfaceNamePrefix: "eth"},
},
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByInterfaceNamePrefix("eth")),
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeInterfaceNamePrefix,
config: &proxyconfigapi.KubeProxyConfiguration{
DetectLocal: proxyconfigapi.DetectLocalConfiguration{InterfaceNamePrefix: "1234567890123456789"},
},
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByInterfaceNamePrefix("1234567890123456789")),
errExpected: false,
},
}
for i, c := range cases {
r, err := getLocalDetector(c.mode, c.config, c.ipt, c.nodePodCIDRs)
if c.errExpected {
if err == nil {
t.Errorf("Case[%d] Expected error, but succeeded with %v", i, r)
}
continue
}
if err != nil {
t.Errorf("Case[%d] Error resolving detect-local: %v", i, err)
continue
}
if !reflect.DeepEqual(r, c.expected) {
t.Errorf("Case[%d] Unexpected detect-local implementation, expected: %q, got: %q", i, c.expected, r)
}
}
}
func Test_getDualStackLocalDetectorTuple(t *testing.T) {
cases := []struct {
mode proxyconfigapi.LocalMode
config *proxyconfigapi.KubeProxyConfiguration
ipt [2]utiliptables.Interface
expected [2]proxyutiliptables.LocalTrafficDetector
nodePodCIDRs []string
errExpected bool
}{
// LocalModeClusterCIDR
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14,2002::1234:abcd:ffff:c0a8:101/64"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: resolveDualStackLocalDetectors(t)(
proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/14", utiliptablestest.NewFake()))(
proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/64", utiliptablestest.NewIPv6Fake())),
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64,10.0.0.0/14"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: resolveDualStackLocalDetectors(t)(
proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/14", utiliptablestest.NewFake()))(
proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/64", utiliptablestest.NewIPv6Fake())),
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: [2]proxyutiliptables.LocalTrafficDetector{
resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/14", utiliptablestest.NewFake())),
proxyutiliptables.NewNoOpLocalDetector()},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: [2]proxyutiliptables.LocalTrafficDetector{
proxyutiliptables.NewNoOpLocalDetector(),
resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/64", utiliptablestest.NewIPv6Fake()))},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: ""},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: [2]proxyutiliptables.LocalTrafficDetector{proxyutiliptables.NewNoOpLocalDetector(), proxyutiliptables.NewNoOpLocalDetector()},
errExpected: false,
},
// LocalModeNodeCIDR
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14,2002::1234:abcd:ffff:c0a8:101/64"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: resolveDualStackLocalDetectors(t)(
proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/24", utiliptablestest.NewFake()))(
proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/96", utiliptablestest.NewIPv6Fake())),
nodePodCIDRs: []string{"10.0.0.0/24", "2002::1234:abcd:ffff:c0a8:101/96"},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64,10.0.0.0/14"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: resolveDualStackLocalDetectors(t)(
proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/24", utiliptablestest.NewFake()))(
proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/96", utiliptablestest.NewIPv6Fake())),
nodePodCIDRs: []string{"2002::1234:abcd:ffff:c0a8:101/96", "10.0.0.0/24"},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: [2]proxyutiliptables.LocalTrafficDetector{
resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/24", utiliptablestest.NewFake())),
proxyutiliptables.NewNoOpLocalDetector()},
nodePodCIDRs: []string{"10.0.0.0/24"},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: [2]proxyutiliptables.LocalTrafficDetector{
proxyutiliptables.NewNoOpLocalDetector(),
resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/96", utiliptablestest.NewIPv6Fake()))},
nodePodCIDRs: []string{"2002::1234:abcd:ffff:c0a8:101/96"},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: ""},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: [2]proxyutiliptables.LocalTrafficDetector{proxyutiliptables.NewNoOpLocalDetector(), proxyutiliptables.NewNoOpLocalDetector()},
nodePodCIDRs: []string{},
errExpected: false,
},
// LocalModeBridgeInterface
{
mode: proxyconfigapi.LocalModeBridgeInterface,
config: &proxyconfigapi.KubeProxyConfiguration{
DetectLocal: proxyconfigapi.DetectLocalConfiguration{BridgeInterface: "eth"},
},
expected: resolveDualStackLocalDetectors(t)(
proxyutiliptables.NewDetectLocalByBridgeInterface("eth"))(
proxyutiliptables.NewDetectLocalByBridgeInterface("eth")),
errExpected: false,
},
// LocalModeInterfaceNamePrefix
{
mode: proxyconfigapi.LocalModeInterfaceNamePrefix,
config: &proxyconfigapi.KubeProxyConfiguration{
DetectLocal: proxyconfigapi.DetectLocalConfiguration{InterfaceNamePrefix: "veth"},
},
expected: resolveDualStackLocalDetectors(t)(
proxyutiliptables.NewDetectLocalByInterfaceNamePrefix("veth"))(
proxyutiliptables.NewDetectLocalByInterfaceNamePrefix("veth")),
errExpected: false,
},
}
for i, c := range cases {
r, err := getDualStackLocalDetectorTuple(c.mode, c.config, c.ipt, c.nodePodCIDRs)
if c.errExpected {
if err == nil {
t.Errorf("Case[%d] expected error, but succeeded with %q", i, r)
}
continue
}
if err != nil {
t.Errorf("Case[%d] Error resolving detect-local: %v", i, err)
continue
}
if !reflect.DeepEqual(r, c.expected) {
t.Errorf("Case[%d] Unexpected detect-local implementation, expected: %q, got: %q", i, c.expected, r)
}
}
}
func makeNodeWithPodCIDRs(cidrs ...string) *v1.Node {
if len(cidrs) == 0 {
return &v1.Node{}
}
return &v1.Node{
Spec: v1.NodeSpec{
PodCIDR: cidrs[0],
PodCIDRs: cidrs,
},
}
}
func resolveLocalDetector(t *testing.T) func(proxyutiliptables.LocalTrafficDetector, error) proxyutiliptables.LocalTrafficDetector {
return func(localDetector proxyutiliptables.LocalTrafficDetector, err error) proxyutiliptables.LocalTrafficDetector {
t.Helper()
if err != nil {
t.Fatalf("Error resolving detect-local: %v", err)
}
return localDetector
}
}
func resolveDualStackLocalDetectors(t *testing.T) func(localDetector proxyutiliptables.LocalTrafficDetector, err1 error) func(proxyutiliptables.LocalTrafficDetector, error) [2]proxyutiliptables.LocalTrafficDetector {
return func(localDetector proxyutiliptables.LocalTrafficDetector, err error) func(proxyutiliptables.LocalTrafficDetector, error) [2]proxyutiliptables.LocalTrafficDetector {
t.Helper()
if err != nil {
t.Fatalf("Error resolving dual stack detect-local: %v", err)
}
return func(otherLocalDetector proxyutiliptables.LocalTrafficDetector, err1 error) [2]proxyutiliptables.LocalTrafficDetector {
t.Helper()
if err1 != nil {
t.Fatalf("Error resolving dual stack detect-local: %v", err)
}
return [2]proxyutiliptables.LocalTrafficDetector{localDetector, otherLocalDetector}
}
}
}
func TestConfigChange(t *testing.T) {
setUp := func() (*os.File, string, error) {
tempDir, err := os.MkdirTemp("", "kubeproxy-config-change")
if err != nil {
return nil, "", fmt.Errorf("unable to create temporary directory: %v", err)
}
fullPath := filepath.Join(tempDir, "kube-proxy-config")
file, err := os.Create(fullPath)
if err != nil {
return nil, "", fmt.Errorf("unexpected error when creating temp file: %v", err)
}
_, err = file.WriteString(`apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
bindAddressHardFail: false
clientConnection:
acceptContentTypes: ""
burst: 10
contentType: application/vnd.kubernetes.protobuf
kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
qps: 5
clusterCIDR: 10.244.0.0/16
configSyncPeriod: 15m0s
conntrack:
maxPerCore: 32768
min: 131072
tcpCloseWaitTimeout: 1h0m0s
tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
masqueradeAll: false
masqueradeBit: 14
minSyncPeriod: 0s
syncPeriod: 30s
ipvs:
excludeCIDRs: null
minSyncPeriod: 0s
scheduler: ""
syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: ""
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
detectLocalMode: "BridgeInterface"`)
if err != nil {
return nil, "", fmt.Errorf("unexpected error when writing content to temp kube-proxy config file: %v", err)
}
return file, tempDir, nil
}
tearDown := func(file *os.File, tempDir string) {
file.Close()
os.RemoveAll(tempDir)
}
testCases := []struct {
name string
proxyServer proxyRun
append bool
expectedErr string
}{
{
name: "update config file",
proxyServer: new(fakeProxyServerLongRun),
append: true,
expectedErr: "content of the proxy server's configuration file was updated",
},
{
name: "fake error",
proxyServer: new(fakeProxyServerError),
expectedErr: "mocking error from ProxyServer.Run()",
},
}
for _, tc := range testCases {
file, tempDir, err := setUp()
if err != nil {
t.Fatalf("unexpected error when setting up environment: %v", err)
}
opt := NewOptions()
opt.ConfigFile = file.Name()
err = opt.Complete()
if err != nil {
t.Fatal(err)
}
opt.proxyServer = tc.proxyServer
errCh := make(chan error, 1)
go func() {
errCh <- opt.runLoop()
}()
if tc.append {
file.WriteString("append fake content")
}
select {
case err := <-errCh:
if err != nil {
if !strings.Contains(err.Error(), tc.expectedErr) {
t.Errorf("[%s] Expected error containing %v, got %v", tc.name, tc.expectedErr, err)
}
}
case <-time.After(10 * time.Second):
t.Errorf("[%s] Timeout: unable to get any events or internal timeout.", tc.name)
}
tearDown(file, tempDir)
}
}
func Test_waitForPodCIDR(t *testing.T) {
expected := []string{"192.168.0.0/24", "fd00:1:2::/64"}
nodeName := "test-node"
oldNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
ResourceVersion: "1000",
},
Spec: v1.NodeSpec{
PodCIDR: "10.0.0.0/24",
PodCIDRs: []string{"10.0.0.0/24", "2001:db2:1/64"},
},
}
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
ResourceVersion: "1",
},
}
updatedNode := node.DeepCopy()
updatedNode.Spec.PodCIDRs = expected
updatedNode.Spec.PodCIDR = expected[0]
// start with the new node
client := clientsetfake.NewSimpleClientset()
client.AddReactor("list", "nodes", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
obj := &v1.NodeList{}
return true, obj, nil
})
fakeWatch := watch.NewFake()
client.PrependWatchReactor("nodes", clientgotesting.DefaultWatchReactor(fakeWatch, nil))
go func() {
fakeWatch.Add(node)
// receive a delete event for the old node
fakeWatch.Delete(oldNode)
// set the PodCIDRs on the new node
fakeWatch.Modify(updatedNode)
}()
got, err := waitForPodCIDR(client, node.Name)
if err != nil {
t.Errorf("waitForPodCIDR() unexpected error %v", err)
return
}
if !reflect.DeepEqual(got.Spec.PodCIDRs, expected) {
t.Errorf("waitForPodCIDR() got %v expected to be %v ", got.Spec.PodCIDRs, expected)
}
}
func TestGetConntrackMax(t *testing.T) {
ncores := goruntime.NumCPU()
testCases := []struct {
min int32
maxPerCore int32
expected int
err string
}{
{
expected: 0,
},
{
maxPerCore: 67890, // use this if Max is 0
min: 1, // avoid 0 default
expected: 67890 * ncores,
},
{
maxPerCore: 1, // ensure that Min is considered
min: 123456,
expected: 123456,
},
{
maxPerCore: 0, // leave system setting
min: 123456,
expected: 0,
},
}
for i, tc := range testCases {
cfg := proxyconfigapi.KubeProxyConntrackConfiguration{
Min: pointer.Int32(tc.min),
MaxPerCore: pointer.Int32(tc.maxPerCore),
}
x, e := getConntrackMax(cfg)
if e != nil {
if tc.err == "" {
t.Errorf("[%d] unexpected error: %v", i, e)
} else if !strings.Contains(e.Error(), tc.err) {
t.Errorf("[%d] expected an error containing %q: %v", i, tc.err, e)
}
} else if x != tc.expected {
t.Errorf("[%d] expected %d, got %d", i, tc.expected, x)
}
}
}
func TestProxyServer_createProxier(t *testing.T) {
tests := []struct {
name string
node *v1.Node
config *proxyconfigapi.KubeProxyConfiguration
wantPodCIDRs []string
}{
{
name: "LocalModeNodeCIDR store the node PodCIDRs obtained",
node: makeNodeWithPodCIDRs("10.0.0.0/24"),
config: &proxyconfigapi.KubeProxyConfiguration{DetectLocalMode: proxyconfigapi.LocalModeNodeCIDR},
wantPodCIDRs: []string{"10.0.0.0/24"},
},
{
name: "LocalModeNodeCIDR store the node PodCIDRs obtained dual stack",
node: makeNodeWithPodCIDRs("10.0.0.0/24", "2001:db2:1/64"),
config: &proxyconfigapi.KubeProxyConfiguration{DetectLocalMode: proxyconfigapi.LocalModeNodeCIDR},
wantPodCIDRs: []string{"10.0.0.0/24", "2001:db2:1/64"},
},
{
name: "LocalModeClusterCIDR does not get the node PodCIDRs",
node: makeNodeWithPodCIDRs("10.0.0.0/24", "2001:db2:1/64"),
config: &proxyconfigapi.KubeProxyConfiguration{DetectLocalMode: proxyconfigapi.LocalModeClusterCIDR},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset(tt.node)
s := &ProxyServer{
Config: tt.config,
Client: client,
Hostname: "nodename",
NodeIPs: map[v1.IPFamily]net.IP{
v1.IPv4Protocol: netutils.ParseIPSloppy("127.0.0.1"),
v1.IPv6Protocol: net.IPv6zero,
},
}
_, err := s.createProxier(tt.config)
// TODO: mock the exec.Interface to not fail probing iptables
if (err != nil) && !strings.Contains(err.Error(), "iptables is not supported for primary IP family") {
t.Errorf("ProxyServer.createProxier() error = %v", err)
return
}
if !reflect.DeepEqual(s.podCIDRs, tt.wantPodCIDRs) {
t.Errorf("Expected PodCIDRs %v got %v", tt.wantPodCIDRs, s.podCIDRs)
}
})
}
}
| cmd/kube-proxy/app/server_others_test.go | 1 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.9918319582939148,
0.08048843592405319,
0.00017081249097827822,
0.0036335820332169533,
0.2228858470916748
] |
{
"id": 2,
"code_window": [
"// timeoutForNodePodCIDR is the time to wait for allocators to assign a PodCIDR to the\n",
"// node after it is registered.\n",
"var timeoutForNodePodCIDR = 5 * time.Minute\n",
"\n",
"func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {\n",
"\tif config.Mode == \"\" {\n",
"\t\tklog.InfoS(\"Using iptables proxy\")\n",
"\t\tconfig.Mode = proxyconfigapi.ProxyModeIPTables\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformApplyDefaults is called after parsing command-line flags and/or reading the\n",
"// config file, to apply platform-specific default values to config.\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "add",
"edit_start_line_idx": 65
} | // Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !js || wasm
// +build !js wasm
package prometheus
import "os"
func getPIDFn() func() (int, error) {
pid := os.Getpid()
return func() (int, error) {
return pid, nil
}
}
| vendor/github.com/prometheus/client_golang/prometheus/get_pid.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.0016591295134276152,
0.0006684131803922355,
0.00017141702119261026,
0.00017469302110839635,
0.000700543518178165
] |
{
"id": 2,
"code_window": [
"// timeoutForNodePodCIDR is the time to wait for allocators to assign a PodCIDR to the\n",
"// node after it is registered.\n",
"var timeoutForNodePodCIDR = 5 * time.Minute\n",
"\n",
"func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {\n",
"\tif config.Mode == \"\" {\n",
"\t\tklog.InfoS(\"Using iptables proxy\")\n",
"\t\tconfig.Mode = proxyconfigapi.ProxyModeIPTables\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformApplyDefaults is called after parsing command-line flags and/or reading the\n",
"// config file, to apply platform-specific default values to config.\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "add",
"edit_start_line_idx": 65
} | //go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Data.DeepCopyInto(&out.Data)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision.
func (in *ControllerRevision) DeepCopy() *ControllerRevision {
if in == nil {
return nil
}
out := new(ControllerRevision)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ControllerRevision) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ControllerRevision, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList.
func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList {
if in == nil {
return nil
}
out := new(ControllerRevisionList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ControllerRevisionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Deployment) DeepCopyInto(out *Deployment) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment.
func (in *Deployment) DeepCopy() *Deployment {
if in == nil {
return nil
}
out := new(Deployment)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Deployment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
*out = *in
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
if in == nil {
return nil
}
out := new(DeploymentCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Deployment, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList.
func (in *DeploymentList) DeepCopy() *DeploymentList {
if in == nil {
return nil
}
out := new(DeploymentList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.UpdatedAnnotations != nil {
in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.RollbackTo = in.RollbackTo
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback.
func (in *DeploymentRollback) DeepCopy() *DeploymentRollback {
if in == nil {
return nil
}
out := new(DeploymentRollback)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentRollback) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
*out = *in
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int32)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
in.Strategy.DeepCopyInto(&out.Strategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
*out = new(int32)
**out = **in
}
if in.RollbackTo != nil {
in, out := &in.RollbackTo, &out.RollbackTo
*out = new(RollbackConfig)
**out = **in
}
if in.ProgressDeadlineSeconds != nil {
in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
if in == nil {
return nil
}
out := new(DeploymentSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DeploymentCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus.
func (in *DeploymentStatus) DeepCopy() *DeploymentStatus {
if in == nil {
return nil
}
out := new(DeploymentStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdateDeployment)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
if in == nil {
return nil
}
out := new(DeploymentStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig.
func (in *RollbackConfig) DeepCopy() *RollbackConfig {
if in == nil {
return nil
}
out := new(RollbackConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) {
*out = *in
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
if in.MaxSurge != nil {
in, out := &in.MaxSurge, &out.MaxSurge
*out = new(intstr.IntOrString)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment.
func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment {
if in == nil {
return nil
}
out := new(RollingUpdateDeployment)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) {
*out = *in
if in.Partition != nil {
in, out := &in.Partition, &out.Partition
*out = new(int32)
**out = **in
}
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy.
func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy {
if in == nil {
return nil
}
out := new(RollingUpdateStatefulSetStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scale) DeepCopyInto(out *Scale) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale.
func (in *Scale) DeepCopy() *Scale {
if in == nil {
return nil
}
out := new(Scale)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Scale) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec.
func (in *ScaleSpec) DeepCopy() *ScaleSpec {
if in == nil {
return nil
}
out := new(ScaleSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus.
func (in *ScaleStatus) DeepCopy() *ScaleStatus {
if in == nil {
return nil
}
out := new(ScaleStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSet) DeepCopyInto(out *StatefulSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet.
func (in *StatefulSet) DeepCopy() *StatefulSet {
if in == nil {
return nil
}
out := new(StatefulSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StatefulSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition.
func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition {
if in == nil {
return nil
}
out := new(StatefulSetCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]StatefulSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList.
func (in *StatefulSetList) DeepCopy() *StatefulSetList {
if in == nil {
return nil
}
out := new(StatefulSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StatefulSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetOrdinals) DeepCopyInto(out *StatefulSetOrdinals) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetOrdinals.
func (in *StatefulSetOrdinals) DeepCopy() *StatefulSetOrdinals {
if in == nil {
return nil
}
out := new(StatefulSetOrdinals)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetPersistentVolumeClaimRetentionPolicy.
func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopy() *StatefulSetPersistentVolumeClaimRetentionPolicy {
if in == nil {
return nil
}
out := new(StatefulSetPersistentVolumeClaimRetentionPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
*out = *in
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int32)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
if in.VolumeClaimTemplates != nil {
in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
*out = make([]corev1.PersistentVolumeClaim, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
*out = new(int32)
**out = **in
}
if in.PersistentVolumeClaimRetentionPolicy != nil {
in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy
*out = new(StatefulSetPersistentVolumeClaimRetentionPolicy)
**out = **in
}
if in.Ordinals != nil {
in, out := &in.Ordinals, &out.Ordinals
*out = new(StatefulSetOrdinals)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec.
func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec {
if in == nil {
return nil
}
out := new(StatefulSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) {
*out = *in
if in.ObservedGeneration != nil {
in, out := &in.ObservedGeneration, &out.ObservedGeneration
*out = new(int64)
**out = **in
}
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
*out = new(int32)
**out = **in
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]StatefulSetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus.
func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus {
if in == nil {
return nil
}
out := new(StatefulSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdateStatefulSetStrategy)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy.
func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy {
if in == nil {
return nil
}
out := new(StatefulSetUpdateStrategy)
in.DeepCopyInto(out)
return out
}
| staging/src/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.0011464423732832074,
0.0003664148098323494,
0.00017049146117642522,
0.00026854631141759455,
0.00023580687411595136
] |
{
"id": 2,
"code_window": [
"// timeoutForNodePodCIDR is the time to wait for allocators to assign a PodCIDR to the\n",
"// node after it is registered.\n",
"var timeoutForNodePodCIDR = 5 * time.Minute\n",
"\n",
"func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {\n",
"\tif config.Mode == \"\" {\n",
"\t\tklog.InfoS(\"Using iptables proxy\")\n",
"\t\tconfig.Mode = proxyconfigapi.ProxyModeIPTables\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformApplyDefaults is called after parsing command-line flags and/or reading the\n",
"// config file, to apply platform-specific default values to config.\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "add",
"edit_start_line_idx": 65
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
)
const (
// MinFileSize represents minimum file size (1 MiB) for testing
MinFileSize = 1 * e2evolume.MiB
// FileSizeSmall represents small file size (1 MiB) for testing
FileSizeSmall = 1 * e2evolume.MiB
// FileSizeMedium represents medium file size (100 MiB) for testing
FileSizeMedium = 100 * e2evolume.MiB
// FileSizeLarge represents large file size (1 GiB) for testing
FileSizeLarge = 1 * e2evolume.GiB
)
// TestVolType represents a volume type to be tested in a TestSuite
type TestVolType string
var (
// InlineVolume represents a volume type that is used inline in volumeSource
InlineVolume TestVolType = "InlineVolume"
// PreprovisionedPV represents a volume type for pre-provisioned Persistent Volume
PreprovisionedPV TestVolType = "PreprovisionedPV"
// DynamicPV represents a volume type for dynamic provisioned Persistent Volume
DynamicPV TestVolType = "DynamicPV"
// CSIInlineVolume represents a volume type that is defined inline and provided by a CSI driver.
CSIInlineVolume TestVolType = "CSIInlineVolume"
// GenericEphemeralVolume represents a volume type that is defined inline and provisioned through a PVC.
GenericEphemeralVolume TestVolType = "GenericEphemeralVolume"
)
// TestSnapshotType represents a snapshot type to be tested in a TestSuite
type TestSnapshotType string
var (
// DynamicCreatedSnapshot represents a snapshot type for dynamic created snapshot
DynamicCreatedSnapshot TestSnapshotType = "DynamicSnapshot"
// PreprovisionedCreatedSnapshot represents a snapshot type for pre-provisioned snapshot
PreprovisionedCreatedSnapshot TestSnapshotType = "PreprovisionedSnapshot"
)
// TestSnapshotDeletionPolicy represents the deletion policy of the snapshot class
type TestSnapshotDeletionPolicy string
var (
// DeleteSnapshot represents delete policy
DeleteSnapshot TestSnapshotDeletionPolicy = "Delete"
// RetainSnapshot represents retain policy
RetainSnapshot TestSnapshotDeletionPolicy = "Retain"
)
func (t TestSnapshotDeletionPolicy) String() string {
return string(t)
}
// TestPattern represents a combination of parameters to be tested in a TestSuite
type TestPattern struct {
Name string // Name of TestPattern
FeatureTag string // featureTag for the TestSuite
VolType TestVolType // Volume type of the volume
FsType string // Fstype of the volume
VolMode v1.PersistentVolumeMode // PersistentVolumeMode of the volume
SnapshotType TestSnapshotType // Snapshot type of the snapshot
SnapshotDeletionPolicy TestSnapshotDeletionPolicy // Deletion policy of the snapshot class
BindingMode storagev1.VolumeBindingMode // VolumeBindingMode of the volume
AllowExpansion bool // AllowVolumeExpansion flag of the StorageClass
}
var (
// Definitions for default fsType
// DefaultFsInlineVolume is TestPattern for "Inline-volume (default fs)"
DefaultFsInlineVolume = TestPattern{
Name: "Inline-volume (default fs)",
VolType: InlineVolume,
}
// DefaultFsCSIEphemeralVolume is TestPattern for "CSI Ephemeral-volume (default fs)"
DefaultFsCSIEphemeralVolume = TestPattern{
Name: "CSI Ephemeral-volume (default fs)",
VolType: CSIInlineVolume,
}
// DefaultFsGenericEphemeralVolume is TestPattern for "Generic Ephemeral-volume (default fs)"
DefaultFsGenericEphemeralVolume = TestPattern{
Name: "Generic Ephemeral-volume (default fs)",
VolType: GenericEphemeralVolume,
AllowExpansion: true,
}
// DefaultFsPreprovisionedPV is TestPattern for "Pre-provisioned PV (default fs)"
DefaultFsPreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (default fs)",
VolType: PreprovisionedPV,
}
// DefaultFsDynamicPV is TestPattern for "Dynamic PV (default fs)"
DefaultFsDynamicPV = TestPattern{
Name: "Dynamic PV (default fs)",
VolType: DynamicPV,
SnapshotType: DynamicCreatedSnapshot,
SnapshotDeletionPolicy: DeleteSnapshot,
}
// Definitions for ext3
// Ext3InlineVolume is TestPattern for "Inline-volume (ext3)"
Ext3InlineVolume = TestPattern{
Name: "Inline-volume (ext3)",
VolType: InlineVolume,
FsType: "ext3",
}
// Ext3CSIEphemeralVolume is TestPattern for "CSI Ephemeral-volume (ext3)"
Ext3CSIEphemeralVolume = TestPattern{
Name: "CSI Ephemeral-volume (ext3)",
VolType: CSIInlineVolume,
FsType: "ext3",
}
// Ext3GenericEphemeralVolume is TestPattern for "Generic Ephemeral-volume (ext3)"
Ext3GenericEphemeralVolume = TestPattern{
Name: "Generic Ephemeral-volume (ext3)",
VolType: GenericEphemeralVolume,
FsType: "ext3",
}
// Ext3PreprovisionedPV is TestPattern for "Pre-provisioned PV (ext3)"
Ext3PreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (ext3)",
VolType: PreprovisionedPV,
FsType: "ext3",
}
// Ext3DynamicPV is TestPattern for "Dynamic PV (ext3)"
Ext3DynamicPV = TestPattern{
Name: "Dynamic PV (ext3)",
VolType: DynamicPV,
FsType: "ext3",
}
// Definitions for ext4
// Ext4InlineVolume is TestPattern for "Inline-volume (ext4)"
Ext4InlineVolume = TestPattern{
Name: "Inline-volume (ext4)",
VolType: InlineVolume,
FsType: "ext4",
}
// Ext4CSIEphemeralVolume is TestPattern for "CSI Ephemeral-volume (ext4)"
Ext4CSIEphemeralVolume = TestPattern{
Name: "CSI Ephemeral-volume (ext4)",
VolType: CSIInlineVolume,
FsType: "ext4",
}
// Ext4GenericEphemeralVolume is TestPattern for "Generic Ephemeral-volume (ext4)"
Ext4GenericEphemeralVolume = TestPattern{
Name: "Generic Ephemeral-volume (ext4)",
VolType: GenericEphemeralVolume,
FsType: "ext4",
}
// Ext4PreprovisionedPV is TestPattern for "Pre-provisioned PV (ext4)"
Ext4PreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (ext4)",
VolType: PreprovisionedPV,
FsType: "ext4",
}
// Ext4DynamicPV is TestPattern for "Dynamic PV (ext4)"
Ext4DynamicPV = TestPattern{
Name: "Dynamic PV (ext4)",
VolType: DynamicPV,
FsType: "ext4",
SnapshotType: DynamicCreatedSnapshot,
SnapshotDeletionPolicy: DeleteSnapshot,
}
// Definitions for xfs
// XfsInlineVolume is TestPattern for "Inline-volume (xfs)"
XfsInlineVolume = TestPattern{
Name: "Inline-volume (xfs)",
VolType: InlineVolume,
FsType: "xfs",
FeatureTag: "[Slow]",
}
// XfsCSIEphemeralVolume is TestPattern for "CSI Ephemeral-volume (xfs)"
XfsCSIEphemeralVolume = TestPattern{
Name: "CSI Ephemeral-volume (xfs)",
VolType: CSIInlineVolume,
FsType: "xfs",
FeatureTag: "[Slow]",
}
// XfsGenericEphemeralVolume is TestPattern for "Generic Ephemeral-volume (xfs)"
XfsGenericEphemeralVolume = TestPattern{
Name: "Generic Ephemeral-volume (xfs)",
VolType: GenericEphemeralVolume,
FsType: "xfs",
FeatureTag: "[Slow]",
}
// XfsPreprovisionedPV is TestPattern for "Pre-provisioned PV (xfs)"
XfsPreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (xfs)",
VolType: PreprovisionedPV,
FsType: "xfs",
FeatureTag: "[Slow]",
}
// XfsDynamicPV is TestPattern for "Dynamic PV (xfs)"
XfsDynamicPV = TestPattern{
Name: "Dynamic PV (xfs)",
VolType: DynamicPV,
FsType: "xfs",
FeatureTag: "[Slow]",
SnapshotType: DynamicCreatedSnapshot,
SnapshotDeletionPolicy: DeleteSnapshot,
}
// Definitions for ntfs
// NtfsInlineVolume is TestPattern for "Inline-volume (ntfs)"
NtfsInlineVolume = TestPattern{
Name: "Inline-volume (ntfs)",
VolType: InlineVolume,
FsType: "ntfs",
FeatureTag: "[Feature:Windows]",
}
// NtfsCSIEphemeralVolume is TestPattern for "CSI Ephemeral-volume (ntfs)"
NtfsCSIEphemeralVolume = TestPattern{
Name: "CSI Ephemeral-volume (ntfs) [alpha]",
VolType: CSIInlineVolume,
FsType: "ntfs",
FeatureTag: "[Feature:Windows]",
}
// NtfsGenericEphemeralVolume is TestPattern for "Generic Ephemeral-volume (ntfs)"
NtfsGenericEphemeralVolume = TestPattern{
Name: "Generic Ephemeral-volume (ntfs)",
VolType: GenericEphemeralVolume,
FsType: "ntfs",
FeatureTag: "[Feature:Windows]",
}
// NtfsPreprovisionedPV is TestPattern for "Pre-provisioned PV (ntfs)"
NtfsPreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (ntfs)",
VolType: PreprovisionedPV,
FsType: "ntfs",
FeatureTag: "[Feature:Windows]",
}
// NtfsDynamicPV is TestPattern for "Dynamic PV (ntfs)"
NtfsDynamicPV = TestPattern{
Name: "Dynamic PV (ntfs)",
VolType: DynamicPV,
FsType: "ntfs",
FeatureTag: "[Feature:Windows]",
SnapshotDeletionPolicy: DeleteSnapshot,
SnapshotType: DynamicCreatedSnapshot,
}
// Definitions for Filesystem volume mode
// FsVolModePreprovisionedPV is TestPattern for "Pre-provisioned PV (filesystem)"
FsVolModePreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (filesystem volmode)",
VolType: PreprovisionedPV,
VolMode: v1.PersistentVolumeFilesystem,
}
// FsVolModeDynamicPV is TestPattern for "Dynamic PV (filesystem)"
FsVolModeDynamicPV = TestPattern{
Name: "Dynamic PV (filesystem volmode)",
VolType: DynamicPV,
VolMode: v1.PersistentVolumeFilesystem,
}
// Definitions for block volume mode
// BlockVolModePreprovisionedPV is TestPattern for "Pre-provisioned PV (block)"
BlockVolModePreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (block volmode)",
VolType: PreprovisionedPV,
VolMode: v1.PersistentVolumeBlock,
}
// BlockVolModeDynamicPV is TestPattern for "Dynamic PV (block)"
BlockVolModeDynamicPV = TestPattern{
Name: "Dynamic PV (block volmode)",
VolType: DynamicPV,
VolMode: v1.PersistentVolumeBlock,
SnapshotType: DynamicCreatedSnapshot,
SnapshotDeletionPolicy: DeleteSnapshot,
}
// BlockVolModeGenericEphemeralVolume is for generic ephemeral inline volumes in raw block mode.
BlockVolModeGenericEphemeralVolume = TestPattern{
Name: "Generic Ephemeral-volume (block volmode) (late-binding)",
VolType: GenericEphemeralVolume,
VolMode: v1.PersistentVolumeBlock,
BindingMode: storagev1.VolumeBindingWaitForFirstConsumer,
AllowExpansion: true,
}
// Definitions for snapshot case
// DynamicSnapshotDelete is TestPattern for "Dynamic snapshot"
DynamicSnapshotDelete = TestPattern{
Name: "Dynamic Snapshot (delete policy)",
SnapshotType: DynamicCreatedSnapshot,
SnapshotDeletionPolicy: DeleteSnapshot,
VolType: DynamicPV,
}
// PreprovisionedSnapshotDelete is TestPattern for "Pre-provisioned snapshot"
PreprovisionedSnapshotDelete = TestPattern{
Name: "Pre-provisioned Snapshot (delete policy)",
SnapshotType: PreprovisionedCreatedSnapshot,
SnapshotDeletionPolicy: DeleteSnapshot,
VolType: DynamicPV,
}
// EphemeralSnapshotDelete is TestPattern for snapshotting of a generic ephemeral volume
// where snapshots are deleted.
EphemeralSnapshotDelete = TestPattern{
Name: "Ephemeral Snapshot (delete policy)",
SnapshotType: DynamicCreatedSnapshot,
SnapshotDeletionPolicy: DeleteSnapshot,
VolType: GenericEphemeralVolume,
}
// DynamicSnapshotRetain is TestPattern for "Dynamic snapshot"
DynamicSnapshotRetain = TestPattern{
Name: "Dynamic Snapshot (retain policy)",
SnapshotType: DynamicCreatedSnapshot,
SnapshotDeletionPolicy: RetainSnapshot,
VolType: DynamicPV,
}
// PreprovisionedSnapshotRetain is TestPattern for "Pre-provisioned snapshot"
PreprovisionedSnapshotRetain = TestPattern{
Name: "Pre-provisioned Snapshot (retain policy)",
SnapshotType: PreprovisionedCreatedSnapshot,
SnapshotDeletionPolicy: RetainSnapshot,
VolType: DynamicPV,
}
	// EphemeralSnapshotRetain is TestPattern for snapshotting of a generic ephemeral volume
// where snapshots are preserved.
EphemeralSnapshotRetain = TestPattern{
Name: "Ephemeral Snapshot (retain policy)",
SnapshotType: DynamicCreatedSnapshot,
SnapshotDeletionPolicy: RetainSnapshot,
VolType: GenericEphemeralVolume,
}
// Definitions for volume expansion case
// DefaultFsDynamicPVAllowExpansion is TestPattern for "Dynamic PV (default fs)(allowExpansion)"
DefaultFsDynamicPVAllowExpansion = TestPattern{
Name: "Dynamic PV (default fs)(allowExpansion)",
VolType: DynamicPV,
AllowExpansion: true,
}
// NtfsDynamicPVAllowExpansion is TestPattern for "Dynamic PV (default fs)(allowExpansion)"
NtfsDynamicPVAllowExpansion = TestPattern{
Name: "Dynamic PV (ntfs)(allowExpansion)",
VolType: DynamicPV,
AllowExpansion: true,
FsType: "ntfs",
FeatureTag: "[Feature:Windows]",
}
// BlockVolModeDynamicPVAllowExpansion is TestPattern for "Dynamic PV (block volmode)(allowExpansion)"
BlockVolModeDynamicPVAllowExpansion = TestPattern{
Name: "Dynamic PV (block volmode)(allowExpansion)",
VolType: DynamicPV,
VolMode: v1.PersistentVolumeBlock,
AllowExpansion: true,
}
// Definitions for topology tests
// TopologyImmediate is TestPattern for immediate binding
TopologyImmediate = TestPattern{
Name: "Dynamic PV (immediate binding)",
VolType: DynamicPV,
BindingMode: storagev1.VolumeBindingImmediate,
}
// TopologyDelayed is TestPattern for delayed binding
TopologyDelayed = TestPattern{
Name: "Dynamic PV (delayed binding)",
VolType: DynamicPV,
BindingMode: storagev1.VolumeBindingWaitForFirstConsumer,
}
)
// NewVolTypeMap creates a map with the given TestVolTypes enabled
func NewVolTypeMap(types ...TestVolType) map[TestVolType]bool {
m := map[TestVolType]bool{}
for _, t := range types {
m[t] = true
}
return m
}
| test/e2e/storage/framework/testpattern.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.0005843131802976131,
0.00021973515686113387,
0.00016953807789832354,
0.0001837102900026366,
0.0000893540054676123
] |
{
"id": 3,
"code_window": [
"\tklog.V(2).InfoS(\"DetectLocalMode\", \"localMode\", string(config.DetectLocalMode))\n",
"}\n",
"\n",
"// createProxier creates the proxy.Provider\n",
"func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {\n",
"\tvar proxier proxy.Provider\n",
"\tvar err error\n",
"\n",
"\tif config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {\n",
"\t\tklog.InfoS(\"Watching for node, awaiting podCIDR allocation\", \"hostname\", s.Hostname)\n",
"\t\tnode, err := waitForPodCIDR(s.Client, s.Hostname)\n",
"\t\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformSetup is called after setting up the ProxyServer, but before creating the\n",
"// Proxier. It should fill in any platform-specific fields and perform other\n",
"// platform-specific setup.\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\tif s.Config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 78
} | //go:build !windows
// +build !windows
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"fmt"
"net"
"os"
"path/filepath"
"reflect"
goruntime "runtime"
"strings"
"testing"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
clientsetfake "k8s.io/client-go/kubernetes/fake"
clientgotesting "k8s.io/client-go/testing"
proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utiliptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
netutils "k8s.io/utils/net"
"k8s.io/utils/pointer"
)
func Test_platformApplyDefaults(t *testing.T) {
testCases := []struct {
name string
mode proxyconfigapi.ProxyMode
expectedMode proxyconfigapi.ProxyMode
detectLocal proxyconfigapi.LocalMode
expectedDetectLocal proxyconfigapi.LocalMode
}{
{
name: "defaults",
mode: "",
expectedMode: proxyconfigapi.ProxyModeIPTables,
detectLocal: "",
expectedDetectLocal: proxyconfigapi.LocalModeClusterCIDR,
},
{
name: "explicit",
mode: proxyconfigapi.ProxyModeIPTables,
expectedMode: proxyconfigapi.ProxyModeIPTables,
detectLocal: proxyconfigapi.LocalModeClusterCIDR,
expectedDetectLocal: proxyconfigapi.LocalModeClusterCIDR,
},
{
name: "override mode",
mode: "ipvs",
expectedMode: proxyconfigapi.ProxyModeIPVS,
detectLocal: "",
expectedDetectLocal: proxyconfigapi.LocalModeClusterCIDR,
},
{
name: "override detect-local",
mode: "",
expectedMode: proxyconfigapi.ProxyModeIPTables,
detectLocal: "NodeCIDR",
expectedDetectLocal: proxyconfigapi.LocalModeNodeCIDR,
},
{
name: "override both",
mode: "ipvs",
expectedMode: proxyconfigapi.ProxyModeIPVS,
detectLocal: "NodeCIDR",
expectedDetectLocal: proxyconfigapi.LocalModeNodeCIDR,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
options := NewOptions()
config := &proxyconfigapi.KubeProxyConfiguration{
Mode: tc.mode,
DetectLocalMode: tc.detectLocal,
}
options.platformApplyDefaults(config)
if config.Mode != tc.expectedMode {
t.Fatalf("expected mode: %s, but got: %s", tc.expectedMode, config.Mode)
}
if config.DetectLocalMode != tc.expectedDetectLocal {
t.Fatalf("expected detect-local: %s, but got: %s", tc.expectedDetectLocal, config.DetectLocalMode)
}
})
}
}
func Test_getLocalDetector(t *testing.T) {
cases := []struct {
mode proxyconfigapi.LocalMode
config *proxyconfigapi.KubeProxyConfiguration
ipt utiliptables.Interface
expected proxyutiliptables.LocalTrafficDetector
nodePodCIDRs []string
errExpected bool
}{
// LocalModeClusterCIDR
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14"},
ipt: utiliptablestest.NewFake(),
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/14", utiliptablestest.NewFake())),
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64"},
ipt: utiliptablestest.NewIPv6Fake(),
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/64", utiliptablestest.NewIPv6Fake())),
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14"},
ipt: utiliptablestest.NewIPv6Fake(),
expected: nil,
errExpected: true,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64"},
ipt: utiliptablestest.NewFake(),
expected: nil,
errExpected: true,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: ""},
ipt: utiliptablestest.NewFake(),
expected: proxyutiliptables.NewNoOpLocalDetector(),
errExpected: false,
},
// LocalModeNodeCIDR
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14"},
ipt: utiliptablestest.NewFake(),
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/24", utiliptablestest.NewFake())),
nodePodCIDRs: []string{"10.0.0.0/24"},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64"},
ipt: utiliptablestest.NewIPv6Fake(),
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/96", utiliptablestest.NewIPv6Fake())),
nodePodCIDRs: []string{"2002::1234:abcd:ffff:c0a8:101/96"},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14"},
ipt: utiliptablestest.NewIPv6Fake(),
expected: nil,
nodePodCIDRs: []string{"10.0.0.0/24"},
errExpected: true,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64"},
ipt: utiliptablestest.NewFake(),
expected: nil,
nodePodCIDRs: []string{"2002::1234:abcd:ffff:c0a8:101/96"},
errExpected: true,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: ""},
ipt: utiliptablestest.NewFake(),
expected: proxyutiliptables.NewNoOpLocalDetector(),
nodePodCIDRs: []string{},
errExpected: false,
},
// unknown mode
{
mode: proxyconfigapi.LocalMode("abcd"),
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14"},
ipt: utiliptablestest.NewFake(),
expected: proxyutiliptables.NewNoOpLocalDetector(),
errExpected: false,
},
// LocalModeBridgeInterface
{
mode: proxyconfigapi.LocalModeBridgeInterface,
config: &proxyconfigapi.KubeProxyConfiguration{
DetectLocal: proxyconfigapi.DetectLocalConfiguration{BridgeInterface: "eth"},
},
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByBridgeInterface("eth")),
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeBridgeInterface,
config: &proxyconfigapi.KubeProxyConfiguration{
DetectLocal: proxyconfigapi.DetectLocalConfiguration{BridgeInterface: "1234567890123456789"},
},
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByBridgeInterface("1234567890123456789")),
errExpected: false,
},
// LocalModeInterfaceNamePrefix
{
mode: proxyconfigapi.LocalModeInterfaceNamePrefix,
config: &proxyconfigapi.KubeProxyConfiguration{
DetectLocal: proxyconfigapi.DetectLocalConfiguration{InterfaceNamePrefix: "eth"},
},
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByInterfaceNamePrefix("eth")),
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeInterfaceNamePrefix,
config: &proxyconfigapi.KubeProxyConfiguration{
DetectLocal: proxyconfigapi.DetectLocalConfiguration{InterfaceNamePrefix: "1234567890123456789"},
},
expected: resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByInterfaceNamePrefix("1234567890123456789")),
errExpected: false,
},
}
for i, c := range cases {
r, err := getLocalDetector(c.mode, c.config, c.ipt, c.nodePodCIDRs)
if c.errExpected {
if err == nil {
t.Errorf("Case[%d] Expected error, but succeeded with %v", i, r)
}
continue
}
if err != nil {
t.Errorf("Case[%d] Error resolving detect-local: %v", i, err)
continue
}
if !reflect.DeepEqual(r, c.expected) {
t.Errorf("Case[%d] Unexpected detect-local implementation, expected: %q, got: %q", i, c.expected, r)
}
}
}
func Test_getDualStackLocalDetectorTuple(t *testing.T) {
cases := []struct {
mode proxyconfigapi.LocalMode
config *proxyconfigapi.KubeProxyConfiguration
ipt [2]utiliptables.Interface
expected [2]proxyutiliptables.LocalTrafficDetector
nodePodCIDRs []string
errExpected bool
}{
// LocalModeClusterCIDR
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14,2002::1234:abcd:ffff:c0a8:101/64"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: resolveDualStackLocalDetectors(t)(
proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/14", utiliptablestest.NewFake()))(
proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/64", utiliptablestest.NewIPv6Fake())),
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64,10.0.0.0/14"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: resolveDualStackLocalDetectors(t)(
proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/14", utiliptablestest.NewFake()))(
proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/64", utiliptablestest.NewIPv6Fake())),
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: [2]proxyutiliptables.LocalTrafficDetector{
resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/14", utiliptablestest.NewFake())),
proxyutiliptables.NewNoOpLocalDetector()},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: [2]proxyutiliptables.LocalTrafficDetector{
proxyutiliptables.NewNoOpLocalDetector(),
resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/64", utiliptablestest.NewIPv6Fake()))},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeClusterCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: ""},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: [2]proxyutiliptables.LocalTrafficDetector{proxyutiliptables.NewNoOpLocalDetector(), proxyutiliptables.NewNoOpLocalDetector()},
errExpected: false,
},
// LocalModeNodeCIDR
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14,2002::1234:abcd:ffff:c0a8:101/64"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: resolveDualStackLocalDetectors(t)(
proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/24", utiliptablestest.NewFake()))(
proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/96", utiliptablestest.NewIPv6Fake())),
nodePodCIDRs: []string{"10.0.0.0/24", "2002::1234:abcd:ffff:c0a8:101/96"},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64,10.0.0.0/14"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: resolveDualStackLocalDetectors(t)(
proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/24", utiliptablestest.NewFake()))(
proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/96", utiliptablestest.NewIPv6Fake())),
nodePodCIDRs: []string{"2002::1234:abcd:ffff:c0a8:101/96", "10.0.0.0/24"},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "10.0.0.0/14"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: [2]proxyutiliptables.LocalTrafficDetector{
resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/24", utiliptablestest.NewFake())),
proxyutiliptables.NewNoOpLocalDetector()},
nodePodCIDRs: []string{"10.0.0.0/24"},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: "2002::1234:abcd:ffff:c0a8:101/64"},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: [2]proxyutiliptables.LocalTrafficDetector{
proxyutiliptables.NewNoOpLocalDetector(),
resolveLocalDetector(t)(proxyutiliptables.NewDetectLocalByCIDR("2002::1234:abcd:ffff:c0a8:101/96", utiliptablestest.NewIPv6Fake()))},
nodePodCIDRs: []string{"2002::1234:abcd:ffff:c0a8:101/96"},
errExpected: false,
},
{
mode: proxyconfigapi.LocalModeNodeCIDR,
config: &proxyconfigapi.KubeProxyConfiguration{ClusterCIDR: ""},
ipt: [2]utiliptables.Interface{utiliptablestest.NewFake(), utiliptablestest.NewIPv6Fake()},
expected: [2]proxyutiliptables.LocalTrafficDetector{proxyutiliptables.NewNoOpLocalDetector(), proxyutiliptables.NewNoOpLocalDetector()},
nodePodCIDRs: []string{},
errExpected: false,
},
// LocalModeBridgeInterface
{
mode: proxyconfigapi.LocalModeBridgeInterface,
config: &proxyconfigapi.KubeProxyConfiguration{
DetectLocal: proxyconfigapi.DetectLocalConfiguration{BridgeInterface: "eth"},
},
expected: resolveDualStackLocalDetectors(t)(
proxyutiliptables.NewDetectLocalByBridgeInterface("eth"))(
proxyutiliptables.NewDetectLocalByBridgeInterface("eth")),
errExpected: false,
},
// LocalModeInterfaceNamePrefix
{
mode: proxyconfigapi.LocalModeInterfaceNamePrefix,
config: &proxyconfigapi.KubeProxyConfiguration{
DetectLocal: proxyconfigapi.DetectLocalConfiguration{InterfaceNamePrefix: "veth"},
},
expected: resolveDualStackLocalDetectors(t)(
proxyutiliptables.NewDetectLocalByInterfaceNamePrefix("veth"))(
proxyutiliptables.NewDetectLocalByInterfaceNamePrefix("veth")),
errExpected: false,
},
}
for i, c := range cases {
r, err := getDualStackLocalDetectorTuple(c.mode, c.config, c.ipt, c.nodePodCIDRs)
if c.errExpected {
if err == nil {
t.Errorf("Case[%d] expected error, but succeeded with %q", i, r)
}
continue
}
if err != nil {
t.Errorf("Case[%d] Error resolving detect-local: %v", i, err)
continue
}
if !reflect.DeepEqual(r, c.expected) {
t.Errorf("Case[%d] Unexpected detect-local implementation, expected: %q, got: %q", i, c.expected, r)
}
}
}
func makeNodeWithPodCIDRs(cidrs ...string) *v1.Node {
if len(cidrs) == 0 {
return &v1.Node{}
}
return &v1.Node{
Spec: v1.NodeSpec{
PodCIDR: cidrs[0],
PodCIDRs: cidrs,
},
}
}
func resolveLocalDetector(t *testing.T) func(proxyutiliptables.LocalTrafficDetector, error) proxyutiliptables.LocalTrafficDetector {
return func(localDetector proxyutiliptables.LocalTrafficDetector, err error) proxyutiliptables.LocalTrafficDetector {
t.Helper()
if err != nil {
t.Fatalf("Error resolving detect-local: %v", err)
}
return localDetector
}
}
func resolveDualStackLocalDetectors(t *testing.T) func(localDetector proxyutiliptables.LocalTrafficDetector, err1 error) func(proxyutiliptables.LocalTrafficDetector, error) [2]proxyutiliptables.LocalTrafficDetector {
return func(localDetector proxyutiliptables.LocalTrafficDetector, err error) func(proxyutiliptables.LocalTrafficDetector, error) [2]proxyutiliptables.LocalTrafficDetector {
t.Helper()
if err != nil {
t.Fatalf("Error resolving dual stack detect-local: %v", err)
}
return func(otherLocalDetector proxyutiliptables.LocalTrafficDetector, err1 error) [2]proxyutiliptables.LocalTrafficDetector {
t.Helper()
if err1 != nil {
t.Fatalf("Error resolving dual stack detect-local: %v", err)
}
return [2]proxyutiliptables.LocalTrafficDetector{localDetector, otherLocalDetector}
}
}
}
func TestConfigChange(t *testing.T) {
setUp := func() (*os.File, string, error) {
tempDir, err := os.MkdirTemp("", "kubeproxy-config-change")
if err != nil {
return nil, "", fmt.Errorf("unable to create temporary directory: %v", err)
}
fullPath := filepath.Join(tempDir, "kube-proxy-config")
file, err := os.Create(fullPath)
if err != nil {
return nil, "", fmt.Errorf("unexpected error when creating temp file: %v", err)
}
_, err = file.WriteString(`apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
bindAddressHardFail: false
clientConnection:
acceptContentTypes: ""
burst: 10
contentType: application/vnd.kubernetes.protobuf
kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
qps: 5
clusterCIDR: 10.244.0.0/16
configSyncPeriod: 15m0s
conntrack:
maxPerCore: 32768
min: 131072
tcpCloseWaitTimeout: 1h0m0s
tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
masqueradeAll: false
masqueradeBit: 14
minSyncPeriod: 0s
syncPeriod: 30s
ipvs:
excludeCIDRs: null
minSyncPeriod: 0s
scheduler: ""
syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: ""
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
detectLocalMode: "BridgeInterface"`)
if err != nil {
return nil, "", fmt.Errorf("unexpected error when writing content to temp kube-proxy config file: %v", err)
}
return file, tempDir, nil
}
tearDown := func(file *os.File, tempDir string) {
file.Close()
os.RemoveAll(tempDir)
}
testCases := []struct {
name string
proxyServer proxyRun
append bool
expectedErr string
}{
{
name: "update config file",
proxyServer: new(fakeProxyServerLongRun),
append: true,
expectedErr: "content of the proxy server's configuration file was updated",
},
{
name: "fake error",
proxyServer: new(fakeProxyServerError),
expectedErr: "mocking error from ProxyServer.Run()",
},
}
for _, tc := range testCases {
file, tempDir, err := setUp()
if err != nil {
t.Fatalf("unexpected error when setting up environment: %v", err)
}
opt := NewOptions()
opt.ConfigFile = file.Name()
err = opt.Complete()
if err != nil {
t.Fatal(err)
}
opt.proxyServer = tc.proxyServer
errCh := make(chan error, 1)
go func() {
errCh <- opt.runLoop()
}()
if tc.append {
file.WriteString("append fake content")
}
select {
case err := <-errCh:
if err != nil {
if !strings.Contains(err.Error(), tc.expectedErr) {
t.Errorf("[%s] Expected error containing %v, got %v", tc.name, tc.expectedErr, err)
}
}
case <-time.After(10 * time.Second):
t.Errorf("[%s] Timeout: unable to get any events or internal timeout.", tc.name)
}
tearDown(file, tempDir)
}
}
func Test_waitForPodCIDR(t *testing.T) {
expected := []string{"192.168.0.0/24", "fd00:1:2::/64"}
nodeName := "test-node"
oldNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
ResourceVersion: "1000",
},
Spec: v1.NodeSpec{
PodCIDR: "10.0.0.0/24",
PodCIDRs: []string{"10.0.0.0/24", "2001:db2:1/64"},
},
}
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
ResourceVersion: "1",
},
}
updatedNode := node.DeepCopy()
updatedNode.Spec.PodCIDRs = expected
updatedNode.Spec.PodCIDR = expected[0]
// start with the new node
client := clientsetfake.NewSimpleClientset()
client.AddReactor("list", "nodes", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
obj := &v1.NodeList{}
return true, obj, nil
})
fakeWatch := watch.NewFake()
client.PrependWatchReactor("nodes", clientgotesting.DefaultWatchReactor(fakeWatch, nil))
go func() {
fakeWatch.Add(node)
// receive a delete event for the old node
fakeWatch.Delete(oldNode)
// set the PodCIDRs on the new node
fakeWatch.Modify(updatedNode)
}()
got, err := waitForPodCIDR(client, node.Name)
if err != nil {
t.Errorf("waitForPodCIDR() unexpected error %v", err)
return
}
if !reflect.DeepEqual(got.Spec.PodCIDRs, expected) {
t.Errorf("waitForPodCIDR() got %v expected to be %v ", got.Spec.PodCIDRs, expected)
}
}
func TestGetConntrackMax(t *testing.T) {
ncores := goruntime.NumCPU()
testCases := []struct {
min int32
maxPerCore int32
expected int
err string
}{
{
expected: 0,
},
{
maxPerCore: 67890, // use this if Max is 0
min: 1, // avoid 0 default
expected: 67890 * ncores,
},
{
maxPerCore: 1, // ensure that Min is considered
min: 123456,
expected: 123456,
},
{
maxPerCore: 0, // leave system setting
min: 123456,
expected: 0,
},
}
for i, tc := range testCases {
cfg := proxyconfigapi.KubeProxyConntrackConfiguration{
Min: pointer.Int32(tc.min),
MaxPerCore: pointer.Int32(tc.maxPerCore),
}
x, e := getConntrackMax(cfg)
if e != nil {
if tc.err == "" {
t.Errorf("[%d] unexpected error: %v", i, e)
} else if !strings.Contains(e.Error(), tc.err) {
t.Errorf("[%d] expected an error containing %q: %v", i, tc.err, e)
}
} else if x != tc.expected {
t.Errorf("[%d] expected %d, got %d", i, tc.expected, x)
}
}
}
func TestProxyServer_createProxier(t *testing.T) {
tests := []struct {
name string
node *v1.Node
config *proxyconfigapi.KubeProxyConfiguration
wantPodCIDRs []string
}{
{
name: "LocalModeNodeCIDR store the node PodCIDRs obtained",
node: makeNodeWithPodCIDRs("10.0.0.0/24"),
config: &proxyconfigapi.KubeProxyConfiguration{DetectLocalMode: proxyconfigapi.LocalModeNodeCIDR},
wantPodCIDRs: []string{"10.0.0.0/24"},
},
{
name: "LocalModeNodeCIDR store the node PodCIDRs obtained dual stack",
node: makeNodeWithPodCIDRs("10.0.0.0/24", "2001:db2:1/64"),
config: &proxyconfigapi.KubeProxyConfiguration{DetectLocalMode: proxyconfigapi.LocalModeNodeCIDR},
wantPodCIDRs: []string{"10.0.0.0/24", "2001:db2:1/64"},
},
{
name: "LocalModeClusterCIDR does not get the node PodCIDRs",
node: makeNodeWithPodCIDRs("10.0.0.0/24", "2001:db2:1/64"),
config: &proxyconfigapi.KubeProxyConfiguration{DetectLocalMode: proxyconfigapi.LocalModeClusterCIDR},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset(tt.node)
s := &ProxyServer{
Config: tt.config,
Client: client,
Hostname: "nodename",
NodeIPs: map[v1.IPFamily]net.IP{
v1.IPv4Protocol: netutils.ParseIPSloppy("127.0.0.1"),
v1.IPv6Protocol: net.IPv6zero,
},
}
_, err := s.createProxier(tt.config)
// TODO: mock the exec.Interface to not fail probing iptables
if (err != nil) && !strings.Contains(err.Error(), "iptables is not supported for primary IP family") {
t.Errorf("ProxyServer.createProxier() error = %v", err)
return
}
if !reflect.DeepEqual(s.podCIDRs, tt.wantPodCIDRs) {
t.Errorf("Expected PodCIDRs %v got %v", tt.wantPodCIDRs, s.podCIDRs)
}
})
}
}
| cmd/kube-proxy/app/server_others_test.go | 1 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.9989757537841797,
0.015524109825491905,
0.0001663279690546915,
0.0002833576872944832,
0.11841432005167007
] |
{
"id": 3,
"code_window": [
"\tklog.V(2).InfoS(\"DetectLocalMode\", \"localMode\", string(config.DetectLocalMode))\n",
"}\n",
"\n",
"// createProxier creates the proxy.Provider\n",
"func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {\n",
"\tvar proxier proxy.Provider\n",
"\tvar err error\n",
"\n",
"\tif config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {\n",
"\t\tklog.InfoS(\"Watching for node, awaiting podCIDR allocation\", \"hostname\", s.Hostname)\n",
"\t\tnode, err := waitForPodCIDR(s.Client, s.Hostname)\n",
"\t\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformSetup is called after setting up the ProxyServer, but before creating the\n",
"// Proxier. It should fill in any platform-specific fields and perform other\n",
"// platform-specific setup.\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\tif s.Config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 78
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"net/http"
v1 "k8s.io/api/node/v1"
"k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
type NodeV1Interface interface {
RESTClient() rest.Interface
RuntimeClassesGetter
}
// NodeV1Client is used to interact with features provided by the node.k8s.io group.
type NodeV1Client struct {
restClient rest.Interface
}
func (c *NodeV1Client) RuntimeClasses() RuntimeClassInterface {
return newRuntimeClasses(c)
}
// NewForConfig creates a new NodeV1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*NodeV1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
httpClient, err := rest.HTTPClientFor(&config)
if err != nil {
return nil, err
}
return NewForConfigAndClient(&config, httpClient)
}
// NewForConfigAndClient creates a new NodeV1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NodeV1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientForConfigAndClient(&config, h)
if err != nil {
return nil, err
}
return &NodeV1Client{client}, nil
}
// NewForConfigOrDie creates a new NodeV1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *NodeV1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new NodeV1Client for the given RESTClient.
func New(c rest.Interface) *NodeV1Client {
return &NodeV1Client{c}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *NodeV1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient
}
| staging/src/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.014658942818641663,
0.0025150689762085676,
0.0001636435918044299,
0.0007353881956078112,
0.004126011859625578
] |
{
"id": 3,
"code_window": [
"\tklog.V(2).InfoS(\"DetectLocalMode\", \"localMode\", string(config.DetectLocalMode))\n",
"}\n",
"\n",
"// createProxier creates the proxy.Provider\n",
"func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {\n",
"\tvar proxier proxy.Provider\n",
"\tvar err error\n",
"\n",
"\tif config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {\n",
"\t\tklog.InfoS(\"Watching for node, awaiting podCIDR allocation\", \"hostname\", s.Hostname)\n",
"\t\tnode, err := waitForPodCIDR(s.Client, s.Hostname)\n",
"\t\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformSetup is called after setting up the ProxyServer, but before creating the\n",
"// Proxier. It should fill in any platform-specific fields and perform other\n",
"// platform-specific setup.\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\tif s.Config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 78
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1beta1
// RollbackConfigApplyConfiguration represents a declarative configuration of the RollbackConfig type for use
// with apply.
type RollbackConfigApplyConfiguration struct {
Revision *int64 `json:"revision,omitempty"`
}
// RollbackConfigApplyConfiguration constructs a declarative configuration of the RollbackConfig type for use with
// apply.
func RollbackConfig() *RollbackConfigApplyConfiguration {
return &RollbackConfigApplyConfiguration{}
}
// WithRevision sets the Revision field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Revision field is set to the value of the last call.
func (b *RollbackConfigApplyConfiguration) WithRevision(value int64) *RollbackConfigApplyConfiguration {
b.Revision = &value
return b
}
| staging/src/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00017813638260122389,
0.00017250326345674694,
0.00016593464533798397,
0.0001729709911160171,
0.0000054824795370223
] |
{
"id": 3,
"code_window": [
"\tklog.V(2).InfoS(\"DetectLocalMode\", \"localMode\", string(config.DetectLocalMode))\n",
"}\n",
"\n",
"// createProxier creates the proxy.Provider\n",
"func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {\n",
"\tvar proxier proxy.Provider\n",
"\tvar err error\n",
"\n",
"\tif config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {\n",
"\t\tklog.InfoS(\"Watching for node, awaiting podCIDR allocation\", \"hostname\", s.Hostname)\n",
"\t\tnode, err := waitForPodCIDR(s.Client, s.Hostname)\n",
"\t\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformSetup is called after setting up the ProxyServer, but before creating the\n",
"// Proxier. It should fill in any platform-specific fields and perform other\n",
"// platform-specific setup.\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\tif s.Config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 78
} | apiVersion: v1
kind: Pod
metadata:
name: restrictedvolumes5
spec:
containers:
- image: registry.k8s.io/pause
name: container1
securityContext:
allowPrivilegeEscalation: false
initContainers:
- image: registry.k8s.io/pause
name: initcontainer1
securityContext:
allowPrivilegeEscalation: false
securityContext:
runAsNonRoot: true
volumes:
- glusterfs:
endpoints: test
path: test
name: volume1
| staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.11/fail/restrictedvolumes5.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00017373387527186424,
0.00017213479441124946,
0.00017063245468307287,
0.00017203809693455696,
0.0000012679951169047854
] |
{
"id": 4,
"code_window": [
"\t\tklog.InfoS(\"Watching for node, awaiting podCIDR allocation\", \"hostname\", s.Hostname)\n",
"\t\tnode, err := waitForPodCIDR(s.Client, s.Hostname)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, err\n",
"\t\t}\n",
"\t\ts.podCIDRs = node.Spec.PodCIDRs\n",
"\t\tklog.InfoS(\"NodeInfo\", \"podCIDRs\", node.Spec.PodCIDRs)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\treturn err\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 87
} | //go:build !windows
// +build !windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"context"
"errors"
"fmt"
goruntime "runtime"
"strings"
"time"
"github.com/google/cadvisor/machine"
"github.com/google/cadvisor/utils/sysfs"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/apimachinery/pkg/fields"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
toolswatch "k8s.io/client-go/tools/watch"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
"k8s.io/kubernetes/pkg/proxy"
proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/proxy/ipvs"
utilipset "k8s.io/kubernetes/pkg/proxy/ipvs/ipset"
utilipvs "k8s.io/kubernetes/pkg/proxy/ipvs/util"
proxymetrics "k8s.io/kubernetes/pkg/proxy/metrics"
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
"k8s.io/utils/exec"
netutils "k8s.io/utils/net"
"k8s.io/klog/v2"
)
// timeoutForNodePodCIDR is the time to wait for allocators to assign a PodCIDR to the
// node after it is registered.
var timeoutForNodePodCIDR = 5 * time.Minute
func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {
if config.Mode == "" {
klog.InfoS("Using iptables proxy")
config.Mode = proxyconfigapi.ProxyModeIPTables
}
if config.DetectLocalMode == "" {
klog.V(4).InfoS("Defaulting detect-local-mode", "localModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR))
config.DetectLocalMode = proxyconfigapi.LocalModeClusterCIDR
}
klog.V(2).InfoS("DetectLocalMode", "localMode", string(config.DetectLocalMode))
}
// createProxier creates the proxy.Provider
func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {
var proxier proxy.Provider
var err error
if config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {
klog.InfoS("Watching for node, awaiting podCIDR allocation", "hostname", s.Hostname)
node, err := waitForPodCIDR(s.Client, s.Hostname)
if err != nil {
return nil, err
}
s.podCIDRs = node.Spec.PodCIDRs
klog.InfoS("NodeInfo", "podCIDRs", node.Spec.PodCIDRs)
}
primaryProtocol := utiliptables.ProtocolIPv4
if s.PrimaryIPFamily == v1.IPv6Protocol {
primaryProtocol = utiliptables.ProtocolIPv6
}
execer := exec.New()
iptInterface := utiliptables.New(execer, primaryProtocol)
var ipt [2]utiliptables.Interface
	dualStack := true // While we assume that the node supports dual-stack, we do further checks below
// Create iptables handlers for both families, one is already created
// Always ordered as IPv4, IPv6
if primaryProtocol == utiliptables.ProtocolIPv4 {
ipt[0] = iptInterface
ipt[1] = utiliptables.New(execer, utiliptables.ProtocolIPv6)
} else {
ipt[0] = utiliptables.New(execer, utiliptables.ProtocolIPv4)
ipt[1] = iptInterface
}
if !ipt[0].Present() {
return nil, fmt.Errorf("iptables is not supported for primary IP family %q", primaryProtocol)
} else if !ipt[1].Present() {
klog.InfoS("kube-proxy running in single-stack mode: secondary ipFamily is not supported", "ipFamily", ipt[1].Protocol())
dualStack = false
// Validate NodePortAddresses is single-stack
npaByFamily := proxyutil.MapCIDRsByIPFamily(config.NodePortAddresses)
secondaryFamily := proxyutil.OtherIPFamily(s.PrimaryIPFamily)
badAddrs := npaByFamily[secondaryFamily]
if len(badAddrs) > 0 {
klog.InfoS("Ignoring --nodeport-addresses of the wrong family", "ipFamily", secondaryFamily, "addresses", badAddrs)
}
}
if config.Mode == proxyconfigapi.ProxyModeIPTables {
klog.InfoS("Using iptables Proxier")
if dualStack {
klog.InfoS("kube-proxy running in dual-stack mode", "ipFamily", iptInterface.Protocol())
klog.InfoS("Creating dualStackProxier for iptables")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewDualStackProxier(
ipt,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetectors,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.NodePortAddresses,
)
} else {
			// Create a single-stack proxier if and only if the node does not support dual-stack (i.e., no iptables support).
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewProxier(
s.PrimaryIPFamily,
iptInterface,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetector,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.NodePortAddresses,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
} else if config.Mode == proxyconfigapi.ProxyModeIPVS {
kernelHandler := ipvs.NewLinuxKernelHandler()
ipsetInterface := utilipset.New(execer)
ipvsInterface := utilipvs.New()
if err := ipvs.CanUseIPVSProxier(ipvsInterface, ipsetInterface, config.IPVS.Scheduler); err != nil {
return nil, fmt.Errorf("can't use the IPVS proxier: %v", err)
}
klog.InfoS("Using ipvs Proxier")
if dualStack {
klog.InfoS("Creating dualStackProxier for ipvs")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxier, err = ipvs.NewDualStackProxier(
ipt,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetectors,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.IPVS.Scheduler,
config.NodePortAddresses,
kernelHandler,
)
} else {
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxier, err = ipvs.NewProxier(
s.PrimaryIPFamily,
iptInterface,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetector,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.IPVS.Scheduler,
config.NodePortAddresses,
kernelHandler,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
}
return proxier, nil
}
func (s *ProxyServer) platformSetup() error {
ct := &realConntracker{}
max, err := getConntrackMax(s.Config.Conntrack)
if err != nil {
return err
}
if max > 0 {
err := ct.SetMax(max)
if err != nil {
if err != errReadOnlySysFS {
return err
}
// errReadOnlySysFS is caused by a known docker issue (https://github.com/docker/docker/issues/24000),
// the only remediation we know is to restart the docker daemon.
			// Here we'll send a node event with a specific reason and message; the
			// administrator should decide whether and how to handle this issue,
			// whether to drain the node and restart docker. This occurs in other
			// container runtimes as well.
// TODO(random-liu): Remove this when the docker bug is fixed.
const message = "CRI error: /sys is read-only: " +
"cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)"
s.Recorder.Eventf(s.NodeRef, nil, v1.EventTypeWarning, err.Error(), "StartKubeProxy", message)
}
}
if s.Config.Conntrack.TCPEstablishedTimeout != nil && s.Config.Conntrack.TCPEstablishedTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPEstablishedTimeout.Duration / time.Second)
if err := ct.SetTCPEstablishedTimeout(timeout); err != nil {
return err
}
}
if s.Config.Conntrack.TCPCloseWaitTimeout != nil && s.Config.Conntrack.TCPCloseWaitTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPCloseWaitTimeout.Duration / time.Second)
if err := ct.SetTCPCloseWaitTimeout(timeout); err != nil {
return err
}
}
proxymetrics.RegisterMetrics()
return nil
}
func getConntrackMax(config proxyconfigapi.KubeProxyConntrackConfiguration) (int, error) {
if config.MaxPerCore != nil && *config.MaxPerCore > 0 {
floor := 0
if config.Min != nil {
floor = int(*config.Min)
}
scaled := int(*config.MaxPerCore) * detectNumCPU()
if scaled > floor {
klog.V(3).InfoS("GetConntrackMax: using scaled conntrack-max-per-core")
return scaled, nil
}
klog.V(3).InfoS("GetConntrackMax: using conntrack-min")
return floor, nil
}
return 0, nil
}
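// Illustrative worked example (values assumed, not from the original source):
// with MaxPerCore=32768 on an 8-CPU node and Min=131072, the scaled value is
// 32768*8 = 262144, which exceeds the floor, so 262144 is returned; on a
// 2-CPU node the scaled value 65536 is below the floor, so 131072 is used.
// When MaxPerCore is 0, the conntrack limit is left untouched and 0 is returned.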
func waitForPodCIDR(client clientset.Interface, nodeName string) (*v1.Node, error) {
// since allocators can assign the podCIDR after the node registers, we do a watch here to wait
// for podCIDR to be assigned, instead of assuming that the Get() on startup will have it.
ctx, cancelFunc := context.WithTimeout(context.TODO(), timeoutForNodePodCIDR)
defer cancelFunc()
fieldSelector := fields.OneTermEqualSelector("metadata.name", nodeName).String()
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
options.FieldSelector = fieldSelector
return client.CoreV1().Nodes().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return client.CoreV1().Nodes().Watch(ctx, options)
},
}
condition := func(event watch.Event) (bool, error) {
// don't process delete events
if event.Type != watch.Modified && event.Type != watch.Added {
return false, nil
}
n, ok := event.Object.(*v1.Node)
if !ok {
return false, fmt.Errorf("event object not of type Node")
}
		// don't consider the node if it is going to be deleted; keep waiting
if !n.DeletionTimestamp.IsZero() {
return false, nil
}
return n.Spec.PodCIDR != "" && len(n.Spec.PodCIDRs) > 0, nil
}
evt, err := toolswatch.UntilWithSync(ctx, lw, &v1.Node{}, nil, condition)
if err != nil {
return nil, fmt.Errorf("timeout waiting for PodCIDR allocation to configure detect-local-mode %v: %v", proxyconfigapi.LocalModeNodeCIDR, err)
}
if n, ok := evt.Object.(*v1.Node); ok {
return n, nil
}
return nil, fmt.Errorf("event object not of type node")
}
func detectNumCPU() int {
	// try to get numCPU from /sys first due to a known issue (https://github.com/kubernetes/kubernetes/issues/99225)
_, numCPU, err := machine.GetTopology(sysfs.NewRealSysFs())
if err != nil || numCPU < 1 {
return goruntime.NumCPU()
}
return numCPU
}
func getLocalDetector(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt utiliptables.Interface, nodePodCIDRs []string) (proxyutiliptables.LocalTrafficDetector, error) {
switch mode {
case proxyconfigapi.LocalModeClusterCIDR:
// LocalModeClusterCIDR is the default if --detect-local-mode wasn't passed,
// but --cluster-cidr is optional.
if len(strings.TrimSpace(config.ClusterCIDR)) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined")
break
}
return proxyutiliptables.NewDetectLocalByCIDR(config.ClusterCIDR, ipt)
case proxyconfigapi.LocalModeNodeCIDR:
if len(nodePodCIDRs) == 0 {
klog.InfoS("Detect-local-mode set to NodeCIDR, but no PodCIDR defined at node")
break
}
return proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt)
case proxyconfigapi.LocalModeBridgeInterface:
return proxyutiliptables.NewDetectLocalByBridgeInterface(config.DetectLocal.BridgeInterface)
case proxyconfigapi.LocalModeInterfaceNamePrefix:
return proxyutiliptables.NewDetectLocalByInterfaceNamePrefix(config.DetectLocal.InterfaceNamePrefix)
}
klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
return proxyutiliptables.NewNoOpLocalDetector(), nil
}
func getDualStackLocalDetectorTuple(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt [2]utiliptables.Interface, nodePodCIDRs []string) ([2]proxyutiliptables.LocalTrafficDetector, error) {
var err error
localDetectors := [2]proxyutiliptables.LocalTrafficDetector{proxyutiliptables.NewNoOpLocalDetector(), proxyutiliptables.NewNoOpLocalDetector()}
switch mode {
case proxyconfigapi.LocalModeClusterCIDR:
// LocalModeClusterCIDR is the default if --detect-local-mode wasn't passed,
// but --cluster-cidr is optional.
if len(strings.TrimSpace(config.ClusterCIDR)) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined")
break
}
clusterCIDRs := cidrTuple(config.ClusterCIDR)
if len(strings.TrimSpace(clusterCIDRs[0])) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv4 cluster CIDR defined, defaulting to no-op detect-local for IPv4")
} else {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[0], ipt[0])
			if err != nil { // don't lose the original error
return localDetectors, err
}
}
if len(strings.TrimSpace(clusterCIDRs[1])) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, defaulting to no-op detect-local for IPv6")
} else {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[1], ipt[1])
}
return localDetectors, err
case proxyconfigapi.LocalModeNodeCIDR:
if len(nodePodCIDRs) == 0 {
klog.InfoS("No node info available to configure detect-local-mode NodeCIDR")
break
}
		// localDetectors, like ipt, need to be in the order [IPv4, IPv6], but PodCIDRs is set up so that PodCIDRs[0] == PodCIDR,
		// so we have to handle the case where PodCIDR can be IPv6 and set that to localDetectors[1].
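		// (Illustrative: with PodCIDRs == ["fd00:1::/64", "10.244.0.0/24"], localDetectors[1]
		// is built from the IPv6 CIDR and localDetectors[0] from the IPv4 one.)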
if netutils.IsIPv6CIDRString(nodePodCIDRs[0]) {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt[1])
if err != nil {
return localDetectors, err
}
if len(nodePodCIDRs) > 1 {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[1], ipt[0])
}
} else {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt[0])
if err != nil {
return localDetectors, err
}
if len(nodePodCIDRs) > 1 {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[1], ipt[1])
}
}
return localDetectors, err
case proxyconfigapi.LocalModeBridgeInterface, proxyconfigapi.LocalModeInterfaceNamePrefix:
localDetector, err := getLocalDetector(mode, config, ipt[0], nodePodCIDRs)
if err == nil {
localDetectors[0] = localDetector
localDetectors[1] = localDetector
}
return localDetectors, err
default:
klog.InfoS("Unknown detect-local-mode", "detectLocalMode", mode)
}
klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
return localDetectors, nil
}
// cidrTuple takes a comma-separated list of CIDRs and returns a tuple (ipv4cidr,ipv6cidr).
// The returned tuple is guaranteed to have the order (ipv4,ipv6); if no CIDR from a family is found, an
// empty string "" is inserted.
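// Illustrative example (input values assumed):
//
//	cidrTuple("fd00:10:96::/112,10.96.0.0/16") // returns [2]string{"10.96.0.0/16", "fd00:10:96::/112"}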
func cidrTuple(cidrList string) [2]string {
cidrs := [2]string{"", ""}
foundIPv4 := false
foundIPv6 := false
for _, cidr := range strings.Split(cidrList, ",") {
if netutils.IsIPv6CIDRString(cidr) && !foundIPv6 {
cidrs[1] = cidr
foundIPv6 = true
} else if !foundIPv4 {
cidrs[0] = cidr
foundIPv4 = true
}
if foundIPv6 && foundIPv4 {
break
}
}
return cidrs
}
// cleanupAndExit removes iptables rules and ipset/ipvs rules
func cleanupAndExit() error {
execer := exec.New()
// cleanup IPv6 and IPv4 iptables rules, regardless of current configuration
ipts := []utiliptables.Interface{
utiliptables.New(execer, utiliptables.ProtocolIPv4),
utiliptables.New(execer, utiliptables.ProtocolIPv6),
}
ipsetInterface := utilipset.New(execer)
ipvsInterface := utilipvs.New()
var encounteredError bool
for _, ipt := range ipts {
encounteredError = iptables.CleanupLeftovers(ipt) || encounteredError
encounteredError = ipvs.CleanupLeftovers(ipvsInterface, ipt, ipsetInterface) || encounteredError
}
if encounteredError {
return errors.New("encountered an error while tearing down rules")
}
return nil
}
| cmd/kube-proxy/app/server_others.go | 1 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.9979552030563354,
0.0664043202996254,
0.00016237603267654777,
0.00017421162920072675,
0.2247725874185562
] |
{
"id": 4,
"code_window": [
"\t\tklog.InfoS(\"Watching for node, awaiting podCIDR allocation\", \"hostname\", s.Hostname)\n",
"\t\tnode, err := waitForPodCIDR(s.Client, s.Hostname)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, err\n",
"\t\t}\n",
"\t\ts.podCIDRs = node.Spec.PodCIDRs\n",
"\t\tklog.InfoS(\"NodeInfo\", \"podCIDRs\", node.Spec.PodCIDRs)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\treturn err\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 87
} | // Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// For the proc file format details,
// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
// Constants for the various /proc/net/unix enumerations.
// TODO: match against x/sys/unix or similar?
const (
netUnixTypeStream = 1
netUnixTypeDgram = 2
netUnixTypeSeqpacket = 5
netUnixFlagDefault = 0
netUnixFlagListen = 1 << 16
netUnixStateUnconnected = 1
netUnixStateConnecting = 2
netUnixStateConnected = 3
netUnixStateDisconnected = 4
)
// NetUNIXType is the type of the type field.
type NetUNIXType uint64
// NetUNIXFlags is the type of the flags field.
type NetUNIXFlags uint64
// NetUNIXState is the type of the state field.
type NetUNIXState uint64
// NetUNIXLine represents a line of /proc/net/unix.
type NetUNIXLine struct {
KernelPtr string
RefCount uint64
Protocol uint64
Flags NetUNIXFlags
Type NetUNIXType
State NetUNIXState
Inode uint64
Path string
}
// NetUNIX holds the data read from /proc/net/unix.
type NetUNIX struct {
Rows []*NetUNIXLine
}
// NetUNIX returns data read from /proc/net/unix.
func (fs FS) NetUNIX() (*NetUNIX, error) {
return readNetUNIX(fs.proc.Path("net/unix"))
}
// readNetUNIX reads data in /proc/net/unix format from the specified file.
func readNetUNIX(file string) (*NetUNIX, error) {
// This file could be quite large and a streaming read is desirable versus
// reading the entire contents at once.
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
return parseNetUNIX(f)
}
// parseNetUNIX creates a NetUnix structure from the incoming stream.
func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
// Begin scanning by checking for the existence of Inode.
s := bufio.NewScanner(r)
s.Scan()
// From the man page of proc(5), it does not contain an Inode field,
	// but in actuality it exists. This code works for both cases.
hasInode := strings.Contains(s.Text(), "Inode")
// Expect a minimum number of fields, but Inode and Path are optional:
// Num RefCount Protocol Flags Type St Inode Path
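	// An illustrative line (values assumed) would look like:
	//   0000000000000000: 00000002 00000000 00010000 0001 01 12345 /run/example.sock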
minFields := 6
if hasInode {
minFields++
}
var nu NetUNIX
for s.Scan() {
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil {
return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err)
}
nu.Rows = append(nu.Rows, item)
}
if err := s.Err(); err != nil {
return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err)
}
return &nu, nil
}
func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
fields := strings.Fields(line)
l := len(fields)
if l < min {
return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
}
// Field offsets are as follows:
// Num RefCount Protocol Flags Type St Inode Path
kernelPtr := strings.TrimSuffix(fields[0], ":")
users, err := u.parseUsers(fields[1])
if err != nil {
return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err)
}
flags, err := u.parseFlags(fields[3])
if err != nil {
return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err)
}
typ, err := u.parseType(fields[4])
if err != nil {
return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err)
}
state, err := u.parseState(fields[5])
if err != nil {
return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err)
}
var inode uint64
if hasInode {
inode, err = u.parseInode(fields[6])
if err != nil {
return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err)
}
}
n := &NetUNIXLine{
KernelPtr: kernelPtr,
RefCount: users,
Type: typ,
Flags: flags,
State: state,
Inode: inode,
}
// Path field is optional.
if l > min {
// Path occurs at either index 6 or 7 depending on whether inode is
// already present.
pathIdx := 7
if !hasInode {
pathIdx--
}
n.Path = fields[pathIdx]
}
return n, nil
}
func (u NetUNIX) parseUsers(s string) (uint64, error) {
return strconv.ParseUint(s, 16, 32)
}
func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
typ, err := strconv.ParseUint(s, 16, 16)
if err != nil {
return 0, err
}
return NetUNIXType(typ), nil
}
func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
flags, err := strconv.ParseUint(s, 16, 32)
if err != nil {
return 0, err
}
return NetUNIXFlags(flags), nil
}
func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
st, err := strconv.ParseInt(s, 16, 8)
if err != nil {
return 0, err
}
return NetUNIXState(st), nil
}
func (u NetUNIX) parseInode(s string) (uint64, error) {
return strconv.ParseUint(s, 10, 64)
}
func (t NetUNIXType) String() string {
switch t {
case netUnixTypeStream:
return "stream"
case netUnixTypeDgram:
return "dgram"
case netUnixTypeSeqpacket:
return "seqpacket"
}
return "unknown"
}
func (f NetUNIXFlags) String() string {
switch f {
case netUnixFlagListen:
return "listen"
default:
return "default"
}
}
func (s NetUNIXState) String() string {
switch s {
case netUnixStateUnconnected:
return "unconnected"
case netUnixStateConnecting:
return "connecting"
case netUnixStateConnected:
return "connected"
case netUnixStateDisconnected:
return "disconnected"
}
return "unknown"
}
| vendor/github.com/prometheus/procfs/net_unix.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.0023700972087681293,
0.00038090156158432364,
0.0001602490956429392,
0.00017093407223001122,
0.0005892498884350061
] |
{
"id": 4,
"code_window": [
"\t\tklog.InfoS(\"Watching for node, awaiting podCIDR allocation\", \"hostname\", s.Hostname)\n",
"\t\tnode, err := waitForPodCIDR(s.Client, s.Hostname)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, err\n",
"\t\t}\n",
"\t\ts.podCIDRs = node.Spec.PodCIDRs\n",
"\t\tklog.InfoS(\"NodeInfo\", \"podCIDRs\", node.Spec.PodCIDRs)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\treturn err\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 87
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rest
import (
"testing"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// TestWipeObjectMetaSystemFields validates that system-populated fields are wiped from an object
func TestWipeObjectMetaSystemFields(t *testing.T) {
resource := metav1.ObjectMeta{}
WipeObjectMetaSystemFields(&resource)
if !resource.CreationTimestamp.Time.IsZero() {
t.Errorf("resource.CreationTimestamp is set")
}
if len(resource.UID) != 0 {
t.Errorf("resource.UID is set")
}
if resource.DeletionTimestamp != nil {
t.Errorf("resource.DeletionTimestamp is set")
}
if resource.DeletionGracePeriodSeconds != nil {
t.Errorf("resource.DeletionGracePeriodSeconds is set")
}
if len(resource.SelfLink) != 0 {
t.Errorf("resource.SelfLink is set")
}
}
// TestFillObjectMetaSystemFields validates that system populated fields are set on an object
func TestFillObjectMetaSystemFields(t *testing.T) {
resource := metav1.ObjectMeta{}
FillObjectMetaSystemFields(&resource)
if resource.CreationTimestamp.Time.IsZero() {
t.Errorf("resource.CreationTimestamp is zero")
}
if len(resource.UID) == 0 {
t.Errorf("resource.UID missing")
}
}
// TestHasObjectMetaSystemFieldValues validates that true is returned if and only if all fields are populated
func TestHasObjectMetaSystemFieldValues(t *testing.T) {
resource := metav1.ObjectMeta{}
objMeta, err := meta.Accessor(&resource)
if err != nil {
t.Fatal(err)
}
if metav1.HasObjectMetaSystemFieldValues(objMeta) {
t.Errorf("the resource does not have all fields yet populated, but incorrectly reports it does")
}
FillObjectMetaSystemFields(&resource)
if !metav1.HasObjectMetaSystemFieldValues(objMeta) {
t.Errorf("the resource does have all fields populated, but incorrectly reports it does not")
}
}
func TestEnsureObjectNamespaceMatchesRequestNamespace(t *testing.T) {
testcases := []struct {
name string
reqNS string
objNS string
expectErr bool
expectObjNS string
}{
{
name: "cluster-scoped req, cluster-scoped obj",
reqNS: "",
objNS: "",
expectErr: false,
expectObjNS: "",
},
{
name: "cluster-scoped req, namespaced obj",
reqNS: "",
objNS: "foo",
expectErr: false,
expectObjNS: "", // no error, object is forced to cluster-scoped for backwards compatibility
},
{
name: "namespaced req, no-namespace obj",
reqNS: "foo",
objNS: "",
expectErr: false,
expectObjNS: "foo", // no error, object is updated to match request for backwards compatibility
},
{
name: "namespaced req, matching obj",
reqNS: "foo",
objNS: "foo",
expectErr: false,
expectObjNS: "foo",
},
{
name: "namespaced req, mis-matched obj",
reqNS: "foo",
objNS: "bar",
expectErr: true,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
obj := metav1.ObjectMeta{Namespace: tc.objNS}
err := EnsureObjectNamespaceMatchesRequestNamespace(tc.reqNS, &obj)
if tc.expectErr {
if err == nil {
t.Fatal("expected err, got none")
}
return
} else if err != nil {
t.Fatalf("unexpected err: %v", err)
}
if obj.Namespace != tc.expectObjNS {
t.Fatalf("expected obj ns %q, got %q", tc.expectObjNS, obj.Namespace)
}
})
}
}
| staging/src/k8s.io/apiserver/pkg/registry/rest/meta_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.0001803644117899239,
0.00017449709412176162,
0.00016828729712869972,
0.00017462352116126567,
0.000003010195314345765
] |
{
"id": 4,
"code_window": [
"\t\tklog.InfoS(\"Watching for node, awaiting podCIDR allocation\", \"hostname\", s.Hostname)\n",
"\t\tnode, err := waitForPodCIDR(s.Client, s.Hostname)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, err\n",
"\t\t}\n",
"\t\ts.podCIDRs = node.Spec.PodCIDRs\n",
"\t\tklog.InfoS(\"NodeInfo\", \"podCIDRs\", node.Spec.PodCIDRs)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\treturn err\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 87
} | package computestorage
import (
"context"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"golang.org/x/sys/windows"
)
// GetLayerVhdMountPath returns the volume path for a virtual disk of a writable container layer.
func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) {
title := "hcsshim.GetLayerVhdMountPath"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
var mountPath *uint16
err = hcsGetLayerVhdMountPath(vhdHandle, &mountPath)
if err != nil {
return "", errors.Wrap(err, "failed to get vhd mount path")
}
path = interop.ConvertAndFreeCoTaskMemString(mountPath)
return path, nil
}
| vendor/github.com/Microsoft/hcsshim/computestorage/mount.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00026494471239857376,
0.00020379583293106407,
0.00017235121049452573,
0.0001740915613481775,
0.000043244624976068735
] |
{
"id": 5,
"code_window": [
"\t\t}\n",
"\t\ts.podCIDRs = node.Spec.PodCIDRs\n",
"\t\tklog.InfoS(\"NodeInfo\", \"podCIDRs\", node.Spec.PodCIDRs)\n",
"\t}\n",
"\n",
"\tprimaryProtocol := utiliptables.ProtocolIPv4\n",
"\tif s.PrimaryIPFamily == v1.IPv6Protocol {\n",
"\t\tprimaryProtocol = utiliptables.ProtocolIPv6\n",
"\t}\n",
"\texecer := exec.New()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\terr := s.setupConntrack()\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\tproxymetrics.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n",
"// createProxier creates the proxy.Provider\n",
"func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {\n",
"\tvar proxier proxy.Provider\n",
"\tvar err error\n",
"\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "add",
"edit_start_line_idx": 93
} | //go:build !windows
// +build !windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"context"
"errors"
"fmt"
goruntime "runtime"
"strings"
"time"
"github.com/google/cadvisor/machine"
"github.com/google/cadvisor/utils/sysfs"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/apimachinery/pkg/fields"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
toolswatch "k8s.io/client-go/tools/watch"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
"k8s.io/kubernetes/pkg/proxy"
proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/proxy/ipvs"
utilipset "k8s.io/kubernetes/pkg/proxy/ipvs/ipset"
utilipvs "k8s.io/kubernetes/pkg/proxy/ipvs/util"
proxymetrics "k8s.io/kubernetes/pkg/proxy/metrics"
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
"k8s.io/utils/exec"
netutils "k8s.io/utils/net"
"k8s.io/klog/v2"
)
// timeoutForNodePodCIDR is the time to wait for allocators to assign a PodCIDR to the
// node after it is registered.
var timeoutForNodePodCIDR = 5 * time.Minute
func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {
if config.Mode == "" {
klog.InfoS("Using iptables proxy")
config.Mode = proxyconfigapi.ProxyModeIPTables
}
if config.DetectLocalMode == "" {
klog.V(4).InfoS("Defaulting detect-local-mode", "localModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR))
config.DetectLocalMode = proxyconfigapi.LocalModeClusterCIDR
}
klog.V(2).InfoS("DetectLocalMode", "localMode", string(config.DetectLocalMode))
}
// createProxier creates the proxy.Provider
func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {
var proxier proxy.Provider
var err error
if config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {
klog.InfoS("Watching for node, awaiting podCIDR allocation", "hostname", s.Hostname)
node, err := waitForPodCIDR(s.Client, s.Hostname)
if err != nil {
return nil, err
}
s.podCIDRs = node.Spec.PodCIDRs
klog.InfoS("NodeInfo", "podCIDRs", node.Spec.PodCIDRs)
}
primaryProtocol := utiliptables.ProtocolIPv4
if s.PrimaryIPFamily == v1.IPv6Protocol {
primaryProtocol = utiliptables.ProtocolIPv6
}
execer := exec.New()
iptInterface := utiliptables.New(execer, primaryProtocol)
var ipt [2]utiliptables.Interface
	dualStack := true // While we assume that the node supports dual-stack, we do further checks below
// Create iptables handlers for both families, one is already created
// Always ordered as IPv4, IPv6
if primaryProtocol == utiliptables.ProtocolIPv4 {
ipt[0] = iptInterface
ipt[1] = utiliptables.New(execer, utiliptables.ProtocolIPv6)
} else {
ipt[0] = utiliptables.New(execer, utiliptables.ProtocolIPv4)
ipt[1] = iptInterface
}
if !ipt[0].Present() {
return nil, fmt.Errorf("iptables is not supported for primary IP family %q", primaryProtocol)
} else if !ipt[1].Present() {
klog.InfoS("kube-proxy running in single-stack mode: secondary ipFamily is not supported", "ipFamily", ipt[1].Protocol())
dualStack = false
// Validate NodePortAddresses is single-stack
npaByFamily := proxyutil.MapCIDRsByIPFamily(config.NodePortAddresses)
secondaryFamily := proxyutil.OtherIPFamily(s.PrimaryIPFamily)
badAddrs := npaByFamily[secondaryFamily]
if len(badAddrs) > 0 {
klog.InfoS("Ignoring --nodeport-addresses of the wrong family", "ipFamily", secondaryFamily, "addresses", badAddrs)
}
}
if config.Mode == proxyconfigapi.ProxyModeIPTables {
klog.InfoS("Using iptables Proxier")
if dualStack {
klog.InfoS("kube-proxy running in dual-stack mode", "ipFamily", iptInterface.Protocol())
klog.InfoS("Creating dualStackProxier for iptables")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewDualStackProxier(
ipt,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetectors,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.NodePortAddresses,
)
} else {
			// Create a single-stack proxier if and only if the node does not support dual-stack (i.e., no iptables support).
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewProxier(
s.PrimaryIPFamily,
iptInterface,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetector,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.NodePortAddresses,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
} else if config.Mode == proxyconfigapi.ProxyModeIPVS {
kernelHandler := ipvs.NewLinuxKernelHandler()
ipsetInterface := utilipset.New(execer)
ipvsInterface := utilipvs.New()
if err := ipvs.CanUseIPVSProxier(ipvsInterface, ipsetInterface, config.IPVS.Scheduler); err != nil {
return nil, fmt.Errorf("can't use the IPVS proxier: %v", err)
}
klog.InfoS("Using ipvs Proxier")
if dualStack {
klog.InfoS("Creating dualStackProxier for ipvs")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxier, err = ipvs.NewDualStackProxier(
ipt,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetectors,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.IPVS.Scheduler,
config.NodePortAddresses,
kernelHandler,
)
} else {
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxier, err = ipvs.NewProxier(
s.PrimaryIPFamily,
iptInterface,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetector,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.IPVS.Scheduler,
config.NodePortAddresses,
kernelHandler,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
}
return proxier, nil
}
func (s *ProxyServer) platformSetup() error {
ct := &realConntracker{}
max, err := getConntrackMax(s.Config.Conntrack)
if err != nil {
return err
}
if max > 0 {
err := ct.SetMax(max)
if err != nil {
if err != errReadOnlySysFS {
return err
}
// errReadOnlySysFS is caused by a known docker issue (https://github.com/docker/docker/issues/24000),
// the only remediation we know is to restart the docker daemon.
			// Here we'll send a node event with a specific reason and message; the
			// administrator should decide whether and how to handle this issue,
			// whether to drain the node and restart docker. This occurs in other
			// container runtimes as well.
// TODO(random-liu): Remove this when the docker bug is fixed.
const message = "CRI error: /sys is read-only: " +
"cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)"
s.Recorder.Eventf(s.NodeRef, nil, v1.EventTypeWarning, err.Error(), "StartKubeProxy", message)
}
}
if s.Config.Conntrack.TCPEstablishedTimeout != nil && s.Config.Conntrack.TCPEstablishedTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPEstablishedTimeout.Duration / time.Second)
if err := ct.SetTCPEstablishedTimeout(timeout); err != nil {
return err
}
}
if s.Config.Conntrack.TCPCloseWaitTimeout != nil && s.Config.Conntrack.TCPCloseWaitTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPCloseWaitTimeout.Duration / time.Second)
if err := ct.SetTCPCloseWaitTimeout(timeout); err != nil {
return err
}
}
proxymetrics.RegisterMetrics()
return nil
}
func getConntrackMax(config proxyconfigapi.KubeProxyConntrackConfiguration) (int, error) {
if config.MaxPerCore != nil && *config.MaxPerCore > 0 {
floor := 0
if config.Min != nil {
floor = int(*config.Min)
}
scaled := int(*config.MaxPerCore) * detectNumCPU()
if scaled > floor {
klog.V(3).InfoS("GetConntrackMax: using scaled conntrack-max-per-core")
return scaled, nil
}
klog.V(3).InfoS("GetConntrackMax: using conntrack-min")
return floor, nil
}
return 0, nil
}
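// Illustrative worked example (values assumed, not from the original source):
// with MaxPerCore=32768 on an 8-CPU node and Min=131072, the scaled value is
// 32768*8 = 262144, which exceeds the floor, so 262144 is returned; on a
// 2-CPU node the scaled value 65536 is below the floor, so 131072 is used.
// When MaxPerCore is 0, the conntrack limit is left untouched and 0 is returned.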
func waitForPodCIDR(client clientset.Interface, nodeName string) (*v1.Node, error) {
// since allocators can assign the podCIDR after the node registers, we do a watch here to wait
// for podCIDR to be assigned, instead of assuming that the Get() on startup will have it.
ctx, cancelFunc := context.WithTimeout(context.TODO(), timeoutForNodePodCIDR)
defer cancelFunc()
fieldSelector := fields.OneTermEqualSelector("metadata.name", nodeName).String()
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
options.FieldSelector = fieldSelector
return client.CoreV1().Nodes().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return client.CoreV1().Nodes().Watch(ctx, options)
},
}
condition := func(event watch.Event) (bool, error) {
// don't process delete events
if event.Type != watch.Modified && event.Type != watch.Added {
return false, nil
}
n, ok := event.Object.(*v1.Node)
if !ok {
return false, fmt.Errorf("event object not of type Node")
}
		// don't consider the node if it is going to be deleted; keep waiting
if !n.DeletionTimestamp.IsZero() {
return false, nil
}
return n.Spec.PodCIDR != "" && len(n.Spec.PodCIDRs) > 0, nil
}
evt, err := toolswatch.UntilWithSync(ctx, lw, &v1.Node{}, nil, condition)
if err != nil {
return nil, fmt.Errorf("timeout waiting for PodCIDR allocation to configure detect-local-mode %v: %v", proxyconfigapi.LocalModeNodeCIDR, err)
}
if n, ok := evt.Object.(*v1.Node); ok {
return n, nil
}
return nil, fmt.Errorf("event object not of type node")
}
func detectNumCPU() int {
	// try to get numCPU from /sys first due to a known issue (https://github.com/kubernetes/kubernetes/issues/99225)
_, numCPU, err := machine.GetTopology(sysfs.NewRealSysFs())
if err != nil || numCPU < 1 {
return goruntime.NumCPU()
}
return numCPU
}
func getLocalDetector(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt utiliptables.Interface, nodePodCIDRs []string) (proxyutiliptables.LocalTrafficDetector, error) {
switch mode {
case proxyconfigapi.LocalModeClusterCIDR:
// LocalModeClusterCIDR is the default if --detect-local-mode wasn't passed,
// but --cluster-cidr is optional.
if len(strings.TrimSpace(config.ClusterCIDR)) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined")
break
}
return proxyutiliptables.NewDetectLocalByCIDR(config.ClusterCIDR, ipt)
case proxyconfigapi.LocalModeNodeCIDR:
if len(nodePodCIDRs) == 0 {
klog.InfoS("Detect-local-mode set to NodeCIDR, but no PodCIDR defined at node")
break
}
return proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt)
case proxyconfigapi.LocalModeBridgeInterface:
return proxyutiliptables.NewDetectLocalByBridgeInterface(config.DetectLocal.BridgeInterface)
case proxyconfigapi.LocalModeInterfaceNamePrefix:
return proxyutiliptables.NewDetectLocalByInterfaceNamePrefix(config.DetectLocal.InterfaceNamePrefix)
}
klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
return proxyutiliptables.NewNoOpLocalDetector(), nil
}
func getDualStackLocalDetectorTuple(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt [2]utiliptables.Interface, nodePodCIDRs []string) ([2]proxyutiliptables.LocalTrafficDetector, error) {
var err error
localDetectors := [2]proxyutiliptables.LocalTrafficDetector{proxyutiliptables.NewNoOpLocalDetector(), proxyutiliptables.NewNoOpLocalDetector()}
switch mode {
case proxyconfigapi.LocalModeClusterCIDR:
// LocalModeClusterCIDR is the default if --detect-local-mode wasn't passed,
// but --cluster-cidr is optional.
if len(strings.TrimSpace(config.ClusterCIDR)) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined")
break
}
clusterCIDRs := cidrTuple(config.ClusterCIDR)
if len(strings.TrimSpace(clusterCIDRs[0])) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv4 cluster CIDR defined, defaulting to no-op detect-local for IPv4")
} else {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[0], ipt[0])
			if err != nil { // don't lose the original error
return localDetectors, err
}
}
if len(strings.TrimSpace(clusterCIDRs[1])) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, defaulting to no-op detect-local for IPv6")
} else {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[1], ipt[1])
}
return localDetectors, err
case proxyconfigapi.LocalModeNodeCIDR:
if len(nodePodCIDRs) == 0 {
klog.InfoS("No node info available to configure detect-local-mode NodeCIDR")
break
}
		// localDetectors, like ipt, need to be in the order [IPv4, IPv6], but PodCIDRs is set up so that PodCIDRs[0] == PodCIDR,
		// so we have to handle the case where PodCIDR can be IPv6 and set that to localDetectors[1].
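		// (Illustrative: with PodCIDRs == ["fd00:1::/64", "10.244.0.0/24"], localDetectors[1]
		// is built from the IPv6 CIDR and localDetectors[0] from the IPv4 one.)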
if netutils.IsIPv6CIDRString(nodePodCIDRs[0]) {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt[1])
if err != nil {
return localDetectors, err
}
if len(nodePodCIDRs) > 1 {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[1], ipt[0])
}
} else {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt[0])
if err != nil {
return localDetectors, err
}
if len(nodePodCIDRs) > 1 {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[1], ipt[1])
}
}
return localDetectors, err
case proxyconfigapi.LocalModeBridgeInterface, proxyconfigapi.LocalModeInterfaceNamePrefix:
localDetector, err := getLocalDetector(mode, config, ipt[0], nodePodCIDRs)
if err == nil {
localDetectors[0] = localDetector
localDetectors[1] = localDetector
}
return localDetectors, err
default:
klog.InfoS("Unknown detect-local-mode", "detectLocalMode", mode)
}
klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
return localDetectors, nil
}
// cidrTuple takes a comma-separated list of CIDRs and returns a tuple (ipv4cidr,ipv6cidr).
// The returned tuple is guaranteed to have the order (ipv4,ipv6); if no CIDR from a family is found, an
// empty string "" is inserted.
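// Illustrative example (input values assumed):
//
//	cidrTuple("fd00:10:96::/112,10.96.0.0/16") // returns [2]string{"10.96.0.0/16", "fd00:10:96::/112"}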
func cidrTuple(cidrList string) [2]string {
cidrs := [2]string{"", ""}
foundIPv4 := false
foundIPv6 := false
for _, cidr := range strings.Split(cidrList, ",") {
if netutils.IsIPv6CIDRString(cidr) && !foundIPv6 {
cidrs[1] = cidr
foundIPv6 = true
} else if !foundIPv4 {
cidrs[0] = cidr
foundIPv4 = true
}
if foundIPv6 && foundIPv4 {
break
}
}
return cidrs
}
// cleanupAndExit removes iptables rules and ipset/ipvs rules
func cleanupAndExit() error {
execer := exec.New()
// cleanup IPv6 and IPv4 iptables rules, regardless of current configuration
ipts := []utiliptables.Interface{
utiliptables.New(execer, utiliptables.ProtocolIPv4),
utiliptables.New(execer, utiliptables.ProtocolIPv6),
}
ipsetInterface := utilipset.New(execer)
ipvsInterface := utilipvs.New()
var encounteredError bool
for _, ipt := range ipts {
encounteredError = iptables.CleanupLeftovers(ipt) || encounteredError
encounteredError = ipvs.CleanupLeftovers(ipvsInterface, ipt, ipsetInterface) || encounteredError
}
if encounteredError {
return errors.New("encountered an error while tearing down rules")
}
return nil
}
| cmd/kube-proxy/app/server_others.go | 1 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.9993315935134888,
0.18576133251190186,
0.00016444097855128348,
0.0004930355353280902,
0.380942702293396
] |
{
"id": 5,
"code_window": [
"\t\t}\n",
"\t\ts.podCIDRs = node.Spec.PodCIDRs\n",
"\t\tklog.InfoS(\"NodeInfo\", \"podCIDRs\", node.Spec.PodCIDRs)\n",
"\t}\n",
"\n",
"\tprimaryProtocol := utiliptables.ProtocolIPv4\n",
"\tif s.PrimaryIPFamily == v1.IPv6Protocol {\n",
"\t\tprimaryProtocol = utiliptables.ProtocolIPv6\n",
"\t}\n",
"\texecer := exec.New()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\terr := s.setupConntrack()\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\tproxymetrics.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n",
"// createProxier creates the proxy.Provider\n",
"func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {\n",
"\tvar proxier proxy.Provider\n",
"\tvar err error\n",
"\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "add",
"edit_start_line_idx": 93
} | apiVersion: apps/v1
kind: Deployment
metadata:
labels:
run: hello
name: scale-1
spec:
replicas: 1
selector:
matchLabels:
run: hello
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
run: hello
spec:
containers:
- image: aronchick/hello-node:2.0
imagePullPolicy: IfNotPresent
name: hello
| hack/testdata/scale-deploy-1.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00017667116480879486,
0.00017502624541521072,
0.00017388767446391284,
0.00017451988242100924,
0.0000011914287370018428
] |
{
"id": 5,
"code_window": [
"\t\t}\n",
"\t\ts.podCIDRs = node.Spec.PodCIDRs\n",
"\t\tklog.InfoS(\"NodeInfo\", \"podCIDRs\", node.Spec.PodCIDRs)\n",
"\t}\n",
"\n",
"\tprimaryProtocol := utiliptables.ProtocolIPv4\n",
"\tif s.PrimaryIPFamily == v1.IPv6Protocol {\n",
"\t\tprimaryProtocol = utiliptables.ProtocolIPv6\n",
"\t}\n",
"\texecer := exec.New()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\terr := s.setupConntrack()\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\tproxymetrics.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n",
"// createProxier creates the proxy.Provider\n",
"func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {\n",
"\tvar proxier proxy.Provider\n",
"\tvar err error\n",
"\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "add",
"edit_start_line_idx": 93
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "node.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&RuntimeClass{},
&RuntimeClassList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
| staging/src/k8s.io/api/node/v1alpha1/register.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00043210701551288366,
0.00021508452482521534,
0.00016281913849525154,
0.00017370222485624254,
0.00009719961963128299
] |
{
"id": 5,
"code_window": [
"\t\t}\n",
"\t\ts.podCIDRs = node.Spec.PodCIDRs\n",
"\t\tklog.InfoS(\"NodeInfo\", \"podCIDRs\", node.Spec.PodCIDRs)\n",
"\t}\n",
"\n",
"\tprimaryProtocol := utiliptables.ProtocolIPv4\n",
"\tif s.PrimaryIPFamily == v1.IPv6Protocol {\n",
"\t\tprimaryProtocol = utiliptables.ProtocolIPv6\n",
"\t}\n",
"\texecer := exec.New()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\terr := s.setupConntrack()\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\tproxymetrics.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n",
"// createProxier creates the proxy.Provider\n",
"func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {\n",
"\tvar proxier proxy.Provider\n",
"\tvar err error\n",
"\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "add",
"edit_start_line_idx": 93
} | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package packages loads Go packages for inspection and analysis.
The Load function takes as input a list of patterns and returns a list of Package
structs describing individual packages matched by those patterns.
The LoadMode controls the amount of detail in the loaded packages.
Load passes most patterns directly to the underlying build tool,
but all patterns with the prefix "query=", where query is a
non-empty string of letters from [a-z], are reserved and may be
interpreted as query operators.
Two query operators are currently supported: "file" and "pattern".
The query "file=path/to/file.go" matches the package or packages enclosing
the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
might return the packages "fmt" and "fmt [fmt.test]".
The query "pattern=string" causes "string" to be passed directly to
the underlying build tool. In most cases this is unnecessary,
but an application can use Load("pattern=" + x) as an escaping mechanism
to ensure that x is not interpreted as a query operator if it contains '='.
All other query operators are reserved for future use and currently
cause Load to report an error.
The Package struct provides basic information about the package, including
- ID, a unique identifier for the package in the returned set;
- GoFiles, the names of the package's Go source files;
- Imports, a map from source import strings to the Packages they name;
- Types, the type information for the package's exported symbols;
- Syntax, the parsed syntax trees for the package's source code; and
- TypeInfo, the result of a complete type-check of the package syntax trees.
(See the documentation for type Package for the complete list of fields
and more detailed descriptions.)
For example,
Load(nil, "bytes", "unicode...")
returns four Package structs describing the standard library packages
bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
can match multiple packages and that a package might be matched by
multiple patterns: in general it is not possible to determine which
packages correspond to which patterns.
Note that the list returned by Load contains only the packages matched
by the patterns. Their dependencies can be found by walking the import
graph using the Imports fields.
The Load function can be configured by passing a pointer to a Config as
the first argument. A nil Config is equivalent to the zero Config, which
causes Load to run in LoadFiles mode, collecting minimal information.
See the documentation for type Config for details.
As noted earlier, the Config.Mode controls the amount of detail
reported about the loaded packages. See the documentation for type LoadMode
for details.
Most tools should pass their command-line arguments (after any flags)
uninterpreted to the loader, so that the loader can interpret them
according to the conventions of the underlying build system.
See the Example function for typical usage.
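A minimal sketch of such usage follows (illustrative only; error handling is
elided, and the LoadMode shown is simply the LoadFiles default described above):

	cfg := &packages.Config{Mode: packages.LoadFiles}
	pkgs, err := packages.Load(cfg, "bytes", "unicode...")
	if err != nil {
		// handle the error
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, pkg.GoFiles)
	}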
*/
package packages // import "golang.org/x/tools/go/packages"
/*
Motivation and design considerations
The new package's design solves problems addressed by two existing
packages: go/build, which locates and describes packages, and
golang.org/x/tools/go/loader, which loads, parses and type-checks them.
The go/build.Package structure encodes too much of the 'go build' way
of organizing projects, leaving us in need of a data type that describes a
package of Go source code independent of the underlying build system.
We wanted something that works equally well with go build and vgo, and
also other build systems such as Bazel and Blaze, making it possible to
construct analysis tools that work in all these environments.
Tools such as errcheck and staticcheck were essentially unavailable to
the Go community at Google, and some of Google's internal tools for Go
are unavailable externally.
This new package provides a uniform way to obtain package metadata by
querying each of these build systems, optionally supporting their
preferred command-line notations for packages, so that tools integrate
neatly with users' build environments. The Metadata query function
executes an external query tool appropriate to the current workspace.
Loading packages always returns the complete import graph "all the way down",
even if all you want is information about a single package, because the query
mechanisms of all the build systems we currently support ({go,vgo} list, and
blaze/bazel aspect-based query) cannot provide detailed information
about one package without visiting all its dependencies too, so there is
no additional asymptotic cost to providing transitive information.
(This property might not be true of a hypothetical 5th build system.)
In calls to TypeCheck, all initial packages, and any package that
transitively depends on one of them, must be loaded from source.
Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
source; D may be loaded from export data, and E may not be loaded at all
(though it's possible that D's export data mentions it, so a
types.Package may be created for it and exposed.)
The old loader had a feature to suppress type-checking of function
bodies on a per-package basis, primarily intended to reduce the work of
obtaining type information for imported packages. Now that imports are
satisfied by export data, the optimization no longer seems necessary.
Despite some early attempts, the old loader did not exploit export data,
instead always using the equivalent of WholeProgram mode. This was due
to the complexity of mixing source and export data packages (now
resolved by the upward traversal mentioned above), and because export data
files were nearly always missing or stale. Now that 'go build' supports
caching, all the underlying build systems can guarantee to produce
export data in a reasonable (amortized) time.
Test "main" packages synthesized by the build system are now reported as
first-class packages, avoiding the need for clients (such as go/ssa) to
reinvent this generation logic.
One way in which go/packages is simpler than the old loader is in its
treatment of in-package tests. In-package tests are packages that
consist of all the files of the library under test, plus the test files.
The old loader constructed in-package tests by a two-phase process of
mutation called "augmentation": first it would construct and type check
all the ordinary library packages and type-check the packages that
depend on them; then it would add more (test) files to the package and
type-check again. This two-phase approach had four major problems:
1) in processing the tests, the loader modified the library package,
leaving no way for a client application to see both the test
package and the library package; one would mutate into the other.
2) because test files can declare additional methods on types defined in
the library portion of the package, the dispatch of method calls in
the library portion was affected by the presence of the test files.
This should have been a clue that the packages were logically
different.
3) this model of "augmentation" assumed at most one in-package test
per library package, which is true of projects using 'go build',
but not other build systems.
4) because of the two-phase nature of test processing, all packages that
import the library package had to be processed before augmentation,
forcing a "one-shot" API and preventing the client from calling Load
several times in sequence as is now possible in WholeProgram mode.
(TypeCheck mode has a similar one-shot restriction for a different reason.)
Early drafts of this package supported "multi-shot" operation.
Although it allowed clients to make a sequence of calls (or concurrent
calls) to Load, building up the graph of Packages incrementally,
it was of marginal value: it complicated the API
(since it allowed some options to vary across calls but not others),
it complicated the implementation,
it cannot be made to work in Types mode, as explained above,
and it was less efficient than making one combined call (when this is possible).
Among the clients we have inspected, none made multiple calls to load
but could not be easily and satisfactorily modified to make only a single call.
However, application changes may be required.
For example, the ssadump command loads the user-specified packages
and in addition the runtime package. It is tempting to simply append
"runtime" to the user-provided list, but that does not work if the user
specified an ad-hoc package such as [a.go b.go].
Instead, ssadump no longer requests the runtime package,
but seeks it among the dependencies of the user-specified packages,
and emits an error if it is not found.
Overlays: The Overlay field in the Config allows providing alternate contents
for Go source files, by providing a mapping from file path to contents.
go/packages will pull in new imports added in overlay files when go/packages
is run in LoadImports mode or greater.
Overlay support for the go list driver isn't complete yet: if the file doesn't
exist on disk, it will only be recognized in an overlay if it is a non-test file
and the package would be reported even without the overlay.
Questions & Tasks
- Add GOARCH/GOOS?
They are not portable concepts, but could be made portable.
Our goal has been to allow users to express themselves using the conventions
of the underlying build system: if the build system honors GOARCH
during a build and during a metadata query, then so should
applications built atop that query mechanism.
Conversely, if the target architecture of the build is determined by
command-line flags, the application can pass the relevant
flags through to the build system using a command such as:
myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
However, this approach is low-level, unwieldy, and non-portable.
GOOS and GOARCH seem important enough to warrant a dedicated option.
- How should we handle partial failures such as a mixture of good and
malformed patterns, existing and non-existent packages, successful and
failed builds, import failures, import cycles, and so on, in a call to
Load?
- Support bazel, blaze, and go1.10 list, not just go1.11 list.
- Handle (and test) various partial success cases, e.g.
a mixture of good packages and:
invalid patterns
nonexistent packages
empty packages
packages with malformed package or import declarations
unreadable files
import cycles
other parse errors
type errors
Make sure we record errors at the correct place in the graph.
- Missing packages among initial arguments are not reported.
Return bogus packages for them, like golist does.
- "undeclared name" errors (for example) are reported out of source file
order. I suspect this is due to the breadth-first resolution now used
by go/types. Is that a bug? Discuss with gri.
*/
| vendor/golang.org/x/tools/go/packages/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00018023479788098484,
0.00016798525757621974,
0.00016064810915850103,
0.00016774740652181208,
0.000004009400527138496
] |
{
"id": 6,
"code_window": [
"\t}\n",
"\n",
"\treturn proxier, nil\n",
"}\n",
"\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\tct := &realConntracker{}\n",
"\n",
"\tmax, err := getConntrackMax(s.Config.Conntrack)\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *ProxyServer) setupConntrack() error {\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 273
} | //go:build !windows
// +build !windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"context"
"errors"
"fmt"
goruntime "runtime"
"strings"
"time"
"github.com/google/cadvisor/machine"
"github.com/google/cadvisor/utils/sysfs"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/apimachinery/pkg/fields"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
toolswatch "k8s.io/client-go/tools/watch"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
"k8s.io/kubernetes/pkg/proxy"
proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/proxy/ipvs"
utilipset "k8s.io/kubernetes/pkg/proxy/ipvs/ipset"
utilipvs "k8s.io/kubernetes/pkg/proxy/ipvs/util"
proxymetrics "k8s.io/kubernetes/pkg/proxy/metrics"
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
"k8s.io/utils/exec"
netutils "k8s.io/utils/net"
"k8s.io/klog/v2"
)
// timeoutForNodePodCIDR is the time to wait for allocators to assign a PodCIDR to the
// node after it is registered.
var timeoutForNodePodCIDR = 5 * time.Minute
func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {
if config.Mode == "" {
klog.InfoS("Using iptables proxy")
config.Mode = proxyconfigapi.ProxyModeIPTables
}
if config.DetectLocalMode == "" {
klog.V(4).InfoS("Defaulting detect-local-mode", "localModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR))
config.DetectLocalMode = proxyconfigapi.LocalModeClusterCIDR
}
klog.V(2).InfoS("DetectLocalMode", "localMode", string(config.DetectLocalMode))
}
// createProxier creates the proxy.Provider
func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {
var proxier proxy.Provider
var err error
if config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {
klog.InfoS("Watching for node, awaiting podCIDR allocation", "hostname", s.Hostname)
node, err := waitForPodCIDR(s.Client, s.Hostname)
if err != nil {
return nil, err
}
s.podCIDRs = node.Spec.PodCIDRs
klog.InfoS("NodeInfo", "podCIDRs", node.Spec.PodCIDRs)
}
primaryProtocol := utiliptables.ProtocolIPv4
if s.PrimaryIPFamily == v1.IPv6Protocol {
primaryProtocol = utiliptables.ProtocolIPv6
}
execer := exec.New()
iptInterface := utiliptables.New(execer, primaryProtocol)
var ipt [2]utiliptables.Interface
dualStack := true // Assume the node supports dual-stack; we do further checks below
// Create iptables handlers for both families, one is already created
// Always ordered as IPv4, IPv6
if primaryProtocol == utiliptables.ProtocolIPv4 {
ipt[0] = iptInterface
ipt[1] = utiliptables.New(execer, utiliptables.ProtocolIPv6)
} else {
ipt[0] = utiliptables.New(execer, utiliptables.ProtocolIPv4)
ipt[1] = iptInterface
}
if !ipt[0].Present() {
return nil, fmt.Errorf("iptables is not supported for primary IP family %q", primaryProtocol)
} else if !ipt[1].Present() {
klog.InfoS("kube-proxy running in single-stack mode: secondary ipFamily is not supported", "ipFamily", ipt[1].Protocol())
dualStack = false
// Validate NodePortAddresses is single-stack
npaByFamily := proxyutil.MapCIDRsByIPFamily(config.NodePortAddresses)
secondaryFamily := proxyutil.OtherIPFamily(s.PrimaryIPFamily)
badAddrs := npaByFamily[secondaryFamily]
if len(badAddrs) > 0 {
klog.InfoS("Ignoring --nodeport-addresses of the wrong family", "ipFamily", secondaryFamily, "addresses", badAddrs)
}
}
if config.Mode == proxyconfigapi.ProxyModeIPTables {
klog.InfoS("Using iptables Proxier")
if dualStack {
klog.InfoS("kube-proxy running in dual-stack mode", "ipFamily", iptInterface.Protocol())
klog.InfoS("Creating dualStackProxier for iptables")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewDualStackProxier(
ipt,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetectors,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.NodePortAddresses,
)
} else {
// Create a single-stack proxier if and only if the node does not support dual-stack (i.e., no iptables support).
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewProxier(
s.PrimaryIPFamily,
iptInterface,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetector,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.NodePortAddresses,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
} else if config.Mode == proxyconfigapi.ProxyModeIPVS {
kernelHandler := ipvs.NewLinuxKernelHandler()
ipsetInterface := utilipset.New(execer)
ipvsInterface := utilipvs.New()
if err := ipvs.CanUseIPVSProxier(ipvsInterface, ipsetInterface, config.IPVS.Scheduler); err != nil {
return nil, fmt.Errorf("can't use the IPVS proxier: %v", err)
}
klog.InfoS("Using ipvs Proxier")
if dualStack {
klog.InfoS("Creating dualStackProxier for ipvs")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxier, err = ipvs.NewDualStackProxier(
ipt,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetectors,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.IPVS.Scheduler,
config.NodePortAddresses,
kernelHandler,
)
} else {
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxier, err = ipvs.NewProxier(
s.PrimaryIPFamily,
iptInterface,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetector,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.IPVS.Scheduler,
config.NodePortAddresses,
kernelHandler,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
}
return proxier, nil
}
func (s *ProxyServer) platformSetup() error {
ct := &realConntracker{}
max, err := getConntrackMax(s.Config.Conntrack)
if err != nil {
return err
}
if max > 0 {
err := ct.SetMax(max)
if err != nil {
if err != errReadOnlySysFS {
return err
}
// errReadOnlySysFS is caused by a known docker issue (https://github.com/docker/docker/issues/24000),
// the only remediation we know is to restart the docker daemon.
// Here we'll send a node event with a specific reason and message; the
// administrator should decide whether and how to handle this issue,
// whether to drain the node and restart docker. Occurs in other container runtimes
// as well.
// TODO(random-liu): Remove this when the docker bug is fixed.
const message = "CRI error: /sys is read-only: " +
"cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)"
s.Recorder.Eventf(s.NodeRef, nil, v1.EventTypeWarning, err.Error(), "StartKubeProxy", message)
}
}
if s.Config.Conntrack.TCPEstablishedTimeout != nil && s.Config.Conntrack.TCPEstablishedTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPEstablishedTimeout.Duration / time.Second)
if err := ct.SetTCPEstablishedTimeout(timeout); err != nil {
return err
}
}
if s.Config.Conntrack.TCPCloseWaitTimeout != nil && s.Config.Conntrack.TCPCloseWaitTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPCloseWaitTimeout.Duration / time.Second)
if err := ct.SetTCPCloseWaitTimeout(timeout); err != nil {
return err
}
}
proxymetrics.RegisterMetrics()
return nil
}
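// getConntrackMax returns the nf_conntrack table size to configure: the
// configured per-core limit scaled by the detected CPU count, bounded below by
// the configured minimum. A result of 0 means the kernel default is left unchanged.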
func getConntrackMax(config proxyconfigapi.KubeProxyConntrackConfiguration) (int, error) {
if config.MaxPerCore != nil && *config.MaxPerCore > 0 {
floor := 0
if config.Min != nil {
floor = int(*config.Min)
}
scaled := int(*config.MaxPerCore) * detectNumCPU()
if scaled > floor {
klog.V(3).InfoS("GetConntrackMax: using scaled conntrack-max-per-core")
return scaled, nil
}
klog.V(3).InfoS("GetConntrackMax: using conntrack-min")
return floor, nil
}
return 0, nil
}
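// waitForPodCIDR watches the named Node object until the allocator has
// assigned it a PodCIDR, or until timeoutForNodePodCIDR expires.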
func waitForPodCIDR(client clientset.Interface, nodeName string) (*v1.Node, error) {
// since allocators can assign the podCIDR after the node registers, we do a watch here to wait
// for podCIDR to be assigned, instead of assuming that the Get() on startup will have it.
ctx, cancelFunc := context.WithTimeout(context.TODO(), timeoutForNodePodCIDR)
defer cancelFunc()
fieldSelector := fields.OneTermEqualSelector("metadata.name", nodeName).String()
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
options.FieldSelector = fieldSelector
return client.CoreV1().Nodes().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return client.CoreV1().Nodes().Watch(ctx, options)
},
}
condition := func(event watch.Event) (bool, error) {
// don't process delete events
if event.Type != watch.Modified && event.Type != watch.Added {
return false, nil
}
n, ok := event.Object.(*v1.Node)
if !ok {
return false, fmt.Errorf("event object not of type Node")
}
// don't consider the node if it is going to be deleted; keep waiting
if !n.DeletionTimestamp.IsZero() {
return false, nil
}
return n.Spec.PodCIDR != "" && len(n.Spec.PodCIDRs) > 0, nil
}
evt, err := toolswatch.UntilWithSync(ctx, lw, &v1.Node{}, nil, condition)
if err != nil {
return nil, fmt.Errorf("timeout waiting for PodCIDR allocation to configure detect-local-mode %v: %v", proxyconfigapi.LocalModeNodeCIDR, err)
}
if n, ok := evt.Object.(*v1.Node); ok {
return n, nil
}
return nil, fmt.Errorf("event object not of type node")
}
func detectNumCPU() int {
// try to get numCPU from /sys first due to a known issue (https://github.com/kubernetes/kubernetes/issues/99225)
_, numCPU, err := machine.GetTopology(sysfs.NewRealSysFs())
if err != nil || numCPU < 1 {
return goruntime.NumCPU()
}
return numCPU
}
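// getLocalDetector creates the LocalTrafficDetector for the configured
// detect-local-mode, falling back to a no-op detector when the mode's required
// CIDR information is missing or the mode is unrecognized.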
func getLocalDetector(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt utiliptables.Interface, nodePodCIDRs []string) (proxyutiliptables.LocalTrafficDetector, error) {
switch mode {
case proxyconfigapi.LocalModeClusterCIDR:
// LocalModeClusterCIDR is the default if --detect-local-mode wasn't passed,
// but --cluster-cidr is optional.
if len(strings.TrimSpace(config.ClusterCIDR)) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined")
break
}
return proxyutiliptables.NewDetectLocalByCIDR(config.ClusterCIDR, ipt)
case proxyconfigapi.LocalModeNodeCIDR:
if len(nodePodCIDRs) == 0 {
klog.InfoS("Detect-local-mode set to NodeCIDR, but no PodCIDR defined at node")
break
}
return proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt)
case proxyconfigapi.LocalModeBridgeInterface:
return proxyutiliptables.NewDetectLocalByBridgeInterface(config.DetectLocal.BridgeInterface)
case proxyconfigapi.LocalModeInterfaceNamePrefix:
return proxyutiliptables.NewDetectLocalByInterfaceNamePrefix(config.DetectLocal.InterfaceNamePrefix)
}
klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
return proxyutiliptables.NewNoOpLocalDetector(), nil
}
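// getDualStackLocalDetectorTuple is the dual-stack variant of getLocalDetector:
// it returns an [IPv4, IPv6] pair of detectors, using a no-op detector for any
// family for which no usable CIDR is configured.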
func getDualStackLocalDetectorTuple(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt [2]utiliptables.Interface, nodePodCIDRs []string) ([2]proxyutiliptables.LocalTrafficDetector, error) {
var err error
localDetectors := [2]proxyutiliptables.LocalTrafficDetector{proxyutiliptables.NewNoOpLocalDetector(), proxyutiliptables.NewNoOpLocalDetector()}
switch mode {
case proxyconfigapi.LocalModeClusterCIDR:
// LocalModeClusterCIDR is the default if --detect-local-mode wasn't passed,
// but --cluster-cidr is optional.
if len(strings.TrimSpace(config.ClusterCIDR)) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined")
break
}
clusterCIDRs := cidrTuple(config.ClusterCIDR)
if len(strings.TrimSpace(clusterCIDRs[0])) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv4 cluster CIDR defined, defaulting to no-op detect-local for IPv4")
} else {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[0], ipt[0])
if err != nil { // don't lose the original error
return localDetectors, err
}
}
if len(strings.TrimSpace(clusterCIDRs[1])) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, defaulting to no-op detect-local for IPv6")
} else {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[1], ipt[1])
}
return localDetectors, err
case proxyconfigapi.LocalModeNodeCIDR:
if len(nodePodCIDRs) == 0 {
klog.InfoS("No node info available to configure detect-local-mode NodeCIDR")
break
}
// localDetectors, like ipt, need to be of the order [IPv4, IPv6], but PodCIDRs is set up so that PodCIDRs[0] == PodCIDR.
// so have to handle the case where PodCIDR can be IPv6 and set that to localDetectors[1]
if netutils.IsIPv6CIDRString(nodePodCIDRs[0]) {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt[1])
if err != nil {
return localDetectors, err
}
if len(nodePodCIDRs) > 1 {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[1], ipt[0])
}
} else {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt[0])
if err != nil {
return localDetectors, err
}
if len(nodePodCIDRs) > 1 {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[1], ipt[1])
}
}
return localDetectors, err
case proxyconfigapi.LocalModeBridgeInterface, proxyconfigapi.LocalModeInterfaceNamePrefix:
localDetector, err := getLocalDetector(mode, config, ipt[0], nodePodCIDRs)
if err == nil {
localDetectors[0] = localDetector
localDetectors[1] = localDetector
}
return localDetectors, err
default:
klog.InfoS("Unknown detect-local-mode", "detectLocalMode", mode)
}
klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
return localDetectors, nil
}
// cidrTuple takes a comma-separated list of CIDRs and returns a tuple (ipv4cidr,ipv6cidr)
// The returned tuple is guaranteed to have the order (ipv4,ipv6) and if no cidr from a family is found an
// empty string "" is inserted.
func cidrTuple(cidrList string) [2]string {
cidrs := [2]string{"", ""}
foundIPv4 := false
foundIPv6 := false
for _, cidr := range strings.Split(cidrList, ",") {
if netutils.IsIPv6CIDRString(cidr) && !foundIPv6 {
cidrs[1] = cidr
foundIPv6 = true
} else if !foundIPv4 {
cidrs[0] = cidr
foundIPv4 = true
}
if foundIPv6 && foundIPv4 {
break
}
}
return cidrs
}
// cleanupAndExit removes iptables rules and ipset/ipvs rules
func cleanupAndExit() error {
execer := exec.New()
// cleanup IPv6 and IPv4 iptables rules, regardless of current configuration
ipts := []utiliptables.Interface{
utiliptables.New(execer, utiliptables.ProtocolIPv4),
utiliptables.New(execer, utiliptables.ProtocolIPv6),
}
ipsetInterface := utilipset.New(execer)
ipvsInterface := utilipvs.New()
var encounteredError bool
for _, ipt := range ipts {
encounteredError = iptables.CleanupLeftovers(ipt) || encounteredError
encounteredError = ipvs.CleanupLeftovers(ipvsInterface, ipt, ipsetInterface) || encounteredError
}
if encounteredError {
return errors.New("encountered an error while tearing down rules")
}
return nil
}
| cmd/kube-proxy/app/server_others.go | 1 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.9983581900596619,
0.1668452024459839,
0.0001662664581090212,
0.00024304578255396336,
0.33926019072532654
] |
{
"id": 6,
"code_window": [
"\t}\n",
"\n",
"\treturn proxier, nil\n",
"}\n",
"\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\tct := &realConntracker{}\n",
"\n",
"\tmax, err := getConntrackMax(s.Config.Conntrack)\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *ProxyServer) setupConntrack() error {\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 273
} | // Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package spec
import (
"encoding/json"
"fmt"
"github.com/go-openapi/swag"
"k8s.io/kube-openapi/pkg/internal"
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
)
// Swagger this is the root document object for the API specification.
// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier)
// together into one document.
//
// For more information: http://goo.gl/8us55a#swagger-object-
type Swagger struct {
VendorExtensible
SwaggerProps
}
// MarshalJSON marshals this swagger structure to json
func (s Swagger) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshaling {
return internal.DeterministicMarshal(s)
}
b1, err := json.Marshal(s.SwaggerProps)
if err != nil {
return nil, err
}
b2, err := json.Marshal(s.VendorExtensible)
if err != nil {
return nil, err
}
return swag.ConcatJSON(b1, b2), nil
}
// MarshalNextJSON marshals this swagger structure to json
func (s Swagger) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
Extensions
SwaggerProps
}
x.Extensions = internal.SanitizeExtensions(s.Extensions)
x.SwaggerProps = s.SwaggerProps
return opts.MarshalNext(enc, x)
}
// UnmarshalJSON unmarshals a swagger spec from json
func (s *Swagger) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshaling {
return jsonv2.Unmarshal(data, s)
}
var sw Swagger
if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil {
return err
}
if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil {
return err
}
*s = sw
return nil
}
func (s *Swagger) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
// Note: If you're willing to make breaking changes, it is possible to
// optimize this and other usages of this pattern:
// https://github.com/kubernetes/kube-openapi/pull/319#discussion_r983165948
var x struct {
Extensions
SwaggerProps
}
if err := opts.UnmarshalNext(dec, &x); err != nil {
return err
}
s.Extensions = internal.SanitizeExtensions(x.Extensions)
s.SwaggerProps = x.SwaggerProps
return nil
}
// SwaggerProps captures the top-level properties of an Api specification
//
// NOTE: validation rules
// - the scheme, when present, must be from [http, https, ws, wss]
// - BasePath must start with a leading "/"
// - Paths is required
type SwaggerProps struct {
ID string `json:"id,omitempty"`
Consumes []string `json:"consumes,omitempty"`
Produces []string `json:"produces,omitempty"`
Schemes []string `json:"schemes,omitempty"`
Swagger string `json:"swagger,omitempty"`
Info *Info `json:"info,omitempty"`
Host string `json:"host,omitempty"`
BasePath string `json:"basePath,omitempty"`
Paths *Paths `json:"paths"`
Definitions Definitions `json:"definitions,omitempty"`
Parameters map[string]Parameter `json:"parameters,omitempty"`
Responses map[string]Response `json:"responses,omitempty"`
SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"`
Security []map[string][]string `json:"security,omitempty"`
Tags []Tag `json:"tags,omitempty"`
ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
}
// Dependencies represent a dependencies property
type Dependencies map[string]SchemaOrStringArray
// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property
type SchemaOrBool struct {
Allows bool
Schema *Schema
}
var jsTrue = []byte("true")
var jsFalse = []byte("false")
// MarshalJSON converts this object to JSON
func (s SchemaOrBool) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshaling {
return internal.DeterministicMarshal(s)
}
if s.Schema != nil {
return json.Marshal(s.Schema)
}
if s.Schema == nil && !s.Allows {
return jsFalse, nil
}
return jsTrue, nil
}
// MarshalNextJSON converts this object to JSON
func (s SchemaOrBool) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
if s.Schema != nil {
return opts.MarshalNext(enc, s.Schema)
}
if s.Schema == nil && !s.Allows {
return enc.WriteToken(jsonv2.False)
}
return enc.WriteToken(jsonv2.True)
}
// UnmarshalJSON converts this bool or schema object from a JSON structure
func (s *SchemaOrBool) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshaling {
return jsonv2.Unmarshal(data, s)
}
var nw SchemaOrBool
if len(data) > 0 && data[0] == '{' {
var sch Schema
if err := json.Unmarshal(data, &sch); err != nil {
return err
}
nw.Schema = &sch
nw.Allows = true
} else {
json.Unmarshal(data, &nw.Allows)
}
*s = nw
return nil
}
func (s *SchemaOrBool) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
switch k := dec.PeekKind(); k {
case '{':
err := opts.UnmarshalNext(dec, &s.Schema)
if err != nil {
return err
}
s.Allows = true
return nil
case 't', 'f':
err := opts.UnmarshalNext(dec, &s.Allows)
if err != nil {
return err
}
return nil
default:
return fmt.Errorf("expected object or bool, not '%v'", k.String())
}
}
// SchemaOrStringArray represents a schema or a string array
type SchemaOrStringArray struct {
Schema *Schema
Property []string
}
// MarshalJSON converts this schema object or array into JSON structure
func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshaling {
return internal.DeterministicMarshal(s)
}
if len(s.Property) > 0 {
return json.Marshal(s.Property)
}
if s.Schema != nil {
return json.Marshal(s.Schema)
}
return []byte("null"), nil
}
// MarshalNextJSON converts this schema object or array into JSON structure
func (s SchemaOrStringArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
if len(s.Property) > 0 {
return opts.MarshalNext(enc, s.Property)
}
if s.Schema != nil {
return opts.MarshalNext(enc, s.Schema)
}
return enc.WriteToken(jsonv2.Null)
}
// UnmarshalJSON converts this schema object or array from a JSON structure
func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshaling {
return jsonv2.Unmarshal(data, s)
}
var first byte
if len(data) > 1 {
first = data[0]
}
var nw SchemaOrStringArray
if first == '{' {
var sch Schema
if err := json.Unmarshal(data, &sch); err != nil {
return err
}
nw.Schema = &sch
}
if first == '[' {
if err := json.Unmarshal(data, &nw.Property); err != nil {
return err
}
}
*s = nw
return nil
}
func (s *SchemaOrStringArray) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
switch dec.PeekKind() {
case '{':
return opts.UnmarshalNext(dec, &s.Schema)
case '[':
return opts.UnmarshalNext(dec, &s.Property)
default:
_, err := dec.ReadValue()
return err
}
}
// Definitions contains the models explicitly defined in this spec
// An object to hold data types that can be consumed and produced by operations.
// These data types can be primitives, arrays or models.
//
// For more information: http://goo.gl/8us55a#definitionsObject
type Definitions map[string]Schema
// SecurityDefinitions a declaration of the security schemes available to be used in the specification.
// This does not enforce the security schemes on the operations and only serves to provide
// the relevant details for each scheme.
//
// For more information: http://goo.gl/8us55a#securityDefinitionsObject
type SecurityDefinitions map[string]*SecurityScheme
// StringOrArray represents a value that can either be a string
// or an array of strings. Mainly here for serialization purposes
type StringOrArray []string
// Contains returns true when the value is contained in the slice
func (s StringOrArray) Contains(value string) bool {
for _, str := range s {
if str == value {
return true
}
}
return false
}
// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string
func (s *StringOrArray) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshaling {
return jsonv2.Unmarshal(data, s)
}
var first byte
if len(data) > 1 {
first = data[0]
}
if first == '[' {
var parsed []string
if err := json.Unmarshal(data, &parsed); err != nil {
return err
}
*s = StringOrArray(parsed)
return nil
}
var single interface{}
if err := json.Unmarshal(data, &single); err != nil {
return err
}
if single == nil {
return nil
}
switch v := single.(type) {
case string:
*s = StringOrArray([]string{v})
return nil
default:
return fmt.Errorf("only string or array is allowed, not %T", single)
}
}
func (s *StringOrArray) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
switch k := dec.PeekKind(); k {
case '[':
*s = StringOrArray{}
return opts.UnmarshalNext(dec, (*[]string)(s))
case '"':
*s = StringOrArray{""}
return opts.UnmarshalNext(dec, &(*s)[0])
case 'n':
// Throw out null token
_, _ = dec.ReadToken()
return nil
default:
return fmt.Errorf("expected string or array, not '%v'", k.String())
}
}
// MarshalJSON converts this string or array to a JSON array or JSON string
func (s StringOrArray) MarshalJSON() ([]byte, error) {
if len(s) == 1 {
return json.Marshal([]string(s)[0])
}
return json.Marshal([]string(s))
}
// SchemaOrArray represents a value that can either be a Schema
// or an array of Schema. Mainly here for serialization purposes
type SchemaOrArray struct {
Schema *Schema
Schemas []Schema
}
// Len returns the number of schemas in this property
func (s SchemaOrArray) Len() int {
if s.Schema != nil {
return 1
}
return len(s.Schemas)
}
// ContainsType returns true when one of the schemas is of the specified type
func (s *SchemaOrArray) ContainsType(name string) bool {
if s.Schema != nil {
return s.Schema.Type != nil && s.Schema.Type.Contains(name)
}
return false
}
// MarshalJSON converts this schema object or array into JSON structure
func (s SchemaOrArray) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshaling {
return internal.DeterministicMarshal(s)
}
if s.Schemas != nil {
return json.Marshal(s.Schemas)
}
return json.Marshal(s.Schema)
}
// MarshalNextJSON converts this schema object or array into JSON structure
func (s SchemaOrArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
if s.Schemas != nil {
return opts.MarshalNext(enc, s.Schemas)
}
return opts.MarshalNext(enc, s.Schema)
}
// UnmarshalJSON converts this schema object or array from a JSON structure
func (s *SchemaOrArray) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshaling {
return jsonv2.Unmarshal(data, s)
}
var nw SchemaOrArray
var first byte
if len(data) > 1 {
first = data[0]
}
if first == '{' {
var sch Schema
if err := json.Unmarshal(data, &sch); err != nil {
return err
}
nw.Schema = &sch
}
if first == '[' {
if err := json.Unmarshal(data, &nw.Schemas); err != nil {
return err
}
}
*s = nw
return nil
}
func (s *SchemaOrArray) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
switch dec.PeekKind() {
case '{':
return opts.UnmarshalNext(dec, &s.Schema)
case '[':
return opts.UnmarshalNext(dec, &s.Schemas)
default:
_, err := dec.ReadValue()
return err
}
}
| vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.6674830317497253,
0.01652039773762226,
0.00016489664267282933,
0.00044377922313287854,
0.09929826110601425
] |
{
"id": 6,
"code_window": [
"\t}\n",
"\n",
"\treturn proxier, nil\n",
"}\n",
"\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\tct := &realConntracker{}\n",
"\n",
"\tmax, err := getConntrackMax(s.Config.Conntrack)\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *ProxyServer) setupConntrack() error {\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 273
} | // Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Implements methods to remove frames from profiles.
package profile
import (
"fmt"
"regexp"
"strings"
)
var (
reservedNames = []string{"(anonymous namespace)", "operator()"}
bracketRx = func() *regexp.Regexp {
var quotedNames []string
for _, name := range append(reservedNames, "(") {
quotedNames = append(quotedNames, regexp.QuoteMeta(name))
}
return regexp.MustCompile(strings.Join(quotedNames, "|"))
}()
)
// simplifyFunc does some primitive simplification of function names.
func simplifyFunc(f string) string {
// Account for leading '.' on the PPC ELF v1 ABI.
funcName := strings.TrimPrefix(f, ".")
// Account for unsimplified names -- try to remove the argument list by trimming
// starting from the first '(', but skipping reserved names that have '('.
for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
foundReserved := false
for _, res := range reservedNames {
if funcName[ind[0]:ind[1]] == res {
foundReserved = true
break
}
}
if !foundReserved {
funcName = funcName[:ind[0]]
break
}
}
return funcName
}
// Prune removes all nodes beneath a node matching dropRx, and not
// matching keepRx. If the root node of a Sample matches, the sample
// will have an empty stack.
func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
prune := make(map[uint64]bool)
pruneBeneath := make(map[uint64]bool)
for _, loc := range p.Location {
var i int
for i = len(loc.Line) - 1; i >= 0; i-- {
if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
funcName := simplifyFunc(fn.Name)
if dropRx.MatchString(funcName) {
if keepRx == nil || !keepRx.MatchString(funcName) {
break
}
}
}
}
if i >= 0 {
// Found matching entry to prune.
pruneBeneath[loc.ID] = true
// Remove the matching location.
if i == len(loc.Line)-1 {
// Matched the top entry: prune the whole location.
prune[loc.ID] = true
} else {
loc.Line = loc.Line[i+1:]
}
}
}
// Prune locs from each Sample
for _, sample := range p.Sample {
// Scan from the root to the leaves to find the prune location.
// Do not prune frames before the first user frame, to avoid
// pruning everything.
foundUser := false
for i := len(sample.Location) - 1; i >= 0; i-- {
id := sample.Location[i].ID
if !prune[id] && !pruneBeneath[id] {
foundUser = true
continue
}
if !foundUser {
continue
}
if prune[id] {
sample.Location = sample.Location[i+1:]
break
}
if pruneBeneath[id] {
sample.Location = sample.Location[i:]
break
}
}
}
}
// RemoveUninteresting prunes and elides profiles using built-in
// tables of uninteresting function names.
func (p *Profile) RemoveUninteresting() error {
var keep, drop *regexp.Regexp
var err error
if p.DropFrames != "" {
if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
}
if p.KeepFrames != "" {
if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
}
}
p.Prune(drop, keep)
}
return nil
}
// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself.
//
// Please see the example below to understand this method as well as
// the difference from Prune method.
//
// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline.
//
// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A.
// Prune(A, nil) returns [B,C,B,D] by removing A itself.
//
// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom.
// Prune(B, nil) returns [D] because a matching node is found by scanning from the root.
func (p *Profile) PruneFrom(dropRx *regexp.Regexp) {
pruneBeneath := make(map[uint64]bool)
for _, loc := range p.Location {
for i := 0; i < len(loc.Line); i++ {
if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
funcName := simplifyFunc(fn.Name)
if dropRx.MatchString(funcName) {
// Found matching entry to prune.
pruneBeneath[loc.ID] = true
loc.Line = loc.Line[i:]
break
}
}
}
}
// Prune locs from each Sample
for _, sample := range p.Sample {
// Scan from the bottom leaf to the root to find the prune location.
for i, loc := range sample.Location {
if pruneBeneath[loc.ID] {
sample.Location = sample.Location[i:]
break
}
}
}
}
| vendor/github.com/google/pprof/profile/prune.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.000683150312397629,
0.0002114217495545745,
0.0001650676567805931,
0.00017471425235271454,
0.00011700543109327555
] |
{
"id": 6,
"code_window": [
"\t}\n",
"\n",
"\treturn proxier, nil\n",
"}\n",
"\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\tct := &realConntracker{}\n",
"\n",
"\tmax, err := getConntrackMax(s.Config.Conntrack)\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (s *ProxyServer) setupConntrack() error {\n"
],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 273
} | // Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.21.9
// source: google/api/field_behavior.proto
package annotations
import (
reflect "reflect"
sync "sync"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
descriptorpb "google.golang.org/protobuf/types/descriptorpb"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// An indicator of the behavior of a given field (for example, that a field
// is required in requests, or given as output but ignored as input).
// This **does not** change the behavior in protocol buffers itself; it only
// denotes the behavior and may affect how API tooling handles the field.
//
// Note: This enum **may** receive new values in the future.
type FieldBehavior int32
const (
// Conventional default for enums. Do not use this.
FieldBehavior_FIELD_BEHAVIOR_UNSPECIFIED FieldBehavior = 0
// Specifically denotes a field as optional.
// While all fields in protocol buffers are optional, this may be specified
// for emphasis if appropriate.
FieldBehavior_OPTIONAL FieldBehavior = 1
// Denotes a field as required.
// This indicates that the field **must** be provided as part of the request,
// and failure to do so will cause an error (usually `INVALID_ARGUMENT`).
FieldBehavior_REQUIRED FieldBehavior = 2
// Denotes a field as output only.
// This indicates that the field is provided in responses, but including the
// field in a request does nothing (the server *must* ignore it and
// *must not* throw an error as a result of the field's presence).
FieldBehavior_OUTPUT_ONLY FieldBehavior = 3
// Denotes a field as input only.
// This indicates that the field is provided in requests, and the
// corresponding field is not included in output.
FieldBehavior_INPUT_ONLY FieldBehavior = 4
// Denotes a field as immutable.
// This indicates that the field may be set once in a request to create a
// resource, but may not be changed thereafter.
FieldBehavior_IMMUTABLE FieldBehavior = 5
// Denotes that a (repeated) field is an unordered list.
// This indicates that the service may provide the elements of the list
// in any arbitrary order, rather than the order the user originally
// provided. Additionally, the list's order may or may not be stable.
FieldBehavior_UNORDERED_LIST FieldBehavior = 6
// Denotes that this field returns a non-empty default value if not set.
// This indicates that if the user provides the empty value in a request,
// a non-empty value will be returned. The user will not be aware of what
// non-empty value to expect.
FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7
)
// Enum value maps for FieldBehavior.
var (
FieldBehavior_name = map[int32]string{
0: "FIELD_BEHAVIOR_UNSPECIFIED",
1: "OPTIONAL",
2: "REQUIRED",
3: "OUTPUT_ONLY",
4: "INPUT_ONLY",
5: "IMMUTABLE",
6: "UNORDERED_LIST",
7: "NON_EMPTY_DEFAULT",
}
FieldBehavior_value = map[string]int32{
"FIELD_BEHAVIOR_UNSPECIFIED": 0,
"OPTIONAL": 1,
"REQUIRED": 2,
"OUTPUT_ONLY": 3,
"INPUT_ONLY": 4,
"IMMUTABLE": 5,
"UNORDERED_LIST": 6,
"NON_EMPTY_DEFAULT": 7,
}
)
func (x FieldBehavior) Enum() *FieldBehavior {
p := new(FieldBehavior)
*p = x
return p
}
func (x FieldBehavior) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (FieldBehavior) Descriptor() protoreflect.EnumDescriptor {
return file_google_api_field_behavior_proto_enumTypes[0].Descriptor()
}
func (FieldBehavior) Type() protoreflect.EnumType {
return &file_google_api_field_behavior_proto_enumTypes[0]
}
func (x FieldBehavior) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use FieldBehavior.Descriptor instead.
func (FieldBehavior) EnumDescriptor() ([]byte, []int) {
return file_google_api_field_behavior_proto_rawDescGZIP(), []int{0}
}
var file_google_api_field_behavior_proto_extTypes = []protoimpl.ExtensionInfo{
{
ExtendedType: (*descriptorpb.FieldOptions)(nil),
ExtensionType: ([]FieldBehavior)(nil),
Field: 1052,
Name: "google.api.field_behavior",
Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior",
Filename: "google/api/field_behavior.proto",
},
}
// Extension fields to descriptorpb.FieldOptions.
var (
// A designation of a specific field behavior (required, output only, etc.)
// in protobuf messages.
//
// Examples:
//
// string name = 1 [(google.api.field_behavior) = REQUIRED];
// State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// google.protobuf.Duration ttl = 1
// [(google.api.field_behavior) = INPUT_ONLY];
// google.protobuf.Timestamp expire_time = 1
// [(google.api.field_behavior) = OUTPUT_ONLY,
// (google.api.field_behavior) = IMMUTABLE];
//
// repeated google.api.FieldBehavior field_behavior = 1052;
E_FieldBehavior = &file_google_api_field_behavior_proto_extTypes[0]
)
var File_google_api_field_behavior_proto protoreflect.FileDescriptor
var file_google_api_field_behavior_proto_rawDesc = []byte{
0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65,
0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64,
0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a,
0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f,
0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56,
0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12,
0x0c, 0x0a, 0x08, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0f, 0x0a,
0x0b, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x0e,
0x0a, 0x0a, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x12, 0x0d,
0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a,
0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10,
0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44,
0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c,
0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e,
0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69,
0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, 0x0d, 0x66, 0x69, 0x65,
0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, 0x0a, 0x0e, 0x63, 0x6f,
0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x12, 0x46, 0x69,
0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_google_api_field_behavior_proto_rawDescOnce sync.Once
file_google_api_field_behavior_proto_rawDescData = file_google_api_field_behavior_proto_rawDesc
)
func file_google_api_field_behavior_proto_rawDescGZIP() []byte {
file_google_api_field_behavior_proto_rawDescOnce.Do(func() {
file_google_api_field_behavior_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_behavior_proto_rawDescData)
})
return file_google_api_field_behavior_proto_rawDescData
}
var file_google_api_field_behavior_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_google_api_field_behavior_proto_goTypes = []interface{}{
(FieldBehavior)(0), // 0: google.api.FieldBehavior
(*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions
}
var file_google_api_field_behavior_proto_depIdxs = []int32{
1, // 0: google.api.field_behavior:extendee -> google.protobuf.FieldOptions
0, // 1: google.api.field_behavior:type_name -> google.api.FieldBehavior
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
1, // [1:2] is the sub-list for extension type_name
0, // [0:1] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_google_api_field_behavior_proto_init() }
func file_google_api_field_behavior_proto_init() {
if File_google_api_field_behavior_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_api_field_behavior_proto_rawDesc,
NumEnums: 1,
NumMessages: 0,
NumExtensions: 1,
NumServices: 0,
},
GoTypes: file_google_api_field_behavior_proto_goTypes,
DependencyIndexes: file_google_api_field_behavior_proto_depIdxs,
EnumInfos: file_google_api_field_behavior_proto_enumTypes,
ExtensionInfos: file_google_api_field_behavior_proto_extTypes,
}.Build()
File_google_api_field_behavior_proto = out.File
file_google_api_field_behavior_proto_rawDesc = nil
file_google_api_field_behavior_proto_goTypes = nil
file_google_api_field_behavior_proto_depIdxs = nil
}
| vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.0007390864193439484,
0.00019354341202415526,
0.0001631781633477658,
0.00016903236974030733,
0.0001094361359719187
] |
{
"id": 7,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n",
"\tproxymetrics.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 313
} | //go:build !windows
// +build !windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"context"
"errors"
"fmt"
goruntime "runtime"
"strings"
"time"
"github.com/google/cadvisor/machine"
"github.com/google/cadvisor/utils/sysfs"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/apimachinery/pkg/fields"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
toolswatch "k8s.io/client-go/tools/watch"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
"k8s.io/kubernetes/pkg/proxy"
proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/proxy/ipvs"
utilipset "k8s.io/kubernetes/pkg/proxy/ipvs/ipset"
utilipvs "k8s.io/kubernetes/pkg/proxy/ipvs/util"
proxymetrics "k8s.io/kubernetes/pkg/proxy/metrics"
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
"k8s.io/utils/exec"
netutils "k8s.io/utils/net"
"k8s.io/klog/v2"
)
// timeoutForNodePodCIDR is the time to wait for allocators to assign a PodCIDR to the
// node after it is registered.
var timeoutForNodePodCIDR = 5 * time.Minute
func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {
if config.Mode == "" {
klog.InfoS("Using iptables proxy")
config.Mode = proxyconfigapi.ProxyModeIPTables
}
if config.DetectLocalMode == "" {
klog.V(4).InfoS("Defaulting detect-local-mode", "localModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR))
config.DetectLocalMode = proxyconfigapi.LocalModeClusterCIDR
}
klog.V(2).InfoS("DetectLocalMode", "localMode", string(config.DetectLocalMode))
}
// createProxier creates the proxy.Provider
func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {
var proxier proxy.Provider
var err error
if config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {
klog.InfoS("Watching for node, awaiting podCIDR allocation", "hostname", s.Hostname)
node, err := waitForPodCIDR(s.Client, s.Hostname)
if err != nil {
return nil, err
}
s.podCIDRs = node.Spec.PodCIDRs
klog.InfoS("NodeInfo", "podCIDRs", node.Spec.PodCIDRs)
}
primaryProtocol := utiliptables.ProtocolIPv4
if s.PrimaryIPFamily == v1.IPv6Protocol {
primaryProtocol = utiliptables.ProtocolIPv6
}
execer := exec.New()
iptInterface := utiliptables.New(execer, primaryProtocol)
var ipt [2]utiliptables.Interface
	dualStack := true // While we assume that the node supports dual-stack, we do further checks below
	// Create iptables handlers for both families (one is already created)
// Always ordered as IPv4, IPv6
if primaryProtocol == utiliptables.ProtocolIPv4 {
ipt[0] = iptInterface
ipt[1] = utiliptables.New(execer, utiliptables.ProtocolIPv6)
} else {
ipt[0] = utiliptables.New(execer, utiliptables.ProtocolIPv4)
ipt[1] = iptInterface
}
if !ipt[0].Present() {
return nil, fmt.Errorf("iptables is not supported for primary IP family %q", primaryProtocol)
} else if !ipt[1].Present() {
klog.InfoS("kube-proxy running in single-stack mode: secondary ipFamily is not supported", "ipFamily", ipt[1].Protocol())
dualStack = false
// Validate NodePortAddresses is single-stack
npaByFamily := proxyutil.MapCIDRsByIPFamily(config.NodePortAddresses)
secondaryFamily := proxyutil.OtherIPFamily(s.PrimaryIPFamily)
badAddrs := npaByFamily[secondaryFamily]
if len(badAddrs) > 0 {
klog.InfoS("Ignoring --nodeport-addresses of the wrong family", "ipFamily", secondaryFamily, "addresses", badAddrs)
}
}
if config.Mode == proxyconfigapi.ProxyModeIPTables {
klog.InfoS("Using iptables Proxier")
if dualStack {
klog.InfoS("kube-proxy running in dual-stack mode", "ipFamily", iptInterface.Protocol())
klog.InfoS("Creating dualStackProxier for iptables")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewDualStackProxier(
ipt,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetectors,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.NodePortAddresses,
)
} else {
		// Create a single-stack proxier if and only if the node does not support dual-stack (i.e., no iptables support for the secondary IP family).
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewProxier(
s.PrimaryIPFamily,
iptInterface,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetector,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.NodePortAddresses,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
} else if config.Mode == proxyconfigapi.ProxyModeIPVS {
kernelHandler := ipvs.NewLinuxKernelHandler()
ipsetInterface := utilipset.New(execer)
ipvsInterface := utilipvs.New()
if err := ipvs.CanUseIPVSProxier(ipvsInterface, ipsetInterface, config.IPVS.Scheduler); err != nil {
return nil, fmt.Errorf("can't use the IPVS proxier: %v", err)
}
klog.InfoS("Using ipvs Proxier")
if dualStack {
klog.InfoS("Creating dualStackProxier for ipvs")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxier, err = ipvs.NewDualStackProxier(
ipt,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetectors,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.IPVS.Scheduler,
config.NodePortAddresses,
kernelHandler,
)
} else {
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxier, err = ipvs.NewProxier(
s.PrimaryIPFamily,
iptInterface,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetector,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.IPVS.Scheduler,
config.NodePortAddresses,
kernelHandler,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
}
return proxier, nil
}
func (s *ProxyServer) platformSetup() error {
ct := &realConntracker{}
max, err := getConntrackMax(s.Config.Conntrack)
if err != nil {
return err
}
if max > 0 {
err := ct.SetMax(max)
if err != nil {
if err != errReadOnlySysFS {
return err
}
// errReadOnlySysFS is caused by a known docker issue (https://github.com/docker/docker/issues/24000),
// the only remediation we know is to restart the docker daemon.
			// Here we'll send a node event with a specific reason and message; the
			// administrator should decide whether and how to handle this issue,
			// e.g. whether to drain the node and restart docker. This occurs in other
			// container runtimes as well.
// TODO(random-liu): Remove this when the docker bug is fixed.
const message = "CRI error: /sys is read-only: " +
"cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)"
s.Recorder.Eventf(s.NodeRef, nil, v1.EventTypeWarning, err.Error(), "StartKubeProxy", message)
}
}
if s.Config.Conntrack.TCPEstablishedTimeout != nil && s.Config.Conntrack.TCPEstablishedTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPEstablishedTimeout.Duration / time.Second)
if err := ct.SetTCPEstablishedTimeout(timeout); err != nil {
return err
}
}
if s.Config.Conntrack.TCPCloseWaitTimeout != nil && s.Config.Conntrack.TCPCloseWaitTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPCloseWaitTimeout.Duration / time.Second)
if err := ct.SetTCPCloseWaitTimeout(timeout); err != nil {
return err
}
}
proxymetrics.RegisterMetrics()
return nil
}
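// getConntrackMax computes the desired conntrack maximum from the conntrack
// configuration, scaling conntrack-max-per-core by the number of CPUs and using
// conntrack-min as a floor; it returns 0 when no adjustment should be made.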
func getConntrackMax(config proxyconfigapi.KubeProxyConntrackConfiguration) (int, error) {
if config.MaxPerCore != nil && *config.MaxPerCore > 0 {
floor := 0
if config.Min != nil {
floor = int(*config.Min)
}
scaled := int(*config.MaxPerCore) * detectNumCPU()
if scaled > floor {
klog.V(3).InfoS("GetConntrackMax: using scaled conntrack-max-per-core")
return scaled, nil
}
klog.V(3).InfoS("GetConntrackMax: using conntrack-min")
return floor, nil
}
return 0, nil
}
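// waitForPodCIDR waits (up to timeoutForNodePodCIDR) for the named node to have a
// PodCIDR assigned, and returns the node object once it does.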
func waitForPodCIDR(client clientset.Interface, nodeName string) (*v1.Node, error) {
// since allocators can assign the podCIDR after the node registers, we do a watch here to wait
// for podCIDR to be assigned, instead of assuming that the Get() on startup will have it.
ctx, cancelFunc := context.WithTimeout(context.TODO(), timeoutForNodePodCIDR)
defer cancelFunc()
fieldSelector := fields.OneTermEqualSelector("metadata.name", nodeName).String()
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
options.FieldSelector = fieldSelector
return client.CoreV1().Nodes().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return client.CoreV1().Nodes().Watch(ctx, options)
},
}
condition := func(event watch.Event) (bool, error) {
// don't process delete events
if event.Type != watch.Modified && event.Type != watch.Added {
return false, nil
}
n, ok := event.Object.(*v1.Node)
if !ok {
return false, fmt.Errorf("event object not of type Node")
}
		// don't consider the node if it is going to be deleted; keep waiting
if !n.DeletionTimestamp.IsZero() {
return false, nil
}
return n.Spec.PodCIDR != "" && len(n.Spec.PodCIDRs) > 0, nil
}
evt, err := toolswatch.UntilWithSync(ctx, lw, &v1.Node{}, nil, condition)
if err != nil {
return nil, fmt.Errorf("timeout waiting for PodCIDR allocation to configure detect-local-mode %v: %v", proxyconfigapi.LocalModeNodeCIDR, err)
}
if n, ok := evt.Object.(*v1.Node); ok {
return n, nil
}
return nil, fmt.Errorf("event object not of type node")
}
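// detectNumCPU returns the number of CPUs, preferring the topology reported under
// /sys and falling back to runtime.NumCPU() if that fails.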
func detectNumCPU() int {
	// try to get numCPU from /sys first, due to a known issue (https://github.com/kubernetes/kubernetes/issues/99225)
_, numCPU, err := machine.GetTopology(sysfs.NewRealSysFs())
if err != nil || numCPU < 1 {
return goruntime.NumCPU()
}
return numCPU
}
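// getLocalDetector returns a LocalTrafficDetector for the given detect-local mode,
// falling back to a no-op detector when the required configuration is missing.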
func getLocalDetector(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt utiliptables.Interface, nodePodCIDRs []string) (proxyutiliptables.LocalTrafficDetector, error) {
switch mode {
case proxyconfigapi.LocalModeClusterCIDR:
// LocalModeClusterCIDR is the default if --detect-local-mode wasn't passed,
// but --cluster-cidr is optional.
if len(strings.TrimSpace(config.ClusterCIDR)) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined")
break
}
return proxyutiliptables.NewDetectLocalByCIDR(config.ClusterCIDR, ipt)
case proxyconfigapi.LocalModeNodeCIDR:
if len(nodePodCIDRs) == 0 {
klog.InfoS("Detect-local-mode set to NodeCIDR, but no PodCIDR defined at node")
break
}
return proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt)
case proxyconfigapi.LocalModeBridgeInterface:
return proxyutiliptables.NewDetectLocalByBridgeInterface(config.DetectLocal.BridgeInterface)
case proxyconfigapi.LocalModeInterfaceNamePrefix:
return proxyutiliptables.NewDetectLocalByInterfaceNamePrefix(config.DetectLocal.InterfaceNamePrefix)
}
klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
return proxyutiliptables.NewNoOpLocalDetector(), nil
}
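// getDualStackLocalDetectorTuple returns a pair of LocalTrafficDetectors ordered
// [IPv4, IPv6] for the given detect-local mode, using no-op detectors for any
// family that cannot be configured.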
func getDualStackLocalDetectorTuple(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt [2]utiliptables.Interface, nodePodCIDRs []string) ([2]proxyutiliptables.LocalTrafficDetector, error) {
var err error
localDetectors := [2]proxyutiliptables.LocalTrafficDetector{proxyutiliptables.NewNoOpLocalDetector(), proxyutiliptables.NewNoOpLocalDetector()}
switch mode {
case proxyconfigapi.LocalModeClusterCIDR:
// LocalModeClusterCIDR is the default if --detect-local-mode wasn't passed,
// but --cluster-cidr is optional.
if len(strings.TrimSpace(config.ClusterCIDR)) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined")
break
}
clusterCIDRs := cidrTuple(config.ClusterCIDR)
if len(strings.TrimSpace(clusterCIDRs[0])) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv4 cluster CIDR defined, defaulting to no-op detect-local for IPv4")
} else {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[0], ipt[0])
			if err != nil { // don't lose the original error
return localDetectors, err
}
}
if len(strings.TrimSpace(clusterCIDRs[1])) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, defaulting to no-op detect-local for IPv6")
} else {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[1], ipt[1])
}
return localDetectors, err
case proxyconfigapi.LocalModeNodeCIDR:
if len(nodePodCIDRs) == 0 {
klog.InfoS("No node info available to configure detect-local-mode NodeCIDR")
break
}
		// localDetectors, like ipt, need to be in the order [IPv4, IPv6], but PodCIDRs is set up so that PodCIDRs[0] == PodCIDR,
		// so we have to handle the case where PodCIDR can be IPv6 and set that to localDetectors[1].
if netutils.IsIPv6CIDRString(nodePodCIDRs[0]) {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt[1])
if err != nil {
return localDetectors, err
}
if len(nodePodCIDRs) > 1 {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[1], ipt[0])
}
} else {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt[0])
if err != nil {
return localDetectors, err
}
if len(nodePodCIDRs) > 1 {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[1], ipt[1])
}
}
return localDetectors, err
case proxyconfigapi.LocalModeBridgeInterface, proxyconfigapi.LocalModeInterfaceNamePrefix:
localDetector, err := getLocalDetector(mode, config, ipt[0], nodePodCIDRs)
if err == nil {
localDetectors[0] = localDetector
localDetectors[1] = localDetector
}
return localDetectors, err
default:
klog.InfoS("Unknown detect-local-mode", "detectLocalMode", mode)
}
klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
return localDetectors, nil
}
// cidrTuple takes a comma-separated list of CIDRs and returns a tuple (ipv4cidr,ipv6cidr).
// The returned tuple is guaranteed to have the order (ipv4,ipv6), and if no CIDR from a family is found an
// empty string "" is inserted.
func cidrTuple(cidrList string) [2]string {
cidrs := [2]string{"", ""}
foundIPv4 := false
foundIPv6 := false
for _, cidr := range strings.Split(cidrList, ",") {
if netutils.IsIPv6CIDRString(cidr) && !foundIPv6 {
cidrs[1] = cidr
foundIPv6 = true
} else if !foundIPv4 {
cidrs[0] = cidr
foundIPv4 = true
}
if foundIPv6 && foundIPv4 {
break
}
}
return cidrs
}
// cleanupAndExit removes iptables rules and ipset/ipvs rules
func cleanupAndExit() error {
execer := exec.New()
// cleanup IPv6 and IPv4 iptables rules, regardless of current configuration
ipts := []utiliptables.Interface{
utiliptables.New(execer, utiliptables.ProtocolIPv4),
utiliptables.New(execer, utiliptables.ProtocolIPv6),
}
ipsetInterface := utilipset.New(execer)
ipvsInterface := utilipvs.New()
var encounteredError bool
for _, ipt := range ipts {
encounteredError = iptables.CleanupLeftovers(ipt) || encounteredError
encounteredError = ipvs.CleanupLeftovers(ipvsInterface, ipt, ipsetInterface) || encounteredError
}
if encounteredError {
return errors.New("encountered an error while tearing down rules")
}
return nil
}
| cmd/kube-proxy/app/server_others.go | 1 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.06566247344017029,
0.0014265531208366156,
0.0001650454505579546,
0.0001715543621685356,
0.00890809204429388
] |
{
"id": 7,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n",
"\tproxymetrics.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 313
} | package storage
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
import (
"net/url"
"time"
)
// SASOptions includes options used by SAS URIs for different
// services and resources.
type SASOptions struct {
APIVersion string
Start time.Time
Expiry time.Time
IP string
UseHTTPS bool
Identifier string
}
func addQueryParameter(query url.Values, key, value string) url.Values {
if value != "" {
query.Add(key, value)
}
return query
}
| vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00018136586004402488,
0.0001768411457305774,
0.0001693779486231506,
0.00017977961397264153,
0.000005316858278092695
] |
{
"id": 7,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n",
"\tproxymetrics.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 313
} | /*
Copyright (c) 2020 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"net"
"path"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
)
// InventoryPath composed of entities by Name
func InventoryPath(entities []mo.ManagedEntity) string {
val := "/"
for _, entity := range entities {
// Skip root folder in building inventory path.
if entity.Parent == nil {
continue
}
val = path.Join(val, entity.Name)
}
return val
}
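// HostSystemManagementIPs returns the IP addresses of the vNICs selected for
// management traffic in the given network configuration.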
func HostSystemManagementIPs(config []types.VirtualNicManagerNetConfig) []net.IP {
var ips []net.IP
for _, nc := range config {
if nc.NicType != string(types.HostVirtualNicManagerNicTypeManagement) {
continue
}
for ix := range nc.CandidateVnic {
for _, selectedVnicKey := range nc.SelectedVnic {
if nc.CandidateVnic[ix].Key != selectedVnicKey {
continue
}
ip := net.ParseIP(nc.CandidateVnic[ix].Spec.Ip.IpAddress)
if ip != nil {
ips = append(ips, ip)
}
}
}
}
return ips
}
| vendor/github.com/vmware/govmomi/internal/helpers.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.0003739823878277093,
0.00020085347932763398,
0.0001664207666181028,
0.00017146932077594101,
0.00007081534567987546
] |
{
"id": 7,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n",
"\tproxymetrics.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/server_others.go",
"type": "replace",
"edit_start_line_idx": 313
} | apiVersion: v1
kind: Pod
metadata:
name: seccompprofile_restricted0
spec:
containers:
- image: registry.k8s.io/pause
name: container1
securityContext:
allowPrivilegeEscalation: false
initContainers:
- image: registry.k8s.io/pause
name: initcontainer1
securityContext:
allowPrivilegeEscalation: false
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
| staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.19/pass/seccompprofile_restricted0.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00016951884026639163,
0.00016933929873630404,
0.00016915974265430123,
0.00016933929873630404,
1.795488060452044e-7
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\t}\n",
"}\n",
"\n",
"func TestProxyServer_createProxier(t *testing.T) {\n",
"\ttests := []struct {\n",
"\t\tname string\n",
"\t\tnode *v1.Node\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func TestProxyServer_platformSetup(t *testing.T) {\n"
],
"file_path": "cmd/kube-proxy/app/server_others_test.go",
"type": "replace",
"edit_start_line_idx": 648
} | //go:build windows
// +build windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"errors"
"fmt"
"net"
"strconv"
// Enable pprof HTTP handlers.
_ "net/http/pprof"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/proxy"
proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/kubernetes/pkg/proxy/winkernel"
)
func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {
if config.Mode == "" {
config.Mode = proxyconfigapi.ProxyModeKernelspace
}
}
// createProxier creates the proxy.Provider
func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {
var healthzPort int
if len(config.HealthzBindAddress) > 0 {
_, port, _ := net.SplitHostPort(config.HealthzBindAddress)
healthzPort, _ = strconv.Atoi(port)
}
	// Check if the Windows kernel-space proxier can be used.
canUseWinKernelProxy, err := winkernel.CanUseWinKernelProxier(winkernel.WindowsKernelCompatTester{})
if !canUseWinKernelProxy && err != nil {
return nil, err
}
var proxier proxy.Provider
dualStackMode := getDualStackMode(config.Winkernel.NetworkName, winkernel.DualStackCompatTester{})
if dualStackMode {
klog.InfoS("Creating dualStackProxier for Windows kernel.")
proxier, err = winkernel.NewDualStackProxier(
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.ClusterCIDR,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.Winkernel,
healthzPort,
)
} else {
proxier, err = winkernel.NewProxier(
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.ClusterCIDR,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.Winkernel,
healthzPort,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
return proxier, nil
}
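// platformSetup performs Windows-specific setup, registering the Windows kernel proxier metrics.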
func (s *ProxyServer) platformSetup() error {
winkernel.RegisterMetrics()
return nil
}
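// getDualStackMode reports whether the named network can be used in dual-stack mode,
// as determined by the given compatibility tester.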
func getDualStackMode(networkname string, compatTester winkernel.StackCompatTester) bool {
return compatTester.DualStackCompatible(networkname)
}
// cleanupAndExit cleans up after a previous proxy run
func cleanupAndExit() error {
return errors.New("--cleanup-and-exit is not implemented on Windows")
}
| cmd/kube-proxy/app/server_windows.go | 1 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00295142806135118,
0.0007000155164860189,
0.00016494806914124638,
0.0004029438132420182,
0.0007949370774440467
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\t}\n",
"}\n",
"\n",
"func TestProxyServer_createProxier(t *testing.T) {\n",
"\ttests := []struct {\n",
"\t\tname string\n",
"\t\tnode *v1.Node\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func TestProxyServer_platformSetup(t *testing.T) {\n"
],
"file_path": "cmd/kube-proxy/app/server_others_test.go",
"type": "replace",
"edit_start_line_idx": 648
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/volume/util/fs"
)
var _ MetricsProvider = &metricsDu{}
// metricsDu represents a MetricsProvider that calculates the used and
// available Volume space by calling fs.DiskUsage() and gathering
// filesystem info for the Volume path.
type metricsDu struct {
// the directory path the volume is mounted to.
path string
}
// NewMetricsDu creates a new metricsDu with the Volume path.
func NewMetricsDu(path string) MetricsProvider {
return &metricsDu{path}
}
// GetMetrics calculates the volume usage and device free space by executing "du"
// and gathering filesystem info for the Volume path.
// See MetricsProvider.GetMetrics
func (md *metricsDu) GetMetrics() (*Metrics, error) {
metrics := &Metrics{Time: metav1.Now()}
if md.path == "" {
return metrics, NewNoPathDefinedError()
}
err := md.getDiskUsage(metrics)
if err != nil {
return metrics, err
}
err = md.getFsInfo(metrics)
if err != nil {
return metrics, err
}
return metrics, nil
}
// getDiskUsage writes metrics.Used and metrics.InodesUsed from fs.DiskUsage
func (md *metricsDu) getDiskUsage(metrics *Metrics) error {
usage, err := fs.DiskUsage(md.path)
if err != nil {
return err
}
metrics.Used = resource.NewQuantity(usage.Bytes, resource.BinarySI)
metrics.InodesUsed = resource.NewQuantity(usage.Inodes, resource.BinarySI)
return nil
}
// getFsInfo writes metrics.Capacity and metrics.Available from the filesystem
// info
func (md *metricsDu) getFsInfo(metrics *Metrics) error {
available, capacity, _, inodes, inodesFree, _, err := fs.Info(md.path)
if err != nil {
return NewFsInfoFailedError(err)
}
metrics.Available = resource.NewQuantity(available, resource.BinarySI)
metrics.Capacity = resource.NewQuantity(capacity, resource.BinarySI)
metrics.Inodes = resource.NewQuantity(inodes, resource.BinarySI)
metrics.InodesFree = resource.NewQuantity(inodesFree, resource.BinarySI)
return nil
}
| pkg/volume/metrics_du.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00017781801579985768,
0.00017100773402489722,
0.00016474639414809644,
0.00016900143236853182,
0.000004378731318865903
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\t}\n",
"}\n",
"\n",
"func TestProxyServer_createProxier(t *testing.T) {\n",
"\ttests := []struct {\n",
"\t\tname string\n",
"\t\tnode *v1.Node\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func TestProxyServer_platformSetup(t *testing.T) {\n"
],
"file_path": "cmd/kube-proxy/app/server_others_test.go",
"type": "replace",
"edit_start_line_idx": 648
} | apiVersion: v1
kind: Pod
metadata:
name: restrictedvolumes16
spec:
containers:
- image: registry.k8s.io/pause
name: container1
securityContext:
allowPrivilegeEscalation: false
initContainers:
- image: registry.k8s.io/pause
name: initcontainer1
securityContext:
allowPrivilegeEscalation: false
securityContext:
runAsNonRoot: true
volumes:
- name: volume1
portworxVolume:
fsType: ext4
volumeID: test
| staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.11/fail/restrictedvolumes16.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00016748752386774868,
0.00016584235709160566,
0.00016277241229545325,
0.00016726713511161506,
0.000002172642552977777
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\t}\n",
"}\n",
"\n",
"func TestProxyServer_createProxier(t *testing.T) {\n",
"\ttests := []struct {\n",
"\t\tname string\n",
"\t\tnode *v1.Node\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func TestProxyServer_platformSetup(t *testing.T) {\n"
],
"file_path": "cmd/kube-proxy/app/server_others_test.go",
"type": "replace",
"edit_start_line_idx": 648
} | apiVersion: v1
kind: Pod
metadata:
name: restrictedvolumes14
spec:
containers:
- image: registry.k8s.io/pause
name: container1
securityContext:
allowPrivilegeEscalation: false
initContainers:
- image: registry.k8s.io/pause
name: initcontainer1
securityContext:
allowPrivilegeEscalation: false
securityContext:
runAsNonRoot: true
volumes:
- name: volume1
quobyte:
registry: localhost:1234
volume: test
| staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.14/fail/restrictedvolumes14.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.0001688875345280394,
0.00016601620882283896,
0.00016305233293678612,
0.0001661087735556066,
0.0000023831098587834276
] |
{
"id": 9,
"code_window": [
"\t\t\t\t\tv1.IPv6Protocol: net.IPv6zero,\n",
"\t\t\t\t},\n",
"\t\t\t}\n",
"\t\t\t_, err := s.createProxier(tt.config)\n",
"\t\t\t// TODO: mock the exec.Interface to not fail probing iptables\n",
"\t\t\tif (err != nil) && !strings.Contains(err.Error(), \"iptables is not supported for primary IP family\") {\n",
"\t\t\t\tt.Errorf(\"ProxyServer.createProxier() error = %v\", err)\n",
"\t\t\t\treturn\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\terr := s.platformSetup()\n",
"\t\t\tif err != nil {\n"
],
"file_path": "cmd/kube-proxy/app/server_others_test.go",
"type": "replace",
"edit_start_line_idx": 685
} | //go:build windows
// +build windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"errors"
"fmt"
"net"
"strconv"
// Enable pprof HTTP handlers.
_ "net/http/pprof"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/proxy"
proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/kubernetes/pkg/proxy/winkernel"
)
func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {
if config.Mode == "" {
config.Mode = proxyconfigapi.ProxyModeKernelspace
}
}
// createProxier creates the proxy.Provider
func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {
var healthzPort int
if len(config.HealthzBindAddress) > 0 {
_, port, _ := net.SplitHostPort(config.HealthzBindAddress)
healthzPort, _ = strconv.Atoi(port)
}
	// Check if the Windows kernel-space proxier can be used.
canUseWinKernelProxy, err := winkernel.CanUseWinKernelProxier(winkernel.WindowsKernelCompatTester{})
if !canUseWinKernelProxy && err != nil {
return nil, err
}
var proxier proxy.Provider
dualStackMode := getDualStackMode(config.Winkernel.NetworkName, winkernel.DualStackCompatTester{})
if dualStackMode {
klog.InfoS("Creating dualStackProxier for Windows kernel.")
proxier, err = winkernel.NewDualStackProxier(
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.ClusterCIDR,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.Winkernel,
healthzPort,
)
} else {
proxier, err = winkernel.NewProxier(
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.ClusterCIDR,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.Winkernel,
healthzPort,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
return proxier, nil
}
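// platformSetup performs Windows-specific setup, registering the Windows kernel proxier metrics.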
func (s *ProxyServer) platformSetup() error {
winkernel.RegisterMetrics()
return nil
}
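// getDualStackMode reports whether the named network can be used in dual-stack mode,
// as determined by the given compatibility tester.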
func getDualStackMode(networkname string, compatTester winkernel.StackCompatTester) bool {
return compatTester.DualStackCompatible(networkname)
}
// cleanupAndExit cleans up after a previous proxy run
func cleanupAndExit() error {
return errors.New("--cleanup-and-exit is not implemented on Windows")
}
| cmd/kube-proxy/app/server_windows.go | 1 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.004528824705630541,
0.0010485169477760792,
0.00016018646419979632,
0.00017837314226198941,
0.001355128362774849
] |
{
"id": 9,
"code_window": [
"\t\t\t\t\tv1.IPv6Protocol: net.IPv6zero,\n",
"\t\t\t\t},\n",
"\t\t\t}\n",
"\t\t\t_, err := s.createProxier(tt.config)\n",
"\t\t\t// TODO: mock the exec.Interface to not fail probing iptables\n",
"\t\t\tif (err != nil) && !strings.Contains(err.Error(), \"iptables is not supported for primary IP family\") {\n",
"\t\t\t\tt.Errorf(\"ProxyServer.createProxier() error = %v\", err)\n",
"\t\t\t\treturn\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\terr := s.platformSetup()\n",
"\t\t\tif err != nil {\n"
],
"file_path": "cmd/kube-proxy/app/server_others_test.go",
"type": "replace",
"edit_start_line_idx": 685
} | /*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type RegistryChanges struct {
AddValues []RegistryValue `json:"AddValues,omitempty"`
DeleteKeys []RegistryKey `json:"DeleteKeys,omitempty"`
}
| vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00017004093388095498,
0.00016622868133708835,
0.00016241644334513694,
0.00016622868133708835,
0.00000381224526790902
] |
{
"id": 9,
"code_window": [
"\t\t\t\t\tv1.IPv6Protocol: net.IPv6zero,\n",
"\t\t\t\t},\n",
"\t\t\t}\n",
"\t\t\t_, err := s.createProxier(tt.config)\n",
"\t\t\t// TODO: mock the exec.Interface to not fail probing iptables\n",
"\t\t\tif (err != nil) && !strings.Contains(err.Error(), \"iptables is not supported for primary IP family\") {\n",
"\t\t\t\tt.Errorf(\"ProxyServer.createProxier() error = %v\", err)\n",
"\t\t\t\treturn\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\terr := s.platformSetup()\n",
"\t\t\tif err != nil {\n"
],
"file_path": "cmd/kube-proxy/app/server_others_test.go",
"type": "replace",
"edit_start_line_idx": 685
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_ClusterRoleBinding(obj *rbacv1beta1.ClusterRoleBinding) {
if len(obj.RoleRef.APIGroup) == 0 {
obj.RoleRef.APIGroup = GroupName
}
}
func SetDefaults_RoleBinding(obj *rbacv1beta1.RoleBinding) {
if len(obj.RoleRef.APIGroup) == 0 {
obj.RoleRef.APIGroup = GroupName
}
}
func SetDefaults_Subject(obj *rbacv1beta1.Subject) {
if len(obj.APIGroup) == 0 {
switch obj.Kind {
case rbacv1beta1.ServiceAccountKind:
obj.APIGroup = ""
case rbacv1beta1.UserKind:
obj.APIGroup = GroupName
case rbacv1beta1.GroupKind:
obj.APIGroup = GroupName
}
}
}
| pkg/apis/rbac/v1beta1/defaults.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.0001791208633221686,
0.00017493781342636794,
0.00016494758892804384,
0.00017734087305143476,
0.000005255773430690169
] |
{
"id": 9,
"code_window": [
"\t\t\t\t\tv1.IPv6Protocol: net.IPv6zero,\n",
"\t\t\t\t},\n",
"\t\t\t}\n",
"\t\t\t_, err := s.createProxier(tt.config)\n",
"\t\t\t// TODO: mock the exec.Interface to not fail probing iptables\n",
"\t\t\tif (err != nil) && !strings.Contains(err.Error(), \"iptables is not supported for primary IP family\") {\n",
"\t\t\t\tt.Errorf(\"ProxyServer.createProxier() error = %v\", err)\n",
"\t\t\t\treturn\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\terr := s.platformSetup()\n",
"\t\t\tif err != nil {\n"
],
"file_path": "cmd/kube-proxy/app/server_others_test.go",
"type": "replace",
"edit_start_line_idx": 685
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
json "encoding/json"
"fmt"
v1beta3 "k8s.io/api/flowcontrol/v1beta3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
testing "k8s.io/client-go/testing"
)
// FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface
type FakePriorityLevelConfigurations struct {
Fake *FakeFlowcontrolV1beta3
}
var prioritylevelconfigurationsResource = v1beta3.SchemeGroupVersion.WithResource("prioritylevelconfigurations")
var prioritylevelconfigurationsKind = v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfiguration")
// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any.
func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta3.PriorityLevelConfiguration{})
if obj == nil {
return nil, err
}
return obj.(*v1beta3.PriorityLevelConfiguration), err
}
// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors.
func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.PriorityLevelConfigurationList, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta3.PriorityLevelConfigurationList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1beta3.PriorityLevelConfigurationList{ListMeta: obj.(*v1beta3.PriorityLevelConfigurationList).ListMeta}
for _, item := range obj.(*v1beta3.PriorityLevelConfigurationList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations.
func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts))
}
// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{})
if obj == nil {
return nil, err
}
return obj.(*v1beta3.PriorityLevelConfiguration), err
}
// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{})
if obj == nil {
return nil, err
}
return obj.(*v1beta3.PriorityLevelConfiguration), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) {
obj, err := c.Fake.
Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{})
if obj == nil {
return nil, err
}
return obj.(*v1beta3.PriorityLevelConfiguration), err
}
// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs.
func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1beta3.PriorityLevelConfiguration{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts)
_, err := c.Fake.Invokes(action, &v1beta3.PriorityLevelConfigurationList{})
return err
}
// Patch applies the patch and returns the patched priorityLevelConfiguration.
func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta3.PriorityLevelConfiguration{})
if obj == nil {
return nil, err
}
return obj.(*v1beta3.PriorityLevelConfiguration), err
}
// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration.
func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
if priorityLevelConfiguration == nil {
return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil")
}
data, err := json.Marshal(priorityLevelConfiguration)
if err != nil {
return nil, err
}
name := priorityLevelConfiguration.Name
if name == nil {
return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
}
obj, err := c.Fake.
Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta3.PriorityLevelConfiguration{})
if obj == nil {
return nil, err
}
return obj.(*v1beta3.PriorityLevelConfiguration), err
}
// ApplyStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) {
if priorityLevelConfiguration == nil {
return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil")
}
data, err := json.Marshal(priorityLevelConfiguration)
if err != nil {
return nil, err
}
name := priorityLevelConfiguration.Name
if name == nil {
return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply")
}
obj, err := c.Fake.
Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1beta3.PriorityLevelConfiguration{})
if obj == nil {
return nil, err
}
return obj.(*v1beta3.PriorityLevelConfiguration), err
}
| staging/src/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00044962973333895206,
0.00019071831775363535,
0.00016151528689078987,
0.00016603856056462973,
0.00006906341150170192
] |
{
"id": 10,
"code_window": [
"\t\"k8s.io/kubernetes/pkg/proxy/winkernel\"\n",
")\n",
"\n",
"func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {\n",
"\tif config.Mode == \"\" {\n",
"\t\tconfig.Mode = proxyconfigapi.ProxyModeKernelspace\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformApplyDefaults is called after parsing command-line flags and/or reading the\n",
"// config file, to apply platform-specific default values to config.\n"
],
"file_path": "cmd/kube-proxy/app/server_windows.go",
"type": "add",
"edit_start_line_idx": 38
} | //go:build !windows
// +build !windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"context"
"errors"
"fmt"
goruntime "runtime"
"strings"
"time"
"github.com/google/cadvisor/machine"
"github.com/google/cadvisor/utils/sysfs"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/apimachinery/pkg/fields"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
toolswatch "k8s.io/client-go/tools/watch"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
"k8s.io/kubernetes/pkg/proxy"
proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/proxy/ipvs"
utilipset "k8s.io/kubernetes/pkg/proxy/ipvs/ipset"
utilipvs "k8s.io/kubernetes/pkg/proxy/ipvs/util"
proxymetrics "k8s.io/kubernetes/pkg/proxy/metrics"
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
"k8s.io/utils/exec"
netutils "k8s.io/utils/net"
"k8s.io/klog/v2"
)
// timeoutForNodePodCIDR is the time to wait for allocators to assign a PodCIDR to the
// node after it is registered.
var timeoutForNodePodCIDR = 5 * time.Minute
func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {
if config.Mode == "" {
klog.InfoS("Using iptables proxy")
config.Mode = proxyconfigapi.ProxyModeIPTables
}
if config.DetectLocalMode == "" {
klog.V(4).InfoS("Defaulting detect-local-mode", "localModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR))
config.DetectLocalMode = proxyconfigapi.LocalModeClusterCIDR
}
klog.V(2).InfoS("DetectLocalMode", "localMode", string(config.DetectLocalMode))
}
// createProxier creates the proxy.Provider
func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {
var proxier proxy.Provider
var err error
if config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {
klog.InfoS("Watching for node, awaiting podCIDR allocation", "hostname", s.Hostname)
node, err := waitForPodCIDR(s.Client, s.Hostname)
if err != nil {
return nil, err
}
s.podCIDRs = node.Spec.PodCIDRs
klog.InfoS("NodeInfo", "podCIDRs", node.Spec.PodCIDRs)
}
primaryProtocol := utiliptables.ProtocolIPv4
if s.PrimaryIPFamily == v1.IPv6Protocol {
primaryProtocol = utiliptables.ProtocolIPv6
}
execer := exec.New()
iptInterface := utiliptables.New(execer, primaryProtocol)
var ipt [2]utiliptables.Interface
	dualStack := true // While we assume that the node supports dual-stack, we do further checks below
	// Create iptables handlers for both families (one is already created)
// Always ordered as IPv4, IPv6
if primaryProtocol == utiliptables.ProtocolIPv4 {
ipt[0] = iptInterface
ipt[1] = utiliptables.New(execer, utiliptables.ProtocolIPv6)
} else {
ipt[0] = utiliptables.New(execer, utiliptables.ProtocolIPv4)
ipt[1] = iptInterface
}
if !ipt[0].Present() {
return nil, fmt.Errorf("iptables is not supported for primary IP family %q", primaryProtocol)
} else if !ipt[1].Present() {
klog.InfoS("kube-proxy running in single-stack mode: secondary ipFamily is not supported", "ipFamily", ipt[1].Protocol())
dualStack = false
// Validate NodePortAddresses is single-stack
npaByFamily := proxyutil.MapCIDRsByIPFamily(config.NodePortAddresses)
secondaryFamily := proxyutil.OtherIPFamily(s.PrimaryIPFamily)
badAddrs := npaByFamily[secondaryFamily]
if len(badAddrs) > 0 {
klog.InfoS("Ignoring --nodeport-addresses of the wrong family", "ipFamily", secondaryFamily, "addresses", badAddrs)
}
}
if config.Mode == proxyconfigapi.ProxyModeIPTables {
klog.InfoS("Using iptables Proxier")
if dualStack {
klog.InfoS("kube-proxy running in dual-stack mode", "ipFamily", iptInterface.Protocol())
klog.InfoS("Creating dualStackProxier for iptables")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewDualStackProxier(
ipt,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetectors,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.NodePortAddresses,
)
} else {
		// Create a single-stack proxier if and only if the node does not support dual-stack (i.e., no iptables support for the secondary IP family).
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewProxier(
s.PrimaryIPFamily,
iptInterface,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetector,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.NodePortAddresses,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
} else if config.Mode == proxyconfigapi.ProxyModeIPVS {
kernelHandler := ipvs.NewLinuxKernelHandler()
ipsetInterface := utilipset.New(execer)
ipvsInterface := utilipvs.New()
if err := ipvs.CanUseIPVSProxier(ipvsInterface, ipsetInterface, config.IPVS.Scheduler); err != nil {
return nil, fmt.Errorf("can't use the IPVS proxier: %v", err)
}
klog.InfoS("Using ipvs Proxier")
if dualStack {
klog.InfoS("Creating dualStackProxier for ipvs")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxier, err = ipvs.NewDualStackProxier(
ipt,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetectors,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.IPVS.Scheduler,
config.NodePortAddresses,
kernelHandler,
)
} else {
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxier, err = ipvs.NewProxier(
s.PrimaryIPFamily,
iptInterface,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetector,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.IPVS.Scheduler,
config.NodePortAddresses,
kernelHandler,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
}
return proxier, nil
}
func (s *ProxyServer) platformSetup() error {
ct := &realConntracker{}
max, err := getConntrackMax(s.Config.Conntrack)
if err != nil {
return err
}
if max > 0 {
err := ct.SetMax(max)
if err != nil {
if err != errReadOnlySysFS {
return err
}
// errReadOnlySysFS is caused by a known docker issue (https://github.com/docker/docker/issues/24000);
// the only remediation we know of is to restart the docker daemon.
// Here we send a node event with a specific reason and message; the
// administrator should decide whether and how to handle this issue,
// e.g. whether to drain the node and restart docker. This occurs in other
// container runtimes as well.
// TODO(random-liu): Remove this when the docker bug is fixed.
const message = "CRI error: /sys is read-only: " +
"cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)"
s.Recorder.Eventf(s.NodeRef, nil, v1.EventTypeWarning, err.Error(), "StartKubeProxy", message)
}
}
if s.Config.Conntrack.TCPEstablishedTimeout != nil && s.Config.Conntrack.TCPEstablishedTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPEstablishedTimeout.Duration / time.Second)
if err := ct.SetTCPEstablishedTimeout(timeout); err != nil {
return err
}
}
if s.Config.Conntrack.TCPCloseWaitTimeout != nil && s.Config.Conntrack.TCPCloseWaitTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPCloseWaitTimeout.Duration / time.Second)
if err := ct.SetTCPCloseWaitTimeout(timeout); err != nil {
return err
}
}
proxymetrics.RegisterMetrics()
return nil
}
func getConntrackMax(config proxyconfigapi.KubeProxyConntrackConfiguration) (int, error) {
if config.MaxPerCore != nil && *config.MaxPerCore > 0 {
floor := 0
if config.Min != nil {
floor = int(*config.Min)
}
scaled := int(*config.MaxPerCore) * detectNumCPU()
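// Illustrative example (values chosen for this sketch, not taken from the surrounding code):
// with MaxPerCore=32768, Min=131072 and 4 detected CPUs, scaled is 131072, which is not
// greater than the floor, so the floor value is returned below.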
if scaled > floor {
klog.V(3).InfoS("GetConntrackMax: using scaled conntrack-max-per-core")
return scaled, nil
}
klog.V(3).InfoS("GetConntrackMax: using conntrack-min")
return floor, nil
}
return 0, nil
}
func waitForPodCIDR(client clientset.Interface, nodeName string) (*v1.Node, error) {
// since allocators can assign the podCIDR after the node registers, we do a watch here to wait
// for podCIDR to be assigned, instead of assuming that the Get() on startup will have it.
ctx, cancelFunc := context.WithTimeout(context.TODO(), timeoutForNodePodCIDR)
defer cancelFunc()
fieldSelector := fields.OneTermEqualSelector("metadata.name", nodeName).String()
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
options.FieldSelector = fieldSelector
return client.CoreV1().Nodes().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return client.CoreV1().Nodes().Watch(ctx, options)
},
}
condition := func(event watch.Event) (bool, error) {
// don't process delete events
if event.Type != watch.Modified && event.Type != watch.Added {
return false, nil
}
n, ok := event.Object.(*v1.Node)
if !ok {
return false, fmt.Errorf("event object not of type Node")
}
// don't consider the node if it is going to be deleted; keep waiting
if !n.DeletionTimestamp.IsZero() {
return false, nil
}
return n.Spec.PodCIDR != "" && len(n.Spec.PodCIDRs) > 0, nil
}
evt, err := toolswatch.UntilWithSync(ctx, lw, &v1.Node{}, nil, condition)
if err != nil {
return nil, fmt.Errorf("timeout waiting for PodCIDR allocation to configure detect-local-mode %v: %v", proxyconfigapi.LocalModeNodeCIDR, err)
}
if n, ok := evt.Object.(*v1.Node); ok {
return n, nil
}
return nil, fmt.Errorf("event object not of type node")
}
func detectNumCPU() int {
// try to get numCPU from /sys first, due to a known issue (https://github.com/kubernetes/kubernetes/issues/99225)
_, numCPU, err := machine.GetTopology(sysfs.NewRealSysFs())
if err != nil || numCPU < 1 {
return goruntime.NumCPU()
}
return numCPU
}
func getLocalDetector(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt utiliptables.Interface, nodePodCIDRs []string) (proxyutiliptables.LocalTrafficDetector, error) {
switch mode {
case proxyconfigapi.LocalModeClusterCIDR:
// LocalModeClusterCIDR is the default if --detect-local-mode wasn't passed,
// but --cluster-cidr is optional.
if len(strings.TrimSpace(config.ClusterCIDR)) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined")
break
}
return proxyutiliptables.NewDetectLocalByCIDR(config.ClusterCIDR, ipt)
case proxyconfigapi.LocalModeNodeCIDR:
if len(nodePodCIDRs) == 0 {
klog.InfoS("Detect-local-mode set to NodeCIDR, but no PodCIDR defined at node")
break
}
return proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt)
case proxyconfigapi.LocalModeBridgeInterface:
return proxyutiliptables.NewDetectLocalByBridgeInterface(config.DetectLocal.BridgeInterface)
case proxyconfigapi.LocalModeInterfaceNamePrefix:
return proxyutiliptables.NewDetectLocalByInterfaceNamePrefix(config.DetectLocal.InterfaceNamePrefix)
}
klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
return proxyutiliptables.NewNoOpLocalDetector(), nil
}
func getDualStackLocalDetectorTuple(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt [2]utiliptables.Interface, nodePodCIDRs []string) ([2]proxyutiliptables.LocalTrafficDetector, error) {
var err error
localDetectors := [2]proxyutiliptables.LocalTrafficDetector{proxyutiliptables.NewNoOpLocalDetector(), proxyutiliptables.NewNoOpLocalDetector()}
switch mode {
case proxyconfigapi.LocalModeClusterCIDR:
// LocalModeClusterCIDR is the default if --detect-local-mode wasn't passed,
// but --cluster-cidr is optional.
if len(strings.TrimSpace(config.ClusterCIDR)) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined")
break
}
clusterCIDRs := cidrTuple(config.ClusterCIDR)
if len(strings.TrimSpace(clusterCIDRs[0])) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv4 cluster CIDR defined, defaulting to no-op detect-local for IPv4")
} else {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[0], ipt[0])
if err != nil { // don't lose the original error
return localDetectors, err
}
}
if len(strings.TrimSpace(clusterCIDRs[1])) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, defaulting to no-op detect-local for IPv6")
} else {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[1], ipt[1])
}
return localDetectors, err
case proxyconfigapi.LocalModeNodeCIDR:
if len(nodePodCIDRs) == 0 {
klog.InfoS("No node info available to configure detect-local-mode NodeCIDR")
break
}
// localDetectors, like ipt, need to be ordered [IPv4, IPv6], but PodCIDRs is set up so that PodCIDRs[0] == PodCIDR,
// so we have to handle the case where PodCIDR is IPv6 and assign that one to localDetectors[1].
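// Illustrative example (CIDR values invented for this comment): with
// nodePodCIDRs = ["fd00:1::/64", "10.0.0.0/24"], localDetectors[1] is built from the
// IPv6 CIDR with ipt[1], and localDetectors[0] from the IPv4 CIDR with ipt[0].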
if netutils.IsIPv6CIDRString(nodePodCIDRs[0]) {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt[1])
if err != nil {
return localDetectors, err
}
if len(nodePodCIDRs) > 1 {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[1], ipt[0])
}
} else {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt[0])
if err != nil {
return localDetectors, err
}
if len(nodePodCIDRs) > 1 {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[1], ipt[1])
}
}
return localDetectors, err
case proxyconfigapi.LocalModeBridgeInterface, proxyconfigapi.LocalModeInterfaceNamePrefix:
localDetector, err := getLocalDetector(mode, config, ipt[0], nodePodCIDRs)
if err == nil {
localDetectors[0] = localDetector
localDetectors[1] = localDetector
}
return localDetectors, err
default:
klog.InfoS("Unknown detect-local-mode", "detectLocalMode", mode)
}
klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
return localDetectors, nil
}
// cidrTuple takes a comma-separated list of CIDRs and returns a tuple (ipv4cidr, ipv6cidr).
// The returned tuple is guaranteed to have the order (ipv4, ipv6); if no CIDR from a family
// is found, an empty string "" is inserted for that position.
func cidrTuple(cidrList string) [2]string {
cidrs := [2]string{"", ""}
foundIPv4 := false
foundIPv6 := false
for _, cidr := range strings.Split(cidrList, ",") {
if netutils.IsIPv6CIDRString(cidr) && !foundIPv6 {
cidrs[1] = cidr
foundIPv6 = true
} else if !foundIPv4 {
cidrs[0] = cidr
foundIPv4 = true
}
if foundIPv6 && foundIPv4 {
break
}
}
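// Illustrative example (inputs invented for this comment): "10.0.0.0/16,fd00:1::/64"
// yields ["10.0.0.0/16", "fd00:1::/64"], while "fd00:1::/64" alone yields ["", "fd00:1::/64"].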
return cidrs
}
// cleanupAndExit removes iptables rules and ipset/ipvs rules
func cleanupAndExit() error {
execer := exec.New()
// clean up IPv6 and IPv4 iptables rules, regardless of the current configuration
ipts := []utiliptables.Interface{
utiliptables.New(execer, utiliptables.ProtocolIPv4),
utiliptables.New(execer, utiliptables.ProtocolIPv6),
}
ipsetInterface := utilipset.New(execer)
ipvsInterface := utilipvs.New()
var encounteredError bool
for _, ipt := range ipts {
encounteredError = iptables.CleanupLeftovers(ipt) || encounteredError
encounteredError = ipvs.CleanupLeftovers(ipvsInterface, ipt, ipsetInterface) || encounteredError
}
if encounteredError {
return errors.New("encountered an error while tearing down rules")
}
return nil
}
| cmd/kube-proxy/app/server_others.go | 1 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.9958927631378174,
0.05074182152748108,
0.0001676399406278506,
0.0002105818421114236,
0.18929095566272736
] |
{
"id": 10,
"code_window": [
"\t\"k8s.io/kubernetes/pkg/proxy/winkernel\"\n",
")\n",
"\n",
"func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {\n",
"\tif config.Mode == \"\" {\n",
"\t\tconfig.Mode = proxyconfigapi.ProxyModeKernelspace\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformApplyDefaults is called after parsing command-line flags and/or reading the\n",
"// config file, to apply platform-specific default values to config.\n"
],
"file_path": "cmd/kube-proxy/app/server_windows.go",
"type": "add",
"edit_start_line_idx": 38
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/utils/pointer"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_Deployment sets additional defaults compared to its counterpart
// in extensions. These addons are:
// - MaxUnavailable during rolling update set to 25% (1 in extensions)
// - MaxSurge value during rolling update set to 25% (1 in extensions)
// - RevisionHistoryLimit set to 10 (not set in extensions)
// - ProgressDeadlineSeconds set to 600s (not set in extensions)
func SetDefaults_Deployment(obj *appsv1.Deployment) {
// Set DeploymentSpec.Replicas to 1 if it is not set.
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
strategy := &obj.Spec.Strategy
// Set default DeploymentStrategyType as RollingUpdate.
if strategy.Type == "" {
strategy.Type = appsv1.RollingUpdateDeploymentStrategyType
}
if strategy.Type == appsv1.RollingUpdateDeploymentStrategyType {
if strategy.RollingUpdate == nil {
rollingUpdate := appsv1.RollingUpdateDeployment{}
strategy.RollingUpdate = &rollingUpdate
}
if strategy.RollingUpdate.MaxUnavailable == nil {
// Set default MaxUnavailable as 25% by default.
maxUnavailable := intstr.FromString("25%")
strategy.RollingUpdate.MaxUnavailable = &maxUnavailable
}
if strategy.RollingUpdate.MaxSurge == nil {
// Set default MaxSurge as 25% by default.
maxSurge := intstr.FromString("25%")
strategy.RollingUpdate.MaxSurge = &maxSurge
}
}
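// Illustrative note (replica count invented for this comment): with 8 replicas, the
// defaulted 25% MaxUnavailable and MaxSurge each work out to 2 pods during a rolling update.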
if obj.Spec.RevisionHistoryLimit == nil {
obj.Spec.RevisionHistoryLimit = new(int32)
*obj.Spec.RevisionHistoryLimit = 10
}
if obj.Spec.ProgressDeadlineSeconds == nil {
obj.Spec.ProgressDeadlineSeconds = new(int32)
*obj.Spec.ProgressDeadlineSeconds = 600
}
}
func SetDefaults_DaemonSet(obj *appsv1.DaemonSet) {
updateStrategy := &obj.Spec.UpdateStrategy
if updateStrategy.Type == "" {
updateStrategy.Type = appsv1.RollingUpdateDaemonSetStrategyType
}
if updateStrategy.Type == appsv1.RollingUpdateDaemonSetStrategyType {
if updateStrategy.RollingUpdate == nil {
rollingUpdate := appsv1.RollingUpdateDaemonSet{}
updateStrategy.RollingUpdate = &rollingUpdate
}
if updateStrategy.RollingUpdate.MaxUnavailable == nil {
// Set default MaxUnavailable as 1 by default.
maxUnavailable := intstr.FromInt32(1)
updateStrategy.RollingUpdate.MaxUnavailable = &maxUnavailable
}
if updateStrategy.RollingUpdate.MaxSurge == nil {
// Set default MaxSurge as 0 by default.
maxSurge := intstr.FromInt32(0)
updateStrategy.RollingUpdate.MaxSurge = &maxSurge
}
}
if obj.Spec.RevisionHistoryLimit == nil {
obj.Spec.RevisionHistoryLimit = new(int32)
*obj.Spec.RevisionHistoryLimit = 10
}
}
func SetDefaults_StatefulSet(obj *appsv1.StatefulSet) {
if len(obj.Spec.PodManagementPolicy) == 0 {
obj.Spec.PodManagementPolicy = appsv1.OrderedReadyPodManagement
}
if obj.Spec.UpdateStrategy.Type == "" {
obj.Spec.UpdateStrategy.Type = appsv1.RollingUpdateStatefulSetStrategyType
if obj.Spec.UpdateStrategy.RollingUpdate == nil {
// UpdateStrategy.RollingUpdate will take default values below.
obj.Spec.UpdateStrategy.RollingUpdate = &appsv1.RollingUpdateStatefulSetStrategy{}
}
}
if obj.Spec.UpdateStrategy.Type == appsv1.RollingUpdateStatefulSetStrategyType &&
obj.Spec.UpdateStrategy.RollingUpdate != nil {
if obj.Spec.UpdateStrategy.RollingUpdate.Partition == nil {
obj.Spec.UpdateStrategy.RollingUpdate.Partition = pointer.Int32(0)
}
if utilfeature.DefaultFeatureGate.Enabled(features.MaxUnavailableStatefulSet) {
if obj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable == nil {
maxUnavailable := intstr.FromInt32(1)
obj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable = &maxUnavailable
}
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) {
if obj.Spec.PersistentVolumeClaimRetentionPolicy == nil {
obj.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{}
}
if len(obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted) == 0 {
obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted = appsv1.RetainPersistentVolumeClaimRetentionPolicyType
}
if len(obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled) == 0 {
obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled = appsv1.RetainPersistentVolumeClaimRetentionPolicyType
}
}
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
if obj.Spec.RevisionHistoryLimit == nil {
obj.Spec.RevisionHistoryLimit = new(int32)
*obj.Spec.RevisionHistoryLimit = 10
}
}
func SetDefaults_ReplicaSet(obj *appsv1.ReplicaSet) {
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
}
| pkg/apis/apps/v1/defaults.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.03231443837285042,
0.0021931270603090525,
0.00016964675160124898,
0.0001726451446302235,
0.007777342572808266
] |
{
"id": 10,
"code_window": [
"\t\"k8s.io/kubernetes/pkg/proxy/winkernel\"\n",
")\n",
"\n",
"func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {\n",
"\tif config.Mode == \"\" {\n",
"\t\tconfig.Mode = proxyconfigapi.ProxyModeKernelspace\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformApplyDefaults is called after parsing command-line flags and/or reading the\n",
"// config file, to apply platform-specific default values to config.\n"
],
"file_path": "cmd/kube-proxy/app/server_windows.go",
"type": "add",
"edit_start_line_idx": 38
} | #!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copies any built binaries (and other generated files) out of the Docker build container.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/build/common.sh"
kube::build::verify_prereqs
kube::build::copy_output
| build/copy-output.sh | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00017217299318872392,
0.00016954663442447782,
0.00016563634562771767,
0.00017083057900890708,
0.0000028187841962790117
] |
{
"id": 10,
"code_window": [
"\t\"k8s.io/kubernetes/pkg/proxy/winkernel\"\n",
")\n",
"\n",
"func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {\n",
"\tif config.Mode == \"\" {\n",
"\t\tconfig.Mode = proxyconfigapi.ProxyModeKernelspace\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformApplyDefaults is called after parsing command-line flags and/or reading the\n",
"// config file, to apply platform-specific default values to config.\n"
],
"file_path": "cmd/kube-proxy/app/server_windows.go",
"type": "add",
"edit_start_line_idx": 38
} | /*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internalversion
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// SetListOptionsDefaults sets defaults on the provided ListOptions if applicable.
//
// TODO(#115478): once the watch-list fg is always on we register this function in the scheme (via AddTypeDefaultingFunc).
// TODO(#115478): when the function is registered in the scheme remove all callers of this method.
func SetListOptionsDefaults(obj *ListOptions, isWatchListFeatureEnabled bool) {
if !isWatchListFeatureEnabled {
return
}
if obj.SendInitialEvents != nil || len(obj.ResourceVersionMatch) != 0 {
return
}
legacy := obj.ResourceVersion == "" || obj.ResourceVersion == "0"
if obj.Watch && legacy {
turnOnInitialEvents := true
obj.SendInitialEvents = &turnOnInitialEvents
obj.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan
}
}
| staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.010054642334580421,
0.0026433460880070925,
0.00017155386740341783,
0.00017359390039928257,
0.004278914071619511
] |
{
"id": 11,
"code_window": [
"\tif config.Mode == \"\" {\n",
"\t\tconfig.Mode = proxyconfigapi.ProxyModeKernelspace\n",
"\t}\n",
"}\n",
"\n",
"// createProxier creates the proxy.Provider\n",
"func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {\n",
"\tvar healthzPort int\n",
"\tif len(config.HealthzBindAddress) > 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformSetup is called after setting up the ProxyServer, but before creating the\n",
"// Proxier. It should fill in any platform-specific fields and perform other\n",
"// platform-specific setup.\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\twinkernel.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n"
],
"file_path": "cmd/kube-proxy/app/server_windows.go",
"type": "add",
"edit_start_line_idx": 44
} | //go:build windows
// +build windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"errors"
"fmt"
"net"
"strconv"
// Enable pprof HTTP handlers.
_ "net/http/pprof"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/proxy"
proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/kubernetes/pkg/proxy/winkernel"
)
func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {
if config.Mode == "" {
config.Mode = proxyconfigapi.ProxyModeKernelspace
}
}
// createProxier creates the proxy.Provider
func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {
var healthzPort int
if len(config.HealthzBindAddress) > 0 {
_, port, _ := net.SplitHostPort(config.HealthzBindAddress)
healthzPort, _ = strconv.Atoi(port)
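// Note: the SplitHostPort and Atoi errors are discarded above, so if
// HealthzBindAddress is malformed, healthzPort simply stays 0.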
}
// Check if Kernel Space can be used.
canUseWinKernelProxy, err := winkernel.CanUseWinKernelProxier(winkernel.WindowsKernelCompatTester{})
if !canUseWinKernelProxy && err != nil {
return nil, err
}
var proxier proxy.Provider
dualStackMode := getDualStackMode(config.Winkernel.NetworkName, winkernel.DualStackCompatTester{})
if dualStackMode {
klog.InfoS("Creating dualStackProxier for Windows kernel.")
proxier, err = winkernel.NewDualStackProxier(
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.ClusterCIDR,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.Winkernel,
healthzPort,
)
} else {
proxier, err = winkernel.NewProxier(
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.ClusterCIDR,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.Winkernel,
healthzPort,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
return proxier, nil
}
func (s *ProxyServer) platformSetup() error {
winkernel.RegisterMetrics()
return nil
}
func getDualStackMode(networkname string, compatTester winkernel.StackCompatTester) bool {
return compatTester.DualStackCompatible(networkname)
}
// cleanupAndExit cleans up after a previous proxy run
func cleanupAndExit() error {
return errors.New("--cleanup-and-exit is not implemented on Windows")
}
| cmd/kube-proxy/app/server_windows.go | 1 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.997474730014801,
0.31179484724998474,
0.00016495845920871943,
0.04970964416861534,
0.42761287093162537
] |
{
"id": 11,
"code_window": [
"\tif config.Mode == \"\" {\n",
"\t\tconfig.Mode = proxyconfigapi.ProxyModeKernelspace\n",
"\t}\n",
"}\n",
"\n",
"// createProxier creates the proxy.Provider\n",
"func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {\n",
"\tvar healthzPort int\n",
"\tif len(config.HealthzBindAddress) > 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformSetup is called after setting up the ProxyServer, but before creating the\n",
"// Proxier. It should fill in any platform-specific fields and perform other\n",
"// platform-specific setup.\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\twinkernel.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n"
],
"file_path": "cmd/kube-proxy/app/server_windows.go",
"type": "add",
"edit_start_line_idx": 44
} | #!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script checks restricted packages are imported or not and outputs the
# result. Target directory's path and allowed packages against checking are
# listed in `staging/publishing/import-restrictions.yaml`.
# Usage: `hack/verify-imports.sh`.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
make -C "${KUBE_ROOT}" WHAT=cmd/importverifier
# Find binary
importverifier=$(kube::util::find-binary "importverifier")
if [[ ! -x "$importverifier" ]]; then
{
echo "It looks as if you don't have a compiled importverifier binary"
echo
echo "If you are running from a clone of the git repo, please run"
echo "'make WHAT=cmd/importverifier'."
} >&2
exit 1
fi
"${importverifier}" "k8s.io/" "${KUBE_ROOT}/staging/publishing/import-restrictions.yaml"
| hack/verify-imports.sh | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00017708149971440434,
0.0001725648617139086,
0.0001639926922507584,
0.0001741159794619307,
0.000004571989393298281
] |
{
"id": 11,
"code_window": [
"\tif config.Mode == \"\" {\n",
"\t\tconfig.Mode = proxyconfigapi.ProxyModeKernelspace\n",
"\t}\n",
"}\n",
"\n",
"// createProxier creates the proxy.Provider\n",
"func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {\n",
"\tvar healthzPort int\n",
"\tif len(config.HealthzBindAddress) > 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformSetup is called after setting up the ProxyServer, but before creating the\n",
"// Proxier. It should fill in any platform-specific fields and perform other\n",
"// platform-specific setup.\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\twinkernel.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n"
],
"file_path": "cmd/kube-proxy/app/server_windows.go",
"type": "add",
"edit_start_line_idx": 44
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package applyconfiguration
import (
schema "k8s.io/apimachinery/pkg/runtime/schema"
v1 "k8s.io/code-generator/examples/MixedCase/apis/example/v1"
examplev1 "k8s.io/code-generator/examples/MixedCase/applyconfiguration/example/v1"
)
// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no
// apply configuration type exists for the given GroupVersionKind.
func ForKind(kind schema.GroupVersionKind) interface{} {
switch kind {
// Group=example.crd.code-generator.k8s.io, Version=v1
case v1.SchemeGroupVersion.WithKind("ClusterTestType"):
return &examplev1.ClusterTestTypeApplyConfiguration{}
case v1.SchemeGroupVersion.WithKind("TestType"):
return &examplev1.TestTypeApplyConfiguration{}
}
return nil
}
| staging/src/k8s.io/code-generator/examples/MixedCase/applyconfiguration/utils.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00017809326527640224,
0.00017305821529589593,
0.00016543667879886925,
0.0001743514440022409,
0.000004669820100389188
] |
{
"id": 11,
"code_window": [
"\tif config.Mode == \"\" {\n",
"\t\tconfig.Mode = proxyconfigapi.ProxyModeKernelspace\n",
"\t}\n",
"}\n",
"\n",
"// createProxier creates the proxy.Provider\n",
"func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {\n",
"\tvar healthzPort int\n",
"\tif len(config.HealthzBindAddress) > 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// platformSetup is called after setting up the ProxyServer, but before creating the\n",
"// Proxier. It should fill in any platform-specific fields and perform other\n",
"// platform-specific setup.\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\twinkernel.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n"
],
"file_path": "cmd/kube-proxy/app/server_windows.go",
"type": "add",
"edit_start_line_idx": 44
} | apiVersion: v1
kind: Pod
metadata:
name: restrictedvolumes13
spec:
containers:
- image: registry.k8s.io/pause
name: container1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
initContainers:
- image: registry.k8s.io/pause
name: initcontainer1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
volumes:
- name: volume1
vsphereVolume:
volumePath: test
| staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.25/fail/restrictedvolumes13.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00017864786786958575,
0.00017708928498905152,
0.00017455898341722786,
0.00017806098912842572,
0.0000018051607639790745
] |
{
"id": 12,
"code_window": [
"\n",
"\treturn proxier, nil\n",
"}\n",
"\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\twinkernel.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n",
"func getDualStackMode(networkname string, compatTester winkernel.StackCompatTester) bool {\n",
"\treturn compatTester.DualStackCompatible(networkname)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/server_windows.go",
"type": "replace",
"edit_start_line_idx": 95
} | //go:build !windows
// +build !windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"context"
"errors"
"fmt"
goruntime "runtime"
"strings"
"time"
"github.com/google/cadvisor/machine"
"github.com/google/cadvisor/utils/sysfs"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/apimachinery/pkg/fields"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
toolswatch "k8s.io/client-go/tools/watch"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
"k8s.io/kubernetes/pkg/proxy"
proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/proxy/ipvs"
utilipset "k8s.io/kubernetes/pkg/proxy/ipvs/ipset"
utilipvs "k8s.io/kubernetes/pkg/proxy/ipvs/util"
proxymetrics "k8s.io/kubernetes/pkg/proxy/metrics"
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
"k8s.io/utils/exec"
netutils "k8s.io/utils/net"
"k8s.io/klog/v2"
)
// timeoutForNodePodCIDR is the time to wait for allocators to assign a PodCIDR to the
// node after it is registered.
var timeoutForNodePodCIDR = 5 * time.Minute
func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfiguration) {
if config.Mode == "" {
klog.InfoS("Using iptables proxy")
config.Mode = proxyconfigapi.ProxyModeIPTables
}
if config.DetectLocalMode == "" {
klog.V(4).InfoS("Defaulting detect-local-mode", "localModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR))
config.DetectLocalMode = proxyconfigapi.LocalModeClusterCIDR
}
klog.V(2).InfoS("DetectLocalMode", "localMode", string(config.DetectLocalMode))
}
// createProxier creates the proxy.Provider
func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration) (proxy.Provider, error) {
var proxier proxy.Provider
var err error
if config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {
klog.InfoS("Watching for node, awaiting podCIDR allocation", "hostname", s.Hostname)
node, err := waitForPodCIDR(s.Client, s.Hostname)
if err != nil {
return nil, err
}
s.podCIDRs = node.Spec.PodCIDRs
klog.InfoS("NodeInfo", "podCIDRs", node.Spec.PodCIDRs)
}
primaryProtocol := utiliptables.ProtocolIPv4
if s.PrimaryIPFamily == v1.IPv6Protocol {
primaryProtocol = utiliptables.ProtocolIPv6
}
execer := exec.New()
iptInterface := utiliptables.New(execer, primaryProtocol)
var ipt [2]utiliptables.Interface
dualStack := true // While we assume that the node supports dual-stack, we do further checks below
// Create iptables handlers for both families; one is already created
// Always ordered as IPv4, IPv6
if primaryProtocol == utiliptables.ProtocolIPv4 {
ipt[0] = iptInterface
ipt[1] = utiliptables.New(execer, utiliptables.ProtocolIPv6)
} else {
ipt[0] = utiliptables.New(execer, utiliptables.ProtocolIPv4)
ipt[1] = iptInterface
}
if !ipt[0].Present() {
return nil, fmt.Errorf("iptables is not supported for primary IP family %q", primaryProtocol)
} else if !ipt[1].Present() {
klog.InfoS("kube-proxy running in single-stack mode: secondary ipFamily is not supported", "ipFamily", ipt[1].Protocol())
dualStack = false
// Validate NodePortAddresses is single-stack
npaByFamily := proxyutil.MapCIDRsByIPFamily(config.NodePortAddresses)
secondaryFamily := proxyutil.OtherIPFamily(s.PrimaryIPFamily)
badAddrs := npaByFamily[secondaryFamily]
if len(badAddrs) > 0 {
klog.InfoS("Ignoring --nodeport-addresses of the wrong family", "ipFamily", secondaryFamily, "addresses", badAddrs)
}
}
if config.Mode == proxyconfigapi.ProxyModeIPTables {
klog.InfoS("Using iptables Proxier")
if dualStack {
klog.InfoS("kube-proxy running in dual-stack mode", "ipFamily", iptInterface.Protocol())
klog.InfoS("Creating dualStackProxier for iptables")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewDualStackProxier(
ipt,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetectors,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.NodePortAddresses,
)
} else {
// Create a single-stack proxier if and only if the node does not support dual-stack (i.e., no iptables support for the secondary IP family).
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewProxier(
s.PrimaryIPFamily,
iptInterface,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetector,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.NodePortAddresses,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
} else if config.Mode == proxyconfigapi.ProxyModeIPVS {
kernelHandler := ipvs.NewLinuxKernelHandler()
ipsetInterface := utilipset.New(execer)
ipvsInterface := utilipvs.New()
if err := ipvs.CanUseIPVSProxier(ipvsInterface, ipsetInterface, config.IPVS.Scheduler); err != nil {
return nil, fmt.Errorf("can't use the IPVS proxier: %v", err)
}
klog.InfoS("Using ipvs Proxier")
if dualStack {
klog.InfoS("Creating dualStackProxier for ipvs")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxier, err = ipvs.NewDualStackProxier(
ipt,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetectors,
s.Hostname,
s.NodeIPs,
s.Recorder,
s.HealthzServer,
config.IPVS.Scheduler,
config.NodePortAddresses,
kernelHandler,
)
} else {
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, s.podCIDRs)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxier, err = ipvs.NewProxier(
s.PrimaryIPFamily,
iptInterface,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetector,
s.Hostname,
s.NodeIPs[s.PrimaryIPFamily],
s.Recorder,
s.HealthzServer,
config.IPVS.Scheduler,
config.NodePortAddresses,
kernelHandler,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
}
return proxier, nil
}
func (s *ProxyServer) platformSetup() error {
ct := &realConntracker{}
max, err := getConntrackMax(s.Config.Conntrack)
if err != nil {
return err
}
if max > 0 {
err := ct.SetMax(max)
if err != nil {
if err != errReadOnlySysFS {
return err
}
// errReadOnlySysFS is caused by a known docker issue (https://github.com/docker/docker/issues/24000);
// the only remediation we know of is to restart the docker daemon.
// Here we send a node event with a specific reason and message; the
// administrator should decide whether and how to handle this issue,
// e.g. whether to drain the node and restart docker. This occurs in other
// container runtimes as well.
// TODO(random-liu): Remove this when the docker bug is fixed.
const message = "CRI error: /sys is read-only: " +
"cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)"
s.Recorder.Eventf(s.NodeRef, nil, v1.EventTypeWarning, err.Error(), "StartKubeProxy", message)
}
}
if s.Config.Conntrack.TCPEstablishedTimeout != nil && s.Config.Conntrack.TCPEstablishedTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPEstablishedTimeout.Duration / time.Second)
if err := ct.SetTCPEstablishedTimeout(timeout); err != nil {
return err
}
}
if s.Config.Conntrack.TCPCloseWaitTimeout != nil && s.Config.Conntrack.TCPCloseWaitTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPCloseWaitTimeout.Duration / time.Second)
if err := ct.SetTCPCloseWaitTimeout(timeout); err != nil {
return err
}
}
proxymetrics.RegisterMetrics()
return nil
}
func getConntrackMax(config proxyconfigapi.KubeProxyConntrackConfiguration) (int, error) {
if config.MaxPerCore != nil && *config.MaxPerCore > 0 {
floor := 0
if config.Min != nil {
floor = int(*config.Min)
}
scaled := int(*config.MaxPerCore) * detectNumCPU()
if scaled > floor {
klog.V(3).InfoS("GetConntrackMax: using scaled conntrack-max-per-core")
return scaled, nil
}
klog.V(3).InfoS("GetConntrackMax: using conntrack-min")
return floor, nil
}
return 0, nil
}
func waitForPodCIDR(client clientset.Interface, nodeName string) (*v1.Node, error) {
// since allocators can assign the podCIDR after the node registers, we do a watch here to wait
// for podCIDR to be assigned, instead of assuming that the Get() on startup will have it.
ctx, cancelFunc := context.WithTimeout(context.TODO(), timeoutForNodePodCIDR)
defer cancelFunc()
fieldSelector := fields.OneTermEqualSelector("metadata.name", nodeName).String()
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
options.FieldSelector = fieldSelector
return client.CoreV1().Nodes().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return client.CoreV1().Nodes().Watch(ctx, options)
},
}
condition := func(event watch.Event) (bool, error) {
// don't process delete events
if event.Type != watch.Modified && event.Type != watch.Added {
return false, nil
}
n, ok := event.Object.(*v1.Node)
if !ok {
return false, fmt.Errorf("event object not of type Node")
}
// don't consider the node if it is going to be deleted; keep waiting
if !n.DeletionTimestamp.IsZero() {
return false, nil
}
return n.Spec.PodCIDR != "" && len(n.Spec.PodCIDRs) > 0, nil
}
evt, err := toolswatch.UntilWithSync(ctx, lw, &v1.Node{}, nil, condition)
if err != nil {
return nil, fmt.Errorf("timeout waiting for PodCIDR allocation to configure detect-local-mode %v: %v", proxyconfigapi.LocalModeNodeCIDR, err)
}
if n, ok := evt.Object.(*v1.Node); ok {
return n, nil
}
return nil, fmt.Errorf("event object not of type node")
}
func detectNumCPU() int {
// try to get numCPU from /sys first, due to a known issue (https://github.com/kubernetes/kubernetes/issues/99225)
_, numCPU, err := machine.GetTopology(sysfs.NewRealSysFs())
if err != nil || numCPU < 1 {
return goruntime.NumCPU()
}
return numCPU
}
func getLocalDetector(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt utiliptables.Interface, nodePodCIDRs []string) (proxyutiliptables.LocalTrafficDetector, error) {
switch mode {
case proxyconfigapi.LocalModeClusterCIDR:
// LocalModeClusterCIDR is the default if --detect-local-mode wasn't passed,
// but --cluster-cidr is optional.
if len(strings.TrimSpace(config.ClusterCIDR)) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined")
break
}
return proxyutiliptables.NewDetectLocalByCIDR(config.ClusterCIDR, ipt)
case proxyconfigapi.LocalModeNodeCIDR:
if len(nodePodCIDRs) == 0 {
klog.InfoS("Detect-local-mode set to NodeCIDR, but no PodCIDR defined at node")
break
}
return proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt)
case proxyconfigapi.LocalModeBridgeInterface:
return proxyutiliptables.NewDetectLocalByBridgeInterface(config.DetectLocal.BridgeInterface)
case proxyconfigapi.LocalModeInterfaceNamePrefix:
return proxyutiliptables.NewDetectLocalByInterfaceNamePrefix(config.DetectLocal.InterfaceNamePrefix)
}
klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
return proxyutiliptables.NewNoOpLocalDetector(), nil
}
func getDualStackLocalDetectorTuple(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt [2]utiliptables.Interface, nodePodCIDRs []string) ([2]proxyutiliptables.LocalTrafficDetector, error) {
var err error
localDetectors := [2]proxyutiliptables.LocalTrafficDetector{proxyutiliptables.NewNoOpLocalDetector(), proxyutiliptables.NewNoOpLocalDetector()}
switch mode {
case proxyconfigapi.LocalModeClusterCIDR:
// LocalModeClusterCIDR is the default if --detect-local-mode wasn't passed,
// but --cluster-cidr is optional.
if len(strings.TrimSpace(config.ClusterCIDR)) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined")
break
}
clusterCIDRs := cidrTuple(config.ClusterCIDR)
if len(strings.TrimSpace(clusterCIDRs[0])) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv4 cluster CIDR defined, defaulting to no-op detect-local for IPv4")
} else {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[0], ipt[0])
if err != nil { // don't lose the original error
return localDetectors, err
}
}
if len(strings.TrimSpace(clusterCIDRs[1])) == 0 {
klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, defaulting to no-op detect-local for IPv6")
} else {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[1], ipt[1])
}
return localDetectors, err
case proxyconfigapi.LocalModeNodeCIDR:
if len(nodePodCIDRs) == 0 {
klog.InfoS("No node info available to configure detect-local-mode NodeCIDR")
break
}
// localDetectors, like ipt, need to be ordered [IPv4, IPv6], but PodCIDRs is set up so that PodCIDRs[0] == PodCIDR,
// so we have to handle the case where PodCIDR is IPv6 and assign that one to localDetectors[1].
if netutils.IsIPv6CIDRString(nodePodCIDRs[0]) {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt[1])
if err != nil {
return localDetectors, err
}
if len(nodePodCIDRs) > 1 {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[1], ipt[0])
}
} else {
localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[0], ipt[0])
if err != nil {
return localDetectors, err
}
if len(nodePodCIDRs) > 1 {
localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodePodCIDRs[1], ipt[1])
}
}
return localDetectors, err
case proxyconfigapi.LocalModeBridgeInterface, proxyconfigapi.LocalModeInterfaceNamePrefix:
localDetector, err := getLocalDetector(mode, config, ipt[0], nodePodCIDRs)
if err == nil {
localDetectors[0] = localDetector
localDetectors[1] = localDetector
}
return localDetectors, err
default:
klog.InfoS("Unknown detect-local-mode", "detectLocalMode", mode)
}
klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
return localDetectors, nil
}
// cidrTuple takes a comma-separated list of CIDRs and returns a tuple (ipv4cidr, ipv6cidr).
// The returned tuple is guaranteed to have the order (ipv4, ipv6); if no CIDR from a family
// is found, an empty string "" is inserted for that position.
func cidrTuple(cidrList string) [2]string {
cidrs := [2]string{"", ""}
foundIPv4 := false
foundIPv6 := false
for _, cidr := range strings.Split(cidrList, ",") {
if netutils.IsIPv6CIDRString(cidr) && !foundIPv6 {
cidrs[1] = cidr
foundIPv6 = true
} else if !foundIPv4 {
cidrs[0] = cidr
foundIPv4 = true
}
if foundIPv6 && foundIPv4 {
break
}
}
return cidrs
}
// cleanupAndExit removes iptables rules and ipset/ipvs rules
func cleanupAndExit() error {
execer := exec.New()
// clean up IPv6 and IPv4 iptables rules, regardless of the current configuration
ipts := []utiliptables.Interface{
utiliptables.New(execer, utiliptables.ProtocolIPv4),
utiliptables.New(execer, utiliptables.ProtocolIPv6),
}
ipsetInterface := utilipset.New(execer)
ipvsInterface := utilipvs.New()
var encounteredError bool
for _, ipt := range ipts {
encounteredError = iptables.CleanupLeftovers(ipt) || encounteredError
encounteredError = ipvs.CleanupLeftovers(ipvsInterface, ipt, ipsetInterface) || encounteredError
}
if encounteredError {
return errors.New("encountered an error while tearing down rules")
}
return nil
}
| cmd/kube-proxy/app/server_others.go | 1 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.9968596696853638,
0.06644900143146515,
0.00016642014088574797,
0.00017312912677880377,
0.22643399238586426
] |
{
"id": 12,
"code_window": [
"\n",
"\treturn proxier, nil\n",
"}\n",
"\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\twinkernel.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n",
"func getDualStackMode(networkname string, compatTester winkernel.StackCompatTester) bool {\n",
"\treturn compatTester.DualStackCompatible(networkname)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/server_windows.go",
"type": "replace",
"edit_start_line_idx": 95
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
examplegroupv1 "k8s.io/code-generator/examples/HyphenGroup/apis/example/v1"
)
var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
examplegroupv1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme
func init() {
v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
utilruntime.Must(AddToScheme(Scheme))
}
| staging/src/k8s.io/code-generator/examples/HyphenGroup/clientset/versioned/scheme/register.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00017840054351836443,
0.0001722271990729496,
0.00016576795314904302,
0.0001720604777801782,
0.000004867646566708572
] |
{
"id": 12,
"code_window": [
"\n",
"\treturn proxier, nil\n",
"}\n",
"\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\twinkernel.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n",
"func getDualStackMode(networkname string, compatTester winkernel.StackCompatTester) bool {\n",
"\treturn compatTester.DualStackCompatible(networkname)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/server_windows.go",
"type": "replace",
"edit_start_line_idx": 95
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import v1 "k8s.io/api/core/v1"
// FakePodDeletionSafetyProvider is a fake PodDeletionSafetyProvider for test.
type FakePodDeletionSafetyProvider struct {
HasRunning bool
}
func (f *FakePodDeletionSafetyProvider) PodCouldHaveRunningContainers(pod *v1.Pod) bool {
return f.HasRunning
}
| pkg/kubelet/status/testing/fake_pod_deletion_safety.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.0005172243691049516,
0.00029075503698550165,
0.00017730148101691157,
0.00017773920262698084,
0.00016013810818549246
] |
{
"id": 12,
"code_window": [
"\n",
"\treturn proxier, nil\n",
"}\n",
"\n",
"func (s *ProxyServer) platformSetup() error {\n",
"\twinkernel.RegisterMetrics()\n",
"\treturn nil\n",
"}\n",
"\n",
"func getDualStackMode(networkname string, compatTester winkernel.StackCompatTester) bool {\n",
"\treturn compatTester.DualStackCompatible(networkname)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/server_windows.go",
"type": "replace",
"edit_start_line_idx": 95
} | /*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"io"
"os"
"path/filepath"
"testing"
"time"
)
func TestProcessEncryptionConfig(t *testing.T) {
testCases := []struct {
name string
filePath string
expectError bool
}{
{
name: "empty config file",
filePath: "testdata/empty_config.yaml",
expectError: true,
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
ctx := context.Background()
d := NewDynamicEncryptionConfiguration(
testCase.name,
testCase.filePath,
nil,
"",
)
_, _, err := d.processEncryptionConfig(ctx)
if testCase.expectError && err == nil {
t.Fatalf("expected error but got none")
}
if !testCase.expectError && err != nil {
t.Fatalf("expected no error but got %v", err)
}
})
}
}
func TestWatchEncryptionConfigFile(t *testing.T) {
testCases := []struct {
name string
generateEvent func(filePath string, cancel context.CancelFunc)
expectError bool
}{
{
name: "file not renamed or removed",
expectError: false,
generateEvent: func(filePath string, cancel context.CancelFunc) {
os.Chtimes(filePath, time.Now(), time.Now())
// wait for the event to be handled
time.Sleep(1 * time.Second)
cancel()
os.Remove(filePath)
},
},
{
name: "file renamed",
expectError: true,
generateEvent: func(filePath string, cancel context.CancelFunc) {
os.Rename(filePath, filePath+"1")
// wait for the event to be handled
time.Sleep(1 * time.Second)
os.Remove(filePath + "1")
},
},
{
name: "file removed",
expectError: true,
generateEvent: func(filePath string, cancel context.CancelFunc) {
				// allow the watcher to start
time.Sleep(1 * time.Second)
os.Remove(filePath)
},
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
testFilePath := copyFileForTest(t, "testdata/ec_config.yaml")
d := NewDynamicEncryptionConfiguration(
testCase.name,
testFilePath,
nil,
"",
)
errs := make(chan error, 1)
go func() {
err := d.watchEncryptionConfigFile(ctx)
errs <- err
}()
testCase.generateEvent(d.filePath, cancel)
err := <-errs
if testCase.expectError && err == nil {
t.Fatalf("expected error but got none")
}
if !testCase.expectError && err != nil {
t.Fatalf("expected no error but got %v", err)
}
})
}
}
func copyFileForTest(t *testing.T, srcFilePath string) string {
t.Helper()
// get directory from source file path
srcDir := filepath.Dir(srcFilePath)
// get file name from source file path
srcFileName := filepath.Base(srcFilePath)
// set new file path
dstFilePath := filepath.Join(srcDir, "test_"+srcFileName)
// copy src file to dst file
r, err := os.Open(srcFilePath)
if err != nil {
t.Fatalf("failed to open source file: %v", err)
}
defer r.Close()
w, err := os.Create(dstFilePath)
if err != nil {
t.Fatalf("failed to create destination file: %v", err)
}
defer w.Close()
// copy the file
_, err = io.Copy(w, r)
if err != nil {
t.Fatalf("failed to copy file: %v", err)
}
err = w.Close()
if err != nil {
t.Fatalf("failed to close destination file: %v", err)
}
return dstFilePath
}
| staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/8abfa89e82b242abae47575c8ef6c15a56b516b8 | [
0.00022620134404860437,
0.00017748045502230525,
0.00016843932098709047,
0.000172039755852893,
0.000014182195627654437
] |
{
"id": 1,
"code_window": [
"func (p throttledDockerPuller) IsImagePresent(name string) (bool, error) {\n",
"\treturn p.puller.IsImagePresent(name)\n",
"}\n",
"\n",
"// TODO (random-liu) Almost never used, should we remove this?\n",
"// DockerContainers is a map of containers\n",
"type DockerContainers map[kubetypes.DockerID]*docker.APIContainers\n",
"\n",
"func (c DockerContainers) FindPodContainer(podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {\n",
"\tfor _, dockerContainer := range c {\n",
"\t\tif len(dockerContainer.Names) == 0 {\n",
"\t\t\tcontinue\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// An internal helper function.\n",
"func findPodContainer(dockerContainers []*docker.APIContainers, podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {\n",
"\tfor _, dockerContainer := range dockerContainers {\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 213
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"fmt"
"math/rand"
"net/http"
"path"
"strconv"
"strings"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/parsers"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/leaky"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
)
const (
PodInfraContainerName = leaky.PodInfraContainerName
DockerPrefix = "docker://"
PodInfraContainerImage = "beta.gcr.io/google_containers/pause:2.0"
LogSuffix = "log"
)
const (
// Taken from lmctfy https://github.com/google/lmctfy/blob/master/lmctfy/controllers/cpu_controller.cc
minShares = 2
sharesPerCPU = 1024
milliCPUToCPU = 1000
// 100000 is equivalent to 100ms
quotaPeriod = 100000
)
// DockerInterface is an abstract interface for testability. It abstracts the interface of docker.Client.
type DockerInterface interface {
ListContainers(options docker.ListContainersOptions) ([]docker.APIContainers, error)
InspectContainer(id string) (*docker.Container, error)
CreateContainer(docker.CreateContainerOptions) (*docker.Container, error)
StartContainer(id string, hostConfig *docker.HostConfig) error
StopContainer(id string, timeout uint) error
RemoveContainer(opts docker.RemoveContainerOptions) error
InspectImage(image string) (*docker.Image, error)
ListImages(opts docker.ListImagesOptions) ([]docker.APIImages, error)
PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) error
RemoveImage(image string) error
Logs(opts docker.LogsOptions) error
Version() (*docker.Env, error)
Info() (*docker.Env, error)
CreateExec(docker.CreateExecOptions) (*docker.Exec, error)
StartExec(string, docker.StartExecOptions) error
InspectExec(id string) (*docker.ExecInspect, error)
AttachToContainer(opts docker.AttachToContainerOptions) error
}
// KubeletContainerName encapsulates a pod name and a Kubernetes container name.
type KubeletContainerName struct {
PodFullName string
PodUID types.UID
ContainerName string
}
// DockerPuller is an abstract interface for testability. It abstracts image pull operations.
type DockerPuller interface {
Pull(image string, secrets []api.Secret) error
IsImagePresent(image string) (bool, error)
}
// dockerPuller is the default implementation of DockerPuller.
type dockerPuller struct {
client DockerInterface
keyring credentialprovider.DockerKeyring
}
type throttledDockerPuller struct {
puller dockerPuller
limiter util.RateLimiter
}
// newDockerPuller creates a new instance of the default implementation of DockerPuller.
func newDockerPuller(client DockerInterface, qps float32, burst int) DockerPuller {
dp := dockerPuller{
client: client,
keyring: credentialprovider.NewDockerKeyring(),
}
if qps == 0.0 {
return dp
}
return &throttledDockerPuller{
puller: dp,
limiter: util.NewTokenBucketRateLimiter(qps, burst),
}
}
func parseImageName(image string) (string, string) {
return parsers.ParseRepositoryTag(image)
}
func filterHTTPError(err error, image string) error {
// docker/docker/pull/11314 prints detailed error info for docker pull.
// When it hits 502, it returns a verbose html output including an inline svg,
// which makes the output of kubectl get pods much harder to parse.
// Here converts such verbose output to a concise one.
jerr, ok := err.(*jsonmessage.JSONError)
if ok && (jerr.Code == http.StatusBadGateway ||
jerr.Code == http.StatusServiceUnavailable ||
jerr.Code == http.StatusGatewayTimeout) {
glog.V(2).Infof("Pulling image %q failed: %v", image, err)
return kubecontainer.RegistryUnavailable
} else {
return err
}
}
func (p dockerPuller) Pull(image string, secrets []api.Secret) error {
repoToPull, tag := parseImageName(image)
// If no tag was specified, use the default "latest".
if len(tag) == 0 {
tag = "latest"
}
opts := docker.PullImageOptions{
Repository: repoToPull,
Tag: tag,
}
keyring, err := credentialprovider.MakeDockerKeyring(secrets, p.keyring)
if err != nil {
return err
}
creds, haveCredentials := keyring.Lookup(repoToPull)
if !haveCredentials {
glog.V(1).Infof("Pulling image %s without credentials", image)
err := p.client.PullImage(opts, docker.AuthConfiguration{})
if err == nil {
return nil
}
// Image spec: [<registry>/]<repository>/<image>[:<version] so we count '/'
explicitRegistry := (strings.Count(image, "/") == 2)
// Hack, look for a private registry, and decorate the error with the lack of
// credentials. This is heuristic, and really probably could be done better
// by talking to the registry API directly from the kubelet here.
if explicitRegistry {
return fmt.Errorf("image pull failed for %s, this may be because there are no credentials on this request. details: (%v)", image, err)
}
return filterHTTPError(err, image)
}
var pullErrs []error
for _, currentCreds := range creds {
err := p.client.PullImage(opts, currentCreds)
// If there was no error, return success
if err == nil {
return nil
}
pullErrs = append(pullErrs, filterHTTPError(err, image))
}
return utilerrors.NewAggregate(pullErrs)
}
func (p throttledDockerPuller) Pull(image string, secrets []api.Secret) error {
if p.limiter.CanAccept() {
return p.puller.Pull(image, secrets)
}
return fmt.Errorf("pull QPS exceeded.")
}
func (p dockerPuller) IsImagePresent(image string) (bool, error) {
_, err := p.client.InspectImage(image)
if err == nil {
return true, nil
}
if err == docker.ErrNoSuchImage {
return false, nil
}
return false, err
}
func (p throttledDockerPuller) IsImagePresent(name string) (bool, error) {
return p.puller.IsImagePresent(name)
}
// TODO (random-liu) Almost never used, should we remove this?
// DockerContainers is a map of containers
type DockerContainers map[kubetypes.DockerID]*docker.APIContainers
func (c DockerContainers) FindPodContainer(podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {
for _, dockerContainer := range c {
if len(dockerContainer.Names) == 0 {
continue
}
// TODO(proppy): build the docker container name and do a map lookup instead?
dockerName, hash, err := ParseDockerName(dockerContainer.Names[0])
if err != nil {
continue
}
if dockerName.PodFullName == podFullName &&
(uid == "" || dockerName.PodUID == uid) &&
dockerName.ContainerName == containerName {
return dockerContainer, true, hash
}
}
return nil, false, 0
}
const containerNamePrefix = "k8s"
// Creates a name which can be reversed to identify both full pod name and container name.
func BuildDockerName(dockerName KubeletContainerName, container *api.Container) (string, string) {
containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16)
stableName := fmt.Sprintf("%s_%s_%s_%s",
containerNamePrefix,
containerName,
dockerName.PodFullName,
dockerName.PodUID)
return stableName, fmt.Sprintf("%s_%08x", stableName, rand.Uint32())
}
// Unpacks a container name, returning the pod full name and container name we would have used to
// construct the docker name. If we are unable to parse the name, an error is returned.
func ParseDockerName(name string) (dockerName *KubeletContainerName, hash uint64, err error) {
// For some reason docker appears to be appending '/' to names.
// If it's there, strip it.
name = strings.TrimPrefix(name, "/")
parts := strings.Split(name, "_")
if len(parts) == 0 || parts[0] != containerNamePrefix {
err = fmt.Errorf("failed to parse Docker container name %q into parts", name)
return nil, 0, err
}
if len(parts) < 6 {
		// We require at least 6 fields. We may have more in the future.
		// Anything with fewer fields than this is not something we can
		// manage.
glog.Warningf("found a container with the %q prefix, but too few fields (%d): %q", containerNamePrefix, len(parts), name)
err = fmt.Errorf("Docker container name %q has less parts than expected %v", name, parts)
return nil, 0, err
}
nameParts := strings.Split(parts[1], ".")
containerName := nameParts[0]
if len(nameParts) > 1 {
hash, err = strconv.ParseUint(nameParts[1], 16, 32)
if err != nil {
glog.Warningf("invalid container hash %q in container %q", nameParts[1], name)
}
}
podFullName := parts[2] + "_" + parts[3]
podUID := types.UID(parts[4])
return &KubeletContainerName{podFullName, podUID, containerName}, hash, nil
}
func LogSymlink(containerLogsDir, podFullName, containerName, dockerId string) string {
return path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s.%s", podFullName, containerName, dockerId, LogSuffix))
}
// Get a *docker.Client, either using the endpoint passed in, or using
// DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT path per their spec
func getDockerClient(dockerEndpoint string) (*docker.Client, error) {
if len(dockerEndpoint) > 0 {
glog.Infof("Connecting to docker on %s", dockerEndpoint)
return docker.NewClient(dockerEndpoint)
}
return docker.NewClientFromEnv()
}
func ConnectToDockerOrDie(dockerEndpoint string) DockerInterface {
if dockerEndpoint == "fake://" {
return &FakeDockerClient{
VersionInfo: docker.Env{"ApiVersion=1.18"},
}
}
client, err := getDockerClient(dockerEndpoint)
if err != nil {
glog.Fatalf("Couldn't connect to docker: %v", err)
}
return client
}
// milliCPUToQuota converts milliCPU to CFS quota and period values
func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
// CFS quota is measured in two values:
// - cfs_period_us=100ms (the amount of time to measure usage across)
// - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
// so in the above example, you are limited to 20% of a single CPU
// for multi-cpu environments, you just scale equivalent amounts
if milliCPU == 0 {
// take the default behavior from docker
return
}
// we set the period to 100ms by default
period = quotaPeriod
// we then convert your milliCPU to a value normalized over a period
quota = (milliCPU * quotaPeriod) / milliCPUToCPU
return
}
func milliCPUToShares(milliCPU int64) int64 {
if milliCPU == 0 {
// Docker converts zero milliCPU to unset, which maps to kernel default
// for unset: 1024. Return 2 here to really match kernel default for
// zero milliCPU.
return minShares
}
// Conceptually (milliCPU / milliCPUToCPU) * sharesPerCPU, but factored to improve rounding.
shares := (milliCPU * sharesPerCPU) / milliCPUToCPU
if shares < minShares {
return minShares
}
return shares
}
// GetKubeletDockerContainers lists all container or just the running ones.
// Returns a map of docker containers that we manage, keyed by container ID.
// TODO: Move this function with dockerCache to DockerManager.
func GetKubeletDockerContainers(client DockerInterface, allContainers bool) (DockerContainers, error) {
result := make(DockerContainers)
containers, err := client.ListContainers(docker.ListContainersOptions{All: allContainers})
if err != nil {
return nil, err
}
for i := range containers {
container := &containers[i]
if len(container.Names) == 0 {
continue
}
// Skip containers that we didn't create to allow users to manually
// spin up their own containers if they want.
// TODO(dchen1107): Remove the old separator "--" by end of Oct
if !strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"_") &&
!strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"--") {
glog.V(3).Infof("Docker Container: %s is not managed by kubelet.", container.Names[0])
continue
}
result[kubetypes.DockerID(container.ID)] = container
}
return result, nil
}
| pkg/kubelet/dockertools/docker.go | 1 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.9978783130645752,
0.17539136111736298,
0.00016676961968187243,
0.0009521882748231292,
0.3618013858795166
] |
{
"id": 1,
"code_window": [
"func (p throttledDockerPuller) IsImagePresent(name string) (bool, error) {\n",
"\treturn p.puller.IsImagePresent(name)\n",
"}\n",
"\n",
"// TODO (random-liu) Almost never used, should we remove this?\n",
"// DockerContainers is a map of containers\n",
"type DockerContainers map[kubetypes.DockerID]*docker.APIContainers\n",
"\n",
"func (c DockerContainers) FindPodContainer(podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {\n",
"\tfor _, dockerContainer := range c {\n",
"\t\tif len(dockerContainer.Names) == 0 {\n",
"\t\t\tcontinue\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// An internal helper function.\n",
"func findPodContainer(dockerContainers []*docker.APIContainers, podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {\n",
"\tfor _, dockerContainer := range dockerContainers {\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 213
} | package airbrake
import (
"errors"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/tobi/airbrake-go"
)
// AirbrakeHook to send exceptions to an exception-tracking service compatible
// with the Airbrake API.
type airbrakeHook struct {
APIKey string
Endpoint string
Environment string
}
func NewHook(endpoint, apiKey, env string) *airbrakeHook {
return &airbrakeHook{
APIKey: apiKey,
Endpoint: endpoint,
Environment: env,
}
}
func (hook *airbrakeHook) Fire(entry *logrus.Entry) error {
airbrake.ApiKey = hook.APIKey
airbrake.Endpoint = hook.Endpoint
airbrake.Environment = hook.Environment
var notifyErr error
err, ok := entry.Data["error"].(error)
if ok {
notifyErr = err
} else {
notifyErr = errors.New(entry.Message)
}
airErr := airbrake.Notify(notifyErr)
if airErr != nil {
return fmt.Errorf("Failed to send error to Airbrake: %s", airErr)
}
return nil
}
func (hook *airbrakeHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.ErrorLevel,
logrus.FatalLevel,
logrus.PanicLevel,
}
}
| Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00017495757492724806,
0.00017054825730156153,
0.00016597325156908482,
0.00017055781790986657,
0.0000027546711862669326
] |
{
"id": 1,
"code_window": [
"func (p throttledDockerPuller) IsImagePresent(name string) (bool, error) {\n",
"\treturn p.puller.IsImagePresent(name)\n",
"}\n",
"\n",
"// TODO (random-liu) Almost never used, should we remove this?\n",
"// DockerContainers is a map of containers\n",
"type DockerContainers map[kubetypes.DockerID]*docker.APIContainers\n",
"\n",
"func (c DockerContainers) FindPodContainer(podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {\n",
"\tfor _, dockerContainer := range c {\n",
"\t\tif len(dockerContainer.Names) == 0 {\n",
"\t\t\tcontinue\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// An internal helper function.\n",
"func findPodContainer(dockerContainers []*docker.APIContainers, podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {\n",
"\tfor _, dockerContainer := range dockerContainers {\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 213
} | package logrus_papertrail
import (
"fmt"
"net"
"os"
"time"
"github.com/Sirupsen/logrus"
)
const (
format = "Jan 2 15:04:05"
)
// PapertrailHook to send logs to a logging service compatible with the Papertrail API.
type PapertrailHook struct {
Host string
Port int
AppName string
UDPConn net.Conn
}
// NewPapertrailHook creates a hook to be added to an instance of logger.
func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) {
conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port))
return &PapertrailHook{host, port, appName, conn}, err
}
// Fire is called when a log event is fired.
func (hook *PapertrailHook) Fire(entry *logrus.Entry) error {
date := time.Now().Format(format)
payload := fmt.Sprintf("<22> %s %s: [%s] %s", date, hook.AppName, entry.Level, entry.Message)
bytesWritten, err := hook.UDPConn.Write([]byte(payload))
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err)
return err
}
return nil
}
// Levels returns the available logging levels.
func (hook *PapertrailHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
}
}
| Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00017242088506463915,
0.00016964018868748099,
0.00016475362644996494,
0.0001706914626993239,
0.0000028240001483936794
] |
{
"id": 1,
"code_window": [
"func (p throttledDockerPuller) IsImagePresent(name string) (bool, error) {\n",
"\treturn p.puller.IsImagePresent(name)\n",
"}\n",
"\n",
"// TODO (random-liu) Almost never used, should we remove this?\n",
"// DockerContainers is a map of containers\n",
"type DockerContainers map[kubetypes.DockerID]*docker.APIContainers\n",
"\n",
"func (c DockerContainers) FindPodContainer(podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {\n",
"\tfor _, dockerContainer := range c {\n",
"\t\tif len(dockerContainer.Names) == 0 {\n",
"\t\t\tcontinue\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// An internal helper function.\n",
"func findPodContainer(dockerContainers []*docker.APIContainers, podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {\n",
"\tfor _, dockerContainer := range dockerContainers {\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 213
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"io"
"time"
"github.com/spf13/cobra"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/kubectl"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
)
// DeleteOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of
// referencing the cmd.Flags()
type DeleteOptions struct {
Filenames []string
}
const (
delete_long = `Delete resources by filenames, stdin, resources and names, or by resources and label selector.
JSON and YAML formats are accepted.
Only one type of the arguments may be specified: filenames, resources and names, or resources and label selector
Note that the delete command does NOT do resource version checks, so if someone
submits an update to a resource right when you submit a delete, their update
will be lost along with the rest of the resource.`
delete_example = `# Delete a pod using the type and name specified in pod.json.
$ kubectl delete -f ./pod.json
# Delete a pod based on the type and name in the JSON passed into stdin.
$ cat pod.json | kubectl delete -f -
# Delete pods and services with same names "baz" and "foo"
$ kubectl delete pod,service baz foo
# Delete pods and services with label name=myLabel.
$ kubectl delete pods,services -l name=myLabel
# Delete a pod with UID 1234-56-7890-234234-456456.
$ kubectl delete pod 1234-56-7890-234234-456456
# Delete all pods
$ kubectl delete pods --all`
)
func NewCmdDelete(f *cmdutil.Factory, out io.Writer) *cobra.Command {
options := &DeleteOptions{}
// retrieve a list of handled resources from printer as valid args
validArgs := []string{}
p, err := f.Printer(nil, false, false, false, false, []string{})
cmdutil.CheckErr(err)
if p != nil {
validArgs = p.HandledResources()
}
cmd := &cobra.Command{
Use: "delete ([-f FILENAME] | TYPE [(NAME | -l label | --all)])",
Short: "Delete resources by filenames, stdin, resources and names, or by resources and label selector.",
Long: delete_long,
Example: delete_example,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(cmdutil.ValidateOutputArgs(cmd))
err := RunDelete(f, out, cmd, args, options)
cmdutil.CheckErr(err)
},
ValidArgs: validArgs,
}
usage := "Filename, directory, or URL to a file containing the resource to delete."
kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage)
cmd.Flags().StringP("selector", "l", "", "Selector (label query) to filter on.")
cmd.Flags().Bool("all", false, "[-all] to select all the specified resources.")
cmd.Flags().Bool("ignore-not-found", false, "Treat \"resource not found\" as a successful delete. Defaults to \"true\" when --all is specified.")
cmd.Flags().Bool("cascade", true, "If true, cascade the deletion of the resources managed by this resource (e.g. Pods created by a ReplicationController). Default true.")
cmd.Flags().Int("grace-period", -1, "Period of time in seconds given to the resource to terminate gracefully. Ignored if negative.")
cmd.Flags().Duration("timeout", 0, "The length of time to wait before giving up on a delete, zero means determine a timeout from the size of the object")
cmdutil.AddOutputFlagsForMutation(cmd)
return cmd
}
func RunDelete(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, options *DeleteOptions) error {
cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
if err != nil {
return err
}
deleteAll := cmdutil.GetFlagBool(cmd, "all")
mapper, typer := f.Object()
r := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()).
ContinueOnError().
NamespaceParam(cmdNamespace).DefaultNamespace().
FilenameParam(enforceNamespace, options.Filenames...).
SelectorParam(cmdutil.GetFlagString(cmd, "selector")).
SelectAllParam(deleteAll).
ResourceTypeOrNameArgs(false, args...).RequireObject(false).
Flatten().
Do()
err = r.Err()
if err != nil {
return err
}
ignoreNotFound := cmdutil.GetFlagBool(cmd, "ignore-not-found")
if deleteAll {
f := cmd.Flags().Lookup("ignore-not-found")
// The flag should never be missing
if f == nil {
return fmt.Errorf("missing --ignore-not-found flag")
}
// If the user didn't explicitly set the option, default to ignoring NotFound errors when used with --all
if !f.Changed {
ignoreNotFound = true
}
}
shortOutput := cmdutil.GetFlagString(cmd, "output") == "name"
// By default use a reaper to delete all related resources.
if cmdutil.GetFlagBool(cmd, "cascade") {
return ReapResult(r, f, out, cmdutil.GetFlagBool(cmd, "cascade"), ignoreNotFound, cmdutil.GetFlagDuration(cmd, "timeout"), cmdutil.GetFlagInt(cmd, "grace-period"), shortOutput, mapper)
}
return DeleteResult(r, out, ignoreNotFound, shortOutput, mapper)
}
func ReapResult(r *resource.Result, f *cmdutil.Factory, out io.Writer, isDefaultDelete, ignoreNotFound bool, timeout time.Duration, gracePeriod int, shortOutput bool, mapper meta.RESTMapper) error {
found := 0
if ignoreNotFound {
r = r.IgnoreErrors(errors.IsNotFound)
}
err := r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
found++
reaper, err := f.Reaper(info.Mapping)
if err != nil {
// If there is no reaper for this resources and the user didn't explicitly ask for stop.
if kubectl.IsNoSuchReaperError(err) && isDefaultDelete {
return deleteResource(info, out, shortOutput, mapper)
}
return cmdutil.AddSourceToErr("reaping", info.Source, err)
}
var options *api.DeleteOptions
if gracePeriod >= 0 {
options = api.NewDeleteOptions(int64(gracePeriod))
}
if _, err := reaper.Stop(info.Namespace, info.Name, timeout, options); err != nil {
return cmdutil.AddSourceToErr("stopping", info.Source, err)
}
cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "deleted")
return nil
})
if err != nil {
return err
}
if found == 0 {
fmt.Fprintf(out, "No resources found\n")
}
return nil
}
func DeleteResult(r *resource.Result, out io.Writer, ignoreNotFound bool, shortOutput bool, mapper meta.RESTMapper) error {
found := 0
if ignoreNotFound {
r = r.IgnoreErrors(errors.IsNotFound)
}
err := r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
found++
return deleteResource(info, out, shortOutput, mapper)
})
if err != nil {
return err
}
if found == 0 {
fmt.Fprintf(out, "No resources found\n")
}
return nil
}
func deleteResource(info *resource.Info, out io.Writer, shortOutput bool, mapper meta.RESTMapper) error {
if err := resource.NewHelper(info.Client, info.Mapping).Delete(info.Namespace, info.Name); err != nil {
return cmdutil.AddSourceToErr("deleting", info.Source, err)
}
cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "deleted")
return nil
}
| pkg/kubectl/cmd/delete.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.0013979399809613824,
0.00022665172582492232,
0.0001628644677111879,
0.00017067542648874223,
0.00025564306997694075
] |
{
"id": 2,
"code_window": [
"\t\tif len(dockerContainer.Names) == 0 {\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\t// TODO(proppy): build the docker container name and do a map lookup instead?\n",
"\t\tdockerName, hash, err := ParseDockerName(dockerContainer.Names[0])\n",
"\t\tif err != nil {\n",
"\t\t\tcontinue\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 222
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"encoding/json"
"fmt"
"hash/adler32"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"github.com/docker/docker/pkg/jsonmessage"
docker "github.com/fsouza/go-dockerclient"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/network"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
)
func verifyCalls(t *testing.T, fakeDocker *FakeDockerClient, calls []string) {
fakeDocker.Lock()
defer fakeDocker.Unlock()
verifyStringArrayEquals(t, fakeDocker.called, calls)
}
func verifyStringArrayEquals(t *testing.T, actual, expected []string) {
invalid := len(actual) != len(expected)
if !invalid {
for ix, value := range actual {
if expected[ix] != value {
invalid = true
}
}
}
if invalid {
t.Errorf("Expected: %#v, Actual: %#v", expected, actual)
}
}
func TestGetContainerID(t *testing.T) {
fakeDocker := &FakeDockerClient{}
fakeDocker.ContainerList = []docker.APIContainers{
{
ID: "foobar",
Names: []string{"/k8s_foo_qux_ns_1234_42"},
},
{
ID: "barbar",
Names: []string{"/k8s_bar_qux_ns_2565_42"},
},
}
fakeDocker.Container = &docker.Container{
ID: "foobar",
}
dockerContainers, err := GetKubeletDockerContainers(fakeDocker, false)
if err != nil {
t.Errorf("Expected no error, Got %#v", err)
}
if len(dockerContainers) != 2 {
t.Errorf("Expected %#v, Got %#v", fakeDocker.ContainerList, dockerContainers)
}
verifyCalls(t, fakeDocker, []string{"list"})
dockerContainer, found, _ := dockerContainers.FindPodContainer("qux_ns", "", "foo")
if dockerContainer == nil || !found {
t.Errorf("Failed to find container %#v", dockerContainer)
}
fakeDocker.ClearCalls()
dockerContainer, found, _ = dockerContainers.FindPodContainer("foobar", "", "foo")
verifyCalls(t, fakeDocker, []string{})
if dockerContainer != nil || found {
t.Errorf("Should not have found container %#v", dockerContainer)
}
}
func verifyPackUnpack(t *testing.T, podNamespace, podUID, podName, containerName string) {
container := &api.Container{Name: containerName}
hasher := adler32.New()
util.DeepHashObject(hasher, *container)
computedHash := uint64(hasher.Sum32())
podFullName := fmt.Sprintf("%s_%s", podName, podNamespace)
_, name := BuildDockerName(KubeletContainerName{podFullName, types.UID(podUID), container.Name}, container)
returned, hash, err := ParseDockerName(name)
if err != nil {
t.Errorf("Failed to parse Docker container name %q: %v", name, err)
}
if podFullName != returned.PodFullName || podUID != string(returned.PodUID) || containerName != returned.ContainerName || computedHash != hash {
t.Errorf("For (%s, %s, %s, %d), unpacked (%s, %s, %s, %d)", podFullName, podUID, containerName, computedHash, returned.PodFullName, returned.PodUID, returned.ContainerName, hash)
}
}
func TestContainerNaming(t *testing.T) {
podUID := "12345678"
verifyPackUnpack(t, "file", podUID, "name", "container")
verifyPackUnpack(t, "file", podUID, "name-with-dashes", "container")
// UID is same as pod name
verifyPackUnpack(t, "file", podUID, podUID, "container")
// No Container name
verifyPackUnpack(t, "other", podUID, "name", "")
container := &api.Container{Name: "container"}
podName := "foo"
podNamespace := "test"
name := fmt.Sprintf("k8s_%s_%s_%s_%s_42", container.Name, podName, podNamespace, podUID)
podFullName := fmt.Sprintf("%s_%s", podName, podNamespace)
returned, hash, err := ParseDockerName(name)
if err != nil {
t.Errorf("Failed to parse Docker container name %q: %v", name, err)
}
if returned.PodFullName != podFullName || string(returned.PodUID) != podUID || returned.ContainerName != container.Name || hash != 0 {
t.Errorf("unexpected parse: %s %s %s %d", returned.PodFullName, returned.PodUID, returned.ContainerName, hash)
}
}
func TestVersion(t *testing.T) {
fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.3", "ApiVersion=1.15"}}
manager := &DockerManager{client: fakeDocker}
version, err := manager.Version()
if err != nil {
t.Errorf("got error while getting docker server version - %s", err)
}
expectedVersion, _ := docker.NewAPIVersion("1.15")
if e, a := expectedVersion.String(), version.String(); e != a {
t.Errorf("invalid docker server version. expected: %v, got: %v", e, a)
}
}
func TestExecSupportExists(t *testing.T) {
fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.3.0", "ApiVersion=1.15"}}
runner := &DockerManager{client: fakeDocker}
useNativeExec, err := runner.nativeExecSupportExists()
if err != nil {
t.Errorf("got error while checking for exec support - %s", err)
}
if !useNativeExec {
t.Errorf("invalid exec support check output. Expected true")
}
}
func TestExecSupportNotExists(t *testing.T) {
fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.2", "ApiVersion=1.14"}}
runner := &DockerManager{client: fakeDocker}
useNativeExec, _ := runner.nativeExecSupportExists()
if useNativeExec {
t.Errorf("invalid exec support check output.")
}
}
func TestDockerContainerCommand(t *testing.T) {
runner := &DockerManager{}
containerID := kubetypes.DockerID("1234").ContainerID()
command := []string{"ls"}
cmd, _ := runner.getRunInContainerCommand(containerID, command)
if cmd.Dir != "/var/lib/docker/execdriver/native/"+containerID.ID {
t.Errorf("unexpected command CWD: %s", cmd.Dir)
}
if !reflect.DeepEqual(cmd.Args, []string{"/usr/sbin/nsinit", "exec", "ls"}) {
t.Errorf("unexpected command args: %s", cmd.Args)
}
}
func TestParseImageName(t *testing.T) {
tests := []struct {
imageName string
name string
tag string
}{
{"ubuntu", "ubuntu", ""},
{"ubuntu:2342", "ubuntu", "2342"},
{"ubuntu:latest", "ubuntu", "latest"},
{"foo/bar:445566", "foo/bar", "445566"},
{"registry.example.com:5000/foobar", "registry.example.com:5000/foobar", ""},
{"registry.example.com:5000/foobar:5342", "registry.example.com:5000/foobar", "5342"},
{"registry.example.com:5000/foobar:latest", "registry.example.com:5000/foobar", "latest"},
}
for _, test := range tests {
name, tag := parseImageName(test.imageName)
if name != test.name || tag != test.tag {
t.Errorf("Expected name/tag: %s/%s, got %s/%s", test.name, test.tag, name, tag)
}
}
}
func TestPullWithNoSecrets(t *testing.T) {
tests := []struct {
imageName string
expectedImage string
}{
{"ubuntu", "ubuntu:latest using {}"},
{"ubuntu:2342", "ubuntu:2342 using {}"},
{"ubuntu:latest", "ubuntu:latest using {}"},
{"foo/bar:445566", "foo/bar:445566 using {}"},
{"registry.example.com:5000/foobar", "registry.example.com:5000/foobar:latest using {}"},
{"registry.example.com:5000/foobar:5342", "registry.example.com:5000/foobar:5342 using {}"},
{"registry.example.com:5000/foobar:latest", "registry.example.com:5000/foobar:latest using {}"},
}
for _, test := range tests {
fakeKeyring := &credentialprovider.FakeKeyring{}
fakeClient := &FakeDockerClient{}
dp := dockerPuller{
client: fakeClient,
keyring: fakeKeyring,
}
err := dp.Pull(test.imageName, []api.Secret{})
if err != nil {
t.Errorf("unexpected non-nil err: %s", err)
continue
}
if e, a := 1, len(fakeClient.pulled); e != a {
t.Errorf("%s: expected 1 pulled image, got %d: %v", test.imageName, a, fakeClient.pulled)
continue
}
if e, a := test.expectedImage, fakeClient.pulled[0]; e != a {
t.Errorf("%s: expected pull of %q, but got %q", test.imageName, e, a)
}
}
}
func TestPullWithJSONError(t *testing.T) {
tests := map[string]struct {
imageName string
err error
expectedError string
}{
"Json error": {
"ubuntu",
&jsonmessage.JSONError{Code: 50, Message: "Json error"},
"Json error",
},
"Bad gateway": {
"ubuntu",
&jsonmessage.JSONError{Code: 502, Message: "<!doctype html>\n<html class=\"no-js\" lang=\"\">\n <head>\n </head>\n <body>\n <h1>Oops, there was an error!</h1>\n <p>We have been contacted of this error, feel free to check out <a href=\"http://status.docker.com/\">status.docker.com</a>\n to see if there is a bigger issue.</p>\n\n </body>\n</html>"},
kubecontainer.RegistryUnavailable.Error(),
},
}
for i, test := range tests {
fakeKeyring := &credentialprovider.FakeKeyring{}
fakeClient := &FakeDockerClient{
Errors: map[string]error{"pull": test.err},
}
puller := &dockerPuller{
client: fakeClient,
keyring: fakeKeyring,
}
err := puller.Pull(test.imageName, []api.Secret{})
if err == nil || !strings.Contains(err.Error(), test.expectedError) {
t.Errorf("%s: expect error %s, got : %s", i, test.expectedError, err)
continue
}
}
}
func TestPullWithSecrets(t *testing.T) {
// auth value is equivalent to: "username":"passed-user","password":"passed-password"
dockerCfg := map[string]map[string]string{"index.docker.io/v1/": {"email": "passed-email", "auth": "cGFzc2VkLXVzZXI6cGFzc2VkLXBhc3N3b3Jk"}}
dockercfgContent, err := json.Marshal(dockerCfg)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
dockerConfigJson := map[string]map[string]map[string]string{"auths": dockerCfg}
dockerConfigJsonContent, err := json.Marshal(dockerConfigJson)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
tests := map[string]struct {
imageName string
passedSecrets []api.Secret
builtInDockerConfig credentialprovider.DockerConfig
expectedPulls []string
}{
"no matching secrets": {
"ubuntu",
[]api.Secret{},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{}),
[]string{"ubuntu:latest using {}"},
},
"default keyring secrets": {
"ubuntu",
[]api.Secret{},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"index.docker.io/v1/": {"built-in", "password", "email"}}),
[]string{`ubuntu:latest using {"username":"built-in","password":"password","email":"email"}`},
},
"default keyring secrets unused": {
"ubuntu",
[]api.Secret{},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"extraneous": {"built-in", "password", "email"}}),
[]string{`ubuntu:latest using {}`},
},
"builtin keyring secrets, but use passed": {
"ubuntu",
[]api.Secret{{Type: api.SecretTypeDockercfg, Data: map[string][]byte{api.DockerConfigKey: dockercfgContent}}},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"index.docker.io/v1/": {"built-in", "password", "email"}}),
[]string{`ubuntu:latest using {"username":"passed-user","password":"passed-password","email":"passed-email"}`},
},
"builtin keyring secrets, but use passed with new docker config": {
"ubuntu",
[]api.Secret{{Type: api.SecretTypeDockerConfigJson, Data: map[string][]byte{api.DockerConfigJsonKey: dockerConfigJsonContent}}},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"index.docker.io/v1/": {"built-in", "password", "email"}}),
[]string{`ubuntu:latest using {"username":"passed-user","password":"passed-password","email":"passed-email"}`},
},
}
for _, test := range tests {
builtInKeyRing := &credentialprovider.BasicDockerKeyring{}
builtInKeyRing.Add(test.builtInDockerConfig)
fakeClient := &FakeDockerClient{}
dp := dockerPuller{
client: fakeClient,
keyring: builtInKeyRing,
}
err := dp.Pull(test.imageName, test.passedSecrets)
if err != nil {
t.Errorf("unexpected non-nil err: %s", err)
continue
}
if e, a := 1, len(fakeClient.pulled); e != a {
t.Errorf("%s: expected 1 pulled image, got %d: %v", test.imageName, a, fakeClient.pulled)
continue
}
if e, a := test.expectedPulls, fakeClient.pulled; !reflect.DeepEqual(e, a) {
t.Errorf("%s: expected pull of %v, but got %v", test.imageName, e, a)
}
}
}
func TestDockerKeyringLookupFails(t *testing.T) {
fakeKeyring := &credentialprovider.FakeKeyring{}
fakeClient := &FakeDockerClient{
Errors: map[string]error{"pull": fmt.Errorf("test error")},
}
dp := dockerPuller{
client: fakeClient,
keyring: fakeKeyring,
}
err := dp.Pull("host/repository/image:version", []api.Secret{})
if err == nil {
t.Errorf("unexpected non-error")
}
msg := "image pull failed for host/repository/image:version, this may be because there are no credentials on this request. details: (test error)"
if err.Error() != msg {
t.Errorf("expected: %s, saw: %s", msg, err.Error())
}
}
func TestDockerKeyringLookup(t *testing.T) {
ada := docker.AuthConfiguration{
Username: "ada",
Password: "smash",
Email: "[email protected]",
}
grace := docker.AuthConfiguration{
Username: "grace",
Password: "squash",
Email: "[email protected]",
}
dk := &credentialprovider.BasicDockerKeyring{}
dk.Add(credentialprovider.DockerConfig{
"bar.example.com/pong": credentialprovider.DockerConfigEntry{
Username: grace.Username,
Password: grace.Password,
Email: grace.Email,
},
"bar.example.com": credentialprovider.DockerConfigEntry{
Username: ada.Username,
Password: ada.Password,
Email: ada.Email,
},
})
tests := []struct {
image string
match []docker.AuthConfiguration
ok bool
}{
// direct match
{"bar.example.com", []docker.AuthConfiguration{ada}, true},
// direct match deeper than other possible matches
{"bar.example.com/pong", []docker.AuthConfiguration{grace, ada}, true},
// no direct match, deeper path ignored
{"bar.example.com/ping", []docker.AuthConfiguration{ada}, true},
// match first part of path token
{"bar.example.com/pongz", []docker.AuthConfiguration{grace, ada}, true},
// match regardless of sub-path
{"bar.example.com/pong/pang", []docker.AuthConfiguration{grace, ada}, true},
// no host match
{"example.com", []docker.AuthConfiguration{}, false},
{"foo.example.com", []docker.AuthConfiguration{}, false},
}
for i, tt := range tests {
match, ok := dk.Lookup(tt.image)
if tt.ok != ok {
t.Errorf("case %d: expected ok=%t, got %t", i, tt.ok, ok)
}
if !reflect.DeepEqual(tt.match, match) {
t.Errorf("case %d: expected match=%#v, got %#v", i, tt.match, match)
}
}
}
// This validates that dockercfg entries with a scheme and url path are properly matched
// by images that only match the hostname.
// NOTE: the above covers the case of a more specific match trumping just hostname.
func TestIssue3797(t *testing.T) {
rex := docker.AuthConfiguration{
Username: "rex",
Password: "tiny arms",
Email: "[email protected]",
}
dk := &credentialprovider.BasicDockerKeyring{}
dk.Add(credentialprovider.DockerConfig{
"https://quay.io/v1/": credentialprovider.DockerConfigEntry{
Username: rex.Username,
Password: rex.Password,
Email: rex.Email,
},
})
tests := []struct {
image string
match []docker.AuthConfiguration
ok bool
}{
// direct match
{"quay.io", []docker.AuthConfiguration{rex}, true},
// partial matches
{"quay.io/foo", []docker.AuthConfiguration{rex}, true},
{"quay.io/foo/bar", []docker.AuthConfiguration{rex}, true},
}
for i, tt := range tests {
match, ok := dk.Lookup(tt.image)
if tt.ok != ok {
t.Errorf("case %d: expected ok=%t, got %t", i, tt.ok, ok)
}
if !reflect.DeepEqual(tt.match, match) {
t.Errorf("case %d: expected match=%#v, got %#v", i, tt.match, match)
}
}
}
type imageTrackingDockerClient struct {
*FakeDockerClient
imageName string
}
func (f *imageTrackingDockerClient) InspectImage(name string) (image *docker.Image, err error) {
image, err = f.FakeDockerClient.InspectImage(name)
f.imageName = name
return
}
func TestIsImagePresent(t *testing.T) {
cl := &imageTrackingDockerClient{&FakeDockerClient{}, ""}
puller := &dockerPuller{
client: cl,
}
_, _ = puller.IsImagePresent("abc:123")
if cl.imageName != "abc:123" {
t.Errorf("expected inspection of image abc:123, instead inspected image %v", cl.imageName)
}
}
type podsByID []*kubecontainer.Pod
func (b podsByID) Len() int { return len(b) }
func (b podsByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b podsByID) Less(i, j int) bool { return b[i].ID < b[j].ID }
type containersByID []*kubecontainer.Container
func (b containersByID) Len() int { return len(b) }
func (b containersByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b containersByID) Less(i, j int) bool { return b[i].ID.ID < b[j].ID.ID }
func TestFindContainersByPod(t *testing.T) {
tests := []struct {
containerList []docker.APIContainers
exitedContainerList []docker.APIContainers
all bool
expectedPods []*kubecontainer.Pod
}{
{
[]docker.APIContainers{
{
ID: "foobar",
Names: []string{"/k8s_foobar.1234_qux_ns_1234_42"},
},
{
ID: "barbar",
Names: []string{"/k8s_barbar.1234_qux_ns_2343_42"},
},
{
ID: "baz",
Names: []string{"/k8s_baz.1234_qux_ns_1234_42"},
},
},
[]docker.APIContainers{
{
ID: "barfoo",
Names: []string{"/k8s_barfoo.1234_qux_ns_1234_42"},
},
{
ID: "bazbaz",
Names: []string{"/k8s_bazbaz.1234_qux_ns_5678_42"},
},
},
false,
[]*kubecontainer.Pod{
{
ID: "1234",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubetypes.DockerID("foobar").ContainerID(),
Name: "foobar",
Hash: 0x1234,
},
{
ID: kubetypes.DockerID("baz").ContainerID(),
Name: "baz",
Hash: 0x1234,
},
},
},
{
ID: "2343",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubetypes.DockerID("barbar").ContainerID(),
Name: "barbar",
Hash: 0x1234,
},
},
},
},
},
{
[]docker.APIContainers{
{
ID: "foobar",
Names: []string{"/k8s_foobar.1234_qux_ns_1234_42"},
},
{
ID: "barbar",
Names: []string{"/k8s_barbar.1234_qux_ns_2343_42"},
},
{
ID: "baz",
Names: []string{"/k8s_baz.1234_qux_ns_1234_42"},
},
},
[]docker.APIContainers{
{
ID: "barfoo",
Names: []string{"/k8s_barfoo.1234_qux_ns_1234_42"},
},
{
ID: "bazbaz",
Names: []string{"/k8s_bazbaz.1234_qux_ns_5678_42"},
},
},
true,
[]*kubecontainer.Pod{
{
ID: "1234",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubetypes.DockerID("foobar").ContainerID(),
Name: "foobar",
Hash: 0x1234,
},
{
ID: kubetypes.DockerID("barfoo").ContainerID(),
Name: "barfoo",
Hash: 0x1234,
},
{
ID: kubetypes.DockerID("baz").ContainerID(),
Name: "baz",
Hash: 0x1234,
},
},
},
{
ID: "2343",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubetypes.DockerID("barbar").ContainerID(),
Name: "barbar",
Hash: 0x1234,
},
},
},
{
ID: "5678",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubetypes.DockerID("bazbaz").ContainerID(),
Name: "bazbaz",
Hash: 0x1234,
},
},
},
},
},
{
[]docker.APIContainers{},
[]docker.APIContainers{},
true,
nil,
},
}
fakeClient := &FakeDockerClient{}
np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	// image back-off is set to nil, this test shouldn't pull images
containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, &cadvisorapi.MachineInfo{}, PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, nil)
for i, test := range tests {
fakeClient.ContainerList = test.containerList
fakeClient.ExitedContainerList = test.exitedContainerList
result, _ := containerManager.GetPods(test.all)
for i := range result {
sort.Sort(containersByID(result[i].Containers))
}
for i := range test.expectedPods {
sort.Sort(containersByID(test.expectedPods[i].Containers))
}
sort.Sort(podsByID(result))
sort.Sort(podsByID(test.expectedPods))
if !reflect.DeepEqual(test.expectedPods, result) {
t.Errorf("%d: expected: %#v, saw: %#v", i, test.expectedPods, result)
}
}
}
func TestMakePortsAndBindings(t *testing.T) {
ports := []kubecontainer.PortMapping{
{
ContainerPort: 80,
HostPort: 8080,
HostIP: "127.0.0.1",
},
{
ContainerPort: 443,
HostPort: 443,
Protocol: "tcp",
},
{
ContainerPort: 444,
HostPort: 444,
Protocol: "udp",
},
{
ContainerPort: 445,
HostPort: 445,
Protocol: "foobar",
},
{
ContainerPort: 443,
HostPort: 446,
Protocol: "tcp",
},
{
ContainerPort: 443,
HostPort: 446,
Protocol: "udp",
},
}
exposedPorts, bindings := makePortsAndBindings(ports)
// Count the expected exposed ports and bindings
expectedExposedPorts := map[string]struct{}{}
for _, binding := range ports {
dockerKey := strconv.Itoa(binding.ContainerPort) + "/" + string(binding.Protocol)
expectedExposedPorts[dockerKey] = struct{}{}
}
// Should expose right ports in docker
if len(expectedExposedPorts) != len(exposedPorts) {
t.Errorf("Unexpected ports and bindings, %#v %#v %#v", ports, exposedPorts, bindings)
}
// Construct expected bindings
expectPortBindings := map[string][]docker.PortBinding{
"80/tcp": {
docker.PortBinding{
HostPort: "8080",
HostIP: "127.0.0.1",
},
},
"443/tcp": {
docker.PortBinding{
HostPort: "443",
HostIP: "",
},
docker.PortBinding{
HostPort: "446",
HostIP: "",
},
},
"443/udp": {
docker.PortBinding{
HostPort: "446",
HostIP: "",
},
},
"444/udp": {
docker.PortBinding{
HostPort: "444",
HostIP: "",
},
},
"445/tcp": {
docker.PortBinding{
HostPort: "445",
HostIP: "",
},
},
}
	// iterate the bindings by dockerPort, and check its portBindings
for dockerPort, portBindings := range bindings {
switch dockerPort {
case "80/tcp", "443/tcp", "443/udp", "444/udp", "445/tcp":
if !reflect.DeepEqual(expectPortBindings[string(dockerPort)], portBindings) {
t.Errorf("Unexpected portbindings for %#v, expected: %#v, but got: %#v",
dockerPort, expectPortBindings[string(dockerPort)], portBindings)
}
default:
t.Errorf("Unexpected docker port: %#v with portbindings: %#v", dockerPort, portBindings)
}
}
}
func TestMilliCPUToQuota(t *testing.T) {
testCases := []struct {
input int64
quota int64
period int64
}{
{
input: int64(0),
quota: int64(0),
period: int64(0),
},
{
input: int64(200),
quota: int64(20000),
period: int64(100000),
},
{
input: int64(500),
quota: int64(50000),
period: int64(100000),
},
{
input: int64(1000),
quota: int64(100000),
period: int64(100000),
},
{
input: int64(1500),
quota: int64(150000),
period: int64(100000),
},
}
for _, testCase := range testCases {
quota, period := milliCPUToQuota(testCase.input)
if quota != testCase.quota || period != testCase.period {
t.Errorf("Input %v, expected quota %v period %v, but got quota %v period %v", testCase.input, testCase.quota, testCase.period, quota, period)
}
}
}
| pkg/kubelet/dockertools/docker_test.go | 1 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.9969504475593567,
0.058686062693595886,
0.0001623723073862493,
0.0001731185766402632,
0.2071809321641922
] |
{
"id": 2,
"code_window": [
"\t\tif len(dockerContainer.Names) == 0 {\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\t// TODO(proppy): build the docker container name and do a map lookup instead?\n",
"\t\tdockerName, hash, err := ParseDockerName(dockerContainer.Names[0])\n",
"\t\tif err != nil {\n",
"\t\t\tcontinue\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 222
} | language: go
install: go get -t
| Godeps/_workspace/src/github.com/imdario/mergo/.travis.yml | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.0001766398490872234,
0.0001766398490872234,
0.0001766398490872234,
0.0001766398490872234,
0
] |
{
"id": 2,
"code_window": [
"\t\tif len(dockerContainer.Names) == 0 {\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\t// TODO(proppy): build the docker container name and do a map lookup instead?\n",
"\t\tdockerName, hash, err := ParseDockerName(dockerContainer.Names[0])\n",
"\t\tif err != nil {\n",
"\t\t\tcontinue\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 222
} | // Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
// Licensed under the same terms as Go itself:
// https://code.google.com/p/go/source/browse/LICENSE
// Package http2 implements the HTTP/2 protocol.
//
// This is a work in progress. This package is low-level and intended
// to be used directly by very few people. Most users will use it
// indirectly through integration with the net/http package. See
// ConfigureServer. That ConfigureServer call will likely be automatic
// or available via an empty import in the future.
//
// This package currently targets draft-14. See http://http2.github.io/
package http2
import (
"bufio"
"fmt"
"io"
"net/http"
"strconv"
"sync"
)
var VerboseLogs = false
const (
// ClientPreface is the string that must be sent by new
// connections from clients.
ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
// SETTINGS_MAX_FRAME_SIZE default
// http://http2.github.io/http2-spec/#rfc.section.6.5.2
initialMaxFrameSize = 16384
// NextProtoTLS is the NPN/ALPN protocol negotiated during
// HTTP/2's TLS setup.
NextProtoTLS = "h2"
// http://http2.github.io/http2-spec/#SettingValues
initialHeaderTableSize = 4096
initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
defaultMaxReadFrameSize = 1 << 20
)
var (
clientPreface = []byte(ClientPreface)
)
type streamState int
const (
stateIdle streamState = iota
stateOpen
stateHalfClosedLocal
stateHalfClosedRemote
stateResvLocal
stateResvRemote
stateClosed
)
var stateName = [...]string{
stateIdle: "Idle",
stateOpen: "Open",
stateHalfClosedLocal: "HalfClosedLocal",
stateHalfClosedRemote: "HalfClosedRemote",
stateResvLocal: "ResvLocal",
stateResvRemote: "ResvRemote",
stateClosed: "Closed",
}
func (st streamState) String() string {
return stateName[st]
}
// Setting is a setting parameter: which setting it is, and its value.
type Setting struct {
// ID is which setting is being set.
// See http://http2.github.io/http2-spec/#SettingValues
ID SettingID
// Val is the value.
Val uint32
}
func (s Setting) String() string {
return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
}
// Valid reports whether the setting is valid.
func (s Setting) Valid() error {
// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
switch s.ID {
case SettingEnablePush:
if s.Val != 1 && s.Val != 0 {
return ConnectionError(ErrCodeProtocol)
}
case SettingInitialWindowSize:
if s.Val > 1<<31-1 {
return ConnectionError(ErrCodeFlowControl)
}
case SettingMaxFrameSize:
if s.Val < 16384 || s.Val > 1<<24-1 {
return ConnectionError(ErrCodeProtocol)
}
}
return nil
}
// A SettingID is an HTTP/2 setting as defined in
// http://http2.github.io/http2-spec/#iana-settings
type SettingID uint16
const (
SettingHeaderTableSize SettingID = 0x1
SettingEnablePush SettingID = 0x2
SettingMaxConcurrentStreams SettingID = 0x3
SettingInitialWindowSize SettingID = 0x4
SettingMaxFrameSize SettingID = 0x5
SettingMaxHeaderListSize SettingID = 0x6
)
var settingName = map[SettingID]string{
SettingHeaderTableSize: "HEADER_TABLE_SIZE",
SettingEnablePush: "ENABLE_PUSH",
SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
SettingMaxFrameSize: "MAX_FRAME_SIZE",
SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
}
func (s SettingID) String() string {
if v, ok := settingName[s]; ok {
return v
}
return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
}
func validHeader(v string) bool {
if len(v) == 0 {
return false
}
for _, r := range v {
// "Just as in HTTP/1.x, header field names are
// strings of ASCII characters that are compared in a
// case-insensitive fashion. However, header field
// names MUST be converted to lowercase prior to their
// encoding in HTTP/2. "
if r >= 127 || ('A' <= r && r <= 'Z') {
return false
}
}
return true
}
var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
func init() {
for i := 100; i <= 999; i++ {
if v := http.StatusText(i); v != "" {
httpCodeStringCommon[i] = strconv.Itoa(i)
}
}
}
func httpCodeString(code int) string {
if s, ok := httpCodeStringCommon[code]; ok {
return s
}
return strconv.Itoa(code)
}
// from pkg io
type stringWriter interface {
WriteString(s string) (n int, err error)
}
// A gate lets two goroutines coordinate their activities.
type gate chan struct{}
func (g gate) Done() { g <- struct{}{} }
func (g gate) Wait() { <-g }
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
type closeWaiter chan struct{}
// Init makes a closeWaiter usable.
// It exists because so a closeWaiter value can be placed inside a
// larger struct and have the Mutex and Cond's memory in the same
// allocation.
func (cw *closeWaiter) Init() {
*cw = make(chan struct{})
}
// Close marks the closeWaiter as closed and unblocks any waiters.
func (cw closeWaiter) Close() {
close(cw)
}
// Wait waits for the closeWaiter to become closed.
func (cw closeWaiter) Wait() {
<-cw
}
// bufferedWriter is a buffered writer that writes to w.
// Its buffered writer is lazily allocated as needed, to minimize
// idle memory usage with many connections.
type bufferedWriter struct {
w io.Writer // immutable
bw *bufio.Writer // non-nil when data is buffered
}
func newBufferedWriter(w io.Writer) *bufferedWriter {
return &bufferedWriter{w: w}
}
var bufWriterPool = sync.Pool{
New: func() interface{} {
// TODO: pick something better? this is a bit under
// (3 x typical 1500 byte MTU) at least.
return bufio.NewWriterSize(nil, 4<<10)
},
}
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
if w.bw == nil {
bw := bufWriterPool.Get().(*bufio.Writer)
bw.Reset(w.w)
w.bw = bw
}
return w.bw.Write(p)
}
func (w *bufferedWriter) Flush() error {
bw := w.bw
if bw == nil {
return nil
}
err := bw.Flush()
bw.Reset(nil)
bufWriterPool.Put(bw)
w.bw = nil
return err
}
| Godeps/_workspace/src/github.com/bradfitz/http2/http2.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.0025399569422006607,
0.00026924710255116224,
0.00016336732369381934,
0.00017075339565053582,
0.00046379564446397126
] |
{
"id": 2,
"code_window": [
"\t\tif len(dockerContainer.Names) == 0 {\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\t// TODO(proppy): build the docker container name and do a map lookup instead?\n",
"\t\tdockerName, hash, err := ParseDockerName(dockerContainer.Names[0])\n",
"\t\tif err != nil {\n",
"\t\t\tcontinue\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 222
} | #!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Installs etcd into /usr/local/bin
set -o errexit
set -o nounset
set -o pipefail
ETCD_VERSION=${ETCD_VERSION:-v2.0.12}
full_name=etcd-${ETCD_VERSION}-linux-amd64
archive_url=https://github.com/coreos/etcd/releases/download/${ETCD_VERSION}/${full_name}.tar.gz
download_dir=/tmp/etcd-${ETCD_VERSION}
mkdir ${download_dir}
function cleanup {
rm -rf ${download_dir}
}
trap cleanup EXIT
cd ${download_dir}
echo "Downloading etcd (${archive_url})..."
curl -s -L ${archive_url} | tar xvz
echo "Installing etcd (/usr/local/bin/etcd)..."
mv ./${full_name}/etcd* /usr/local/bin/
| cluster/mesos/docker/test/bin/install-etcd.sh | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00017661423771642148,
0.00017427853890694678,
0.0001718264538794756,
0.00017472867330070585,
0.000001645703150643385
] |
{
"id": 3,
"code_window": [
"}\n",
"\n",
"// GetKubeletDockerContainers lists all container or just the running ones.\n",
"// Returns a map of docker containers that we manage, keyed by container ID.\n",
"// TODO: Move this function with dockerCache to DockerManager.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"// Returns a list of docker containers that we manage, keyed by container ID.\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 350
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"fmt"
"math/rand"
"net/http"
"path"
"strconv"
"strings"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/parsers"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/leaky"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
)
const (
PodInfraContainerName = leaky.PodInfraContainerName
DockerPrefix = "docker://"
PodInfraContainerImage = "beta.gcr.io/google_containers/pause:2.0"
LogSuffix = "log"
)
const (
// Taken from lmctfy https://github.com/google/lmctfy/blob/master/lmctfy/controllers/cpu_controller.cc
minShares = 2
sharesPerCPU = 1024
milliCPUToCPU = 1000
// 100000 is equivalent to 100ms
quotaPeriod = 100000
)
// DockerInterface is an abstract interface for testability. It abstracts the interface of docker.Client.
type DockerInterface interface {
ListContainers(options docker.ListContainersOptions) ([]docker.APIContainers, error)
InspectContainer(id string) (*docker.Container, error)
CreateContainer(docker.CreateContainerOptions) (*docker.Container, error)
StartContainer(id string, hostConfig *docker.HostConfig) error
StopContainer(id string, timeout uint) error
RemoveContainer(opts docker.RemoveContainerOptions) error
InspectImage(image string) (*docker.Image, error)
ListImages(opts docker.ListImagesOptions) ([]docker.APIImages, error)
PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) error
RemoveImage(image string) error
Logs(opts docker.LogsOptions) error
Version() (*docker.Env, error)
Info() (*docker.Env, error)
CreateExec(docker.CreateExecOptions) (*docker.Exec, error)
StartExec(string, docker.StartExecOptions) error
InspectExec(id string) (*docker.ExecInspect, error)
AttachToContainer(opts docker.AttachToContainerOptions) error
}
// KubeletContainerName encapsulates a pod name and a Kubernetes container name.
type KubeletContainerName struct {
PodFullName string
PodUID types.UID
ContainerName string
}
// DockerPuller is an abstract interface for testability. It abstracts image pull operations.
type DockerPuller interface {
Pull(image string, secrets []api.Secret) error
IsImagePresent(image string) (bool, error)
}
// dockerPuller is the default implementation of DockerPuller.
type dockerPuller struct {
client DockerInterface
keyring credentialprovider.DockerKeyring
}
type throttledDockerPuller struct {
puller dockerPuller
limiter util.RateLimiter
}
// newDockerPuller creates a new instance of the default implementation of DockerPuller.
func newDockerPuller(client DockerInterface, qps float32, burst int) DockerPuller {
dp := dockerPuller{
client: client,
keyring: credentialprovider.NewDockerKeyring(),
}
if qps == 0.0 {
return dp
}
return &throttledDockerPuller{
puller: dp,
limiter: util.NewTokenBucketRateLimiter(qps, burst),
}
}
func parseImageName(image string) (string, string) {
return parsers.ParseRepositoryTag(image)
}
func filterHTTPError(err error, image string) error {
// docker/docker/pull/11314 prints detailed error info for docker pull.
// When it hits 502, it returns a verbose html output including an inline svg,
// which makes the output of kubectl get pods much harder to parse.
// Here converts such verbose output to a concise one.
jerr, ok := err.(*jsonmessage.JSONError)
if ok && (jerr.Code == http.StatusBadGateway ||
jerr.Code == http.StatusServiceUnavailable ||
jerr.Code == http.StatusGatewayTimeout) {
glog.V(2).Infof("Pulling image %q failed: %v", image, err)
return kubecontainer.RegistryUnavailable
} else {
return err
}
}
func (p dockerPuller) Pull(image string, secrets []api.Secret) error {
repoToPull, tag := parseImageName(image)
// If no tag was specified, use the default "latest".
if len(tag) == 0 {
tag = "latest"
}
opts := docker.PullImageOptions{
Repository: repoToPull,
Tag: tag,
}
keyring, err := credentialprovider.MakeDockerKeyring(secrets, p.keyring)
if err != nil {
return err
}
creds, haveCredentials := keyring.Lookup(repoToPull)
if !haveCredentials {
glog.V(1).Infof("Pulling image %s without credentials", image)
err := p.client.PullImage(opts, docker.AuthConfiguration{})
if err == nil {
return nil
}
// Image spec: [<registry>/]<repository>/<image>[:<version] so we count '/'
explicitRegistry := (strings.Count(image, "/") == 2)
// Hack, look for a private registry, and decorate the error with the lack of
// credentials. This is heuristic, and really probably could be done better
// by talking to the registry API directly from the kubelet here.
if explicitRegistry {
return fmt.Errorf("image pull failed for %s, this may be because there are no credentials on this request. details: (%v)", image, err)
}
return filterHTTPError(err, image)
}
var pullErrs []error
for _, currentCreds := range creds {
err := p.client.PullImage(opts, currentCreds)
// If there was no error, return success
if err == nil {
return nil
}
pullErrs = append(pullErrs, filterHTTPError(err, image))
}
return utilerrors.NewAggregate(pullErrs)
}
func (p throttledDockerPuller) Pull(image string, secrets []api.Secret) error {
if p.limiter.CanAccept() {
return p.puller.Pull(image, secrets)
}
return fmt.Errorf("pull QPS exceeded.")
}
func (p dockerPuller) IsImagePresent(image string) (bool, error) {
_, err := p.client.InspectImage(image)
if err == nil {
return true, nil
}
if err == docker.ErrNoSuchImage {
return false, nil
}
return false, err
}
func (p throttledDockerPuller) IsImagePresent(name string) (bool, error) {
return p.puller.IsImagePresent(name)
}
// TODO (random-liu) Almost never used, should we remove this?
// DockerContainers is a map of containers
type DockerContainers map[kubetypes.DockerID]*docker.APIContainers
func (c DockerContainers) FindPodContainer(podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {
for _, dockerContainer := range c {
if len(dockerContainer.Names) == 0 {
continue
}
// TODO(proppy): build the docker container name and do a map lookup instead?
dockerName, hash, err := ParseDockerName(dockerContainer.Names[0])
if err != nil {
continue
}
if dockerName.PodFullName == podFullName &&
(uid == "" || dockerName.PodUID == uid) &&
dockerName.ContainerName == containerName {
return dockerContainer, true, hash
}
}
return nil, false, 0
}
const containerNamePrefix = "k8s"
// Creates a name which can be reversed to identify both full pod name and container name.
func BuildDockerName(dockerName KubeletContainerName, container *api.Container) (string, string) {
containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16)
stableName := fmt.Sprintf("%s_%s_%s_%s",
containerNamePrefix,
containerName,
dockerName.PodFullName,
dockerName.PodUID)
return stableName, fmt.Sprintf("%s_%08x", stableName, rand.Uint32())
}
// Unpacks a container name, returning the pod full name and container name we would have used to
// construct the docker name. If we are unable to parse the name, an error is returned.
func ParseDockerName(name string) (dockerName *KubeletContainerName, hash uint64, err error) {
// For some reason docker appears to be appending '/' to names.
// If it's there, strip it.
name = strings.TrimPrefix(name, "/")
parts := strings.Split(name, "_")
if len(parts) == 0 || parts[0] != containerNamePrefix {
err = fmt.Errorf("failed to parse Docker container name %q into parts", name)
return nil, 0, err
}
if len(parts) < 6 {
// We have at least 5 fields. We may have more in the future.
// Anything with less fields than this is not something we can
// manage.
glog.Warningf("found a container with the %q prefix, but too few fields (%d): %q", containerNamePrefix, len(parts), name)
err = fmt.Errorf("Docker container name %q has less parts than expected %v", name, parts)
return nil, 0, err
}
nameParts := strings.Split(parts[1], ".")
containerName := nameParts[0]
if len(nameParts) > 1 {
hash, err = strconv.ParseUint(nameParts[1], 16, 32)
if err != nil {
glog.Warningf("invalid container hash %q in container %q", nameParts[1], name)
}
}
podFullName := parts[2] + "_" + parts[3]
podUID := types.UID(parts[4])
return &KubeletContainerName{podFullName, podUID, containerName}, hash, nil
}
func LogSymlink(containerLogsDir, podFullName, containerName, dockerId string) string {
return path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s.%s", podFullName, containerName, dockerId, LogSuffix))
}
// Get a *docker.Client, either using the endpoint passed in, or using
// DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT path per their spec
func getDockerClient(dockerEndpoint string) (*docker.Client, error) {
if len(dockerEndpoint) > 0 {
glog.Infof("Connecting to docker on %s", dockerEndpoint)
return docker.NewClient(dockerEndpoint)
}
return docker.NewClientFromEnv()
}
func ConnectToDockerOrDie(dockerEndpoint string) DockerInterface {
if dockerEndpoint == "fake://" {
return &FakeDockerClient{
VersionInfo: docker.Env{"ApiVersion=1.18"},
}
}
client, err := getDockerClient(dockerEndpoint)
if err != nil {
glog.Fatalf("Couldn't connect to docker: %v", err)
}
return client
}
// milliCPUToQuota converts milliCPU to CFS quota and period values
func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
// CFS quota is measured in two values:
// - cfs_period_us=100ms (the amount of time to measure usage across)
// - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
// so in the above example, you are limited to 20% of a single CPU
// for multi-cpu environments, you just scale equivalent amounts
if milliCPU == 0 {
// take the default behavior from docker
return
}
// we set the period to 100ms by default
period = quotaPeriod
// we then convert your milliCPU to a value normalized over a period
quota = (milliCPU * quotaPeriod) / milliCPUToCPU
return
}
func milliCPUToShares(milliCPU int64) int64 {
if milliCPU == 0 {
// Docker converts zero milliCPU to unset, which maps to kernel default
// for unset: 1024. Return 2 here to really match kernel default for
// zero milliCPU.
return minShares
}
// Conceptually (milliCPU / milliCPUToCPU) * sharesPerCPU, but factored to improve rounding.
shares := (milliCPU * sharesPerCPU) / milliCPUToCPU
if shares < minShares {
return minShares
}
return shares
}
// GetKubeletDockerContainers lists all container or just the running ones.
// Returns a map of docker containers that we manage, keyed by container ID.
// TODO: Move this function with dockerCache to DockerManager.
func GetKubeletDockerContainers(client DockerInterface, allContainers bool) (DockerContainers, error) {
result := make(DockerContainers)
containers, err := client.ListContainers(docker.ListContainersOptions{All: allContainers})
if err != nil {
return nil, err
}
for i := range containers {
container := &containers[i]
if len(container.Names) == 0 {
continue
}
// Skip containers that we didn't create to allow users to manually
// spin up their own containers if they want.
// TODO(dchen1107): Remove the old separator "--" by end of Oct
if !strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"_") &&
!strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"--") {
glog.V(3).Infof("Docker Container: %s is not managed by kubelet.", container.Names[0])
continue
}
result[kubetypes.DockerID(container.ID)] = container
}
return result, nil
}
| pkg/kubelet/dockertools/docker.go | 1 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.21617262065410614,
0.0074493782594799995,
0.000166884929058142,
0.00037608601269312203,
0.034640613943338394
] |
{
"id": 3,
"code_window": [
"}\n",
"\n",
"// GetKubeletDockerContainers lists all container or just the running ones.\n",
"// Returns a map of docker containers that we manage, keyed by container ID.\n",
"// TODO: Move this function with dockerCache to DockerManager.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"// Returns a list of docker containers that we manage, keyed by container ID.\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 350
} | package networks
import (
"github.com/mitchellh/mapstructure"
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/pagination"
)
type commonResult struct {
gophercloud.Result
}
// Extract is a function that accepts a result and extracts a network resource.
func (r commonResult) Extract() (*Network, error) {
if r.Err != nil {
return nil, r.Err
}
var res struct {
Network *Network `json:"network"`
}
err := mapstructure.Decode(r.Body, &res)
return res.Network, err
}
// CreateResult represents the result of a create operation.
type CreateResult struct {
commonResult
}
// GetResult represents the result of a get operation.
type GetResult struct {
commonResult
}
// UpdateResult represents the result of an update operation.
type UpdateResult struct {
commonResult
}
// DeleteResult represents the result of a delete operation.
type DeleteResult struct {
gophercloud.ErrResult
}
// Network represents, well, a network.
type Network struct {
// UUID for the network
ID string `mapstructure:"id" json:"id"`
// Human-readable name for the network. Might not be unique.
Name string `mapstructure:"name" json:"name"`
// The administrative state of network. If false (down), the network does not forward packets.
AdminStateUp bool `mapstructure:"admin_state_up" json:"admin_state_up"`
// Indicates whether network is currently operational. Possible values include
// `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional values.
Status string `mapstructure:"status" json:"status"`
// Subnets associated with this network.
Subnets []string `mapstructure:"subnets" json:"subnets"`
// Owner of network. Only admin users can specify a tenant_id other than its own.
TenantID string `mapstructure:"tenant_id" json:"tenant_id"`
// Specifies whether the network resource can be accessed by any tenant or not.
Shared bool `mapstructure:"shared" json:"shared"`
}
// NetworkPage is the page returned by a pager when traversing over a
// collection of networks.
type NetworkPage struct {
pagination.LinkedPageBase
}
// NextPageURL is invoked when a paginated collection of networks has reached
// the end of a page and the pager seeks to traverse over a new one. In order
// to do this, it needs to construct the next page's URL.
func (p NetworkPage) NextPageURL() (string, error) {
type resp struct {
Links []gophercloud.Link `mapstructure:"networks_links"`
}
var r resp
err := mapstructure.Decode(p.Body, &r)
if err != nil {
return "", err
}
return gophercloud.ExtractNextURL(r.Links)
}
// IsEmpty checks whether a NetworkPage struct is empty.
func (p NetworkPage) IsEmpty() (bool, error) {
is, err := ExtractNetworks(p)
if err != nil {
return true, nil
}
return len(is) == 0, nil
}
// ExtractNetworks accepts a Page struct, specifically a NetworkPage struct,
// and extracts the elements into a slice of Network structs. In other words,
// a generic collection is mapped into a relevant slice.
func ExtractNetworks(page pagination.Page) ([]Network, error) {
var resp struct {
Networks []Network `mapstructure:"networks" json:"networks"`
}
err := mapstructure.Decode(page.(NetworkPage).Body, &resp)
return resp.Networks, err
}
| Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/results.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00020127803145442158,
0.00017171418585348874,
0.00016481852799188346,
0.00016842760669533163,
0.00000966533934843028
] |
{
"id": 3,
"code_window": [
"}\n",
"\n",
"// GetKubeletDockerContainers lists all container or just the running ones.\n",
"// Returns a map of docker containers that we manage, keyed by container ID.\n",
"// TODO: Move this function with dockerCache to DockerManager.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"// Returns a list of docker containers that we manage, keyed by container ID.\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 350
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tools
import (
"github.com/coreos/go-etcd/etcd"
)
const (
EtcdErrorCodeNotFound = 100
EtcdErrorCodeTestFailed = 101
EtcdErrorCodeNodeExist = 105
EtcdErrorCodeValueRequired = 200
)
var (
EtcdErrorNotFound = &etcd.EtcdError{ErrorCode: EtcdErrorCodeNotFound}
EtcdErrorTestFailed = &etcd.EtcdError{ErrorCode: EtcdErrorCodeTestFailed}
EtcdErrorNodeExist = &etcd.EtcdError{ErrorCode: EtcdErrorCodeNodeExist}
EtcdErrorValueRequired = &etcd.EtcdError{ErrorCode: EtcdErrorCodeValueRequired}
)
// EtcdClient is an injectable interface for testing.
type EtcdClient interface {
GetCluster() []string
Get(key string, sort, recursive bool) (*etcd.Response, error)
Set(key, value string, ttl uint64) (*etcd.Response, error)
Create(key, value string, ttl uint64) (*etcd.Response, error)
CompareAndSwap(key, value string, ttl uint64, prevValue string, prevIndex uint64) (*etcd.Response, error)
Delete(key string, recursive bool) (*etcd.Response, error)
// I'd like to use directional channels here (e.g. <-chan) but this interface mimics
// the etcd client interface which doesn't, and it doesn't seem worth it to wrap the api.
Watch(prefix string, waitIndex uint64, recursive bool, receiver chan *etcd.Response, stop chan bool) (*etcd.Response, error)
}
| pkg/tools/interfaces.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00023229049111250788,
0.00018341238319408149,
0.00016436621081084013,
0.00017604495224077255,
0.00002491468512744177
] |
{
"id": 3,
"code_window": [
"}\n",
"\n",
"// GetKubeletDockerContainers lists all container or just the running ones.\n",
"// Returns a map of docker containers that we manage, keyed by container ID.\n",
"// TODO: Move this function with dockerCache to DockerManager.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"// Returns a list of docker containers that we manage, keyed by container ID.\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 350
} | /*
Package sessions provides information and interaction with the Session
Persistence feature of the Rackspace Cloud Load Balancer service.
Session persistence is a feature of the load balancing service that forces
multiple requests from clients (of the same protocol) to be directed to the
same node. This is common with many web applications that do not inherently
share application state between back-end servers.
There are two modes to choose from: HTTP_COOKIE and SOURCE_IP. You can only set
one of the session persistence modes on a load balancer, and it can only
support one protocol. If you set HTTP_COOKIE mode for an HTTP load balancer, it
supports session persistence for HTTP requests only. Likewise, if you set
SOURCE_IP mode for an HTTPS load balancer, it supports session persistence for
only HTTPS requests.
To support session persistence for both HTTP and HTTPS requests concurrently,
choose one of the following options:
- Use two load balancers, one configured for session persistence for HTTP
requests and the other configured for session persistence for HTTPS requests.
That way, the load balancers support session persistence for both HTTP and
HTTPS requests concurrently, with each load balancer supporting one of the
protocols.
- Use one load balancer, configure it for session persistence for HTTP requests,
and then enable SSL termination for that load balancer. The load balancer
supports session persistence for both HTTP and HTTPS requests concurrently.
*/
package sessions
| Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00017230765661224723,
0.00016577857604715973,
0.0001625650329515338,
0.00016412082186434418,
0.0000038266630326688755
] |
{
"id": 4,
"code_window": [
"// TODO: Move this function with dockerCache to DockerManager.\n",
"func GetKubeletDockerContainers(client DockerInterface, allContainers bool) (DockerContainers, error) {\n",
"\tresult := make(DockerContainers)\n",
"\tcontainers, err := client.ListContainers(docker.ListContainersOptions{All: allContainers})\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func GetKubeletDockerContainers(client DockerInterface, allContainers bool) ([]*docker.APIContainers, error) {\n",
"\tresult := []*docker.APIContainers{}\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 352
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"fmt"
"math/rand"
"net/http"
"path"
"strconv"
"strings"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/parsers"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/leaky"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
)
const (
PodInfraContainerName = leaky.PodInfraContainerName
DockerPrefix = "docker://"
PodInfraContainerImage = "beta.gcr.io/google_containers/pause:2.0"
LogSuffix = "log"
)
const (
// Taken from lmctfy https://github.com/google/lmctfy/blob/master/lmctfy/controllers/cpu_controller.cc
minShares = 2
sharesPerCPU = 1024
milliCPUToCPU = 1000
// 100000 is equivalent to 100ms
quotaPeriod = 100000
)
// DockerInterface is an abstract interface for testability. It abstracts the interface of docker.Client.
type DockerInterface interface {
ListContainers(options docker.ListContainersOptions) ([]docker.APIContainers, error)
InspectContainer(id string) (*docker.Container, error)
CreateContainer(docker.CreateContainerOptions) (*docker.Container, error)
StartContainer(id string, hostConfig *docker.HostConfig) error
StopContainer(id string, timeout uint) error
RemoveContainer(opts docker.RemoveContainerOptions) error
InspectImage(image string) (*docker.Image, error)
ListImages(opts docker.ListImagesOptions) ([]docker.APIImages, error)
PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) error
RemoveImage(image string) error
Logs(opts docker.LogsOptions) error
Version() (*docker.Env, error)
Info() (*docker.Env, error)
CreateExec(docker.CreateExecOptions) (*docker.Exec, error)
StartExec(string, docker.StartExecOptions) error
InspectExec(id string) (*docker.ExecInspect, error)
AttachToContainer(opts docker.AttachToContainerOptions) error
}
// KubeletContainerName encapsulates a pod name and a Kubernetes container name.
type KubeletContainerName struct {
PodFullName string
PodUID types.UID
ContainerName string
}
// DockerPuller is an abstract interface for testability. It abstracts image pull operations.
type DockerPuller interface {
Pull(image string, secrets []api.Secret) error
IsImagePresent(image string) (bool, error)
}
// dockerPuller is the default implementation of DockerPuller.
type dockerPuller struct {
client DockerInterface
keyring credentialprovider.DockerKeyring
}
type throttledDockerPuller struct {
puller dockerPuller
limiter util.RateLimiter
}
// newDockerPuller creates a new instance of the default implementation of DockerPuller.
func newDockerPuller(client DockerInterface, qps float32, burst int) DockerPuller {
dp := dockerPuller{
client: client,
keyring: credentialprovider.NewDockerKeyring(),
}
if qps == 0.0 {
return dp
}
return &throttledDockerPuller{
puller: dp,
limiter: util.NewTokenBucketRateLimiter(qps, burst),
}
}
func parseImageName(image string) (string, string) {
return parsers.ParseRepositoryTag(image)
}
func filterHTTPError(err error, image string) error {
// docker/docker/pull/11314 prints detailed error info for docker pull.
// When it hits 502, it returns a verbose html output including an inline svg,
// which makes the output of kubectl get pods much harder to parse.
// Here converts such verbose output to a concise one.
jerr, ok := err.(*jsonmessage.JSONError)
if ok && (jerr.Code == http.StatusBadGateway ||
jerr.Code == http.StatusServiceUnavailable ||
jerr.Code == http.StatusGatewayTimeout) {
glog.V(2).Infof("Pulling image %q failed: %v", image, err)
return kubecontainer.RegistryUnavailable
} else {
return err
}
}
func (p dockerPuller) Pull(image string, secrets []api.Secret) error {
repoToPull, tag := parseImageName(image)
// If no tag was specified, use the default "latest".
if len(tag) == 0 {
tag = "latest"
}
opts := docker.PullImageOptions{
Repository: repoToPull,
Tag: tag,
}
keyring, err := credentialprovider.MakeDockerKeyring(secrets, p.keyring)
if err != nil {
return err
}
creds, haveCredentials := keyring.Lookup(repoToPull)
if !haveCredentials {
glog.V(1).Infof("Pulling image %s without credentials", image)
err := p.client.PullImage(opts, docker.AuthConfiguration{})
if err == nil {
return nil
}
// Image spec: [<registry>/]<repository>/<image>[:<version] so we count '/'
explicitRegistry := (strings.Count(image, "/") == 2)
// Hack, look for a private registry, and decorate the error with the lack of
// credentials. This is heuristic, and really probably could be done better
// by talking to the registry API directly from the kubelet here.
if explicitRegistry {
return fmt.Errorf("image pull failed for %s, this may be because there are no credentials on this request. details: (%v)", image, err)
}
return filterHTTPError(err, image)
}
var pullErrs []error
for _, currentCreds := range creds {
err := p.client.PullImage(opts, currentCreds)
// If there was no error, return success
if err == nil {
return nil
}
pullErrs = append(pullErrs, filterHTTPError(err, image))
}
return utilerrors.NewAggregate(pullErrs)
}
func (p throttledDockerPuller) Pull(image string, secrets []api.Secret) error {
if p.limiter.CanAccept() {
return p.puller.Pull(image, secrets)
}
return fmt.Errorf("pull QPS exceeded.")
}
func (p dockerPuller) IsImagePresent(image string) (bool, error) {
_, err := p.client.InspectImage(image)
if err == nil {
return true, nil
}
if err == docker.ErrNoSuchImage {
return false, nil
}
return false, err
}
func (p throttledDockerPuller) IsImagePresent(name string) (bool, error) {
return p.puller.IsImagePresent(name)
}
// TODO (random-liu) Almost never used, should we remove this?
// DockerContainers is a map of containers
type DockerContainers map[kubetypes.DockerID]*docker.APIContainers
func (c DockerContainers) FindPodContainer(podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {
for _, dockerContainer := range c {
if len(dockerContainer.Names) == 0 {
continue
}
// TODO(proppy): build the docker container name and do a map lookup instead?
dockerName, hash, err := ParseDockerName(dockerContainer.Names[0])
if err != nil {
continue
}
if dockerName.PodFullName == podFullName &&
(uid == "" || dockerName.PodUID == uid) &&
dockerName.ContainerName == containerName {
return dockerContainer, true, hash
}
}
return nil, false, 0
}
const containerNamePrefix = "k8s"
// Creates a name which can be reversed to identify both full pod name and container name.
func BuildDockerName(dockerName KubeletContainerName, container *api.Container) (string, string) {
containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16)
stableName := fmt.Sprintf("%s_%s_%s_%s",
containerNamePrefix,
containerName,
dockerName.PodFullName,
dockerName.PodUID)
return stableName, fmt.Sprintf("%s_%08x", stableName, rand.Uint32())
}
// Unpacks a container name, returning the pod full name and container name we would have used to
// construct the docker name. If we are unable to parse the name, an error is returned.
func ParseDockerName(name string) (dockerName *KubeletContainerName, hash uint64, err error) {
// For some reason docker appears to be appending '/' to names.
// If it's there, strip it.
name = strings.TrimPrefix(name, "/")
parts := strings.Split(name, "_")
if len(parts) == 0 || parts[0] != containerNamePrefix {
err = fmt.Errorf("failed to parse Docker container name %q into parts", name)
return nil, 0, err
}
if len(parts) < 6 {
// We have at least 5 fields. We may have more in the future.
// Anything with less fields than this is not something we can
// manage.
glog.Warningf("found a container with the %q prefix, but too few fields (%d): %q", containerNamePrefix, len(parts), name)
err = fmt.Errorf("Docker container name %q has less parts than expected %v", name, parts)
return nil, 0, err
}
nameParts := strings.Split(parts[1], ".")
containerName := nameParts[0]
if len(nameParts) > 1 {
hash, err = strconv.ParseUint(nameParts[1], 16, 32)
if err != nil {
glog.Warningf("invalid container hash %q in container %q", nameParts[1], name)
}
}
podFullName := parts[2] + "_" + parts[3]
podUID := types.UID(parts[4])
return &KubeletContainerName{podFullName, podUID, containerName}, hash, nil
}
func LogSymlink(containerLogsDir, podFullName, containerName, dockerId string) string {
return path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s.%s", podFullName, containerName, dockerId, LogSuffix))
}
// Get a *docker.Client, either using the endpoint passed in, or using
// DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT path per their spec
func getDockerClient(dockerEndpoint string) (*docker.Client, error) {
if len(dockerEndpoint) > 0 {
glog.Infof("Connecting to docker on %s", dockerEndpoint)
return docker.NewClient(dockerEndpoint)
}
return docker.NewClientFromEnv()
}
func ConnectToDockerOrDie(dockerEndpoint string) DockerInterface {
if dockerEndpoint == "fake://" {
return &FakeDockerClient{
VersionInfo: docker.Env{"ApiVersion=1.18"},
}
}
client, err := getDockerClient(dockerEndpoint)
if err != nil {
glog.Fatalf("Couldn't connect to docker: %v", err)
}
return client
}
// milliCPUToQuota converts milliCPU to CFS quota and period values
func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
// CFS quota is measured in two values:
// - cfs_period_us=100ms (the amount of time to measure usage across)
// - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
// so in the above example, you are limited to 20% of a single CPU
// for multi-cpu environments, you just scale equivalent amounts
if milliCPU == 0 {
// take the default behavior from docker
return
}
// we set the period to 100ms by default
period = quotaPeriod
// we then convert your milliCPU to a value normalized over a period
quota = (milliCPU * quotaPeriod) / milliCPUToCPU
return
}
func milliCPUToShares(milliCPU int64) int64 {
if milliCPU == 0 {
// Docker converts zero milliCPU to unset, which maps to kernel default
// for unset: 1024. Return 2 here to really match kernel default for
// zero milliCPU.
return minShares
}
// Conceptually (milliCPU / milliCPUToCPU) * sharesPerCPU, but factored to improve rounding.
shares := (milliCPU * sharesPerCPU) / milliCPUToCPU
if shares < minShares {
return minShares
}
return shares
}
// GetKubeletDockerContainers lists all container or just the running ones.
// Returns a map of docker containers that we manage, keyed by container ID.
// TODO: Move this function with dockerCache to DockerManager.
func GetKubeletDockerContainers(client DockerInterface, allContainers bool) (DockerContainers, error) {
result := make(DockerContainers)
containers, err := client.ListContainers(docker.ListContainersOptions{All: allContainers})
if err != nil {
return nil, err
}
for i := range containers {
container := &containers[i]
if len(container.Names) == 0 {
continue
}
// Skip containers that we didn't create to allow users to manually
// spin up their own containers if they want.
// TODO(dchen1107): Remove the old separator "--" by end of Oct
if !strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"_") &&
!strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"--") {
glog.V(3).Infof("Docker Container: %s is not managed by kubelet.", container.Names[0])
continue
}
result[kubetypes.DockerID(container.ID)] = container
}
return result, nil
}
| pkg/kubelet/dockertools/docker.go | 1 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.9980930685997009,
0.12704400718212128,
0.0001716689148452133,
0.002860453212633729,
0.3005335032939911
] |
{
"id": 4,
"code_window": [
"// TODO: Move this function with dockerCache to DockerManager.\n",
"func GetKubeletDockerContainers(client DockerInterface, allContainers bool) (DockerContainers, error) {\n",
"\tresult := make(DockerContainers)\n",
"\tcontainers, err := client.ListContainers(docker.ListContainersOptions{All: allContainers})\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func GetKubeletDockerContainers(client DockerInterface, allContainers bool) ([]*docker.APIContainers, error) {\n",
"\tresult := []*docker.APIContainers{}\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 352
} | *.prof
*.test
*.swp
/bin/
| Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.0001688793272478506,
0.0001688793272478506,
0.0001688793272478506,
0.0001688793272478506,
0
] |
{
"id": 4,
"code_window": [
"// TODO: Move this function with dockerCache to DockerManager.\n",
"func GetKubeletDockerContainers(client DockerInterface, allContainers bool) (DockerContainers, error) {\n",
"\tresult := make(DockerContainers)\n",
"\tcontainers, err := client.ListContainers(docker.ListContainersOptions{All: allContainers})\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func GetKubeletDockerContainers(client DockerInterface, allContainers bool) ([]*docker.APIContainers, error) {\n",
"\tresult := []*docker.APIContainers{}\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 352
} | // Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Handler for /validate content.
// Validates cadvisor dependencies - kernel, os, docker setup.
package docker
import (
"sync"
dclient "github.com/fsouza/go-dockerclient"
)
var (
dockerClient *dclient.Client
dockerClientErr error
once sync.Once
)
func Client() (*dclient.Client, error) {
once.Do(func() {
dockerClient, dockerClientErr = dclient.NewClient(*ArgDockerEndpoint)
})
return dockerClient, dockerClientErr
}
| Godeps/_workspace/src/github.com/google/cadvisor/container/docker/client.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.026343615725636482,
0.006790783256292343,
0.00017699440650176257,
0.0003212612646166235,
0.011289355345070362
] |
{
"id": 4,
"code_window": [
"// TODO: Move this function with dockerCache to DockerManager.\n",
"func GetKubeletDockerContainers(client DockerInterface, allContainers bool) (DockerContainers, error) {\n",
"\tresult := make(DockerContainers)\n",
"\tcontainers, err := client.ListContainers(docker.ListContainersOptions{All: allContainers})\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func GetKubeletDockerContainers(client DockerInterface, allContainers bool) ([]*docker.APIContainers, error) {\n",
"\tresult := []*docker.APIContainers{}\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 352
} | package accounts
import "github.com/rackspace/gophercloud"
// GetOptsBuilder allows extensions to add additional headers to the Get
// request.
type GetOptsBuilder interface {
ToAccountGetMap() (map[string]string, error)
}
// GetOpts is a structure that contains parameters for getting an account's
// metadata.
type GetOpts struct {
Newest bool `h:"X-Newest"`
}
// ToAccountGetMap formats a GetOpts into a map[string]string of headers.
func (opts GetOpts) ToAccountGetMap() (map[string]string, error) {
return gophercloud.BuildHeaders(opts)
}
// Get is a function that retrieves an account's metadata. To extract just the
// custom metadata, call the ExtractMetadata method on the GetResult. To extract
// all the headers that are returned (including the metadata), call the
// ExtractHeader method on the GetResult.
func Get(c *gophercloud.ServiceClient, opts GetOptsBuilder) GetResult {
var res GetResult
h := c.AuthenticatedHeaders()
if opts != nil {
headers, err := opts.ToAccountGetMap()
if err != nil {
res.Err = err
return res
}
for k, v := range headers {
h[k] = v
}
}
resp, err := c.Request("HEAD", getURL(c), gophercloud.RequestOpts{
MoreHeaders: h,
OkCodes: []int{204},
})
if resp != nil {
res.Header = resp.Header
}
res.Err = err
return res
}
// UpdateOptsBuilder allows extensions to add additional headers to the Update
// request.
type UpdateOptsBuilder interface {
ToAccountUpdateMap() (map[string]string, error)
}
// UpdateOpts is a structure that contains parameters for updating, creating, or
// deleting an account's metadata.
type UpdateOpts struct {
Metadata map[string]string
ContentType string `h:"Content-Type"`
DetectContentType bool `h:"X-Detect-Content-Type"`
TempURLKey string `h:"X-Account-Meta-Temp-URL-Key"`
TempURLKey2 string `h:"X-Account-Meta-Temp-URL-Key-2"`
}
// ToAccountUpdateMap formats an UpdateOpts into a map[string]string of headers.
func (opts UpdateOpts) ToAccountUpdateMap() (map[string]string, error) {
headers, err := gophercloud.BuildHeaders(opts)
if err != nil {
return nil, err
}
for k, v := range opts.Metadata {
headers["X-Account-Meta-"+k] = v
}
return headers, err
}
// Update is a function that creates, updates, or deletes an account's metadata.
// To extract the headers returned, call the Extract method on the UpdateResult.
func Update(c *gophercloud.ServiceClient, opts UpdateOptsBuilder) UpdateResult {
var res UpdateResult
h := make(map[string]string)
if opts != nil {
headers, err := opts.ToAccountUpdateMap()
if err != nil {
res.Err = err
return res
}
for k, v := range headers {
h[k] = v
}
}
resp, err := c.Request("POST", updateURL(c), gophercloud.RequestOpts{
MoreHeaders: h,
OkCodes: []int{201, 202, 204},
})
if resp != nil {
res.Header = resp.Header
}
res.Err = err
return res
}
| Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00028723065042868257,
0.00022331083891913295,
0.00016724383749533445,
0.00022992790036369115,
0.000038476668123621494
] |
{
"id": 5,
"code_window": [
"\t\tif !strings.HasPrefix(container.Names[0], \"/\"+containerNamePrefix+\"_\") &&\n",
"\t\t\t!strings.HasPrefix(container.Names[0], \"/\"+containerNamePrefix+\"--\") {\n",
"\t\t\tglog.V(3).Infof(\"Docker Container: %s is not managed by kubelet.\", container.Names[0])\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\tresult[kubetypes.DockerID(container.ID)] = container\n",
"\t}\n",
"\treturn result, nil\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tresult = append(result, container)\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 371
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"fmt"
"math/rand"
"net/http"
"path"
"strconv"
"strings"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/parsers"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/leaky"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
)
const (
PodInfraContainerName = leaky.PodInfraContainerName
DockerPrefix = "docker://"
PodInfraContainerImage = "beta.gcr.io/google_containers/pause:2.0"
LogSuffix = "log"
)
const (
// Taken from lmctfy https://github.com/google/lmctfy/blob/master/lmctfy/controllers/cpu_controller.cc
minShares = 2
sharesPerCPU = 1024
milliCPUToCPU = 1000
// 100000 is equivalent to 100ms
quotaPeriod = 100000
)
// DockerInterface is an abstract interface for testability. It abstracts the interface of docker.Client.
type DockerInterface interface {
ListContainers(options docker.ListContainersOptions) ([]docker.APIContainers, error)
InspectContainer(id string) (*docker.Container, error)
CreateContainer(docker.CreateContainerOptions) (*docker.Container, error)
StartContainer(id string, hostConfig *docker.HostConfig) error
StopContainer(id string, timeout uint) error
RemoveContainer(opts docker.RemoveContainerOptions) error
InspectImage(image string) (*docker.Image, error)
ListImages(opts docker.ListImagesOptions) ([]docker.APIImages, error)
PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) error
RemoveImage(image string) error
Logs(opts docker.LogsOptions) error
Version() (*docker.Env, error)
Info() (*docker.Env, error)
CreateExec(docker.CreateExecOptions) (*docker.Exec, error)
StartExec(string, docker.StartExecOptions) error
InspectExec(id string) (*docker.ExecInspect, error)
AttachToContainer(opts docker.AttachToContainerOptions) error
}
// KubeletContainerName encapsulates a pod name and a Kubernetes container name.
type KubeletContainerName struct {
PodFullName string
PodUID types.UID
ContainerName string
}
// DockerPuller is an abstract interface for testability. It abstracts image pull operations.
type DockerPuller interface {
Pull(image string, secrets []api.Secret) error
IsImagePresent(image string) (bool, error)
}
// dockerPuller is the default implementation of DockerPuller.
type dockerPuller struct {
client DockerInterface
keyring credentialprovider.DockerKeyring
}
type throttledDockerPuller struct {
puller dockerPuller
limiter util.RateLimiter
}
// newDockerPuller creates a new instance of the default implementation of DockerPuller.
func newDockerPuller(client DockerInterface, qps float32, burst int) DockerPuller {
dp := dockerPuller{
client: client,
keyring: credentialprovider.NewDockerKeyring(),
}
if qps == 0.0 {
return dp
}
return &throttledDockerPuller{
puller: dp,
limiter: util.NewTokenBucketRateLimiter(qps, burst),
}
}
func parseImageName(image string) (string, string) {
return parsers.ParseRepositoryTag(image)
}
func filterHTTPError(err error, image string) error {
// docker/docker/pull/11314 prints detailed error info for docker pull.
// When it hits 502, it returns a verbose html output including an inline svg,
// which makes the output of kubectl get pods much harder to parse.
// Here converts such verbose output to a concise one.
jerr, ok := err.(*jsonmessage.JSONError)
if ok && (jerr.Code == http.StatusBadGateway ||
jerr.Code == http.StatusServiceUnavailable ||
jerr.Code == http.StatusGatewayTimeout) {
glog.V(2).Infof("Pulling image %q failed: %v", image, err)
return kubecontainer.RegistryUnavailable
} else {
return err
}
}
func (p dockerPuller) Pull(image string, secrets []api.Secret) error {
repoToPull, tag := parseImageName(image)
// If no tag was specified, use the default "latest".
if len(tag) == 0 {
tag = "latest"
}
opts := docker.PullImageOptions{
Repository: repoToPull,
Tag: tag,
}
keyring, err := credentialprovider.MakeDockerKeyring(secrets, p.keyring)
if err != nil {
return err
}
creds, haveCredentials := keyring.Lookup(repoToPull)
if !haveCredentials {
glog.V(1).Infof("Pulling image %s without credentials", image)
err := p.client.PullImage(opts, docker.AuthConfiguration{})
if err == nil {
return nil
}
// Image spec: [<registry>/]<repository>/<image>[:<version] so we count '/'
explicitRegistry := (strings.Count(image, "/") == 2)
// Hack, look for a private registry, and decorate the error with the lack of
// credentials. This is heuristic, and really probably could be done better
// by talking to the registry API directly from the kubelet here.
if explicitRegistry {
return fmt.Errorf("image pull failed for %s, this may be because there are no credentials on this request. details: (%v)", image, err)
}
return filterHTTPError(err, image)
}
var pullErrs []error
for _, currentCreds := range creds {
err := p.client.PullImage(opts, currentCreds)
// If there was no error, return success
if err == nil {
return nil
}
pullErrs = append(pullErrs, filterHTTPError(err, image))
}
return utilerrors.NewAggregate(pullErrs)
}
func (p throttledDockerPuller) Pull(image string, secrets []api.Secret) error {
if p.limiter.CanAccept() {
return p.puller.Pull(image, secrets)
}
return fmt.Errorf("pull QPS exceeded.")
}
func (p dockerPuller) IsImagePresent(image string) (bool, error) {
_, err := p.client.InspectImage(image)
if err == nil {
return true, nil
}
if err == docker.ErrNoSuchImage {
return false, nil
}
return false, err
}
func (p throttledDockerPuller) IsImagePresent(name string) (bool, error) {
return p.puller.IsImagePresent(name)
}
// TODO (random-liu) Almost never used, should we remove this?
// DockerContainers is a map of containers
type DockerContainers map[kubetypes.DockerID]*docker.APIContainers
func (c DockerContainers) FindPodContainer(podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {
for _, dockerContainer := range c {
if len(dockerContainer.Names) == 0 {
continue
}
// TODO(proppy): build the docker container name and do a map lookup instead?
dockerName, hash, err := ParseDockerName(dockerContainer.Names[0])
if err != nil {
continue
}
if dockerName.PodFullName == podFullName &&
(uid == "" || dockerName.PodUID == uid) &&
dockerName.ContainerName == containerName {
return dockerContainer, true, hash
}
}
return nil, false, 0
}
const containerNamePrefix = "k8s"
// Creates a name which can be reversed to identify both full pod name and container name.
func BuildDockerName(dockerName KubeletContainerName, container *api.Container) (string, string) {
containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16)
stableName := fmt.Sprintf("%s_%s_%s_%s",
containerNamePrefix,
containerName,
dockerName.PodFullName,
dockerName.PodUID)
return stableName, fmt.Sprintf("%s_%08x", stableName, rand.Uint32())
}
// Unpacks a container name, returning the pod full name and container name we would have used to
// construct the docker name. If we are unable to parse the name, an error is returned.
func ParseDockerName(name string) (dockerName *KubeletContainerName, hash uint64, err error) {
// For some reason docker appears to be appending '/' to names.
// If it's there, strip it.
name = strings.TrimPrefix(name, "/")
parts := strings.Split(name, "_")
if len(parts) == 0 || parts[0] != containerNamePrefix {
err = fmt.Errorf("failed to parse Docker container name %q into parts", name)
return nil, 0, err
}
if len(parts) < 6 {
// We have at least 5 fields. We may have more in the future.
// Anything with less fields than this is not something we can
// manage.
glog.Warningf("found a container with the %q prefix, but too few fields (%d): %q", containerNamePrefix, len(parts), name)
err = fmt.Errorf("Docker container name %q has less parts than expected %v", name, parts)
return nil, 0, err
}
nameParts := strings.Split(parts[1], ".")
containerName := nameParts[0]
if len(nameParts) > 1 {
hash, err = strconv.ParseUint(nameParts[1], 16, 32)
if err != nil {
glog.Warningf("invalid container hash %q in container %q", nameParts[1], name)
}
}
podFullName := parts[2] + "_" + parts[3]
podUID := types.UID(parts[4])
return &KubeletContainerName{podFullName, podUID, containerName}, hash, nil
}
func LogSymlink(containerLogsDir, podFullName, containerName, dockerId string) string {
return path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s.%s", podFullName, containerName, dockerId, LogSuffix))
}
// Get a *docker.Client, either using the endpoint passed in, or using
// DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT path per their spec
func getDockerClient(dockerEndpoint string) (*docker.Client, error) {
if len(dockerEndpoint) > 0 {
glog.Infof("Connecting to docker on %s", dockerEndpoint)
return docker.NewClient(dockerEndpoint)
}
return docker.NewClientFromEnv()
}
func ConnectToDockerOrDie(dockerEndpoint string) DockerInterface {
if dockerEndpoint == "fake://" {
return &FakeDockerClient{
VersionInfo: docker.Env{"ApiVersion=1.18"},
}
}
client, err := getDockerClient(dockerEndpoint)
if err != nil {
glog.Fatalf("Couldn't connect to docker: %v", err)
}
return client
}
// milliCPUToQuota converts milliCPU to CFS quota and period values
func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
// CFS quota is measured in two values:
// - cfs_period_us=100ms (the amount of time to measure usage across)
// - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
// so in the above example, you are limited to 20% of a single CPU
// for multi-cpu environments, you just scale equivalent amounts
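// Worked example with illustrative values: milliCPU=250 yields
// quota = (250 * 100000) / 1000 = 25000, i.e. 25ms of CPU time per 100ms period.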
if milliCPU == 0 {
// take the default behavior from docker
return
}
// we set the period to 100ms by default
period = quotaPeriod
// we then convert your milliCPU to a value normalized over a period
quota = (milliCPU * quotaPeriod) / milliCPUToCPU
return
}
func milliCPUToShares(milliCPU int64) int64 {
if milliCPU == 0 {
// Docker converts zero milliCPU to unset, which maps to kernel default
// for unset: 1024. Return 2 here to really match kernel default for
// zero milliCPU.
return minShares
}
// Conceptually (milliCPU / milliCPUToCPU) * sharesPerCPU, but factored to improve rounding.
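// e.g. (illustrative) milliCPU=500 yields (500 * 1024) / 1000 = 512 shares.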
shares := (milliCPU * sharesPerCPU) / milliCPUToCPU
if shares < minShares {
return minShares
}
return shares
}
// GetKubeletDockerContainers lists all containers or just the running ones.
// Returns a map of docker containers that we manage, keyed by container ID.
// TODO: Move this function with dockerCache to DockerManager.
func GetKubeletDockerContainers(client DockerInterface, allContainers bool) (DockerContainers, error) {
result := make(DockerContainers)
containers, err := client.ListContainers(docker.ListContainersOptions{All: allContainers})
if err != nil {
return nil, err
}
for i := range containers {
container := &containers[i]
if len(container.Names) == 0 {
continue
}
// Skip containers that we didn't create to allow users to manually
// spin up their own containers if they want.
// TODO(dchen1107): Remove the old separator "--" by end of Oct
if !strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"_") &&
!strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"--") {
glog.V(3).Infof("Docker Container: %s is not managed by kubelet.", container.Names[0])
continue
}
result[kubetypes.DockerID(container.ID)] = container
}
return result, nil
}
| pkg/kubelet/dockertools/docker.go | 1 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.9959396123886108,
0.04855973646044731,
0.00016523676458746195,
0.0002831424353644252,
0.20310078561306
] |
{
"id": 5,
"code_window": [
"\t\tif !strings.HasPrefix(container.Names[0], \"/\"+containerNamePrefix+\"_\") &&\n",
"\t\t\t!strings.HasPrefix(container.Names[0], \"/\"+containerNamePrefix+\"--\") {\n",
"\t\t\tglog.V(3).Infof(\"Docker Container: %s is not managed by kubelet.\", container.Names[0])\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\tresult[kubetypes.DockerID(container.ID)] = container\n",
"\t}\n",
"\treturn result, nil\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tresult = append(result, container)\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 371
} | package inf
import (
"math/big"
)
// Rounder represents a method for rounding the (possibly infinite decimal)
// result of a division to a finite Dec. It is used by Dec.Round() and
// Dec.Quo().
//
// See the Example for results of using each Rounder with some sample values.
//
type Rounder rounder
// See http://speleotrove.com/decimal/damodel.html#refround for more detailed
// definitions of these rounding modes.
var (
RoundDown Rounder // towards 0
RoundUp Rounder // away from 0
RoundFloor Rounder // towards -infinity
RoundCeil Rounder // towards +infinity
RoundHalfDown Rounder // to nearest; towards 0 if same distance
RoundHalfUp Rounder // to nearest; away from 0 if same distance
RoundHalfEven Rounder // to nearest; even last digit if same distance
)
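// For example (illustrative case), rounding -0.25 to one decimal place:
// RoundDown, RoundCeil, RoundHalfDown and RoundHalfEven give -0.2, while
// RoundUp, RoundFloor and RoundHalfUp give -0.3.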
// RoundExact is to be used in the case when rounding is not necessary.
// When used with Quo or Round, it returns the result verbatim when it can be
// expressed exactly with the given precision, and it returns nil otherwise.
// QuoExact is a shorthand for using Quo with RoundExact.
var RoundExact Rounder
type rounder interface {
// When UseRemainder() returns true, the Round() method is passed the
// remainder of the division, expressed as the numerator and denominator of
// a rational.
UseRemainder() bool
// Round sets the rounded value of a quotient to z, and returns z.
// quo is rounded down (truncated towards zero) to the scale obtained from
// the Scaler in Quo().
//
// When the remainder is not used, remNum and remDen are nil.
// When used, the remainder is normalized between -1 and 1; that is:
//
// -|remDen| < remNum < |remDen|
//
// remDen has the same sign as y, and remNum is zero or has the same sign
// as x.
Round(z, quo *Dec, remNum, remDen *big.Int) *Dec
}
type rndr struct {
useRem bool
round func(z, quo *Dec, remNum, remDen *big.Int) *Dec
}
func (r rndr) UseRemainder() bool {
return r.useRem
}
func (r rndr) Round(z, quo *Dec, remNum, remDen *big.Int) *Dec {
return r.round(z, quo, remNum, remDen)
}
var intSign = []*big.Int{big.NewInt(-1), big.NewInt(0), big.NewInt(1)}
func roundHalf(f func(c int, odd uint) (roundUp bool)) func(z, q *Dec, rA, rB *big.Int) *Dec {
return func(z, q *Dec, rA, rB *big.Int) *Dec {
z.Set(q)
brA, brB := rA.BitLen(), rB.BitLen()
if brA < brB-1 {
// brA < brB-1 => |rA| < |rB/2|
return z
}
roundUp := false
srA, srB := rA.Sign(), rB.Sign()
s := srA * srB
if brA == brB-1 {
rA2 := new(big.Int).Lsh(rA, 1)
if s < 0 {
rA2.Neg(rA2)
}
roundUp = f(rA2.Cmp(rB)*srB, z.UnscaledBig().Bit(0))
} else {
// brA > brB-1 => |rA| > |rB/2|
roundUp = true
}
if roundUp {
z.UnscaledBig().Add(z.UnscaledBig(), intSign[s+1])
}
return z
}
}
func init() {
RoundExact = rndr{true,
func(z, q *Dec, rA, rB *big.Int) *Dec {
if rA.Sign() != 0 {
return nil
}
return z.Set(q)
}}
RoundDown = rndr{false,
func(z, q *Dec, rA, rB *big.Int) *Dec {
return z.Set(q)
}}
RoundUp = rndr{true,
func(z, q *Dec, rA, rB *big.Int) *Dec {
z.Set(q)
if rA.Sign() != 0 {
z.UnscaledBig().Add(z.UnscaledBig(), intSign[rA.Sign()*rB.Sign()+1])
}
return z
}}
RoundFloor = rndr{true,
func(z, q *Dec, rA, rB *big.Int) *Dec {
z.Set(q)
if rA.Sign()*rB.Sign() < 0 {
z.UnscaledBig().Add(z.UnscaledBig(), intSign[0])
}
return z
}}
RoundCeil = rndr{true,
func(z, q *Dec, rA, rB *big.Int) *Dec {
z.Set(q)
if rA.Sign()*rB.Sign() > 0 {
z.UnscaledBig().Add(z.UnscaledBig(), intSign[2])
}
return z
}}
RoundHalfDown = rndr{true, roundHalf(
func(c int, odd uint) bool {
return c > 0
})}
RoundHalfUp = rndr{true, roundHalf(
func(c int, odd uint) bool {
return c >= 0
})}
RoundHalfEven = rndr{true, roundHalf(
func(c int, odd uint) bool {
return c > 0 || c == 0 && odd == 1
})}
}
| Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/rounder.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.0001765677734510973,
0.00017076186486519873,
0.000161082498379983,
0.00017142060096375644,
0.00000485602095068316
] |
{
"id": 5,
"code_window": [
"\t\tif !strings.HasPrefix(container.Names[0], \"/\"+containerNamePrefix+\"_\") &&\n",
"\t\t\t!strings.HasPrefix(container.Names[0], \"/\"+containerNamePrefix+\"--\") {\n",
"\t\t\tglog.V(3).Infof(\"Docker Container: %s is not managed by kubelet.\", container.Names[0])\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\tresult[kubetypes.DockerID(container.ID)] = container\n",
"\t}\n",
"\treturn result, nil\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tresult = append(result, container)\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 371
} | <!DOCTYPE html>
<html lang="en">
<script src="http://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<!-- d3 is used by histogram.js
<script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>
-->
<script src="https://raw.githubusercontent.com/nnnick/Chart.js/master/Chart.js"></script>
<script src="script.js"></script>
<script src="histogram.js"></script>
<head>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type">
<meta charset="utf-8">
<meta content="width=device-width" name="viewport">
<link href="/style.css" rel="stylesheet">
<title>((( - PRODUCTION -))) Guestbook</title>
</head>
<body>
<TABLE>
<TR>
<TD>
<div id="k8petstore-entries">
<p>Waiting for database connection...This will get overwritten...</p>
</div>
</TD>
<TD>
<div id="header">
<h1>k8-bps.</h1>
</div><br>
<div>
<p><h2 id="k8petstore-host-address"></h2></p>
<p><a href="/env">/env</a>
<a href="/info">/info</a></p>
</div>
</TD>
</TR>
<TR >
<TD colspan="2">
<canvas id="myChart" width="2000" height="600"></canvas>
</TD>
</TR>
</TABLE>
</body>
</html>
| examples/k8petstore/web-server/static/index.html | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00017464156553614885,
0.00017250058590434492,
0.0001680200221017003,
0.00017366663087159395,
0.0000023557749955216423
] |
{
"id": 5,
"code_window": [
"\t\tif !strings.HasPrefix(container.Names[0], \"/\"+containerNamePrefix+\"_\") &&\n",
"\t\t\t!strings.HasPrefix(container.Names[0], \"/\"+containerNamePrefix+\"--\") {\n",
"\t\t\tglog.V(3).Infof(\"Docker Container: %s is not managed by kubelet.\", container.Names[0])\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\tresult[kubetypes.DockerID(container.ID)] = container\n",
"\t}\n",
"\treturn result, nil\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tresult = append(result, container)\n"
],
"file_path": "pkg/kubelet/dockertools/docker.go",
"type": "replace",
"edit_start_line_idx": 371
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package minion
import (
"syscall"
log "github.com/golang/glog"
)
// enterPrivateMountNamespace does just that: the current mount ns is unshared (isolated)
// and then made a slave to the root mount / of the parent mount ns (mount events from /
// or its children that happen in the parent NS propagate to us).
//
// this is not yet compatible with volume plugins as implemented by the kubelet, which
// depends on using host-volume args to 'docker run' to attach plugin volumes to CT's
// at runtime. as such, docker needs to be able to see the volumes mounted by k8s plugins,
// which is impossible if k8s volume plugins are running in an isolated mount ns.
//
// an alternative approach would be to always run the kubelet in the host's mount-ns and
// rely upon mesos to forcibly umount bindings in the task sandbox before rmdir'ing it:
// https://issues.apache.org/jira/browse/MESOS-349.
//
// use at your own risk.
func enterPrivateMountNamespace() {
log.Warningln("EXPERIMENTAL FEATURE: entering private mount ns")
// enter a new mount NS, useful for isolating changes to the mount table
// that are made by the kubelet for storage volumes.
err := syscall.Unshare(syscall.CLONE_NEWNS)
if err != nil {
log.Fatalf("failed to enter private mount NS: %v", err)
}
// make the rootfs / rslave to the parent mount NS so that we
// pick up on any changes made there
err = syscall.Mount("", "/", "dontcare", syscall.MS_REC|syscall.MS_SLAVE, "")
if err != nil {
log.Fatalf("failed to mark / rslave: %v", err)
}
}
| contrib/mesos/pkg/minion/mountns_linux.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00017836480401456356,
0.0001710965298116207,
0.00016061747737694532,
0.00017207840573973954,
0.00000644911187919206
] |
{
"id": 6,
"code_window": [
"\t\tt.Errorf(\"Expected %#v, Got %#v\", fakeDocker.ContainerList, dockerContainers)\n",
"\t}\n",
"\tverifyCalls(t, fakeDocker, []string{\"list\"})\n",
"\tdockerContainer, found, _ := dockerContainers.FindPodContainer(\"qux_ns\", \"\", \"foo\")\n",
"\tif dockerContainer == nil || !found {\n",
"\t\tt.Errorf(\"Failed to find container %#v\", dockerContainer)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tdockerContainer, found, _ := findPodContainer(dockerContainers, \"qux_ns\", \"\", \"foo\")\n"
],
"file_path": "pkg/kubelet/dockertools/docker_test.go",
"type": "replace",
"edit_start_line_idx": 85
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"fmt"
"math/rand"
"net/http"
"path"
"strconv"
"strings"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/parsers"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/leaky"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
)
const (
PodInfraContainerName = leaky.PodInfraContainerName
DockerPrefix = "docker://"
PodInfraContainerImage = "beta.gcr.io/google_containers/pause:2.0"
LogSuffix = "log"
)
const (
// Taken from lmctfy https://github.com/google/lmctfy/blob/master/lmctfy/controllers/cpu_controller.cc
minShares = 2
sharesPerCPU = 1024
milliCPUToCPU = 1000
// 100000 is equivalent to 100ms
quotaPeriod = 100000
)
// DockerInterface is an abstract interface for testability. It abstracts the interface of docker.Client.
type DockerInterface interface {
ListContainers(options docker.ListContainersOptions) ([]docker.APIContainers, error)
InspectContainer(id string) (*docker.Container, error)
CreateContainer(docker.CreateContainerOptions) (*docker.Container, error)
StartContainer(id string, hostConfig *docker.HostConfig) error
StopContainer(id string, timeout uint) error
RemoveContainer(opts docker.RemoveContainerOptions) error
InspectImage(image string) (*docker.Image, error)
ListImages(opts docker.ListImagesOptions) ([]docker.APIImages, error)
PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) error
RemoveImage(image string) error
Logs(opts docker.LogsOptions) error
Version() (*docker.Env, error)
Info() (*docker.Env, error)
CreateExec(docker.CreateExecOptions) (*docker.Exec, error)
StartExec(string, docker.StartExecOptions) error
InspectExec(id string) (*docker.ExecInspect, error)
AttachToContainer(opts docker.AttachToContainerOptions) error
}
// KubeletContainerName encapsulates a pod name and a Kubernetes container name.
type KubeletContainerName struct {
PodFullName string
PodUID types.UID
ContainerName string
}
// DockerPuller is an abstract interface for testability. It abstracts image pull operations.
type DockerPuller interface {
Pull(image string, secrets []api.Secret) error
IsImagePresent(image string) (bool, error)
}
// dockerPuller is the default implementation of DockerPuller.
type dockerPuller struct {
client DockerInterface
keyring credentialprovider.DockerKeyring
}
type throttledDockerPuller struct {
puller dockerPuller
limiter util.RateLimiter
}
// newDockerPuller creates a new instance of the default implementation of DockerPuller.
func newDockerPuller(client DockerInterface, qps float32, burst int) DockerPuller {
dp := dockerPuller{
client: client,
keyring: credentialprovider.NewDockerKeyring(),
}
if qps == 0.0 {
return dp
}
return &throttledDockerPuller{
puller: dp,
limiter: util.NewTokenBucketRateLimiter(qps, burst),
}
}
func parseImageName(image string) (string, string) {
return parsers.ParseRepositoryTag(image)
}
func filterHTTPError(err error, image string) error {
// docker/docker/pull/11314 prints detailed error info for docker pull.
// When it hits 502, it returns a verbose html output including an inline svg,
// which makes the output of kubectl get pods much harder to parse.
// Here converts such verbose output to a concise one.
jerr, ok := err.(*jsonmessage.JSONError)
if ok && (jerr.Code == http.StatusBadGateway ||
jerr.Code == http.StatusServiceUnavailable ||
jerr.Code == http.StatusGatewayTimeout) {
glog.V(2).Infof("Pulling image %q failed: %v", image, err)
return kubecontainer.RegistryUnavailable
} else {
return err
}
}
func (p dockerPuller) Pull(image string, secrets []api.Secret) error {
repoToPull, tag := parseImageName(image)
// If no tag was specified, use the default "latest".
if len(tag) == 0 {
tag = "latest"
}
opts := docker.PullImageOptions{
Repository: repoToPull,
Tag: tag,
}
keyring, err := credentialprovider.MakeDockerKeyring(secrets, p.keyring)
if err != nil {
return err
}
creds, haveCredentials := keyring.Lookup(repoToPull)
if !haveCredentials {
glog.V(1).Infof("Pulling image %s without credentials", image)
err := p.client.PullImage(opts, docker.AuthConfiguration{})
if err == nil {
return nil
}
// Image spec: [<registry>/]<repository>/<image>[:<version>] so we count '/'
explicitRegistry := (strings.Count(image, "/") == 2)
// Hack, look for a private registry, and decorate the error with the lack of
// credentials. This is heuristic, and really probably could be done better
// by talking to the registry API directly from the kubelet here.
if explicitRegistry {
return fmt.Errorf("image pull failed for %s, this may be because there are no credentials on this request. details: (%v)", image, err)
}
return filterHTTPError(err, image)
}
var pullErrs []error
for _, currentCreds := range creds {
err := p.client.PullImage(opts, currentCreds)
// If there was no error, return success
if err == nil {
return nil
}
pullErrs = append(pullErrs, filterHTTPError(err, image))
}
return utilerrors.NewAggregate(pullErrs)
}
func (p throttledDockerPuller) Pull(image string, secrets []api.Secret) error {
if p.limiter.CanAccept() {
return p.puller.Pull(image, secrets)
}
return fmt.Errorf("pull QPS exceeded.")
}
func (p dockerPuller) IsImagePresent(image string) (bool, error) {
_, err := p.client.InspectImage(image)
if err == nil {
return true, nil
}
if err == docker.ErrNoSuchImage {
return false, nil
}
return false, err
}
func (p throttledDockerPuller) IsImagePresent(name string) (bool, error) {
return p.puller.IsImagePresent(name)
}
// TODO (random-liu) Almost never used, should we remove this?
// DockerContainers is a map of containers
type DockerContainers map[kubetypes.DockerID]*docker.APIContainers
func (c DockerContainers) FindPodContainer(podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {
for _, dockerContainer := range c {
if len(dockerContainer.Names) == 0 {
continue
}
// TODO(proppy): build the docker container name and do a map lookup instead?
dockerName, hash, err := ParseDockerName(dockerContainer.Names[0])
if err != nil {
continue
}
if dockerName.PodFullName == podFullName &&
(uid == "" || dockerName.PodUID == uid) &&
dockerName.ContainerName == containerName {
return dockerContainer, true, hash
}
}
return nil, false, 0
}
const containerNamePrefix = "k8s"
// Creates a name which can be reversed to identify both full pod name and container name.
func BuildDockerName(dockerName KubeletContainerName, container *api.Container) (string, string) {
containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16)
stableName := fmt.Sprintf("%s_%s_%s_%s",
containerNamePrefix,
containerName,
dockerName.PodFullName,
dockerName.PodUID)
return stableName, fmt.Sprintf("%s_%08x", stableName, rand.Uint32())
}
// Unpacks a container name, returning the pod full name and container name we would have used to
// construct the docker name. If we are unable to parse the name, an error is returned.
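// Hypothetical example of the expected format:
// "k8s_mycontainer.a1b2c3d4_mypod_default_1234-uid_0f1e2d3c" parses to
// container name "mycontainer", pod full name "mypod_default", pod UID "1234-uid".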
func ParseDockerName(name string) (dockerName *KubeletContainerName, hash uint64, err error) {
// For some reason docker appears to be appending '/' to names.
// If it's there, strip it.
name = strings.TrimPrefix(name, "/")
parts := strings.Split(name, "_")
if len(parts) == 0 || parts[0] != containerNamePrefix {
err = fmt.Errorf("failed to parse Docker container name %q into parts", name)
return nil, 0, err
}
if len(parts) < 6 {
// We expect at least 6 parts: the "k8s" prefix plus at least 5 fields.
// We may have more in the future. Anything with fewer fields than this
// is not something we can manage.
glog.Warningf("found a container with the %q prefix, but too few fields (%d): %q", containerNamePrefix, len(parts), name)
err = fmt.Errorf("Docker container name %q has less parts than expected %v", name, parts)
return nil, 0, err
}
nameParts := strings.Split(parts[1], ".")
containerName := nameParts[0]
if len(nameParts) > 1 {
hash, err = strconv.ParseUint(nameParts[1], 16, 32)
if err != nil {
glog.Warningf("invalid container hash %q in container %q", nameParts[1], name)
}
}
podFullName := parts[2] + "_" + parts[3]
podUID := types.UID(parts[4])
return &KubeletContainerName{podFullName, podUID, containerName}, hash, nil
}
func LogSymlink(containerLogsDir, podFullName, containerName, dockerId string) string {
return path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s.%s", podFullName, containerName, dockerId, LogSuffix))
}
// Get a *docker.Client, either using the endpoint passed in, or using
// DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT path per their spec
func getDockerClient(dockerEndpoint string) (*docker.Client, error) {
if len(dockerEndpoint) > 0 {
glog.Infof("Connecting to docker on %s", dockerEndpoint)
return docker.NewClient(dockerEndpoint)
}
return docker.NewClientFromEnv()
}
func ConnectToDockerOrDie(dockerEndpoint string) DockerInterface {
if dockerEndpoint == "fake://" {
return &FakeDockerClient{
VersionInfo: docker.Env{"ApiVersion=1.18"},
}
}
client, err := getDockerClient(dockerEndpoint)
if err != nil {
glog.Fatalf("Couldn't connect to docker: %v", err)
}
return client
}
// milliCPUToQuota converts milliCPU to CFS quota and period values
func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
// CFS quota is measured in two values:
// - cfs_period_us=100ms (the amount of time to measure usage across)
// - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
// so in the above example, you are limited to 20% of a single CPU
// for multi-cpu environments, you just scale equivalent amounts
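// Worked example with illustrative values: milliCPU=250 yields
// quota = (250 * 100000) / 1000 = 25000, i.e. 25ms of CPU time per 100ms period.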
if milliCPU == 0 {
// take the default behavior from docker
return
}
// we set the period to 100ms by default
period = quotaPeriod
// we then convert your milliCPU to a value normalized over a period
quota = (milliCPU * quotaPeriod) / milliCPUToCPU
return
}
func milliCPUToShares(milliCPU int64) int64 {
if milliCPU == 0 {
// Docker converts zero milliCPU to unset, which maps to kernel default
// for unset: 1024. Return 2 here to really match kernel default for
// zero milliCPU.
return minShares
}
// Conceptually (milliCPU / milliCPUToCPU) * sharesPerCPU, but factored to improve rounding.
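// e.g. (illustrative) milliCPU=500 yields (500 * 1024) / 1000 = 512 shares.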
shares := (milliCPU * sharesPerCPU) / milliCPUToCPU
if shares < minShares {
return minShares
}
return shares
}
// GetKubeletDockerContainers lists all containers or just the running ones.
// Returns a map of docker containers that we manage, keyed by container ID.
// TODO: Move this function with dockerCache to DockerManager.
func GetKubeletDockerContainers(client DockerInterface, allContainers bool) (DockerContainers, error) {
result := make(DockerContainers)
containers, err := client.ListContainers(docker.ListContainersOptions{All: allContainers})
if err != nil {
return nil, err
}
for i := range containers {
container := &containers[i]
if len(container.Names) == 0 {
continue
}
// Skip containers that we didn't create to allow users to manually
// spin up their own containers if they want.
// TODO(dchen1107): Remove the old separator "--" by end of Oct
if !strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"_") &&
!strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"--") {
glog.V(3).Infof("Docker Container: %s is not managed by kubelet.", container.Names[0])
continue
}
result[kubetypes.DockerID(container.ID)] = container
}
return result, nil
}
| pkg/kubelet/dockertools/docker.go | 1 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.9988566637039185,
0.07976603507995605,
0.00016689003678038716,
0.00020248585497029126,
0.26811864972114563
] |
{
"id": 6,
"code_window": [
"\t\tt.Errorf(\"Expected %#v, Got %#v\", fakeDocker.ContainerList, dockerContainers)\n",
"\t}\n",
"\tverifyCalls(t, fakeDocker, []string{\"list\"})\n",
"\tdockerContainer, found, _ := dockerContainers.FindPodContainer(\"qux_ns\", \"\", \"foo\")\n",
"\tif dockerContainer == nil || !found {\n",
"\t\tt.Errorf(\"Failed to find container %#v\", dockerContainer)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tdockerContainer, found, _ := findPodContainer(dockerContainers, \"qux_ns\", \"\", \"foo\")\n"
],
"file_path": "pkg/kubelet/dockertools/docker_test.go",
"type": "replace",
"edit_start_line_idx": 85
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"io"
"strings"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/kubectl"
"github.com/spf13/cobra"
)
// AddPrinterFlags adds printing related flags to a command (e.g. output format, no headers, template path)
func AddPrinterFlags(cmd *cobra.Command) {
cmd.Flags().StringP("output", "o", "", "Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md].")
cmd.Flags().String("output-version", "", "Output the formatted object with the given version (default api-version).")
cmd.Flags().Bool("no-headers", false, "When using the default output, don't print headers.")
// template shorthand -t is deprecated to support -t for --tty
// TODO: remove template flag shorthand -t
cmd.Flags().StringP("template", "t", "", "Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].")
cmd.Flags().MarkShorthandDeprecated("template", "please use --template instead")
cmd.Flags().String("sort-by", "", "If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string.")
cmd.Flags().BoolP("show-all", "a", false, "When printing, show all resources (default hide terminated pods.)")
}
// AddOutputFlagsForMutation adds output related flags to a command. Used by mutations only.
func AddOutputFlagsForMutation(cmd *cobra.Command) {
cmd.Flags().StringP("output", "o", "", "Output mode. Use \"-o name\" for shorter output (resource/name).")
}
// PrintSuccess prints message after finishing mutating operations
func PrintSuccess(mapper meta.RESTMapper, shortOutput bool, out io.Writer, resource string, name string, operation string) {
resource, _ = mapper.ResourceSingularizer(resource)
if shortOutput {
// -o name: prints resource/name
if len(resource) > 0 {
fmt.Fprintf(out, "%s/%s\n", resource, name)
} else {
fmt.Fprintf(out, "%s\n", name)
}
} else {
// understandable output by default
if len(resource) > 0 {
fmt.Fprintf(out, "%s \"%s\" %s\n", resource, name, operation)
} else {
fmt.Fprintf(out, "\"%s\" %s\n", name, operation)
}
}
}
// ValidateOutputArgs validates -o flag args for mutations
func ValidateOutputArgs(cmd *cobra.Command) error {
outputMode := GetFlagString(cmd, "output")
if outputMode != "" && outputMode != "name" {
return UsageError(cmd, "Unexpected -o output mode: %v. We only support '-o name'.", outputMode)
}
return nil
}
// OutputVersion returns the preferred output version for generic content (JSON, YAML, or templates)
func OutputVersion(cmd *cobra.Command, defaultVersion string) string {
outputVersion := GetFlagString(cmd, "output-version")
if len(outputVersion) == 0 {
outputVersion = defaultVersion
}
return outputVersion
}
// PrinterForCommand returns the default printer for this command.
// Requires that printer flags have been added to cmd (see AddPrinterFlags).
func PrinterForCommand(cmd *cobra.Command) (kubectl.ResourcePrinter, bool, error) {
outputFormat := GetFlagString(cmd, "output")
// templates are logically optional for specifying a format.
// TODO once https://github.com/kubernetes/kubernetes/issues/12668 is fixed, this should fall back to GetFlagString
templateFile, _ := cmd.Flags().GetString("template")
if len(outputFormat) == 0 && len(templateFile) != 0 {
outputFormat = "template"
}
templateFormat := []string{"go-template=", "go-template-file=", "jsonpath=", "jsonpath-file="}
for _, format := range templateFormat {
if strings.HasPrefix(outputFormat, format) {
templateFile = outputFormat[len(format):]
outputFormat = format[:len(format)-1]
}
}
printer, generic, err := kubectl.GetPrinter(outputFormat, templateFile)
if err != nil {
return nil, generic, err
}
return maybeWrapSortingPrinter(cmd, printer), generic, nil
}
func maybeWrapSortingPrinter(cmd *cobra.Command, printer kubectl.ResourcePrinter) kubectl.ResourcePrinter {
sorting, err := cmd.Flags().GetString("sort-by")
if err != nil {
// error can happen on missing flag or bad flag type. In either case, this command didn't intent to sort
return printer
}
if len(sorting) != 0 {
return &kubectl.SortingPrinter{
Delegate: printer,
SortField: fmt.Sprintf("{%s}", sorting),
}
}
return printer
}
| pkg/kubectl/cmd/util/printing.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.0002959585399366915,
0.0001909176935441792,
0.0001692828518571332,
0.00017816713079810143,
0.00003333334461785853
] |
{
"id": 6,
"code_window": [
"\t\tt.Errorf(\"Expected %#v, Got %#v\", fakeDocker.ContainerList, dockerContainers)\n",
"\t}\n",
"\tverifyCalls(t, fakeDocker, []string{\"list\"})\n",
"\tdockerContainer, found, _ := dockerContainers.FindPodContainer(\"qux_ns\", \"\", \"foo\")\n",
"\tif dockerContainer == nil || !found {\n",
"\t\tt.Errorf(\"Failed to find container %#v\", dockerContainer)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tdockerContainer, found, _ := findPodContainer(dockerContainers, \"qux_ns\", \"\", \"foo\")\n"
],
"file_path": "pkg/kubelet/dockertools/docker_test.go",
"type": "replace",
"edit_start_line_idx": 85
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestUnversionedWarning(t *testing.T) {
beginMark := beginMungeTag(unversionedWarningTag)
endMark := endMungeTag(unversionedWarningTag)
warningString := makeUnversionedWarning("filename.md").String()
warningBlock := beginMark + "\n" + warningString + endMark + "\n"
var cases = []struct {
in string
expected string
}{
{"", warningBlock},
{
"Foo\nBar\n",
warningBlock + "\nFoo\nBar\n",
},
{
"Foo\n<!-- TAG IS_VERSIONED -->\nBar",
"Foo\n<!-- TAG IS_VERSIONED -->\nBar",
},
{
beginMark + "\n" + endMark + "\n",
warningBlock,
},
{
beginMark + "\n" + "something\n" + endMark + "\n",
warningBlock,
},
{
"Foo\n" + beginMark + "\n" + endMark + "\nBar\n",
"Foo\n" + warningBlock + "Bar\n",
},
{
"Foo\n" + warningBlock + "Bar\n",
"Foo\n" + warningBlock + "Bar\n",
},
}
for i, c := range cases {
in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := updateUnversionedWarning("filename.md", in)
assert.NoError(t, err)
if !expected.Equal(actual) {
t.Errorf("case[%d]: expected %v got %v", i, expected.String(), actual.String())
}
}
}
| cmd/mungedocs/unversioned_warning_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00018183856445830315,
0.00017522023699712008,
0.0001679399429121986,
0.00017643944011069834,
0.000004838720087718684
] |
{
"id": 6,
"code_window": [
"\t\tt.Errorf(\"Expected %#v, Got %#v\", fakeDocker.ContainerList, dockerContainers)\n",
"\t}\n",
"\tverifyCalls(t, fakeDocker, []string{\"list\"})\n",
"\tdockerContainer, found, _ := dockerContainers.FindPodContainer(\"qux_ns\", \"\", \"foo\")\n",
"\tif dockerContainer == nil || !found {\n",
"\t\tt.Errorf(\"Failed to find container %#v\", dockerContainer)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tdockerContainer, found, _ := findPodContainer(dockerContainers, \"qux_ns\", \"\", \"foo\")\n"
],
"file_path": "pkg/kubelet/dockertools/docker_test.go",
"type": "replace",
"edit_start_line_idx": 85
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"io"
"net"
"os"
"reflect"
"strings"
"testing"
"github.com/golang/glog"
"golang.org/x/crypto/ssh"
)
type testSSHServer struct {
Host string
Port string
Type string
Data []byte
PrivateKey []byte
PublicKey []byte
}
func runTestSSHServer(user, password string) (*testSSHServer, error) {
result := &testSSHServer{}
// Largely derived from https://godoc.org/golang.org/x/crypto/ssh#example-NewServerConn
config := &ssh.ServerConfig{
PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
if c.User() == user && string(pass) == password {
return nil, nil
}
return nil, fmt.Errorf("password rejected for %s", c.User())
},
PublicKeyCallback: func(c ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
result.Type = key.Type()
result.Data = ssh.MarshalAuthorizedKey(key)
return nil, nil
},
}
privateKey, publicKey, err := GenerateKey(2048)
if err != nil {
return nil, err
}
privateBytes := EncodePrivateKey(privateKey)
signer, err := ssh.ParsePrivateKey(privateBytes)
if err != nil {
return nil, err
}
config.AddHostKey(signer)
result.PrivateKey = privateBytes
publicBytes, err := EncodePublicKey(publicKey)
if err != nil {
return nil, err
}
result.PublicKey = publicBytes
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return nil, err
}
host, port, err := net.SplitHostPort(listener.Addr().String())
if err != nil {
return nil, err
}
result.Host = host
result.Port = port
go func() {
// TODO: return this port.
defer listener.Close()
conn, err := listener.Accept()
if err != nil {
glog.Errorf("Failed to accept: %v", err)
}
_, chans, reqs, err := ssh.NewServerConn(conn, config)
if err != nil {
glog.Errorf("Failed handshake: %v", err)
}
go ssh.DiscardRequests(reqs)
for newChannel := range chans {
if newChannel.ChannelType() != "direct-tcpip" {
newChannel.Reject(ssh.UnknownChannelType, fmt.Sprintf("unknown channel type: %s", newChannel.ChannelType()))
continue
}
channel, requests, err := newChannel.Accept()
if err != nil {
glog.Errorf("Failed to accept channel: %v", err)
}
for req := range requests {
glog.Infof("Got request: %v", req)
}
channel.Close()
}
}()
return result, nil
}
func TestSSHTunnel(t *testing.T) {
private, public, err := GenerateKey(2048)
if err != nil {
t.Errorf("unexpected error: %v", err)
t.FailNow()
}
server, err := runTestSSHServer("foo", "bar")
if err != nil {
t.Errorf("unexpected error: %v", err)
t.FailNow()
}
privateData := EncodePrivateKey(private)
tunnel, err := NewSSHTunnelFromBytes("foo", privateData, server.Host)
if err != nil {
t.Errorf("unexpected error: %v", err)
t.FailNow()
}
tunnel.SSHPort = server.Port
if err := tunnel.Open(); err != nil {
t.Errorf("unexpected error: %v", err)
t.FailNow()
}
_, err = tunnel.Dial("tcp", "127.0.0.1:8080")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if server.Type != "ssh-rsa" {
t.Errorf("expected %s, got %s", "ssh-rsa", server.Type)
}
publicData, err := EncodeSSHKey(public)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(server.Data, publicData) {
t.Errorf("expected %s, got %s", string(server.Data), string(privateData))
}
if err := tunnel.Close(); err != nil {
t.Errorf("unexpected error: %v", err)
}
}
type mockSSHDialer struct {
network string
addr string
config *ssh.ClientConfig
}
func (d *mockSSHDialer) Dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
d.network = network
d.addr = addr
d.config = config
return nil, fmt.Errorf("mock error from Dial")
}
type mockSigner struct {
}
func (s *mockSigner) PublicKey() ssh.PublicKey {
panic("mockSigner.PublicKey not implemented")
}
func (s *mockSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) {
panic("mockSigner.Sign not implemented")
}
func TestSSHUser(t *testing.T) {
signer := &mockSigner{}
table := []struct {
title string
user string
host string
signer ssh.Signer
command string
expectUser string
}{
{
title: "all values provided",
user: "testuser",
host: "testhost",
signer: signer,
command: "uptime",
expectUser: "testuser",
},
{
title: "empty user defaults to GetEnv(USER)",
user: "",
host: "testhost",
signer: signer,
command: "uptime",
expectUser: os.Getenv("USER"),
},
}
for _, item := range table {
dialer := &mockSSHDialer{}
_, _, _, err := runSSHCommand(dialer, item.command, item.user, item.host, item.signer)
if err == nil {
t.Errorf("expected error (as mock returns error); did not get one")
}
errString := err.Error()
if !strings.HasPrefix(errString, fmt.Sprintf("error getting SSH client to %s@%s:", item.expectUser, item.host)) {
t.Errorf("unexpected error: %v", errString)
}
if dialer.network != "tcp" {
t.Errorf("unexpected network: %v", dialer.network)
}
if dialer.config.User != item.expectUser {
t.Errorf("unexpected user: %v", dialer.config.User)
}
if len(dialer.config.Auth) != 1 {
t.Errorf("unexpected auth: %v", dialer.config.Auth)
}
// (No way to test Auth - nothing exported?)
}
}
| pkg/util/ssh_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00047957076458260417,
0.0002043975400738418,
0.00016516604227945209,
0.00017451438179705292,
0.00008206519851228222
] |
{
"id": 7,
"code_window": [
"\t\tt.Errorf(\"Failed to find container %#v\", dockerContainer)\n",
"\t}\n",
"\n",
"\tfakeDocker.ClearCalls()\n",
"\tdockerContainer, found, _ = dockerContainers.FindPodContainer(\"foobar\", \"\", \"foo\")\n",
"\tverifyCalls(t, fakeDocker, []string{})\n",
"\tif dockerContainer != nil || found {\n",
"\t\tt.Errorf(\"Should not have found container %#v\", dockerContainer)\n",
"\t}\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdockerContainer, found, _ = findPodContainer(dockerContainers, \"foobar\", \"\", \"foo\")\n"
],
"file_path": "pkg/kubelet/dockertools/docker_test.go",
"type": "replace",
"edit_start_line_idx": 91
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"fmt"
"math/rand"
"net/http"
"path"
"strconv"
"strings"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/parsers"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/leaky"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
)
const (
PodInfraContainerName = leaky.PodInfraContainerName
DockerPrefix = "docker://"
PodInfraContainerImage = "beta.gcr.io/google_containers/pause:2.0"
LogSuffix = "log"
)
const (
// Taken from lmctfy https://github.com/google/lmctfy/blob/master/lmctfy/controllers/cpu_controller.cc
minShares = 2
sharesPerCPU = 1024
milliCPUToCPU = 1000
// 100000 is equivalent to 100ms
quotaPeriod = 100000
)
// DockerInterface is an abstract interface for testability. It abstracts the interface of docker.Client.
type DockerInterface interface {
ListContainers(options docker.ListContainersOptions) ([]docker.APIContainers, error)
InspectContainer(id string) (*docker.Container, error)
CreateContainer(docker.CreateContainerOptions) (*docker.Container, error)
StartContainer(id string, hostConfig *docker.HostConfig) error
StopContainer(id string, timeout uint) error
RemoveContainer(opts docker.RemoveContainerOptions) error
InspectImage(image string) (*docker.Image, error)
ListImages(opts docker.ListImagesOptions) ([]docker.APIImages, error)
PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) error
RemoveImage(image string) error
Logs(opts docker.LogsOptions) error
Version() (*docker.Env, error)
Info() (*docker.Env, error)
CreateExec(docker.CreateExecOptions) (*docker.Exec, error)
StartExec(string, docker.StartExecOptions) error
InspectExec(id string) (*docker.ExecInspect, error)
AttachToContainer(opts docker.AttachToContainerOptions) error
}
// KubeletContainerName encapsulates a pod name and a Kubernetes container name.
type KubeletContainerName struct {
PodFullName string
PodUID types.UID
ContainerName string
}
// DockerPuller is an abstract interface for testability. It abstracts image pull operations.
type DockerPuller interface {
Pull(image string, secrets []api.Secret) error
IsImagePresent(image string) (bool, error)
}
// dockerPuller is the default implementation of DockerPuller.
type dockerPuller struct {
client DockerInterface
keyring credentialprovider.DockerKeyring
}
type throttledDockerPuller struct {
puller dockerPuller
limiter util.RateLimiter
}
// newDockerPuller creates a new instance of the default implementation of DockerPuller.
func newDockerPuller(client DockerInterface, qps float32, burst int) DockerPuller {
dp := dockerPuller{
client: client,
keyring: credentialprovider.NewDockerKeyring(),
}
if qps == 0.0 {
return dp
}
return &throttledDockerPuller{
puller: dp,
limiter: util.NewTokenBucketRateLimiter(qps, burst),
}
}
func parseImageName(image string) (string, string) {
return parsers.ParseRepositoryTag(image)
}
func filterHTTPError(err error, image string) error {
// docker/docker/pull/11314 prints detailed error info for docker pull.
// When it hits 502, it returns a verbose html output including an inline svg,
// which makes the output of kubectl get pods much harder to parse.
// Here converts such verbose output to a concise one.
jerr, ok := err.(*jsonmessage.JSONError)
if ok && (jerr.Code == http.StatusBadGateway ||
jerr.Code == http.StatusServiceUnavailable ||
jerr.Code == http.StatusGatewayTimeout) {
glog.V(2).Infof("Pulling image %q failed: %v", image, err)
return kubecontainer.RegistryUnavailable
} else {
return err
}
}
func (p dockerPuller) Pull(image string, secrets []api.Secret) error {
repoToPull, tag := parseImageName(image)
// If no tag was specified, use the default "latest".
if len(tag) == 0 {
tag = "latest"
}
opts := docker.PullImageOptions{
Repository: repoToPull,
Tag: tag,
}
keyring, err := credentialprovider.MakeDockerKeyring(secrets, p.keyring)
if err != nil {
return err
}
creds, haveCredentials := keyring.Lookup(repoToPull)
if !haveCredentials {
glog.V(1).Infof("Pulling image %s without credentials", image)
err := p.client.PullImage(opts, docker.AuthConfiguration{})
if err == nil {
return nil
}
// Image spec: [<registry>/]<repository>/<image>[:<version>] so we count '/'
explicitRegistry := (strings.Count(image, "/") == 2)
// Hack, look for a private registry, and decorate the error with the lack of
// credentials. This is heuristic, and really probably could be done better
// by talking to the registry API directly from the kubelet here.
if explicitRegistry {
return fmt.Errorf("image pull failed for %s, this may be because there are no credentials on this request. details: (%v)", image, err)
}
return filterHTTPError(err, image)
}
var pullErrs []error
for _, currentCreds := range creds {
err := p.client.PullImage(opts, currentCreds)
// If there was no error, return success
if err == nil {
return nil
}
pullErrs = append(pullErrs, filterHTTPError(err, image))
}
return utilerrors.NewAggregate(pullErrs)
}
func (p throttledDockerPuller) Pull(image string, secrets []api.Secret) error {
if p.limiter.CanAccept() {
return p.puller.Pull(image, secrets)
}
return fmt.Errorf("pull QPS exceeded.")
}
func (p dockerPuller) IsImagePresent(image string) (bool, error) {
_, err := p.client.InspectImage(image)
if err == nil {
return true, nil
}
if err == docker.ErrNoSuchImage {
return false, nil
}
return false, err
}
func (p throttledDockerPuller) IsImagePresent(name string) (bool, error) {
return p.puller.IsImagePresent(name)
}
// TODO (random-liu) Almost never used, should we remove this?
// DockerContainers is a map of containers
type DockerContainers map[kubetypes.DockerID]*docker.APIContainers
func (c DockerContainers) FindPodContainer(podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {
for _, dockerContainer := range c {
if len(dockerContainer.Names) == 0 {
continue
}
// TODO(proppy): build the docker container name and do a map lookup instead?
dockerName, hash, err := ParseDockerName(dockerContainer.Names[0])
if err != nil {
continue
}
if dockerName.PodFullName == podFullName &&
(uid == "" || dockerName.PodUID == uid) &&
dockerName.ContainerName == containerName {
return dockerContainer, true, hash
}
}
return nil, false, 0
}
const containerNamePrefix = "k8s"
// Creates a name which can be reversed to identify both full pod name and container name.
func BuildDockerName(dockerName KubeletContainerName, container *api.Container) (string, string) {
containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16)
stableName := fmt.Sprintf("%s_%s_%s_%s",
containerNamePrefix,
containerName,
dockerName.PodFullName,
dockerName.PodUID)
return stableName, fmt.Sprintf("%s_%08x", stableName, rand.Uint32())
}
// Unpacks a container name, returning the pod full name and container name we would have used to
// construct the docker name. If we are unable to parse the name, an error is returned.
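// Hypothetical example of the expected format:
// "k8s_mycontainer.a1b2c3d4_mypod_default_1234-uid_0f1e2d3c" parses to
// container name "mycontainer", pod full name "mypod_default", pod UID "1234-uid".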
func ParseDockerName(name string) (dockerName *KubeletContainerName, hash uint64, err error) {
// For some reason docker appears to be appending '/' to names.
// If it's there, strip it.
name = strings.TrimPrefix(name, "/")
parts := strings.Split(name, "_")
if len(parts) == 0 || parts[0] != containerNamePrefix {
err = fmt.Errorf("failed to parse Docker container name %q into parts", name)
return nil, 0, err
}
if len(parts) < 6 {
// We expect at least 6 parts: the "k8s" prefix plus at least 5 fields.
// We may have more in the future. Anything with fewer fields than this
// is not something we can manage.
glog.Warningf("found a container with the %q prefix, but too few fields (%d): %q", containerNamePrefix, len(parts), name)
err = fmt.Errorf("Docker container name %q has less parts than expected %v", name, parts)
return nil, 0, err
}
nameParts := strings.Split(parts[1], ".")
containerName := nameParts[0]
if len(nameParts) > 1 {
hash, err = strconv.ParseUint(nameParts[1], 16, 32)
if err != nil {
glog.Warningf("invalid container hash %q in container %q", nameParts[1], name)
}
}
podFullName := parts[2] + "_" + parts[3]
podUID := types.UID(parts[4])
return &KubeletContainerName{podFullName, podUID, containerName}, hash, nil
}
func LogSymlink(containerLogsDir, podFullName, containerName, dockerId string) string {
return path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s.%s", podFullName, containerName, dockerId, LogSuffix))
}
// Get a *docker.Client, either using the endpoint passed in, or using
// DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT path per their spec
func getDockerClient(dockerEndpoint string) (*docker.Client, error) {
if len(dockerEndpoint) > 0 {
glog.Infof("Connecting to docker on %s", dockerEndpoint)
return docker.NewClient(dockerEndpoint)
}
return docker.NewClientFromEnv()
}
func ConnectToDockerOrDie(dockerEndpoint string) DockerInterface {
if dockerEndpoint == "fake://" {
return &FakeDockerClient{
VersionInfo: docker.Env{"ApiVersion=1.18"},
}
}
client, err := getDockerClient(dockerEndpoint)
if err != nil {
glog.Fatalf("Couldn't connect to docker: %v", err)
}
return client
}
// milliCPUToQuota converts milliCPU to CFS quota and period values
func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
// CFS quota is measured in two values:
// - cfs_period_us=100ms (the amount of time to measure usage across)
// - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
// so in the above example, you are limited to 20% of a single CPU
// for multi-cpu environments, you just scale equivalent amounts
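// Worked example with illustrative values: milliCPU=250 yields
// quota = (250 * 100000) / 1000 = 25000, i.e. 25ms of CPU time per 100ms period.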
if milliCPU == 0 {
// take the default behavior from docker
return
}
// we set the period to 100ms by default
period = quotaPeriod
// we then convert your milliCPU to a value normalized over a period
quota = (milliCPU * quotaPeriod) / milliCPUToCPU
return
}
func milliCPUToShares(milliCPU int64) int64 {
if milliCPU == 0 {
// Docker converts zero milliCPU to unset, which maps to kernel default
// for unset: 1024. Return 2 here to really match kernel default for
// zero milliCPU.
return minShares
}
// Conceptually (milliCPU / milliCPUToCPU) * sharesPerCPU, but factored to improve rounding.
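// e.g. (illustrative) milliCPU=500 yields (500 * 1024) / 1000 = 512 shares.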
shares := (milliCPU * sharesPerCPU) / milliCPUToCPU
if shares < minShares {
return minShares
}
return shares
}
// GetKubeletDockerContainers lists all containers or just the running ones.
// Returns a map of docker containers that we manage, keyed by container ID.
// TODO: Move this function with dockerCache to DockerManager.
func GetKubeletDockerContainers(client DockerInterface, allContainers bool) (DockerContainers, error) {
result := make(DockerContainers)
containers, err := client.ListContainers(docker.ListContainersOptions{All: allContainers})
if err != nil {
return nil, err
}
for i := range containers {
container := &containers[i]
if len(container.Names) == 0 {
continue
}
// Skip containers that we didn't create to allow users to manually
// spin up their own containers if they want.
// TODO(dchen1107): Remove the old separator "--" by end of Oct
if !strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"_") &&
!strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"--") {
glog.V(3).Infof("Docker Container: %s is not managed by kubelet.", container.Names[0])
continue
}
result[kubetypes.DockerID(container.ID)] = container
}
return result, nil
}
| pkg/kubelet/dockertools/docker.go | 1 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.9982447624206543,
0.05317812040448189,
0.00016578013310208917,
0.00019479729235172272,
0.2227240800857544
] |
{
"id": 7,
"code_window": [
"\t\tt.Errorf(\"Failed to find container %#v\", dockerContainer)\n",
"\t}\n",
"\n",
"\tfakeDocker.ClearCalls()\n",
"\tdockerContainer, found, _ = dockerContainers.FindPodContainer(\"foobar\", \"\", \"foo\")\n",
"\tverifyCalls(t, fakeDocker, []string{})\n",
"\tif dockerContainer != nil || found {\n",
"\t\tt.Errorf(\"Should not have found container %#v\", dockerContainer)\n",
"\t}\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdockerContainer, found, _ = findPodContainer(dockerContainers, \"foobar\", \"\", \"foo\")\n"
],
"file_path": "pkg/kubelet/dockertools/docker_test.go",
"type": "replace",
"edit_start_line_idx": 91
} | // Copyright 2012 Gary Burd
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package redis
import (
"errors"
"fmt"
"strconv"
)
// ErrNil indicates that a reply value is nil.
var ErrNil = errors.New("redigo: nil returned")
// Int is a helper that converts a command reply to an integer. If err is not
// equal to nil, then Int returns 0, err. Otherwise, Int converts the
// reply to an int as follows:
//
// Reply type Result
// integer int(reply), nil
// bulk string parsed reply, nil
// nil 0, ErrNil
// other 0, error
func Int(reply interface{}, err error) (int, error) {
if err != nil {
return 0, err
}
switch reply := reply.(type) {
case int64:
x := int(reply)
if int64(x) != reply {
return 0, strconv.ErrRange
}
return x, nil
case []byte:
n, err := strconv.ParseInt(string(reply), 10, 0)
return int(n), err
case nil:
return 0, ErrNil
case Error:
return 0, reply
}
return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply)
}
// Int64 is a helper that converts a command reply to a 64 bit integer. If err is
// not equal to nil, then Int64 returns 0, err. Otherwise, Int64 converts the
// reply to an int64 as follows:
//
// Reply type Result
// integer reply, nil
// bulk string parsed reply, nil
// nil 0, ErrNil
// other 0, error
func Int64(reply interface{}, err error) (int64, error) {
if err != nil {
return 0, err
}
switch reply := reply.(type) {
case int64:
return reply, nil
case []byte:
n, err := strconv.ParseInt(string(reply), 10, 64)
return n, err
case nil:
return 0, ErrNil
case Error:
return 0, reply
}
return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply)
}
var errNegativeInt = errors.New("redigo: unexpected value for Uint64")
// Uint64 is a helper that converts a command reply to a 64 bit unsigned integer.
// If err is not equal to nil, then Uint64 returns 0, err. Otherwise, Uint64
// converts the reply to a uint64 as follows:
//
// Reply type Result
// integer reply, nil
// bulk string parsed reply, nil
// nil 0, ErrNil
// other 0, error
func Uint64(reply interface{}, err error) (uint64, error) {
if err != nil {
return 0, err
}
switch reply := reply.(type) {
case int64:
if reply < 0 {
return 0, errNegativeInt
}
return uint64(reply), nil
case []byte:
n, err := strconv.ParseUint(string(reply), 10, 64)
return n, err
case nil:
return 0, ErrNil
case Error:
return 0, reply
}
return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply)
}
// Float64 is a helper that converts a command reply to 64 bit float. If err is
// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts
// the reply to a float64 as follows:
//
// Reply type Result
// bulk string parsed reply, nil
// nil 0, ErrNil
// other 0, error
func Float64(reply interface{}, err error) (float64, error) {
if err != nil {
return 0, err
}
switch reply := reply.(type) {
case []byte:
n, err := strconv.ParseFloat(string(reply), 64)
return n, err
case nil:
return 0, ErrNil
case Error:
return 0, reply
}
return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply)
}
// String is a helper that converts a command reply to a string. If err is not
// equal to nil, then String returns "", err. Otherwise String converts the
// reply to a string as follows:
//
// Reply type Result
// bulk string string(reply), nil
// simple string reply, nil
// nil "", ErrNil
// other "", error
func String(reply interface{}, err error) (string, error) {
if err != nil {
return "", err
}
switch reply := reply.(type) {
case []byte:
return string(reply), nil
case string:
return reply, nil
case nil:
return "", ErrNil
case Error:
return "", reply
}
return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply)
}
// Bytes is a helper that converts a command reply to a slice of bytes. If err
// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts
// the reply to a slice of bytes as follows:
//
// Reply type Result
// bulk string reply, nil
// simple string []byte(reply), nil
// nil nil, ErrNil
// other nil, error
func Bytes(reply interface{}, err error) ([]byte, error) {
if err != nil {
return nil, err
}
switch reply := reply.(type) {
case []byte:
return reply, nil
case string:
return []byte(reply), nil
case nil:
return nil, ErrNil
case Error:
return nil, reply
}
return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply)
}
// Bool is a helper that converts a command reply to a boolean. If err is not
// equal to nil, then Bool returns false, err. Otherwise Bool converts the
// reply to boolean as follows:
//
// Reply type Result
// integer value != 0, nil
// bulk string strconv.ParseBool(reply)
// nil false, ErrNil
// other false, error
func Bool(reply interface{}, err error) (bool, error) {
if err != nil {
return false, err
}
switch reply := reply.(type) {
case int64:
return reply != 0, nil
case []byte:
return strconv.ParseBool(string(reply))
case nil:
return false, ErrNil
case Error:
return false, reply
}
return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply)
}
// MultiBulk is deprecated. Use Values.
func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) }
// Values is a helper that converts an array command reply to a []interface{}.
// If err is not equal to nil, then Values returns nil, err. Otherwise, Values
// converts the reply as follows:
//
// Reply type Result
// array reply, nil
// nil nil, ErrNil
// other nil, error
func Values(reply interface{}, err error) ([]interface{}, error) {
if err != nil {
return nil, err
}
switch reply := reply.(type) {
case []interface{}:
return reply, nil
case nil:
return nil, ErrNil
case Error:
return nil, reply
}
return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply)
}
// Strings is a helper that converts an array command reply to a []string. If
// err is not equal to nil, then Strings returns nil, err. Nil array items are
// converted to "" in the output slice. Strings returns an error if an array
// item is not a bulk string or nil.
func Strings(reply interface{}, err error) ([]string, error) {
if err != nil {
return nil, err
}
switch reply := reply.(type) {
case []interface{}:
result := make([]string, len(reply))
for i := range reply {
if reply[i] == nil {
continue
}
p, ok := reply[i].([]byte)
if !ok {
return nil, fmt.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i])
}
result[i] = string(p)
}
return result, nil
case nil:
return nil, ErrNil
case Error:
return nil, reply
}
return nil, fmt.Errorf("redigo: unexpected type for Strings, got type %T", reply)
}
// Ints is a helper that converts an array command reply to a []int. If
// err is not equal to nil, then Ints returns nil, err.
func Ints(reply interface{}, err error) ([]int, error) {
var ints []int
if reply == nil {
return ints, ErrNil
}
values, err := Values(reply, err)
if err != nil {
return ints, err
}
if err := ScanSlice(values, &ints); err != nil {
return ints, err
}
return ints, nil
}
// StringMap is a helper that converts an array of strings (alternating key, value)
// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format.
// Requires an even number of values in result.
func StringMap(result interface{}, err error) (map[string]string, error) {
values, err := Values(result, err)
if err != nil {
return nil, err
}
if len(values)%2 != 0 {
return nil, errors.New("redigo: StringMap expects even number of values result")
}
m := make(map[string]string, len(values)/2)
for i := 0; i < len(values); i += 2 {
key, okKey := values[i].([]byte)
value, okValue := values[i+1].([]byte)
if !okKey || !okValue {
return nil, errors.New("redigo: ScanMap key not a bulk string value")
}
m[string(key)] = string(value)
}
return m, nil
}
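// Illustrative (untested) usage sketch for the reply helpers above; "conn" is
// assumed to be a redis.Conn obtained elsewhere and is not defined in this file:
//
//	fields, err := StringMap(conn.Do("HGETALL", "myhash")) // field -> value
//	visits, err := Int(conn.Do("INCR", "visits"))
//	names, err := Strings(conn.Do("LRANGE", "names", 0, -1))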
| Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.0014639552682638168,
0.0002120483695762232,
0.00016039203910622746,
0.00017215308616869152,
0.00022489311231765896
] |
{
"id": 7,
"code_window": [
"\t\tt.Errorf(\"Failed to find container %#v\", dockerContainer)\n",
"\t}\n",
"\n",
"\tfakeDocker.ClearCalls()\n",
"\tdockerContainer, found, _ = dockerContainers.FindPodContainer(\"foobar\", \"\", \"foo\")\n",
"\tverifyCalls(t, fakeDocker, []string{})\n",
"\tif dockerContainer != nil || found {\n",
"\t\tt.Errorf(\"Should not have found container %#v\", dockerContainer)\n",
"\t}\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdockerContainer, found, _ = findPodContainer(dockerContainers, \"foobar\", \"\", \"foo\")\n"
],
"file_path": "pkg/kubelet/dockertools/docker_test.go",
"type": "replace",
"edit_start_line_idx": 91
} | language: go
go:
- 1.3
install:
- go get -v ./...
- go get github.com/onsi/ginkgo
- go install github.com/onsi/ginkgo/ginkgo
script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
| Godeps/_workspace/src/github.com/onsi/gomega/.travis.yml | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00017850857693701982,
0.00017685344209894538,
0.0001751982927089557,
0.00017685344209894538,
0.00000165514211403206
] |
{
"id": 7,
"code_window": [
"\t\tt.Errorf(\"Failed to find container %#v\", dockerContainer)\n",
"\t}\n",
"\n",
"\tfakeDocker.ClearCalls()\n",
"\tdockerContainer, found, _ = dockerContainers.FindPodContainer(\"foobar\", \"\", \"foo\")\n",
"\tverifyCalls(t, fakeDocker, []string{})\n",
"\tif dockerContainer != nil || found {\n",
"\t\tt.Errorf(\"Should not have found container %#v\", dockerContainer)\n",
"\t}\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdockerContainer, found, _ = findPodContainer(dockerContainers, \"foobar\", \"\", \"foo\")\n"
],
"file_path": "pkg/kubelet/dockertools/docker_test.go",
"type": "replace",
"edit_start_line_idx": 91
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package secret contains the internal representation of secret volumes.
package secret
| pkg/volume/secret/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00018059673311654478,
0.000180502247530967,
0.0001804077619453892,
0.000180502247530967,
9.448558557778597e-8
] |
{
"id": 8,
"code_window": [
"\t\tHash: hash,\n",
"\t}\n",
"}\n",
"\n",
"func dockerContainersToPod(containers DockerContainers) kubecontainer.Pod {\n",
"\tvar pod kubecontainer.Pod\n",
"\tfor _, c := range containers {\n",
"\t\tdockerName, hash, err := ParseDockerName(c.Names[0])\n",
"\t\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func dockerContainersToPod(containers []*docker.APIContainers) kubecontainer.Pod {\n"
],
"file_path": "pkg/kubelet/dockertools/manager_test.go",
"type": "replace",
"edit_start_line_idx": 358
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"encoding/json"
"fmt"
"hash/adler32"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"github.com/docker/docker/pkg/jsonmessage"
docker "github.com/fsouza/go-dockerclient"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/network"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
)
func verifyCalls(t *testing.T, fakeDocker *FakeDockerClient, calls []string) {
fakeDocker.Lock()
defer fakeDocker.Unlock()
verifyStringArrayEquals(t, fakeDocker.called, calls)
}
func verifyStringArrayEquals(t *testing.T, actual, expected []string) {
invalid := len(actual) != len(expected)
if !invalid {
for ix, value := range actual {
if expected[ix] != value {
invalid = true
}
}
}
if invalid {
t.Errorf("Expected: %#v, Actual: %#v", expected, actual)
}
}
func TestGetContainerID(t *testing.T) {
fakeDocker := &FakeDockerClient{}
fakeDocker.ContainerList = []docker.APIContainers{
{
ID: "foobar",
Names: []string{"/k8s_foo_qux_ns_1234_42"},
},
{
ID: "barbar",
Names: []string{"/k8s_bar_qux_ns_2565_42"},
},
}
fakeDocker.Container = &docker.Container{
ID: "foobar",
}
dockerContainers, err := GetKubeletDockerContainers(fakeDocker, false)
if err != nil {
t.Errorf("Expected no error, Got %#v", err)
}
if len(dockerContainers) != 2 {
t.Errorf("Expected %#v, Got %#v", fakeDocker.ContainerList, dockerContainers)
}
verifyCalls(t, fakeDocker, []string{"list"})
dockerContainer, found, _ := dockerContainers.FindPodContainer("qux_ns", "", "foo")
if dockerContainer == nil || !found {
t.Errorf("Failed to find container %#v", dockerContainer)
}
fakeDocker.ClearCalls()
dockerContainer, found, _ = dockerContainers.FindPodContainer("foobar", "", "foo")
verifyCalls(t, fakeDocker, []string{})
if dockerContainer != nil || found {
t.Errorf("Should not have found container %#v", dockerContainer)
}
}
func verifyPackUnpack(t *testing.T, podNamespace, podUID, podName, containerName string) {
container := &api.Container{Name: containerName}
hasher := adler32.New()
util.DeepHashObject(hasher, *container)
computedHash := uint64(hasher.Sum32())
podFullName := fmt.Sprintf("%s_%s", podName, podNamespace)
_, name := BuildDockerName(KubeletContainerName{podFullName, types.UID(podUID), container.Name}, container)
returned, hash, err := ParseDockerName(name)
if err != nil {
t.Errorf("Failed to parse Docker container name %q: %v", name, err)
}
if podFullName != returned.PodFullName || podUID != string(returned.PodUID) || containerName != returned.ContainerName || computedHash != hash {
t.Errorf("For (%s, %s, %s, %d), unpacked (%s, %s, %s, %d)", podFullName, podUID, containerName, computedHash, returned.PodFullName, returned.PodUID, returned.ContainerName, hash)
}
}
func TestContainerNaming(t *testing.T) {
podUID := "12345678"
verifyPackUnpack(t, "file", podUID, "name", "container")
verifyPackUnpack(t, "file", podUID, "name-with-dashes", "container")
// UID is same as pod name
verifyPackUnpack(t, "file", podUID, podUID, "container")
// No Container name
verifyPackUnpack(t, "other", podUID, "name", "")
container := &api.Container{Name: "container"}
podName := "foo"
podNamespace := "test"
name := fmt.Sprintf("k8s_%s_%s_%s_%s_42", container.Name, podName, podNamespace, podUID)
podFullName := fmt.Sprintf("%s_%s", podName, podNamespace)
returned, hash, err := ParseDockerName(name)
if err != nil {
t.Errorf("Failed to parse Docker container name %q: %v", name, err)
}
if returned.PodFullName != podFullName || string(returned.PodUID) != podUID || returned.ContainerName != container.Name || hash != 0 {
t.Errorf("unexpected parse: %s %s %s %d", returned.PodFullName, returned.PodUID, returned.ContainerName, hash)
}
}
func TestVersion(t *testing.T) {
fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.3", "ApiVersion=1.15"}}
manager := &DockerManager{client: fakeDocker}
version, err := manager.Version()
if err != nil {
t.Errorf("got error while getting docker server version - %s", err)
}
expectedVersion, _ := docker.NewAPIVersion("1.15")
if e, a := expectedVersion.String(), version.String(); e != a {
t.Errorf("invalid docker server version. expected: %v, got: %v", e, a)
}
}
func TestExecSupportExists(t *testing.T) {
fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.3.0", "ApiVersion=1.15"}}
runner := &DockerManager{client: fakeDocker}
useNativeExec, err := runner.nativeExecSupportExists()
if err != nil {
t.Errorf("got error while checking for exec support - %s", err)
}
if !useNativeExec {
t.Errorf("invalid exec support check output. Expected true")
}
}
func TestExecSupportNotExists(t *testing.T) {
fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.2", "ApiVersion=1.14"}}
runner := &DockerManager{client: fakeDocker}
useNativeExec, _ := runner.nativeExecSupportExists()
if useNativeExec {
t.Errorf("invalid exec support check output.")
}
}
func TestDockerContainerCommand(t *testing.T) {
runner := &DockerManager{}
containerID := kubetypes.DockerID("1234").ContainerID()
command := []string{"ls"}
cmd, _ := runner.getRunInContainerCommand(containerID, command)
if cmd.Dir != "/var/lib/docker/execdriver/native/"+containerID.ID {
t.Errorf("unexpected command CWD: %s", cmd.Dir)
}
if !reflect.DeepEqual(cmd.Args, []string{"/usr/sbin/nsinit", "exec", "ls"}) {
t.Errorf("unexpected command args: %s", cmd.Args)
}
}
func TestParseImageName(t *testing.T) {
tests := []struct {
imageName string
name string
tag string
}{
{"ubuntu", "ubuntu", ""},
{"ubuntu:2342", "ubuntu", "2342"},
{"ubuntu:latest", "ubuntu", "latest"},
{"foo/bar:445566", "foo/bar", "445566"},
{"registry.example.com:5000/foobar", "registry.example.com:5000/foobar", ""},
{"registry.example.com:5000/foobar:5342", "registry.example.com:5000/foobar", "5342"},
{"registry.example.com:5000/foobar:latest", "registry.example.com:5000/foobar", "latest"},
}
for _, test := range tests {
name, tag := parseImageName(test.imageName)
if name != test.name || tag != test.tag {
t.Errorf("Expected name/tag: %s/%s, got %s/%s", test.name, test.tag, name, tag)
}
}
}
func TestPullWithNoSecrets(t *testing.T) {
tests := []struct {
imageName string
expectedImage string
}{
{"ubuntu", "ubuntu:latest using {}"},
{"ubuntu:2342", "ubuntu:2342 using {}"},
{"ubuntu:latest", "ubuntu:latest using {}"},
{"foo/bar:445566", "foo/bar:445566 using {}"},
{"registry.example.com:5000/foobar", "registry.example.com:5000/foobar:latest using {}"},
{"registry.example.com:5000/foobar:5342", "registry.example.com:5000/foobar:5342 using {}"},
{"registry.example.com:5000/foobar:latest", "registry.example.com:5000/foobar:latest using {}"},
}
for _, test := range tests {
fakeKeyring := &credentialprovider.FakeKeyring{}
fakeClient := &FakeDockerClient{}
dp := dockerPuller{
client: fakeClient,
keyring: fakeKeyring,
}
err := dp.Pull(test.imageName, []api.Secret{})
if err != nil {
t.Errorf("unexpected non-nil err: %s", err)
continue
}
if e, a := 1, len(fakeClient.pulled); e != a {
t.Errorf("%s: expected 1 pulled image, got %d: %v", test.imageName, a, fakeClient.pulled)
continue
}
if e, a := test.expectedImage, fakeClient.pulled[0]; e != a {
t.Errorf("%s: expected pull of %q, but got %q", test.imageName, e, a)
}
}
}
func TestPullWithJSONError(t *testing.T) {
tests := map[string]struct {
imageName string
err error
expectedError string
}{
"Json error": {
"ubuntu",
&jsonmessage.JSONError{Code: 50, Message: "Json error"},
"Json error",
},
"Bad gateway": {
"ubuntu",
&jsonmessage.JSONError{Code: 502, Message: "<!doctype html>\n<html class=\"no-js\" lang=\"\">\n <head>\n </head>\n <body>\n <h1>Oops, there was an error!</h1>\n <p>We have been contacted of this error, feel free to check out <a href=\"http://status.docker.com/\">status.docker.com</a>\n to see if there is a bigger issue.</p>\n\n </body>\n</html>"},
kubecontainer.RegistryUnavailable.Error(),
},
}
for i, test := range tests {
fakeKeyring := &credentialprovider.FakeKeyring{}
fakeClient := &FakeDockerClient{
Errors: map[string]error{"pull": test.err},
}
puller := &dockerPuller{
client: fakeClient,
keyring: fakeKeyring,
}
err := puller.Pull(test.imageName, []api.Secret{})
if err == nil || !strings.Contains(err.Error(), test.expectedError) {
t.Errorf("%s: expect error %s, got : %s", i, test.expectedError, err)
continue
}
}
}
func TestPullWithSecrets(t *testing.T) {
// auth value is equivalent to: "username":"passed-user","password":"passed-password"
dockerCfg := map[string]map[string]string{"index.docker.io/v1/": {"email": "passed-email", "auth": "cGFzc2VkLXVzZXI6cGFzc2VkLXBhc3N3b3Jk"}}
dockercfgContent, err := json.Marshal(dockerCfg)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
dockerConfigJson := map[string]map[string]map[string]string{"auths": dockerCfg}
dockerConfigJsonContent, err := json.Marshal(dockerConfigJson)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
tests := map[string]struct {
imageName string
passedSecrets []api.Secret
builtInDockerConfig credentialprovider.DockerConfig
expectedPulls []string
}{
"no matching secrets": {
"ubuntu",
[]api.Secret{},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{}),
[]string{"ubuntu:latest using {}"},
},
"default keyring secrets": {
"ubuntu",
[]api.Secret{},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"index.docker.io/v1/": {"built-in", "password", "email"}}),
[]string{`ubuntu:latest using {"username":"built-in","password":"password","email":"email"}`},
},
"default keyring secrets unused": {
"ubuntu",
[]api.Secret{},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"extraneous": {"built-in", "password", "email"}}),
[]string{`ubuntu:latest using {}`},
},
"builtin keyring secrets, but use passed": {
"ubuntu",
[]api.Secret{{Type: api.SecretTypeDockercfg, Data: map[string][]byte{api.DockerConfigKey: dockercfgContent}}},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"index.docker.io/v1/": {"built-in", "password", "email"}}),
[]string{`ubuntu:latest using {"username":"passed-user","password":"passed-password","email":"passed-email"}`},
},
"builtin keyring secrets, but use passed with new docker config": {
"ubuntu",
[]api.Secret{{Type: api.SecretTypeDockerConfigJson, Data: map[string][]byte{api.DockerConfigJsonKey: dockerConfigJsonContent}}},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"index.docker.io/v1/": {"built-in", "password", "email"}}),
[]string{`ubuntu:latest using {"username":"passed-user","password":"passed-password","email":"passed-email"}`},
},
}
for _, test := range tests {
builtInKeyRing := &credentialprovider.BasicDockerKeyring{}
builtInKeyRing.Add(test.builtInDockerConfig)
fakeClient := &FakeDockerClient{}
dp := dockerPuller{
client: fakeClient,
keyring: builtInKeyRing,
}
err := dp.Pull(test.imageName, test.passedSecrets)
if err != nil {
t.Errorf("unexpected non-nil err: %s", err)
continue
}
if e, a := 1, len(fakeClient.pulled); e != a {
t.Errorf("%s: expected 1 pulled image, got %d: %v", test.imageName, a, fakeClient.pulled)
continue
}
if e, a := test.expectedPulls, fakeClient.pulled; !reflect.DeepEqual(e, a) {
t.Errorf("%s: expected pull of %v, but got %v", test.imageName, e, a)
}
}
}
func TestDockerKeyringLookupFails(t *testing.T) {
fakeKeyring := &credentialprovider.FakeKeyring{}
fakeClient := &FakeDockerClient{
Errors: map[string]error{"pull": fmt.Errorf("test error")},
}
dp := dockerPuller{
client: fakeClient,
keyring: fakeKeyring,
}
err := dp.Pull("host/repository/image:version", []api.Secret{})
if err == nil {
t.Errorf("unexpected non-error")
}
msg := "image pull failed for host/repository/image:version, this may be because there are no credentials on this request. details: (test error)"
if err.Error() != msg {
t.Errorf("expected: %s, saw: %s", msg, err.Error())
}
}
func TestDockerKeyringLookup(t *testing.T) {
ada := docker.AuthConfiguration{
Username: "ada",
Password: "smash",
Email: "[email protected]",
}
grace := docker.AuthConfiguration{
Username: "grace",
Password: "squash",
Email: "[email protected]",
}
dk := &credentialprovider.BasicDockerKeyring{}
dk.Add(credentialprovider.DockerConfig{
"bar.example.com/pong": credentialprovider.DockerConfigEntry{
Username: grace.Username,
Password: grace.Password,
Email: grace.Email,
},
"bar.example.com": credentialprovider.DockerConfigEntry{
Username: ada.Username,
Password: ada.Password,
Email: ada.Email,
},
})
tests := []struct {
image string
match []docker.AuthConfiguration
ok bool
}{
// direct match
{"bar.example.com", []docker.AuthConfiguration{ada}, true},
// direct match deeper than other possible matches
{"bar.example.com/pong", []docker.AuthConfiguration{grace, ada}, true},
// no direct match, deeper path ignored
{"bar.example.com/ping", []docker.AuthConfiguration{ada}, true},
// match first part of path token
{"bar.example.com/pongz", []docker.AuthConfiguration{grace, ada}, true},
// match regardless of sub-path
{"bar.example.com/pong/pang", []docker.AuthConfiguration{grace, ada}, true},
// no host match
{"example.com", []docker.AuthConfiguration{}, false},
{"foo.example.com", []docker.AuthConfiguration{}, false},
}
for i, tt := range tests {
match, ok := dk.Lookup(tt.image)
if tt.ok != ok {
t.Errorf("case %d: expected ok=%t, got %t", i, tt.ok, ok)
}
if !reflect.DeepEqual(tt.match, match) {
t.Errorf("case %d: expected match=%#v, got %#v", i, tt.match, match)
}
}
}
// This validates that dockercfg entries with a scheme and url path are properly matched
// by images that only match the hostname.
// NOTE: the above covers the case of a more specific match trumping just hostname.
func TestIssue3797(t *testing.T) {
rex := docker.AuthConfiguration{
Username: "rex",
Password: "tiny arms",
Email: "[email protected]",
}
dk := &credentialprovider.BasicDockerKeyring{}
dk.Add(credentialprovider.DockerConfig{
"https://quay.io/v1/": credentialprovider.DockerConfigEntry{
Username: rex.Username,
Password: rex.Password,
Email: rex.Email,
},
})
tests := []struct {
image string
match []docker.AuthConfiguration
ok bool
}{
// direct match
{"quay.io", []docker.AuthConfiguration{rex}, true},
// partial matches
{"quay.io/foo", []docker.AuthConfiguration{rex}, true},
{"quay.io/foo/bar", []docker.AuthConfiguration{rex}, true},
}
for i, tt := range tests {
match, ok := dk.Lookup(tt.image)
if tt.ok != ok {
t.Errorf("case %d: expected ok=%t, got %t", i, tt.ok, ok)
}
if !reflect.DeepEqual(tt.match, match) {
t.Errorf("case %d: expected match=%#v, got %#v", i, tt.match, match)
}
}
}
type imageTrackingDockerClient struct {
*FakeDockerClient
imageName string
}
func (f *imageTrackingDockerClient) InspectImage(name string) (image *docker.Image, err error) {
image, err = f.FakeDockerClient.InspectImage(name)
f.imageName = name
return
}
func TestIsImagePresent(t *testing.T) {
cl := &imageTrackingDockerClient{&FakeDockerClient{}, ""}
puller := &dockerPuller{
client: cl,
}
_, _ = puller.IsImagePresent("abc:123")
if cl.imageName != "abc:123" {
t.Errorf("expected inspection of image abc:123, instead inspected image %v", cl.imageName)
}
}
type podsByID []*kubecontainer.Pod
func (b podsByID) Len() int { return len(b) }
func (b podsByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b podsByID) Less(i, j int) bool { return b[i].ID < b[j].ID }
type containersByID []*kubecontainer.Container
func (b containersByID) Len() int { return len(b) }
func (b containersByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b containersByID) Less(i, j int) bool { return b[i].ID.ID < b[j].ID.ID }
func TestFindContainersByPod(t *testing.T) {
tests := []struct {
containerList []docker.APIContainers
exitedContainerList []docker.APIContainers
all bool
expectedPods []*kubecontainer.Pod
}{
{
[]docker.APIContainers{
{
ID: "foobar",
Names: []string{"/k8s_foobar.1234_qux_ns_1234_42"},
},
{
ID: "barbar",
Names: []string{"/k8s_barbar.1234_qux_ns_2343_42"},
},
{
ID: "baz",
Names: []string{"/k8s_baz.1234_qux_ns_1234_42"},
},
},
[]docker.APIContainers{
{
ID: "barfoo",
Names: []string{"/k8s_barfoo.1234_qux_ns_1234_42"},
},
{
ID: "bazbaz",
Names: []string{"/k8s_bazbaz.1234_qux_ns_5678_42"},
},
},
false,
[]*kubecontainer.Pod{
{
ID: "1234",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubetypes.DockerID("foobar").ContainerID(),
Name: "foobar",
Hash: 0x1234,
},
{
ID: kubetypes.DockerID("baz").ContainerID(),
Name: "baz",
Hash: 0x1234,
},
},
},
{
ID: "2343",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubetypes.DockerID("barbar").ContainerID(),
Name: "barbar",
Hash: 0x1234,
},
},
},
},
},
{
[]docker.APIContainers{
{
ID: "foobar",
Names: []string{"/k8s_foobar.1234_qux_ns_1234_42"},
},
{
ID: "barbar",
Names: []string{"/k8s_barbar.1234_qux_ns_2343_42"},
},
{
ID: "baz",
Names: []string{"/k8s_baz.1234_qux_ns_1234_42"},
},
},
[]docker.APIContainers{
{
ID: "barfoo",
Names: []string{"/k8s_barfoo.1234_qux_ns_1234_42"},
},
{
ID: "bazbaz",
Names: []string{"/k8s_bazbaz.1234_qux_ns_5678_42"},
},
},
true,
[]*kubecontainer.Pod{
{
ID: "1234",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubetypes.DockerID("foobar").ContainerID(),
Name: "foobar",
Hash: 0x1234,
},
{
ID: kubetypes.DockerID("barfoo").ContainerID(),
Name: "barfoo",
Hash: 0x1234,
},
{
ID: kubetypes.DockerID("baz").ContainerID(),
Name: "baz",
Hash: 0x1234,
},
},
},
{
ID: "2343",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubetypes.DockerID("barbar").ContainerID(),
Name: "barbar",
Hash: 0x1234,
},
},
},
{
ID: "5678",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubetypes.DockerID("bazbaz").ContainerID(),
Name: "bazbaz",
Hash: 0x1234,
},
},
},
},
},
{
[]docker.APIContainers{},
[]docker.APIContainers{},
true,
nil,
},
}
fakeClient := &FakeDockerClient{}
np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
// image back-off is set to nil, so this test shouldn't pull images
containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, &cadvisorapi.MachineInfo{}, PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, nil)
for i, test := range tests {
fakeClient.ContainerList = test.containerList
fakeClient.ExitedContainerList = test.exitedContainerList
result, _ := containerManager.GetPods(test.all)
for i := range result {
sort.Sort(containersByID(result[i].Containers))
}
for i := range test.expectedPods {
sort.Sort(containersByID(test.expectedPods[i].Containers))
}
sort.Sort(podsByID(result))
sort.Sort(podsByID(test.expectedPods))
if !reflect.DeepEqual(test.expectedPods, result) {
t.Errorf("%d: expected: %#v, saw: %#v", i, test.expectedPods, result)
}
}
}
func TestMakePortsAndBindings(t *testing.T) {
ports := []kubecontainer.PortMapping{
{
ContainerPort: 80,
HostPort: 8080,
HostIP: "127.0.0.1",
},
{
ContainerPort: 443,
HostPort: 443,
Protocol: "tcp",
},
{
ContainerPort: 444,
HostPort: 444,
Protocol: "udp",
},
{
ContainerPort: 445,
HostPort: 445,
Protocol: "foobar",
},
{
ContainerPort: 443,
HostPort: 446,
Protocol: "tcp",
},
{
ContainerPort: 443,
HostPort: 446,
Protocol: "udp",
},
}
exposedPorts, bindings := makePortsAndBindings(ports)
// Count the expected exposed ports and bindings
expectedExposedPorts := map[string]struct{}{}
for _, binding := range ports {
dockerKey := strconv.Itoa(binding.ContainerPort) + "/" + string(binding.Protocol)
expectedExposedPorts[dockerKey] = struct{}{}
}
// Should expose right ports in docker
if len(expectedExposedPorts) != len(exposedPorts) {
t.Errorf("Unexpected ports and bindings, %#v %#v %#v", ports, exposedPorts, bindings)
}
// Construct expected bindings
expectPortBindings := map[string][]docker.PortBinding{
"80/tcp": {
docker.PortBinding{
HostPort: "8080",
HostIP: "127.0.0.1",
},
},
"443/tcp": {
docker.PortBinding{
HostPort: "443",
HostIP: "",
},
docker.PortBinding{
HostPort: "446",
HostIP: "",
},
},
"443/udp": {
docker.PortBinding{
HostPort: "446",
HostIP: "",
},
},
"444/udp": {
docker.PortBinding{
HostPort: "444",
HostIP: "",
},
},
"445/tcp": {
docker.PortBinding{
HostPort: "445",
HostIP: "",
},
},
}
// iterate the bindings by dockerPort and check their portBindings
for dockerPort, portBindings := range bindings {
switch dockerPort {
case "80/tcp", "443/tcp", "443/udp", "444/udp", "445/tcp":
if !reflect.DeepEqual(expectPortBindings[string(dockerPort)], portBindings) {
t.Errorf("Unexpected portbindings for %#v, expected: %#v, but got: %#v",
dockerPort, expectPortBindings[string(dockerPort)], portBindings)
}
default:
t.Errorf("Unexpected docker port: %#v with portbindings: %#v", dockerPort, portBindings)
}
}
}
func TestMilliCPUToQuota(t *testing.T) {
testCases := []struct {
input int64
quota int64
period int64
}{
{
input: int64(0),
quota: int64(0),
period: int64(0),
},
{
input: int64(200),
quota: int64(20000),
period: int64(100000),
},
{
input: int64(500),
quota: int64(50000),
period: int64(100000),
},
{
input: int64(1000),
quota: int64(100000),
period: int64(100000),
},
{
input: int64(1500),
quota: int64(150000),
period: int64(100000),
},
}
for _, testCase := range testCases {
quota, period := milliCPUToQuota(testCase.input)
if quota != testCase.quota || period != testCase.period {
t.Errorf("Input %v, expected quota %v period %v, but got quota %v period %v", testCase.input, testCase.quota, testCase.period, quota, period)
}
}
}
| pkg/kubelet/dockertools/docker_test.go | 1 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.9920240640640259,
0.04430852457880974,
0.00016371709352824837,
0.00019620204693637788,
0.1893523931503296
] |
{
"id": 8,
"code_window": [
"\t\tHash: hash,\n",
"\t}\n",
"}\n",
"\n",
"func dockerContainersToPod(containers DockerContainers) kubecontainer.Pod {\n",
"\tvar pod kubecontainer.Pod\n",
"\tfor _, c := range containers {\n",
"\t\tdockerName, hash, err := ParseDockerName(c.Names[0])\n",
"\t\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func dockerContainersToPod(containers []*docker.APIContainers) kubecontainer.Pod {\n"
],
"file_path": "pkg/kubelet/dockertools/manager_test.go",
"type": "replace",
"edit_start_line_idx": 358
} | package probing
import (
"encoding/json"
"errors"
"net/http"
"sync"
"time"
)
var (
ErrNotFound = errors.New("probing: id not found")
ErrExist = errors.New("probing: id exists")
)
type Prober interface {
AddHTTP(id string, probingInterval time.Duration, endpoints []string) error
Remove(id string) error
RemoveAll()
Reset(id string) error
Status(id string) (Status, error)
}
type prober struct {
mu sync.Mutex
targets map[string]*status
tr http.RoundTripper
}
func NewProber(tr http.RoundTripper) Prober {
p := &prober{targets: make(map[string]*status)}
if tr == nil {
p.tr = http.DefaultTransport
} else {
p.tr = tr
}
return p
}
func (p *prober) AddHTTP(id string, probingInterval time.Duration, endpoints []string) error {
p.mu.Lock()
defer p.mu.Unlock()
if _, ok := p.targets[id]; ok {
return ErrExist
}
s := &status{stopC: make(chan struct{})}
p.targets[id] = s
ticker := time.NewTicker(probingInterval)
go func() {
pinned := 0
for {
select {
case <-ticker.C:
start := time.Now()
req, err := http.NewRequest("GET", endpoints[pinned], nil)
if err != nil {
panic(err)
}
resp, err := p.tr.RoundTrip(req)
if err != nil {
s.recordFailure()
pinned = (pinned + 1) % len(endpoints)
continue
}
var hh Health
d := json.NewDecoder(resp.Body)
err = d.Decode(&hh)
resp.Body.Close()
if err != nil || !hh.OK {
s.recordFailure()
pinned = (pinned + 1) % len(endpoints)
continue
}
s.record(time.Since(start), hh.Now)
case <-s.stopC:
ticker.Stop()
return
}
}
}()
return nil
}
func (p *prober) Remove(id string) error {
p.mu.Lock()
defer p.mu.Unlock()
s, ok := p.targets[id]
if !ok {
return ErrNotFound
}
close(s.stopC)
delete(p.targets, id)
return nil
}
func (p *prober) RemoveAll() {
p.mu.Lock()
defer p.mu.Unlock()
for _, s := range p.targets {
close(s.stopC)
}
p.targets = make(map[string]*status)
}
func (p *prober) Reset(id string) error {
p.mu.Lock()
defer p.mu.Unlock()
s, ok := p.targets[id]
if !ok {
return ErrNotFound
}
s.reset()
return nil
}
func (p *prober) Status(id string) (Status, error) {
p.mu.Lock()
defer p.mu.Unlock()
s, ok := p.targets[id]
if !ok {
return nil, ErrNotFound
}
return s, nil
}
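// Illustrative (untested) usage sketch; the id, interval and endpoint below are
// placeholder values, not anything defined by this package:
//
//	p := NewProber(nil) // nil falls back to http.DefaultTransport
//	_ = p.AddHTTP("peer-1", 5*time.Second, []string{"http://127.0.0.1:2380/health"})
//	st, _ := p.Status("peer-1")
//	_ = st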
| Godeps/_workspace/src/github.com/xiang90/probing/prober.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.0003323341952636838,
0.00018977319996338338,
0.00016251156921498477,
0.0001697622356005013,
0.000045378012146102265
] |
{
"id": 8,
"code_window": [
"\t\tHash: hash,\n",
"\t}\n",
"}\n",
"\n",
"func dockerContainersToPod(containers DockerContainers) kubecontainer.Pod {\n",
"\tvar pod kubecontainer.Pod\n",
"\tfor _, c := range containers {\n",
"\t\tdockerName, hash, err := ParseDockerName(c.Names[0])\n",
"\t\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func dockerContainersToPod(containers []*docker.APIContainers) kubecontainer.Pod {\n"
],
"file_path": "pkg/kubelet/dockertools/manager_test.go",
"type": "replace",
"edit_start_line_idx": 358
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package election
import (
"sync"
"k8s.io/kubernetes/pkg/watch"
)
// Fake allows for testing of anything consuming a MasterElector.
type Fake struct {
mux *watch.Broadcaster
currentMaster Master
lock sync.Mutex // Protect access of currentMaster
}
// NewFake makes a new fake MasterElector.
func NewFake() *Fake {
// 0 means block for clients.
return &Fake{mux: watch.NewBroadcaster(0, watch.WaitIfChannelFull)}
}
func (f *Fake) ChangeMaster(newMaster Master) {
f.lock.Lock()
defer f.lock.Unlock()
f.mux.Action(watch.Modified, newMaster)
f.currentMaster = newMaster
}
func (f *Fake) Elect(path, id string) watch.Interface {
f.lock.Lock()
defer f.lock.Unlock()
w := f.mux.Watch()
if f.currentMaster != "" {
f.mux.Action(watch.Modified, f.currentMaster)
}
return w
}
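// Illustrative (untested) usage sketch; "node-1" is just a placeholder master id:
//
//	elector := NewFake()
//	w := elector.Elect("/registry/master", "candidate") // both args are ignored by the fake
//	elector.ChangeMaster("node-1")
//	evt := <-w.ResultChan() // watch.Event carrying the new Master
//	_ = evt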
| contrib/mesos/pkg/election/fake.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00024066238256637007,
0.00018410755728837103,
0.0001642368151806295,
0.00017703211051411927,
0.00002587535527709406
] |
{
"id": 8,
"code_window": [
"\t\tHash: hash,\n",
"\t}\n",
"}\n",
"\n",
"func dockerContainersToPod(containers DockerContainers) kubecontainer.Pod {\n",
"\tvar pod kubecontainer.Pod\n",
"\tfor _, c := range containers {\n",
"\t\tdockerName, hash, err := ParseDockerName(c.Names[0])\n",
"\t\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func dockerContainersToPod(containers []*docker.APIContainers) kubecontainer.Pod {\n"
],
"file_path": "pkg/kubelet/dockertools/manager_test.go",
"type": "replace",
"edit_start_line_idx": 358
} | package main
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/docker/libcontainer"
"github.com/docker/libcontainer/cgroups/systemd"
"github.com/docker/libcontainer/configs"
)
func loadConfig(context *cli.Context) (*configs.Config, error) {
if path := context.String("config"); path != "" {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
var config *configs.Config
if err := json.NewDecoder(f).Decode(&config); err != nil {
return nil, err
}
return config, nil
}
config := getTemplate()
modify(config, context)
return config, nil
}
func loadFactory(context *cli.Context) (libcontainer.Factory, error) {
cgm := libcontainer.Cgroupfs
if context.Bool("systemd") {
if systemd.UseSystemd() {
cgm = libcontainer.SystemdCgroups
} else {
logrus.Warn("systemd cgroup flag passed, but systemd support for managing cgroups is not available.")
}
}
root := context.GlobalString("root")
abs, err := filepath.Abs(root)
if err != nil {
return nil, err
}
return libcontainer.New(abs, cgm, func(l *libcontainer.LinuxFactory) error {
l.CriuPath = context.GlobalString("criu")
return nil
})
}
func getContainer(context *cli.Context) (libcontainer.Container, error) {
factory, err := loadFactory(context)
if err != nil {
return nil, err
}
container, err := factory.Load(context.String("id"))
if err != nil {
return nil, err
}
return container, nil
}
func fatal(err error) {
if lerr, ok := err.(libcontainer.Error); ok {
lerr.Detail(os.Stderr)
os.Exit(1)
}
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
func fatalf(t string, v ...interface{}) {
fmt.Fprintf(os.Stderr, t, v...)
os.Exit(1)
}
| Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/utils.go | 0 | https://github.com/kubernetes/kubernetes/commit/b127901871b4081f65904e73eda786a7710149b5 | [
0.00030951379449106753,
0.00018791164620779455,
0.00016320445865858346,
0.0001696685649221763,
0.000046289191232062876
] |
{
"id": 0,
"code_window": [
"// @todo add tests\n",
"func (app *BaseApp) initAutobackupHooks() error {\n",
"\tc := cron.New()\n",
"\n",
"\tloadJob := func() {\n",
"\t\tc.Stop()\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tisServe := false\n"
],
"file_path": "core/base_backup.go",
"type": "add",
"edit_start_line_idx": 253
} | package forms
import (
"os"
"time"
"github.com/pocketbase/pocketbase/core"
"github.com/pocketbase/pocketbase/daos"
"github.com/pocketbase/pocketbase/models/settings"
)
// SettingsUpsert is a [settings.Settings] upsert (create/update) form.
type SettingsUpsert struct {
*settings.Settings
app core.App
dao *daos.Dao
}
// NewSettingsUpsert creates a new [SettingsUpsert] form with initializer
// config created from the provided [core.App] instance.
//
// If you want to submit the form as part of a transaction,
// you can change the default Dao via [SetDao()].
func NewSettingsUpsert(app core.App) *SettingsUpsert {
form := &SettingsUpsert{
app: app,
dao: app.Dao(),
}
// load the application settings into the form
form.Settings, _ = app.Settings().Clone()
return form
}
// SetDao replaces the default form Dao instance with the provided one.
func (form *SettingsUpsert) SetDao(dao *daos.Dao) {
form.dao = dao
}
// Validate makes the form validatable by implementing the [validation.Validatable] interface.
func (form *SettingsUpsert) Validate() error {
return form.Settings.Validate()
}
// Submit validates the form and upserts the loaded settings.
//
// On success the app settings will be refreshed with the form ones.
//
// You can optionally provide a list of InterceptorFunc to further
// modify the form behavior before persisting it.
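//
// Illustrative (untested) usage sketch; "app" is assumed to be an initialized
// core.App instance and Meta.AppName is assumed from the default settings model:
//
//	form := NewSettingsUpsert(app)
//	form.Meta.AppName = "Acme"
//	if err := form.Submit(); err != nil {
//		// handle the error
//	}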
func (form *SettingsUpsert) Submit(interceptors ...InterceptorFunc[*settings.Settings]) error {
if err := form.Validate(); err != nil {
return err
}
return runInterceptors(form.Settings, func(s *settings.Settings) error {
form.Settings = s
encryptionKey := os.Getenv(form.app.EncryptionEnv())
if err := form.dao.SaveSettings(form.Settings, encryptionKey); err != nil {
return err
}
// explicitly trigger old logs deletion
form.app.LogsDao().DeleteOldRequests(
time.Now().AddDate(0, 0, -1*form.Settings.Logs.MaxDays),
)
if form.Settings.Logs.MaxDays == 0 {
// no logs are allowed -> reclaim preserved disk space after the previous delete operation
form.app.LogsDao().Vacuum()
}
// merge the application settings with the form ones
return form.app.Settings().Merge(form.Settings)
}, interceptors...)
}
| forms/settings_upsert.go | 1 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.00018141705368179828,
0.0001714660320430994,
0.00016579404473304749,
0.0001700017019174993,
0.0000047565017666784115
] |
{
"id": 0,
"code_window": [
"// @todo add tests\n",
"func (app *BaseApp) initAutobackupHooks() error {\n",
"\tc := cron.New()\n",
"\n",
"\tloadJob := func() {\n",
"\t\tc.Stop()\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tisServe := false\n"
],
"file_path": "core/base_backup.go",
"type": "add",
"edit_start_line_idx": 253
} | <svg viewBox="0 0 36 36" fill="none" role="img" xmlns="http://www.w3.org/2000/svg" width="128" height="128"><title>Nellie Bly</title><mask id="mask__beam" maskUnits="userSpaceOnUse" x="0" y="0" width="36" height="36"><rect width="36" height="36" fill="#FFFFFF"></rect></mask><g mask="url(#mask__beam)"><rect width="36" height="36" fill="#a89f1d"></rect><rect x="0" y="0" width="36" height="36" transform="translate(8 8) rotate(118 18 18) scale(1.1)" fill="#fcaf14" rx="6"></rect><g transform="translate(4 4) rotate(-8 18 18)"><path d="M13,20 a1,0.75 0 0,0 10,0" fill="#000000"></path><rect x="11" y="14" width="1.5" height="2" rx="1" stroke="none" fill="#000000"></rect><rect x="23" y="14" width="1.5" height="2" rx="1" stroke="none" fill="#000000"></rect></g></g></svg>
| ui/public/images/avatars/avatar1.svg | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.0001733798999339342,
0.0001733798999339342,
0.0001733798999339342,
0.0001733798999339342,
0
] |
{
"id": 0,
"code_window": [
"// @todo add tests\n",
"func (app *BaseApp) initAutobackupHooks() error {\n",
"\tc := cron.New()\n",
"\n",
"\tloadJob := func() {\n",
"\t\tc.Stop()\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tisServe := false\n"
],
"file_path": "core/base_backup.go",
"type": "add",
"edit_start_line_idx": 253
} | package apis_test
import (
"context"
"net/http"
"strings"
"testing"
"time"
"github.com/labstack/echo/v5"
"github.com/pocketbase/pocketbase/daos"
"github.com/pocketbase/pocketbase/tests"
"github.com/pocketbase/pocketbase/tools/subscriptions"
"github.com/pocketbase/pocketbase/tools/types"
)
func TestRecordAuthMethodsList(t *testing.T) {
scenarios := []tests.ApiScenario{
{
Name: "missing collection",
Method: http.MethodGet,
Url: "/api/collections/missing/auth-methods",
ExpectedStatus: 404,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "non auth collection",
Method: http.MethodGet,
Url: "/api/collections/demo1/auth-methods",
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "auth collection with all auth methods allowed",
Method: http.MethodGet,
Url: "/api/collections/users/auth-methods",
ExpectedStatus: 200,
ExpectedContent: []string{
`"usernamePassword":true`,
`"emailPassword":true`,
`"authProviders":[{`,
`"name":"gitlab"`,
`"state":`,
`"codeVerifier":`,
`"codeChallenge":`,
`"codeChallengeMethod":`,
`"authUrl":`,
`redirect_uri="`, // ensures that the redirect_uri is the last url param
},
},
{
Name: "auth collection with only email/password auth allowed",
Method: http.MethodGet,
Url: "/api/collections/clients/auth-methods",
ExpectedStatus: 200,
ExpectedContent: []string{
`"usernamePassword":false`,
`"emailPassword":true`,
`"authProviders":[]`,
},
},
}
for _, scenario := range scenarios {
scenario.Test(t)
}
}
func TestRecordAuthWithPassword(t *testing.T) {
scenarios := []tests.ApiScenario{
{
Name: "invalid body format",
Method: http.MethodPost,
Url: "/api/collections/users/auth-with-password",
Body: strings.NewReader(`{"identity`),
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "empty body params",
Method: http.MethodPost,
Url: "/api/collections/users/auth-with-password",
Body: strings.NewReader(`{"identity":"","password":""}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{`,
`"identity":{`,
`"password":{`,
},
},
// username
{
Name: "invalid username and valid password",
Method: http.MethodPost,
Url: "/api/collections/users/auth-with-password",
Body: strings.NewReader(`{
"identity":"invalid",
"password":"1234567890"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{}`,
},
ExpectedEvents: map[string]int{
"OnRecordBeforeAuthWithPasswordRequest": 1,
},
},
{
Name: "valid username and invalid password",
Method: http.MethodPost,
Url: "/api/collections/users/auth-with-password",
Body: strings.NewReader(`{
"identity":"test2_username",
"password":"invalid"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{}`,
},
ExpectedEvents: map[string]int{
"OnRecordBeforeAuthWithPasswordRequest": 1,
},
},
{
Name: "valid username and valid password in restricted collection",
Method: http.MethodPost,
Url: "/api/collections/nologin/auth-with-password",
Body: strings.NewReader(`{
"identity":"test_username",
"password":"1234567890"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{}`,
},
ExpectedEvents: map[string]int{
"OnRecordBeforeAuthWithPasswordRequest": 1,
},
},
{
Name: "valid username and valid password in allowed collection",
Method: http.MethodPost,
Url: "/api/collections/users/auth-with-password",
Body: strings.NewReader(`{
"identity":"test2_username",
"password":"1234567890"
}`),
ExpectedStatus: 200,
ExpectedContent: []string{
`"record":{`,
`"token":"`,
`"id":"oap640cot4yru2s"`,
`"email":"[email protected]"`,
},
ExpectedEvents: map[string]int{
"OnRecordBeforeAuthWithPasswordRequest": 1,
"OnRecordAfterAuthWithPasswordRequest": 1,
"OnRecordAuthRequest": 1,
},
},
// email
{
Name: "invalid email and valid password",
Method: http.MethodPost,
Url: "/api/collections/users/auth-with-password",
Body: strings.NewReader(`{
"identity":"[email protected]",
"password":"1234567890"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{}`,
},
ExpectedEvents: map[string]int{
"OnRecordBeforeAuthWithPasswordRequest": 1,
},
},
{
Name: "valid email and invalid password",
Method: http.MethodPost,
Url: "/api/collections/users/auth-with-password",
Body: strings.NewReader(`{
"identity":"[email protected]",
"password":"invalid"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{}`,
},
ExpectedEvents: map[string]int{
"OnRecordBeforeAuthWithPasswordRequest": 1,
},
},
{
Name: "valid email and valid password in restricted collection",
Method: http.MethodPost,
Url: "/api/collections/nologin/auth-with-password",
Body: strings.NewReader(`{
"identity":"[email protected]",
"password":"1234567890"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{}`,
},
ExpectedEvents: map[string]int{
"OnRecordBeforeAuthWithPasswordRequest": 1,
},
},
{
Name: "valid email and valid password in allowed collection",
Method: http.MethodPost,
Url: "/api/collections/users/auth-with-password",
Body: strings.NewReader(`{
"identity":"[email protected]",
"password":"1234567890"
}`),
ExpectedStatus: 200,
ExpectedContent: []string{
`"record":{`,
`"token":"`,
`"id":"4q1xlclmfloku33"`,
`"email":"[email protected]"`,
},
ExpectedEvents: map[string]int{
"OnRecordBeforeAuthWithPasswordRequest": 1,
"OnRecordAfterAuthWithPasswordRequest": 1,
"OnRecordAuthRequest": 1,
},
},
// with already authenticated record or admin
{
Name: "authenticated record",
Method: http.MethodPost,
Url: "/api/collections/users/auth-with-password",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.UwD8JvkbQtXpymT09d7J6fdA0aP9g4FJ1GPh_ggEkzc",
},
Body: strings.NewReader(`{
"identity":"[email protected]",
"password":"1234567890"
}`),
ExpectedStatus: 200,
ExpectedContent: []string{
`"record":{`,
`"token":"`,
`"id":"4q1xlclmfloku33"`,
`"email":"[email protected]"`,
},
ExpectedEvents: map[string]int{
"OnRecordBeforeAuthWithPasswordRequest": 1,
"OnRecordAfterAuthWithPasswordRequest": 1,
"OnRecordAuthRequest": 1,
},
},
{
Name: "authenticated admin",
Method: http.MethodPost,
Url: "/api/collections/users/auth-with-password",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6InN5d2JoZWNuaDQ2cmhtMCIsInR5cGUiOiJhZG1pbiIsImV4cCI6MjIwODk4NTI2MX0.M1m--VOqGyv0d23eeUc0r9xE8ZzHaYVmVFw1VZW6gT8",
},
Body: strings.NewReader(`{
"identity":"[email protected]",
"password":"1234567890"
}`),
ExpectedStatus: 200,
ExpectedContent: []string{
`"record":{`,
`"token":"`,
`"id":"4q1xlclmfloku33"`,
`"email":"[email protected]"`,
},
ExpectedEvents: map[string]int{
"OnRecordBeforeAuthWithPasswordRequest": 1,
"OnRecordAfterAuthWithPasswordRequest": 1,
"OnRecordAuthRequest": 1,
},
},
}
for _, scenario := range scenarios {
scenario.Test(t)
}
}
func TestRecordAuthRefresh(t *testing.T) {
scenarios := []tests.ApiScenario{
{
Name: "unauthorized",
Method: http.MethodPost,
Url: "/api/collections/users/auth-refresh",
ExpectedStatus: 401,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "admin",
Method: http.MethodPost,
Url: "/api/collections/users/auth-refresh",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6InN5d2JoZWNuaDQ2cmhtMCIsInR5cGUiOiJhZG1pbiIsImV4cCI6MjIwODk4NTI2MX0.M1m--VOqGyv0d23eeUc0r9xE8ZzHaYVmVFw1VZW6gT8",
},
ExpectedStatus: 401,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "auth record + not an auth collection",
Method: http.MethodPost,
Url: "/api/collections/demo1/auth-refresh",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.UwD8JvkbQtXpymT09d7J6fdA0aP9g4FJ1GPh_ggEkzc",
},
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "auth record + different auth collection",
Method: http.MethodPost,
Url: "/api/collections/clients/auth-refresh?expand=rel,missing",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.UwD8JvkbQtXpymT09d7J6fdA0aP9g4FJ1GPh_ggEkzc",
},
ExpectedStatus: 403,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "auth record + same auth collection as the token",
Method: http.MethodPost,
Url: "/api/collections/users/auth-refresh?expand=rel,missing",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.UwD8JvkbQtXpymT09d7J6fdA0aP9g4FJ1GPh_ggEkzc",
},
ExpectedStatus: 200,
ExpectedContent: []string{
`"token":`,
`"record":`,
`"id":"4q1xlclmfloku33"`,
`"emailVisibility":false`,
`"email":"[email protected]"`, // the owner can always view their email address
`"expand":`,
`"rel":`,
`"id":"llvuca81nly1qls"`,
},
NotExpectedContent: []string{
`"missing":`,
},
ExpectedEvents: map[string]int{
"OnRecordBeforeAuthRefreshRequest": 1,
"OnRecordAuthRequest": 1,
"OnRecordAfterAuthRefreshRequest": 1,
},
},
}
for _, scenario := range scenarios {
scenario.Test(t)
}
}
func TestRecordAuthRequestPasswordReset(t *testing.T) {
scenarios := []tests.ApiScenario{
{
Name: "not an auth collection",
Method: http.MethodPost,
Url: "/api/collections/demo1/request-password-reset",
Body: strings.NewReader(``),
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "empty data",
Method: http.MethodPost,
Url: "/api/collections/users/request-password-reset",
Body: strings.NewReader(``),
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{"email":{"code":"validation_required","message":"Cannot be blank."}}`},
},
{
Name: "invalid data",
Method: http.MethodPost,
Url: "/api/collections/users/request-password-reset",
Body: strings.NewReader(`{"email`),
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "missing auth record",
Method: http.MethodPost,
Url: "/api/collections/users/request-password-reset",
Body: strings.NewReader(`{"email":"[email protected]"}`),
Delay: 100 * time.Millisecond,
ExpectedStatus: 204,
},
{
Name: "existing auth record",
Method: http.MethodPost,
Url: "/api/collections/users/request-password-reset",
Body: strings.NewReader(`{"email":"[email protected]"}`),
Delay: 100 * time.Millisecond,
ExpectedStatus: 204,
ExpectedEvents: map[string]int{
"OnModelBeforeUpdate": 1,
"OnModelAfterUpdate": 1,
"OnRecordBeforeRequestPasswordResetRequest": 1,
"OnRecordAfterRequestPasswordResetRequest": 1,
"OnMailerBeforeRecordResetPasswordSend": 1,
"OnMailerAfterRecordResetPasswordSend": 1,
},
},
{
Name: "existing auth record (after already sent)",
Method: http.MethodPost,
Url: "/api/collections/clients/request-password-reset",
Body: strings.NewReader(`{"email":"[email protected]"}`),
Delay: 100 * time.Millisecond,
ExpectedStatus: 204,
BeforeTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
// simulate recent password request sent
authRecord, err := app.Dao().FindFirstRecordByData("clients", "email", "[email protected]")
if err != nil {
t.Fatal(err)
}
authRecord.SetLastResetSentAt(types.NowDateTime())
dao := daos.New(app.Dao().DB()) // new dao to ignore hooks
if err := dao.Save(authRecord); err != nil {
t.Fatal(err)
}
},
},
{
Name: "existing auth record in a collection with disabled password login",
Method: http.MethodPost,
Url: "/api/collections/nologin/request-password-reset",
Body: strings.NewReader(`{"email":"[email protected]"}`),
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
}
for _, scenario := range scenarios {
scenario.Test(t)
}
}

func TestRecordAuthConfirmPasswordReset(t *testing.T) {
scenarios := []tests.ApiScenario{
{
Name: "empty data",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-password-reset",
Body: strings.NewReader(``),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{`,
`"password":{"code":"validation_required"`,
`"passwordConfirm":{"code":"validation_required"`,
`"token":{"code":"validation_required"`,
},
},
{
Name: "invalid data format",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-password-reset",
Body: strings.NewReader(`{"password`),
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "expired token and invalid password",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-password-reset",
Body: strings.NewReader(`{
"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZXhwIjoxNjQwOTkxNjYxfQ.TayHoXkOTM0w8InkBEb86npMJEaf6YVUrxrRmMgFjeY",
"password":"1234567",
"passwordConfirm":"7654321"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{`,
`"token":{"code":"validation_invalid_token"`,
`"password":{"code":"validation_length_out_of_range"`,
`"passwordConfirm":{"code":"validation_values_mismatch"`,
},
},
{
Name: "non auth collection",
Method: http.MethodPost,
Url: "/api/collections/demo1/confirm-password-reset?expand=rel,missing",
Body: strings.NewReader(`{
"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZXhwIjoyMjA4OTg1MjYxfQ.R_4FOSUHIuJQ5Crl3PpIPCXMsoHzuTaNlccpXg_3FOg",
"password":"12345678",
"passwordConfirm":"12345678"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{}`,
},
},
{
Name: "different auth collection",
Method: http.MethodPost,
Url: "/api/collections/clients/confirm-password-reset?expand=rel,missing",
Body: strings.NewReader(`{
"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZXhwIjoyMjA4OTg1MjYxfQ.R_4FOSUHIuJQ5Crl3PpIPCXMsoHzuTaNlccpXg_3FOg",
"password":"12345678",
"passwordConfirm":"12345678"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{"token":{"code":"validation_token_collection_mismatch"`,
},
},
{
Name: "valid token and data",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-password-reset",
Body: strings.NewReader(`{
"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZXhwIjoyMjA4OTg1MjYxfQ.R_4FOSUHIuJQ5Crl3PpIPCXMsoHzuTaNlccpXg_3FOg",
"password":"12345678",
"passwordConfirm":"12345678"
}`),
ExpectedStatus: 204,
ExpectedEvents: map[string]int{
"OnModelAfterUpdate": 1,
"OnModelBeforeUpdate": 1,
"OnRecordBeforeConfirmPasswordResetRequest": 1,
"OnRecordAfterConfirmPasswordResetRequest": 1,
},
},
}
for _, scenario := range scenarios {
scenario.Test(t)
}
}

func TestRecordAuthRequestVerification(t *testing.T) {
scenarios := []tests.ApiScenario{
{
Name: "not an auth collection",
Method: http.MethodPost,
Url: "/api/collections/demo1/request-verification",
Body: strings.NewReader(``),
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "empty data",
Method: http.MethodPost,
Url: "/api/collections/users/request-verification",
Body: strings.NewReader(``),
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{"email":{"code":"validation_required","message":"Cannot be blank."}}`},
},
{
Name: "invalid data",
Method: http.MethodPost,
Url: "/api/collections/users/request-verification",
Body: strings.NewReader(`{"email`),
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "missing auth record",
Method: http.MethodPost,
Url: "/api/collections/users/request-verification",
Body: strings.NewReader(`{"email":"[email protected]"}`),
Delay: 100 * time.Millisecond,
ExpectedStatus: 204,
},
{
Name: "already verified auth record",
Method: http.MethodPost,
Url: "/api/collections/users/request-verification",
Body: strings.NewReader(`{"email":"[email protected]"}`),
Delay: 100 * time.Millisecond,
ExpectedStatus: 204,
ExpectedEvents: map[string]int{
"OnRecordBeforeRequestVerificationRequest": 1,
"OnRecordAfterRequestVerificationRequest": 1,
},
},
{
Name: "existing auth record",
Method: http.MethodPost,
Url: "/api/collections/users/request-verification",
Body: strings.NewReader(`{"email":"[email protected]"}`),
Delay: 100 * time.Millisecond,
ExpectedStatus: 204,
ExpectedEvents: map[string]int{
"OnModelBeforeUpdate": 1,
"OnModelAfterUpdate": 1,
"OnRecordBeforeRequestVerificationRequest": 1,
"OnRecordAfterRequestVerificationRequest": 1,
"OnMailerBeforeRecordVerificationSend": 1,
"OnMailerAfterRecordVerificationSend": 1,
},
},
{
Name: "existing auth record (after already sent)",
Method: http.MethodPost,
Url: "/api/collections/users/request-verification",
Body: strings.NewReader(`{"email":"[email protected]"}`),
Delay: 100 * time.Millisecond,
ExpectedStatus: 204,
ExpectedEvents: map[string]int{
// "OnRecordBeforeRequestVerificationRequest": 1,
// "OnRecordAfterRequestVerificationRequest": 1,
},
BeforeTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
// simulate recent verification sent
authRecord, err := app.Dao().FindFirstRecordByData("users", "email", "[email protected]")
if err != nil {
t.Fatal(err)
}
authRecord.SetLastVerificationSentAt(types.NowDateTime())
dao := daos.New(app.Dao().DB()) // new dao to ignore hooks
if err := dao.Save(authRecord); err != nil {
t.Fatal(err)
}
},
},
}
for _, scenario := range scenarios {
scenario.Test(t)
}
}

func TestRecordAuthConfirmVerification(t *testing.T) {
scenarios := []tests.ApiScenario{
{
Name: "empty data",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-verification",
Body: strings.NewReader(``),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{`,
`"token":{"code":"validation_required"`,
},
},
{
Name: "invalid data format",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-verification",
Body: strings.NewReader(`{"password`),
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "expired token",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-verification",
Body: strings.NewReader(`{
"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZXhwIjoxNjQwOTkxNjYxfQ.Avbt9IP8sBisVz_2AGrlxLDvangVq4PhL2zqQVYLKlE"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{`,
`"token":{"code":"validation_invalid_token"`,
},
},
{
Name: "non auth collection",
Method: http.MethodPost,
Url: "/api/collections/demo1/confirm-verification?expand=rel,missing",
Body: strings.NewReader(`{
"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZXhwIjoyMjA4OTg1MjYxfQ.R_4FOSUHIuJQ5Crl3PpIPCXMsoHzuTaNlccpXg_3FOg"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{}`,
},
},
{
Name: "different auth collection",
Method: http.MethodPost,
Url: "/api/collections/clients/confirm-verification?expand=rel,missing",
Body: strings.NewReader(`{
"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZXhwIjoyMjA4OTg1MjYxfQ.hL16TVmStHFdHLc4a860bRqJ3sFfzjv0_NRNzwsvsrc"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{"token":{"code":"validation_token_collection_mismatch"`,
},
},
{
Name: "valid token",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-verification",
Body: strings.NewReader(`{
"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZXhwIjoyMjA4OTg1MjYxfQ.hL16TVmStHFdHLc4a860bRqJ3sFfzjv0_NRNzwsvsrc"
}`),
ExpectedStatus: 204,
ExpectedEvents: map[string]int{
"OnModelAfterUpdate": 1,
"OnModelBeforeUpdate": 1,
"OnRecordBeforeConfirmVerificationRequest": 1,
"OnRecordAfterConfirmVerificationRequest": 1,
},
},
{
Name: "valid token (already verified)",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-verification",
Body: strings.NewReader(`{
"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6Im9hcDY0MGNvdDR5cnUycyIsImVtYWlsIjoidGVzdDJAZXhhbXBsZS5jb20iLCJjb2xsZWN0aW9uSWQiOiJfcGJfdXNlcnNfYXV0aF8iLCJ0eXBlIjoiYXV0aFJlY29yZCIsImV4cCI6MjIwODk4NTI2MX0.PsOABmYUzGbd088g8iIBL4-pf7DUZm0W5Ju6lL5JVRg"
}`),
ExpectedStatus: 204,
ExpectedEvents: map[string]int{
"OnRecordBeforeConfirmVerificationRequest": 1,
"OnRecordAfterConfirmVerificationRequest": 1,
},
},
{
Name: "valid verification token from a collection without allowed login",
Method: http.MethodPost,
Url: "/api/collections/nologin/confirm-verification",
Body: strings.NewReader(`{
"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6ImRjNDlrNmpnZWpuNDBoMyIsImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImNvbGxlY3Rpb25JZCI6ImtwdjcwOXNrMmxxYnFrOCIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZXhwIjoyMjA4OTg1MjYxfQ.coREjeTDS3_Go7DP1nxHtevIX5rujwHU-_mRB6oOm3w"
}`),
ExpectedStatus: 204,
ExpectedContent: []string{},
ExpectedEvents: map[string]int{
"OnModelAfterUpdate": 1,
"OnModelBeforeUpdate": 1,
"OnRecordBeforeConfirmVerificationRequest": 1,
"OnRecordAfterConfirmVerificationRequest": 1,
},
},
}
for _, scenario := range scenarios {
scenario.Test(t)
}
}

func TestRecordAuthRequestEmailChange(t *testing.T) {
scenarios := []tests.ApiScenario{
{
Name: "unauthorized",
Method: http.MethodPost,
Url: "/api/collections/users/request-email-change",
Body: strings.NewReader(`{"newEmail":"[email protected]"}`),
ExpectedStatus: 401,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "not an auth collection",
Method: http.MethodPost,
Url: "/api/collections/demo1/request-email-change",
Body: strings.NewReader(`{"newEmail":"[email protected]"}`),
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "admin authentication",
Method: http.MethodPost,
Url: "/api/collections/users/request-email-change",
Body: strings.NewReader(`{"newEmail":"[email protected]"}`),
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6InN5d2JoZWNuaDQ2cmhtMCIsInR5cGUiOiJhZG1pbiIsImV4cCI6MjIwODk4NTI2MX0.M1m--VOqGyv0d23eeUc0r9xE8ZzHaYVmVFw1VZW6gT8",
},
ExpectedStatus: 401,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "record authentication but from different auth collection",
Method: http.MethodPost,
Url: "/api/collections/clients/request-email-change",
Body: strings.NewReader(`{"newEmail":"[email protected]"}`),
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.UwD8JvkbQtXpymT09d7J6fdA0aP9g4FJ1GPh_ggEkzc",
},
ExpectedStatus: 403,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "invalid data",
Method: http.MethodPost,
Url: "/api/collections/users/request-email-change",
Body: strings.NewReader(`{"newEmail`),
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.UwD8JvkbQtXpymT09d7J6fdA0aP9g4FJ1GPh_ggEkzc",
},
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "empty data",
Method: http.MethodPost,
Url: "/api/collections/users/request-email-change",
Body: strings.NewReader(`{}`),
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.UwD8JvkbQtXpymT09d7J6fdA0aP9g4FJ1GPh_ggEkzc",
},
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":`,
`"newEmail":{"code":"validation_required"`,
},
},
{
Name: "valid data (existing email)",
Method: http.MethodPost,
Url: "/api/collections/users/request-email-change",
Body: strings.NewReader(`{"newEmail":"[email protected]"}`),
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.UwD8JvkbQtXpymT09d7J6fdA0aP9g4FJ1GPh_ggEkzc",
},
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":`,
`"newEmail":{"code":"validation_record_email_exists"`,
},
},
{
Name: "valid data (new email)",
Method: http.MethodPost,
Url: "/api/collections/users/request-email-change",
Body: strings.NewReader(`{"newEmail":"[email protected]"}`),
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.UwD8JvkbQtXpymT09d7J6fdA0aP9g4FJ1GPh_ggEkzc",
},
ExpectedStatus: 204,
ExpectedEvents: map[string]int{
"OnMailerBeforeRecordChangeEmailSend": 1,
"OnMailerAfterRecordChangeEmailSend": 1,
"OnRecordBeforeRequestEmailChangeRequest": 1,
"OnRecordAfterRequestEmailChangeRequest": 1,
},
},
}
for _, scenario := range scenarios {
scenario.Test(t)
}
}

func TestRecordAuthConfirmEmailChange(t *testing.T) {
scenarios := []tests.ApiScenario{
{
Name: "not an auth collection",
Method: http.MethodPost,
Url: "/api/collections/demo1/confirm-email-change",
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{}`,
},
},
{
Name: "empty data",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-email-change",
Body: strings.NewReader(``),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":`,
`"token":{"code":"validation_required"`,
`"password":{"code":"validation_required"`,
},
},
{
Name: "invalid data",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-email-change",
Body: strings.NewReader(`{"token`),
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "expired token and correct password",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-email-change",
Body: strings.NewReader(`{
"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZW1haWwiOiJ0ZXN0QGV4YW1wbGUuY29tIiwibmV3RW1haWwiOiJjaGFuZ2VAZXhhbXBsZS5jb20iLCJleHAiOjE2NDA5OTE2NjF9.D20jh5Ss7SZyXRUXjjEyLCYo9Ky0N5cE5dKB_MGJ8G8",
"password":"1234567890"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{`,
`"token":{`,
`"code":"validation_invalid_token"`,
},
},
{
Name: "valid token and incorrect password",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-email-change",
Body: strings.NewReader(`{
"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZW1haWwiOiJ0ZXN0QGV4YW1wbGUuY29tIiwibmV3RW1haWwiOiJjaGFuZ2VAZXhhbXBsZS5jb20iLCJleHAiOjIyMDg5ODUyNjF9.1sG6cL708pRXXjiHRZhG-in0X5fnttSf5nNcadKoYRs",
"password":"1234567891"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{`,
`"password":{`,
`"code":"validation_invalid_password"`,
},
},
{
Name: "valid token and correct password",
Method: http.MethodPost,
Url: "/api/collections/users/confirm-email-change",
Body: strings.NewReader(`{
"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZW1haWwiOiJ0ZXN0QGV4YW1wbGUuY29tIiwibmV3RW1haWwiOiJjaGFuZ2VAZXhhbXBsZS5jb20iLCJleHAiOjIyMDg5ODUyNjF9.1sG6cL708pRXXjiHRZhG-in0X5fnttSf5nNcadKoYRs",
"password":"1234567890"
}`),
ExpectedStatus: 204,
ExpectedEvents: map[string]int{
"OnModelAfterUpdate": 1,
"OnModelBeforeUpdate": 1,
"OnRecordBeforeConfirmEmailChangeRequest": 1,
"OnRecordAfterConfirmEmailChangeRequest": 1,
},
},
{
Name: "valid token and correct password in different auth collection",
Method: http.MethodPost,
Url: "/api/collections/clients/confirm-email-change",
Body: strings.NewReader(`{
"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsImNvbGxlY3Rpb25JZCI6Il9wYl91c2Vyc19hdXRoXyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiZW1haWwiOiJ0ZXN0QGV4YW1wbGUuY29tIiwibmV3RW1haWwiOiJjaGFuZ2VAZXhhbXBsZS5jb20iLCJleHAiOjIyMDg5ODUyNjF9.1sG6cL708pRXXjiHRZhG-in0X5fnttSf5nNcadKoYRs",
"password":"1234567890"
}`),
ExpectedStatus: 400,
ExpectedContent: []string{
`"data":{`,
`"token":{"code":"validation_token_collection_mismatch"`,
},
},
}
for _, scenario := range scenarios {
scenario.Test(t)
}
}

func TestRecordAuthListExternalsAuths(t *testing.T) {
scenarios := []tests.ApiScenario{
{
Name: "unauthorized",
Method: http.MethodGet,
Url: "/api/collections/users/records/4q1xlclmfloku33/external-auths",
ExpectedStatus: 401,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "admin + nonexisting record id",
Method: http.MethodGet,
Url: "/api/collections/users/records/missing/external-auths",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6InN5d2JoZWNuaDQ2cmhtMCIsInR5cGUiOiJhZG1pbiIsImV4cCI6MjIwODk4NTI2MX0.M1m--VOqGyv0d23eeUc0r9xE8ZzHaYVmVFw1VZW6gT8",
},
ExpectedStatus: 404,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "admin + existing record id and no external auths",
Method: http.MethodGet,
Url: "/api/collections/users/records/oap640cot4yru2s/external-auths",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6InN5d2JoZWNuaDQ2cmhtMCIsInR5cGUiOiJhZG1pbiIsImV4cCI6MjIwODk4NTI2MX0.M1m--VOqGyv0d23eeUc0r9xE8ZzHaYVmVFw1VZW6gT8",
},
ExpectedStatus: 200,
ExpectedContent: []string{`[]`},
ExpectedEvents: map[string]int{"OnRecordListExternalAuthsRequest": 1},
},
{
Name: "admin + existing user id and 2 external auths",
Method: http.MethodGet,
Url: "/api/collections/users/records/4q1xlclmfloku33/external-auths",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6InN5d2JoZWNuaDQ2cmhtMCIsInR5cGUiOiJhZG1pbiIsImV4cCI6MjIwODk4NTI2MX0.M1m--VOqGyv0d23eeUc0r9xE8ZzHaYVmVFw1VZW6gT8",
},
ExpectedStatus: 200,
ExpectedContent: []string{
`"id":"clmflokuq1xl341"`,
`"id":"dlmflokuq1xl342"`,
`"recordId":"4q1xlclmfloku33"`,
`"collectionId":"_pb_users_auth_"`,
},
ExpectedEvents: map[string]int{"OnRecordListExternalAuthsRequest": 1},
},
{
Name: "auth record + trying to list another user external auths",
Method: http.MethodGet,
Url: "/api/collections/users/records/4q1xlclmfloku33/external-auths",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6Im9hcDY0MGNvdDR5cnUycyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.uatnTBFqMnF0p4FkmwEpA9R-uGFu0Putwyk6NJCKBno",
},
ExpectedStatus: 403,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "auth record + trying to list another user external auths from different collection",
Method: http.MethodGet,
Url: "/api/collections/clients/records/o1y0dd0spd786md/external-auths",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6Im9hcDY0MGNvdDR5cnUycyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.uatnTBFqMnF0p4FkmwEpA9R-uGFu0Putwyk6NJCKBno",
},
ExpectedStatus: 403,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "auth record + owner without external auths",
Method: http.MethodGet,
Url: "/api/collections/users/records/oap640cot4yru2s/external-auths",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6Im9hcDY0MGNvdDR5cnUycyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.uatnTBFqMnF0p4FkmwEpA9R-uGFu0Putwyk6NJCKBno",
},
ExpectedStatus: 200,
ExpectedContent: []string{`[]`},
ExpectedEvents: map[string]int{"OnRecordListExternalAuthsRequest": 1},
},
{
Name: "authorized as user - owner with 2 external auths",
Method: http.MethodGet,
Url: "/api/collections/users/records/4q1xlclmfloku33/external-auths",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.UwD8JvkbQtXpymT09d7J6fdA0aP9g4FJ1GPh_ggEkzc",
},
ExpectedStatus: 200,
ExpectedContent: []string{
`"id":"clmflokuq1xl341"`,
`"id":"dlmflokuq1xl342"`,
`"recordId":"4q1xlclmfloku33"`,
`"collectionId":"_pb_users_auth_"`,
},
ExpectedEvents: map[string]int{"OnRecordListExternalAuthsRequest": 1},
},
}
for _, scenario := range scenarios {
scenario.Test(t)
}
}

func TestRecordAuthUnlinkExternalsAuth(t *testing.T) {
scenarios := []tests.ApiScenario{
{
Name: "unauthorized",
Method: http.MethodDelete,
Url: "/api/collections/users/records/4q1xlclmfloku33/external-auths/google",
ExpectedStatus: 401,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "admin - nonexisting recod id",
Method: http.MethodDelete,
Url: "/api/collections/users/records/missing/external-auths/google",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6InN5d2JoZWNuaDQ2cmhtMCIsInR5cGUiOiJhZG1pbiIsImV4cCI6MjIwODk4NTI2MX0.M1m--VOqGyv0d23eeUc0r9xE8ZzHaYVmVFw1VZW6gT8",
},
ExpectedStatus: 404,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "admin - nonlinked provider",
Method: http.MethodDelete,
Url: "/api/collections/users/records/4q1xlclmfloku33/external-auths/facebook",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6InN5d2JoZWNuaDQ2cmhtMCIsInR5cGUiOiJhZG1pbiIsImV4cCI6MjIwODk4NTI2MX0.M1m--VOqGyv0d23eeUc0r9xE8ZzHaYVmVFw1VZW6gT8",
},
ExpectedStatus: 404,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "admin - linked provider",
Method: http.MethodDelete,
Url: "/api/collections/users/records/4q1xlclmfloku33/external-auths/google",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6InN5d2JoZWNuaDQ2cmhtMCIsInR5cGUiOiJhZG1pbiIsImV4cCI6MjIwODk4NTI2MX0.M1m--VOqGyv0d23eeUc0r9xE8ZzHaYVmVFw1VZW6gT8",
},
ExpectedStatus: 204,
ExpectedContent: []string{},
ExpectedEvents: map[string]int{
"OnModelAfterDelete": 1,
"OnModelBeforeDelete": 1,
"OnRecordAfterUnlinkExternalAuthRequest": 1,
"OnRecordBeforeUnlinkExternalAuthRequest": 1,
},
AfterTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
record, err := app.Dao().FindRecordById("users", "4q1xlclmfloku33")
if err != nil {
t.Fatal(err)
}
auth, _ := app.Dao().FindExternalAuthByRecordAndProvider(record, "google")
if auth != nil {
t.Fatalf("Expected the google ExternalAuth to be deleted, got got \n%v", auth)
}
},
},
{
Name: "auth record - trying to unlink another user external auth",
Method: http.MethodDelete,
Url: "/api/collections/users/records/4q1xlclmfloku33/external-auths/google",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6Im9hcDY0MGNvdDR5cnUycyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.uatnTBFqMnF0p4FkmwEpA9R-uGFu0Putwyk6NJCKBno",
},
ExpectedStatus: 403,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "auth record - trying to unlink another user external auth from different collection",
Method: http.MethodDelete,
Url: "/api/collections/clients/records/o1y0dd0spd786md/external-auths/google",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.UwD8JvkbQtXpymT09d7J6fdA0aP9g4FJ1GPh_ggEkzc",
},
ExpectedStatus: 403,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "auth record - owner with existing external auth",
Method: http.MethodDelete,
Url: "/api/collections/users/records/4q1xlclmfloku33/external-auths/google",
RequestHeaders: map[string]string{
"Authorization": "eyJhbGciOiJIUzI1NiJ9.eyJpZCI6IjRxMXhsY2xtZmxva3UzMyIsInR5cGUiOiJhdXRoUmVjb3JkIiwiY29sbGVjdGlvbklkIjoiX3BiX3VzZXJzX2F1dGhfIiwiZXhwIjoyMjA4OTg1MjYxfQ.UwD8JvkbQtXpymT09d7J6fdA0aP9g4FJ1GPh_ggEkzc",
},
ExpectedStatus: 204,
ExpectedContent: []string{},
ExpectedEvents: map[string]int{
"OnModelAfterDelete": 1,
"OnModelBeforeDelete": 1,
"OnRecordAfterUnlinkExternalAuthRequest": 1,
"OnRecordBeforeUnlinkExternalAuthRequest": 1,
},
AfterTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
record, err := app.Dao().FindRecordById("users", "4q1xlclmfloku33")
if err != nil {
t.Fatal(err)
}
auth, _ := app.Dao().FindExternalAuthByRecordAndProvider(record, "google")
if auth != nil {
t.Fatalf("Expected the google ExternalAuth to be deleted, got got \n%v", auth)
}
},
},
}
for _, scenario := range scenarios {
scenario.Test(t)
}
}

func TestRecordAuthOAuth2Redirect(t *testing.T) {
c1 := subscriptions.NewDefaultClient()
c2 := subscriptions.NewDefaultClient()
c2.Subscribe("@oauth2")
c3 := subscriptions.NewDefaultClient()
c3.Subscribe("test1", "@oauth2")
c4 := subscriptions.NewDefaultClient()
c4.Subscribe("test1", "test2")
c5 := subscriptions.NewDefaultClient()
c5.Subscribe("@oauth2")
c5.Discard()
beforeTestFunc := func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
app.SubscriptionsBroker().Register(c1)
app.SubscriptionsBroker().Register(c2)
app.SubscriptionsBroker().Register(c3)
app.SubscriptionsBroker().Register(c4)
app.SubscriptionsBroker().Register(c5)
}
scenarios := []tests.ApiScenario{
{
Name: "no state query param",
Method: http.MethodGet,
Url: "/api/oauth2-redirect?code=123",
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "no code query param",
Method: http.MethodGet,
Url: "/api/oauth2-redirect?state=" + c3.Id(),
ExpectedStatus: 400,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "missing client",
Method: http.MethodGet,
Url: "/api/oauth2-redirect?code=123&state=missing",
ExpectedStatus: 404,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "discarded client with @oauth2 subscription",
Method: http.MethodGet,
Url: "/api/oauth2-redirect?code=123&state=" + c5.Id(),
BeforeTestFunc: beforeTestFunc,
ExpectedStatus: 404,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "client without @oauth2 subscription",
Method: http.MethodGet,
Url: "/api/oauth2-redirect?code=123&state=" + c4.Id(),
BeforeTestFunc: beforeTestFunc,
ExpectedStatus: 404,
ExpectedContent: []string{`"data":{}`},
},
{
Name: "client with @oauth2 subscription",
Method: http.MethodGet,
Url: "/api/oauth2-redirect?code=123&state=" + c3.Id(),
BeforeTestFunc: func(t *testing.T, app *tests.TestApp, e *echo.Echo) {
beforeTestFunc(t, app, e)
ctx, cancelFunc := context.WithTimeout(context.Background(), 1*time.Second)
go func() {
defer cancelFunc()
L:
for {
select {
case <-c1.Channel():
t.Error("Unexpected c1 message")
break L
case <-c2.Channel():
t.Error("Unexpected c2 message")
break L
case msg := <-c3.Channel():
if msg.Name != "@oauth2" {
t.Errorf("Expected @oauth2 msg.Name, got %q", msg.Name)
}
expectedParams := []string{`"state"`, `"code"`}
for _, p := range expectedParams {
if !strings.Contains(msg.Data, p) {
t.Errorf("Couldn't find %s in \n%v", p, msg.Data)
}
}
break L
case <-c4.Channel():
t.Error("Unexpected c4 message")
break L
case <-c5.Channel():
t.Error("Unexpected c5 message")
break L
case <-ctx.Done():
t.Error("Context timeout reached")
break L
}
}
}()
},
ExpectedStatus: http.StatusTemporaryRedirect,
},
}
for _, scenario := range scenarios {
scenario.Test(t)
}
}
| apis/record_auth_test.go | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.0008090568007901311,
0.0001819352328311652,
0.00016505253734067082,
0.00016870808030944318,
0.00007736787665635347
] |
{
"id": 0,
"code_window": [
"// @todo add tests\n",
"func (app *BaseApp) initAutobackupHooks() error {\n",
"\tc := cron.New()\n",
"\n",
"\tloadJob := func() {\n",
"\t\tc.Stop()\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tisServe := false\n"
],
"file_path": "core/base_backup.go",
"type": "add",
"edit_start_line_idx": 253
} | <svg viewBox="0 0 36 36" fill="none" role="img" xmlns="http://www.w3.org/2000/svg" width="128" height="128"><title>Mary Roebling</title><mask id="mask__beam" maskUnits="userSpaceOnUse" x="0" y="0" width="36" height="36"><rect width="36" height="36" fill="#FFFFFF"></rect></mask><g mask="url(#mask__beam)"><rect width="36" height="36" fill="#f0f0d8"></rect><rect x="0" y="0" width="36" height="36" transform="translate(5 -1) rotate(155 18 18) scale(1.2)" fill="#000000" rx="6"></rect><g transform="translate(3 -4) rotate(-5 18 18)"><path d="M15 21c2 1 4 1 6 0" stroke="#FFFFFF" fill="none" stroke-linecap="round"></path><rect x="14" y="14" width="1.5" height="2" rx="1" stroke="none" fill="#FFFFFF"></rect><rect x="20" y="14" width="1.5" height="2" rx="1" stroke="none" fill="#FFFFFF"></rect></g></g></svg>
| ui/public/images/avatars/avatar0.svg | 0 | https://github.com/pocketbase/pocketbase/commit/5551f8f5aa16f49c8100078aca83b472c444db1e | [
0.00017223456234205514,
0.00017223456234205514,
0.00017223456234205514,
0.00017223456234205514,
0
] |