| hunk | file | file_path | label | commit_url | dependency_score |
|---|---|---|---|---|---|
| dict | stringlengths 0-11.8M | stringlengths 2-234 | int64 0-1 | stringlengths 74-103 | sequencelengths 5-5 |
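Each row pairs one edit hunk (the `hunk` column, a JSON object) with the full text of a candidate file, that file's path, an integer relevance label, the source commit URL, and a five-element `dependency_score` vector. As a rough sketch of how a row's `hunk` object could be decoded, here is a Go struct whose field names are taken directly from the records below; the struct itself and the sample literal are illustrative, not part of the dataset:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hunk mirrors the JSON object in the "hunk" column. Field names come from
// the records in this table; the Go types are inferred from the values.
type Hunk struct {
	ID               int      `json:"id"`
	CodeWindow       []string `json:"code_window"`         // context lines around the edit site
	Labels           []string `json:"labels"`              // "keep" or "add", one per window line
	AfterEdit        []string `json:"after_edit"`          // lines introduced by the edit
	FilePath         string   `json:"file_path"`           // file the hunk belongs to
	Type             string   `json:"type"`                // e.g. "add"
	EditStartLineIdx int      `json:"edit_start_line_idx"` // 0-based line index of the edit
}

func main() {
	raw := `{"id": 6, "code_window": ["\t\t\tdefault:\n"], "labels": ["keep"],
	         "after_edit": [], "file_path": "terraform/transform_reference.go",
	         "type": "add", "edit_start_line_idx": 223}`
	var h Hunk
	if err := json.Unmarshal([]byte(raw), &h); err != nil {
		panic(err)
	}
	fmt.Printf("hunk %d targets %s at line %d\n", h.ID, h.FilePath, h.EditStartLineIdx)
}
```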
{
"id": 6,
"code_window": [
"\t\t\t\t// OK\n",
"\t\t\tdefault:\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n",
"\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// We're only concerned with variables, locals and outputs\n"
],
"file_path": "terraform/transform_reference.go",
"type": "add",
"edit_start_line_idx": 223
} | package terraform
import (
"fmt"
"log"
"sort"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/lang"
"github.com/hashicorp/terraform/states"
)
// GraphNodeReferenceable must be implemented by any node that represents
// a Terraform thing that can be referenced (resource, module, etc.).
//
// Even if the thing has no name, this should return an empty list. By
// implementing this and returning a non-nil result, you say that this CAN
// be referenced and other methods of referencing may still be possible (such
// as by path!)
type GraphNodeReferenceable interface {
GraphNodeSubPath
// ReferenceableAddrs returns a list of addresses through which this can be
// referenced.
ReferenceableAddrs() []addrs.Referenceable
}
// GraphNodeReferencer must be implemented by nodes that reference other
// Terraform items and therefore depend on them.
type GraphNodeReferencer interface {
GraphNodeSubPath
// References returns a list of references made by this node, which
// include both a referenced address and source location information for
// the reference.
References() []*addrs.Reference
}
type GraphNodeAttachDependencies interface {
GraphNodeResource
AttachDependencies([]addrs.AbsResource)
}
// GraphNodeReferenceOutside is an interface that can optionally be implemented.
// A node that implements it can specify that its own referenceable addresses
// and/or the addresses it references are in a different module than the
// node itself.
//
// Any referenceable addresses returned by ReferenceableAddrs are interpreted
// relative to the returned selfPath.
//
// Any references returned by References are interpreted relative to the
// returned referencePath.
//
// It is valid but not required for either of these paths to match what is
// returned by method Path, though if both match the main Path then there
// is no reason to implement this method.
//
// The primary use-case for this is the nodes representing module input
// variables, since their expressions are resolved in terms of their calling
// module, but they are still referenced from their own module.
type GraphNodeReferenceOutside interface {
// ReferenceOutside returns a path in which any references from this node
// are resolved.
ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance)
}
// ReferenceTransformer is a GraphTransformer that connects all the
// nodes that reference each other in order to form the proper ordering.
type ReferenceTransformer struct{}
func (t *ReferenceTransformer) Transform(g *Graph) error {
// Build a reference map so we can efficiently look up the references
vs := g.Vertices()
m := NewReferenceMap(vs)
// Find the things that reference things and connect them
for _, v := range vs {
parents, _ := m.References(v)
parentsDbg := make([]string, len(parents))
for i, v := range parents {
parentsDbg[i] = dag.VertexName(v)
}
log.Printf(
"[DEBUG] ReferenceTransformer: %q references: %v",
dag.VertexName(v), parentsDbg)
for _, parent := range parents {
g.Connect(dag.BasicEdge(v, parent))
}
if len(parents) > 0 {
continue
}
}
return nil
}
// AttachDependenciesTransformer records all resource dependencies for each
// instance, and attaches the addresses to the node itself. Managed resources
// will record these in the state for proper ordering of destroy operations.
type AttachDependenciesTransformer struct {
Config *configs.Config
State *states.State
Schemas *Schemas
}
func (t AttachDependenciesTransformer) Transform(g *Graph) error {
for _, v := range g.Vertices() {
attacher, ok := v.(GraphNodeAttachDependencies)
if !ok {
continue
}
selfAddr := attacher.ResourceAddr()
// Data sources don't need to track destroy dependencies
if selfAddr.Resource.Mode == addrs.DataResourceMode {
continue
}
ans, err := g.Ancestors(v)
if err != nil {
return err
}
// dedupe addrs when there's multiple instances involved, or
// multiple paths in the un-reduced graph
depMap := map[string]addrs.AbsResource{}
for _, d := range ans.List() {
var addr addrs.AbsResource
switch d := d.(type) {
case GraphNodeResourceInstance:
instAddr := d.ResourceInstanceAddr()
addr = instAddr.Resource.Resource.Absolute(instAddr.Module)
case GraphNodeResource:
addr = d.ResourceAddr()
default:
continue
}
// Data sources don't need to track destroy dependencies
if addr.Resource.Mode == addrs.DataResourceMode {
continue
}
if addr.Equal(selfAddr) {
continue
}
depMap[addr.String()] = addr
}
deps := make([]addrs.AbsResource, 0, len(depMap))
for _, d := range depMap {
deps = append(deps, d)
}
sort.Slice(deps, func(i, j int) bool {
return deps[i].String() < deps[j].String()
})
log.Printf("[TRACE] AttachDependenciesTransformer: %s depends on %s", attacher.ResourceAddr(), deps)
attacher.AttachDependencies(deps)
}
return nil
}
// DestroyValueReferenceTransformer is a GraphTransformer that reverses the edges
// for locals and outputs that depend on other nodes which will be
// removed during destroy. If a destroy node is evaluated before the local or
// output value, it will be removed from the state, and the later interpolation
// will fail.
type DestroyValueReferenceTransformer struct{}
func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error {
vs := g.Vertices()
for _, v := range vs {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
// reverse any outgoing edges so that the value is evaluated first.
for _, e := range g.EdgesFrom(v) {
target := e.Target()
// only destroy nodes will be evaluated in reverse
if _, ok := target.(GraphNodeDestroyer); !ok {
continue
}
log.Printf("[TRACE] output dep: %s", dag.VertexName(target))
g.RemoveEdge(e)
g.Connect(&DestroyEdge{S: target, T: v})
}
}
return nil
}
// PruneUnusedValuesTransformer is a GraphTransformer that removes local and
// output values which are not referenced in the graph. Since outputs and
// locals always need to be evaluated, if they reference a resource that is not
// available in the state the interpolation could fail.
type PruneUnusedValuesTransformer struct{}
func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
// this might need multiple runs in order to ensure that pruning a value
// doesn't affect a previously checked value.
for removed := 0; ; removed = 0 {
for _, v := range g.Vertices() {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
dependants := g.UpEdges(v)
switch dependants.Len() {
case 0:
// nothing at all depends on this
g.Remove(v)
removed++
case 1:
// because an output's destroy node always depends on the output,
// we need to check for the case of a single destroy node.
d := dependants.List()[0]
if _, ok := d.(*NodeDestroyableOutput); ok {
g.Remove(v)
removed++
}
}
}
if removed == 0 {
break
}
}
return nil
}
// ReferenceMap is a structure that can be used to efficiently check
// for references on a graph.
type ReferenceMap struct {
// vertices is a map from internal reference keys (as produced by the
// mapKey method) to one or more vertices that are identified by each key.
//
// A particular reference key might actually identify multiple vertices,
// e.g. in situations where one object is contained inside another.
vertices map[string][]dag.Vertex
// edges is a map whose keys are a subset of the internal reference keys
// from "vertices", and whose values are the nodes that refer to each
// key. The values in this map are the referrers, while values in
// "verticies" are the referents. The keys in both cases are referents.
edges map[string][]dag.Vertex
}
// References returns the set of vertices that the given vertex refers to,
// and any referenced addresses that do not have corresponding vertices.
func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) {
rn, ok := v.(GraphNodeReferencer)
if !ok {
return nil, nil
}
if _, ok := v.(GraphNodeSubPath); !ok {
return nil, nil
}
var matches []dag.Vertex
var missing []addrs.Referenceable
for _, ref := range rn.References() {
subject := ref.Subject
key := m.referenceMapKey(v, subject)
if _, exists := m.vertices[key]; !exists {
// If what we were looking for was a ResourceInstance then we
// might be in a resource-oriented graph rather than an
// instance-oriented graph, and so we'll see if we have the
// resource itself instead.
switch ri := subject.(type) {
case addrs.ResourceInstance:
subject = ri.ContainingResource()
case addrs.ResourceInstancePhase:
subject = ri.ContainingResource()
}
key = m.referenceMapKey(v, subject)
}
vertices := m.vertices[key]
for _, rv := range vertices {
// don't include self-references
if rv == v {
continue
}
matches = append(matches, rv)
}
if len(vertices) == 0 {
missing = append(missing, ref.Subject)
}
}
return matches, missing
}
// Referrers returns the set of vertices that refer to the given vertex.
func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex {
rn, ok := v.(GraphNodeReferenceable)
if !ok {
return nil
}
sp, ok := v.(GraphNodeSubPath)
if !ok {
return nil
}
var matches []dag.Vertex
for _, addr := range rn.ReferenceableAddrs() {
key := m.mapKey(sp.Path(), addr)
referrers, ok := m.edges[key]
if !ok {
continue
}
// If the referrer set includes our own given vertex then we skip,
// since we don't want to return self-references.
selfRef := false
for _, p := range referrers {
if p == v {
selfRef = true
break
}
}
if selfRef {
continue
}
matches = append(matches, referrers...)
}
return matches
}
func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string {
return fmt.Sprintf("%s|%s", path.String(), addr.String())
}
// vertexReferenceablePath returns the path in which the given vertex can be
// referenced. This is the path that the results of its ReferenceableAddrs
// are considered to be relative to.
//
// Only GraphNodeSubPath implementations can be referenced, so this method will
// panic if the given vertex does not implement that interface.
func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance {
sp, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp))
}
if outside, ok := v.(GraphNodeReferenceOutside); ok {
// Vertex is referenced from a different module than where it was
// declared.
path, _ := outside.ReferenceOutside()
return path
}
// Vertex is referenced from the same module as where it was declared.
return sp.Path()
}
// vertexReferencePath returns the path in which references _from_ the given
// vertex must be interpreted.
//
// Only GraphNodeSubPath implementations can have references, so this method
// will panic if the given vertex does not implement that interface.
func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance {
sp, ok := referrer.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp))
}
var path addrs.ModuleInstance
if outside, ok := referrer.(GraphNodeReferenceOutside); ok {
// Vertex makes references to objects in a different module than where
// it was declared.
_, path = outside.ReferenceOutside()
return path
}
// Vertex makes references to objects in the same module as where it
// was declared.
return sp.Path()
}
// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex
// that the reference is from, and "addr" is the address of the object being
// referenced.
//
// The result is an opaque string that includes both the address of the given
// object and the address of the module instance that object belongs to.
//
// Only GraphNodeSubPath implementations can be referrers, so this method will
// panic if the given vertex does not implement that interface.
func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string {
path := vertexReferencePath(referrer)
return m.mapKey(path, addr)
}
// NewReferenceMap is used to create a new reference map for the
// given set of vertices.
func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
var m ReferenceMap
// Build the lookup table
vertices := make(map[string][]dag.Vertex)
for _, v := range vs {
_, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
continue
}
// We're only looking for referenceable nodes
rn, ok := v.(GraphNodeReferenceable)
if !ok {
continue
}
path := m.vertexReferenceablePath(v)
// Go through and cache them
for _, addr := range rn.ReferenceableAddrs() {
key := m.mapKey(path, addr)
vertices[key] = append(vertices[key], v)
}
// Any node can be referenced by the address of the module it belongs
// to or any of that module's ancestors.
for _, addr := range path.Ancestors()[1:] {
// Can be referenced either as the specific call instance (with
// an instance key) or as the bare module call itself (the "module"
// block in the parent module that created the instance).
callPath, call := addr.Call()
callInstPath, callInst := addr.CallInstance()
callKey := m.mapKey(callPath, call)
callInstKey := m.mapKey(callInstPath, callInst)
vertices[callKey] = append(vertices[callKey], v)
vertices[callInstKey] = append(vertices[callInstKey], v)
}
}
// Build the lookup table for referenced by
edges := make(map[string][]dag.Vertex)
for _, v := range vs {
_, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
continue
}
rn, ok := v.(GraphNodeReferencer)
if !ok {
// We're only looking for referenceable nodes
continue
}
// Go through and cache them
for _, ref := range rn.References() {
if ref.Subject == nil {
// Should never happen
panic(fmt.Sprintf("%T.References returned reference with nil subject", rn))
}
key := m.referenceMapKey(v, ref.Subject)
edges[key] = append(edges[key], v)
}
}
m.vertices = vertices
m.edges = edges
return &m
}
// ReferencesFromConfig returns the references that a configuration has
// based on the interpolated variables in a configuration.
func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference {
if body == nil {
return nil
}
refs, _ := lang.ReferencesInBlock(body, schema)
return refs
}
// appendResourceDestroyReferences identifies resource and resource instance
// references in the given slice and appends to it the "destroy-phase"
// equivalents of those references, returning the result.
//
// This can be used in the References implementation for a node which must also
// depend on the destruction of anything it references.
func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference {
given := refs
for _, ref := range given {
switch tr := ref.Subject.(type) {
case addrs.Resource:
newRef := *ref // shallow copy
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
refs = append(refs, &newRef)
case addrs.ResourceInstance:
newRef := *ref // shallow copy
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
refs = append(refs, &newRef)
}
}
return refs
}
func modulePrefixStr(p addrs.ModuleInstance) string {
return p.String()
}
func modulePrefixList(result []string, prefix string) []string {
if prefix != "" {
for i, v := range result {
result[i] = fmt.Sprintf("%s.%s", prefix, v)
}
}
return result
}
| terraform/transform_reference.go | 1 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.05224976688623428,
0.0016144209075719118,
0.00016447219240944833,
0.00018520525190979242,
0.007006651256233454
] |
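Reading the records above, the `labels` array aligns one-to-one with `code_window`: `keep` lines are context, and an `add` label marks the window position where the `after_edit` lines are spliced in, with `edit_start_line_idx` giving the 0-based insertion point in the full file. That reading is inferred from the data rather than a published spec, so treat the following Go sketch as an assumption about the format:

```go
package main

import (
	"fmt"
	"strings"
)

// applyAddHunk splices the afterEdit lines into the file text at the
// 0-based line index editStartLineIdx, matching the `"type": "add"`
// records above (an assumed reading of the format, not a spec).
func applyAddHunk(file string, afterEdit []string, editStartLineIdx int) string {
	lines := strings.SplitAfter(file, "\n")
	if editStartLineIdx > len(lines) {
		editStartLineIdx = len(lines)
	}
	var b strings.Builder
	for _, l := range lines[:editStartLineIdx] {
		b.WriteString(l)
	}
	for _, l := range afterEdit {
		b.WriteString(l) // after_edit lines already carry their trailing "\n"
	}
	for _, l := range lines[editStartLineIdx:] {
		b.WriteString(l)
	}
	return b.String()
}

func main() {
	window := "\t\t\t\t// OK\n\t\t\tdefault:\n\t\t\t\tcontinue\n"
	added := []string{"\t\t\t\t// We're only concerned with variables, locals and outputs\n"}
	// Insert before "default:", i.e. after the "// OK" line of the window.
	fmt.Print(applyAddHunk(window, added, 1))
}
```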
{
"id": 6,
"code_window": [
"\t\t\t\t// OK\n",
"\t\t\tdefault:\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n",
"\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// We're only concerned with variables, locals and outputs\n"
],
"file_path": "terraform/transform_reference.go",
"type": "add",
"edit_start_line_idx": 223
} | ---
layout: "commands-workspace"
page_title: "Command: workspace select"
sidebar_current: "docs-commands-workspace-sub-select"
description: |-
The terraform workspace select command is used to choose a workspace.
---
# Command: workspace select
The `terraform workspace select` command is used to choose a different
workspace to use for further operations.
## Usage
Usage: `terraform workspace select [NAME]`
This command will select another workspace. The named workspace must already
exist.
## Example
```
$ terraform workspace list
default
* development
jsmith-test
$ terraform workspace select default
Switched to workspace "default".
```
| website/docs/commands/workspace/select.html.md | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.0002707337844185531,
0.0001977209612959996,
0.00016854022396728396,
0.00017580491839908063,
0.00004245122545398772
] |
{
"id": 6,
"code_window": [
"\t\t\t\t// OK\n",
"\t\t\tdefault:\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n",
"\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// We're only concerned with variables, locals and outputs\n"
],
"file_path": "terraform/transform_reference.go",
"type": "add",
"edit_start_line_idx": 223
} | The following files were ported to Go from C files of libyaml, and thus
are still covered by their original copyright and license:
apic.go
emitterc.go
parserc.go
readerc.go
scannerc.go
writerc.go
yamlh.go
yamlprivateh.go
Copyright (c) 2006 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.00017478183144703507,
0.00017127148748841137,
0.00016864342615008354,
0.0001708303316263482,
0.0000024041496544668917
] |
{
"id": 6,
"code_window": [
"\t\t\t\t// OK\n",
"\t\t\tdefault:\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n",
"\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// We're only concerned with variables, locals and outputs\n"
],
"file_path": "terraform/transform_reference.go",
"type": "add",
"edit_start_line_idx": 223
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build 386,darwin
package unix
import (
"syscall"
)
func setTimespec(sec, nsec int64) Timespec {
return Timespec{Sec: int32(sec), Nsec: int32(nsec)}
}
func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: int32(sec), Usec: int32(usec)}
}
//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error)
func Gettimeofday(tv *Timeval) (err error) {
// The tv passed to gettimeofday must be non-nil
// but is otherwise unused. The answers come back
// in the two registers.
sec, usec, err := gettimeofday(tv)
tv.Sec = int32(sec)
tv.Usec = int32(usec)
return err
}
func SetKevent(k *Kevent_t, fd, mode, flags int) {
k.Ident = uint32(fd)
k.Filter = int16(mode)
k.Flags = uint16(flags)
}
func (iov *Iovec) SetLen(length int) {
iov.Len = uint32(length)
}
func (msghdr *Msghdr) SetControllen(length int) {
msghdr.Controllen = uint32(length)
}
func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
// of darwin/386 the syscall is called sysctl instead of __sysctl.
const SYS___SYSCTL = SYS_SYSCTL
//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64
//sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64
//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64
//sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64
//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
//sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64
| vendor/golang.org/x/sys/unix/syscall_darwin_386.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.0013550783041864634,
0.0003403054433874786,
0.00016665898147039115,
0.0001709092757664621,
0.00041429803241044283
] |
{
"id": 7,
"code_window": [
"\t\t\tdependants := g.UpEdges(v)\n",
"\n",
"\t\t\tswitch dependants.Len() {\n",
"\t\t\tcase 0:\n",
"\t\t\t\t// nothing at all depends on this\n",
"\t\t\t\tg.Remove(v)\n",
"\t\t\t\tremoved++\n",
"\t\t\tcase 1:\n",
"\t\t\t\t// because an output's destroy node always depends on the output,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tlog.Printf(\"[TRACE] PruneUnusedValuesTransformer: removing unused value %s\", dag.VertexName(v))\n"
],
"file_path": "terraform/transform_reference.go",
"type": "add",
"edit_start_line_idx": 231
} | package terraform
import (
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/plans"
"github.com/hashicorp/terraform/states"
"github.com/hashicorp/terraform/tfdiags"
)
// ApplyGraphBuilder implements GraphBuilder and is responsible for building
// a graph for applying a Terraform diff.
//
// Because the graph is built from the diff (vs. the config or state),
// this helps ensure that the apply-time graph doesn't modify any resources
// that aren't explicitly in the diff. There are other scenarios where the
// diff can be deviated from, so this is just one layer of protection.
type ApplyGraphBuilder struct {
// Config is the configuration tree that the diff was built from.
Config *configs.Config
// Changes describes the changes that we need apply.
Changes *plans.Changes
// State is the current state
State *states.State
// Components is a factory for the plug-in components (providers and
// provisioners) available for use.
Components contextComponentFactory
// Schemas is the repository of schemas we will draw from to analyse
// the configuration.
Schemas *Schemas
// Targets are resources to target. This is only required to make sure
// unnecessary outputs aren't included in the apply graph. The plan
// builder successfully handles targeting resources. In the future,
// outputs should go into the diff so that this is unnecessary.
Targets []addrs.Targetable
// DisableReduce, if true, will not reduce the graph. Great for testing.
DisableReduce bool
// Destroy, if true, represents a pure destroy operation
Destroy bool
// Validate will do structural validation of the graph.
Validate bool
}
// See GraphBuilder
func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
return (&BasicGraphBuilder{
Steps: b.Steps(),
Validate: b.Validate,
Name: "ApplyGraphBuilder",
}).Build(path)
}
// See GraphBuilder
func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
// Custom factory for creating providers.
concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
return &NodeApplyableProvider{
NodeAbstractProvider: a,
}
}
concreteResource := func(a *NodeAbstractResource) dag.Vertex {
return &NodeApplyableResource{
NodeAbstractResource: a,
}
}
concreteOrphanResource := func(a *NodeAbstractResource) dag.Vertex {
return &NodeDestroyResource{
NodeAbstractResource: a,
}
}
concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex {
return &NodeApplyableResourceInstance{
NodeAbstractResourceInstance: a,
}
}
steps := []GraphTransformer{
// Creates all the resources represented in the config. During apply,
// we use this just to ensure that the whole-resource metadata is
// updated to reflect things such as whether the count argument is
// set in config, or which provider configuration manages each resource.
&ConfigTransformer{
Concrete: concreteResource,
Config: b.Config,
},
// Creates all the resource instances represented in the diff, along
// with dependency edges against the whole-resource nodes added by
// ConfigTransformer above.
&DiffTransformer{
Concrete: concreteResourceInstance,
State: b.State,
Changes: b.Changes,
},
// Creates extra cleanup nodes for any entire resources that are
// no longer present in config, so we can make sure we clean up the
// leftover empty resource states after the instances have been
// destroyed.
// (We don't track this particular type of change in the plan because
// it's just cleanup of our own state object, and so doesn't affect
// any real remote objects or consumable outputs.)
&OrphanResourceTransformer{
Concrete: concreteOrphanResource,
Config: b.Config,
State: b.State,
},
// Create orphan output nodes
&OrphanOutputTransformer{Config: b.Config, State: b.State},
// Attach the configuration to any resources
&AttachResourceConfigTransformer{Config: b.Config},
// Attach the state
&AttachStateTransformer{State: b.State},
// Provisioner-related transformations
&MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()},
&ProvisionerTransformer{},
// Add root variables
&RootVariableTransformer{Config: b.Config},
// Add the local values
&LocalTransformer{Config: b.Config},
// Add the outputs
&OutputTransformer{Config: b.Config},
// Add module variables
&ModuleVariableTransformer{Config: b.Config},
// add providers
TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config),
// Remove modules no longer present in the config
&RemovedModuleTransformer{Config: b.Config, State: b.State},
// Must attach schemas before ReferenceTransformer so that we can
// analyze the configuration to find references.
&AttachSchemaTransformer{Schemas: b.Schemas},
// Connect references so ordering is correct
&ReferenceTransformer{},
&AttachDependenciesTransformer{},
// Destruction ordering
&DestroyEdgeTransformer{
Config: b.Config,
State: b.State,
Schemas: b.Schemas,
},
&CBDEdgeTransformer{
Config: b.Config,
State: b.State,
Schemas: b.Schemas,
Destroy: b.Destroy,
},
// Handle destroy time transformations for output and local values.
// Reverse the edges from outputs and locals, so that
// interpolations don't fail during destroy.
// Create a destroy node for outputs to remove them from the state.
// Prune unreferenced values, which may have interpolations that can't
// be resolved.
GraphTransformIf(
func() bool { return b.Destroy },
GraphTransformMulti(
&DestroyValueReferenceTransformer{},
&DestroyOutputTransformer{},
&PruneUnusedValuesTransformer{},
),
),
// Add the node to fix the state count boundaries
&CountBoundaryTransformer{
Config: b.Config,
},
// Target
&TargetsTransformer{Targets: b.Targets},
// Close opened plugin connections
&CloseProviderTransformer{},
&CloseProvisionerTransformer{},
// Single root
&RootTransformer{},
}
if !b.DisableReduce {
// Perform the transitive reduction to make our graph a bit
// more sane if possible (it usually is possible).
steps = append(steps, &TransitiveReductionTransformer{})
}
return steps
}
| terraform/graph_builder_apply.go | 1 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.00022292547510005534,
0.00017359505000058562,
0.00016153024625964463,
0.0001682302972767502,
0.000013578466678154655
] |
{
"id": 7,
"code_window": [
"\t\t\tdependants := g.UpEdges(v)\n",
"\n",
"\t\t\tswitch dependants.Len() {\n",
"\t\t\tcase 0:\n",
"\t\t\t\t// nothing at all depends on this\n",
"\t\t\t\tg.Remove(v)\n",
"\t\t\t\tremoved++\n",
"\t\t\tcase 1:\n",
"\t\t\t\t// because an output's destroy node always depends on the output,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tlog.Printf(\"[TRACE] PruneUnusedValuesTransformer: removing unused value %s\", dag.VertexName(v))\n"
],
"file_path": "terraform/transform_reference.go",
"type": "add",
"edit_start_line_idx": 231
} | resource "test" "foo" {
things = ["foo"]
}
data "test" "foo" {
count = length(test.foo.things)
}
| terraform/testdata/refresh-data-count/refresh-data-count.tf | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.00017640370060689747,
0.00017640370060689747,
0.00017640370060689747,
0.00017640370060689747,
0
] |
{
"id": 7,
"code_window": [
"\t\t\tdependants := g.UpEdges(v)\n",
"\n",
"\t\t\tswitch dependants.Len() {\n",
"\t\t\tcase 0:\n",
"\t\t\t\t// nothing at all depends on this\n",
"\t\t\t\tg.Remove(v)\n",
"\t\t\t\tremoved++\n",
"\t\t\tcase 1:\n",
"\t\t\t\t// because an output's destroy node always depends on the output,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tlog.Printf(\"[TRACE] PruneUnusedValuesTransformer: removing unused value %s\", dag.VertexName(v))\n"
],
"file_path": "terraform/transform_reference.go",
"type": "add",
"edit_start_line_idx": 231
} | package customdiff
import (
"testing"
"github.com/hashicorp/terraform/helper/schema"
)
func TestComputedIf(t *testing.T) {
t.Run("true", func(t *testing.T) {
var condCalls int
var gotOld, gotNew string
provider := testProvider(
map[string]*schema.Schema{
"foo": {
Type: schema.TypeString,
Optional: true,
},
"comp": {
Type: schema.TypeString,
Computed: true,
},
},
ComputedIf("comp", func(d *schema.ResourceDiff, meta interface{}) bool {
// When we set "ForceNew", our CustomizeDiff function is actually
// called a second time to construct the "create" portion of
// the replace diff. On the second call, the old value is masked
// as "" to suggest that the object is being created rather than
// updated.
condCalls++
old, new := d.GetChange("foo")
gotOld = old.(string)
gotNew = new.(string)
return true
}),
)
diff, err := testDiff(
provider,
map[string]string{
"foo": "bar",
"comp": "old",
},
map[string]string{
"foo": "baz",
},
)
if err != nil {
t.Fatalf("Diff failed with error: %s", err)
}
if condCalls != 1 {
t.Fatalf("Wrong number of conditional callback calls %d; want %d", condCalls, 1)
} else {
if got, want := gotOld, "bar"; got != want {
t.Errorf("wrong old value %q on first call; want %q", got, want)
}
if got, want := gotNew, "baz"; got != want {
t.Errorf("wrong new value %q on first call; want %q", got, want)
}
}
if !diff.Attributes["comp"].NewComputed {
t.Error("Attribute 'comp' is not marked as NewComputed")
}
})
t.Run("false", func(t *testing.T) {
var condCalls int
var gotOld, gotNew string
provider := testProvider(
map[string]*schema.Schema{
"foo": {
Type: schema.TypeString,
Optional: true,
},
"comp": {
Type: schema.TypeString,
Computed: true,
},
},
ComputedIf("comp", func(d *schema.ResourceDiff, meta interface{}) bool {
condCalls++
old, new := d.GetChange("foo")
gotOld = old.(string)
gotNew = new.(string)
return false
}),
)
diff, err := testDiff(
provider,
map[string]string{
"foo": "bar",
"comp": "old",
},
map[string]string{
"foo": "baz",
},
)
if err != nil {
t.Fatalf("Diff failed with error: %s", err)
}
if condCalls != 1 {
t.Fatalf("Wrong number of conditional callback calls %d; want %d", condCalls, 1)
} else {
if got, want := gotOld, "bar"; got != want {
t.Errorf("wrong old value %q on first call; want %q", got, want)
}
if got, want := gotNew, "baz"; got != want {
t.Errorf("wrong new value %q on first call; want %q", got, want)
}
}
if diff.Attributes["comp"] != nil && diff.Attributes["comp"].NewComputed {
t.Error("Attribute 'foo' is marked as NewComputed, but should not be")
}
})
}
| helper/customdiff/computed_test.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.00017675642448011786,
0.0001735686237225309,
0.00016829484957270324,
0.0001735964324325323,
0.0000026439990961080184
] |
{
"id": 7,
"code_window": [
"\t\t\tdependants := g.UpEdges(v)\n",
"\n",
"\t\t\tswitch dependants.Len() {\n",
"\t\t\tcase 0:\n",
"\t\t\t\t// nothing at all depends on this\n",
"\t\t\t\tg.Remove(v)\n",
"\t\t\t\tremoved++\n",
"\t\t\tcase 1:\n",
"\t\t\t\t// because an output's destroy node always depends on the output,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tlog.Printf(\"[TRACE] PruneUnusedValuesTransformer: removing unused value %s\", dag.VertexName(v))\n"
],
"file_path": "terraform/transform_reference.go",
"type": "add",
"edit_start_line_idx": 231
} | package routerinsertion
// FirewallExt is an extension to the base Firewall object
type FirewallExt struct {
// RouterIDs are the routers that the firewall is attached to.
RouterIDs []string `json:"router_ids"`
}
| vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/results.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.0004022073408123106,
0.0004022073408123106,
0.0004022073408123106,
0.0004022073408123106,
0
] |
{
"id": 8,
"code_window": [
"\t\t\t\t// because an output's destroy node always depends on the output,\n",
"\t\t\t\t// we need to check for the case of a single destroy node.\n",
"\t\t\t\td := dependants.List()[0]\n",
"\t\t\t\tif _, ok := d.(*NodeDestroyableOutput); ok {\n",
"\t\t\t\t\tg.Remove(v)\n",
"\t\t\t\t\tremoved++\n",
"\t\t\t\t}\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\tlog.Printf(\"[TRACE] PruneUnusedValuesTransformer: removing unused value %s\", dag.VertexName(v))\n"
],
"file_path": "terraform/transform_reference.go",
"type": "add",
"edit_start_line_idx": 238
} | package terraform
import (
"fmt"
"log"
"sort"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/dag"
"github.com/hashicorp/terraform/lang"
"github.com/hashicorp/terraform/states"
)
// GraphNodeReferenceable must be implemented by any node that represents
// a Terraform thing that can be referenced (resource, module, etc.).
//
// Even if the thing has no name, this should return an empty list. By
// implementing this and returning a non-nil result, you say that this CAN
// be referenced and other methods of referencing may still be possible (such
// as by path!)
type GraphNodeReferenceable interface {
GraphNodeSubPath
// ReferenceableAddrs returns a list of addresses through which this can be
// referenced.
ReferenceableAddrs() []addrs.Referenceable
}
// GraphNodeReferencer must be implemented by nodes that reference other
// Terraform items and therefore depend on them.
type GraphNodeReferencer interface {
GraphNodeSubPath
// References returns a list of references made by this node, which
// include both a referenced address and source location information for
// the reference.
References() []*addrs.Reference
}
type GraphNodeAttachDependencies interface {
GraphNodeResource
AttachDependencies([]addrs.AbsResource)
}
// GraphNodeReferenceOutside is an interface that can optionally be implemented.
// A node that implements it can specify that its own referenceable addresses
// and/or the addresses it references are in a different module than the
// node itself.
//
// Any referenceable addresses returned by ReferenceableAddrs are interpreted
// relative to the returned selfPath.
//
// Any references returned by References are interpreted relative to the
// returned referencePath.
//
// It is valid but not required for either of these paths to match what is
// returned by method Path, though if both match the main Path then there
// is no reason to implement this method.
//
// The primary use-case for this is the nodes representing module input
// variables, since their expressions are resolved in terms of their calling
// module, but they are still referenced from their own module.
type GraphNodeReferenceOutside interface {
// ReferenceOutside returns a path in which any references from this node
// are resolved.
ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance)
}
// ReferenceTransformer is a GraphTransformer that connects all the
// nodes that reference each other in order to form the proper ordering.
type ReferenceTransformer struct{}
func (t *ReferenceTransformer) Transform(g *Graph) error {
// Build a reference map so we can efficiently look up the references
vs := g.Vertices()
m := NewReferenceMap(vs)
// Find the things that reference things and connect them
for _, v := range vs {
parents, _ := m.References(v)
parentsDbg := make([]string, len(parents))
for i, v := range parents {
parentsDbg[i] = dag.VertexName(v)
}
log.Printf(
"[DEBUG] ReferenceTransformer: %q references: %v",
dag.VertexName(v), parentsDbg)
for _, parent := range parents {
g.Connect(dag.BasicEdge(v, parent))
}
if len(parents) > 0 {
continue
}
}
return nil
}
// AttachDependenciesTransformer records all resource dependencies for each
// instance, and attaches the addresses to the node itself. Managed resources
// will record these in the state for proper ordering of destroy operations.
type AttachDependenciesTransformer struct {
Config *configs.Config
State *states.State
Schemas *Schemas
}
func (t AttachDependenciesTransformer) Transform(g *Graph) error {
for _, v := range g.Vertices() {
attacher, ok := v.(GraphNodeAttachDependencies)
if !ok {
continue
}
selfAddr := attacher.ResourceAddr()
// Data sources don't need to track destroy dependencies
if selfAddr.Resource.Mode == addrs.DataResourceMode {
continue
}
ans, err := g.Ancestors(v)
if err != nil {
return err
}
// dedupe addrs when there's multiple instances involved, or
// multiple paths in the un-reduced graph
depMap := map[string]addrs.AbsResource{}
for _, d := range ans.List() {
var addr addrs.AbsResource
switch d := d.(type) {
case GraphNodeResourceInstance:
instAddr := d.ResourceInstanceAddr()
addr = instAddr.Resource.Resource.Absolute(instAddr.Module)
case GraphNodeResource:
addr = d.ResourceAddr()
default:
continue
}
// Data sources don't need to track destroy dependencies
if addr.Resource.Mode == addrs.DataResourceMode {
continue
}
if addr.Equal(selfAddr) {
continue
}
depMap[addr.String()] = addr
}
deps := make([]addrs.AbsResource, 0, len(depMap))
for _, d := range depMap {
deps = append(deps, d)
}
sort.Slice(deps, func(i, j int) bool {
return deps[i].String() < deps[j].String()
})
log.Printf("[TRACE] AttachDependenciesTransformer: %s depends on %s", attacher.ResourceAddr(), deps)
attacher.AttachDependencies(deps)
}
return nil
}
// DestroyValueReferenceTransformer is a GraphTransformer that reverses the edges
// for locals and outputs that depend on other nodes which will be
// removed during destroy. If a destroy node is evaluated before the local or
// output value, it will be removed from the state, and the later interpolation
// will fail.
type DestroyValueReferenceTransformer struct{}
func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error {
vs := g.Vertices()
for _, v := range vs {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
// reverse any outgoing edges so that the value is evaluated first.
for _, e := range g.EdgesFrom(v) {
target := e.Target()
// only destroy nodes will be evaluated in reverse
if _, ok := target.(GraphNodeDestroyer); !ok {
continue
}
log.Printf("[TRACE] output dep: %s", dag.VertexName(target))
g.RemoveEdge(e)
g.Connect(&DestroyEdge{S: target, T: v})
}
}
return nil
}
// PruneUnusedValuesTransformer is a GraphTransformer that removes local and
// output values which are not referenced in the graph. Since outputs and
// locals always need to be evaluated, if they reference a resource that is not
// available in the state the interpolation could fail.
type PruneUnusedValuesTransformer struct{}
func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
// this might need multiple runs in order to ensure that pruning a value
// doesn't affect a previously checked value.
for removed := 0; ; removed = 0 {
for _, v := range g.Vertices() {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
dependants := g.UpEdges(v)
switch dependants.Len() {
case 0:
// nothing at all depends on this
g.Remove(v)
removed++
case 1:
// because an output's destroy node always depends on the output,
// we need to check for the case of a single destroy node.
d := dependants.List()[0]
if _, ok := d.(*NodeDestroyableOutput); ok {
g.Remove(v)
removed++
}
}
}
if removed == 0 {
break
}
}
return nil
}
// ReferenceMap is a structure that can be used to efficiently check
// for references on a graph.
type ReferenceMap struct {
// vertices is a map from internal reference keys (as produced by the
// mapKey method) to one or more vertices that are identified by each key.
//
// A particular reference key might actually identify multiple vertices,
// e.g. in situations where one object is contained inside another.
vertices map[string][]dag.Vertex
// edges is a map whose keys are a subset of the internal reference keys
// from "vertices", and whose values are the nodes that refer to each
// key. The values in this map are the referrers, while values in
// "verticies" are the referents. The keys in both cases are referents.
edges map[string][]dag.Vertex
}
// References returns the set of vertices that the given vertex refers to,
// and any referenced addresses that do not have corresponding vertices.
func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) {
rn, ok := v.(GraphNodeReferencer)
if !ok {
return nil, nil
}
if _, ok := v.(GraphNodeSubPath); !ok {
return nil, nil
}
var matches []dag.Vertex
var missing []addrs.Referenceable
for _, ref := range rn.References() {
subject := ref.Subject
key := m.referenceMapKey(v, subject)
if _, exists := m.vertices[key]; !exists {
// If what we were looking for was a ResourceInstance then we
// might be in a resource-oriented graph rather than an
// instance-oriented graph, and so we'll see if we have the
// resource itself instead.
switch ri := subject.(type) {
case addrs.ResourceInstance:
subject = ri.ContainingResource()
case addrs.ResourceInstancePhase:
subject = ri.ContainingResource()
}
key = m.referenceMapKey(v, subject)
}
vertices := m.vertices[key]
for _, rv := range vertices {
// don't include self-references
if rv == v {
continue
}
matches = append(matches, rv)
}
if len(vertices) == 0 {
missing = append(missing, ref.Subject)
}
}
return matches, missing
}
// Referrers returns the set of vertices that refer to the given vertex.
func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex {
rn, ok := v.(GraphNodeReferenceable)
if !ok {
return nil
}
sp, ok := v.(GraphNodeSubPath)
if !ok {
return nil
}
var matches []dag.Vertex
for _, addr := range rn.ReferenceableAddrs() {
key := m.mapKey(sp.Path(), addr)
referrers, ok := m.edges[key]
if !ok {
continue
}
// If the referrer set includes our own given vertex then we skip,
// since we don't want to return self-references.
selfRef := false
for _, p := range referrers {
if p == v {
selfRef = true
break
}
}
if selfRef {
continue
}
matches = append(matches, referrers...)
}
return matches
}
func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string {
return fmt.Sprintf("%s|%s", path.String(), addr.String())
}
// vertexReferenceablePath returns the path in which the given vertex can be
// referenced. This is the path that its results from ReferenceableAddrs
// are considered to be relative to.
//
// Only GraphNodeSubPath implementations can be referenced, so this method will
// panic if the given vertex does not implement that interface.
func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance {
sp, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp))
}
if outside, ok := v.(GraphNodeReferenceOutside); ok {
// Vertex is referenced from a different module than where it was
// declared.
path, _ := outside.ReferenceOutside()
return path
}
// Vertex is referenced from the same module as where it was declared.
return sp.Path()
}
// vertexReferencePath returns the path in which references _from_ the given
// vertex must be interpreted.
//
// Only GraphNodeSubPath implementations can have references, so this method
// will panic if the given vertex does not implement that interface.
func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance {
sp, ok := referrer.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp))
}
var path addrs.ModuleInstance
if outside, ok := referrer.(GraphNodeReferenceOutside); ok {
// Vertex makes references to objects in a different module than where
// it was declared.
_, path = outside.ReferenceOutside()
return path
}
// Vertex makes references to objects in the same module as where it
// was declared.
return sp.Path()
}
// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex
// that the reference is from, and "addr" is the address of the object being
// referenced.
//
// The result is an opaque string that includes both the address of the given
// object and the address of the module instance that object belongs to.
//
// Only GraphNodeSubPath implementations can be referrers, so this method will
// panic if the given vertex does not implement that interface.
func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string {
path := vertexReferencePath(referrer)
return m.mapKey(path, addr)
}
// NewReferenceMap is used to create a new reference map for the
// given set of vertices.
func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
var m ReferenceMap
// Build the lookup table
vertices := make(map[string][]dag.Vertex)
for _, v := range vs {
_, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
continue
}
// We're only looking for referenceable nodes
rn, ok := v.(GraphNodeReferenceable)
if !ok {
continue
}
path := m.vertexReferenceablePath(v)
// Go through and cache them
for _, addr := range rn.ReferenceableAddrs() {
key := m.mapKey(path, addr)
vertices[key] = append(vertices[key], v)
}
// Any node can be referenced by the address of the module it belongs
// to or any of that module's ancestors.
for _, addr := range path.Ancestors()[1:] {
// Can be referenced either as the specific call instance (with
// an instance key) or as the bare module call itself (the "module"
// block in the parent module that created the instance).
callPath, call := addr.Call()
callInstPath, callInst := addr.CallInstance()
callKey := m.mapKey(callPath, call)
callInstKey := m.mapKey(callInstPath, callInst)
vertices[callKey] = append(vertices[callKey], v)
vertices[callInstKey] = append(vertices[callInstKey], v)
}
}
// Build the lookup table for referenced by
edges := make(map[string][]dag.Vertex)
for _, v := range vs {
_, ok := v.(GraphNodeSubPath)
if !ok {
// Only nodes with paths can participate in a reference map.
continue
}
rn, ok := v.(GraphNodeReferencer)
if !ok {
// We're only looking for referenceable nodes
continue
}
// Go through and cache them
for _, ref := range rn.References() {
if ref.Subject == nil {
// Should never happen
panic(fmt.Sprintf("%T.References returned reference with nil subject", rn))
}
key := m.referenceMapKey(v, ref.Subject)
edges[key] = append(edges[key], v)
}
}
m.vertices = vertices
m.edges = edges
return &m
}
// ReferencesFromConfig returns the references that a configuration has
// based on the interpolated variables in a configuration.
func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference {
if body == nil {
return nil
}
refs, _ := lang.ReferencesInBlock(body, schema)
return refs
}
// appendResourceDestroyReferences identifies resource and resource instance
// references in the given slice and appends to it the "destroy-phase"
// equivalents of those references, returning the result.
//
// This can be used in the References implementation for a node which must also
// depend on the destruction of anything it references.
func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference {
given := refs
for _, ref := range given {
switch tr := ref.Subject.(type) {
case addrs.Resource:
newRef := *ref // shallow copy
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
refs = append(refs, &newRef)
case addrs.ResourceInstance:
newRef := *ref // shallow copy
newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
refs = append(refs, &newRef)
}
}
return refs
}
func modulePrefixStr(p addrs.ModuleInstance) string {
return p.String()
}
func modulePrefixList(result []string, prefix string) []string {
if prefix != "" {
for i, v := range result {
result[i] = fmt.Sprintf("%s.%s", prefix, v)
}
}
return result
}
| terraform/transform_reference.go | 1 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.9980104565620422,
0.04389011114835739,
0.0001637668756302446,
0.0027940087020397186,
0.18679039180278778
] |
{
"id": 8,
"code_window": [
"\t\t\t\t// because an output's destroy node always depends on the output,\n",
"\t\t\t\t// we need to check for the case of a single destroy node.\n",
"\t\t\t\td := dependants.List()[0]\n",
"\t\t\t\tif _, ok := d.(*NodeDestroyableOutput); ok {\n",
"\t\t\t\t\tg.Remove(v)\n",
"\t\t\t\t\tremoved++\n",
"\t\t\t\t}\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\tlog.Printf(\"[TRACE] PruneUnusedValuesTransformer: removing unused value %s\", dag.VertexName(v))\n"
],
"file_path": "terraform/transform_reference.go",
"type": "add",
"edit_start_line_idx": 238
} | package authentication
import (
"fmt"
"strings"
"github.com/Azure/go-autorest/autorest/azure/cli"
)
func (a *azureCLIProfile) populateSubscriptionID() error {
subscriptionId, err := a.findDefaultSubscriptionId()
if err != nil {
return err
}
a.subscriptionId = subscriptionId
return nil
}
func (a *azureCLIProfile) populateTenantID() error {
subscription, err := a.findSubscription(a.subscriptionId)
if err != nil {
return err
}
a.tenantId = subscription.TenantID
return nil
}
func (a *azureCLIProfile) populateClientId() error {
// we can now pull out the ClientID and the Access Token to use from the Access Token
tokensPath, err := cli.AccessTokensPath()
if err != nil {
return fmt.Errorf("Error loading the Tokens Path from the Azure CLI: %+v", err)
}
tokens, err := cli.LoadTokens(tokensPath)
if err != nil {
return fmt.Errorf("No Authorization Tokens were found - please ensure the Azure CLI is installed and then log-in with `az login`.")
}
validToken, err := findValidAccessTokenForTenant(tokens, a.tenantId)
if err != nil {
return fmt.Errorf("No Authorization Tokens were found - please re-authenticate using `az login`.")
}
token := *validToken
a.clientId = token.ClientID
return nil
}
func (a *azureCLIProfile) populateEnvironment() error {
subscription, err := a.findSubscription(a.subscriptionId)
if err != nil {
return err
}
a.environment = normalizeEnvironmentName(subscription.EnvironmentName)
return nil
}
func (a azureCLIProfile) findDefaultSubscriptionId() (string, error) {
for _, subscription := range a.profile.Subscriptions {
if subscription.IsDefault {
return subscription.ID, nil
}
}
return "", fmt.Errorf("No Subscription was Marked as Default in the Azure Profile.")
}
func (a azureCLIProfile) findSubscription(subscriptionId string) (*cli.Subscription, error) {
for _, subscription := range a.profile.Subscriptions {
if strings.EqualFold(subscription.ID, subscriptionId) {
return &subscription, nil
}
}
return nil, fmt.Errorf("Subscription %q was not found in your Azure CLI credentials. Please verify it exists in `az account list`.", subscriptionId)
}
| vendor/github.com/hashicorp/go-azure-helpers/authentication/azure_cli_profile_population.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.00020699069136753678,
0.00017603077867534012,
0.00016540969954803586,
0.00017376634059473872,
0.00001139790219895076
] |
{
"id": 8,
"code_window": [
"\t\t\t\t// because an output's destroy node always depends on the output,\n",
"\t\t\t\t// we need to check for the case of a single destroy node.\n",
"\t\t\t\td := dependants.List()[0]\n",
"\t\t\t\tif _, ok := d.(*NodeDestroyableOutput); ok {\n",
"\t\t\t\t\tg.Remove(v)\n",
"\t\t\t\t\tremoved++\n",
"\t\t\t\t}\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\tlog.Printf(\"[TRACE] PruneUnusedValuesTransformer: removing unused value %s\", dag.VertexName(v))\n"
],
"file_path": "terraform/transform_reference.go",
"type": "add",
"edit_start_line_idx": 238
} | package terraform
import (
"regexp"
"strings"
"testing"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/plans"
"github.com/hashicorp/terraform/states"
)
func cbdTestGraph(t *testing.T, mod string, changes *plans.Changes, state *states.State) *Graph {
module := testModule(t, mod)
applyBuilder := &ApplyGraphBuilder{
Config: module,
Changes: changes,
Components: simpleMockComponentFactory(),
Schemas: simpleTestSchemas(),
State: state,
}
g, err := (&BasicGraphBuilder{
Steps: cbdTestSteps(applyBuilder.Steps()),
Name: "ApplyGraphBuilder",
}).Build(addrs.RootModuleInstance)
if err != nil {
t.Fatalf("err: %s", err)
}
return filterInstances(g)
}
// override the apply graph builder to halt the process after CBD
func cbdTestSteps(steps []GraphTransformer) []GraphTransformer {
found := false
var i int
var t GraphTransformer
for i, t = range steps {
if _, ok := t.(*CBDEdgeTransformer); ok {
found = true
break
}
}
if !found {
panic("CBDEdgeTransformer not found")
}
return steps[:i+1]
}
// remove extra nodes for easier test comparisons
func filterInstances(g *Graph) *Graph {
for _, v := range g.Vertices() {
if _, ok := v.(GraphNodeResourceInstance); !ok {
g.Remove(v)
}
}
return g
}
func TestCBDEdgeTransformer(t *testing.T) {
changes := &plans.Changes{
Resources: []*plans.ResourceInstanceChangeSrc{
{
Addr: mustResourceInstanceAddr("test_object.A"),
ChangeSrc: plans.ChangeSrc{
Action: plans.CreateThenDelete,
},
},
{
Addr: mustResourceInstanceAddr("test_object.B"),
ChangeSrc: plans.ChangeSrc{
Action: plans.Update,
},
},
},
}
state := states.NewState()
root := state.EnsureModule(addrs.RootModuleInstance)
root.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.A").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id":"A"}`),
},
mustProviderConfig("provider.test"),
)
root.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.B").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`),
Dependencies: []addrs.AbsResource{mustResourceAddr("test_object.A")},
},
mustProviderConfig("provider.test"),
)
g := cbdTestGraph(t, "transform-destroy-cbd-edge-basic", changes, state)
g = filterInstances(g)
actual := strings.TrimSpace(g.String())
expected := regexp.MustCompile(strings.TrimSpace(`
(?m)test_object.A
test_object.A \(destroy deposed \w+\)
test_object.A
test_object.B
test_object.B
test_object.A
`))
if !expected.MatchString(actual) {
t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
}
}
func TestCBDEdgeTransformerMulti(t *testing.T) {
changes := &plans.Changes{
Resources: []*plans.ResourceInstanceChangeSrc{
{
Addr: mustResourceInstanceAddr("test_object.A"),
ChangeSrc: plans.ChangeSrc{
Action: plans.CreateThenDelete,
},
},
{
Addr: mustResourceInstanceAddr("test_object.B"),
ChangeSrc: plans.ChangeSrc{
Action: plans.CreateThenDelete,
},
},
{
Addr: mustResourceInstanceAddr("test_object.C"),
ChangeSrc: plans.ChangeSrc{
Action: plans.Update,
},
},
},
}
state := states.NewState()
root := state.EnsureModule(addrs.RootModuleInstance)
root.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.A").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id":"A"}`),
},
mustProviderConfig("provider.test"),
)
root.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.B").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id":"B"}`),
},
mustProviderConfig("provider.test"),
)
root.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.C").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id":"C","test_list":["x"]}`),
Dependencies: []addrs.AbsResource{
mustResourceAddr("test_object.A"),
mustResourceAddr("test_object.B"),
},
},
mustProviderConfig("provider.test"),
)
g := cbdTestGraph(t, "transform-destroy-cbd-edge-multi", changes, state)
g = filterInstances(g)
actual := strings.TrimSpace(g.String())
expected := regexp.MustCompile(strings.TrimSpace(`
(?m)test_object.A
test_object.A \(destroy deposed \w+\)
test_object.A
test_object.C
test_object.B
test_object.B \(destroy deposed \w+\)
test_object.B
test_object.C
test_object.C
test_object.A
test_object.B
`))
if !expected.MatchString(actual) {
t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
}
}
func TestCBDEdgeTransformer_depNonCBDCount(t *testing.T) {
changes := &plans.Changes{
Resources: []*plans.ResourceInstanceChangeSrc{
{
Addr: mustResourceInstanceAddr("test_object.A"),
ChangeSrc: plans.ChangeSrc{
Action: plans.CreateThenDelete,
},
},
{
Addr: mustResourceInstanceAddr("test_object.B[0]"),
ChangeSrc: plans.ChangeSrc{
Action: plans.Update,
},
},
{
Addr: mustResourceInstanceAddr("test_object.B[1]"),
ChangeSrc: plans.ChangeSrc{
Action: plans.Update,
},
},
},
}
state := states.NewState()
root := state.EnsureModule(addrs.RootModuleInstance)
root.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.A").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id":"A"}`),
},
mustProviderConfig("provider.test"),
)
root.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.B[0]").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`),
Dependencies: []addrs.AbsResource{mustResourceAddr("test_object.A")},
},
mustProviderConfig("provider.test"),
)
root.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.B[1]").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`),
Dependencies: []addrs.AbsResource{mustResourceAddr("test_object.A")},
},
mustProviderConfig("provider.test"),
)
g := cbdTestGraph(t, "transform-cbd-destroy-edge-count", changes, state)
actual := strings.TrimSpace(g.String())
expected := regexp.MustCompile(strings.TrimSpace(`
(?m)test_object.A
test_object.A \(destroy deposed \w+\)
test_object.A
test_object.B\[0\]
test_object.B\[1\]
test_object.B\[0\]
test_object.A
test_object.B\[1\]
test_object.A`))
if !expected.MatchString(actual) {
t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
}
}
func TestCBDEdgeTransformer_depNonCBDCountBoth(t *testing.T) {
changes := &plans.Changes{
Resources: []*plans.ResourceInstanceChangeSrc{
{
Addr: mustResourceInstanceAddr("test_object.A[0]"),
ChangeSrc: plans.ChangeSrc{
Action: plans.CreateThenDelete,
},
},
{
Addr: mustResourceInstanceAddr("test_object.A[1]"),
ChangeSrc: plans.ChangeSrc{
Action: plans.CreateThenDelete,
},
},
{
Addr: mustResourceInstanceAddr("test_object.B[0]"),
ChangeSrc: plans.ChangeSrc{
Action: plans.Update,
},
},
{
Addr: mustResourceInstanceAddr("test_object.B[1]"),
ChangeSrc: plans.ChangeSrc{
Action: plans.Update,
},
},
},
}
state := states.NewState()
root := state.EnsureModule(addrs.RootModuleInstance)
root.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.A[0]").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id":"A"}`),
},
mustProviderConfig("provider.test"),
)
root.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.A[1]").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id":"A"}`),
},
mustProviderConfig("provider.test"),
)
root.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.B[0]").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`),
Dependencies: []addrs.AbsResource{mustResourceAddr("test_object.A")},
},
mustProviderConfig("provider.test"),
)
root.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.B[1]").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`),
Dependencies: []addrs.AbsResource{mustResourceAddr("test_object.A")},
},
mustProviderConfig("provider.test"),
)
g := cbdTestGraph(t, "transform-cbd-destroy-edge-both-count", changes, state)
actual := strings.TrimSpace(g.String())
expected := regexp.MustCompile(strings.TrimSpace(`
test_object.A\[0\]
test_object.A\[0\] \(destroy deposed \w+\)
test_object.A\[0\]
test_object.A\[1\]
test_object.B\[0\]
test_object.B\[1\]
test_object.A\[1\]
test_object.A\[1\] \(destroy deposed \w+\)
test_object.A\[0\]
test_object.A\[1\]
test_object.B\[0\]
test_object.B\[1\]
test_object.B\[0\]
test_object.A\[0\]
test_object.A\[1\]
test_object.B\[1\]
test_object.A\[0\]
test_object.A\[1\]
`))
if !expected.MatchString(actual) {
t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
}
}
| terraform/transform_destroy_cbd_test.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.017078792676329613,
0.001322554424405098,
0.00017013450269587338,
0.00017603016749490052,
0.0037405460607260466
] |
{
"id": 8,
"code_window": [
"\t\t\t\t// because an output's destroy node always depends on the output,\n",
"\t\t\t\t// we need to check for the case of a single destroy node.\n",
"\t\t\t\td := dependants.List()[0]\n",
"\t\t\t\tif _, ok := d.(*NodeDestroyableOutput); ok {\n",
"\t\t\t\t\tg.Remove(v)\n",
"\t\t\t\t\tremoved++\n",
"\t\t\t\t}\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\tlog.Printf(\"[TRACE] PruneUnusedValuesTransformer: removing unused value %s\", dag.VertexName(v))\n"
],
"file_path": "terraform/transform_reference.go",
"type": "add",
"edit_start_line_idx": 238
} | // +build windows
package readline
import (
"bufio"
"io"
"strconv"
"strings"
"sync"
"unicode/utf8"
"unsafe"
)
const (
_ = uint16(0)
COLOR_FBLUE = 0x0001
COLOR_FGREEN = 0x0002
COLOR_FRED = 0x0004
COLOR_FINTENSITY = 0x0008
COLOR_BBLUE = 0x0010
COLOR_BGREEN = 0x0020
COLOR_BRED = 0x0040
COLOR_BINTENSITY = 0x0080
COMMON_LVB_UNDERSCORE = 0x8000
COMMON_LVB_BOLD = 0x0007
)
var ColorTableFg = []word{
0, // 30: Black
COLOR_FRED, // 31: Red
COLOR_FGREEN, // 32: Green
COLOR_FRED | COLOR_FGREEN, // 33: Yellow
COLOR_FBLUE, // 34: Blue
COLOR_FRED | COLOR_FBLUE, // 35: Magenta
COLOR_FGREEN | COLOR_FBLUE, // 36: Cyan
COLOR_FRED | COLOR_FBLUE | COLOR_FGREEN, // 37: White
}
var ColorTableBg = []word{
0, // 40: Black
COLOR_BRED, // 41: Red
COLOR_BGREEN, // 42: Green
COLOR_BRED | COLOR_BGREEN, // 43: Yellow
COLOR_BBLUE, // 44: Blue
COLOR_BRED | COLOR_BBLUE, // 45: Magenta
COLOR_BGREEN | COLOR_BBLUE, // 46: Cyan
COLOR_BRED | COLOR_BBLUE | COLOR_BGREEN, // 47: White
}
type ANSIWriter struct {
target io.Writer
wg sync.WaitGroup
ctx *ANSIWriterCtx
sync.Mutex
}
func NewANSIWriter(w io.Writer) *ANSIWriter {
a := &ANSIWriter{
target: w,
ctx: NewANSIWriterCtx(w),
}
return a
}
func (a *ANSIWriter) Close() error {
a.wg.Wait()
return nil
}
type ANSIWriterCtx struct {
isEsc bool
isEscSeq bool
arg []string
target *bufio.Writer
wantFlush bool
}
func NewANSIWriterCtx(target io.Writer) *ANSIWriterCtx {
return &ANSIWriterCtx{
target: bufio.NewWriter(target),
}
}
func (a *ANSIWriterCtx) Flush() {
a.target.Flush()
}
func (a *ANSIWriterCtx) process(r rune) bool {
if a.wantFlush {
if r == 0 || r == CharEsc {
a.wantFlush = false
a.target.Flush()
}
}
if a.isEscSeq {
a.isEscSeq = a.ioloopEscSeq(a.target, r, &a.arg)
return true
}
switch r {
case CharEsc:
a.isEsc = true
case '[':
if a.isEsc {
a.arg = nil
a.isEscSeq = true
a.isEsc = false
break
}
fallthrough
default:
a.target.WriteRune(r)
a.wantFlush = true
}
return true
}
func (a *ANSIWriterCtx) ioloopEscSeq(w *bufio.Writer, r rune, argptr *[]string) bool {
arg := *argptr
var err error
if r >= 'A' && r <= 'D' {
count := short(GetInt(arg, 1))
info, err := GetConsoleScreenBufferInfo()
if err != nil {
return false
}
switch r {
case 'A': // up
info.dwCursorPosition.y -= count
case 'B': // down
info.dwCursorPosition.y += count
case 'C': // right
info.dwCursorPosition.x += count
case 'D': // left
info.dwCursorPosition.x -= count
}
SetConsoleCursorPosition(&info.dwCursorPosition)
return false
}
switch r {
case 'J':
killLines()
case 'K':
eraseLine()
case 'm':
color := word(0)
for _, item := range arg {
var c int
c, err = strconv.Atoi(item)
if err != nil {
w.WriteString("[" + strings.Join(arg, ";") + "m")
break
}
if c >= 30 && c < 40 {
color ^= COLOR_FINTENSITY
color |= ColorTableFg[c-30]
} else if c >= 40 && c < 50 {
color ^= COLOR_BINTENSITY
color |= ColorTableBg[c-40]
} else if c == 4 {
color |= COMMON_LVB_UNDERSCORE | ColorTableFg[7]
} else if c == 1 {
color |= COMMON_LVB_BOLD | COLOR_FINTENSITY
} else { // unknown code treat as reset
color = ColorTableFg[7]
}
}
if err != nil {
break
}
kernel.SetConsoleTextAttribute(stdout, uintptr(color))
case '\007': // set title
case ';':
if len(arg) == 0 || arg[len(arg)-1] != "" {
arg = append(arg, "")
*argptr = arg
}
return true
default:
if len(arg) == 0 {
arg = append(arg, "")
}
arg[len(arg)-1] += string(r)
*argptr = arg
return true
}
*argptr = nil
return false
}
func (a *ANSIWriter) Write(b []byte) (int, error) {
a.Lock()
defer a.Unlock()
off := 0
for len(b) > off {
r, size := utf8.DecodeRune(b[off:])
if size == 0 {
return off, io.ErrShortWrite
}
off += size
a.ctx.process(r)
}
a.ctx.Flush()
return off, nil
}
func killLines() error {
sbi, err := GetConsoleScreenBufferInfo()
if err != nil {
return err
}
size := (sbi.dwCursorPosition.y - sbi.dwSize.y) * sbi.dwSize.x
size += sbi.dwCursorPosition.x
var written int
kernel.FillConsoleOutputAttribute(stdout, uintptr(ColorTableFg[7]),
uintptr(size),
sbi.dwCursorPosition.ptr(),
uintptr(unsafe.Pointer(&written)),
)
return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '),
uintptr(size),
sbi.dwCursorPosition.ptr(),
uintptr(unsafe.Pointer(&written)),
)
}
func eraseLine() error {
sbi, err := GetConsoleScreenBufferInfo()
if err != nil {
return err
}
size := sbi.dwSize.x
sbi.dwCursorPosition.x = 0
var written int
return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '),
uintptr(size),
sbi.dwCursorPosition.ptr(),
uintptr(unsafe.Pointer(&written)),
)
}
| vendor/github.com/chzyer/readline/ansi_windows.go | 0 | https://github.com/hashicorp/terraform/commit/fe3edb8e46f8f8677277e3fd8a2a5466dbcd16aa | [
0.0010193124180659652,
0.0002416332863504067,
0.00016395124839618802,
0.00017415116599295288,
0.00018745938723441213
] |
{
"id": 0,
"code_window": [
"\t\t\tOOMScoreAdj: util.IntPtr(qos.KubeProxyOOMScoreAdj),\n",
"\t\t\tResourceContainer: \"/kube-proxy\",\n",
"\t\t\tIPTablesSyncPeriod: unversioned.Duration{30 * time.Second},\n",
"\t\t\tUDPIdleTimeout: unversioned.Duration{250 * time.Millisecond},\n",
"\t\t\tMode: componentconfig.ProxyModeUserspace,\n",
"\t\t\tConntrackMax: 256 * 1024, // 4x default (64k)\n",
"\t\t\tConntrackTCPEstablishedTimeout: unversioned.Duration{Duration: 24 * time.Hour}, // 1 day (1/5 default)\n",
"\t\t},\n",
"\t\tKubeAPIQPS: 5.0,\n",
"\t\tKubeAPIBurst: 10,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "replace",
"edit_start_line_idx": 59
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
api "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait"
)
const (
endpointHttpPort = 8080
endpointUdpPort = 8081
testContainerHttpPort = 8080
clusterHttpPort = 80
clusterUdpPort = 90
nodeHttpPort = 32080
nodeUdpPort = 32081
loadBalancerHttpPort = 100
netexecImageName = "gcr.io/google_containers/netexec:1.0"
testPodName = "test-container-pod"
hostTestPodName = "host-test-container-pod"
nodePortServiceName = "node-port-service"
loadBalancerServiceName = "load-balancer-service"
enableLoadBalancerTest = false
)
type KubeProxyTestConfig struct {
testContainerPod *api.Pod
hostTestContainerPod *api.Pod
endpointPods []*api.Pod
f *Framework
nodePortService *api.Service
loadBalancerService *api.Service
externalAddrs []string
nodes []api.Node
}
var _ = Describe("KubeProxy", func() {
f := NewFramework("e2e-kubeproxy")
config := &KubeProxyTestConfig{
f: f,
}
// Slow issue #14204 (10 min)
It("should test kube-proxy [Slow]", func() {
By("cleaning up any pre-existing namespaces used by this test")
config.cleanup()
By("Setting up for the tests")
config.setup()
//TODO Need to add hit externalIPs test
By("TODO: Need to add hit externalIPs test")
By("Hit Test with All Endpoints")
config.hitAll()
config.deleteNetProxyPod()
By("Hit Test with Fewer Endpoints")
config.hitAll()
By("Deleting nodePortservice and ensuring that service cannot be hit")
config.deleteNodePortService()
config.hitNodePort(0) // expect 0 endpoints to be hit
if enableLoadBalancerTest {
By("Deleting loadBalancerService and ensuring that service cannot be hit")
config.deleteLoadBalancerService()
config.hitLoadBalancer(0) // expect 0 endpoints to be hit
}
})
})
func (config *KubeProxyTestConfig) hitAll() {
By("Hitting endpoints from host and container")
config.hitEndpoints()
By("Hitting clusterIP from host and container")
config.hitClusterIP(len(config.endpointPods))
By("Hitting nodePort from host and container")
config.hitNodePort(len(config.endpointPods))
if enableLoadBalancerTest {
By("Waiting for LoadBalancer Ingress Setup")
config.waitForLoadBalancerIngressSetup()
By("Hitting LoadBalancer")
config.hitLoadBalancer(len(config.endpointPods))
}
}
func (config *KubeProxyTestConfig) hitLoadBalancer(epCount int) {
lbIP := config.loadBalancerService.Status.LoadBalancer.Ingress[0].IP
hostNames := make(map[string]bool)
tries := epCount*epCount + 5
for i := 0; i < tries; i++ {
transport := &http.Transport{}
httpClient := createHTTPClient(transport)
resp, err := httpClient.Get(fmt.Sprintf("http://%s:%d/hostName", lbIP, loadBalancerHttpPort))
if err == nil {
defer resp.Body.Close()
hostName, err := ioutil.ReadAll(resp.Body)
if err == nil {
hostNames[string(hostName)] = true
}
}
transport.CloseIdleConnections()
}
Expect(len(hostNames)).To(BeNumerically("==", epCount), "LoadBalancer did not hit all pods")
}
func createHTTPClient(transport *http.Transport) *http.Client {
client := &http.Client{
Transport: transport,
Timeout: 5 * time.Second,
}
return client
}
func (config *KubeProxyTestConfig) hitClusterIP(epCount int) {
clusterIP := config.nodePortService.Spec.ClusterIP
tries := epCount*epCount + 15 // the +15 ensures we still make attempts when epCount == 0
By("dialing(udp) node1 --> clusterIP:clusterUdpPort")
config.dialFromNode("udp", clusterIP, clusterUdpPort, tries, epCount)
By("dialing(http) node1 --> clusterIP:clusterHttpPort")
config.dialFromNode("http", clusterIP, clusterHttpPort, tries, epCount)
By("dialing(udp) test container --> clusterIP:clusterUdpPort")
config.dialFromTestContainer("udp", clusterIP, clusterUdpPort, tries, epCount)
By("dialing(http) test container --> clusterIP:clusterHttpPort")
config.dialFromTestContainer("http", clusterIP, clusterHttpPort, tries, epCount)
By("dialing(udp) endpoint container --> clusterIP:clusterUdpPort")
config.dialFromEndpointContainer("udp", clusterIP, clusterUdpPort, tries, epCount)
By("dialing(http) endpoint container --> clusterIP:clusterHttpPort")
config.dialFromEndpointContainer("http", clusterIP, clusterHttpPort, tries, epCount)
}
func (config *KubeProxyTestConfig) hitNodePort(epCount int) {
node1_IP := config.externalAddrs[0]
tries := epCount*epCount + 15 // the +15 ensures we still make attempts when epCount == 0
By("dialing(udp) node1 --> node1:nodeUdpPort")
config.dialFromNode("udp", node1_IP, nodeUdpPort, tries, epCount)
By("dialing(http) node1 --> node1:nodeHttpPort")
config.dialFromNode("http", node1_IP, nodeHttpPort, tries, epCount)
By("dialing(udp) test container --> node1:nodeUdpPort")
config.dialFromTestContainer("udp", node1_IP, nodeUdpPort, tries, epCount)
By("dialing(http) test container --> node1:nodeHttpPort")
config.dialFromTestContainer("http", node1_IP, nodeHttpPort, tries, epCount)
By("dialing(udp) endpoint container --> node1:nodeUdpPort")
config.dialFromEndpointContainer("udp", node1_IP, nodeUdpPort, tries, epCount)
By("dialing(http) endpoint container --> node1:nodeHttpPort")
config.dialFromEndpointContainer("http", node1_IP, nodeHttpPort, tries, epCount)
// TODO: doesn't work because masquerading is not done
By("TODO: Test disabled. dialing(udp) node --> 127.0.0.1:nodeUdpPort")
//config.dialFromNode("udp", "127.0.0.1", nodeUdpPort, tries, epCount)
// TODO: doesn't work because masquerading is not done
By("Test disabled. dialing(http) node --> 127.0.0.1:nodeHttpPort")
//config.dialFromNode("http", "127.0.0.1", nodeHttpPort, tries, epCount)
node2_IP := config.externalAddrs[1]
By("dialing(udp) node1 --> node2:nodeUdpPort")
config.dialFromNode("udp", node2_IP, nodeUdpPort, tries, epCount)
By("dialing(http) node1 --> node2:nodeHttpPort")
config.dialFromNode("http", node2_IP, nodeHttpPort, tries, epCount)
}
func (config *KubeProxyTestConfig) hitEndpoints() {
for _, endpointPod := range config.endpointPods {
Expect(len(endpointPod.Status.PodIP)).To(BeNumerically(">", 0), "podIP is empty:%s", endpointPod.Status.PodIP)
By("dialing(udp) endpointPodIP:endpointUdpPort from node1")
config.dialFromNode("udp", endpointPod.Status.PodIP, endpointUdpPort, 5, 1)
By("dialing(http) endpointPodIP:endpointHttpPort from node1")
config.dialFromNode("http", endpointPod.Status.PodIP, endpointHttpPort, 5, 1)
By("dialing(udp) endpointPodIP:endpointUdpPort from test container")
config.dialFromTestContainer("udp", endpointPod.Status.PodIP, endpointUdpPort, 5, 1)
By("dialing(http) endpointPodIP:endpointHttpPort from test container")
config.dialFromTestContainer("http", endpointPod.Status.PodIP, endpointHttpPort, 5, 1)
}
}
func (config *KubeProxyTestConfig) dialFromEndpointContainer(protocol, targetIP string, targetPort, tries, expectedCount int) {
config.dialFromContainer(protocol, config.endpointPods[0].Status.PodIP, targetIP, endpointHttpPort, targetPort, tries, expectedCount)
}
func (config *KubeProxyTestConfig) dialFromTestContainer(protocol, targetIP string, targetPort, tries, expectedCount int) {
config.dialFromContainer(protocol, config.testContainerPod.Status.PodIP, targetIP, testContainerHttpPort, targetPort, tries, expectedCount)
}
func (config *KubeProxyTestConfig) dialFromContainer(protocol, containerIP, targetIP string, containerHttpPort, targetPort, tries, expectedCount int) {
cmd := fmt.Sprintf("curl -q 'http://%s:%d/dial?request=hostName&protocol=%s&host=%s&port=%d&tries=%d'",
containerIP,
containerHttpPort,
protocol,
targetIP,
targetPort,
tries)
By(fmt.Sprintf("Dialing from container. Running command:%s", cmd))
stdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd)
var output map[string][]string
err := json.Unmarshal([]byte(stdout), &output)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Could not unmarshal curl response: %s", stdout))
hostNamesMap := array2map(output["responses"])
Expect(len(hostNamesMap)).To(BeNumerically("==", expectedCount), fmt.Sprintf("Response was:%v", output))
}
func (config *KubeProxyTestConfig) dialFromNode(protocol, targetIP string, targetPort, tries, expectedCount int) {
var cmd string
if protocol == "udp" {
cmd = fmt.Sprintf("echo 'hostName' | timeout -t 3 nc -w 1 -u %s %d", targetIP, targetPort)
} else {
cmd = fmt.Sprintf("curl -s --connect-timeout 1 http://%s:%d/hostName", targetIP, targetPort)
}
forLoop := fmt.Sprintf("for i in $(seq 1 %d); do %s; echo; done | grep -v '^\\s*$' |sort | uniq -c | wc -l", tries, cmd)
By(fmt.Sprintf("Dialing from node. command:%s", forLoop))
stdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, forLoop)
Expect(strconv.Atoi(strings.TrimSpace(stdout))).To(BeNumerically("==", expectedCount))
}
func (config *KubeProxyTestConfig) createNetShellPodSpec(podName string, node string) *api.Pod {
pod := &api.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
},
ObjectMeta: api.ObjectMeta{
Name: podName,
Namespace: config.f.Namespace.Name,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "webserver",
Image: netexecImageName,
ImagePullPolicy: api.PullIfNotPresent,
Command: []string{
"/netexec",
fmt.Sprintf("--http-port=%d", endpointHttpPort),
fmt.Sprintf("--udp-port=%d", endpointUdpPort),
},
Ports: []api.ContainerPort{
{
Name: "http",
ContainerPort: endpointHttpPort,
},
{
Name: "udp",
ContainerPort: endpointUdpPort,
Protocol: api.ProtocolUDP,
},
},
},
},
NodeName: node,
},
}
return pod
}
func (config *KubeProxyTestConfig) createTestPodSpec() *api.Pod {
pod := &api.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
},
ObjectMeta: api.ObjectMeta{
Name: testPodName,
Namespace: config.f.Namespace.Name,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "webserver",
Image: netexecImageName,
ImagePullPolicy: api.PullIfNotPresent,
Command: []string{
"/netexec",
fmt.Sprintf("--http-port=%d", endpointHttpPort),
fmt.Sprintf("--udp-port=%d", endpointUdpPort),
},
Ports: []api.ContainerPort{
{
Name: "http",
ContainerPort: testContainerHttpPort,
},
},
},
},
},
}
return pod
}
func (config *KubeProxyTestConfig) createNodePortService(selector map[string]string) {
serviceSpec := &api.Service{
ObjectMeta: api.ObjectMeta{
Name: nodePortServiceName,
},
Spec: api.ServiceSpec{
Type: api.ServiceTypeNodePort,
Ports: []api.ServicePort{
{Port: clusterHttpPort, Name: "http", Protocol: api.ProtocolTCP, NodePort: nodeHttpPort, TargetPort: intstr.FromInt(endpointHttpPort)},
{Port: clusterUdpPort, Name: "udp", Protocol: api.ProtocolUDP, NodePort: nodeUdpPort, TargetPort: intstr.FromInt(endpointUdpPort)},
},
Selector: selector,
},
}
config.nodePortService = config.createService(serviceSpec)
}
func (config *KubeProxyTestConfig) deleteNodePortService() {
err := config.getServiceClient().Delete(config.nodePortService.Name)
Expect(err).NotTo(HaveOccurred(), "error while deleting NodePortService. err:%v)", err)
time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted.
}
func (config *KubeProxyTestConfig) createLoadBalancerService(selector map[string]string) {
serviceSpec := &api.Service{
ObjectMeta: api.ObjectMeta{
Name: loadBalancerServiceName,
},
Spec: api.ServiceSpec{
Type: api.ServiceTypeLoadBalancer,
Ports: []api.ServicePort{
{Port: loadBalancerHttpPort, Name: "http", Protocol: "TCP", TargetPort: intstr.FromInt(endpointHttpPort)},
},
Selector: selector,
},
}
config.createService(serviceSpec)
}
func (config *KubeProxyTestConfig) deleteLoadBalancerService() {
go func() { config.getServiceClient().Delete(config.loadBalancerService.Name) }()
time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted.
}
func (config *KubeProxyTestConfig) waitForLoadBalancerIngressSetup() {
err := wait.Poll(2*time.Second, 120*time.Second, func() (bool, error) {
service, err := config.getServiceClient().Get(loadBalancerServiceName)
if err != nil {
return false, err
} else {
if len(service.Status.LoadBalancer.Ingress) > 0 {
return true, nil
} else {
return false, fmt.Errorf("Service LoadBalancer Ingress was not setup.")
}
}
})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to setup Load Balancer Service. err:%v", err))
config.loadBalancerService, _ = config.getServiceClient().Get(loadBalancerServiceName)
}
func (config *KubeProxyTestConfig) createTestPods() {
testContainerPod := config.createTestPodSpec()
hostTestContainerPod := NewHostExecPodSpec(config.f.Namespace.Name, hostTestPodName)
config.createPod(testContainerPod)
config.createPod(hostTestContainerPod)
expectNoError(config.f.WaitForPodRunning(testContainerPod.Name))
expectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name))
var err error
config.testContainerPod, err = config.getPodClient().Get(testContainerPod.Name)
if err != nil {
Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
}
config.hostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name)
if err != nil {
Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
}
}
func (config *KubeProxyTestConfig) createService(serviceSpec *api.Service) *api.Service {
_, err := config.getServiceClient().Create(serviceSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
err = waitForService(config.f.Client, config.f.Namespace.Name, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err))
createdService, err := config.getServiceClient().Get(serviceSpec.Name)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
return createdService
}
func (config *KubeProxyTestConfig) setup() {
By("creating a selector")
selectorName := "selector-" + string(util.NewUUID())
serviceSelector := map[string]string{
selectorName: "true",
}
By("Getting node addresses")
nodeList := ListSchedulableNodesOrDie(config.f.Client)
config.externalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
if len(config.externalAddrs) < 2 {
// fall back to legacy IPs
config.externalAddrs = NodeAddresses(nodeList, api.NodeLegacyHostIP)
}
Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes with an external IP or LegacyHostIP are necessary"))
config.nodes = nodeList.Items
if enableLoadBalancerTest {
By("Creating the LoadBalancer Service on top of the pods in kubernetes")
config.createLoadBalancerService(serviceSelector)
}
By("Creating the service pods in kubernetes")
podName := "netserver"
config.endpointPods = config.createNetProxyPods(podName, serviceSelector)
By("Creating the service on top of the pods in kubernetes")
config.createNodePortService(serviceSelector)
By("Creating test pods")
config.createTestPods()
}
func (config *KubeProxyTestConfig) cleanup() {
nsClient := config.getNamespacesClient()
nsList, err := nsClient.List(api.ListOptions{})
if err == nil {
for _, ns := range nsList.Items {
if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.f.Namespace.Name {
nsClient.Delete(ns.Name)
}
}
}
}
func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
nodes := ListSchedulableNodesOrDie(config.f.Client)
// create pods, one for each node
createdPods := make([]*api.Pod, 0, len(nodes.Items))
for i, n := range nodes.Items {
podName := fmt.Sprintf("%s-%d", podName, i)
pod := config.createNetShellPodSpec(podName, n.Name)
pod.ObjectMeta.Labels = selector
createdPod := config.createPod(pod)
createdPods = append(createdPods, createdPod)
}
// wait that all of them are up
runningPods := make([]*api.Pod, 0, len(nodes.Items))
for _, p := range createdPods {
expectNoError(config.f.WaitForPodRunning(p.Name))
rp, err := config.getPodClient().Get(p.Name)
expectNoError(err)
runningPods = append(runningPods, rp)
}
return runningPods
}
func (config *KubeProxyTestConfig) deleteNetProxyPod() {
pod := config.endpointPods[0]
config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0))
config.endpointPods = config.endpointPods[1:]
// wait for pod being deleted.
err := waitForPodToDisappear(config.f.Client, config.f.Namespace.Name, pod.Name, labels.Everything(), time.Second, util.ForeverTestTimeout)
if err != nil {
Failf("Failed to delete %s pod: %v", pod.Name, err)
}
// wait for endpoint being removed.
err = waitForServiceEndpointsNum(config.f.Client, config.f.Namespace.Name, nodePortServiceName, len(config.endpointPods), time.Second, util.ForeverTestTimeout)
if err != nil {
Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
}
// wait for kube-proxy to catch up with the pod being deleted.
time.Sleep(5 * time.Second)
}
func (config *KubeProxyTestConfig) createPod(pod *api.Pod) *api.Pod {
createdPod, err := config.getPodClient().Create(pod)
if err != nil {
Failf("Failed to create %s pod: %v", pod.Name, err)
}
return createdPod
}
func (config *KubeProxyTestConfig) getPodClient() client.PodInterface {
return config.f.Client.Pods(config.f.Namespace.Name)
}
func (config *KubeProxyTestConfig) getServiceClient() client.ServiceInterface {
return config.f.Client.Services(config.f.Namespace.Name)
}
func (config *KubeProxyTestConfig) getNamespacesClient() client.NamespaceInterface {
return config.f.Client.Namespaces()
}
func array2map(arr []string) map[string]bool {
retval := make(map[string]bool)
if len(arr) == 0 {
return retval
}
for _, str := range arr {
retval[str] = true
}
return retval
}
| test/e2e/kubeproxy.go | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.006446673534810543,
0.0004844214126933366,
0.0001623003336135298,
0.00017000899242702872,
0.0011620630975812674
] |
{
"id": 0,
"code_window": [
"\t\t\tOOMScoreAdj: util.IntPtr(qos.KubeProxyOOMScoreAdj),\n",
"\t\t\tResourceContainer: \"/kube-proxy\",\n",
"\t\t\tIPTablesSyncPeriod: unversioned.Duration{30 * time.Second},\n",
"\t\t\tUDPIdleTimeout: unversioned.Duration{250 * time.Millisecond},\n",
"\t\t\tMode: componentconfig.ProxyModeUserspace,\n",
"\t\t\tConntrackMax: 256 * 1024, // 4x default (64k)\n",
"\t\t\tConntrackTCPEstablishedTimeout: unversioned.Duration{Duration: 24 * time.Hour}, // 1 day (1/5 default)\n",
"\t\t},\n",
"\t\tKubeAPIQPS: 5.0,\n",
"\t\tKubeAPIBurst: 10,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "replace",
"edit_start_line_idx": 59
} | #!/usr/bin/env node
var azure = require('./lib/azure_wrapper.js');
azure.destroy_cluster(process.argv[2]);
console.log('The cluster has been destroyed; you can delete the state file now.');
| docs/getting-started-guides/coreos/azure/destroy-cluster.js | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00016988049901556224,
0.00016988049901556224,
0.00016988049901556224,
0.00016988049901556224,
0
] |
{
"id": 0,
"code_window": [
"\t\t\tOOMScoreAdj: util.IntPtr(qos.KubeProxyOOMScoreAdj),\n",
"\t\t\tResourceContainer: \"/kube-proxy\",\n",
"\t\t\tIPTablesSyncPeriod: unversioned.Duration{30 * time.Second},\n",
"\t\t\tUDPIdleTimeout: unversioned.Duration{250 * time.Millisecond},\n",
"\t\t\tMode: componentconfig.ProxyModeUserspace,\n",
"\t\t\tConntrackMax: 256 * 1024, // 4x default (64k)\n",
"\t\t\tConntrackTCPEstablishedTimeout: unversioned.Duration{Duration: 24 * time.Hour}, // 1 day (1/5 default)\n",
"\t\t},\n",
"\t\tKubeAPIQPS: 5.0,\n",
"\t\tKubeAPIBurst: 10,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "replace",
"edit_start_line_idx": 59
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1_test
import (
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testing/compat"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/validation"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/validation/field"
)
func TestCompatibility_v1_PodSecurityContext(t *testing.T) {
cases := []struct {
name string
input string
expectedKeys map[string]string
absentKeys []string
}{
{
name: "hostNetwork = true",
input: `
{
"kind":"Pod",
"apiVersion":"v1",
"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
"spec": {
"hostNetwork": true,
"containers":[{
"name":"a",
"image":"my-container-image"
}]
}
}
`,
expectedKeys: map[string]string{
"spec.hostNetwork": "true",
},
},
{
name: "hostNetwork = false",
input: `
{
"kind":"Pod",
"apiVersion":"v1",
"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
"spec": {
"hostNetwork": false,
"containers":[{
"name":"a",
"image":"my-container-image"
}]
}
}
`,
absentKeys: []string{
"spec.hostNetwork",
},
},
{
name: "hostIPC = true",
input: `
{
"kind":"Pod",
"apiVersion":"v1",
"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
"spec": {
"hostIPC": true,
"containers":[{
"name":"a",
"image":"my-container-image"
}]
}
}
`,
expectedKeys: map[string]string{
"spec.hostIPC": "true",
},
},
{
name: "hostIPC = false",
input: `
{
"kind":"Pod",
"apiVersion":"v1",
"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
"spec": {
"hostIPC": false,
"containers":[{
"name":"a",
"image":"my-container-image"
}]
}
}
`,
absentKeys: []string{
"spec.hostIPC",
},
},
{
name: "hostPID = true",
input: `
{
"kind":"Pod",
"apiVersion":"v1",
"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
"spec": {
"hostPID": true,
"containers":[{
"name":"a",
"image":"my-container-image"
}]
}
}
`,
expectedKeys: map[string]string{
"spec.hostPID": "true",
},
},
{
name: "hostPID = false",
input: `
{
"kind":"Pod",
"apiVersion":"v1",
"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
"spec": {
"hostPID": false,
"containers":[{
"name":"a",
"image":"my-container-image"
}]
}
}
`,
absentKeys: []string{
"spec.hostPID",
},
},
{
name: "reseting defaults for pre-v1.1 mirror pods",
input: `
{
"kind":"Pod",
"apiVersion":"v1",
"metadata":{
"name":"my-pod-name",
"namespace":"my-pod-namespace",
"annotations": {
"kubernetes.io/config.mirror": "mirror"
}
},
"spec": {
"containers":[{
"name":"a",
"image":"my-container-image",
"resources": {
"limits": {
"cpu": "100m"
}
}
}]
}
}
`,
absentKeys: []string{
"spec.terminationGracePeriodSeconds",
"spec.containers[0].resources.requests",
},
},
{
name: "preserving defaults for v1.1+ mirror pods",
input: `
{
"kind":"Pod",
"apiVersion":"v1",
"metadata":{
"name":"my-pod-name",
"namespace":"my-pod-namespace",
"annotations": {
"kubernetes.io/config.mirror": "cbe924f710c7e26f7693d6a341bcfad0"
}
},
"spec": {
"containers":[{
"name":"a",
"image":"my-container-image",
"resources": {
"limits": {
"cpu": "100m"
}
}
}]
}
}
`,
expectedKeys: map[string]string{
"spec.terminationGracePeriodSeconds": "30",
"spec.containers[0].resources.requests": "map[cpu:100m]",
},
},
}
validator := func(obj runtime.Object) field.ErrorList {
return validation.ValidatePodSpec(&(obj.(*api.Pod).Spec), field.NewPath("spec"))
}
for _, tc := range cases {
t.Logf("Testing 1.0.0 backward compatibility for %v", tc.name)
compat.TestCompatibility(t, v1.SchemeGroupVersion, []byte(tc.input), validator, tc.expectedKeys, tc.absentKeys)
}
}
| pkg/api/v1/backward_compatibility_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017839594511315227,
0.0001679684064583853,
0.00016324990428984165,
0.000167552221682854,
0.000003210794602637179
] |
{
"id": 0,
"code_window": [
"\t\t\tOOMScoreAdj: util.IntPtr(qos.KubeProxyOOMScoreAdj),\n",
"\t\t\tResourceContainer: \"/kube-proxy\",\n",
"\t\t\tIPTablesSyncPeriod: unversioned.Duration{30 * time.Second},\n",
"\t\t\tUDPIdleTimeout: unversioned.Duration{250 * time.Millisecond},\n",
"\t\t\tMode: componentconfig.ProxyModeUserspace,\n",
"\t\t\tConntrackMax: 256 * 1024, // 4x default (64k)\n",
"\t\t\tConntrackTCPEstablishedTimeout: unversioned.Duration{Duration: 24 * time.Hour}, // 1 day (1/5 default)\n",
"\t\t},\n",
"\t\tKubeAPIQPS: 5.0,\n",
"\t\tKubeAPIBurst: 10,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "replace",
"edit_start_line_idx": 59
} | // Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package discovery
import (
"fmt"
"net"
"strings"
"github.com/coreos/etcd/pkg/types"
)
var (
// indirection for testing
lookupSRV = net.LookupSRV
resolveTCPAddr = net.ResolveTCPAddr
)
// TODO(barakmich): Currently ignores priority and weight (as they don't make as much sense for a bootstrap)
// Also doesn't do any lookups for the token (though it could)
// Also sees each entry as a separate instance.
func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (string, string, error) {
stringParts := make([]string, 0)
tempName := int(0)
tcpAPUrls := make([]string, 0)
// First, resolve the apurls
for _, url := range apurls {
tcpAddr, err := resolveTCPAddr("tcp", url.Host)
if err != nil {
plog.Errorf("couldn't resolve host %s during SRV discovery", url.Host)
return "", "", err
}
tcpAPUrls = append(tcpAPUrls, tcpAddr.String())
}
updateNodeMap := func(service, prefix string) error {
_, addrs, err := lookupSRV(service, "tcp", dns)
if err != nil {
return err
}
for _, srv := range addrs {
target := strings.TrimSuffix(srv.Target, ".")
host := net.JoinHostPort(target, fmt.Sprintf("%d", srv.Port))
tcpAddr, err := resolveTCPAddr("tcp", host)
if err != nil {
plog.Warningf("couldn't resolve host %s during SRV discovery", host)
continue
}
n := ""
for _, url := range tcpAPUrls {
if url == tcpAddr.String() {
n = name
}
}
if n == "" {
n = fmt.Sprintf("%d", tempName)
tempName += 1
}
stringParts = append(stringParts, fmt.Sprintf("%s=%s%s", n, prefix, host))
plog.Noticef("got bootstrap from DNS for %s at %s%s", service, prefix, host)
}
return nil
}
failCount := 0
err := updateNodeMap("etcd-server-ssl", "https://")
srvErr := make([]string, 2)
if err != nil {
srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _etcd-server-ssl %s", err)
failCount += 1
}
err = updateNodeMap("etcd-server", "http://")
if err != nil {
srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _etcd-server %s", err)
failCount += 1
}
if failCount == 2 {
plog.Warningf(srvErr[0])
plog.Warningf(srvErr[1])
plog.Errorf("SRV discovery failed: too many errors querying DNS SRV records")
return "", "", err
}
return strings.Join(stringParts, ","), defaultToken, nil
}
| Godeps/_workspace/src/github.com/coreos/etcd/discovery/srv.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.0005504757282324135,
0.00021384062711149454,
0.0001638763933442533,
0.0001724006433505565,
0.00011288627865724266
] |
{
"id": 1,
"code_window": [
"package app\n",
"\n",
"import (\n",
"\t\"errors\"\n",
"\t\"net\"\n",
"\t\"net/http\"\n",
"\t_ \"net/http/pprof\"\n",
"\t\"strconv\"\n",
"\t\"time\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"fmt\"\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 22
} | <!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
<!-- BEGIN STRIP_FOR_RELEASE -->
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
If you are using a released version of Kubernetes, you should
refer to the docs that go with that version.
<!-- TAG RELEASE_LINK, added by the munger automatically -->
<strong>
The latest release of this document can be found
[here](http://releases.k8s.io/release-1.1/docs/admin/kube-proxy.md).
Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
</strong>
--
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## kube-proxy
### Synopsis
The Kubernetes network proxy runs on each node. It
reflects services as defined in the Kubernetes API on each node and can do simple
TCP/UDP stream forwarding or round-robin TCP/UDP forwarding across a set of backends.
Service cluster IPs and ports are currently found through Docker-links-compatible
environment variables specifying ports opened by the service proxy. There is an optional
addon that provides cluster DNS for these cluster IPs. The user must create a service
with the apiserver API to configure the proxy.
```
kube-proxy
```
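For illustration, the Docker-links-compatible variables mentioned above follow a fixed naming pattern. A minimal sketch: a pod in a cluster with a hypothetical service named `redis-master` (cluster IP 10.0.0.11, port 6379 — the name, address, and port are assumptions for this example, not values taken from this document) would typically see:
```
REDIS_MASTER_SERVICE_HOST=10.0.0.11
REDIS_MASTER_SERVICE_PORT=6379
REDIS_MASTER_PORT=tcp://10.0.0.11:6379
REDIS_MASTER_PORT_6379_TCP_PROTO=tcp
REDIS_MASTER_PORT_6379_TCP_PORT=6379
REDIS_MASTER_PORT_6379_TCP_ADDR=10.0.0.11
```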
### Options
```
--bind-address=0.0.0.0: The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)
--cleanup-iptables[=false]: If true, clean up iptables rules and exit.
--config-sync-period=15m0s: How often configuration from the apiserver is refreshed. Must be greater than 0.
--conntrack-max=262144: Maximum number of NAT connections to track (0 to leave as-is)
--conntrack-tcp-timeout-established=24h0m0s: Idle timeout for established TCP connections (0 to leave as-is)
--google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
--healthz-bind-address=127.0.0.1: The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
--healthz-port=10249: The port to bind the health check server. Use 0 to disable.
--hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
--iptables-sync-period=30s: How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.
--kube-api-burst=10: Burst to use while talking with kubernetes apiserver
--kube-api-qps=5: QPS to use while talking with kubernetes apiserver
--kubeconfig="": Path to kubeconfig file with authorization information (the master location is set by the master flag).
--log-flush-frequency=5s: Maximum number of seconds between log flushes
--masquerade-all[=false]: If using the pure iptables proxy, SNAT everything
--master="": The address of the Kubernetes API server (overrides any value in kubeconfig)
--oom-score-adj=-999: The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]
--proxy-mode=userspace: Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.
--proxy-port-range=: Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
--udp-timeout=250ms: How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace
```
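As a sketch of how these flags combine in practice (the master address and the chosen values are assumptions for the example, not recommendations):
```
kube-proxy --master=http://10.0.0.1:8080 --proxy-mode=iptables --iptables-sync-period=30s --healthz-port=10249
```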
###### Auto generated by spf13/cobra on 27-Jan-2016
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
| docs/admin/kube-proxy.md | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.000371151661965996,
0.00022301942226476967,
0.0001646862510824576,
0.00018134665151592344,
0.00008042355329962447
] |
{
"id": 1,
"code_window": [
"package app\n",
"\n",
"import (\n",
"\t\"errors\"\n",
"\t\"net\"\n",
"\t\"net/http\"\n",
"\t_ \"net/http/pprof\"\n",
"\t\"strconv\"\n",
"\t\"time\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"fmt\"\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 22
} | name: kubernetes-master
summary: Container Cluster Management Master
description: |
Provides a kubernetes api endpoint, scheduler for managing containers.
maintainers:
- Matt Bruzek <[email protected]>
- Whit Morriss <[email protected]>
- Charles Butler <[email protected]>
tags:
- ops
- network
provides:
client-api:
interface: kubernetes-client
minions-api:
interface: kubernetes-api
requires:
etcd:
interface: etcd
network:
interface: overlay-network
| cluster/juju/charms/trusty/kubernetes-master/metadata.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00044731138041242957,
0.0002845296112354845,
0.0001659520494285971,
0.0002403253165539354,
0.00011904139682883397
] |
{
"id": 1,
"code_window": [
"package app\n",
"\n",
"import (\n",
"\t\"errors\"\n",
"\t\"net\"\n",
"\t\"net/http\"\n",
"\t_ \"net/http/pprof\"\n",
"\t\"strconv\"\n",
"\t\"time\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"fmt\"\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 22
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package podtask maps Kubernetes pods to Mesos tasks.
package podtask
| contrib/mesos/pkg/scheduler/podtask/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017151658539660275,
0.00016838643932715058,
0.00016525630780961365,
0.00016838643932715058,
0.0000031301387934945524
] |
{
"id": 1,
"code_window": [
"package app\n",
"\n",
"import (\n",
"\t\"errors\"\n",
"\t\"net\"\n",
"\t\"net/http\"\n",
"\t_ \"net/http/pprof\"\n",
"\t\"strconv\"\n",
"\t\"time\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"fmt\"\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 22
} | kind: ReplicationController
apiVersion: v1
metadata:
name: spark-gluster-worker-controller
labels:
component: spark-worker
spec:
replicas: 2
selector:
component: spark-worker
template:
metadata:
labels:
component: spark-worker
uses: spark-master
spec:
containers:
- name: spark-worker
image: gcr.io/google_containers/spark-worker:1.5.1_v2
ports:
- containerPort: 8888
volumeMounts:
- mountPath: /mnt/glusterfs
name: glusterfsvol
resources:
requests:
cpu: 100m
volumes:
- name: glusterfsvol
glusterfs:
endpoints: glusterfs-cluster
path: MyVolume
readOnly: false
| examples/spark/spark-gluster/spark-worker-controller.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.0001755549747031182,
0.00017261476023122668,
0.00016953726299107075,
0.00017268338706344366,
0.000002131597057086765
] |
{
"id": 2,
"code_window": [
"\tProxier proxy.ProxyProvider\n",
"\tBroadcaster record.EventBroadcaster\n",
"\tRecorder record.EventRecorder\n",
"\tConntracker Conntracker // if nil, ignored\n",
"}\n",
"\n",
"const (\n",
"\tproxyModeUserspace = \"userspace\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tProxyMode string\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 60
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"errors"
"net"
"net/http"
_ "net/http/pprof"
"strconv"
"time"
"k8s.io/kubernetes/cmd/kube-proxy/app/options"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
kubeclient "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/proxy"
proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
"k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/proxy/userspace"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
utildbus "k8s.io/kubernetes/pkg/util/dbus"
"k8s.io/kubernetes/pkg/util/exec"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilnet "k8s.io/kubernetes/pkg/util/net"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/oom"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
type ProxyServer struct {
Client *kubeclient.Client
Config *options.ProxyServerConfig
IptInterface utiliptables.Interface
Proxier proxy.ProxyProvider
Broadcaster record.EventBroadcaster
Recorder record.EventRecorder
Conntracker Conntracker // if nil, ignored
}
const (
proxyModeUserspace = "userspace"
proxyModeIptables = "iptables"
experimentalProxyModeAnnotation = options.ExperimentalProxyModeAnnotation
betaProxyModeAnnotation = "net.beta.kubernetes.io/proxy-mode"
)
func checkKnownProxyMode(proxyMode string) bool {
switch proxyMode {
case "", proxyModeUserspace, proxyModeIptables:
return true
}
return false
}
func NewProxyServer(
client *kubeclient.Client,
config *options.ProxyServerConfig,
iptInterface utiliptables.Interface,
proxier proxy.ProxyProvider,
broadcaster record.EventBroadcaster,
recorder record.EventRecorder,
conntracker Conntracker,
) (*ProxyServer, error) {
return &ProxyServer{
Client: client,
Config: config,
IptInterface: iptInterface,
Proxier: proxier,
Broadcaster: broadcaster,
Recorder: recorder,
Conntracker: conntracker,
}, nil
}
// NewProxyCommand creates a *cobra.Command object with default parameters
func NewProxyCommand() *cobra.Command {
s := options.NewProxyConfig()
s.AddFlags(pflag.CommandLine)
cmd := &cobra.Command{
Use: "kube-proxy",
Long: `The Kubernetes network proxy runs on each node. It
reflects services as defined in the Kubernetes API on each node and can do simple
TCP/UDP stream forwarding or round-robin TCP/UDP forwarding across a set of backends.
Service cluster IPs and ports are currently found through Docker-links-compatible
environment variables specifying ports opened by the service proxy. There is an optional
addon that provides cluster DNS for these cluster IPs. The user must create a service
with the apiserver API to configure the proxy.`,
Run: func(cmd *cobra.Command, args []string) {
},
}
return cmd
}
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, error) {
protocol := utiliptables.ProtocolIpv4
if net.ParseIP(config.BindAddress).To4() == nil {
protocol = utiliptables.ProtocolIpv6
}
// Create a iptables utils.
execer := exec.New()
dbus := utildbus.New()
iptInterface := utiliptables.New(execer, dbus, protocol)
// We omit creation of pretty much everything if we run in cleanup mode
if config.CleanupAndExit {
return &ProxyServer{
Config: config,
IptInterface: iptInterface,
}, nil
}
// TODO(vmarmol): Use container config for this.
var oomAdjuster *oom.OOMAdjuster
if config.OOMScoreAdj != nil {
oomAdjuster = oom.NewOOMAdjuster()
if err := oomAdjuster.ApplyOOMScoreAdj(0, *config.OOMScoreAdj); err != nil {
glog.V(2).Info(err)
}
}
if config.ResourceContainer != "" {
// Run in its own container.
if err := util.RunInResourceContainer(config.ResourceContainer); err != nil {
glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
} else {
glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
}
}
// Create a Kube Client
// define api config source
if config.Kubeconfig == "" && config.Master == "" {
glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
}
// This creates a client, first loading any specified kubeconfig
// file, and then overriding the Master flag, if non-empty.
kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
if err != nil {
return nil, err
}
// Override kubeconfig qps/burst settings from flags
kubeconfig.QPS = config.KubeAPIQPS
kubeconfig.Burst = config.KubeAPIBurst
client, err := kubeclient.New(kubeconfig)
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
// Create event recorder
hostname := nodeutil.GetHostname(config.HostnameOverride)
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})
var proxier proxy.ProxyProvider
var endpointsHandler proxyconfig.EndpointsConfigHandler
proxyMode := getProxyMode(string(config.Mode), client.Nodes(), hostname, iptInterface)
if proxyMode == proxyModeIptables {
glog.V(2).Info("Using iptables Proxier.")
proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.IPTablesSyncPeriod.Duration, config.MasqueradeAll)
if err != nil {
glog.Fatalf("Unable to create proxier: %v", err)
}
proxier = proxierIptables
endpointsHandler = proxierIptables
// No turning back. Remove artifacts that might still exist from the userspace Proxier.
glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
userspace.CleanupLeftovers(iptInterface)
} else {
glog.V(2).Info("Using userspace Proxier.")
// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
// our config.EndpointsConfigHandler.
loadBalancer := userspace.NewLoadBalancerRR()
// set EndpointsConfigHandler to our loadBalancer
endpointsHandler = loadBalancer
proxierUserspace, err := userspace.NewProxier(
loadBalancer,
net.ParseIP(config.BindAddress),
iptInterface,
*utilnet.ParsePortRangeOrDie(config.PortRange),
config.IPTablesSyncPeriod.Duration,
config.UDPIdleTimeout.Duration,
)
if err != nil {
glog.Fatalf("Unable to create proxier: %v", err)
}
proxier = proxierUserspace
// Remove artifacts from the pure-iptables Proxier.
glog.V(2).Info("Tearing down pure-iptables proxy rules. Errors here are acceptable.")
iptables.CleanupLeftovers(iptInterface)
}
iptInterface.AddReloadFunc(proxier.Sync)
// Create configs (i.e. Watches for Services and Endpoints)
// Note: RegisterHandler() calls need to happen before creation of Sources because sources
// only notify on changes, and the initial update (on process start) may be lost if no handlers
// are registered yet.
serviceConfig := proxyconfig.NewServiceConfig()
serviceConfig.RegisterHandler(proxier)
endpointsConfig := proxyconfig.NewEndpointsConfig()
endpointsConfig.RegisterHandler(endpointsHandler)
proxyconfig.NewSourceAPI(
client,
config.ConfigSyncPeriod,
serviceConfig.Channel("api"),
endpointsConfig.Channel("api"),
)
config.NodeRef = &api.ObjectReference{
Kind: "Node",
Name: hostname,
UID: types.UID(hostname),
Namespace: "",
}
conntracker := realConntracker{}
return NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker)
}
// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).
func (s *ProxyServer) Run() error {
// remove iptables rules and exit
if s.Config.CleanupAndExit {
encounteredError := userspace.CleanupLeftovers(s.IptInterface)
encounteredError = iptables.CleanupLeftovers(s.IptInterface) || encounteredError
if encounteredError {
			return errors.New("encountered an error while tearing down rules")
}
return nil
}
s.Broadcaster.StartRecordingToSink(s.Client.Events(""))
// Start up Healthz service if requested
if s.Config.HealthzPort > 0 {
go util.Until(func() {
err := http.ListenAndServe(s.Config.HealthzBindAddress+":"+strconv.Itoa(s.Config.HealthzPort), nil)
if err != nil {
glog.Errorf("Starting health server failed: %v", err)
}
}, 5*time.Second, util.NeverStop)
}
// Tune conntrack, if requested
if s.Conntracker != nil {
if s.Config.ConntrackMax > 0 {
if err := s.Conntracker.SetMax(s.Config.ConntrackMax); err != nil {
return err
}
}
if s.Config.ConntrackTCPEstablishedTimeout.Duration > 0 {
if err := s.Conntracker.SetTCPEstablishedTimeout(int(s.Config.ConntrackTCPEstablishedTimeout.Duration / time.Second)); err != nil {
return err
}
}
}
	// Emit a birth-cry event once startup has succeeded.
s.birthCry()
// Just loop forever for now...
s.Proxier.SyncLoop()
return nil
}
type nodeGetter interface {
Get(hostname string) (*api.Node, error)
}
func getProxyMode(proxyMode string, client nodeGetter, hostname string, iptver iptables.IptablesVersioner) string {
if proxyMode == proxyModeUserspace {
return proxyModeUserspace
} else if proxyMode == proxyModeIptables {
return tryIptablesProxy(iptver)
} else if proxyMode != "" {
glog.V(1).Infof("Flag proxy-mode=%q unknown, assuming iptables proxy", proxyMode)
return tryIptablesProxy(iptver)
}
// proxyMode == "" - choose the best option.
if client == nil {
glog.Errorf("nodeGetter is nil: assuming iptables proxy")
return tryIptablesProxy(iptver)
}
node, err := client.Get(hostname)
if err != nil {
glog.Errorf("Can't get Node %q, assuming iptables proxy: %v", hostname, err)
return tryIptablesProxy(iptver)
}
if node == nil {
glog.Errorf("Got nil Node %q, assuming iptables proxy: %v", hostname)
return tryIptablesProxy(iptver)
}
proxyMode, found := node.Annotations[betaProxyModeAnnotation]
if found {
glog.V(1).Infof("Found beta annotation %q = %q", betaProxyModeAnnotation, proxyMode)
} else {
// We already published some information about this annotation with the "experimental" name, so we will respect it.
proxyMode, found = node.Annotations[experimentalProxyModeAnnotation]
if found {
glog.V(1).Infof("Found experimental annotation %q = %q", experimentalProxyModeAnnotation, proxyMode)
}
}
if proxyMode == proxyModeUserspace {
glog.V(1).Infof("Annotation demands userspace proxy")
return proxyModeUserspace
}
return tryIptablesProxy(iptver)
}
func tryIptablesProxy(iptver iptables.IptablesVersioner) string {
	// The result is guaranteed false on error; the error itself is only useful for debugging.
useIptablesProxy, err := iptables.CanUseIptablesProxier(iptver)
if err != nil {
glog.Errorf("Can't determine whether to use iptables proxy, using userspace proxier: %v", err)
return proxyModeUserspace
}
if useIptablesProxy {
return proxyModeIptables
}
// Fallback.
glog.V(1).Infof("Can't use iptables proxy, using userspace proxier: %v", err)
return proxyModeUserspace
}
func (s *ProxyServer) birthCry() {
s.Recorder.Eventf(s.Config.NodeRef, api.EventTypeNormal, "Starting", "Starting kube-proxy.")
}
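
// Illustrative sketch (not part of the original file): getProxyMode depends only
// on the small nodeGetter interface above and on iptables.IptablesVersioner
// (which, in this vintage of the package, exposes GetVersion), so the mode
// selection logic can be exercised with stubs. The fake types, node name, and
// version string below are hypothetical helpers, not real kube-proxy code.
type fakeNodeGetter struct {
	node *api.Node
	err  error
}

func (f *fakeNodeGetter) Get(hostname string) (*api.Node, error) { return f.node, f.err }

type fakeIptablesVersioner struct {
	version string
}

func (f *fakeIptablesVersioner) GetVersion() (string, error) { return f.version, nil }

func exampleProxyModeSelection() {
	node := &api.Node{}
	node.Annotations = map[string]string{betaProxyModeAnnotation: proxyModeUserspace}
	// With an empty --proxy-mode flag, the node annotation wins and forces userspace.
	mode := getProxyMode("", &fakeNodeGetter{node: node}, "node-1", &fakeIptablesVersioner{version: "1.4.11"})
	glog.Infof("selected proxy mode: %s", mode)
}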
| cmd/kube-proxy/app/server.go | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.9983285069465637,
0.19063633680343628,
0.00016343120660167187,
0.0004459387855604291,
0.3757081627845764
] |
{
"id": 2,
"code_window": [
"\tProxier proxy.ProxyProvider\n",
"\tBroadcaster record.EventBroadcaster\n",
"\tRecorder record.EventRecorder\n",
"\tConntracker Conntracker // if nil, ignored\n",
"}\n",
"\n",
"const (\n",
"\tproxyModeUserspace = \"userspace\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tProxyMode string\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 60
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"math/rand"
"net"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"time"
etcd "github.com/coreos/etcd/client"
"github.com/stretchr/testify/assert"
)
const validEtcdVersion = "etcd 2.0.9"
func TestIsEtcdNotFound(t *testing.T) {
try := func(err error, isNotFound bool) {
if IsEtcdNotFound(err) != isNotFound {
t.Errorf("Expected %#v to return %v, but it did not", err, isNotFound)
}
}
try(&etcd.Error{Code: 101}, false)
try(nil, false)
try(fmt.Errorf("some other kind of error"), false)
}
func TestGetEtcdVersion_ValidVersion(t *testing.T) {
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, validEtcdVersion)
}))
	// TODO: Uncomment when #19254 is fixed.
// defer testServer.Close()
var version string
var err error
if version, err = GetEtcdVersion(testServer.URL); err != nil {
t.Errorf("Unexpected error: %v", err)
}
assert.Equal(t, validEtcdVersion, version, "Unexpected version")
assert.Nil(t, err)
}
func TestGetEtcdVersion_ErrorStatus(t *testing.T) {
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusServiceUnavailable)
}))
	// TODO: Uncomment when #19254 is fixed.
// defer testServer.Close()
_, err := GetEtcdVersion(testServer.URL)
assert.NotNil(t, err)
}
func TestGetEtcdVersion_NotListening(t *testing.T) {
portIsOpen := func(port int) bool {
conn, err := net.DialTimeout("tcp", "127.0.0.1:"+strconv.Itoa(port), 1*time.Second)
if err == nil {
conn.Close()
return true
}
return false
}
port := rand.Intn((1 << 16) - 1)
for tried := 0; portIsOpen(port); tried++ {
if tried >= 10 {
t.Fatal("Couldn't find a closed TCP port to continue testing")
}
port++
}
_, err := GetEtcdVersion("http://127.0.0.1:" + strconv.Itoa(port))
assert.NotNil(t, err)
}
func TestEtcdHealthCheck(t *testing.T) {
tests := []struct {
data string
expectErr bool
}{
{
data: "{\"health\": \"true\"}",
expectErr: false,
},
{
data: "{\"health\": \"false\"}",
expectErr: true,
},
{
data: "invalid json",
expectErr: true,
},
}
for _, test := range tests {
err := EtcdHealthCheck([]byte(test.data))
if err != nil && !test.expectErr {
t.Errorf("unexpected error: %v", err)
}
if err == nil && test.expectErr {
t.Error("unexpected non-error")
}
}
}
| pkg/storage/etcd/util/etcd_util_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017947268497664481,
0.0001740930019877851,
0.00016770038928370923,
0.00017436800408177078,
0.0000035397792998992372
] |
{
"id": 2,
"code_window": [
"\tProxier proxy.ProxyProvider\n",
"\tBroadcaster record.EventBroadcaster\n",
"\tRecorder record.EventRecorder\n",
"\tConntracker Conntracker // if nil, ignored\n",
"}\n",
"\n",
"const (\n",
"\tproxyModeUserspace = \"userspace\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tProxyMode string\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 60
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED BY codecgen.
// ************************************************************
package testing
import (
"errors"
"fmt"
codec1978 "github.com/ugorji/go/codec"
pkg2_api "k8s.io/kubernetes/pkg/api"
pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
pkg3_types "k8s.io/kubernetes/pkg/types"
"reflect"
"runtime"
time "time"
)
const (
// ----- content types ----
codecSelferC_UTF81234 = 1
codecSelferC_RAW1234 = 0
// ----- value types used ----
codecSelferValueTypeArray1234 = 10
codecSelferValueTypeMap1234 = 9
// ----- containerStateValues ----
codecSelfer_containerMapKey1234 = 2
codecSelfer_containerMapValue1234 = 3
codecSelfer_containerMapEnd1234 = 4
codecSelfer_containerArrayElem1234 = 6
codecSelfer_containerArrayEnd1234 = 7
)
var (
codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits())
codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
)
type codecSelfer1234 struct{}
func init() {
if codec1978.GenVersion != 5 {
_, file, _, _ := runtime.Caller(0)
err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
5, codec1978.GenVersion, file)
panic(err)
}
if false { // reference the types, but skip this branch at build/run time
var v0 pkg2_api.ObjectMeta
var v1 pkg1_unversioned.TypeMeta
var v2 pkg3_types.UID
var v3 time.Time
_, _, _, _ = v0, v1, v2, v3
}
}
func (x *TestStruct) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
if x == nil {
r.EncodeNil()
} else {
yym1 := z.EncBinary()
_ = yym1
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [7]bool
_, _, _ = yysep2, yyq2, yy2arr2
const yyr2 bool = false
yyq2[0] = x.Kind != ""
yyq2[1] = x.APIVersion != ""
yyq2[2] = true
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(7)
} else {
yynn2 = 4
for _, b := range yyq2 {
if b {
yynn2++
}
}
r.EncodeMapStart(yynn2)
yynn2 = 0
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[0] {
yym4 := z.EncBinary()
_ = yym4
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq2[0] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("kind"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym5 := z.EncBinary()
_ = yym5
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[1] {
yym7 := z.EncBinary()
_ = yym7
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
}
} else {
r.EncodeString(codecSelferC_UTF81234, "")
}
} else {
if yyq2[1] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym8 := z.EncBinary()
_ = yym8
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[2] {
yy10 := &x.ObjectMeta
yy10.CodecEncodeSelf(e)
} else {
r.EncodeNil()
}
} else {
if yyq2[2] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("metadata"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yy11 := &x.ObjectMeta
yy11.CodecEncodeSelf(e)
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yym13 := z.EncBinary()
_ = yym13
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Key))
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("Key"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym14 := z.EncBinary()
_ = yym14
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.Key))
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if x.Map == nil {
r.EncodeNil()
} else {
yym16 := z.EncBinary()
_ = yym16
if false {
} else {
z.F.EncMapStringIntV(x.Map, false, e)
}
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("Map"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.Map == nil {
r.EncodeNil()
} else {
yym17 := z.EncBinary()
_ = yym17
if false {
} else {
z.F.EncMapStringIntV(x.Map, false, e)
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if x.StringList == nil {
r.EncodeNil()
} else {
yym19 := z.EncBinary()
_ = yym19
if false {
} else {
z.F.EncSliceStringV(x.StringList, false, e)
}
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("StringList"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.StringList == nil {
r.EncodeNil()
} else {
yym20 := z.EncBinary()
_ = yym20
if false {
} else {
z.F.EncSliceStringV(x.StringList, false, e)
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if x.IntList == nil {
r.EncodeNil()
} else {
yym22 := z.EncBinary()
_ = yym22
if false {
} else {
z.F.EncSliceIntV(x.IntList, false, e)
}
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("IntList"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.IntList == nil {
r.EncodeNil()
} else {
yym23 := z.EncBinary()
_ = yym23
if false {
} else {
z.F.EncSliceIntV(x.IntList, false, e)
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
}
}
}
}
func (x *TestStruct) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym24 := z.DecBinary()
_ = yym24
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
yyct25 := r.ContainerType()
if yyct25 == codecSelferValueTypeMap1234 {
yyl25 := r.ReadMapStart()
if yyl25 == 0 {
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
} else {
x.codecDecodeSelfFromMap(yyl25, d)
}
} else if yyct25 == codecSelferValueTypeArray1234 {
yyl25 := r.ReadArrayStart()
if yyl25 == 0 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
x.codecDecodeSelfFromArray(yyl25, d)
}
} else {
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
}
}
}
func (x *TestStruct) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yys26Slc = z.DecScratchBuffer() // default slice to decode into
_ = yys26Slc
var yyhl26 bool = l >= 0
for yyj26 := 0; ; yyj26++ {
if yyhl26 {
if yyj26 >= l {
break
}
} else {
if r.CheckBreak() {
break
}
}
z.DecSendContainerState(codecSelfer_containerMapKey1234)
yys26Slc = r.DecodeBytes(yys26Slc, true, true)
yys26 := string(yys26Slc)
z.DecSendContainerState(codecSelfer_containerMapValue1234)
switch yys26 {
case "kind":
if r.TryDecodeAsNil() {
x.Kind = ""
} else {
x.Kind = string(r.DecodeString())
}
case "apiVersion":
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
x.APIVersion = string(r.DecodeString())
}
case "metadata":
if r.TryDecodeAsNil() {
x.ObjectMeta = pkg2_api.ObjectMeta{}
} else {
yyv29 := &x.ObjectMeta
yyv29.CodecDecodeSelf(d)
}
case "Key":
if r.TryDecodeAsNil() {
x.Key = ""
} else {
x.Key = string(r.DecodeString())
}
case "Map":
if r.TryDecodeAsNil() {
x.Map = nil
} else {
yyv31 := &x.Map
yym32 := z.DecBinary()
_ = yym32
if false {
} else {
z.F.DecMapStringIntX(yyv31, false, d)
}
}
case "StringList":
if r.TryDecodeAsNil() {
x.StringList = nil
} else {
yyv33 := &x.StringList
yym34 := z.DecBinary()
_ = yym34
if false {
} else {
z.F.DecSliceStringX(yyv33, false, d)
}
}
case "IntList":
if r.TryDecodeAsNil() {
x.IntList = nil
} else {
yyv35 := &x.IntList
yym36 := z.DecBinary()
_ = yym36
if false {
} else {
z.F.DecSliceIntX(yyv35, false, d)
}
}
default:
z.DecStructFieldNotFound(-1, yys26)
} // end switch yys26
} // end for yyj26
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
}
func (x *TestStruct) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj37 int
var yyb37 bool
var yyhl37 bool = l >= 0
yyj37++
if yyhl37 {
yyb37 = yyj37 > l
} else {
yyb37 = r.CheckBreak()
}
if yyb37 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Kind = ""
} else {
x.Kind = string(r.DecodeString())
}
yyj37++
if yyhl37 {
yyb37 = yyj37 > l
} else {
yyb37 = r.CheckBreak()
}
if yyb37 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
x.APIVersion = string(r.DecodeString())
}
yyj37++
if yyhl37 {
yyb37 = yyj37 > l
} else {
yyb37 = r.CheckBreak()
}
if yyb37 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.ObjectMeta = pkg2_api.ObjectMeta{}
} else {
yyv40 := &x.ObjectMeta
yyv40.CodecDecodeSelf(d)
}
yyj37++
if yyhl37 {
yyb37 = yyj37 > l
} else {
yyb37 = r.CheckBreak()
}
if yyb37 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Key = ""
} else {
x.Key = string(r.DecodeString())
}
yyj37++
if yyhl37 {
yyb37 = yyj37 > l
} else {
yyb37 = r.CheckBreak()
}
if yyb37 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Map = nil
} else {
yyv42 := &x.Map
yym43 := z.DecBinary()
_ = yym43
if false {
} else {
z.F.DecMapStringIntX(yyv42, false, d)
}
}
yyj37++
if yyhl37 {
yyb37 = yyj37 > l
} else {
yyb37 = r.CheckBreak()
}
if yyb37 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.StringList = nil
} else {
yyv44 := &x.StringList
yym45 := z.DecBinary()
_ = yym45
if false {
} else {
z.F.DecSliceStringX(yyv44, false, d)
}
}
yyj37++
if yyhl37 {
yyb37 = yyj37 > l
} else {
yyb37 = r.CheckBreak()
}
if yyb37 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.IntList = nil
} else {
yyv46 := &x.IntList
yym47 := z.DecBinary()
_ = yym47
if false {
} else {
z.F.DecSliceIntX(yyv46, false, d)
}
}
for {
yyj37++
if yyhl37 {
yyb37 = yyj37 > l
} else {
yyb37 = r.CheckBreak()
}
if yyb37 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj37-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
| pkg/kubectl/testing/types.generated.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00046583786024712026,
0.00017954246141016483,
0.00016724223678465933,
0.000173788343090564,
0.0000387568143196404
] |
{
"id": 2,
"code_window": [
"\tProxier proxy.ProxyProvider\n",
"\tBroadcaster record.EventBroadcaster\n",
"\tRecorder record.EventRecorder\n",
"\tConntracker Conntracker // if nil, ignored\n",
"}\n",
"\n",
"const (\n",
"\tproxyModeUserspace = \"userspace\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tProxyMode string\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 60
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
"reflect"
"strconv"
"time"
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/meta"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/endpoints"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
kservice "k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/pkg/watch"
"github.com/golang/glog"
)
var (
keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
)
type EndpointController interface {
Run(workers int, stopCh <-chan struct{})
}
// NewEndpointController returns a new *endpointController.
func NewEndpointController(client *client.Client) *endpointController {
e := &endpointController{
client: client,
queue: workqueue.New(),
}
e.serviceStore.Store, e.serviceController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Services(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return e.client.Services(api.NamespaceAll).Watch(options)
},
},
&api.Service{},
kservice.FullServiceResyncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: e.enqueueService,
UpdateFunc: func(old, cur interface{}) {
e.enqueueService(cur)
},
DeleteFunc: e.enqueueService,
},
)
e.podStore.Store, e.podController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Pods(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return e.client.Pods(api.NamespaceAll).Watch(options)
},
},
&api.Pod{},
5*time.Minute,
framework.ResourceEventHandlerFuncs{
AddFunc: e.addPod,
UpdateFunc: e.updatePod,
DeleteFunc: e.deletePod,
},
)
return e
}
// endpointController manages selector-based service endpoints.
type endpointController struct {
client *client.Client
serviceStore cache.StoreToServiceLister
podStore cache.StoreToPodLister
// Services that need to be updated. A channel is inappropriate here,
// because it allows services with lots of pods to be serviced much
	// more often than services with few pods; it would also cause a
// service that's inserted multiple times to be processed more than
// necessary.
queue *workqueue.Type
// Since we join two objects, we'll watch both of them with
// controllers.
serviceController *framework.Controller
podController *framework.Controller
}
// Runs e; will not return until stopCh is closed. workers determines how many
// endpoints will be handled in parallel.
func (e *endpointController) Run(workers int, stopCh <-chan struct{}) {
defer util.HandleCrash()
go e.serviceController.Run(stopCh)
go e.podController.Run(stopCh)
for i := 0; i < workers; i++ {
go util.Until(e.worker, time.Second, stopCh)
}
go func() {
defer util.HandleCrash()
time.Sleep(5 * time.Minute) // give time for our cache to fill
e.checkLeftoverEndpoints()
}()
<-stopCh
e.queue.ShutDown()
}
func (e *endpointController) getPodServiceMemberships(pod *api.Pod) (sets.String, error) {
set := sets.String{}
services, err := e.serviceStore.GetPodServices(pod)
if err != nil {
// don't log this error because this function makes pointless
// errors when no services match.
return set, nil
}
for i := range services {
key, err := keyFunc(&services[i])
if err != nil {
return nil, err
}
set.Insert(key)
}
return set, nil
}
// When a pod is added, figure out what services it will be a member of and
// enqueue them. obj must have *api.Pod type.
func (e *endpointController) addPod(obj interface{}) {
pod := obj.(*api.Pod)
services, err := e.getPodServiceMemberships(pod)
if err != nil {
glog.Errorf("Unable to get pod %v/%v's service memberships: %v", pod.Namespace, pod.Name, err)
return
}
for key := range services {
e.queue.Add(key)
}
}
// When a pod is updated, figure out what services it used to be a member of
// and what services it will be a member of, and enqueue the union of these.
// old and cur must be *api.Pod types.
func (e *endpointController) updatePod(old, cur interface{}) {
if api.Semantic.DeepEqual(old, cur) {
return
}
	newPod := cur.(*api.Pod)
services, err := e.getPodServiceMemberships(newPod)
if err != nil {
glog.Errorf("Unable to get pod %v/%v's service memberships: %v", newPod.Namespace, newPod.Name, err)
return
}
	oldPod := old.(*api.Pod)
// Only need to get the old services if the labels changed.
if !reflect.DeepEqual(newPod.Labels, oldPod.Labels) {
oldServices, err := e.getPodServiceMemberships(oldPod)
if err != nil {
glog.Errorf("Unable to get pod %v/%v's service memberships: %v", oldPod.Namespace, oldPod.Name, err)
return
}
services = services.Union(oldServices)
}
for key := range services {
e.queue.Add(key)
}
}
// When a pod is deleted, enqueue the services the pod used to be a member of.
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item.
func (e *endpointController) deletePod(obj interface{}) {
if _, ok := obj.(*api.Pod); ok {
// Enqueue all the services that the pod used to be a member
// of. This happens to be exactly the same thing we do when a
// pod is added.
e.addPod(obj)
return
}
podKey, err := keyFunc(obj)
if err != nil {
glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
}
glog.Infof("Pod %q was deleted but we don't have a record of its final state, so it will take up to %v before it will be removed from all endpoint records.", podKey, kservice.FullServiceResyncPeriod)
// TODO: keep a map of pods to services to handle this condition.
}
// obj could be an *api.Service, or a DeletionFinalStateUnknown marker item.
func (e *endpointController) enqueueService(obj interface{}) {
key, err := keyFunc(obj)
if err != nil {
glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
}
e.queue.Add(key)
}
// worker runs a worker thread that just dequeues items, processes them, and
// marks them done. You may run as many of these in parallel as you wish; the
// workqueue guarantees that they will not end up processing the same service
// at the same time.
func (e *endpointController) worker() {
for {
func() {
key, quit := e.queue.Get()
if quit {
return
}
// Use defer: in the unlikely event that there's a
// panic, we'd still like this to get marked done--
// otherwise the controller will not be able to sync
// this service again until it is restarted.
defer e.queue.Done(key)
e.syncService(key.(string))
}()
}
}
// HACK(sttts): add annotations to the endpoint about the respective container ports
func (e *endpointController) syncService(key string) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime))
}()
obj, exists, err := e.serviceStore.Store.GetByKey(key)
if err != nil || !exists {
// Delete the corresponding endpoint, as the service has been deleted.
// TODO: Please note that this will delete an endpoint when a
// service is deleted. However, if we're down at the time when
// the service is deleted, we will miss that deletion, so this
// doesn't completely solve the problem. See #6877.
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
glog.Errorf("Need to delete endpoint with key %q, but couldn't understand the key: %v", key, err)
// Don't retry, as the key isn't going to magically become understandable.
return
}
err = e.client.Endpoints(namespace).Delete(name)
if err != nil && !errors.IsNotFound(err) {
glog.Errorf("Error deleting endpoint %q: %v", key, err)
e.queue.Add(key) // Retry
}
return
}
service := obj.(*api.Service)
if service.Spec.Selector == nil {
// services without a selector receive no endpoints from this controller;
// these services will receive the endpoints that are created out-of-band via the REST API.
return
}
glog.V(5).Infof("About to update endpoints for service %q", key)
pods, err := e.podStore.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelector())
if err != nil {
// Since we're getting stuff from a local cache, it is
// basically impossible to get this error.
glog.Errorf("Error syncing service %q: %v", key, err)
e.queue.Add(key) // Retry
return
}
subsets := []api.EndpointSubset{}
containerPortAnnotations := map[string]string{} // by <HostIP>:<Port>
for i := range pods.Items {
pod := &pods.Items[i]
for i := range service.Spec.Ports {
servicePort := &service.Spec.Ports[i]
portName := servicePort.Name
portProto := servicePort.Protocol
portNum, containerPort, err := findPort(pod, servicePort)
if err != nil {
glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
continue
}
// HACK(jdef): use HostIP instead of pod.CurrentState.PodIP for generic mesos compat
if len(pod.Status.HostIP) == 0 {
glog.V(4).Infof("Failed to find a host IP for pod %s/%s", pod.Namespace, pod.Name)
continue
}
if pod.DeletionTimestamp != nil {
glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
continue
}
if !api.IsPodReady(pod) {
glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name)
continue
}
// HACK(jdef): use HostIP instead of pod.CurrentState.PodIP for generic mesos compat
epp := api.EndpointPort{Name: portName, Port: portNum, Protocol: portProto}
epa := api.EndpointAddress{IP: pod.Status.HostIP, TargetRef: &api.ObjectReference{
Kind: "Pod",
Namespace: pod.ObjectMeta.Namespace,
Name: pod.ObjectMeta.Name,
UID: pod.ObjectMeta.UID,
ResourceVersion: pod.ObjectMeta.ResourceVersion,
}}
subsets = append(subsets, api.EndpointSubset{Addresses: []api.EndpointAddress{epa}, Ports: []api.EndpointPort{epp}})
containerPortAnnotations[fmt.Sprintf(meta.ContainerPortKeyFormat, portProto, pod.Status.HostIP, portNum)] = strconv.Itoa(containerPort)
}
}
subsets = endpoints.RepackSubsets(subsets)
// See if there's actually an update here.
currentEndpoints, err := e.client.Endpoints(service.Namespace).Get(service.Name)
if err != nil {
if errors.IsNotFound(err) {
currentEndpoints = &api.Endpoints{
ObjectMeta: api.ObjectMeta{
Name: service.Name,
Labels: service.Labels,
},
}
} else {
glog.Errorf("Error getting endpoints: %v", err)
e.queue.Add(key) // Retry
return
}
}
if reflect.DeepEqual(currentEndpoints.Subsets, subsets) && reflect.DeepEqual(currentEndpoints.Labels, service.Labels) {
glog.V(5).Infof("Endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
return
}
newEndpoints := currentEndpoints
newEndpoints.Subsets = subsets
newEndpoints.Labels = service.Labels
if newEndpoints.Annotations == nil {
newEndpoints.Annotations = map[string]string{}
}
for hostIpPort, containerPort := range containerPortAnnotations {
newEndpoints.Annotations[hostIpPort] = containerPort
}
if len(currentEndpoints.ResourceVersion) == 0 {
// No previous endpoints, create them
_, err = e.client.Endpoints(service.Namespace).Create(newEndpoints)
} else {
// Pre-existing
_, err = e.client.Endpoints(service.Namespace).Update(newEndpoints)
}
if err != nil {
glog.Errorf("Error updating endpoints: %v", err)
e.queue.Add(key) // Retry
}
}
// checkLeftoverEndpoints lists all currently existing endpoints and adds their
// service to the queue. This will detect endpoints that exist with no
// corresponding service; these endpoints need to be deleted. We only need to
// do this once on startup, because in steady-state these are detected (but
// some stragglers could have been left behind if the endpoint controller
// reboots).
func (e *endpointController) checkLeftoverEndpoints() {
list, err := e.client.Endpoints(api.NamespaceAll).List(api.ListOptions{})
if err != nil {
glog.Errorf("Unable to list endpoints (%v); orphaned endpoints will not be cleaned up. (They're pretty harmless, but you can restart this component if you want another attempt made.)", err)
return
}
for i := range list.Items {
ep := &list.Items[i]
key, err := keyFunc(ep)
if err != nil {
glog.Errorf("Unable to get key for endpoint %#v", ep)
continue
}
e.queue.Add(key)
}
}
// findPort locates the container port for the given pod and portName. If the
// targetPort is a number, use that. If the targetPort is a string, look that
// string up in all named ports in all containers in the target pod. If no
// match is found, fail.
//
// HACK(jdef): return the HostPort in addition to the ContainerPort for generic mesos compatibility
func findPort(pod *api.Pod, svcPort *api.ServicePort) (int, int, error) {
portName := svcPort.TargetPort
switch portName.Type {
case intstr.String:
name := portName.StrVal
for _, container := range pod.Spec.Containers {
for _, port := range container.Ports {
if port.Name == name && port.Protocol == svcPort.Protocol {
hostPort, err := findMappedPortName(pod, port.Protocol, name)
return hostPort, port.ContainerPort, err
}
}
}
case intstr.Int:
// HACK(jdef): slightly different semantics from upstream here:
// we ensure that if the user spec'd a port in the service that
// it actually maps to a host-port assigned to the pod. upstream
// doesn't check this and happily returns the container port spec'd
// in the service, but that doesn't align w/ mesos port mgmt.
p := portName.IntValue()
for _, container := range pod.Spec.Containers {
for _, port := range container.Ports {
if port.ContainerPort == p && port.Protocol == svcPort.Protocol {
hostPort, err := findMappedPort(pod, port.Protocol, p)
return hostPort, port.ContainerPort, err
}
}
}
}
return 0, 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID)
}
func findMappedPort(pod *api.Pod, protocol api.Protocol, port int) (int, error) {
if len(pod.Annotations) > 0 {
key := fmt.Sprintf(meta.PortMappingKeyFormat, string(protocol), port)
if value, found := pod.Annotations[key]; found {
return strconv.Atoi(value)
}
}
return 0, fmt.Errorf("failed to find mapped container %s port: %d", protocol, port)
}
func findMappedPortName(pod *api.Pod, protocol api.Protocol, portName string) (int, error) {
if len(pod.Annotations) > 0 {
key := fmt.Sprintf(meta.PortNameMappingKeyFormat, string(protocol), portName)
if value, found := pod.Annotations[key]; found {
return strconv.Atoi(value)
}
}
return 0, fmt.Errorf("failed to find mapped container %s port name: %q", protocol, portName)
}
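
// Illustrative sketch (not part of the original file): findPort resolves a
// service's numeric target port against a pod's container ports, then maps it
// to the Mesos-assigned host port recorded in the pod's annotations. The port
// numbers and the "31000" host port below are made up for demonstration.
func exampleFindPort() {
	pod := &api.Pod{}
	pod.Annotations = map[string]string{
		fmt.Sprintf(meta.PortMappingKeyFormat, "TCP", 8080): "31000",
	}
	pod.Spec.Containers = []api.Container{{
		Ports: []api.ContainerPort{{ContainerPort: 8080, Protocol: api.ProtocolTCP}},
	}}
	svcPort := &api.ServicePort{Protocol: api.ProtocolTCP, TargetPort: intstr.FromInt(8080)}
	hostPort, containerPort, err := findPort(pod, svcPort)
	// Expected: hostPort=31000, containerPort=8080, err=nil.
	glog.Infof("hostPort=%d containerPort=%d err=%v", hostPort, containerPort, err)
}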
| contrib/mesos/pkg/service/endpoints_controller.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00020061395480297506,
0.00017105313600040972,
0.00016181913088075817,
0.00016996618069242686,
0.000006104412932472769
] |
{
"id": 3,
"code_window": [
"\tproxier proxy.ProxyProvider,\n",
"\tbroadcaster record.EventBroadcaster,\n",
"\trecorder record.EventRecorder,\n",
"\tconntracker Conntracker,\n",
") (*ProxyServer, error) {\n",
"\treturn &ProxyServer{\n",
"\t\tClient: client,\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tproxyMode string,\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 85
} | <!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
<!-- BEGIN STRIP_FOR_RELEASE -->
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
If you are using a released version of Kubernetes, you should
refer to the docs that go with that version.
<!-- TAG RELEASE_LINK, added by the munger automatically -->
<strong>
The latest release of this document can be found
[here](http://releases.k8s.io/release-1.1/docs/admin/kube-proxy.md).
Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
</strong>
--
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## kube-proxy
### Synopsis
The Kubernetes network proxy runs on each node. It
reflects services as defined in the Kubernetes API on each node and can do simple
TCP/UDP stream forwarding or round-robin TCP/UDP forwarding across a set of backends.
Service cluster IPs and ports are currently found through Docker-links-compatible
environment variables specifying ports opened by the service proxy. There is an optional
addon that provides cluster DNS for these cluster IPs. The user must create a service
with the apiserver API to configure the proxy.
```
kube-proxy
```
### Options
```
--bind-address=0.0.0.0: The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)
--cleanup-iptables[=false]: If true cleanup iptables rules and exit.
--config-sync-period=15m0s: How often configuration from the apiserver is refreshed. Must be greater than 0.
--conntrack-max=262144: Maximum number of NAT connections to track (0 to leave as-is)
--conntrack-tcp-timeout-established=24h0m0s: Idle timeout for established TCP connections (0 to leave as-is)
--google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
--healthz-bind-address=127.0.0.1: The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
--healthz-port=10249: The port to bind the health check server. Use 0 to disable.
--hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
--iptables-sync-period=30s: How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.
--kube-api-burst=10: Burst to use while talking with kubernetes apiserver
--kube-api-qps=5: QPS to use while talking with kubernetes apiserver
--kubeconfig="": Path to kubeconfig file with authorization information (the master location is set by the master flag).
--log-flush-frequency=5s: Maximum number of seconds between log flushes
--masquerade-all[=false]: If using the pure iptables proxy, SNAT everything
--master="": The address of the Kubernetes API server (overrides any value in kubeconfig)
--oom-score-adj=-999: The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]
      --proxy-mode=userspace: Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, and the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.
--proxy-port-range=: Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
--udp-timeout=250ms: How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace
```
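
### Example

For illustration only (the master URL, kubeconfig path, and mode shown here are hypothetical, not defaults), a node might run the proxy in iptables mode against a remote API server like this:

```
kube-proxy --master=https://10.0.0.1 --kubeconfig=/var/lib/kube-proxy/kubeconfig --proxy-mode=iptables
```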
###### Auto generated by spf13/cobra on 27-Jan-2016
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
| docs/admin/kube-proxy.md | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00019091398280579597,
0.00017169717466458678,
0.00016494051669724286,
0.00017023496911861002,
0.0000075429134085425176
] |
{
"id": 3,
"code_window": [
"\tproxier proxy.ProxyProvider,\n",
"\tbroadcaster record.EventBroadcaster,\n",
"\trecorder record.EventRecorder,\n",
"\tconntracker Conntracker,\n",
") (*ProxyServer, error) {\n",
"\treturn &ProxyServer{\n",
"\t\tClient: client,\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tproxyMode string,\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 85
} | The MIT License (MIT)
Copyright (c) 2014 Nate Finch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. | Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/LICENSE | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017591071082279086,
0.00017256422142963856,
0.00017083415878005326,
0.0001709477510303259,
0.000002366789885854814
] |
{
"id": 3,
"code_window": [
"\tproxier proxy.ProxyProvider,\n",
"\tbroadcaster record.EventBroadcaster,\n",
"\trecorder record.EventRecorder,\n",
"\tconntracker Conntracker,\n",
") (*ProxyServer, error) {\n",
"\treturn &ProxyServer{\n",
"\t\tClient: client,\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tproxyMode string,\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 85
} | "VH2&H\\\/" | Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-318 | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017587248294148594,
0.00017587248294148594,
0.00017587248294148594,
0.00017587248294148594,
0
] |
{
"id": 3,
"code_window": [
"\tproxier proxy.ProxyProvider,\n",
"\tbroadcaster record.EventBroadcaster,\n",
"\trecorder record.EventRecorder,\n",
"\tconntracker Conntracker,\n",
") (*ProxyServer, error) {\n",
"\treturn &ProxyServer{\n",
"\t\tClient: client,\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tproxyMode string,\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 85
} | not_null(unknown_key, str) | Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-179 | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017498024681117386,
0.00017498024681117386,
0.00017498024681117386,
0.00017498024681117386,
0
] |
{
"id": 4,
"code_window": [
"\t\tProxier: proxier,\n",
"\t\tBroadcaster: broadcaster,\n",
"\t\tRecorder: recorder,\n",
"\t\tConntracker: conntracker,\n",
"\t}, nil\n",
"}\n",
"\n",
"// NewProxyCommand creates a *cobra.Command object with default parameters\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tProxyMode: proxyMode,\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 94
} | <!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
<!-- BEGIN STRIP_FOR_RELEASE -->
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
If you are using a released version of Kubernetes, you should
refer to the docs that go with that version.
<!-- TAG RELEASE_LINK, added by the munger automatically -->
<strong>
The latest release of this document can be found
[here](http://releases.k8s.io/release-1.1/docs/admin/kube-proxy.md).
Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
</strong>
--
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## kube-proxy
### Synopsis
The Kubernetes network proxy runs on each node. This
reflects services as defined in the Kubernetes API on each node and can do simple
TCP,UDP stream forwarding or round robin TCP,UDP forwarding across a set of backends.
Service cluster ips and ports are currently found through Docker-links-compatible
environment variables specifying ports opened by the service proxy. There is an optional
addon that provides cluster DNS for these cluster IPs. The user must create a service
with the apiserver API to configure the proxy.
```
kube-proxy
```
### Options
```
--bind-address=0.0.0.0: The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)
--cleanup-iptables[=false]: If true cleanup iptables rules and exit.
--config-sync-period=15m0s: How often configuration from the apiserver is refreshed. Must be greater than 0.
--conntrack-max=262144: Maximum number of NAT connections to track (0 to leave as-is)
--conntrack-tcp-timeout-established=24h0m0s: Idle timeout for established TCP connections (0 to leave as-is)
--google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
--healthz-bind-address=127.0.0.1: The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
--healthz-port=10249: The port to bind the health check server. Use 0 to disable.
--hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
--iptables-sync-period=30s: How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.
--kube-api-burst=10: Burst to use while talking with kubernetes apiserver
--kube-api-qps=5: QPS to use while talking with kubernetes apiserver
--kubeconfig="": Path to kubeconfig file with authorization information (the master location is set by the master flag).
--log-flush-frequency=5s: Maximum number of seconds between log flushes
--masquerade-all[=false]: If using the pure iptables proxy, SNAT everything
--master="": The address of the Kubernetes API server (overrides any value in kubeconfig)
--oom-score-adj=-999: The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]
      --proxy-mode=userspace: Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, and the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.
--proxy-port-range=: Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
--udp-timeout=250ms: How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace
```
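
### Example

For illustration only (the master URL, kubeconfig path, and mode shown here are hypothetical, not defaults), a node might run the proxy in iptables mode against a remote API server like this:

```
kube-proxy --master=https://10.0.0.1 --kubeconfig=/var/lib/kube-proxy/kubeconfig --proxy-mode=iptables
```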
###### Auto generated by spf13/cobra on 27-Jan-2016
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
| docs/admin/kube-proxy.md | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00018301495583727956,
0.00017018675862345845,
0.00016470164700876921,
0.00016861577751114964,
0.000005592851266555954
] |
{
"id": 4,
"code_window": [
"\t\tProxier: proxier,\n",
"\t\tBroadcaster: broadcaster,\n",
"\t\tRecorder: recorder,\n",
"\t\tConntracker: conntracker,\n",
"\t}, nil\n",
"}\n",
"\n",
"// NewProxyCommand creates a *cobra.Command object with default parameters\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tProxyMode: proxyMode,\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 94
} | // SIG(0)
//
// From RFC 2931:
//
// SIG(0) provides protection for DNS transactions and requests ....
// ... protection for glue records, DNS requests, protection for message headers
// on requests and responses, and protection of the overall integrity of a response.
//
// It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared
// secret approach in TSIG.
// Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and
// RSASHA512.
//
// Signing subsequent messages in multi-message sessions is not implemented.
//
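// A minimal signing sketch (illustrative only; key generation, message
// construction, and error handling are elided, and all values shown are
// placeholders):
//
//	sig := new(SIG)
//	sig.Algorithm = RSASHA256
//	sig.KeyTag = keytag             // key tag of the signing KEY RR
//	sig.SignerName = "example.org." // owner name of that KEY RR
//	sig.Inception = uint32(time.Now().Unix() - 300)
//	sig.Expiration = uint32(time.Now().Unix() + 300)
//	signedBuf, err := sig.Sign(privkey, msg)
//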
package dns
import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/rand"
"crypto/rsa"
"math/big"
"strings"
"time"
)
// Sign signs a dns.Msg. It fills the signature with the appropriate data.
// The SIG record should have the SignerName, KeyTag, Algorithm, Inception
// and Expiration set.
func (rr *SIG) Sign(k PrivateKey, m *Msg) ([]byte, error) {
if k == nil {
return nil, ErrPrivKey
}
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return nil, ErrKey
}
rr.Header().Rrtype = TypeSIG
rr.Header().Class = ClassANY
rr.Header().Ttl = 0
rr.Header().Name = "."
rr.OrigTtl = 0
rr.TypeCovered = 0
rr.Labels = 0
buf := make([]byte, m.Len()+rr.len())
mbuf, err := m.PackBuffer(buf)
if err != nil {
return nil, err
}
if &buf[0] != &mbuf[0] {
return nil, ErrBuf
}
off, err := PackRR(rr, buf, len(mbuf), nil, false)
if err != nil {
return nil, err
}
buf = buf[:off:cap(buf)]
var hash crypto.Hash
var intlen int
switch rr.Algorithm {
case DSA, RSASHA1:
hash = crypto.SHA1
case RSASHA256, ECDSAP256SHA256:
hash = crypto.SHA256
intlen = 32
case ECDSAP384SHA384:
hash = crypto.SHA384
intlen = 48
case RSASHA512:
hash = crypto.SHA512
default:
return nil, ErrAlg
}
hasher := hash.New()
// Write SIG rdata
hasher.Write(buf[len(mbuf)+1+2+2+4+2:])
// Write message
hasher.Write(buf[:len(mbuf)])
hashed := hasher.Sum(nil)
var sig []byte
switch p := k.(type) {
case *dsa.PrivateKey:
t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8)
r1, s1, err := dsa.Sign(rand.Reader, p, hashed)
if err != nil {
return nil, err
}
sig = append(sig, byte(t))
sig = append(sig, intToBytes(r1, 20)...)
sig = append(sig, intToBytes(s1, 20)...)
case *rsa.PrivateKey:
sig, err = rsa.SignPKCS1v15(rand.Reader, p, hash, hashed)
if err != nil {
return nil, err
}
case *ecdsa.PrivateKey:
r1, s1, err := ecdsa.Sign(rand.Reader, p, hashed)
if err != nil {
return nil, err
}
sig = intToBytes(r1, intlen)
sig = append(sig, intToBytes(s1, intlen)...)
default:
return nil, ErrAlg
}
rr.Signature = toBase64(sig)
buf = append(buf, sig...)
if len(buf) > int(^uint16(0)) {
return nil, ErrBuf
}
// Adjust sig data length
rdoff := len(mbuf) + 1 + 2 + 2 + 4
rdlen, _ := unpackUint16(buf, rdoff)
rdlen += uint16(len(sig))
buf[rdoff], buf[rdoff+1] = packUint16(rdlen)
// Adjust additional count
adc, _ := unpackUint16(buf, 10)
adc += 1
buf[10], buf[11] = packUint16(adc)
return buf, nil
}
// Verify validates the message buf using the key k.
// It's assumed that buf is a valid message from which rr was unpacked.
func (rr *SIG) Verify(k *KEY, buf []byte) error {
if k == nil {
return ErrKey
}
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return ErrKey
}
var hash crypto.Hash
switch rr.Algorithm {
case DSA, RSASHA1:
hash = crypto.SHA1
case RSASHA256, ECDSAP256SHA256:
hash = crypto.SHA256
case ECDSAP384SHA384:
hash = crypto.SHA384
case RSASHA512:
hash = crypto.SHA512
default:
return ErrAlg
}
hasher := hash.New()
buflen := len(buf)
qdc, _ := unpackUint16(buf, 4)
anc, _ := unpackUint16(buf, 6)
auc, _ := unpackUint16(buf, 8)
adc, offset := unpackUint16(buf, 10)
var err error
for i := uint16(0); i < qdc && offset < buflen; i++ {
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip past Type and Class
offset += 2 + 2
}
for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ {
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip past Type, Class and TTL
offset += 2 + 2 + 4
if offset+1 >= buflen {
continue
}
var rdlen uint16
rdlen, offset = unpackUint16(buf, offset)
offset += int(rdlen)
}
if offset >= buflen {
return &Error{err: "overflowing unpacking signed message"}
}
// offset should be just prior to SIG
bodyend := offset
// owner name SHOULD be root
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip Type, Class, TTL, RDLen
offset += 2 + 2 + 4 + 2
sigstart := offset
// Skip Type Covered, Algorithm, Labels, Original TTL
offset += 2 + 1 + 1 + 4
if offset+4+4 >= buflen {
return &Error{err: "overflow unpacking signed message"}
}
expire := uint32(buf[offset])<<24 | uint32(buf[offset+1])<<16 | uint32(buf[offset+2])<<8 | uint32(buf[offset+3])
offset += 4
incept := uint32(buf[offset])<<24 | uint32(buf[offset+1])<<16 | uint32(buf[offset+2])<<8 | uint32(buf[offset+3])
offset += 4
now := uint32(time.Now().Unix())
if now < incept || now > expire {
return ErrTime
}
// Skip key tag
offset += 2
var signername string
signername, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// If key has come from the DNS name compression might
// have mangled the case of the name
if strings.ToLower(signername) != strings.ToLower(k.Header().Name) {
return &Error{err: "signer name doesn't match key name"}
}
sigend := offset
hasher.Write(buf[sigstart:sigend])
hasher.Write(buf[:10])
hasher.Write([]byte{
byte((adc - 1) << 8),
byte(adc - 1),
})
hasher.Write(buf[12:bodyend])
hashed := hasher.Sum(nil)
sig := buf[sigend:]
switch k.Algorithm {
case DSA:
pk := k.publicKeyDSA()
sig = sig[1:]
r := big.NewInt(0)
r.SetBytes(sig[:len(sig)/2])
s := big.NewInt(0)
s.SetBytes(sig[len(sig)/2:])
if pk != nil {
if dsa.Verify(pk, hashed, r, s) {
return nil
}
return ErrSig
}
case RSASHA1, RSASHA256, RSASHA512:
pk := k.publicKeyRSA()
if pk != nil {
return rsa.VerifyPKCS1v15(pk, hash, hashed, sig)
}
case ECDSAP256SHA256, ECDSAP384SHA384:
pk := k.publicKeyCurve()
r := big.NewInt(0)
r.SetBytes(sig[:len(sig)/2])
s := big.NewInt(0)
s.SetBytes(sig[len(sig)/2:])
if pk != nil {
if ecdsa.Verify(pk, hashed, r, s) {
return nil
}
return ErrSig
}
}
return ErrKeyAlg
}
| Godeps/_workspace/src/github.com/miekg/dns/sig0.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017785534146241844,
0.00017270585522055626,
0.00016169359150808305,
0.00017340137856081128,
0.00000372995646102936
] |
{
"id": 4,
"code_window": [
"\t\tProxier: proxier,\n",
"\t\tBroadcaster: broadcaster,\n",
"\t\tRecorder: recorder,\n",
"\t\tConntracker: conntracker,\n",
"\t}, nil\n",
"}\n",
"\n",
"// NewProxyCommand creates a *cobra.Command object with default parameters\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tProxyMode: proxyMode,\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 94
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the metrics API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"fmt"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/metrics"
"k8s.io/kubernetes/pkg/apis/metrics/v1alpha1"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets"
)
const importPrefix = "k8s.io/kubernetes/pkg/apis/metrics"
var accessor = meta.NewAccessor()
// availableVersions lists all known external versions for this group from most preferred to least preferred
var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion}
func init() {
registered.RegisterVersions(availableVersions)
externalVersions := []unversioned.GroupVersion{}
for _, v := range availableVersions {
if registered.IsAllowedVersion(v) {
externalVersions = append(externalVersions, v)
}
}
if len(externalVersions) == 0 {
glog.V(4).Infof("No version is registered for group %v", metrics.GroupName)
return
}
if err := registered.EnableVersions(externalVersions...); err != nil {
glog.V(4).Infof("%v", err)
return
}
if err := enableVersions(externalVersions); err != nil {
glog.V(4).Infof("%v", err)
return
}
}
// TODO: enableVersions should be centralized rather than spread in each API
// group.
// We can combine registered.RegisterVersions, registered.EnableVersions and
// registered.RegisterGroup once we have moved enableVersions there.
func enableVersions(externalVersions []unversioned.GroupVersion) error {
addVersionsToScheme(externalVersions...)
preferredExternalVersion := externalVersions[0]
groupMeta := apimachinery.GroupMeta{
GroupVersion: preferredExternalVersion,
GroupVersions: externalVersions,
RESTMapper: newRESTMapper(externalVersions),
SelfLinker: runtime.SelfLinker(accessor),
InterfacesFor: interfacesFor,
}
if err := registered.RegisterGroup(groupMeta); err != nil {
return err
}
api.RegisterRESTMapper(groupMeta.RESTMapper)
return nil
}
func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
// the list of kinds that are scoped at the root of the api hierarchy
// if a kind is not enumerated here, it is assumed to have a namespace scope
rootScoped := sets.NewString()
ignoredKinds := sets.NewString()
return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
}
// interfacesFor returns the default Codec and ResourceVersioner for a given version
// string, or an error if the version is not known.
func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
switch version {
case v1alpha1.SchemeGroupVersion:
return &meta.VersionInterfaces{
ObjectConvertor: api.Scheme,
MetadataAccessor: accessor,
}, nil
default:
g, _ := registered.Group(metrics.GroupName)
return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)
}
}
func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) {
// add the internal version to Scheme
metrics.AddToScheme(api.Scheme)
// add the enabled external versions to Scheme
for _, v := range externalVersions {
if !registered.IsEnabledVersion(v) {
glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v)
continue
}
switch v {
case v1alpha1.SchemeGroupVersion:
v1alpha1.AddToScheme(api.Scheme)
}
}
}
| pkg/apis/metrics/install/install.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017983722500503063,
0.00017309722898062319,
0.0001678379630902782,
0.0001719769206829369,
0.0000032519360502192285
] |
{
"id": 4,
"code_window": [
"\t\tProxier: proxier,\n",
"\t\tBroadcaster: broadcaster,\n",
"\t\tRecorder: recorder,\n",
"\t\tConntracker: conntracker,\n",
"\t}, nil\n",
"}\n",
"\n",
"// NewProxyCommand creates a *cobra.Command object with default parameters\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tProxyMode: proxyMode,\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 94
} | // Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v2
import (
"time"
// TODO(rjnagal): Remove dependency after moving all stats structs from v1.
// using v1 now for easy conversion.
"github.com/google/cadvisor/info/v1"
)
const (
TypeName = "name"
TypeDocker = "docker"
)
type CpuSpec struct {
// Requested cpu shares. Default is 1024.
Limit uint64 `json:"limit"`
// Requested cpu hard limit. Default is unlimited (0).
// Units: milli-cpus.
MaxLimit uint64 `json:"max_limit"`
// Cpu affinity mask.
// TODO(rjnagal): Add a library to convert mask string to set of cpu bitmask.
Mask string `json:"mask,omitempty"`
}
type MemorySpec struct {
// The amount of memory requested. Default is unlimited (-1).
// Units: bytes.
Limit uint64 `json:"limit,omitempty"`
// The amount of guaranteed memory. Default is 0.
// Units: bytes.
Reservation uint64 `json:"reservation,omitempty"`
// The amount of swap space requested. Default is unlimited (-1).
// Units: bytes.
SwapLimit uint64 `json:"swap_limit,omitempty"`
}
type ContainerInfo struct {
// Describes the container.
Spec ContainerSpec `json:"spec,omitempty"`
// Historical statistics gathered from the container.
Stats []*ContainerStats `json:"stats,omitempty"`
}
type ContainerSpec struct {
// Time at which the container was created.
CreationTime time.Time `json:"creation_time,omitempty"`
// Other names by which the container is known within a certain namespace.
// This is unique within that namespace.
Aliases []string `json:"aliases,omitempty"`
// Namespace under which the aliases of a container are unique.
// An example of a namespace is "docker" for Docker containers.
Namespace string `json:"namespace,omitempty"`
// Metadata labels associated with this container.
Labels map[string]string `json:"labels,omitempty"`
// Metadata envs associated with this container. Only whitelisted envs are added.
Envs map[string]string `json:"envs,omitempty"`
HasCpu bool `json:"has_cpu"`
Cpu CpuSpec `json:"cpu,omitempty"`
HasMemory bool `json:"has_memory"`
Memory MemorySpec `json:"memory,omitempty"`
HasCustomMetrics bool `json:"has_custom_metrics"`
CustomMetrics []v1.MetricSpec `json:"custom_metrics,omitempty"`
// Following resources have no associated spec, but are being isolated.
HasNetwork bool `json:"has_network"`
HasFilesystem bool `json:"has_filesystem"`
HasDiskIo bool `json:"has_diskio"`
// Image name used for this container.
Image string `json:"image,omitempty"`
}
type DeprecatedContainerStats struct {
// The time of this stat point.
Timestamp time.Time `json:"timestamp"`
// CPU statistics
HasCpu bool `json:"has_cpu"`
// In nanoseconds (aggregated)
Cpu v1.CpuStats `json:"cpu,omitempty"`
// In nanocores per second (instantaneous)
CpuInst *CpuInstStats `json:"cpu_inst,omitempty"`
// Disk IO statistics
HasDiskIo bool `json:"has_diskio"`
DiskIo v1.DiskIoStats `json:"diskio,omitempty"`
// Memory statistics
HasMemory bool `json:"has_memory"`
Memory v1.MemoryStats `json:"memory,omitempty"`
// Network statistics
HasNetwork bool `json:"has_network"`
Network NetworkStats `json:"network,omitempty"`
// Filesystem statistics
HasFilesystem bool `json:"has_filesystem"`
Filesystem []v1.FsStats `json:"filesystem,omitempty"`
// Task load statistics
HasLoad bool `json:"has_load"`
Load v1.LoadStats `json:"load_stats,omitempty"`
// Custom Metrics
HasCustomMetrics bool `json:"has_custom_metrics"`
CustomMetrics map[string][]v1.MetricVal `json:"custom_metrics,omitempty"`
}
type ContainerStats struct {
// The time of this stat point.
Timestamp time.Time `json:"timestamp"`
// CPU statistics
// In nanoseconds (aggregated)
Cpu *v1.CpuStats `json:"cpu,omitempty"`
// In nanocores per second (instantaneous)
CpuInst *CpuInstStats `json:"cpu_inst,omitempty"`
// Disk IO statistics
DiskIo *v1.DiskIoStats `json:"diskio,omitempty"`
// Memory statistics
Memory *v1.MemoryStats `json:"memory,omitempty"`
// Network statistics
Network *NetworkStats `json:"network,omitempty"`
// Filesystem statistics
Filesystem *FilesystemStats `json:"filesystem,omitempty"`
// Task load statistics
Load *v1.LoadStats `json:"load_stats,omitempty"`
// Custom Metrics
CustomMetrics map[string][]v1.MetricVal `json:"custom_metrics,omitempty"`
}
type Percentiles struct {
// Indicates whether the stats are present or not.
// If true, values below do not have any data.
Present bool `json:"present"`
// Average over the collected sample.
Mean uint64 `json:"mean"`
// Max seen over the collected sample.
Max uint64 `json:"max"`
// 50th percentile over the collected sample.
Fifty uint64 `json:"fifty"`
// 90th percentile over the collected sample.
Ninety uint64 `json:"ninety"`
// 95th percentile over the collected sample.
NinetyFive uint64 `json:"ninetyfive"`
}
type Usage struct {
// Indicates amount of data available [0-100].
// If we have data for half a day, we'll still process DayUsage,
// but set PercentComplete to 50.
PercentComplete int32 `json:"percent_complete"`
// Mean, Max, and 90p cpu rate value in milliCpus/seconds. Converted to milliCpus to avoid floats.
Cpu Percentiles `json:"cpu"`
// Mean, Max, and 90p memory size in bytes.
Memory Percentiles `json:"memory"`
}
// latest sample collected for a container.
type InstantUsage struct {
// cpu rate in cpu milliseconds/second.
Cpu uint64 `json:"cpu"`
// Memory usage in bytes.
Memory uint64 `json:"memory"`
}
type DerivedStats struct {
// Time of generation of these stats.
Timestamp time.Time `json:"timestamp"`
// Latest instantaneous sample.
LatestUsage InstantUsage `json:"latest_usage"`
// Percentiles in last observed minute.
MinuteUsage Usage `json:"minute_usage"`
// Percentile in last hour.
HourUsage Usage `json:"hour_usage"`
// Percentile in last day.
DayUsage Usage `json:"day_usage"`
}
type FsInfo struct {
// The block device name associated with the filesystem.
Device string `json:"device"`
// Path where the filesystem is mounted.
Mountpoint string `json:"mountpoint"`
// Filesystem usage in bytes.
Capacity uint64 `json:"capacity"`
// Bytes available for non-root use.
Available uint64 `json:"available"`
// Number of bytes used on this filesystem.
Usage uint64 `json:"usage"`
// Labels associated with this filesystem.
Labels []string `json:"labels"`
}
type RequestOptions struct {
// Type of container identifier specified - "name", "dockerid", dockeralias"
IdType string `json:"type"`
// Number of stats to return
Count int `json:"count"`
// Whether to include stats for child subcontainers.
Recursive bool `json:"recursive"`
}
type ProcessInfo struct {
User string `json:"user"`
Pid int `json:"pid"`
Ppid int `json:"parent_pid"`
StartTime string `json:"start_time"`
PercentCpu float32 `json:"percent_cpu"`
PercentMemory float32 `json:"percent_mem"`
RSS uint64 `json:"rss"`
VirtualSize uint64 `json:"virtual_size"`
Status string `json:"status"`
RunningTime string `json:"running_time"`
CgroupPath string `json:"cgroup_path"`
Cmd string `json:"cmd"`
}
type TcpStat struct {
Established uint64
SynSent uint64
SynRecv uint64
FinWait1 uint64
FinWait2 uint64
TimeWait uint64
Close uint64
CloseWait uint64
LastAck uint64
Listen uint64
Closing uint64
}
type NetworkStats struct {
// Network stats by interface.
Interfaces []v1.InterfaceStats `json:"interfaces,omitempty"`
// TCP connection stats (Established, Listen...)
Tcp TcpStat `json:"tcp"`
// TCP6 connection stats (Established, Listen...)
Tcp6 TcpStat `json:"tcp6"`
}
// Instantaneous CPU stats
type CpuInstStats struct {
Usage CpuInstUsage `json:"usage"`
}
// CPU usage time statistics.
type CpuInstUsage struct {
// Total CPU usage.
// Units: nanocores per second
Total uint64 `json:"total"`
// Per CPU/core usage of the container.
// Unit: nanocores per second
PerCpu []uint64 `json:"per_cpu_usage,omitempty"`
// Time spent in user space.
// Unit: nanocores per second
User uint64 `json:"user"`
// Time spent in kernel space.
// Unit: nanocores per second
System uint64 `json:"system"`
}
// Filesystem usage statistics.
type FilesystemStats struct {
// Total number of bytes consumed by the container.
TotalUsageBytes *uint64 `json:"totalUsageBytes,omitempty"`
// Number of bytes consumed by a container through its root filesystem.
BaseUsageBytes *uint64 `json:"baseUsageBytes,omitempty"`
}
| Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.0007149972952902317,
0.0001896830217447132,
0.00016378455620724708,
0.00017170413048006594,
0.00009764082642504945
] |
{
"id": 5,
"code_window": [
"\n",
"\tconntracker := realConntracker{}\n",
"\n",
"\treturn NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker)\n",
"}\n",
"\n",
"// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).\n",
"func (s *ProxyServer) Run() error {\n",
"\t// remove iptables rules and exit\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker, proxyMode)\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "replace",
"edit_start_line_idx": 250
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
api "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait"
)
const (
endpointHttpPort = 8080
endpointUdpPort = 8081
testContainerHttpPort = 8080
clusterHttpPort = 80
clusterUdpPort = 90
nodeHttpPort = 32080
nodeUdpPort = 32081
loadBalancerHttpPort = 100
netexecImageName = "gcr.io/google_containers/netexec:1.0"
testPodName = "test-container-pod"
hostTestPodName = "host-test-container-pod"
nodePortServiceName = "node-port-service"
loadBalancerServiceName = "load-balancer-service"
enableLoadBalancerTest = false
)
type KubeProxyTestConfig struct {
testContainerPod *api.Pod
hostTestContainerPod *api.Pod
endpointPods []*api.Pod
f *Framework
nodePortService *api.Service
loadBalancerService *api.Service
externalAddrs []string
nodes []api.Node
}
var _ = Describe("KubeProxy", func() {
f := NewFramework("e2e-kubeproxy")
config := &KubeProxyTestConfig{
f: f,
}
// Slow issue #14204 (10 min)
It("should test kube-proxy [Slow]", func() {
By("cleaning up any pre-existing namespaces used by this test")
config.cleanup()
By("Setting up for the tests")
config.setup()
//TODO Need to add hit externalIPs test
By("TODO: Need to add hit externalIPs test")
By("Hit Test with All Endpoints")
config.hitAll()
config.deleteNetProxyPod()
By("Hit Test with Fewer Endpoints")
config.hitAll()
By("Deleting nodePortservice and ensuring that service cannot be hit")
config.deleteNodePortService()
config.hitNodePort(0) // expect 0 endpoints to be hit
if enableLoadBalancerTest {
By("Deleting loadBalancerService and ensuring that service cannot be hit")
config.deleteLoadBalancerService()
config.hitLoadBalancer(0) // expect 0 endpoints to be hit
}
})
})
func (config *KubeProxyTestConfig) hitAll() {
By("Hitting endpoints from host and container")
config.hitEndpoints()
By("Hitting clusterIP from host and container")
config.hitClusterIP(len(config.endpointPods))
By("Hitting nodePort from host and container")
config.hitNodePort(len(config.endpointPods))
if enableLoadBalancerTest {
By("Waiting for LoadBalancer Ingress Setup")
config.waitForLoadBalancerIngressSetup()
By("Hitting LoadBalancer")
config.hitLoadBalancer(len(config.endpointPods))
}
}
func (config *KubeProxyTestConfig) hitLoadBalancer(epCount int) {
lbIP := config.loadBalancerService.Status.LoadBalancer.Ingress[0].IP
hostNames := make(map[string]bool)
tries := epCount*epCount + 5
for i := 0; i < tries; i++ {
transport := &http.Transport{}
httpClient := createHTTPClient(transport)
resp, err := httpClient.Get(fmt.Sprintf("http://%s:%d/hostName", lbIP, loadBalancerHttpPort))
if err == nil {
defer resp.Body.Close()
hostName, err := ioutil.ReadAll(resp.Body)
if err == nil {
hostNames[string(hostName)] = true
}
}
transport.CloseIdleConnections()
}
Expect(len(hostNames)).To(BeNumerically("==", epCount), "LoadBalancer did not hit all pods")
}
func createHTTPClient(transport *http.Transport) *http.Client {
client := &http.Client{
Transport: transport,
Timeout: 5 * time.Second,
}
return client
}
func (config *KubeProxyTestConfig) hitClusterIP(epCount int) {
clusterIP := config.nodePortService.Spec.ClusterIP
tries := epCount*epCount + 15 // +15 keeps tries > 0 even when epCount == 0
By("dialing(udp) node1 --> clusterIP:clusterUdpPort")
config.dialFromNode("udp", clusterIP, clusterUdpPort, tries, epCount)
By("dialing(http) node1 --> clusterIP:clusterHttpPort")
config.dialFromNode("http", clusterIP, clusterHttpPort, tries, epCount)
By("dialing(udp) test container --> clusterIP:clusterUdpPort")
config.dialFromTestContainer("udp", clusterIP, clusterUdpPort, tries, epCount)
By("dialing(http) test container --> clusterIP:clusterHttpPort")
config.dialFromTestContainer("http", clusterIP, clusterHttpPort, tries, epCount)
By("dialing(udp) endpoint container --> clusterIP:clusterUdpPort")
config.dialFromEndpointContainer("udp", clusterIP, clusterUdpPort, tries, epCount)
By("dialing(http) endpoint container --> clusterIP:clusterHttpPort")
config.dialFromEndpointContainer("http", clusterIP, clusterHttpPort, tries, epCount)
}
func (config *KubeProxyTestConfig) hitNodePort(epCount int) {
node1_IP := config.externalAddrs[0]
tries := epCount*epCount + 15 // +15 keeps tries > 0 even when epCount == 0
By("dialing(udp) node1 --> node1:nodeUdpPort")
config.dialFromNode("udp", node1_IP, nodeUdpPort, tries, epCount)
By("dialing(http) node1 --> node1:nodeHttpPort")
config.dialFromNode("http", node1_IP, nodeHttpPort, tries, epCount)
By("dialing(udp) test container --> node1:nodeUdpPort")
config.dialFromTestContainer("udp", node1_IP, nodeUdpPort, tries, epCount)
By("dialing(http) test container --> node1:nodeHttpPort")
config.dialFromTestContainer("http", node1_IP, nodeHttpPort, tries, epCount)
By("dialing(udp) endpoint container --> node1:nodeUdpPort")
config.dialFromEndpointContainer("udp", node1_IP, nodeUdpPort, tries, epCount)
By("dialing(http) endpoint container --> node1:nodeHttpPort")
config.dialFromEndpointContainer("http", node1_IP, nodeHttpPort, tries, epCount)
// TODO: doesn't work because masquerading is not done
By("TODO: Test disabled. dialing(udp) node --> 127.0.0.1:nodeUdpPort")
//config.dialFromNode("udp", "127.0.0.1", nodeUdpPort, tries, epCount)
// TODO: doesn't work because masquerading is not done
By("Test disabled. dialing(http) node --> 127.0.0.1:nodeHttpPort")
//config.dialFromNode("http", "127.0.0.1", nodeHttpPort, tries, epCount)
node2_IP := config.externalAddrs[1]
By("dialing(udp) node1 --> node2:nodeUdpPort")
config.dialFromNode("udp", node2_IP, nodeUdpPort, tries, epCount)
By("dialing(http) node1 --> node2:nodeHttpPort")
config.dialFromNode("http", node2_IP, nodeHttpPort, tries, epCount)
}
func (config *KubeProxyTestConfig) hitEndpoints() {
for _, endpointPod := range config.endpointPods {
Expect(len(endpointPod.Status.PodIP)).To(BeNumerically(">", 0), "podIP is empty:%s", endpointPod.Status.PodIP)
By("dialing(udp) endpointPodIP:endpointUdpPort from node1")
config.dialFromNode("udp", endpointPod.Status.PodIP, endpointUdpPort, 5, 1)
By("dialing(http) endpointPodIP:endpointHttpPort from node1")
config.dialFromNode("http", endpointPod.Status.PodIP, endpointHttpPort, 5, 1)
By("dialing(udp) endpointPodIP:endpointUdpPort from test container")
config.dialFromTestContainer("udp", endpointPod.Status.PodIP, endpointUdpPort, 5, 1)
By("dialing(http) endpointPodIP:endpointHttpPort from test container")
config.dialFromTestContainer("http", endpointPod.Status.PodIP, endpointHttpPort, 5, 1)
}
}
func (config *KubeProxyTestConfig) dialFromEndpointContainer(protocol, targetIP string, targetPort, tries, expectedCount int) {
config.dialFromContainer(protocol, config.endpointPods[0].Status.PodIP, targetIP, endpointHttpPort, targetPort, tries, expectedCount)
}
func (config *KubeProxyTestConfig) dialFromTestContainer(protocol, targetIP string, targetPort, tries, expectedCount int) {
config.dialFromContainer(protocol, config.testContainerPod.Status.PodIP, targetIP, testContainerHttpPort, targetPort, tries, expectedCount)
}
func (config *KubeProxyTestConfig) dialFromContainer(protocol, containerIP, targetIP string, containerHttpPort, targetPort, tries, expectedCount int) {
cmd := fmt.Sprintf("curl -q 'http://%s:%d/dial?request=hostName&protocol=%s&host=%s&port=%d&tries=%d'",
containerIP,
containerHttpPort,
protocol,
targetIP,
targetPort,
tries)
By(fmt.Sprintf("Dialing from container. Running command:%s", cmd))
stdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd)
var output map[string][]string
err := json.Unmarshal([]byte(stdout), &output)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Could not unmarshal curl response: %s", stdout))
hostNamesMap := array2map(output["responses"])
Expect(len(hostNamesMap)).To(BeNumerically("==", expectedCount), fmt.Sprintf("Response was:%v", output))
}
func (config *KubeProxyTestConfig) dialFromNode(protocol, targetIP string, targetPort, tries, expectedCount int) {
var cmd string
if protocol == "udp" {
cmd = fmt.Sprintf("echo 'hostName' | timeout -t 3 nc -w 1 -u %s %d", targetIP, targetPort)
} else {
cmd = fmt.Sprintf("curl -s --connect-timeout 1 http://%s:%d/hostName", targetIP, targetPort)
}
forLoop := fmt.Sprintf("for i in $(seq 1 %d); do %s; echo; done | grep -v '^\\s*$' |sort | uniq -c | wc -l", tries, cmd)
By(fmt.Sprintf("Dialing from node. command:%s", forLoop))
stdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, forLoop)
Expect(strconv.Atoi(strings.TrimSpace(stdout))).To(BeNumerically("==", expectedCount))
}
func (config *KubeProxyTestConfig) createNetShellPodSpec(podName string, node string) *api.Pod {
pod := &api.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
},
ObjectMeta: api.ObjectMeta{
Name: podName,
Namespace: config.f.Namespace.Name,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "webserver",
Image: netexecImageName,
ImagePullPolicy: api.PullIfNotPresent,
Command: []string{
"/netexec",
fmt.Sprintf("--http-port=%d", endpointHttpPort),
fmt.Sprintf("--udp-port=%d", endpointUdpPort),
},
Ports: []api.ContainerPort{
{
Name: "http",
ContainerPort: endpointHttpPort,
},
{
Name: "udp",
ContainerPort: endpointUdpPort,
Protocol: api.ProtocolUDP,
},
},
},
},
NodeName: node,
},
}
return pod
}
func (config *KubeProxyTestConfig) createTestPodSpec() *api.Pod {
pod := &api.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
},
ObjectMeta: api.ObjectMeta{
Name: testPodName,
Namespace: config.f.Namespace.Name,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "webserver",
Image: netexecImageName,
ImagePullPolicy: api.PullIfNotPresent,
Command: []string{
"/netexec",
fmt.Sprintf("--http-port=%d", endpointHttpPort),
fmt.Sprintf("--udp-port=%d", endpointUdpPort),
},
Ports: []api.ContainerPort{
{
Name: "http",
ContainerPort: testContainerHttpPort,
},
},
},
},
},
}
return pod
}
func (config *KubeProxyTestConfig) createNodePortService(selector map[string]string) {
serviceSpec := &api.Service{
ObjectMeta: api.ObjectMeta{
Name: nodePortServiceName,
},
Spec: api.ServiceSpec{
Type: api.ServiceTypeNodePort,
Ports: []api.ServicePort{
{Port: clusterHttpPort, Name: "http", Protocol: api.ProtocolTCP, NodePort: nodeHttpPort, TargetPort: intstr.FromInt(endpointHttpPort)},
{Port: clusterUdpPort, Name: "udp", Protocol: api.ProtocolUDP, NodePort: nodeUdpPort, TargetPort: intstr.FromInt(endpointUdpPort)},
},
Selector: selector,
},
}
config.nodePortService = config.createService(serviceSpec)
}
func (config *KubeProxyTestConfig) deleteNodePortService() {
err := config.getServiceClient().Delete(config.nodePortService.Name)
Expect(err).NotTo(HaveOccurred(), "error while deleting NodePortService. err:%v)", err)
time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted.
}
func (config *KubeProxyTestConfig) createLoadBalancerService(selector map[string]string) {
serviceSpec := &api.Service{
ObjectMeta: api.ObjectMeta{
Name: loadBalancerServiceName,
},
Spec: api.ServiceSpec{
Type: api.ServiceTypeLoadBalancer,
Ports: []api.ServicePort{
{Port: loadBalancerHttpPort, Name: "http", Protocol: "TCP", TargetPort: intstr.FromInt(endpointHttpPort)},
},
Selector: selector,
},
}
config.createService(serviceSpec)
}
func (config *KubeProxyTestConfig) deleteLoadBalancerService() {
go func() { config.getServiceClient().Delete(config.loadBalancerService.Name) }()
time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted.
}
func (config *KubeProxyTestConfig) waitForLoadBalancerIngressSetup() {
err := wait.Poll(2*time.Second, 120*time.Second, func() (bool, error) {
service, err := config.getServiceClient().Get(loadBalancerServiceName)
if err != nil {
return false, err
} else {
if len(service.Status.LoadBalancer.Ingress) > 0 {
return true, nil
} else {
return false, fmt.Errorf("Service LoadBalancer Ingress was not setup.")
}
}
})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to setup Load Balancer Service. err:%v", err))
config.loadBalancerService, _ = config.getServiceClient().Get(loadBalancerServiceName)
}
func (config *KubeProxyTestConfig) createTestPods() {
testContainerPod := config.createTestPodSpec()
hostTestContainerPod := NewHostExecPodSpec(config.f.Namespace.Name, hostTestPodName)
config.createPod(testContainerPod)
config.createPod(hostTestContainerPod)
expectNoError(config.f.WaitForPodRunning(testContainerPod.Name))
expectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name))
var err error
config.testContainerPod, err = config.getPodClient().Get(testContainerPod.Name)
if err != nil {
Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
}
config.hostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name)
if err != nil {
Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
}
}
func (config *KubeProxyTestConfig) createService(serviceSpec *api.Service) *api.Service {
_, err := config.getServiceClient().Create(serviceSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
err = waitForService(config.f.Client, config.f.Namespace.Name, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err))
createdService, err := config.getServiceClient().Get(serviceSpec.Name)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
return createdService
}
func (config *KubeProxyTestConfig) setup() {
By("creating a selector")
selectorName := "selector-" + string(util.NewUUID())
serviceSelector := map[string]string{
selectorName: "true",
}
By("Getting node addresses")
nodeList := ListSchedulableNodesOrDie(config.f.Client)
config.externalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
if len(config.externalAddrs) < 2 {
// fall back to legacy IPs
config.externalAddrs = NodeAddresses(nodeList, api.NodeLegacyHostIP)
}
Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), "At least two nodes necessary with an external or LegacyHostIP")
config.nodes = nodeList.Items
if enableLoadBalancerTest {
By("Creating the LoadBalancer Service on top of the pods in kubernetes")
config.createLoadBalancerService(serviceSelector)
}
By("Creating the service pods in kubernetes")
podName := "netserver"
config.endpointPods = config.createNetProxyPods(podName, serviceSelector)
By("Creating the service on top of the pods in kubernetes")
config.createNodePortService(serviceSelector)
By("Creating test pods")
config.createTestPods()
}
func (config *KubeProxyTestConfig) cleanup() {
nsClient := config.getNamespacesClient()
nsList, err := nsClient.List(api.ListOptions{})
if err == nil {
for _, ns := range nsList.Items {
if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.f.Namespace.Name {
nsClient.Delete(ns.Name)
}
}
}
}
func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
nodes := ListSchedulableNodesOrDie(config.f.Client)
// create pods, one for each node
createdPods := make([]*api.Pod, 0, len(nodes.Items))
for i, n := range nodes.Items {
podName := fmt.Sprintf("%s-%d", podName, i)
pod := config.createNetShellPodSpec(podName, n.Name)
pod.ObjectMeta.Labels = selector
createdPod := config.createPod(pod)
createdPods = append(createdPods, createdPod)
}
// wait that all of them are up
runningPods := make([]*api.Pod, 0, len(nodes.Items))
for _, p := range createdPods {
expectNoError(config.f.WaitForPodRunning(p.Name))
rp, err := config.getPodClient().Get(p.Name)
expectNoError(err)
runningPods = append(runningPods, rp)
}
return runningPods
}
func (config *KubeProxyTestConfig) deleteNetProxyPod() {
pod := config.endpointPods[0]
config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0))
config.endpointPods = config.endpointPods[1:]
// wait for pod being deleted.
err := waitForPodToDisappear(config.f.Client, config.f.Namespace.Name, pod.Name, labels.Everything(), time.Second, util.ForeverTestTimeout)
if err != nil {
Failf("Failed to delete %s pod: %v", pod.Name, err)
}
// wait for endpoint being removed.
err = waitForServiceEndpointsNum(config.f.Client, config.f.Namespace.Name, nodePortServiceName, len(config.endpointPods), time.Second, util.ForeverTestTimeout)
if err != nil {
Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
}
// wait for kube-proxy to catch up with the pod being deleted.
time.Sleep(5 * time.Second)
}
func (config *KubeProxyTestConfig) createPod(pod *api.Pod) *api.Pod {
createdPod, err := config.getPodClient().Create(pod)
if err != nil {
Failf("Failed to create %s pod: %v", pod.Name, err)
}
return createdPod
}
func (config *KubeProxyTestConfig) getPodClient() client.PodInterface {
return config.f.Client.Pods(config.f.Namespace.Name)
}
func (config *KubeProxyTestConfig) getServiceClient() client.ServiceInterface {
return config.f.Client.Services(config.f.Namespace.Name)
}
func (config *KubeProxyTestConfig) getNamespacesClient() client.NamespaceInterface {
return config.f.Client.Namespaces()
}
func array2map(arr []string) map[string]bool {
retval := make(map[string]bool)
if len(arr) == 0 {
return retval
}
for _, str := range arr {
retval[str] = true
}
return retval
}
| test/e2e/kubeproxy.go | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.0008668560185469687,
0.0002598065184429288,
0.00016291573410853744,
0.0001746473863022402,
0.00016243084974121302
] |
{
"id": 5,
"code_window": [
"\n",
"\tconntracker := realConntracker{}\n",
"\n",
"\treturn NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker)\n",
"}\n",
"\n",
"// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).\n",
"func (s *ProxyServer) Run() error {\n",
"\t// remove iptables rules and exit\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker, proxyMode)\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "replace",
"edit_start_line_idx": 250
} | package timeutils
import (
"strconv"
"strings"
"time"
)
// GetTimestamp tries to parse given string as golang duration,
// then RFC3339 time and finally as a Unix timestamp. If
// any of these were successful, it returns a Unix timestamp
// as string otherwise returns the given value back.
// In case of duration input, the returned timestamp is computed
// as the given reference time minus the amount of the duration.
func GetTimestamp(value string, reference time.Time) string {
if d, err := time.ParseDuration(value); value != "0" && err == nil {
return strconv.FormatInt(reference.Add(-d).Unix(), 10)
}
var format string
if strings.Contains(value, ".") {
format = time.RFC3339Nano
} else {
format = time.RFC3339
}
loc := time.FixedZone(time.Now().Zone())
if len(value) < len(format) {
format = format[:len(value)]
}
t, err := time.ParseInLocation(format, value, loc)
if err != nil {
return value
}
return strconv.FormatInt(t.Unix(), 10)
}
| Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/utils.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017561408458277583,
0.00017191353254020214,
0.0001675327366683632,
0.00017225368355866522,
0.0000029741390790150035
] |
{
"id": 5,
"code_window": [
"\n",
"\tconntracker := realConntracker{}\n",
"\n",
"\treturn NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker)\n",
"}\n",
"\n",
"// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).\n",
"func (s *ProxyServer) Run() error {\n",
"\t// remove iptables rules and exit\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker, proxyMode)\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "replace",
"edit_start_line_idx": 250
} | FROM google/golang:latest
ADD . /gopath/src/github.com/GoogleCloudPlatform/kubernetes/examples/guestbook-go/_src
WORKDIR /gopath/src/github.com/GoogleCloudPlatform/kubernetes/examples/guestbook-go/
RUN cd _src/ && go get && go build -o ../bin/guestbook
RUN cp _src/guestbook/Dockerfile .
CMD tar cvzf - .
| examples/guestbook-go/_src/Dockerfile | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017217628192156553,
0.00017217628192156553,
0.00017217628192156553,
0.00017217628192156553,
0
] |
{
"id": 5,
"code_window": [
"\n",
"\tconntracker := realConntracker{}\n",
"\n",
"\treturn NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker)\n",
"}\n",
"\n",
"// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).\n",
"func (s *ProxyServer) Run() error {\n",
"\t// remove iptables rules and exit\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker, proxyMode)\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "replace",
"edit_start_line_idx": 250
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// apiserver is the main api server and master for the cluster.
// it is responsible for serving the cluster management API.
package main
import (
"fmt"
"math/rand"
"os"
"runtime"
"time"
"k8s.io/kubernetes/cmd/kube-apiserver/app"
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/version/verflag"
"github.com/spf13/pflag"
)
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
rand.Seed(time.Now().UTC().UnixNano())
s := options.NewAPIServer()
s.AddFlags(pflag.CommandLine)
util.InitFlags()
util.InitLogs()
defer util.FlushLogs()
verflag.PrintAndExitIfRequested()
if err := app.Run(s); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}
| cmd/kube-apiserver/apiserver.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.928232729434967,
0.15523189306259155,
0.0001707475312286988,
0.0001786357897799462,
0.3456974923610687
] |
{
"id": 6,
"code_window": [
"\n",
"\ts.Broadcaster.StartRecordingToSink(s.Client.Events(\"\"))\n",
"\n",
"\t// Start up Healthz service if requested\n",
"\tif s.Config.HealthzPort > 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// Start up a webserver if requested\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "replace",
"edit_start_line_idx": 267
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"errors"
"net"
"net/http"
_ "net/http/pprof"
"strconv"
"time"
"k8s.io/kubernetes/cmd/kube-proxy/app/options"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
kubeclient "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/proxy"
proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
"k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/proxy/userspace"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
utildbus "k8s.io/kubernetes/pkg/util/dbus"
"k8s.io/kubernetes/pkg/util/exec"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilnet "k8s.io/kubernetes/pkg/util/net"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/oom"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
type ProxyServer struct {
Client *kubeclient.Client
Config *options.ProxyServerConfig
IptInterface utiliptables.Interface
Proxier proxy.ProxyProvider
Broadcaster record.EventBroadcaster
Recorder record.EventRecorder
Conntracker Conntracker // if nil, ignored
}
const (
proxyModeUserspace = "userspace"
proxyModeIptables = "iptables"
experimentalProxyModeAnnotation = options.ExperimentalProxyModeAnnotation
betaProxyModeAnnotation = "net.beta.kubernetes.io/proxy-mode"
)
func checkKnownProxyMode(proxyMode string) bool {
switch proxyMode {
case "", proxyModeUserspace, proxyModeIptables:
return true
}
return false
}
func NewProxyServer(
client *kubeclient.Client,
config *options.ProxyServerConfig,
iptInterface utiliptables.Interface,
proxier proxy.ProxyProvider,
broadcaster record.EventBroadcaster,
recorder record.EventRecorder,
conntracker Conntracker,
) (*ProxyServer, error) {
return &ProxyServer{
Client: client,
Config: config,
IptInterface: iptInterface,
Proxier: proxier,
Broadcaster: broadcaster,
Recorder: recorder,
Conntracker: conntracker,
}, nil
}
// NewProxyCommand creates a *cobra.Command object with default parameters
func NewProxyCommand() *cobra.Command {
s := options.NewProxyConfig()
s.AddFlags(pflag.CommandLine)
cmd := &cobra.Command{
Use: "kube-proxy",
Long: `The Kubernetes network proxy runs on each node. This
reflects services as defined in the Kubernetes API on each node and can do simple
TCP,UDP stream forwarding or round robin TCP,UDP forwarding across a set of backends.
Service cluster ips and ports are currently found through Docker-links-compatible
environment variables specifying ports opened by the service proxy. There is an optional
addon that provides cluster DNS for these cluster IPs. The user must create a service
with the apiserver API to configure the proxy.`,
Run: func(cmd *cobra.Command, args []string) {
},
}
return cmd
}
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, error) {
protocol := utiliptables.ProtocolIpv4
if net.ParseIP(config.BindAddress).To4() == nil {
protocol = utiliptables.ProtocolIpv6
}
// Create a iptables utils.
execer := exec.New()
dbus := utildbus.New()
iptInterface := utiliptables.New(execer, dbus, protocol)
// We omit creation of pretty much everything if we run in cleanup mode
if config.CleanupAndExit {
return &ProxyServer{
Config: config,
IptInterface: iptInterface,
}, nil
}
// TODO(vmarmol): Use container config for this.
var oomAdjuster *oom.OOMAdjuster
if config.OOMScoreAdj != nil {
oomAdjuster = oom.NewOOMAdjuster()
if err := oomAdjuster.ApplyOOMScoreAdj(0, *config.OOMScoreAdj); err != nil {
glog.V(2).Info(err)
}
}
if config.ResourceContainer != "" {
// Run in its own container.
if err := util.RunInResourceContainer(config.ResourceContainer); err != nil {
glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
} else {
glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
}
}
// Create a Kube Client
// define api config source
if config.Kubeconfig == "" && config.Master == "" {
glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
}
// This creates a client, first loading any specified kubeconfig
// file, and then overriding the Master flag, if non-empty.
kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
if err != nil {
return nil, err
}
// Override kubeconfig qps/burst settings from flags
kubeconfig.QPS = config.KubeAPIQPS
kubeconfig.Burst = config.KubeAPIBurst
client, err := kubeclient.New(kubeconfig)
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
// Create event recorder
hostname := nodeutil.GetHostname(config.HostnameOverride)
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})
var proxier proxy.ProxyProvider
var endpointsHandler proxyconfig.EndpointsConfigHandler
proxyMode := getProxyMode(string(config.Mode), client.Nodes(), hostname, iptInterface)
if proxyMode == proxyModeIptables {
glog.V(2).Info("Using iptables Proxier.")
proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.IPTablesSyncPeriod.Duration, config.MasqueradeAll)
if err != nil {
glog.Fatalf("Unable to create proxier: %v", err)
}
proxier = proxierIptables
endpointsHandler = proxierIptables
// No turning back. Remove artifacts that might still exist from the userspace Proxier.
glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
userspace.CleanupLeftovers(iptInterface)
} else {
glog.V(2).Info("Using userspace Proxier.")
// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
// our config.EndpointsConfigHandler.
loadBalancer := userspace.NewLoadBalancerRR()
// set EndpointsConfigHandler to our loadBalancer
endpointsHandler = loadBalancer
proxierUserspace, err := userspace.NewProxier(
loadBalancer,
net.ParseIP(config.BindAddress),
iptInterface,
*utilnet.ParsePortRangeOrDie(config.PortRange),
config.IPTablesSyncPeriod.Duration,
config.UDPIdleTimeout.Duration,
)
if err != nil {
glog.Fatalf("Unable to create proxier: %v", err)
}
proxier = proxierUserspace
// Remove artifacts from the pure-iptables Proxier.
glog.V(2).Info("Tearing down pure-iptables proxy rules. Errors here are acceptable.")
iptables.CleanupLeftovers(iptInterface)
}
iptInterface.AddReloadFunc(proxier.Sync)
// Create configs (i.e. Watches for Services and Endpoints)
// Note: RegisterHandler() calls need to happen before creation of Sources because sources
// only notify on changes, and the initial update (on process start) may be lost if no handlers
// are registered yet.
serviceConfig := proxyconfig.NewServiceConfig()
serviceConfig.RegisterHandler(proxier)
endpointsConfig := proxyconfig.NewEndpointsConfig()
endpointsConfig.RegisterHandler(endpointsHandler)
proxyconfig.NewSourceAPI(
client,
config.ConfigSyncPeriod,
serviceConfig.Channel("api"),
endpointsConfig.Channel("api"),
)
config.NodeRef = &api.ObjectReference{
Kind: "Node",
Name: hostname,
UID: types.UID(hostname),
Namespace: "",
}
conntracker := realConntracker{}
return NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker)
}
// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).
func (s *ProxyServer) Run() error {
// remove iptables rules and exit
if s.Config.CleanupAndExit {
encounteredError := userspace.CleanupLeftovers(s.IptInterface)
encounteredError = iptables.CleanupLeftovers(s.IptInterface) || encounteredError
if encounteredError {
return errors.New("Encountered an error while tearing down rules.")
}
return nil
}
s.Broadcaster.StartRecordingToSink(s.Client.Events(""))
// Start up Healthz service if requested
if s.Config.HealthzPort > 0 {
go util.Until(func() {
err := http.ListenAndServe(s.Config.HealthzBindAddress+":"+strconv.Itoa(s.Config.HealthzPort), nil)
if err != nil {
glog.Errorf("Starting health server failed: %v", err)
}
}, 5*time.Second, util.NeverStop)
}
// Tune conntrack, if requested
if s.Conntracker != nil {
if s.Config.ConntrackMax > 0 {
if err := s.Conntracker.SetMax(s.Config.ConntrackMax); err != nil {
return err
}
}
if s.Config.ConntrackTCPEstablishedTimeout.Duration > 0 {
if err := s.Conntracker.SetTCPEstablishedTimeout(int(s.Config.ConntrackTCPEstablishedTimeout.Duration / time.Second)); err != nil {
return err
}
}
}
// Birth Cry after the birth is successful
s.birthCry()
// Just loop forever for now...
s.Proxier.SyncLoop()
return nil
}
type nodeGetter interface {
Get(hostname string) (*api.Node, error)
}
func getProxyMode(proxyMode string, client nodeGetter, hostname string, iptver iptables.IptablesVersioner) string {
if proxyMode == proxyModeUserspace {
return proxyModeUserspace
} else if proxyMode == proxyModeIptables {
return tryIptablesProxy(iptver)
} else if proxyMode != "" {
glog.V(1).Infof("Flag proxy-mode=%q unknown, assuming iptables proxy", proxyMode)
return tryIptablesProxy(iptver)
}
// proxyMode == "" - choose the best option.
if client == nil {
glog.Errorf("nodeGetter is nil: assuming iptables proxy")
return tryIptablesProxy(iptver)
}
node, err := client.Get(hostname)
if err != nil {
glog.Errorf("Can't get Node %q, assuming iptables proxy: %v", hostname, err)
return tryIptablesProxy(iptver)
}
if node == nil {
glog.Errorf("Got nil Node %q, assuming iptables proxy: %v", hostname)
return tryIptablesProxy(iptver)
}
proxyMode, found := node.Annotations[betaProxyModeAnnotation]
if found {
glog.V(1).Infof("Found beta annotation %q = %q", betaProxyModeAnnotation, proxyMode)
} else {
// We already published some information about this annotation with the "experimental" name, so we will respect it.
proxyMode, found = node.Annotations[experimentalProxyModeAnnotation]
if found {
glog.V(1).Infof("Found experimental annotation %q = %q", experimentalProxyModeAnnotation, proxyMode)
}
}
if proxyMode == proxyModeUserspace {
glog.V(1).Infof("Annotation demands userspace proxy")
return proxyModeUserspace
}
return tryIptablesProxy(iptver)
}
func tryIptablesProxy(iptver iptables.IptablesVersioner) string {
var err error
// guaranteed false on error, error only necessary for debugging
useIptablesProxy, err := iptables.CanUseIptablesProxier(iptver)
if err != nil {
glog.Errorf("Can't determine whether to use iptables proxy, using userspace proxier: %v", err)
return proxyModeUserspace
}
if useIptablesProxy {
return proxyModeIptables
}
// Fallback.
glog.V(1).Infof("Can't use iptables proxy, using userspace proxier: %v", err)
return proxyModeUserspace
}
func (s *ProxyServer) birthCry() {
s.Recorder.Eventf(s.Config.NodeRef, api.EventTypeNormal, "Starting", "Starting kube-proxy.")
}
| cmd/kube-proxy/app/server.go | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.6042001247406006,
0.017588797956705093,
0.0001637928216950968,
0.00016922428039833903,
0.0979238748550415
] |
{
"id": 6,
"code_window": [
"\n",
"\ts.Broadcaster.StartRecordingToSink(s.Client.Events(\"\"))\n",
"\n",
"\t// Start up Healthz service if requested\n",
"\tif s.Config.HealthzPort > 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// Start up a webserver if requested\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "replace",
"edit_start_line_idx": 267
} | package stackevents
import (
"github.com/rackspace/gophercloud"
os "github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents"
"github.com/rackspace/gophercloud/pagination"
)
// Find retreives stack events for the given stack name.
func Find(c *gophercloud.ServiceClient, stackName string) os.FindResult {
return os.Find(c, stackName)
}
// List makes a request against the API to list resources for the given stack.
func List(c *gophercloud.ServiceClient, stackName, stackID string, opts os.ListOptsBuilder) pagination.Pager {
return os.List(c, stackName, stackID, opts)
}
// ListResourceEvents makes a request against the API to list resources for the given stack.
func ListResourceEvents(c *gophercloud.ServiceClient, stackName, stackID, resourceName string, opts os.ListResourceEventsOptsBuilder) pagination.Pager {
return os.ListResourceEvents(c, stackName, stackID, resourceName, opts)
}
// Get retrieves data for the given stack resource.
func Get(c *gophercloud.ServiceClient, stackName, stackID, resourceName, eventID string) os.GetResult {
return os.Get(c, stackName, stackID, resourceName, eventID)
}
| Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/orchestration/v1/stackevents/delegate.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.0001937684864969924,
0.00017460672825109214,
0.00016310194041579962,
0.00016694972873665392,
0.000013640170436701737
] |
{
"id": 6,
"code_window": [
"\n",
"\ts.Broadcaster.StartRecordingToSink(s.Client.Events(\"\"))\n",
"\n",
"\t// Start up Healthz service if requested\n",
"\tif s.Config.HealthzPort > 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// Start up a webserver if requested\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "replace",
"edit_start_line_idx": 267
} | <!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
<!-- BEGIN STRIP_FOR_RELEASE -->
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
If you are using a released version of Kubernetes, you should
refer to the docs that go with that version.
<!-- TAG RELEASE_LINK, added by the munger automatically -->
<strong>
The latest release of this document can be found
[here](http://releases.k8s.io/release-1.1/docs/proposals/apiserver-watch.md).
Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
</strong>
--
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## Abstract
In the current system, most watch requests sent to apiserver are redirected to
etcd. This means that for every watch request the apiserver opens a watch on
etcd.
The purpose of the proposal is to improve the overall performance of the system
by solving the following problems:
- having too many open watches on etcd
- avoiding deserializing/converting the same objects multiple times in different
watch results
In the future, we would also like to add an indexing mechanism to the watch.
Although Indexer is not part of this proposal, it is supposed to be compatible
with it - in the future Indexer should be incorporated into the proposed new
watch solution in apiserver without requiring any redesign.
## High level design
We are going to solve those problems by allowing many clients to watch the same
storage in the apiserver, without being redirected to etcd.
At the high level, apiserver will have a single watch open to etcd, watching all
the objects (of a given type) without any filtering. The changes delivered from
etcd will then be stored in a cache in apiserver. This cache is in fact a
"rolling history window" that will support clients having some amount of latency
between their list and watch calls. Thus it will have a limited capacity and
whenever a new change arrives from etcd while the cache is full, the oldest
change will be removed to make room for the new one.
When a client sends a watch request to apiserver, instead of redirecting it to
etcd, it will cause:
- registering a handler to receive all new changes coming from etcd
- iterating through the watch window, starting at the requested resourceVersion
up to the head, and sending filtered changes directly to the client, blocking
the above until this iteration has caught up
This will be done by creating a go-routine per watcher that will be responsible
for performing the above, along the lines of the sketch below.
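A minimal sketch of what such a per-watcher go-routine could look like.
Everything here is hypothetical - the `event` struct, the `cacheWatcher` type
and its channel layout are illustrative names, not existing apiserver APIs:

```go
package cache

// event is a hypothetical deserialized change received from etcd.
type event struct {
	ResourceVersion uint64
	Object          interface{}
}

// cacheWatcher serves a single client's watch from the shared history.
type cacheWatcher struct {
	input  chan event       // live events fanned out from the etcd watch
	result chan event       // filtered events delivered to the client
	filter func(event) bool // per-client filtering function
	stop   chan struct{}
}

// process first replays the cached history starting at the requested
// resourceVersion, then streams live events; registration must guarantee
// that initial and input together cover every change without gaps.
func (w *cacheWatcher) process(initial []event) {
	defer close(w.result)
	for _, ev := range initial {
		w.send(ev)
	}
	for {
		select {
		case ev, ok := <-w.input:
			if !ok {
				return
			}
			w.send(ev)
		case <-w.stop:
			return
		}
	}
}

func (w *cacheWatcher) send(ev event) {
	if !w.filter(ev) {
		return
	}
	select {
	case w.result <- ev: // deliver to the client
	case <-w.stop: // client went away; drop the event
	}
}
```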
The following section describes the proposal in more detail, analyzes some
corner cases, and divides the whole design into more fine-grained steps.
## Proposal details
We would like the cache to be __per-resource-type__ and __optional__. Thanks to
it we will be able to:
- have different cache sizes for different resources (e.g. bigger cache
[= longer history] for pods, which can significantly affect performance)
- avoid any overhead for objects that are watched very rarely (e.g. events
are almost not watched at all, but there are a lot of them)
- filter the cache for each watcher more effectively
If we decide to support watches spanning different resources in the future and
we have an efficient indexing mechanism, it should be relatively simple to unify
the cache to be common for all the resources.
The rest of this section describes the concrete steps that need to be done
to implement the proposal.
1. Since we want the watch in apiserver to be optional for different resource
types, this needs to be self-contained and hidden behind a well-defined API.
This should be a layer very close to etcd - in particular all registries:
"pkg/registry/generic/etcd" should be built on top of it.
We will solve it by extracting the interface of tools.EtcdHelper and
treating this interface as the API - the whole watch mechanism in
apiserver will be hidden behind that interface.
Thanks to it we will get an initial implementation for free and we will just
need to reimplement a few relevant functions (probably just Watch and List).
Moreover, this will not require any changes in other parts of the code.
This step is about extracting the interface of tools.EtcdHelper.
2. Create a FIFO cache with a given capacity. In its "rolling history window"
we will store two things:
- the resourceVersion of the object (being an etcdIndex)
- the object watched from etcd itself (in a deserialized form)
This should be as simple as having an array and treating it as a cyclic buffer
(a minimal sketch of such a buffer appears after this list).
Obviously the resourceVersions of objects watched from etcd will be increasing,
and they are necessary for registering a new watcher that is interested in all
the changes since a given etcdIndex.
Additionally, we should support the LIST operation, otherwise clients can never
start watching from "now". We may consider passing lists through etcd, however
this will not work once we have Indexer, so we will need that information
in memory anyway.
Thus, we should support the LIST operation from the "end of the history" - i.e.
from the moment just after the newest cached watch event. It should be
pretty simple to do, because we can incrementally update this list whenever
a new watch event arrives from etcd.
We may consider reusing existing structures cache.Store or cache.Indexer
("pkg/client/cache") but this is not a hard requirement.
3. Create the new implementation of the API, that will internally have a
single watch open to etcd and will store the data received from etcd in
the FIFO cache - this includes implementing registration of a new watcher
which will start a new go-routine responsible for iterating over the cache
and sending all the objects the watcher is interested in (by applying its
filtering function) to the watcher.
4. Add support for processing "error too old" from etcd, which will require:
- disconnecting all the watchers
- clearing the internal cache and relisting all objects from etcd
- starting to accept watchers again
5. Enable watch in apiserver for some of the existing resource types - this
should require only changes at the initialization level.
6. The next step will be to incorporate some indexing mechanism, but details
of it are TBD.
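As a concrete illustration of the cyclic buffer from step 2, here is a minimal
self-contained Go sketch; the names (`historyWindow`, `event`, `since`) and the
exact "too old" check are assumptions for illustration, not the proposed
implementation:
```go
package main

import "fmt"

// event pairs an etcd resourceVersion with the deserialized object.
type event struct {
	resourceVersion uint64
	object          interface{}
}

// historyWindow is a fixed-capacity cyclic buffer of watch events.
type historyWindow struct {
	buf   []event
	start int // index of the oldest cached event
	size  int // number of events currently stored
}

func newHistoryWindow(capacity int) *historyWindow {
	return &historyWindow{buf: make([]event, capacity)}
}

// add appends a new event, evicting the oldest one when the window is full.
func (h *historyWindow) add(e event) {
	if h.size == len(h.buf) {
		h.buf[h.start] = e
		h.start = (h.start + 1) % len(h.buf)
		return
	}
	h.buf[(h.start+h.size)%len(h.buf)] = e
	h.size++
}

// since returns all cached events newer than rv; it fails with the analogue of
// etcd's "too old resource version" when rv has already left the window.
// For simplicity it assumes consecutive resourceVersions.
func (h *historyWindow) since(rv uint64) ([]event, error) {
	if h.size > 0 && rv+1 < h.buf[h.start].resourceVersion {
		return nil, fmt.Errorf("resource version %d too old", rv)
	}
	var out []event
	for i := 0; i < h.size; i++ {
		e := h.buf[(h.start+i)%len(h.buf)]
		if e.resourceVersion > rv {
			out = append(out, e)
		}
	}
	return out, nil
}

func main() {
	w := newHistoryWindow(3)
	for rv := uint64(1); rv <= 5; rv++ {
		w.add(event{resourceVersion: rv, object: fmt.Sprintf("pod-update-%d", rv)})
	}
	events, _ := w.since(3) // rv 1 and 2 were evicted; this replays 4 and 5
	fmt.Println(events)
}
```
Running the example prints the two events newer than resourceVersion 3, while
asking for anything older than the window returns the "too old" error that
step 4 handles by relisting.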
### Future optimizations:
1. The implementation of watch in apiserver internally will open a single
watch to etcd, responsible for watching all the changes of objects of a given
resource type. However, this watch can potentially expire at any time and
reconnecting can return "too old resource version". In that case relisting is
necessary. To avoid LIST requests coming from all watchers at the same time,
we can introduce an additional etcd event type:
[EtcdResync](../../pkg/storage/etcd/etcd_watcher.go#L36)
Whenever relisting is done to refresh the internal watch to etcd, an
EtcdResync event will be sent to all the watchers. It will contain the
full list of all the objects the watcher is interested in (appropriately
filtered) as the parameter of this watch event.
Thus, we need to create the EtcdResync event, extend watch.Interface and
its implementations to support it, and handle those events appropriately
in places like
[Reflector](../../pkg/client/cache/reflector.go)
However, this might turn out to be an unnecessary optimization if the
apiserver always keeps up (which is possible in the new design). We will work
out all necessary details at that point.
| docs/proposals/apiserver-watch.md | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017452651809435338,
0.00016779011639300734,
0.0001628866739338264,
0.00016848562518134713,
0.0000028960071176697966
] |
{
"id": 6,
"code_window": [
"\n",
"\ts.Broadcaster.StartRecordingToSink(s.Client.Events(\"\"))\n",
"\n",
"\t// Start up Healthz service if requested\n",
"\tif s.Config.HealthzPort > 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// Start up a webserver if requested\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "replace",
"edit_start_line_idx": 267
} | __434 | Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-283 | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017600298451725394,
0.00017600298451725394,
0.00017600298451725394,
0.00017600298451725394,
0
] |
{
"id": 7,
"code_window": [
"\tif s.Config.HealthzPort > 0 {\n",
"\t\tgo util.Until(func() {\n",
"\t\t\terr := http.ListenAndServe(s.Config.HealthzBindAddress+\":\"+strconv.Itoa(s.Config.HealthzPort), nil)\n",
"\t\t\tif err != nil {\n",
"\t\t\t\tglog.Errorf(\"Starting health server failed: %v\", err)\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\thttp.HandleFunc(\"/proxyMode\", func(w http.ResponseWriter, r *http.Request) {\n",
"\t\t\tfmt.Fprintf(w, \"%s\", s.ProxyMode)\n",
"\t\t})\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 269
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package componentconfig
import "k8s.io/kubernetes/pkg/api/unversioned"
type KubeProxyConfiguration struct {
unversioned.TypeMeta
// bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
// for all interfaces)
BindAddress string `json:"bindAddress"`
// healthzBindAddress is the IP address for the health check server to serve on,
// defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
HealthzBindAddress string `json:"healthzBindAddress"`
// healthzPort is the port to bind the health check server. Use 0 to disable.
HealthzPort int `json:"healthzPort"`
// hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
HostnameOverride string `json:"hostnameOverride"`
// iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
// '2h22m'). Must be greater than 0.
IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"`
// kubeconfigPath is the path to the kubeconfig file with authorization information (the
// master location is set by the master flag).
KubeconfigPath string `json:"kubeconfigPath"`
// masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
MasqueradeAll bool `json:"masqueradeAll"`
// master is the address of the Kubernetes API server (overrides any value in kubeconfig)
Master string `json:"master"`
// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
// the range [-1000, 1000]
OOMScoreAdj *int `json:"oomScoreAdj"`
// mode specifies which proxy mode to use.
Mode ProxyMode `json:"mode"`
// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
PortRange string `json:"portRange"`
// resourceContainer is the absolute name of the resource-only container to create and run
// the Kube-proxy in (Default: /kube-proxy).
ResourceContainer string `json:"resourceContainer"`
// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
// Must be greater than 0. Only applicable for proxyMode=userspace.
UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"`
// conntrackMax is the maximum number of NAT connections to track (0 to leave as-is).
ConntrackMax int `json:"conntrackMax"`
// conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open
// (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxyMode=userspace.
ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"`
}
// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables'
// (experimental). If blank, look at the Node object on the Kubernetes API and respect the
// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the
// best-available proxy (currently userspace, but may change in future versions). If the
// iptables proxy is selected, regardless of how, but the system's kernel or iptables
// versions are insufficient, this always falls back to the userspace proxy.
type ProxyMode string
const (
ProxyModeUserspace ProxyMode = "userspace"
ProxyModeIPTables ProxyMode = "iptables"
)
// TODO: curate the ordering and structure of this config object
type KubeletConfiguration struct {
// config is the path to the config file or directory of files
Config string `json:"config"`
// syncFrequency is the max period between synchronizing running
// containers and config
SyncFrequency unversioned.Duration `json:"syncFrequency"`
// fileCheckFrequency is the duration between checking config files for
// new data
FileCheckFrequency unversioned.Duration `json:"fileCheckFrequency"`
// httpCheckFrequency is the duration between checking http for new data
HTTPCheckFrequency unversioned.Duration `json:"httpCheckFrequency"`
// manifestURL is the URL for accessing the container manifest
ManifestURL string `json:"manifestURL"`
// manifestURLHeader is the HTTP header to use when accessing the manifest
// URL, with the key separated from the value with a ':', as in 'key:value'
ManifestURLHeader string `json:"manifestURLHeader"`
// enableServer enables the Kubelet's server
EnableServer bool `json:"enableServer"`
// address is the IP address for the Kubelet to serve on (set to 0.0.0.0
// for all interfaces)
Address string `json:"address"`
// port is the port for the Kubelet to serve on.
Port uint `json:"port"`
// readOnlyPort is the read-only port for the Kubelet to serve on with
// no authentication/authorization (set to 0 to disable)
ReadOnlyPort uint `json:"readOnlyPort"`
// tLSCertFile is the file containing x509 Certificate for HTTPS. (CA cert,
// if any, concatenated after server cert). If tlsCertFile and
// tlsPrivateKeyFile are not provided, a self-signed certificate
// and key are generated for the public address and saved to the directory
// passed to certDir.
TLSCertFile string `json:"tLSCertFile"`
// tLSPrivateKeyFile is the file containing the x509 private key matching
// tlsCertFile.
TLSPrivateKeyFile string `json:"tLSPrivateKeyFile"`
// certDirectory is the directory where the TLS certs are located (by
// default /var/run/kubernetes). If tlsCertFile and tlsPrivateKeyFile
// are provided, this flag will be ignored.
CertDirectory string `json:"certDirectory"`
// hostnameOverride is the hostname used to identify the kubelet instead
// of the actual hostname.
HostnameOverride string `json:"hostnameOverride"`
// podInfraContainerImage is the image whose network/ipc namespaces
// containers in each pod will use.
PodInfraContainerImage string `json:"podInfraContainerImage"`
// dockerEndpoint is the path to the docker endpoint to communicate with.
DockerEndpoint string `json:"dockerEndpoint"`
// rootDirectory is the directory path to place kubelet files (volume
// mounts,etc).
RootDirectory string `json:"rootDirectory"`
// allowPrivileged enables containers to request privileged mode.
// Defaults to false.
AllowPrivileged bool `json:"allowPrivileged"`
// hostNetworkSources is a comma-separated list of sources from which the
// Kubelet allows pods to use of host network. Defaults to "*".
HostNetworkSources string `json:"hostNetworkSources"`
// hostPIDSources is a comma-separated list of sources from which the
// Kubelet allows pods to use the host pid namespace. Defaults to "*".
HostPIDSources string `json:"hostPIDSources"`
// hostIPCSources is a comma-separated list of sources from which the
// Kubelet allows pods to use the host ipc namespace. Defaults to "*".
HostIPCSources string `json:"hostIPCSources"`
// registryPullQPS is the limit of registry pulls per second. If 0,
// unlimited. Set to 0 for no limit. Defaults to 5.0.
RegistryPullQPS float64 `json:"registryPullQPS"`
// registryBurst is the maximum size of a bursty pulls, temporarily allows
// pulls to burst to this number, while still not exceeding registryQps.
// Only used if registryQps > 0.
RegistryBurst int `json:"registryBurst"`
// eventRecordQPS is the maximum event creations per second. If 0, there
// is no limit enforced.
EventRecordQPS float32 `json:"eventRecordQPS"`
// eventBurst is the maximum size of a bursty event records, temporarily
// allows event records to burst to this number, while still not exceeding
// event-qps. Only used if eventQps > 0
EventBurst int `json:"eventBurst"`
// enableDebuggingHandlers enables server endpoints for log collection
// and local running of containers and commands
EnableDebuggingHandlers bool `json:"enableDebuggingHandlers"`
// minimumGCAge is the minimum age for a finished container before it is
// garbage collected.
MinimumGCAge unversioned.Duration `json:"minimumGCAge"`
// maxPerPodContainerCount is the maximum number of old instances to
// retain per container. Each container takes up some disk space.
MaxPerPodContainerCount int `json:"maxPerPodContainerCount"`
// maxContainerCount is the maximum number of old instances of containers
// to retain globally. Each container takes up some disk space.
MaxContainerCount int `json:"maxContainerCount"`
// cAdvisorPort is the port of the localhost cAdvisor endpoint
CAdvisorPort uint `json:"cAdvisorPort"`
// healthzPort is the port of the localhost healthz endpoint
HealthzPort int `json:"healthzPort"`
// healthzBindAddress is the IP address for the healthz server to serve
// on.
HealthzBindAddress string `json:"healthzBindAddress"`
// oomScoreAdj is The oom-score-adj value for kubelet process. Values
// must be within the range [-1000, 1000].
OOMScoreAdj int `json:"oomScoreAdj"`
// registerNode enables automatic registration with the apiserver.
RegisterNode bool `json:"registerNode"`
// clusterDomain is the DNS domain for this cluster. If set, kubelet will
// configure all containers to search this domain in addition to the
// host's search domains.
ClusterDomain string `json:"clusterDomain"`
// masterServiceNamespace is The namespace from which the kubernetes
// master services should be injected into pods.
MasterServiceNamespace string `json:"masterServiceNamespace"`
// clusterDNS is the IP address for a cluster DNS server. If set, kubelet
// will configure all containers to use this for DNS resolution in
// addition to the host's DNS servers
ClusterDNS string `json:"clusterDNS"`
// streamingConnectionIdleTimeout is the maximum time a streaming connection
// can be idle before the connection is automatically closed.
StreamingConnectionIdleTimeout unversioned.Duration `json:"streamingConnectionIdleTimeout"`
// nodeStatusUpdateFrequency is the frequency that kubelet posts node
// status to master. Note: be cautious when changing the constant, it
// must work with nodeMonitorGracePeriod in nodecontroller.
NodeStatusUpdateFrequency unversioned.Duration `json:"nodeStatusUpdateFrequency"`
// imageGCHighThresholdPercent is the percent of disk usage after which
// image garbage collection is always run.
ImageGCHighThresholdPercent int `json:"imageGCHighThresholdPercent"`
// imageGCLowThresholdPercent is the percent of disk usage before which
// image garbage collection is never run. Lowest disk usage to garbage
// collect to.
ImageGCLowThresholdPercent int `json:"imageGCLowThresholdPercent"`
// lowDiskSpaceThresholdMB is the absolute free disk space, in MB, to
// maintain. When disk space falls below this threshold, new pods would
// be rejected.
LowDiskSpaceThresholdMB int `json:"lowDiskSpaceThresholdMB"`
// networkPluginName is the name of the network plugin to be invoked for
// various events in kubelet/pod lifecycle
NetworkPluginName string `json:"networkPluginName"`
// networkPluginDir is the full path of the directory in which to search
// for network plugins
NetworkPluginDir string `json:"networkPluginDir"`
// volumePluginDir is the full path of the directory in which to search
// for additional third party volume plugins
VolumePluginDir string `json:"volumePluginDir"`
// cloudProvider is the provider for cloud services.
CloudProvider string `json:"cloudProvider,omitempty"`
// cloudConfigFile is the path to the cloud provider configuration file.
CloudConfigFile string `json:"cloudConfigFile,omitempty"`
// resourceContainer is the absolute name of the resource-only container
// to create and run the Kubelet in.
ResourceContainer string `json:"resourceContainer,omitempty"`
// cgroupRoot is the root cgroup to use for pods. This is handled by the
// container runtime on a best effort basis.
CgroupRoot string `json:"cgroupRoot,omitempty"`
// containerRuntime is the container runtime to use.
ContainerRuntime string `json:"containerRuntime"`
// rktPath is the path of the rkt binary. Leave empty to use the first rkt in
// $PATH.
RktPath string `json:"rktPath,omitempty"`
// rktStage1Image is the image to use as stage1. Local paths and
// http/https URLs are supported.
RktStage1Image string `json:"rktStage1Image,omitempty"`
// systemContainer is the resource-only container in which to place
// all non-kernel processes that are not already in a container. Empty
// for no container. Rolling back the flag requires a reboot.
SystemContainer string `json:"systemContainer"`
// configureCBR0 enables the kubelet to configure cbr0 based on
// Node.Spec.PodCIDR.
ConfigureCBR0 bool `json:"configureCbr0"`
// maxPods is the number of pods that can run on this Kubelet.
MaxPods int `json:"maxPods"`
// dockerExecHandlerName is the handler to use when executing a command
// in a container. Valid values are 'native' and 'nsenter'. Defaults to
// 'native'.
DockerExecHandlerName string `json:"dockerExecHandlerName"`
// The CIDR to use for pod IP addresses, only used in standalone mode.
// In cluster mode, this is obtained from the master.
PodCIDR string `json:"podCIDR"`
// ResolverConfig is the resolver configuration file used as the basis
// for the container DNS resolution configuration.
ResolverConfig string `json:"resolvConf"`
// cpuCFSQuota enables CPU CFS quota enforcement for containers that
// specify CPU limits
CPUCFSQuota bool `json:"cpuCFSQuota"`
// containerized should be set to true if kubelet is running in a container.
Containerized bool `json:"containerized"`
// maxOpenFiles is the number of files that can be opened by the Kubelet process.
MaxOpenFiles uint64 `json:"maxOpenFiles"`
// reconcileCIDR tells the kubelet to reconcile the node CIDR with the CIDR
// specified by the API server. No-op if register-node or configure-cbr0 is false.
ReconcileCIDR bool `json:"reconcileCIDR"`
// registerSchedulable tells the kubelet to register the node as
// schedulable. No-op if register-node is false.
RegisterSchedulable bool `json:"registerSchedulable"`
// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver
KubeAPIQPS float32 `json:"kubeAPIQPS"`
// kubeAPIBurst is the burst to allow while talking with kubernetes
// apiserver
KubeAPIBurst int `json:"kubeAPIBurst"`
// serializeImagePulls when enabled, tells the Kubelet to pull images one
// at a time. We recommend *not* changing the default value on nodes that
// run docker daemon with version < 1.9 or an Aufs storage backend.
// Issue #10959 has more details.
SerializeImagePulls bool `json:"serializeImagePulls"`
// experimentalFlannelOverlay enables experimental support for starting the
// kubelet with the default overlay network (flannel). Assumes flanneld
// is already running in client mode.
ExperimentalFlannelOverlay bool `json:"experimentalFlannelOverlay"`
// outOfDiskTransitionFrequency is the duration for which the kubelet has to
// wait before transitioning out of out-of-disk node condition status.
OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency,omitempty"`
// nodeIP is the IP address of the node. If set, the kubelet will use this IP
// address for the node.
NodeIP string `json:"nodeIP,omitempty"`
// nodeLabels to add when registering the node in the cluster.
NodeLabels map[string]string `json:"nodeLabels"`
// nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade.
NonMasqueradeCIDR string `json:"nonMasqueradeCIDR"`
}
type KubeSchedulerConfiguration struct {
// port is the port that the scheduler's http service runs on.
Port int `json:"port"`
// address is the IP address to serve on.
Address string `json:"address"`
// algorithmProvider is the scheduling algorithm provider to use.
AlgorithmProvider string `json:"algorithmProvider"`
// policyConfigFile is the filepath to the scheduler policy configuration.
PolicyConfigFile string `json:"policyConfigFile"`
// enableProfiling enables profiling via web interface.
EnableProfiling bool `json:"enableProfiling"`
// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
KubeAPIQPS float32 `json:"kubeAPIQPS"`
// kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver.
KubeAPIBurst int `json:"kubeAPIBurst"`
// schedulerName is name of the scheduler, used to select which pods
// will be processed by this scheduler, based on pod's annotation with
// key 'scheduler.alpha.kubernetes.io/name'.
SchedulerName string `json:"schedulerName"`
// leaderElection defines the configuration of leader election client.
LeaderElection LeaderElectionConfiguration `json:"leaderElection"`
}
// LeaderElectionConfiguration defines the configuration of leader election
// clients for components that can run with leader election enabled.
type LeaderElectionConfiguration struct {
// leaderElect enables a leader election client to gain leadership
// before executing the main loop. Enable this when running replicated
// components for high availability.
LeaderElect bool `json:"leaderElect"`
// leaseDuration is the duration that non-leader candidates will wait
// after observing a leadership renewal until attempting to acquire
// leadership of a led but unrenewed leader slot. This is effectively the
// maximum duration that a leader can be stopped before it is replaced
// by another candidate. This is only applicable if leader election is
// enabled.
LeaseDuration unversioned.Duration `json:"leaseDuration"`
// renewDeadline is the interval between attempts by the acting master to
// renew a leadership slot before it stops leading. This must be less
// than or equal to the lease duration. This is only applicable if leader
// election is enabled.
RenewDeadline unversioned.Duration `json:"renewDeadline"`
// retryPeriod is the duration the clients should wait between attempting
// acquisition and renewal of a leadership. This is only applicable if
// leader election is enabled.
RetryPeriod unversioned.Duration `json:"retryPeriod"`
}
| pkg/apis/componentconfig/types.go | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.015446002595126629,
0.0007827702211216092,
0.0001591543696122244,
0.00016849723760969937,
0.0026502583641558886
] |
{
"id": 7,
"code_window": [
"\tif s.Config.HealthzPort > 0 {\n",
"\t\tgo util.Until(func() {\n",
"\t\t\terr := http.ListenAndServe(s.Config.HealthzBindAddress+\":\"+strconv.Itoa(s.Config.HealthzPort), nil)\n",
"\t\t\tif err != nil {\n",
"\t\t\t\tglog.Errorf(\"Starting health server failed: %v\", err)\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\thttp.HandleFunc(\"/proxyMode\", func(w http.ResponseWriter, r *http.Request) {\n",
"\t\t\tfmt.Fprintf(w, \"%s\", s.ProxyMode)\n",
"\t\t})\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 269
} | Hello from NFS!
| test/images/volumes-tester/nfs/index.html | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00016025002696551383,
0.00016025002696551383,
0.00016025002696551383,
0.00016025002696551383,
0
] |
{
"id": 7,
"code_window": [
"\tif s.Config.HealthzPort > 0 {\n",
"\t\tgo util.Until(func() {\n",
"\t\t\terr := http.ListenAndServe(s.Config.HealthzBindAddress+\":\"+strconv.Itoa(s.Config.HealthzPort), nil)\n",
"\t\t\tif err != nil {\n",
"\t\t\t\tglog.Errorf(\"Starting health server failed: %v\", err)\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\thttp.HandleFunc(\"/proxyMode\", func(w http.ResponseWriter, r *http.Request) {\n",
"\t\t\tfmt.Fprintf(w, \"%s\", s.ProxyMode)\n",
"\t\t})\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 269
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os/exec"
"regexp"
"strings"
"time"
"github.com/golang/glog"
)
// TODO: These should really just use the GCE API client library or at least use
// better formatted output from the --format flag.
func createGCEStaticIP(name string) (string, error) {
// gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
// abshah@abhidesk:~/go/src/code.google.com/p/google-api-go-client/compute/v1$ gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
// Created [https://www.googleapis.com/compute/v1/projects/abshah-kubernetes-001/regions/us-central1/addresses/test-static-ip].
// NAME REGION ADDRESS STATUS
// test-static-ip us-central1 104.197.143.7 RESERVED
glog.Infof("Creating static IP with name %q in project %q", name, testContext.CloudConfig.ProjectID)
var outputBytes []byte
var err error
for attempts := 0; attempts < 4; attempts++ {
outputBytes, err = exec.Command("gcloud", "compute", "addresses", "create",
name, "--project", testContext.CloudConfig.ProjectID,
"--region", "us-central1", "-q").CombinedOutput()
if err == nil {
break
}
glog.Errorf("output from failed attempt to create static IP: %s", outputBytes)
time.Sleep(time.Duration(5*attempts) * time.Second)
}
if err != nil {
// Ditch the error, since the stderr in the output is what actually contains
// any useful info.
return "", fmt.Errorf("failed to create static IP: %s", outputBytes)
}
output := string(outputBytes)
if strings.Contains(output, "RESERVED") {
r := regexp.MustCompile("[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+")
staticIP := r.FindString(output)
if staticIP == "" {
return "", fmt.Errorf("static IP not found in gcloud command output: %v", output)
} else {
return staticIP, nil
}
} else {
return "", fmt.Errorf("static IP %q could not be reserved: %v", name, output)
}
}
func deleteGCEStaticIP(name string) error {
// gcloud compute --project "abshah-kubernetes-001" addresses delete "test-static-ip" --region "us-central1"
outputBytes, err := exec.Command("gcloud", "compute", "addresses", "delete",
name, "--project", testContext.CloudConfig.ProjectID,
"--region", "us-central1", "-q").CombinedOutput()
if err != nil {
// Ditch the error, since the stderr in the output is what actually contains
// any useful info.
return fmt.Errorf("failed to delete static IP %q: %v", name, string(outputBytes))
}
return nil
}
| test/e2e/google_compute.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.03570361062884331,
0.004442451987415552,
0.00016240528202615678,
0.00017778377514332533,
0.011075534857809544
] |
{
"id": 7,
"code_window": [
"\tif s.Config.HealthzPort > 0 {\n",
"\t\tgo util.Until(func() {\n",
"\t\t\terr := http.ListenAndServe(s.Config.HealthzBindAddress+\":\"+strconv.Itoa(s.Config.HealthzPort), nil)\n",
"\t\t\tif err != nil {\n",
"\t\t\t\tglog.Errorf(\"Starting health server failed: %v\", err)\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\thttp.HandleFunc(\"/proxyMode\", func(w http.ResponseWriter, r *http.Request) {\n",
"\t\t\tfmt.Fprintf(w, \"%s\", s.ProxyMode)\n",
"\t\t})\n"
],
"file_path": "cmd/kube-proxy/app/server.go",
"type": "add",
"edit_start_line_idx": 269
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// The kubelet binary is responsible for maintaining a set of containers on a particular host VM.
// It syncs data from both configuration file(s) as well as from a quorum of etcd servers.
// It then queries Docker to see what is currently running. It synchronizes the configuration data,
// with the running set of containers by starting or stopping Docker containers.
package main
import (
"fmt"
"os"
"runtime"
"k8s.io/kubernetes/cmd/kubelet/app"
"k8s.io/kubernetes/cmd/kubelet/app/options"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/version/verflag"
"github.com/spf13/pflag"
)
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
s := options.NewKubeletServer()
s.AddFlags(pflag.CommandLine)
util.InitFlags()
util.InitLogs()
defer util.FlushLogs()
verflag.PrintAndExitIfRequested()
if err := app.Run(s, nil); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}
| cmd/kubelet/kubelet.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.001844953396357596,
0.00045387307181954384,
0.00016997155034914613,
0.00017451788880862296,
0.000622143445070833
] |
{
"id": 8,
"code_window": [
" --masquerade-all[=false]: If using the pure iptables proxy, SNAT everything\n",
" --master=\"\": The address of the Kubernetes API server (overrides any value in kubeconfig)\n",
" --oom-score-adj=-999: The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]\n",
" --proxy-mode=userspace: Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.\n",
" --proxy-port-range=: Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.\n",
" --udp-timeout=250ms: How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace\n",
"```\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" --proxy-mode=: Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.\n"
],
"file_path": "docs/admin/kube-proxy.md",
"type": "replace",
"edit_start_line_idx": 73
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package options contains flags for initializing a proxy.
package options
import (
_ "net/http/pprof"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/util"
"github.com/spf13/pflag"
)
const (
ExperimentalProxyModeAnnotation = "net.experimental.kubernetes.io/proxy-mode"
)
// ProxyServerConfig configures and runs a Kubernetes proxy server
type ProxyServerConfig struct {
componentconfig.KubeProxyConfiguration
ResourceContainer string
KubeAPIQPS float32
KubeAPIBurst int
ConfigSyncPeriod time.Duration
CleanupAndExit bool
NodeRef *api.ObjectReference
Master string
Kubeconfig string
}
func NewProxyConfig() *ProxyServerConfig {
return &ProxyServerConfig{
KubeProxyConfiguration: componentconfig.KubeProxyConfiguration{
BindAddress: "0.0.0.0",
HealthzPort: 10249,
HealthzBindAddress: "127.0.0.1",
OOMScoreAdj: util.IntPtr(qos.KubeProxyOOMScoreAdj),
ResourceContainer: "/kube-proxy",
IPTablesSyncPeriod: unversioned.Duration{30 * time.Second},
UDPIdleTimeout: unversioned.Duration{250 * time.Millisecond},
Mode: componentconfig.ProxyModeUserspace,
ConntrackMax: 256 * 1024, // 4x default (64k)
ConntrackTCPEstablishedTimeout: unversioned.Duration{Duration: 24 * time.Hour}, // 1 day (1/5 default)
},
KubeAPIQPS: 5.0,
KubeAPIBurst: 10,
ConfigSyncPeriod: 15 * time.Minute,
}
}
// AddFlags adds flags for a specific ProxyServer to the specified FlagSet
func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {
fs.Var(componentconfig.IPVar{&s.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)")
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.IntVar(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port to bind the health check server. Use 0 to disable.")
fs.Var(componentconfig.IPVar{&s.HealthzBindAddress}, "healthz-bind-address", "The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)")
fs.IntVar(s.OOMScoreAdj, "oom-score-adj", util.IntPtrDerefOr(s.OOMScoreAdj, qos.KubeProxyOOMScoreAdj), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]")
fs.StringVar(&s.ResourceContainer, "resource-container", s.ResourceContainer, "Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).")
fs.MarkDeprecated("resource-container", "This feature will be removed in a later release.")
fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
fs.Var(componentconfig.PortRangeVar{&s.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.")
fs.StringVar(&s.HostnameOverride, "hostname-override", s.HostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.")
fs.Var(&s.Mode, "proxy-mode", "Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the '"+ExperimentalProxyModeAnnotation+"' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.")
fs.DurationVar(&s.IPTablesSyncPeriod.Duration, "iptables-sync-period", s.IPTablesSyncPeriod.Duration, "How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
fs.DurationVar(&s.ConfigSyncPeriod, "config-sync-period", s.ConfigSyncPeriod, "How often configuration from the apiserver is refreshed. Must be greater than 0.")
fs.BoolVar(&s.MasqueradeAll, "masquerade-all", false, "If using the pure iptables proxy, SNAT everything")
fs.BoolVar(&s.CleanupAndExit, "cleanup-iptables", false, "If true cleanup iptables rules and exit.")
fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.DurationVar(&s.UDPIdleTimeout.Duration, "udp-timeout", s.UDPIdleTimeout.Duration, "How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace")
fs.IntVar(&s.ConntrackMax, "conntrack-max", s.ConntrackMax, "Maximum number of NAT connections to track (0 to leave as-is)")
fs.DurationVar(&s.ConntrackTCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", s.ConntrackTCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)")
}
| cmd/kube-proxy/app/options/options.go | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.035166893154382706,
0.004212819971144199,
0.000161137490067631,
0.00025387905770912766,
0.010357747785747051
] |
{
"id": 8,
"code_window": [
" --masquerade-all[=false]: If using the pure iptables proxy, SNAT everything\n",
" --master=\"\": The address of the Kubernetes API server (overrides any value in kubeconfig)\n",
" --oom-score-adj=-999: The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]\n",
" --proxy-mode=userspace: Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.\n",
" --proxy-port-range=: Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.\n",
" --udp-timeout=250ms: How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace\n",
"```\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" --proxy-mode=: Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.\n"
],
"file_path": "docs/admin/kube-proxy.md",
"type": "replace",
"edit_start_line_idx": 73
} | /*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package errorutil helps make better error messages.
package errorutil
import (
"bufio"
"bytes"
"fmt"
"io"
"strings"
)
// HighlightBytePosition takes a reader and the location in bytes of a parse
// error (for instance, from json.SyntaxError.Offset) and returns the line, column,
// and pretty-printed context around the error with an arrow indicating the exact
// position of the syntax error.
func HighlightBytePosition(f io.Reader, pos int64) (line, col int, highlight string) {
line = 1
br := bufio.NewReader(f)
lastLine := ""
thisLine := new(bytes.Buffer)
for n := int64(0); n < pos; n++ {
b, err := br.ReadByte()
if err != nil {
break
}
if b == '\n' {
lastLine = thisLine.String()
thisLine.Reset()
line++
col = 1
} else {
col++
thisLine.WriteByte(b)
}
}
if line > 1 {
highlight += fmt.Sprintf("%5d: %s\n", line-1, lastLine)
}
highlight += fmt.Sprintf("%5d: %s\n", line, thisLine.String())
highlight += fmt.Sprintf("%s^\n", strings.Repeat(" ", col+5))
return
}
| Godeps/_workspace/src/github.com/camlistore/go4/errorutil/highlight.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017429603030905128,
0.00017135152302216738,
0.00016884856449905783,
0.00017182374722324312,
0.0000018904137277786504
] |
{
"id": 8,
"code_window": [
" --masquerade-all[=false]: If using the pure iptables proxy, SNAT everything\n",
" --master=\"\": The address of the Kubernetes API server (overrides any value in kubeconfig)\n",
" --oom-score-adj=-999: The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]\n",
" --proxy-mode=userspace: Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.\n",
" --proxy-port-range=: Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.\n",
" --udp-timeout=250ms: How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace\n",
"```\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" --proxy-mode=: Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.\n"
],
"file_path": "docs/admin/kube-proxy.md",
"type": "replace",
"edit_start_line_idx": 73
} | // mkerrors.sh -marm
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
// +build arm,netbsd
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs -- -marm _const.go
package unix
import "syscall"
const (
AF_APPLETALK = 0x10
AF_ARP = 0x1c
AF_BLUETOOTH = 0x1f
AF_CCITT = 0xa
AF_CHAOS = 0x5
AF_CNT = 0x15
AF_COIP = 0x14
AF_DATAKIT = 0x9
AF_DECnet = 0xc
AF_DLI = 0xd
AF_E164 = 0x1a
AF_ECMA = 0x8
AF_HYLINK = 0xf
AF_IEEE80211 = 0x20
AF_IMPLINK = 0x3
AF_INET = 0x2
AF_INET6 = 0x18
AF_IPX = 0x17
AF_ISDN = 0x1a
AF_ISO = 0x7
AF_LAT = 0xe
AF_LINK = 0x12
AF_LOCAL = 0x1
AF_MAX = 0x23
AF_MPLS = 0x21
AF_NATM = 0x1b
AF_NS = 0x6
AF_OROUTE = 0x11
AF_OSI = 0x7
AF_PUP = 0x4
AF_ROUTE = 0x22
AF_SNA = 0xb
AF_UNIX = 0x1
AF_UNSPEC = 0x0
ARPHRD_ARCNET = 0x7
ARPHRD_ETHER = 0x1
ARPHRD_FRELAY = 0xf
ARPHRD_IEEE1394 = 0x18
ARPHRD_IEEE802 = 0x6
ARPHRD_STRIP = 0x17
B0 = 0x0
B110 = 0x6e
B115200 = 0x1c200
B1200 = 0x4b0
B134 = 0x86
B14400 = 0x3840
B150 = 0x96
B1800 = 0x708
B19200 = 0x4b00
B200 = 0xc8
B230400 = 0x38400
B2400 = 0x960
B28800 = 0x7080
B300 = 0x12c
B38400 = 0x9600
B460800 = 0x70800
B4800 = 0x12c0
B50 = 0x32
B57600 = 0xe100
B600 = 0x258
B7200 = 0x1c20
B75 = 0x4b
B76800 = 0x12c00
B921600 = 0xe1000
B9600 = 0x2580
BIOCFEEDBACK = 0x8004427d
BIOCFLUSH = 0x20004268
BIOCGBLEN = 0x40044266
BIOCGDLT = 0x4004426a
BIOCGDLTLIST = 0xc0084277
BIOCGETIF = 0x4090426b
BIOCGFEEDBACK = 0x4004427c
BIOCGHDRCMPLT = 0x40044274
BIOCGRTIMEOUT = 0x400c427b
BIOCGSEESENT = 0x40044278
BIOCGSTATS = 0x4080426f
BIOCGSTATSOLD = 0x4008426f
BIOCIMMEDIATE = 0x80044270
BIOCPROMISC = 0x20004269
BIOCSBLEN = 0xc0044266
BIOCSDLT = 0x80044276
BIOCSETF = 0x80084267
BIOCSETIF = 0x8090426c
BIOCSFEEDBACK = 0x8004427d
BIOCSHDRCMPLT = 0x80044275
BIOCSRTIMEOUT = 0x800c427a
BIOCSSEESENT = 0x80044279
BIOCSTCPF = 0x80084272
BIOCSUDPF = 0x80084273
BIOCVERSION = 0x40044271
BPF_A = 0x10
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALIGNMENT = 0x4
BPF_ALIGNMENT32 = 0x4
BPF_ALU = 0x4
BPF_AND = 0x50
BPF_B = 0x10
BPF_DFLTBUFSIZE = 0x100000
BPF_DIV = 0x30
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
BPF_JA = 0x0
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
BPF_JMP = 0x5
BPF_JSET = 0x40
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
BPF_LEN = 0x80
BPF_LSH = 0x60
BPF_MAJOR_VERSION = 0x1
BPF_MAXBUFSIZE = 0x1000000
BPF_MAXINSNS = 0x200
BPF_MEM = 0x60
BPF_MEMWORDS = 0x10
BPF_MINBUFSIZE = 0x20
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_OR = 0x40
BPF_RELEASE = 0x30bb6
BPF_RET = 0x6
BPF_RSH = 0x70
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
BPF_TAX = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
BRKINT = 0x2
CFLUSH = 0xf
CLOCAL = 0x8000
CREAD = 0x800
CS5 = 0x0
CS6 = 0x100
CS7 = 0x200
CS8 = 0x300
CSIZE = 0x300
CSTART = 0x11
CSTATUS = 0x14
CSTOP = 0x13
CSTOPB = 0x400
CSUSP = 0x1a
CTL_MAXNAME = 0xc
CTL_NET = 0x4
CTL_QUERY = -0x2
DIOCBSFLUSH = 0x20006478
DLT_A429 = 0xb8
DLT_A653_ICM = 0xb9
DLT_AIRONET_HEADER = 0x78
DLT_AOS = 0xde
DLT_APPLE_IP_OVER_IEEE1394 = 0x8a
DLT_ARCNET = 0x7
DLT_ARCNET_LINUX = 0x81
DLT_ATM_CLIP = 0x13
DLT_ATM_RFC1483 = 0xb
DLT_AURORA = 0x7e
DLT_AX25 = 0x3
DLT_AX25_KISS = 0xca
DLT_BACNET_MS_TP = 0xa5
DLT_BLUETOOTH_HCI_H4 = 0xbb
DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9
DLT_CAN20B = 0xbe
DLT_CAN_SOCKETCAN = 0xe3
DLT_CHAOS = 0x5
DLT_CISCO_IOS = 0x76
DLT_C_HDLC = 0x68
DLT_C_HDLC_WITH_DIR = 0xcd
DLT_DECT = 0xdd
DLT_DOCSIS = 0x8f
DLT_ECONET = 0x73
DLT_EN10MB = 0x1
DLT_EN3MB = 0x2
DLT_ENC = 0x6d
DLT_ERF = 0xc5
DLT_ERF_ETH = 0xaf
DLT_ERF_POS = 0xb0
DLT_FC_2 = 0xe0
DLT_FC_2_WITH_FRAME_DELIMS = 0xe1
DLT_FDDI = 0xa
DLT_FLEXRAY = 0xd2
DLT_FRELAY = 0x6b
DLT_FRELAY_WITH_DIR = 0xce
DLT_GCOM_SERIAL = 0xad
DLT_GCOM_T1E1 = 0xac
DLT_GPF_F = 0xab
DLT_GPF_T = 0xaa
DLT_GPRS_LLC = 0xa9
DLT_GSMTAP_ABIS = 0xda
DLT_GSMTAP_UM = 0xd9
DLT_HDLC = 0x10
DLT_HHDLC = 0x79
DLT_HIPPI = 0xf
DLT_IBM_SN = 0x92
DLT_IBM_SP = 0x91
DLT_IEEE802 = 0x6
DLT_IEEE802_11 = 0x69
DLT_IEEE802_11_RADIO = 0x7f
DLT_IEEE802_11_RADIO_AVS = 0xa3
DLT_IEEE802_15_4 = 0xc3
DLT_IEEE802_15_4_LINUX = 0xbf
DLT_IEEE802_15_4_NONASK_PHY = 0xd7
DLT_IEEE802_16_MAC_CPS = 0xbc
DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1
DLT_IPMB = 0xc7
DLT_IPMB_LINUX = 0xd1
DLT_IPNET = 0xe2
DLT_IPV4 = 0xe4
DLT_IPV6 = 0xe5
DLT_IP_OVER_FC = 0x7a
DLT_JUNIPER_ATM1 = 0x89
DLT_JUNIPER_ATM2 = 0x87
DLT_JUNIPER_CHDLC = 0xb5
DLT_JUNIPER_ES = 0x84
DLT_JUNIPER_ETHER = 0xb2
DLT_JUNIPER_FRELAY = 0xb4
DLT_JUNIPER_GGSN = 0x85
DLT_JUNIPER_ISM = 0xc2
DLT_JUNIPER_MFR = 0x86
DLT_JUNIPER_MLFR = 0x83
DLT_JUNIPER_MLPPP = 0x82
DLT_JUNIPER_MONITOR = 0xa4
DLT_JUNIPER_PIC_PEER = 0xae
DLT_JUNIPER_PPP = 0xb3
DLT_JUNIPER_PPPOE = 0xa7
DLT_JUNIPER_PPPOE_ATM = 0xa8
DLT_JUNIPER_SERVICES = 0x88
DLT_JUNIPER_ST = 0xc8
DLT_JUNIPER_VP = 0xb7
DLT_LAPB_WITH_DIR = 0xcf
DLT_LAPD = 0xcb
DLT_LIN = 0xd4
DLT_LINUX_EVDEV = 0xd8
DLT_LINUX_IRDA = 0x90
DLT_LINUX_LAPD = 0xb1
DLT_LINUX_SLL = 0x71
DLT_LOOP = 0x6c
DLT_LTALK = 0x72
DLT_MFR = 0xb6
DLT_MOST = 0xd3
DLT_MPLS = 0xdb
DLT_MTP2 = 0x8c
DLT_MTP2_WITH_PHDR = 0x8b
DLT_MTP3 = 0x8d
DLT_NULL = 0x0
DLT_PCI_EXP = 0x7d
DLT_PFLOG = 0x75
DLT_PFSYNC = 0x12
DLT_PPI = 0xc0
DLT_PPP = 0x9
DLT_PPP_BSDOS = 0xe
DLT_PPP_ETHER = 0x33
DLT_PPP_PPPD = 0xa6
DLT_PPP_SERIAL = 0x32
DLT_PPP_WITH_DIR = 0xcc
DLT_PRISM_HEADER = 0x77
DLT_PRONET = 0x4
DLT_RAIF1 = 0xc6
DLT_RAW = 0xc
DLT_RAWAF_MASK = 0x2240000
DLT_RIO = 0x7c
DLT_SCCP = 0x8e
DLT_SITA = 0xc4
DLT_SLIP = 0x8
DLT_SLIP_BSDOS = 0xd
DLT_SUNATM = 0x7b
DLT_SYMANTEC_FIREWALL = 0x63
DLT_TZSP = 0x80
DLT_USB = 0xba
DLT_USB_LINUX = 0xbd
DLT_USB_LINUX_MMAPPED = 0xdc
DLT_WIHART = 0xdf
DLT_X2E_SERIAL = 0xd5
DLT_X2E_XORAYA = 0xd6
DT_BLK = 0x6
DT_CHR = 0x2
DT_DIR = 0x4
DT_FIFO = 0x1
DT_LNK = 0xa
DT_REG = 0x8
DT_SOCK = 0xc
DT_UNKNOWN = 0x0
DT_WHT = 0xe
ECHO = 0x8
ECHOCTL = 0x40
ECHOE = 0x2
ECHOK = 0x4
ECHOKE = 0x1
ECHONL = 0x10
ECHOPRT = 0x20
EMUL_LINUX = 0x1
EMUL_LINUX32 = 0x5
EMUL_MAXID = 0x6
ETHERCAP_JUMBO_MTU = 0x4
ETHERCAP_VLAN_HWTAGGING = 0x2
ETHERCAP_VLAN_MTU = 0x1
ETHERMIN = 0x2e
ETHERMTU = 0x5dc
ETHERMTU_JUMBO = 0x2328
ETHERTYPE_8023 = 0x4
ETHERTYPE_AARP = 0x80f3
ETHERTYPE_ACCTON = 0x8390
ETHERTYPE_AEONIC = 0x8036
ETHERTYPE_ALPHA = 0x814a
ETHERTYPE_AMBER = 0x6008
ETHERTYPE_AMOEBA = 0x8145
ETHERTYPE_APOLLO = 0x80f7
ETHERTYPE_APOLLODOMAIN = 0x8019
ETHERTYPE_APPLETALK = 0x809b
ETHERTYPE_APPLITEK = 0x80c7
ETHERTYPE_ARGONAUT = 0x803a
ETHERTYPE_ARP = 0x806
ETHERTYPE_AT = 0x809b
ETHERTYPE_ATALK = 0x809b
ETHERTYPE_ATOMIC = 0x86df
ETHERTYPE_ATT = 0x8069
ETHERTYPE_ATTSTANFORD = 0x8008
ETHERTYPE_AUTOPHON = 0x806a
ETHERTYPE_AXIS = 0x8856
ETHERTYPE_BCLOOP = 0x9003
ETHERTYPE_BOFL = 0x8102
ETHERTYPE_CABLETRON = 0x7034
ETHERTYPE_CHAOS = 0x804
ETHERTYPE_COMDESIGN = 0x806c
ETHERTYPE_COMPUGRAPHIC = 0x806d
ETHERTYPE_COUNTERPOINT = 0x8062
ETHERTYPE_CRONUS = 0x8004
ETHERTYPE_CRONUSVLN = 0x8003
ETHERTYPE_DCA = 0x1234
ETHERTYPE_DDE = 0x807b
ETHERTYPE_DEBNI = 0xaaaa
ETHERTYPE_DECAM = 0x8048
ETHERTYPE_DECCUST = 0x6006
ETHERTYPE_DECDIAG = 0x6005
ETHERTYPE_DECDNS = 0x803c
ETHERTYPE_DECDTS = 0x803e
ETHERTYPE_DECEXPER = 0x6000
ETHERTYPE_DECLAST = 0x8041
ETHERTYPE_DECLTM = 0x803f
ETHERTYPE_DECMUMPS = 0x6009
ETHERTYPE_DECNETBIOS = 0x8040
ETHERTYPE_DELTACON = 0x86de
ETHERTYPE_DIDDLE = 0x4321
ETHERTYPE_DLOG1 = 0x660
ETHERTYPE_DLOG2 = 0x661
ETHERTYPE_DN = 0x6003
ETHERTYPE_DOGFIGHT = 0x1989
ETHERTYPE_DSMD = 0x8039
ETHERTYPE_ECMA = 0x803
ETHERTYPE_ENCRYPT = 0x803d
ETHERTYPE_ES = 0x805d
ETHERTYPE_EXCELAN = 0x8010
ETHERTYPE_EXPERDATA = 0x8049
ETHERTYPE_FLIP = 0x8146
ETHERTYPE_FLOWCONTROL = 0x8808
ETHERTYPE_FRARP = 0x808
ETHERTYPE_GENDYN = 0x8068
ETHERTYPE_HAYES = 0x8130
ETHERTYPE_HIPPI_FP = 0x8180
ETHERTYPE_HITACHI = 0x8820
ETHERTYPE_HP = 0x8005
ETHERTYPE_IEEEPUP = 0xa00
ETHERTYPE_IEEEPUPAT = 0xa01
ETHERTYPE_IMLBL = 0x4c42
ETHERTYPE_IMLBLDIAG = 0x424c
ETHERTYPE_IP = 0x800
ETHERTYPE_IPAS = 0x876c
ETHERTYPE_IPV6 = 0x86dd
ETHERTYPE_IPX = 0x8137
ETHERTYPE_IPXNEW = 0x8037
ETHERTYPE_KALPANA = 0x8582
ETHERTYPE_LANBRIDGE = 0x8038
ETHERTYPE_LANPROBE = 0x8888
ETHERTYPE_LAT = 0x6004
ETHERTYPE_LBACK = 0x9000
ETHERTYPE_LITTLE = 0x8060
ETHERTYPE_LOGICRAFT = 0x8148
ETHERTYPE_LOOPBACK = 0x9000
ETHERTYPE_MATRA = 0x807a
ETHERTYPE_MAX = 0xffff
ETHERTYPE_MERIT = 0x807c
ETHERTYPE_MICP = 0x873a
ETHERTYPE_MOPDL = 0x6001
ETHERTYPE_MOPRC = 0x6002
ETHERTYPE_MOTOROLA = 0x818d
ETHERTYPE_MPLS = 0x8847
ETHERTYPE_MPLS_MCAST = 0x8848
ETHERTYPE_MUMPS = 0x813f
ETHERTYPE_NBPCC = 0x3c04
ETHERTYPE_NBPCLAIM = 0x3c09
ETHERTYPE_NBPCLREQ = 0x3c05
ETHERTYPE_NBPCLRSP = 0x3c06
ETHERTYPE_NBPCREQ = 0x3c02
ETHERTYPE_NBPCRSP = 0x3c03
ETHERTYPE_NBPDG = 0x3c07
ETHERTYPE_NBPDGB = 0x3c08
ETHERTYPE_NBPDLTE = 0x3c0a
ETHERTYPE_NBPRAR = 0x3c0c
ETHERTYPE_NBPRAS = 0x3c0b
ETHERTYPE_NBPRST = 0x3c0d
ETHERTYPE_NBPSCD = 0x3c01
ETHERTYPE_NBPVCD = 0x3c00
ETHERTYPE_NBS = 0x802
ETHERTYPE_NCD = 0x8149
ETHERTYPE_NESTAR = 0x8006
ETHERTYPE_NETBEUI = 0x8191
ETHERTYPE_NOVELL = 0x8138
ETHERTYPE_NS = 0x600
ETHERTYPE_NSAT = 0x601
ETHERTYPE_NSCOMPAT = 0x807
ETHERTYPE_NTRAILER = 0x10
ETHERTYPE_OS9 = 0x7007
ETHERTYPE_OS9NET = 0x7009
ETHERTYPE_PACER = 0x80c6
ETHERTYPE_PAE = 0x888e
ETHERTYPE_PCS = 0x4242
ETHERTYPE_PLANNING = 0x8044
ETHERTYPE_PPP = 0x880b
ETHERTYPE_PPPOE = 0x8864
ETHERTYPE_PPPOEDISC = 0x8863
ETHERTYPE_PRIMENTS = 0x7031
ETHERTYPE_PUP = 0x200
ETHERTYPE_PUPAT = 0x200
ETHERTYPE_RACAL = 0x7030
ETHERTYPE_RATIONAL = 0x8150
ETHERTYPE_RAWFR = 0x6559
ETHERTYPE_RCL = 0x1995
ETHERTYPE_RDP = 0x8739
ETHERTYPE_RETIX = 0x80f2
ETHERTYPE_REVARP = 0x8035
ETHERTYPE_SCA = 0x6007
ETHERTYPE_SECTRA = 0x86db
ETHERTYPE_SECUREDATA = 0x876d
ETHERTYPE_SGITW = 0x817e
ETHERTYPE_SG_BOUNCE = 0x8016
ETHERTYPE_SG_DIAG = 0x8013
ETHERTYPE_SG_NETGAMES = 0x8014
ETHERTYPE_SG_RESV = 0x8015
ETHERTYPE_SIMNET = 0x5208
ETHERTYPE_SLOWPROTOCOLS = 0x8809
ETHERTYPE_SNA = 0x80d5
ETHERTYPE_SNMP = 0x814c
ETHERTYPE_SONIX = 0xfaf5
ETHERTYPE_SPIDER = 0x809f
ETHERTYPE_SPRITE = 0x500
ETHERTYPE_STP = 0x8181
ETHERTYPE_TALARIS = 0x812b
ETHERTYPE_TALARISMC = 0x852b
ETHERTYPE_TCPCOMP = 0x876b
ETHERTYPE_TCPSM = 0x9002
ETHERTYPE_TEC = 0x814f
ETHERTYPE_TIGAN = 0x802f
ETHERTYPE_TRAIL = 0x1000
ETHERTYPE_TRANSETHER = 0x6558
ETHERTYPE_TYMSHARE = 0x802e
ETHERTYPE_UBBST = 0x7005
ETHERTYPE_UBDEBUG = 0x900
ETHERTYPE_UBDIAGLOOP = 0x7002
ETHERTYPE_UBDL = 0x7000
ETHERTYPE_UBNIU = 0x7001
ETHERTYPE_UBNMC = 0x7003
ETHERTYPE_VALID = 0x1600
ETHERTYPE_VARIAN = 0x80dd
ETHERTYPE_VAXELN = 0x803b
ETHERTYPE_VEECO = 0x8067
ETHERTYPE_VEXP = 0x805b
ETHERTYPE_VGLAB = 0x8131
ETHERTYPE_VINES = 0xbad
ETHERTYPE_VINESECHO = 0xbaf
ETHERTYPE_VINESLOOP = 0xbae
ETHERTYPE_VITAL = 0xff00
ETHERTYPE_VLAN = 0x8100
ETHERTYPE_VLTLMAN = 0x8080
ETHERTYPE_VPROD = 0x805c
ETHERTYPE_VURESERVED = 0x8147
ETHERTYPE_WATERLOO = 0x8130
ETHERTYPE_WELLFLEET = 0x8103
ETHERTYPE_X25 = 0x805
ETHERTYPE_X75 = 0x801
ETHERTYPE_XNSSM = 0x9001
ETHERTYPE_XTP = 0x817d
ETHER_ADDR_LEN = 0x6
ETHER_CRC_LEN = 0x4
ETHER_CRC_POLY_BE = 0x4c11db6
ETHER_CRC_POLY_LE = 0xedb88320
ETHER_HDR_LEN = 0xe
ETHER_MAX_LEN = 0x5ee
ETHER_MAX_LEN_JUMBO = 0x233a
ETHER_MIN_LEN = 0x40
ETHER_PPPOE_ENCAP_LEN = 0x8
ETHER_TYPE_LEN = 0x2
ETHER_VLAN_ENCAP_LEN = 0x4
EVFILT_AIO = 0x2
EVFILT_PROC = 0x4
EVFILT_READ = 0x0
EVFILT_SIGNAL = 0x5
EVFILT_SYSCOUNT = 0x7
EVFILT_TIMER = 0x6
EVFILT_VNODE = 0x3
EVFILT_WRITE = 0x1
EV_ADD = 0x1
EV_CLEAR = 0x20
EV_DELETE = 0x2
EV_DISABLE = 0x8
EV_ENABLE = 0x4
EV_EOF = 0x8000
EV_ERROR = 0x4000
EV_FLAG1 = 0x2000
EV_ONESHOT = 0x10
EV_SYSFLAGS = 0xf000
EXTA = 0x4b00
EXTB = 0x9600
EXTPROC = 0x800
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x100
FLUSHO = 0x800000
F_CLOSEM = 0xa
F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0xc
F_FSCTL = -0x80000000
F_FSDIRMASK = 0x70000000
F_FSIN = 0x10000000
F_FSINOUT = 0x30000000
F_FSOUT = 0x20000000
F_FSPRIV = 0x8000
F_FSVOID = 0x40000000
F_GETFD = 0x1
F_GETFL = 0x3
F_GETLK = 0x7
F_GETNOSIGPIPE = 0xd
F_GETOWN = 0x5
F_MAXFD = 0xb
F_OK = 0x0
F_PARAM_MASK = 0xfff
F_PARAM_MAX = 0xfff
F_RDLCK = 0x1
F_SETFD = 0x2
F_SETFL = 0x4
F_SETLK = 0x8
F_SETLKW = 0x9
F_SETNOSIGPIPE = 0xe
F_SETOWN = 0x6
F_UNLCK = 0x2
F_WRLCK = 0x3
HUPCL = 0x4000
ICANON = 0x100
ICMP6_FILTER = 0x12
ICRNL = 0x100
IEXTEN = 0x400
IFAN_ARRIVAL = 0x0
IFAN_DEPARTURE = 0x1
IFA_ROUTE = 0x1
IFF_ALLMULTI = 0x200
IFF_BROADCAST = 0x2
IFF_CANTCHANGE = 0x8f52
IFF_DEBUG = 0x4
IFF_LINK0 = 0x1000
IFF_LINK1 = 0x2000
IFF_LINK2 = 0x4000
IFF_LOOPBACK = 0x8
IFF_MULTICAST = 0x8000
IFF_NOARP = 0x80
IFF_NOTRAILERS = 0x20
IFF_OACTIVE = 0x400
IFF_POINTOPOINT = 0x10
IFF_PROMISC = 0x100
IFF_RUNNING = 0x40
IFF_SIMPLEX = 0x800
IFF_UP = 0x1
IFNAMSIZ = 0x10
IFT_1822 = 0x2
IFT_A12MPPSWITCH = 0x82
IFT_AAL2 = 0xbb
IFT_AAL5 = 0x31
IFT_ADSL = 0x5e
IFT_AFLANE8023 = 0x3b
IFT_AFLANE8025 = 0x3c
IFT_ARAP = 0x58
IFT_ARCNET = 0x23
IFT_ARCNETPLUS = 0x24
IFT_ASYNC = 0x54
IFT_ATM = 0x25
IFT_ATMDXI = 0x69
IFT_ATMFUNI = 0x6a
IFT_ATMIMA = 0x6b
IFT_ATMLOGICAL = 0x50
IFT_ATMRADIO = 0xbd
IFT_ATMSUBINTERFACE = 0x86
IFT_ATMVCIENDPT = 0xc2
IFT_ATMVIRTUAL = 0x95
IFT_BGPPOLICYACCOUNTING = 0xa2
IFT_BRIDGE = 0xd1
IFT_BSC = 0x53
IFT_CARP = 0xf8
IFT_CCTEMUL = 0x3d
IFT_CEPT = 0x13
IFT_CES = 0x85
IFT_CHANNEL = 0x46
IFT_CNR = 0x55
IFT_COFFEE = 0x84
IFT_COMPOSITELINK = 0x9b
IFT_DCN = 0x8d
IFT_DIGITALPOWERLINE = 0x8a
IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
IFT_DLSW = 0x4a
IFT_DOCSCABLEDOWNSTREAM = 0x80
IFT_DOCSCABLEMACLAYER = 0x7f
IFT_DOCSCABLEUPSTREAM = 0x81
IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd
IFT_DS0 = 0x51
IFT_DS0BUNDLE = 0x52
IFT_DS1FDL = 0xaa
IFT_DS3 = 0x1e
IFT_DTM = 0x8c
IFT_DVBASILN = 0xac
IFT_DVBASIOUT = 0xad
IFT_DVBRCCDOWNSTREAM = 0x93
IFT_DVBRCCMACLAYER = 0x92
IFT_DVBRCCUPSTREAM = 0x94
IFT_ECONET = 0xce
IFT_EON = 0x19
IFT_EPLRS = 0x57
IFT_ESCON = 0x49
IFT_ETHER = 0x6
IFT_FAITH = 0xf2
IFT_FAST = 0x7d
IFT_FASTETHER = 0x3e
IFT_FASTETHERFX = 0x45
IFT_FDDI = 0xf
IFT_FIBRECHANNEL = 0x38
IFT_FRAMERELAYINTERCONNECT = 0x3a
IFT_FRAMERELAYMPI = 0x5c
IFT_FRDLCIENDPT = 0xc1
IFT_FRELAY = 0x20
IFT_FRELAYDCE = 0x2c
IFT_FRF16MFRBUNDLE = 0xa3
IFT_FRFORWARD = 0x9e
IFT_G703AT2MB = 0x43
IFT_G703AT64K = 0x42
IFT_GIF = 0xf0
IFT_GIGABITETHERNET = 0x75
IFT_GR303IDT = 0xb2
IFT_GR303RDT = 0xb1
IFT_H323GATEKEEPER = 0xa4
IFT_H323PROXY = 0xa5
IFT_HDH1822 = 0x3
IFT_HDLC = 0x76
IFT_HDSL2 = 0xa8
IFT_HIPERLAN2 = 0xb7
IFT_HIPPI = 0x2f
IFT_HIPPIINTERFACE = 0x39
IFT_HOSTPAD = 0x5a
IFT_HSSI = 0x2e
IFT_HY = 0xe
IFT_IBM370PARCHAN = 0x48
IFT_IDSL = 0x9a
IFT_IEEE1394 = 0x90
IFT_IEEE80211 = 0x47
IFT_IEEE80212 = 0x37
IFT_IEEE8023ADLAG = 0xa1
IFT_IFGSN = 0x91
IFT_IMT = 0xbe
IFT_INFINIBAND = 0xc7
IFT_INTERLEAVE = 0x7c
IFT_IP = 0x7e
IFT_IPFORWARD = 0x8e
IFT_IPOVERATM = 0x72
IFT_IPOVERCDLC = 0x6d
IFT_IPOVERCLAW = 0x6e
IFT_IPSWITCH = 0x4e
IFT_ISDN = 0x3f
IFT_ISDNBASIC = 0x14
IFT_ISDNPRIMARY = 0x15
IFT_ISDNS = 0x4b
IFT_ISDNU = 0x4c
IFT_ISO88022LLC = 0x29
IFT_ISO88023 = 0x7
IFT_ISO88024 = 0x8
IFT_ISO88025 = 0x9
IFT_ISO88025CRFPINT = 0x62
IFT_ISO88025DTR = 0x56
IFT_ISO88025FIBER = 0x73
IFT_ISO88026 = 0xa
IFT_ISUP = 0xb3
IFT_L2VLAN = 0x87
IFT_L3IPVLAN = 0x88
IFT_L3IPXVLAN = 0x89
IFT_LAPB = 0x10
IFT_LAPD = 0x4d
IFT_LAPF = 0x77
IFT_LINEGROUP = 0xd2
IFT_LOCALTALK = 0x2a
IFT_LOOP = 0x18
IFT_MEDIAMAILOVERIP = 0x8b
IFT_MFSIGLINK = 0xa7
IFT_MIOX25 = 0x26
IFT_MODEM = 0x30
IFT_MPC = 0x71
IFT_MPLS = 0xa6
IFT_MPLSTUNNEL = 0x96
IFT_MSDSL = 0x8f
IFT_MVL = 0xbf
IFT_MYRINET = 0x63
IFT_NFAS = 0xaf
IFT_NSIP = 0x1b
IFT_OPTICALCHANNEL = 0xc3
IFT_OPTICALTRANSPORT = 0xc4
IFT_OTHER = 0x1
IFT_P10 = 0xc
IFT_P80 = 0xd
IFT_PARA = 0x22
IFT_PFLOG = 0xf5
IFT_PFSYNC = 0xf6
IFT_PLC = 0xae
IFT_PON155 = 0xcf
IFT_PON622 = 0xd0
IFT_POS = 0xab
IFT_PPP = 0x17
IFT_PPPMULTILINKBUNDLE = 0x6c
IFT_PROPATM = 0xc5
IFT_PROPBWAP2MP = 0xb8
IFT_PROPCNLS = 0x59
IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
IFT_PROPMUX = 0x36
IFT_PROPVIRTUAL = 0x35
IFT_PROPWIRELESSP2P = 0x9d
IFT_PTPSERIAL = 0x16
IFT_PVC = 0xf1
IFT_Q2931 = 0xc9
IFT_QLLC = 0x44
IFT_RADIOMAC = 0xbc
IFT_RADSL = 0x5f
IFT_REACHDSL = 0xc0
IFT_RFC1483 = 0x9f
IFT_RS232 = 0x21
IFT_RSRB = 0x4f
IFT_SDLC = 0x11
IFT_SDSL = 0x60
IFT_SHDSL = 0xa9
IFT_SIP = 0x1f
IFT_SIPSIG = 0xcc
IFT_SIPTG = 0xcb
IFT_SLIP = 0x1c
IFT_SMDSDXI = 0x2b
IFT_SMDSICIP = 0x34
IFT_SONET = 0x27
IFT_SONETOVERHEADCHANNEL = 0xb9
IFT_SONETPATH = 0x32
IFT_SONETVT = 0x33
IFT_SRP = 0x97
IFT_SS7SIGLINK = 0x9c
IFT_STACKTOSTACK = 0x6f
IFT_STARLAN = 0xb
IFT_STF = 0xd7
IFT_T1 = 0x12
IFT_TDLC = 0x74
IFT_TELINK = 0xc8
IFT_TERMPAD = 0x5b
IFT_TR008 = 0xb0
IFT_TRANSPHDLC = 0x7b
IFT_TUNNEL = 0x83
IFT_ULTRA = 0x1d
IFT_USB = 0xa0
IFT_V11 = 0x40
IFT_V35 = 0x2d
IFT_V36 = 0x41
IFT_V37 = 0x78
IFT_VDSL = 0x61
IFT_VIRTUALIPADDRESS = 0x70
IFT_VIRTUALTG = 0xca
IFT_VOICEDID = 0xd5
IFT_VOICEEM = 0x64
IFT_VOICEEMFGD = 0xd3
IFT_VOICEENCAP = 0x67
IFT_VOICEFGDEANA = 0xd4
IFT_VOICEFXO = 0x65
IFT_VOICEFXS = 0x66
IFT_VOICEOVERATM = 0x98
IFT_VOICEOVERCABLE = 0xc6
IFT_VOICEOVERFRAMERELAY = 0x99
IFT_VOICEOVERIP = 0x68
IFT_X213 = 0x5d
IFT_X25 = 0x5
IFT_X25DDN = 0x4
IFT_X25HUNTGROUP = 0x7a
IFT_X25MLP = 0x79
IFT_X25PLE = 0x28
IFT_XETHER = 0x1a
IGNBRK = 0x1
IGNCR = 0x80
IGNPAR = 0x4
IMAXBEL = 0x2000
INLCR = 0x40
INPCK = 0x10
IN_CLASSA_HOST = 0xffffff
IN_CLASSA_MAX = 0x80
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 0x18
IN_CLASSB_HOST = 0xffff
IN_CLASSB_MAX = 0x10000
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 0x10
IN_CLASSC_HOST = 0xff
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 0x8
IN_CLASSD_HOST = 0xfffffff
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 0x1c
IN_LOOPBACKNET = 0x7f
IPPROTO_AH = 0x33
IPPROTO_CARP = 0x70
IPPROTO_DONE = 0x101
IPPROTO_DSTOPTS = 0x3c
IPPROTO_EGP = 0x8
IPPROTO_ENCAP = 0x62
IPPROTO_EON = 0x50
IPPROTO_ESP = 0x32
IPPROTO_ETHERIP = 0x61
IPPROTO_FRAGMENT = 0x2c
IPPROTO_GGP = 0x3
IPPROTO_GRE = 0x2f
IPPROTO_HOPOPTS = 0x0
IPPROTO_ICMP = 0x1
IPPROTO_ICMPV6 = 0x3a
IPPROTO_IDP = 0x16
IPPROTO_IGMP = 0x2
IPPROTO_IP = 0x0
IPPROTO_IPCOMP = 0x6c
IPPROTO_IPIP = 0x4
IPPROTO_IPV4 = 0x4
IPPROTO_IPV6 = 0x29
IPPROTO_IPV6_ICMP = 0x3a
IPPROTO_MAX = 0x100
IPPROTO_MAXID = 0x34
IPPROTO_MOBILE = 0x37
IPPROTO_NONE = 0x3b
IPPROTO_PFSYNC = 0xf0
IPPROTO_PIM = 0x67
IPPROTO_PUP = 0xc
IPPROTO_RAW = 0xff
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
IPPROTO_TCP = 0x6
IPPROTO_TP = 0x1d
IPPROTO_UDP = 0x11
IPPROTO_VRRP = 0x70
IPV6_CHECKSUM = 0x1a
IPV6_DEFAULT_MULTICAST_HOPS = 0x1
IPV6_DEFAULT_MULTICAST_LOOP = 0x1
IPV6_DEFHLIM = 0x40
IPV6_DONTFRAG = 0x3e
IPV6_DSTOPTS = 0x32
IPV6_FAITH = 0x1d
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
IPV6_FRAGTTL = 0x78
IPV6_HLIMDEC = 0x1
IPV6_HOPLIMIT = 0x2f
IPV6_HOPOPTS = 0x31
IPV6_IPSEC_POLICY = 0x1c
IPV6_JOIN_GROUP = 0xc
IPV6_LEAVE_GROUP = 0xd
IPV6_MAXHLIM = 0xff
IPV6_MAXPACKET = 0xffff
IPV6_MMTU = 0x500
IPV6_MULTICAST_HOPS = 0xa
IPV6_MULTICAST_IF = 0x9
IPV6_MULTICAST_LOOP = 0xb
IPV6_NEXTHOP = 0x30
IPV6_PATHMTU = 0x2c
IPV6_PKTINFO = 0x2e
IPV6_PORTRANGE = 0xe
IPV6_PORTRANGE_DEFAULT = 0x0
IPV6_PORTRANGE_HIGH = 0x1
IPV6_PORTRANGE_LOW = 0x2
IPV6_RECVDSTOPTS = 0x28
IPV6_RECVHOPLIMIT = 0x25
IPV6_RECVHOPOPTS = 0x27
IPV6_RECVPATHMTU = 0x2b
IPV6_RECVPKTINFO = 0x24
IPV6_RECVRTHDR = 0x26
IPV6_RECVTCLASS = 0x39
IPV6_RTHDR = 0x33
IPV6_RTHDRDSTOPTS = 0x23
IPV6_RTHDR_LOOSE = 0x0
IPV6_RTHDR_STRICT = 0x1
IPV6_RTHDR_TYPE_0 = 0x0
IPV6_SOCKOPT_RESERVED1 = 0x3
IPV6_TCLASS = 0x3d
IPV6_UNICAST_HOPS = 0x4
IPV6_USE_MIN_MTU = 0x2a
IPV6_V6ONLY = 0x1b
IPV6_VERSION = 0x60
IPV6_VERSION_MASK = 0xf0
IP_ADD_MEMBERSHIP = 0xc
IP_DEFAULT_MULTICAST_LOOP = 0x1
IP_DEFAULT_MULTICAST_TTL = 0x1
IP_DF = 0x4000
IP_DROP_MEMBERSHIP = 0xd
IP_EF = 0x8000
IP_ERRORMTU = 0x15
IP_HDRINCL = 0x2
IP_IPSEC_POLICY = 0x16
IP_MAXPACKET = 0xffff
IP_MAX_MEMBERSHIPS = 0x14
IP_MF = 0x2000
IP_MINFRAGSIZE = 0x45
IP_MINTTL = 0x18
IP_MSS = 0x240
IP_MULTICAST_IF = 0x9
IP_MULTICAST_LOOP = 0xb
IP_MULTICAST_TTL = 0xa
IP_OFFMASK = 0x1fff
IP_OPTIONS = 0x1
IP_PORTRANGE = 0x13
IP_PORTRANGE_DEFAULT = 0x0
IP_PORTRANGE_HIGH = 0x1
IP_PORTRANGE_LOW = 0x2
IP_RECVDSTADDR = 0x7
IP_RECVIF = 0x14
IP_RECVOPTS = 0x5
IP_RECVRETOPTS = 0x6
IP_RECVTTL = 0x17
IP_RETOPTS = 0x8
IP_RF = 0x8000
IP_TOS = 0x3
IP_TTL = 0x4
ISIG = 0x80
ISTRIP = 0x20
IXANY = 0x800
IXOFF = 0x400
IXON = 0x200
LOCK_EX = 0x2
LOCK_NB = 0x4
LOCK_SH = 0x1
LOCK_UN = 0x8
MADV_DONTNEED = 0x4
MADV_FREE = 0x6
MADV_NORMAL = 0x0
MADV_RANDOM = 0x1
MADV_SEQUENTIAL = 0x2
MADV_SPACEAVAIL = 0x5
MADV_WILLNEED = 0x3
MAP_ALIGNMENT_16MB = 0x18000000
MAP_ALIGNMENT_1TB = 0x28000000
MAP_ALIGNMENT_256TB = 0x30000000
MAP_ALIGNMENT_4GB = 0x20000000
MAP_ALIGNMENT_64KB = 0x10000000
MAP_ALIGNMENT_64PB = 0x38000000
MAP_ALIGNMENT_MASK = -0x1000000
MAP_ALIGNMENT_SHIFT = 0x18
MAP_ANON = 0x1000
MAP_FILE = 0x0
MAP_FIXED = 0x10
MAP_HASSEMAPHORE = 0x200
MAP_INHERIT = 0x80
MAP_INHERIT_COPY = 0x1
MAP_INHERIT_DEFAULT = 0x1
MAP_INHERIT_DONATE_COPY = 0x3
MAP_INHERIT_NONE = 0x2
MAP_INHERIT_SHARE = 0x0
MAP_NORESERVE = 0x40
MAP_PRIVATE = 0x2
MAP_RENAME = 0x20
MAP_SHARED = 0x1
MAP_STACK = 0x2000
MAP_TRYFIXED = 0x400
MAP_WIRED = 0x800
MSG_BCAST = 0x100
MSG_CMSG_CLOEXEC = 0x800
MSG_CONTROLMBUF = 0x2000000
MSG_CTRUNC = 0x20
MSG_DONTROUTE = 0x4
MSG_DONTWAIT = 0x80
MSG_EOR = 0x8
MSG_IOVUSRSPACE = 0x4000000
MSG_LENUSRSPACE = 0x8000000
MSG_MCAST = 0x200
MSG_NAMEMBUF = 0x1000000
MSG_NBIO = 0x1000
MSG_NOSIGNAL = 0x400
MSG_OOB = 0x1
MSG_PEEK = 0x2
MSG_TRUNC = 0x10
MSG_USERFLAGS = 0xffffff
MSG_WAITALL = 0x40
NAME_MAX = 0x1ff
NET_RT_DUMP = 0x1
NET_RT_FLAGS = 0x2
NET_RT_IFLIST = 0x5
NET_RT_MAXID = 0x6
NET_RT_OIFLIST = 0x4
NET_RT_OOIFLIST = 0x3
NOFLSH = 0x80000000
NOTE_ATTRIB = 0x8
NOTE_CHILD = 0x4
NOTE_DELETE = 0x1
NOTE_EXEC = 0x20000000
NOTE_EXIT = 0x80000000
NOTE_EXTEND = 0x4
NOTE_FORK = 0x40000000
NOTE_LINK = 0x10
NOTE_LOWAT = 0x1
NOTE_PCTRLMASK = 0xf0000000
NOTE_PDATAMASK = 0xfffff
NOTE_RENAME = 0x20
NOTE_REVOKE = 0x40
NOTE_TRACK = 0x1
NOTE_TRACKERR = 0x2
NOTE_WRITE = 0x2
OCRNL = 0x10
OFIOGETBMAP = 0xc004667a
ONLCR = 0x2
ONLRET = 0x40
ONOCR = 0x20
ONOEOT = 0x8
OPOST = 0x1
O_ACCMODE = 0x3
O_ALT_IO = 0x40000
O_APPEND = 0x8
O_ASYNC = 0x40
O_CLOEXEC = 0x400000
O_CREAT = 0x200
O_DIRECT = 0x80000
O_DIRECTORY = 0x200000
O_DSYNC = 0x10000
O_EXCL = 0x800
O_EXLOCK = 0x20
O_FSYNC = 0x80
O_NDELAY = 0x4
O_NOCTTY = 0x8000
O_NOFOLLOW = 0x100
O_NONBLOCK = 0x4
O_NOSIGPIPE = 0x1000000
O_RDONLY = 0x0
O_RDWR = 0x2
O_RSYNC = 0x20000
O_SHLOCK = 0x10
O_SYNC = 0x80
O_TRUNC = 0x400
O_WRONLY = 0x1
PARENB = 0x1000
PARMRK = 0x8
PARODD = 0x2000
PENDIN = 0x20000000
PROT_EXEC = 0x4
PROT_NONE = 0x0
PROT_READ = 0x1
PROT_WRITE = 0x2
PRI_IOFLUSH = 0x7c
PRIO_PGRP = 0x1
PRIO_PROCESS = 0x0
PRIO_USER = 0x2
RLIMIT_AS = 0xa
RLIMIT_CORE = 0x4
RLIMIT_CPU = 0x0
RLIMIT_DATA = 0x2
RLIMIT_FSIZE = 0x1
RLIMIT_NOFILE = 0x8
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0x7fffffffffffffff
RTAX_AUTHOR = 0x6
RTAX_BRD = 0x7
RTAX_DST = 0x0
RTAX_GATEWAY = 0x1
RTAX_GENMASK = 0x3
RTAX_IFA = 0x5
RTAX_IFP = 0x4
RTAX_MAX = 0x9
RTAX_NETMASK = 0x2
RTAX_TAG = 0x8
RTA_AUTHOR = 0x40
RTA_BRD = 0x80
RTA_DST = 0x1
RTA_GATEWAY = 0x2
RTA_GENMASK = 0x8
RTA_IFA = 0x20
RTA_IFP = 0x10
RTA_NETMASK = 0x4
RTA_TAG = 0x100
RTF_ANNOUNCE = 0x20000
RTF_BLACKHOLE = 0x1000
RTF_CLONED = 0x2000
RTF_CLONING = 0x100
RTF_DONE = 0x40
RTF_DYNAMIC = 0x10
RTF_GATEWAY = 0x2
RTF_HOST = 0x4
RTF_LLINFO = 0x400
RTF_MASK = 0x80
RTF_MODIFIED = 0x20
RTF_PROTO1 = 0x8000
RTF_PROTO2 = 0x4000
RTF_REJECT = 0x8
RTF_SRC = 0x10000
RTF_STATIC = 0x800
RTF_UP = 0x1
RTF_XRESOLVE = 0x200
RTM_ADD = 0x1
RTM_CHANGE = 0x3
RTM_CHGADDR = 0x15
RTM_DELADDR = 0xd
RTM_DELETE = 0x2
RTM_GET = 0x4
RTM_IEEE80211 = 0x11
RTM_IFANNOUNCE = 0x10
RTM_IFINFO = 0x14
RTM_LLINFO_UPD = 0x13
RTM_LOCK = 0x8
RTM_LOSING = 0x5
RTM_MISS = 0x7
RTM_NEWADDR = 0xc
RTM_OIFINFO = 0xf
RTM_OLDADD = 0x9
RTM_OLDDEL = 0xa
RTM_OOIFINFO = 0xe
RTM_REDIRECT = 0x6
RTM_RESOLVE = 0xb
RTM_RTTUNIT = 0xf4240
RTM_SETGATE = 0x12
RTM_VERSION = 0x4
RTV_EXPIRE = 0x4
RTV_HOPCOUNT = 0x2
RTV_MTU = 0x1
RTV_RPIPE = 0x8
RTV_RTT = 0x40
RTV_RTTVAR = 0x80
RTV_SPIPE = 0x10
RTV_SSTHRESH = 0x20
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
SCM_CREDS = 0x4
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x8
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
SIOCADDMULTI = 0x80906931
SIOCADDRT = 0x8030720a
SIOCAIFADDR = 0x8040691a
SIOCALIFADDR = 0x8118691c
SIOCATMARK = 0x40047307
SIOCDELMULTI = 0x80906932
SIOCDELRT = 0x8030720b
SIOCDIFADDR = 0x80906919
SIOCDIFPHYADDR = 0x80906949
SIOCDLIFADDR = 0x8118691e
SIOCGDRVSPEC = 0xc01c697b
SIOCGETPFSYNC = 0xc09069f8
SIOCGETSGCNT = 0xc0147534
SIOCGETVIFCNT = 0xc0147533
SIOCGHIWAT = 0x40047301
SIOCGIFADDR = 0xc0906921
SIOCGIFADDRPREF = 0xc0946920
SIOCGIFALIAS = 0xc040691b
SIOCGIFBRDADDR = 0xc0906923
SIOCGIFCAP = 0xc0206976
SIOCGIFCONF = 0xc0086926
SIOCGIFDATA = 0xc0946985
SIOCGIFDLT = 0xc0906977
SIOCGIFDSTADDR = 0xc0906922
SIOCGIFFLAGS = 0xc0906911
SIOCGIFGENERIC = 0xc090693a
SIOCGIFMEDIA = 0xc0286936
SIOCGIFMETRIC = 0xc0906917
SIOCGIFMTU = 0xc090697e
SIOCGIFNETMASK = 0xc0906925
SIOCGIFPDSTADDR = 0xc0906948
SIOCGIFPSRCADDR = 0xc0906947
SIOCGLIFADDR = 0xc118691d
SIOCGLIFPHYADDR = 0xc118694b
SIOCGLINKSTR = 0xc01c6987
SIOCGLOWAT = 0x40047303
SIOCGPGRP = 0x40047309
SIOCGVH = 0xc0906983
SIOCIFCREATE = 0x8090697a
SIOCIFDESTROY = 0x80906979
SIOCIFGCLONERS = 0xc00c6978
SIOCINITIFADDR = 0xc0446984
SIOCSDRVSPEC = 0x801c697b
SIOCSETPFSYNC = 0x809069f7
SIOCSHIWAT = 0x80047300
SIOCSIFADDR = 0x8090690c
SIOCSIFADDRPREF = 0x8094691f
SIOCSIFBRDADDR = 0x80906913
SIOCSIFCAP = 0x80206975
SIOCSIFDSTADDR = 0x8090690e
SIOCSIFFLAGS = 0x80906910
SIOCSIFGENERIC = 0x80906939
SIOCSIFMEDIA = 0xc0906935
SIOCSIFMETRIC = 0x80906918
SIOCSIFMTU = 0x8090697f
SIOCSIFNETMASK = 0x80906916
SIOCSIFPHYADDR = 0x80406946
SIOCSLIFPHYADDR = 0x8118694a
SIOCSLINKSTR = 0x801c6988
SIOCSLOWAT = 0x80047302
SIOCSPGRP = 0x80047308
SIOCSVH = 0xc0906982
SIOCZIFDATA = 0xc0946986
SOCK_CLOEXEC = 0x10000000
SOCK_DGRAM = 0x2
SOCK_FLAGS_MASK = 0xf0000000
SOCK_NONBLOCK = 0x20000000
SOCK_NOSIGPIPE = 0x40000000
SOCK_RAW = 0x3
SOCK_RDM = 0x4
SOCK_SEQPACKET = 0x5
SOCK_STREAM = 0x1
SOL_SOCKET = 0xffff
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x2
SO_ACCEPTFILTER = 0x1000
SO_BROADCAST = 0x20
SO_DEBUG = 0x1
SO_DONTROUTE = 0x10
SO_ERROR = 0x1007
SO_KEEPALIVE = 0x8
SO_LINGER = 0x80
SO_NOHEADER = 0x100a
SO_NOSIGPIPE = 0x800
SO_OOBINLINE = 0x100
SO_OVERFLOWED = 0x1009
SO_RCVBUF = 0x1002
SO_RCVLOWAT = 0x1004
SO_RCVTIMEO = 0x100c
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_SNDBUF = 0x1001
SO_SNDLOWAT = 0x1003
SO_SNDTIMEO = 0x100b
SO_TIMESTAMP = 0x2000
SO_TYPE = 0x1008
SO_USELOOPBACK = 0x40
SYSCTL_VERSION = 0x1000000
SYSCTL_VERS_0 = 0x0
SYSCTL_VERS_1 = 0x1000000
SYSCTL_VERS_MASK = 0xff000000
S_ARCH1 = 0x10000
S_ARCH2 = 0x20000
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
S_IFCHR = 0x2000
S_IFDIR = 0x4000
S_IFIFO = 0x1000
S_IFLNK = 0xa000
S_IFMT = 0xf000
S_IFREG = 0x8000
S_IFSOCK = 0xc000
S_IFWHT = 0xe000
S_IREAD = 0x100
S_IRGRP = 0x20
S_IROTH = 0x4
S_IRUSR = 0x100
S_IRWXG = 0x38
S_IRWXO = 0x7
S_IRWXU = 0x1c0
S_ISGID = 0x400
S_ISTXT = 0x200
S_ISUID = 0x800
S_ISVTX = 0x200
S_IWGRP = 0x10
S_IWOTH = 0x2
S_IWRITE = 0x80
S_IWUSR = 0x80
S_IXGRP = 0x8
S_IXOTH = 0x1
S_IXUSR = 0x40
TCIFLUSH = 0x1
TCIOFLUSH = 0x3
TCOFLUSH = 0x2
TCP_CONGCTL = 0x20
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x3
TCP_KEEPINIT = 0x7
TCP_KEEPINTVL = 0x5
TCP_MAXBURST = 0x4
TCP_MAXSEG = 0x2
TCP_MAXWIN = 0xffff
TCP_MAX_WINSHIFT = 0xe
TCP_MD5SIG = 0x10
TCP_MINMSS = 0xd8
TCP_MSS = 0x218
TCP_NODELAY = 0x1
TCSAFLUSH = 0x2
TIOCCBRK = 0x2000747a
TIOCCDTR = 0x20007478
TIOCCONS = 0x80047462
TIOCDCDTIMESTAMP = 0x400c7458
TIOCDRAIN = 0x2000745e
TIOCEXCL = 0x2000740d
TIOCEXT = 0x80047460
TIOCFLAG_CDTRCTS = 0x10
TIOCFLAG_CLOCAL = 0x2
TIOCFLAG_CRTSCTS = 0x4
TIOCFLAG_MDMBUF = 0x8
TIOCFLAG_SOFTCAR = 0x1
TIOCFLUSH = 0x80047410
TIOCGETA = 0x402c7413
TIOCGETD = 0x4004741a
TIOCGFLAGS = 0x4004745d
TIOCGLINED = 0x40207442
TIOCGPGRP = 0x40047477
TIOCGQSIZE = 0x40047481
TIOCGRANTPT = 0x20007447
TIOCGSID = 0x40047463
TIOCGSIZE = 0x40087468
TIOCGWINSZ = 0x40087468
TIOCMBIC = 0x8004746b
TIOCMBIS = 0x8004746c
TIOCMGET = 0x4004746a
TIOCMSET = 0x8004746d
TIOCM_CAR = 0x40
TIOCM_CD = 0x40
TIOCM_CTS = 0x20
TIOCM_DSR = 0x100
TIOCM_DTR = 0x2
TIOCM_LE = 0x1
TIOCM_RI = 0x80
TIOCM_RNG = 0x80
TIOCM_RTS = 0x4
TIOCM_SR = 0x10
TIOCM_ST = 0x8
TIOCNOTTY = 0x20007471
TIOCNXCL = 0x2000740e
TIOCOUTQ = 0x40047473
TIOCPKT = 0x80047470
TIOCPKT_DATA = 0x0
TIOCPKT_DOSTOP = 0x20
TIOCPKT_FLUSHREAD = 0x1
TIOCPKT_FLUSHWRITE = 0x2
TIOCPKT_IOCTL = 0x40
TIOCPKT_NOSTOP = 0x10
TIOCPKT_START = 0x8
TIOCPKT_STOP = 0x4
TIOCPTMGET = 0x48087446
TIOCPTSNAME = 0x48087448
TIOCRCVFRAME = 0x80047445
TIOCREMOTE = 0x80047469
TIOCSBRK = 0x2000747b
TIOCSCTTY = 0x20007461
TIOCSDTR = 0x20007479
TIOCSETA = 0x802c7414
TIOCSETAF = 0x802c7416
TIOCSETAW = 0x802c7415
TIOCSETD = 0x8004741b
TIOCSFLAGS = 0x8004745c
TIOCSIG = 0x2000745f
TIOCSLINED = 0x80207443
TIOCSPGRP = 0x80047476
TIOCSQSIZE = 0x80047480
TIOCSSIZE = 0x80087467
TIOCSTART = 0x2000746e
TIOCSTAT = 0x80047465
TIOCSTI = 0x80017472
TIOCSTOP = 0x2000746f
TIOCSWINSZ = 0x80087467
TIOCUCNTL = 0x80047466
TIOCXMTFRAME = 0x80047444
TOSTOP = 0x400000
VDISCARD = 0xf
VDSUSP = 0xb
VEOF = 0x0
VEOL = 0x1
VEOL2 = 0x2
VERASE = 0x3
VINTR = 0x8
VKILL = 0x5
VLNEXT = 0xe
VMIN = 0x10
VQUIT = 0x9
VREPRINT = 0x6
VSTART = 0xc
VSTATUS = 0x12
VSTOP = 0xd
VSUSP = 0xa
VTIME = 0x11
VWERASE = 0x4
WALL = 0x8
WALLSIG = 0x8
WALTSIG = 0x4
WCLONE = 0x4
WCOREFLAG = 0x80
WNOHANG = 0x1
WNOWAIT = 0x10000
WNOZOMBIE = 0x20000
WOPTSCHECKED = 0x40000
WSTOPPED = 0x7f
WUNTRACED = 0x2
)
// Errors
const (
E2BIG = syscall.Errno(0x7)
EACCES = syscall.Errno(0xd)
EADDRINUSE = syscall.Errno(0x30)
EADDRNOTAVAIL = syscall.Errno(0x31)
EAFNOSUPPORT = syscall.Errno(0x2f)
EAGAIN = syscall.Errno(0x23)
EALREADY = syscall.Errno(0x25)
EAUTH = syscall.Errno(0x50)
EBADF = syscall.Errno(0x9)
EBADMSG = syscall.Errno(0x58)
EBADRPC = syscall.Errno(0x48)
EBUSY = syscall.Errno(0x10)
ECANCELED = syscall.Errno(0x57)
ECHILD = syscall.Errno(0xa)
ECONNABORTED = syscall.Errno(0x35)
ECONNREFUSED = syscall.Errno(0x3d)
ECONNRESET = syscall.Errno(0x36)
EDEADLK = syscall.Errno(0xb)
EDESTADDRREQ = syscall.Errno(0x27)
EDOM = syscall.Errno(0x21)
EDQUOT = syscall.Errno(0x45)
EEXIST = syscall.Errno(0x11)
EFAULT = syscall.Errno(0xe)
EFBIG = syscall.Errno(0x1b)
EFTYPE = syscall.Errno(0x4f)
EHOSTDOWN = syscall.Errno(0x40)
EHOSTUNREACH = syscall.Errno(0x41)
EIDRM = syscall.Errno(0x52)
EILSEQ = syscall.Errno(0x55)
EINPROGRESS = syscall.Errno(0x24)
EINTR = syscall.Errno(0x4)
EINVAL = syscall.Errno(0x16)
EIO = syscall.Errno(0x5)
EISCONN = syscall.Errno(0x38)
EISDIR = syscall.Errno(0x15)
ELAST = syscall.Errno(0x60)
ELOOP = syscall.Errno(0x3e)
EMFILE = syscall.Errno(0x18)
EMLINK = syscall.Errno(0x1f)
EMSGSIZE = syscall.Errno(0x28)
EMULTIHOP = syscall.Errno(0x5e)
ENAMETOOLONG = syscall.Errno(0x3f)
ENEEDAUTH = syscall.Errno(0x51)
ENETDOWN = syscall.Errno(0x32)
ENETRESET = syscall.Errno(0x34)
ENETUNREACH = syscall.Errno(0x33)
ENFILE = syscall.Errno(0x17)
ENOATTR = syscall.Errno(0x5d)
ENOBUFS = syscall.Errno(0x37)
ENODATA = syscall.Errno(0x59)
ENODEV = syscall.Errno(0x13)
ENOENT = syscall.Errno(0x2)
ENOEXEC = syscall.Errno(0x8)
ENOLCK = syscall.Errno(0x4d)
ENOLINK = syscall.Errno(0x5f)
ENOMEM = syscall.Errno(0xc)
ENOMSG = syscall.Errno(0x53)
ENOPROTOOPT = syscall.Errno(0x2a)
ENOSPC = syscall.Errno(0x1c)
ENOSR = syscall.Errno(0x5a)
ENOSTR = syscall.Errno(0x5b)
ENOSYS = syscall.Errno(0x4e)
ENOTBLK = syscall.Errno(0xf)
ENOTCONN = syscall.Errno(0x39)
ENOTDIR = syscall.Errno(0x14)
ENOTEMPTY = syscall.Errno(0x42)
ENOTSOCK = syscall.Errno(0x26)
ENOTSUP = syscall.Errno(0x56)
ENOTTY = syscall.Errno(0x19)
ENXIO = syscall.Errno(0x6)
EOPNOTSUPP = syscall.Errno(0x2d)
EOVERFLOW = syscall.Errno(0x54)
EPERM = syscall.Errno(0x1)
EPFNOSUPPORT = syscall.Errno(0x2e)
EPIPE = syscall.Errno(0x20)
EPROCLIM = syscall.Errno(0x43)
EPROCUNAVAIL = syscall.Errno(0x4c)
EPROGMISMATCH = syscall.Errno(0x4b)
EPROGUNAVAIL = syscall.Errno(0x4a)
EPROTO = syscall.Errno(0x60)
EPROTONOSUPPORT = syscall.Errno(0x2b)
EPROTOTYPE = syscall.Errno(0x29)
ERANGE = syscall.Errno(0x22)
EREMOTE = syscall.Errno(0x47)
EROFS = syscall.Errno(0x1e)
ERPCMISMATCH = syscall.Errno(0x49)
ESHUTDOWN = syscall.Errno(0x3a)
ESOCKTNOSUPPORT = syscall.Errno(0x2c)
ESPIPE = syscall.Errno(0x1d)
ESRCH = syscall.Errno(0x3)
ESTALE = syscall.Errno(0x46)
ETIME = syscall.Errno(0x5c)
ETIMEDOUT = syscall.Errno(0x3c)
ETOOMANYREFS = syscall.Errno(0x3b)
ETXTBSY = syscall.Errno(0x1a)
EUSERS = syscall.Errno(0x44)
EWOULDBLOCK = syscall.Errno(0x23)
EXDEV = syscall.Errno(0x12)
)
// Signals
const (
SIGABRT = syscall.Signal(0x6)
SIGALRM = syscall.Signal(0xe)
SIGBUS = syscall.Signal(0xa)
SIGCHLD = syscall.Signal(0x14)
SIGCONT = syscall.Signal(0x13)
SIGEMT = syscall.Signal(0x7)
SIGFPE = syscall.Signal(0x8)
SIGHUP = syscall.Signal(0x1)
SIGILL = syscall.Signal(0x4)
SIGINFO = syscall.Signal(0x1d)
SIGINT = syscall.Signal(0x2)
SIGIO = syscall.Signal(0x17)
SIGIOT = syscall.Signal(0x6)
SIGKILL = syscall.Signal(0x9)
SIGPIPE = syscall.Signal(0xd)
SIGPROF = syscall.Signal(0x1b)
SIGPWR = syscall.Signal(0x20)
SIGQUIT = syscall.Signal(0x3)
SIGSEGV = syscall.Signal(0xb)
SIGSTOP = syscall.Signal(0x11)
SIGSYS = syscall.Signal(0xc)
SIGTERM = syscall.Signal(0xf)
SIGTRAP = syscall.Signal(0x5)
SIGTSTP = syscall.Signal(0x12)
SIGTTIN = syscall.Signal(0x15)
SIGTTOU = syscall.Signal(0x16)
SIGURG = syscall.Signal(0x10)
SIGUSR1 = syscall.Signal(0x1e)
SIGUSR2 = syscall.Signal(0x1f)
SIGVTALRM = syscall.Signal(0x1a)
SIGWINCH = syscall.Signal(0x1c)
SIGXCPU = syscall.Signal(0x18)
SIGXFSZ = syscall.Signal(0x19)
)
// Error table
var errors = [...]string{
1: "operation not permitted",
2: "no such file or directory",
3: "no such process",
4: "interrupted system call",
5: "input/output error",
6: "device not configured",
7: "argument list too long",
8: "exec format error",
9: "bad file descriptor",
10: "no child processes",
11: "resource deadlock avoided",
12: "cannot allocate memory",
13: "permission denied",
14: "bad address",
15: "block device required",
16: "device busy",
17: "file exists",
18: "cross-device link",
19: "operation not supported by device",
20: "not a directory",
21: "is a directory",
22: "invalid argument",
23: "too many open files in system",
24: "too many open files",
25: "inappropriate ioctl for device",
26: "text file busy",
27: "file too large",
28: "no space left on device",
29: "illegal seek",
30: "read-only file system",
31: "too many links",
32: "broken pipe",
33: "numerical argument out of domain",
34: "result too large or too small",
35: "resource temporarily unavailable",
36: "operation now in progress",
37: "operation already in progress",
38: "socket operation on non-socket",
39: "destination address required",
40: "message too long",
41: "protocol wrong type for socket",
42: "protocol option not available",
43: "protocol not supported",
44: "socket type not supported",
45: "operation not supported",
46: "protocol family not supported",
47: "address family not supported by protocol family",
48: "address already in use",
49: "can't assign requested address",
50: "network is down",
51: "network is unreachable",
52: "network dropped connection on reset",
53: "software caused connection abort",
54: "connection reset by peer",
55: "no buffer space available",
56: "socket is already connected",
57: "socket is not connected",
58: "can't send after socket shutdown",
59: "too many references: can't splice",
60: "connection timed out",
61: "connection refused",
62: "too many levels of symbolic links",
63: "file name too long",
64: "host is down",
65: "no route to host",
66: "directory not empty",
67: "too many processes",
68: "too many users",
69: "disc quota exceeded",
70: "stale NFS file handle",
71: "too many levels of remote in path",
72: "RPC struct is bad",
73: "RPC version wrong",
74: "RPC prog. not avail",
75: "program version wrong",
76: "bad procedure for program",
77: "no locks available",
78: "function not implemented",
79: "inappropriate file type or format",
80: "authentication error",
81: "need authenticator",
82: "identifier removed",
83: "no message of desired type",
84: "value too large to be stored in data type",
85: "illegal byte sequence",
86: "not supported",
87: "operation Canceled",
88: "bad or Corrupt message",
89: "no message available",
90: "no STREAM resources",
91: "not a STREAM",
92: "STREAM ioctl timeout",
93: "attribute not found",
94: "multihop attempted",
95: "link has been severed",
96: "protocol error",
}
// Signal table
var signals = [...]string{
1: "hangup",
2: "interrupt",
3: "quit",
4: "illegal instruction",
5: "trace/BPT trap",
6: "abort trap",
7: "EMT trap",
8: "floating point exception",
9: "killed",
10: "bus error",
11: "segmentation fault",
12: "bad system call",
13: "broken pipe",
14: "alarm clock",
15: "terminated",
16: "urgent I/O condition",
17: "stopped (signal)",
18: "stopped",
19: "continued",
20: "child exited",
21: "stopped (tty input)",
22: "stopped (tty output)",
23: "I/O possible",
24: "cputime limit exceeded",
25: "filesize limit exceeded",
26: "virtual timer expired",
27: "profiling timer expired",
28: "window size changes",
29: "information request",
30: "user defined signal 1",
31: "user defined signal 2",
32: "power fail/restart",
}
| Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_arm.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.0021748885046690702,
0.00022022983466740698,
0.00016089706332422793,
0.0001658691471675411,
0.00023956230143085122
] |
{
"id": 8,
"code_window": [
" --masquerade-all[=false]: If using the pure iptables proxy, SNAT everything\n",
" --master=\"\": The address of the Kubernetes API server (overrides any value in kubeconfig)\n",
" --oom-score-adj=-999: The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]\n",
" --proxy-mode=userspace: Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.\n",
" --proxy-port-range=: Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.\n",
" --udp-timeout=250ms: How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace\n",
"```\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" --proxy-mode=: Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.\n"
],
"file_path": "docs/admin/kube-proxy.md",
"type": "replace",
"edit_start_line_idx": 73
} | package jsonutil
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"reflect"
"time"
)
// UnmarshalJSON reads a stream and unmarshals the results into object v.
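//
// A minimal usage sketch (the Output type and its locationName tag are
// hypothetical, not part of this package):
//
//	type Output struct {
//		Name *string `locationName:"name"`
//	}
//	var out Output
//	err := UnmarshalJSON(&out, strings.NewReader(`{"name":"demo"}`))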
func UnmarshalJSON(v interface{}, stream io.Reader) error {
var out interface{}
b, err := ioutil.ReadAll(stream)
if err != nil {
return err
}
if len(b) == 0 {
return nil
}
if err := json.Unmarshal(b, &out); err != nil {
return err
}
return unmarshalAny(reflect.ValueOf(v), out, "")
}
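// unmarshalAny routes the decoded JSON value to the structure, list, map or
// scalar unmarshaler, using the `type` struct tag when present and otherwise
// falling back to the member's reflected kind.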
func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error {
vtype := value.Type()
if vtype.Kind() == reflect.Ptr {
vtype = vtype.Elem() // check kind of actual element type
}
t := tag.Get("type")
if t == "" {
switch vtype.Kind() {
case reflect.Struct:
// also it can't be a time object
if _, ok := value.Interface().(*time.Time); !ok {
t = "structure"
}
case reflect.Slice:
// also it can't be a byte slice
if _, ok := value.Interface().([]byte); !ok {
t = "list"
}
case reflect.Map:
t = "map"
}
}
switch t {
case "structure":
if field, ok := vtype.FieldByName("_"); ok {
tag = field.Tag
}
return unmarshalStruct(value, data, tag)
case "list":
return unmarshalList(value, data, tag)
case "map":
return unmarshalMap(value, data, tag)
default:
return unmarshalScalar(value, data, tag)
}
}
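// unmarshalStruct allocates the target struct if it is a nil pointer, then
// fills each exported field, honoring `payload` wrappers and `locationName`
// tags.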
func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error {
if data == nil {
return nil
}
mapData, ok := data.(map[string]interface{})
if !ok {
return fmt.Errorf("JSON value is not a structure (%#v)", data)
}
t := value.Type()
if value.Kind() == reflect.Ptr {
if value.IsNil() { // create the structure if it's nil
s := reflect.New(value.Type().Elem())
value.Set(s)
value = s
}
value = value.Elem()
t = t.Elem()
}
// unwrap any payloads
if payload := tag.Get("payload"); payload != "" {
field, _ := t.FieldByName(payload)
return unmarshalAny(value.FieldByName(payload), data, field.Tag)
}
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if field.PkgPath != "" {
continue // ignore unexported fields
}
// figure out what this field is called
name := field.Name
if locName := field.Tag.Get("locationName"); locName != "" {
name = locName
}
member := value.FieldByIndex(field.Index)
err := unmarshalAny(member, mapData[name], field.Tag)
if err != nil {
return err
}
}
return nil
}
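// unmarshalList sizes a nil slice to match the JSON array and unmarshals each
// element in place.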
func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error {
if data == nil {
return nil
}
listData, ok := data.([]interface{})
if !ok {
return fmt.Errorf("JSON value is not a list (%#v)", data)
}
if value.IsNil() {
l := len(listData)
value.Set(reflect.MakeSlice(value.Type(), l, l))
}
for i, c := range listData {
err := unmarshalAny(value.Index(i), c, "")
if err != nil {
return err
}
}
return nil
}
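// unmarshalMap initializes a nil map and unmarshals every key/value pair of
// the JSON object into it.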
func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error {
if data == nil {
return nil
}
mapData, ok := data.(map[string]interface{})
if !ok {
return fmt.Errorf("JSON value is not a map (%#v)", data)
}
if value.IsNil() {
value.Set(reflect.MakeMap(value.Type()))
}
for k, v := range mapData {
kvalue := reflect.ValueOf(k)
vvalue := reflect.New(value.Type().Elem()).Elem()
		if err := unmarshalAny(vvalue, v, ""); err != nil {
			return err
		}
value.SetMapIndex(kvalue, vvalue)
}
return nil
}
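// unmarshalScalar converts JSON strings, numbers and booleans into *string,
// []byte (base64-decoded), *int64, *float64, *time.Time (Unix seconds) and
// *bool targets.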
func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error {
errf := func() error {
return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
}
switch d := data.(type) {
case nil:
return nil // nothing to do here
case string:
switch value.Interface().(type) {
case *string:
value.Set(reflect.ValueOf(&d))
case []byte:
b, err := base64.StdEncoding.DecodeString(d)
if err != nil {
return err
}
value.Set(reflect.ValueOf(b))
default:
return errf()
}
case float64:
switch value.Interface().(type) {
case *int64:
di := int64(d)
value.Set(reflect.ValueOf(&di))
case *float64:
value.Set(reflect.ValueOf(&d))
case *time.Time:
t := time.Unix(int64(d), 0).UTC()
value.Set(reflect.ValueOf(&t))
default:
return errf()
}
case bool:
switch value.Interface().(type) {
case *bool:
value.Set(reflect.ValueOf(&d))
default:
return errf()
}
default:
return fmt.Errorf("unsupported JSON value (%v)", data)
}
return nil
}
| Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017024744010996073,
0.00016766258340794593,
0.00016436904843430966,
0.00016806897474452853,
0.0000018286471004103078
] |
{
"id": 9,
"code_window": [
" --proxy-port-range=: Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.\n",
" --udp-timeout=250ms: How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace\n",
"```\n",
"\n",
"###### Auto generated by spf13/cobra on 27-Jan-2016\n",
"\n",
"\n",
"<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->\n",
"[]()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"###### Auto generated by spf13/cobra on 1-Feb-2016\n"
],
"file_path": "docs/admin/kube-proxy.md",
"type": "replace",
"edit_start_line_idx": 78
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package options contains flags for initializing a proxy.
package options
import (
_ "net/http/pprof"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/util"
"github.com/spf13/pflag"
)
const (
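	// ExperimentalProxyModeAnnotation is the Node annotation consulted when
	// --proxy-mode is left blank.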
ExperimentalProxyModeAnnotation = "net.experimental.kubernetes.io/proxy-mode"
)
// ProxyServerConfig configures and runs a Kubernetes proxy server
type ProxyServerConfig struct {
componentconfig.KubeProxyConfiguration
ResourceContainer string
KubeAPIQPS float32
KubeAPIBurst int
ConfigSyncPeriod time.Duration
CleanupAndExit bool
NodeRef *api.ObjectReference
Master string
Kubeconfig string
}
func NewProxyConfig() *ProxyServerConfig {
return &ProxyServerConfig{
KubeProxyConfiguration: componentconfig.KubeProxyConfiguration{
BindAddress: "0.0.0.0",
HealthzPort: 10249,
HealthzBindAddress: "127.0.0.1",
OOMScoreAdj: util.IntPtr(qos.KubeProxyOOMScoreAdj),
ResourceContainer: "/kube-proxy",
IPTablesSyncPeriod: unversioned.Duration{30 * time.Second},
UDPIdleTimeout: unversioned.Duration{250 * time.Millisecond},
Mode: componentconfig.ProxyModeUserspace,
ConntrackMax: 256 * 1024, // 4x default (64k)
ConntrackTCPEstablishedTimeout: unversioned.Duration{Duration: 24 * time.Hour}, // 1 day (1/5 default)
},
KubeAPIQPS: 5.0,
KubeAPIBurst: 10,
ConfigSyncPeriod: 15 * time.Minute,
}
}
// AddFlags adds flags for a specific ProxyServer to the specified FlagSet
func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {
fs.Var(componentconfig.IPVar{&s.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)")
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.IntVar(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port to bind the health check server. Use 0 to disable.")
fs.Var(componentconfig.IPVar{&s.HealthzBindAddress}, "healthz-bind-address", "The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)")
fs.IntVar(s.OOMScoreAdj, "oom-score-adj", util.IntPtrDerefOr(s.OOMScoreAdj, qos.KubeProxyOOMScoreAdj), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]")
fs.StringVar(&s.ResourceContainer, "resource-container", s.ResourceContainer, "Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).")
fs.MarkDeprecated("resource-container", "This feature will be removed in a later release.")
fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
fs.Var(componentconfig.PortRangeVar{&s.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.")
fs.StringVar(&s.HostnameOverride, "hostname-override", s.HostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.")
fs.Var(&s.Mode, "proxy-mode", "Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the '"+ExperimentalProxyModeAnnotation+"' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.")
fs.DurationVar(&s.IPTablesSyncPeriod.Duration, "iptables-sync-period", s.IPTablesSyncPeriod.Duration, "How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
fs.DurationVar(&s.ConfigSyncPeriod, "config-sync-period", s.ConfigSyncPeriod, "How often configuration from the apiserver is refreshed. Must be greater than 0.")
fs.BoolVar(&s.MasqueradeAll, "masquerade-all", false, "If using the pure iptables proxy, SNAT everything")
fs.BoolVar(&s.CleanupAndExit, "cleanup-iptables", false, "If true cleanup iptables rules and exit.")
fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.DurationVar(&s.UDPIdleTimeout.Duration, "udp-timeout", s.UDPIdleTimeout.Duration, "How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace")
fs.IntVar(&s.ConntrackMax, "conntrack-max", s.ConntrackMax, "Maximum number of NAT connections to track (0 to leave as-is)")
fs.DurationVar(&s.ConntrackTCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", s.ConntrackTCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)")
}
| cmd/kube-proxy/app/options/options.go | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.003185636131092906,
0.000540614768397063,
0.00016192378825508058,
0.00020471095922403038,
0.0008874745108187199
] |
{
"id": 9,
"code_window": [
" --proxy-port-range=: Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.\n",
" --udp-timeout=250ms: How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace\n",
"```\n",
"\n",
"###### Auto generated by spf13/cobra on 27-Jan-2016\n",
"\n",
"\n",
"<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->\n",
"[]()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"###### Auto generated by spf13/cobra on 1-Feb-2016\n"
],
"file_path": "docs/admin/kube-proxy.md",
"type": "replace",
"edit_start_line_idx": 78
} | # kubernetes-bundle
The kubernetes-bundle allows you to deploy the many services of
Kubernetes to a cloud environment and get started with Kubernetes
quickly.
## Kubernetes
Kubernetes is an open source system for managing containerized
applications. Kubernetes uses [Docker](http://docker.com) to run
containerized applications.
## Juju TL;DR
The [Juju](https://jujucharms.com) system provides provisioning and
orchestration across a variety of clouds and bare metal. A juju bundle
describes a collection of services and how they interrelate. `juju
quickstart` allows you to bootstrap a deployment environment and
deploy a bundle.
## Dive in!
#### Install Juju Quickstart
You will need to
[install the Juju client](https://jujucharms.com/get-started) and
`juju-quickstart` as pre-requisites. To deploy the bundle use
`juju-quickstart` which runs on Mac OS (`brew install
juju-quickstart`) or Ubuntu (`apt-get install juju-quickstart`).
### Deploy a Kubernetes Bundle
Use the 'juju quickstart' command to deploy a Kubernetes cluster to any cloud
supported by Juju.
The charm store version of the Kubernetes bundle can be deployed as follows:
juju quickstart u/kubernetes/kubernetes-cluster
> Note: The charm store bundle may be locked to a specific Kubernetes release.
Alternatively, you could deploy a Kubernetes bundle straight from GitHub or a file:
juju quickstart -i https://raw.githubusercontent.com/whitmo/bundle-kubernetes/master/bundles.yaml
The command above does a few things for you:
- Starts a curses-based GUI for managing your cloud or MAAS credentials
- Looks for a bootstrapped deployment environment, and bootstraps if
required. This will launch a bootstrap node in your chosen
deployment environment (machine 0).
- Deploys the Juju GUI to your environment onto the bootstrap node.
- Provisions 4 machines, and deploys the Kubernetes services on top of
them (Kubernetes-master, two Kubernetes minions using flannel, and etcd).
- Orchestrates the relations among the services, and exits.
Now you should have a running Kubernetes cluster. Run `juju status
--format=oneline` to see the address of your kubernetes-master unit.
For further reading, see [Juju Quickstart](https://pypi.python.org/pypi/juju-quickstart).
Go to the [Getting started with Juju guide](https://github.com/kubernetes/kubernetes/blob/master/docs/getting-started-guides/juju.md)
for more information about deploying a development Kubernetes cluster.
### Using the Kubernetes Client
You'll need the Kubernetes command line client,
[kubectl](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/kubectl/kubectl.md)
to interact with the created cluster. The kubectl command is
installed on the kubernetes-master charm. If you want to work with
the cluster from your computer you will need to install the binary
locally.
You can access kubectl in a number of ways using juju.
via juju run:
juju run --service kubernetes-master/0 "sudo kubectl get nodes"
via juju ssh:
juju ssh kubernetes-master/0 -t "sudo kubectl get nodes"
You may also SSH to the kubernetes-master unit (`juju ssh kubernetes-master/0`)
and call kubectl from the command prompt.
See the
[kubectl documentation](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/kubectl/kubectl.md)
for more details of what can be done with the command line tool.
### Scaling up the cluster
You can add capacity by adding more Docker units:
juju add-unit docker
### Known Limitations
Some Kubernetes functionality is currently platform specific. For
example, load balancers and persistent volumes only work with the
Google Compute provider at this time.
The Juju integration uses the Kubernetes null provider. This means
external load balancers and storage can't be directly driven through
Kubernetes config files at this time. We look forward to adding these
capabilities to the charms.
## More about the components the bundle deploys
### Kubernetes master
The master controls the Kubernetes cluster. It manages the worker
nodes and provides the primary interface for control by the user.
### Kubernetes minion
The minions are the servers that perform the work. Minions must
communicate with the master and run the workloads that are assigned to
them.
### Flannel-docker
Flannel provides individual subnets for each machine in the cluster by
creating a
[software-defined network](http://en.wikipedia.org/wiki/Software-defined_networking).
### Docker
An open platform for distributed applications for developers and sysadmins.
### Etcd
Etcd persists state for Flannel and Kubernetes. It is a distributed
key-value store with an HTTP interface.
## For further information on getting started with Juju
Juju has complete documentation covering setup and cloud
configuration on its own
[documentation site](https://jujucharms.com/docs/).
- [Getting Started](https://jujucharms.com/docs/stable/getting-started)
- [Using Juju](https://jujucharms.com/docs/stable/charms)
## Installing the kubectl outside of kubernetes-master unit
Download the Kubernetes release from:
https://github.com/kubernetes/kubernetes/releases and extract
the release; you can then use the CLI binary directly at
./kubernetes/platforms/linux/amd64/kubectl
You'll need the address of the kubernetes-master as an environment variable:
juju status kubernetes-master/0
Grab the public-address there and export it as the KUBERNETES_MASTER
environment variable:
export KUBERNETES_MASTER=$(juju status --format=oneline kubernetes-master | grep kubernetes-master | cut -d' ' -f3):8080
And now you can run kubectl on the command line:
kubectl get no
See the
[kubectl documentation](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/kubectl/kubectl.md)
for more details of what can be done with the command line tool.
## Hacking on the kubernetes-bundle and associated charms
The kubernetes-bundle is open source and available on GitHub. If
you want to get started developing on the bundle, you can clone it from
GitHub:
git clone https://github.com/kubernetes/kubernetes.git
Go to the [Getting started with Juju guide](https://github.com/kubernetes/kubernetes/blob/master/docs/getting-started-guides/juju.md)
for more information about the bundle or charms.
## How to contribute
Send us pull requests! We'll send you a cookie if they include tests and docs.
## Current and Most Complete Information
The charms and bundles are in the [kubernetes](https://github.com/kubernetes/kubernetes)
repository on GitHub.
- [kubernetes-master charm on Github](https://github.com/kubernetes/kubernetes/tree/master/cluster/juju/charms/trusty/kubernetes-master)
- [kubernetes charm on GitHub](https://github.com/kubernetes/kubernetes/tree/master/cluster/juju/charms/trusty/kubernetes)
See the
[Kubernetes project](https://github.com/kubernetes/kubernetes)
for more information, or check out the
[Kubernetes Documentation](https://github.com/kubernetes/kubernetes/tree/master/docs)
for more details about Kubernetes concepts and terminology.
Having a problem? Check the [Kubernetes issues database](https://github.com/kubernetes/kubernetes/issues)
for related issues.
| cluster/juju/bundles/README.md | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00152198679279536,
0.00028096133610233665,
0.00015680283831898123,
0.00016967796545941383,
0.000296357786282897
] |
{
"id": 9,
"code_window": [
" --proxy-port-range=: Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.\n",
" --udp-timeout=250ms: How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace\n",
"```\n",
"\n",
"###### Auto generated by spf13/cobra on 27-Jan-2016\n",
"\n",
"\n",
"<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->\n",
"[]()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"###### Auto generated by spf13/cobra on 1-Feb-2016\n"
],
"file_path": "docs/admin/kube-proxy.md",
"type": "replace",
"edit_start_line_idx": 78
} | // +build windows
package ioutils
import (
"io/ioutil"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
)
// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
func TempDir(dir, prefix string) (string, error) {
tempDir, err := ioutil.TempDir(dir, prefix)
if err != nil {
return "", err
}
return longpath.AddPrefix(tempDir), nil
}
| Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_windows.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00016644172137603164,
0.00016471384151373059,
0.00016298596165142953,
0.00016471384151373059,
0.0000017278798623010516
] |
{
"id": 9,
"code_window": [
" --proxy-port-range=: Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.\n",
" --udp-timeout=250ms: How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace\n",
"```\n",
"\n",
"###### Auto generated by spf13/cobra on 27-Jan-2016\n",
"\n",
"\n",
"<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->\n",
"[]()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"###### Auto generated by spf13/cobra on 1-Feb-2016\n"
],
"file_path": "docs/admin/kube-proxy.md",
"type": "replace",
"edit_start_line_idx": 78
} | package groups
import (
"fmt"
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/pagination"
)
// ListOpts allows the filtering and sorting of paginated collections through
// the API. Filtering is achieved by passing in struct field values that map to
// the security group attributes you want to see returned. SortKey allows you to
// sort by a particular network attribute. SortDir sets the direction, and is
// either `asc' or `desc'. Marker and Limit are used for pagination.
type ListOpts struct {
ID string `q:"id"`
Name string `q:"name"`
TenantID string `q:"tenant_id"`
Limit int `q:"limit"`
Marker string `q:"marker"`
SortKey string `q:"sort_key"`
SortDir string `q:"sort_dir"`
}
// List returns a Pager which allows you to iterate over a collection of
// security groups. It accepts a ListOpts struct, which allows you to filter
// and sort the returned collection for greater efficiency.
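//
// A minimal usage sketch (networkClient is assumed to be an authenticated
// *gophercloud.ServiceClient for the networking service):
//
//	pager := groups.List(networkClient, groups.ListOpts{Name: "web"})
//	err := pager.EachPage(func(page pagination.Page) (bool, error) {
//		groupList, err := groups.ExtractGroups(page)
//		if err != nil {
//			return false, err
//		}
//		for _, g := range groupList {
//			fmt.Printf("%s %s\n", g.ID, g.Name)
//		}
//		return true, nil
//	})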
func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
q, err := gophercloud.BuildQueryString(&opts)
if err != nil {
return pagination.Pager{Err: err}
}
u := rootURL(c) + q.String()
return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
return SecGroupPage{pagination.LinkedPageBase{PageResult: r}}
})
}
var (
errNameRequired = fmt.Errorf("Name is required")
)
// CreateOpts contains all the values needed to create a new security group.
type CreateOpts struct {
	// Required. Human-readable name for the security group. Does not have to be unique.
Name string
	// Required for admins. Indicates the owner of the security group.
TenantID string
// Optional. Describes the security group.
Description string
}
// Create is an operation which provisions a new security group with default
// security group rules for the IPv4 and IPv6 ether types.
func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
var res CreateResult
// Validate required opts
if opts.Name == "" {
res.Err = errNameRequired
return res
}
type secgroup struct {
Name string `json:"name"`
TenantID string `json:"tenant_id,omitempty"`
Description string `json:"description,omitempty"`
}
type request struct {
SecGroup secgroup `json:"security_group"`
}
reqBody := request{SecGroup: secgroup{
Name: opts.Name,
TenantID: opts.TenantID,
Description: opts.Description,
}}
_, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil)
return res
}
// Get retrieves a particular security group based on its unique ID.
func Get(c *gophercloud.ServiceClient, id string) GetResult {
var res GetResult
_, res.Err = c.Get(resourceURL(c, id), &res.Body, nil)
return res
}
// Delete will permanently delete a particular security group based on its unique ID.
func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
var res DeleteResult
_, res.Err = c.Delete(resourceURL(c, id), nil)
return res
}
// IDFromName is a convenience function that returns a security group's ID given its name.
func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
securityGroupCount := 0
securityGroupID := ""
if name == "" {
return "", fmt.Errorf("A security group name must be provided.")
}
pager := List(client, ListOpts{})
	err := pager.EachPage(func(page pagination.Page) (bool, error) {
securityGroupList, err := ExtractGroups(page)
if err != nil {
return false, err
}
for _, s := range securityGroupList {
if s.Name == name {
securityGroupCount++
securityGroupID = s.ID
}
}
return true, nil
	})
	if err != nil {
		// Propagate pagination/extraction failures instead of ignoring them.
		return "", err
	}
switch securityGroupCount {
case 0:
return "", fmt.Errorf("Unable to find security group: %s", name)
case 1:
return securityGroupID, nil
default:
return "", fmt.Errorf("Found %d security groups matching %s", securityGroupCount, name)
}
}
| Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00020734345889650285,
0.00017085032595787197,
0.00016149527800735086,
0.00016799638979136944,
0.000010523736818868201
] |
{
"id": 10,
"code_window": [
"}\n",
"\n",
"// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables'\n",
"// (experimental). If blank, look at the Node object on the Kubernetes API and respect the\n",
"// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"// (newer, faster). If blank, look at the Node object on the Kubernetes API and respect the\n"
],
"file_path": "pkg/apis/componentconfig/types.go",
"type": "replace",
"edit_start_line_idx": 65
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"errors"
"net"
"net/http"
_ "net/http/pprof"
"strconv"
"time"
"k8s.io/kubernetes/cmd/kube-proxy/app/options"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
kubeclient "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/proxy"
proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
"k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/proxy/userspace"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
utildbus "k8s.io/kubernetes/pkg/util/dbus"
"k8s.io/kubernetes/pkg/util/exec"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilnet "k8s.io/kubernetes/pkg/util/net"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/oom"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
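// ProxyServer bundles what a running kube-proxy needs: the API client, its
// configuration, the iptables interface, the active proxier, the event
// machinery, and an optional conntrack tuner.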
type ProxyServer struct {
Client *kubeclient.Client
Config *options.ProxyServerConfig
IptInterface utiliptables.Interface
Proxier proxy.ProxyProvider
Broadcaster record.EventBroadcaster
Recorder record.EventRecorder
Conntracker Conntracker // if nil, ignored
}
const (
proxyModeUserspace = "userspace"
proxyModeIptables = "iptables"
experimentalProxyModeAnnotation = options.ExperimentalProxyModeAnnotation
betaProxyModeAnnotation = "net.beta.kubernetes.io/proxy-mode"
)
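// checkKnownProxyMode reports whether proxyMode is empty or one of the
// supported mode names.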
func checkKnownProxyMode(proxyMode string) bool {
switch proxyMode {
case "", proxyModeUserspace, proxyModeIptables:
return true
}
return false
}
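// NewProxyServer wraps already-constructed components into a ProxyServer; it
// performs no validation of its own.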
func NewProxyServer(
client *kubeclient.Client,
config *options.ProxyServerConfig,
iptInterface utiliptables.Interface,
proxier proxy.ProxyProvider,
broadcaster record.EventBroadcaster,
recorder record.EventRecorder,
conntracker Conntracker,
) (*ProxyServer, error) {
return &ProxyServer{
Client: client,
Config: config,
IptInterface: iptInterface,
Proxier: proxier,
Broadcaster: broadcaster,
Recorder: recorder,
Conntracker: conntracker,
}, nil
}
// NewProxyCommand creates a *cobra.Command object with default parameters
func NewProxyCommand() *cobra.Command {
s := options.NewProxyConfig()
s.AddFlags(pflag.CommandLine)
cmd := &cobra.Command{
Use: "kube-proxy",
Long: `The Kubernetes network proxy runs on each node. This
reflects services as defined in the Kubernetes API on each node and can do simple
TCP,UDP stream forwarding or round robin TCP,UDP forwarding across a set of backends.
Service cluster ips and ports are currently found through Docker-links-compatible
environment variables specifying ports opened by the service proxy. There is an optional
addon that provides cluster DNS for these cluster IPs. The user must create a service
with the apiserver API to configure the proxy.`,
Run: func(cmd *cobra.Command, args []string) {
},
}
return cmd
}
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, error) {
protocol := utiliptables.ProtocolIpv4
if net.ParseIP(config.BindAddress).To4() == nil {
protocol = utiliptables.ProtocolIpv6
}
	// Create an iptables utility.
execer := exec.New()
dbus := utildbus.New()
iptInterface := utiliptables.New(execer, dbus, protocol)
// We omit creation of pretty much everything if we run in cleanup mode
if config.CleanupAndExit {
return &ProxyServer{
Config: config,
IptInterface: iptInterface,
}, nil
}
// TODO(vmarmol): Use container config for this.
var oomAdjuster *oom.OOMAdjuster
if config.OOMScoreAdj != nil {
oomAdjuster = oom.NewOOMAdjuster()
if err := oomAdjuster.ApplyOOMScoreAdj(0, *config.OOMScoreAdj); err != nil {
glog.V(2).Info(err)
}
}
if config.ResourceContainer != "" {
// Run in its own container.
if err := util.RunInResourceContainer(config.ResourceContainer); err != nil {
glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
} else {
glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
}
}
// Create a Kubernetes client.
// Define the API config source.
if config.Kubeconfig == "" && config.Master == "" {
glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
}
// This creates a client, first loading any specified kubeconfig
// file, and then overriding the Master flag, if non-empty.
kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
if err != nil {
return nil, err
}
// Override kubeconfig qps/burst settings from flags
kubeconfig.QPS = config.KubeAPIQPS
kubeconfig.Burst = config.KubeAPIBurst
client, err := kubeclient.New(kubeconfig)
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
// Create event recorder
hostname := nodeutil.GetHostname(config.HostnameOverride)
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})
var proxier proxy.ProxyProvider
var endpointsHandler proxyconfig.EndpointsConfigHandler
proxyMode := getProxyMode(string(config.Mode), client.Nodes(), hostname, iptInterface)
if proxyMode == proxyModeIptables {
glog.V(2).Info("Using iptables Proxier.")
proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.IPTablesSyncPeriod.Duration, config.MasqueradeAll)
if err != nil {
glog.Fatalf("Unable to create proxier: %v", err)
}
proxier = proxierIptables
endpointsHandler = proxierIptables
// No turning back. Remove artifacts that might still exist from the userspace Proxier.
glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
userspace.CleanupLeftovers(iptInterface)
} else {
glog.V(2).Info("Using userspace Proxier.")
// LoadBalancerRR is the proxy.LoadBalancer that NewProxier needs; it also
// implements config.EndpointsConfigHandler, so it serves as the endpoints handler too.
loadBalancer := userspace.NewLoadBalancerRR()
// set EndpointsConfigHandler to our loadBalancer
endpointsHandler = loadBalancer
proxierUserspace, err := userspace.NewProxier(
loadBalancer,
net.ParseIP(config.BindAddress),
iptInterface,
*utilnet.ParsePortRangeOrDie(config.PortRange),
config.IPTablesSyncPeriod.Duration,
config.UDPIdleTimeout.Duration,
)
if err != nil {
glog.Fatalf("Unable to create proxier: %v", err)
}
proxier = proxierUserspace
// Remove artifacts from the pure-iptables Proxier.
glog.V(2).Info("Tearing down pure-iptables proxy rules. Errors here are acceptable.")
iptables.CleanupLeftovers(iptInterface)
}
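// Re-sync the active proxier's rules whenever iptables reports that its state was reloaded.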
iptInterface.AddReloadFunc(proxier.Sync)
// Create configs (i.e. Watches for Services and Endpoints)
// Note: RegisterHandler() calls need to happen before creation of Sources because sources
// only notify on changes, and the initial update (on process start) may be lost if no handlers
// are registered yet.
serviceConfig := proxyconfig.NewServiceConfig()
serviceConfig.RegisterHandler(proxier)
endpointsConfig := proxyconfig.NewEndpointsConfig()
endpointsConfig.RegisterHandler(endpointsHandler)
proxyconfig.NewSourceAPI(
client,
config.ConfigSyncPeriod,
serviceConfig.Channel("api"),
endpointsConfig.Channel("api"),
)
config.NodeRef = &api.ObjectReference{
Kind: "Node",
Name: hostname,
UID: types.UID(hostname),
Namespace: "",
}
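// Use the real conntrack implementation; Run applies the configured conntrack limits through it.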
conntracker := realConntracker{}
return NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker)
}
// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).
func (s *ProxyServer) Run() error {
// Remove iptables rules and exit.
if s.Config.CleanupAndExit {
encounteredError := userspace.CleanupLeftovers(s.IptInterface)
encounteredError = iptables.CleanupLeftovers(s.IptInterface) || encounteredError
if encounteredError {
return errors.New("encountered an error while tearing down rules")
}
return nil
}
s.Broadcaster.StartRecordingToSink(s.Client.Events(""))
// Start up Healthz service if requested
if s.Config.HealthzPort > 0 {
go util.Until(func() {
err := http.ListenAndServe(s.Config.HealthzBindAddress+":"+strconv.Itoa(s.Config.HealthzPort), nil)
if err != nil {
glog.Errorf("Starting health server failed: %v", err)
}
}, 5*time.Second, util.NeverStop)
}
// Tune conntrack, if requested
if s.Conntracker != nil {
if s.Config.ConntrackMax > 0 {
if err := s.Conntracker.SetMax(s.Config.ConntrackMax); err != nil {
return err
}
}
if s.Config.ConntrackTCPEstablishedTimeout.Duration > 0 {
if err := s.Conntracker.SetTCPEstablishedTimeout(int(s.Config.ConntrackTCPEstablishedTimeout.Duration / time.Second)); err != nil {
return err
}
}
}
// Emit a birth cry once startup has succeeded.
s.birthCry()
// Just loop forever for now...
s.Proxier.SyncLoop()
return nil
}
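// nodeGetter abstracts the single client call getProxyMode needs, which keeps it testable.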
type nodeGetter interface {
Get(hostname string) (*api.Node, error)
}
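// getProxyMode chooses a proxy mode: an explicit flag value wins; otherwise the node's
// proxy-mode annotations are consulted, and iptables is used whenever the host supports it.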
func getProxyMode(proxyMode string, client nodeGetter, hostname string, iptver iptables.IptablesVersioner) string {
if proxyMode == proxyModeUserspace {
return proxyModeUserspace
} else if proxyMode == proxyModeIptables {
return tryIptablesProxy(iptver)
} else if proxyMode != "" {
glog.V(1).Infof("Flag proxy-mode=%q unknown, assuming iptables proxy", proxyMode)
return tryIptablesProxy(iptver)
}
// proxyMode == "" - choose the best option.
if client == nil {
glog.Errorf("nodeGetter is nil: assuming iptables proxy")
return tryIptablesProxy(iptver)
}
node, err := client.Get(hostname)
if err != nil {
glog.Errorf("Can't get Node %q, assuming iptables proxy: %v", hostname, err)
return tryIptablesProxy(iptver)
}
if node == nil {
glog.Errorf("Got nil Node %q, assuming iptables proxy: %v", hostname)
return tryIptablesProxy(iptver)
}
proxyMode, found := node.Annotations[betaProxyModeAnnotation]
if found {
glog.V(1).Infof("Found beta annotation %q = %q", betaProxyModeAnnotation, proxyMode)
} else {
// We already published some information about this annotation with the "experimental" name, so we will respect it.
proxyMode, found = node.Annotations[experimentalProxyModeAnnotation]
if found {
glog.V(1).Infof("Found experimental annotation %q = %q", experimentalProxyModeAnnotation, proxyMode)
}
}
if proxyMode == proxyModeUserspace {
glog.V(1).Infof("Annotation demands userspace proxy")
return proxyModeUserspace
}
return tryIptablesProxy(iptver)
}
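// tryIptablesProxy returns the iptables mode if the installed iptables supports it,
// and falls back to userspace otherwise.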
func tryIptablesProxy(iptver iptables.IptablesVersioner) string {
// useIptablesProxy is guaranteed false on error; err matters only for debugging.
useIptablesProxy, err := iptables.CanUseIptablesProxier(iptver)
if err != nil {
glog.Errorf("Can't determine whether to use iptables proxy, using userspace proxier: %v", err)
return proxyModeUserspace
}
if useIptablesProxy {
return proxyModeIptables
}
// Fallback.
glog.V(1).Infof("Can't use iptables proxy, using userspace proxier")
return proxyModeUserspace
}
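// birthCry records a "Starting" event against this node so startup is visible via the API.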
func (s *ProxyServer) birthCry() {
s.Recorder.Eventf(s.Config.NodeRef, api.EventTypeNormal, "Starting", "Starting kube-proxy.")
}
| cmd/kube-proxy/app/server.go | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.09477411210536957,
0.008191530592739582,
0.00016596059140283614,
0.0006638697232119739,
0.018144141882658005
] |
{
"id": 10,
"code_window": [
"}\n",
"\n",
"// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables'\n",
"// (experimental). If blank, look at the Node object on the Kubernetes API and respect the\n",
"// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"// (newer, faster). If blank, look at the Node object on the Kubernetes API and respect the\n"
],
"file_path": "pkg/apis/componentconfig/types.go",
"type": "replace",
"edit_start_line_idx": 65
} | // Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package static
const jqueryJs = `
/*! jQuery v1.10.2 | (c) 2005, 2013 jQuery Foundation, Inc. | jquery.org/license
//@ sourceMappingURL=jquery.min.map
*/
(function(e,t){var n,r,i=typeof t,o=e.location,a=e.document,s=a.documentElement,l=e.jQuery,u=e.$,c={},p=[],f="1.10.2",d=p.concat,h=p.push,g=p.slice,m=p.indexOf,y=c.toString,v=c.hasOwnProperty,b=f.trim,x=function(e,t){return new x.fn.init(e,t,r)},w=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,T=/\S+/g,C=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,N=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,k=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,E=/^[\],:{}\s]*$/,S=/(?:^|:|,)(?:\s*\[)+/g,A=/\\(?:["\\\/bfnrt]|u[\da-fA-F]{4})/g,j=/"[^"\\\r\n]*"|true|false|null|-?(?:\d+\.|)\d+(?:[eE][+-]?\d+|)/g,D=/^-ms-/,L=/-([\da-z])/gi,H=function(e,t){return t.toUpperCase()},q=function(e){(a.addEventListener||"load"===e.type||"complete"===a.readyState)&&(_(),x.ready())},_=function(){a.addEventListener?(a.removeEventListener("DOMContentLoaded",q,!1),e.removeEventListener("load",q,!1)):(a.detachEvent("onreadystatechange",q),e.detachEvent("onload",q))};x.fn=x.prototype={jquery:f,constructor:x,init:function(e,n,r){var i,o;if(!e)return this;if("string"==typeof e){if(i="<"===e.charAt(0)&&">"===e.charAt(e.length-1)&&e.length>=3?[null,e,null]:N.exec(e),!i||!i[1]&&n)return!n||n.jquery?(n||r).find(e):this.constructor(n).find(e);if(i[1]){if(n=n instanceof x?n[0]:n,x.merge(this,x.parseHTML(i[1],n&&n.nodeType?n.ownerDocument||n:a,!0)),k.test(i[1])&&x.isPlainObject(n))for(i in n)x.isFunction(this[i])?this[i](n[i]):this.attr(i,n[i]);return this}if(o=a.getElementById(i[2]),o&&o.parentNode){if(o.id!==i[2])return r.find(e);this.length=1,this[0]=o}return this.context=a,this.selector=e,this}return e.nodeType?(this.context=this[0]=e,this.length=1,this):x.isFunction(e)?r.ready(e):(e.selector!==t&&(this.selector=e.selector,this.context=e.context),x.makeArray(e,this))},selector:"",length:0,toArray:function(){return g.call(this)},get:function(e){return null==e?this.toArray():0>e?this[this.length+e]:this[e]},pushStack:function(e){var t=x.merge(this.constructor(),e);return t.prevObject=this,t.context=this.context,t},each:function(e,t){return x.each(this,e,t)},ready:function(e){return x.ready.promise().done(e),this},slice:function(){return this.pushStack(g.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(0>e?t:0);return this.pushStack(n>=0&&t>n?[this[n]]:[])},map:function(e){return this.pushStack(x.map(this,function(t,n){return e.call(t,n,t)}))},end:function(){return this.prevObject||this.constructor(null)},push:h,sort:[].sort,splice:[].splice},x.fn.init.prototype=x.fn,x.extend=x.fn.extend=function(){var e,n,r,i,o,a,s=arguments[0]||{},l=1,u=arguments.length,c=!1;for("boolean"==typeof s&&(c=s,s=arguments[1]||{},l=2),"object"==typeof s||x.isFunction(s)||(s={}),u===l&&(s=this,--l);u>l;l++)if(null!=(o=arguments[l]))for(i in o)e=s[i],r=o[i],s!==r&&(c&&r&&(x.isPlainObject(r)||(n=x.isArray(r)))?(n?(n=!1,a=e&&x.isArray(e)?e:[]):a=e&&x.isPlainObject(e)?e:{},s[i]=x.extend(c,a,r)):r!==t&&(s[i]=r));return s},x.extend({expando:"jQuery"+(f+Math.random()).replace(/\D/g,""),noConflict:function(t){return e.$===x&&(e.$=u),t&&e.jQuery===x&&(e.jQuery=l),x},isReady:!1,readyWait:1,holdReady:function(e){e?x.readyWait++:x.ready(!0)},ready:function(e){if(e===!0?!--x.readyWait:!x.isReady){if(!a.body)return setTimeout(x.ready);x.isReady=!0,e!==!0&&--x.readyWait>0||(n.resolveWith(a,[x]),x.fn.trigger&&x(a).trigger("ready").off("ready"))}},isFunction:function(e){return"function"===x.type(e)},isArray:Array.isArray||function(e){return"array"===x.type(e)},isWindow:function(e){return 
null!=e&&e==e.window},isNumeric:function(e){return!isNaN(parseFloat(e))&&isFinite(e)},type:function(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?c[y.call(e)]||"object":typeof e},isPlainObject:function(e){var n;if(!e||"object"!==x.type(e)||e.nodeType||x.isWindow(e))return!1;try{if(e.constructor&&!v.call(e,"constructor")&&!v.call(e.constructor.prototype,"isPrototypeOf"))return!1}catch(r){return!1}if(x.support.ownLast)for(n in e)return v.call(e,n);for(n in e);return n===t||v.call(e,n)},isEmptyObject:function(e){var t;for(t in e)return!1;return!0},error:function(e){throw Error(e)},parseHTML:function(e,t,n){if(!e||"string"!=typeof e)return null;"boolean"==typeof t&&(n=t,t=!1),t=t||a;var r=k.exec(e),i=!n&&[];return r?[t.createElement(r[1])]:(r=x.buildFragment([e],t,i),i&&x(i).remove(),x.merge([],r.childNodes))},parseJSON:function(n){return e.JSON&&e.JSON.parse?e.JSON.parse(n):null===n?n:"string"==typeof n&&(n=x.trim(n),n&&E.test(n.replace(A,"@").replace(j,"]").replace(S,"")))?Function("return "+n)():(x.error("Invalid JSON: "+n),t)},parseXML:function(n){var r,i;if(!n||"string"!=typeof n)return null;try{e.DOMParser?(i=new DOMParser,r=i.parseFromString(n,"text/xml")):(r=new ActiveXObject("Microsoft.XMLDOM"),r.async="false",r.loadXML(n))}catch(o){r=t}return r&&r.documentElement&&!r.getElementsByTagName("parsererror").length||x.error("Invalid XML: "+n),r},noop:function(){},globalEval:function(t){t&&x.trim(t)&&(e.execScript||function(t){e.eval.call(e,t)})(t)},camelCase:function(e){return e.replace(D,"ms-").replace(L,H)},nodeName:function(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()},each:function(e,t,n){var r,i=0,o=e.length,a=M(e);if(n){if(a){for(;o>i;i++)if(r=t.apply(e[i],n),r===!1)break}else for(i in e)if(r=t.apply(e[i],n),r===!1)break}else if(a){for(;o>i;i++)if(r=t.call(e[i],i,e[i]),r===!1)break}else for(i in e)if(r=t.call(e[i],i,e[i]),r===!1)break;return e},trim:b&&!b.call("\ufeff\u00a0")?function(e){return null==e?"":b.call(e)}:function(e){return null==e?"":(e+"").replace(C,"")},makeArray:function(e,t){var n=t||[];return null!=e&&(M(Object(e))?x.merge(n,"string"==typeof e?[e]:e):h.call(n,e)),n},inArray:function(e,t,n){var r;if(t){if(m)return m.call(t,e,n);for(r=t.length,n=n?0>n?Math.max(0,r+n):n:0;r>n;n++)if(n in t&&t[n]===e)return n}return-1},merge:function(e,n){var r=n.length,i=e.length,o=0;if("number"==typeof r)for(;r>o;o++)e[i++]=n[o];else while(n[o]!==t)e[i++]=n[o++];return e.length=i,e},grep:function(e,t,n){var r,i=[],o=0,a=e.length;for(n=!!n;a>o;o++)r=!!t(e[o],o),n!==r&&i.push(e[o]);return i},map:function(e,t,n){var r,i=0,o=e.length,a=M(e),s=[];if(a)for(;o>i;i++)r=t(e[i],i,n),null!=r&&(s[s.length]=r);else for(i in e)r=t(e[i],i,n),null!=r&&(s[s.length]=r);return d.apply([],s)},guid:1,proxy:function(e,n){var r,i,o;return"string"==typeof n&&(o=e[n],n=e,e=o),x.isFunction(e)?(r=g.call(arguments,2),i=function(){return e.apply(n||this,r.concat(g.call(arguments)))},i.guid=e.guid=e.guid||x.guid++,i):t},access:function(e,n,r,i,o,a,s){var l=0,u=e.length,c=null==r;if("object"===x.type(r)){o=!0;for(l in r)x.access(e,n,l,r[l],!0,a,s)}else if(i!==t&&(o=!0,x.isFunction(i)||(s=!0),c&&(s?(n.call(e,i),n=null):(c=n,n=function(e,t,n){return c.call(x(e),n)})),n))for(;u>l;l++)n(e[l],r,s?i:i.call(e[l],l,n(e[l],r)));return o?e:c?n.call(e):u?n(e[0],r):a},now:function(){return(new Date).getTime()},swap:function(e,t,n,r){var i,o,a={};for(o in t)a[o]=e.style[o],e.style[o]=t[o];i=n.apply(e,r||[]);for(o in t)e.style[o]=a[o];return 
i}}),x.ready.promise=function(t){if(!n)if(n=x.Deferred(),"complete"===a.readyState)setTimeout(x.ready);else if(a.addEventListener)a.addEventListener("DOMContentLoaded",q,!1),e.addEventListener("load",q,!1);else{a.attachEvent("onreadystatechange",q),e.attachEvent("onload",q);var r=!1;try{r=null==e.frameElement&&a.documentElement}catch(i){}r&&r.doScroll&&function o(){if(!x.isReady){try{r.doScroll("left")}catch(e){return setTimeout(o,50)}_(),x.ready()}}()}return n.promise(t)},x.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(e,t){c["[object "+t+"]"]=t.toLowerCase()});function M(e){var t=e.length,n=x.type(e);return x.isWindow(e)?!1:1===e.nodeType&&t?!0:"array"===n||"function"!==n&&(0===t||"number"==typeof t&&t>0&&t-1 in e)}r=x(a),function(e,t){var n,r,i,o,a,s,l,u,c,p,f,d,h,g,m,y,v,b="sizzle"+-new Date,w=e.document,T=0,C=0,N=st(),k=st(),E=st(),S=!1,A=function(e,t){return e===t?(S=!0,0):0},j=typeof t,D=1<<31,L={}.hasOwnProperty,H=[],q=H.pop,_=H.push,M=H.push,O=H.slice,F=H.indexOf||function(e){var t=0,n=this.length;for(;n>t;t++)if(this[t]===e)return t;return-1},B="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",P="[\\x20\\t\\r\\n\\f]",R="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",W=R.replace("w","w#"),$="\\["+P+"*("+R+")"+P+"*(?:([*^$|!~]?=)"+P+"*(?:(['\"])((?:\\\\.|[^\\\\])*?)\\3|("+W+")|)|)"+P+"*\\]",I=":("+R+")(?:\\(((['\"])((?:\\\\.|[^\\\\])*?)\\3|((?:\\\\.|[^\\\\()[\\]]|"+$.replace(3,8)+")*)|.*)\\)|)",z=RegExp("^"+P+"+|((?:^|[^\\\\])(?:\\\\.)*)"+P+"+$","g"),X=RegExp("^"+P+"*,"+P+"*"),U=RegExp("^"+P+"*([>+~]|"+P+")"+P+"*"),V=RegExp(P+"*[+~]"),Y=RegExp("="+P+"*([^\\]'\"]*)"+P+"*\\]","g"),J=RegExp(I),G=RegExp("^"+W+"$"),Q={ID:RegExp("^#("+R+")"),CLASS:RegExp("^\\.("+R+")"),TAG:RegExp("^("+R.replace("w","w*")+")"),ATTR:RegExp("^"+$),PSEUDO:RegExp("^"+I),CHILD:RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+P+"*(even|odd|(([+-]|)(\\d*)n|)"+P+"*(?:([+-]|)"+P+"*(\\d+)|))"+P+"*\\)|)","i"),bool:RegExp("^(?:"+B+")$","i"),needsContext:RegExp("^"+P+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+P+"*((?:-\\d)?\\d*)"+P+"*\\)|)(?=[^-]|$)","i")},K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,et=/^(?:input|select|textarea|button)$/i,tt=/^h\d$/i,nt=/'|\\/g,rt=RegExp("\\\\([\\da-f]{1,6}"+P+"?|("+P+")|.)","ig"),it=function(e,t,n){var r="0x"+t-65536;return r!==r||n?t:0>r?String.fromCharCode(r+65536):String.fromCharCode(55296|r>>10,56320|1023&r)};try{M.apply(H=O.call(w.childNodes),w.childNodes),H[w.childNodes.length].nodeType}catch(ot){M={apply:H.length?function(e,t){_.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function at(e,t,n,i){var o,a,s,l,u,c,d,m,y,x;if((t?t.ownerDocument||t:w)!==f&&p(t),t=t||f,n=n||[],!e||"string"!=typeof e)return n;if(1!==(l=t.nodeType)&&9!==l)return[];if(h&&!i){if(o=Z.exec(e))if(s=o[1]){if(9===l){if(a=t.getElementById(s),!a||!a.parentNode)return n;if(a.id===s)return n.push(a),n}else if(t.ownerDocument&&(a=t.ownerDocument.getElementById(s))&&v(t,a)&&a.id===s)return n.push(a),n}else{if(o[2])return M.apply(n,t.getElementsByTagName(e)),n;if((s=o[3])&&r.getElementsByClassName&&t.getElementsByClassName)return M.apply(n,t.getElementsByClassName(s)),n}if(r.qsa&&(!g||!g.test(e))){if(m=d=b,y=t,x=9===l&&e,1===l&&"object"!==t.nodeName.toLowerCase()){c=mt(e),(d=t.getAttribute("id"))?m=d.replace(nt,"\\$&"):t.setAttribute("id",m),m="[id='"+m+"'] 
",u=c.length;while(u--)c[u]=m+yt(c[u]);y=V.test(e)&&t.parentNode||t,x=c.join(",")}if(x)try{return M.apply(n,y.querySelectorAll(x)),n}catch(T){}finally{d||t.removeAttribute("id")}}}return kt(e.replace(z,"$1"),t,n,i)}function st(){var e=[];function t(n,r){return e.push(n+=" ")>o.cacheLength&&delete t[e.shift()],t[n]=r}return t}function lt(e){return e[b]=!0,e}function ut(e){var t=f.createElement("div");try{return!!e(t)}catch(n){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function ct(e,t){var n=e.split("|"),r=e.length;while(r--)o.attrHandle[n[r]]=t}function pt(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&(~t.sourceIndex||D)-(~e.sourceIndex||D);if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function ft(e){return function(t){var n=t.nodeName.toLowerCase();return"input"===n&&t.type===e}}function dt(e){return function(t){var n=t.nodeName.toLowerCase();return("input"===n||"button"===n)&&t.type===e}}function ht(e){return lt(function(t){return t=+t,lt(function(n,r){var i,o=e([],n.length,t),a=o.length;while(a--)n[i=o[a]]&&(n[i]=!(r[i]=n[i]))})})}s=at.isXML=function(e){var t=e&&(e.ownerDocument||e).documentElement;return t?"HTML"!==t.nodeName:!1},r=at.support={},p=at.setDocument=function(e){var n=e?e.ownerDocument||e:w,i=n.defaultView;return n!==f&&9===n.nodeType&&n.documentElement?(f=n,d=n.documentElement,h=!s(n),i&&i.attachEvent&&i!==i.top&&i.attachEvent("onbeforeunload",function(){p()}),r.attributes=ut(function(e){return e.className="i",!e.getAttribute("className")}),r.getElementsByTagName=ut(function(e){return e.appendChild(n.createComment("")),!e.getElementsByTagName("*").length}),r.getElementsByClassName=ut(function(e){return e.innerHTML="<div class='a'></div><div class='a i'></div>",e.firstChild.className="i",2===e.getElementsByClassName("i").length}),r.getById=ut(function(e){return d.appendChild(e).id=b,!n.getElementsByName||!n.getElementsByName(b).length}),r.getById?(o.find.ID=function(e,t){if(typeof t.getElementById!==j&&h){var n=t.getElementById(e);return n&&n.parentNode?[n]:[]}},o.filter.ID=function(e){var t=e.replace(rt,it);return function(e){return e.getAttribute("id")===t}}):(delete o.find.ID,o.filter.ID=function(e){var t=e.replace(rt,it);return function(e){var n=typeof e.getAttributeNode!==j&&e.getAttributeNode("id");return n&&n.value===t}}),o.find.TAG=r.getElementsByTagName?function(e,n){return typeof n.getElementsByTagName!==j?n.getElementsByTagName(e):t}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},o.find.CLASS=r.getElementsByClassName&&function(e,n){return typeof n.getElementsByClassName!==j&&h?n.getElementsByClassName(e):t},m=[],g=[],(r.qsa=K.test(n.querySelectorAll))&&(ut(function(e){e.innerHTML="<select><option selected=''></option></select>",e.querySelectorAll("[selected]").length||g.push("\\["+P+"*(?:value|"+B+")"),e.querySelectorAll(":checked").length||g.push(":checked")}),ut(function(e){var 
t=n.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("t",""),e.querySelectorAll("[t^='']").length&&g.push("[*^$]="+P+"*(?:''|\"\")"),e.querySelectorAll(":enabled").length||g.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),g.push(",.*:")})),(r.matchesSelector=K.test(y=d.webkitMatchesSelector||d.mozMatchesSelector||d.oMatchesSelector||d.msMatchesSelector))&&ut(function(e){r.disconnectedMatch=y.call(e,"div"),y.call(e,"[s!='']:x"),m.push("!=",I)}),g=g.length&&RegExp(g.join("|")),m=m.length&&RegExp(m.join("|")),v=K.test(d.contains)||d.compareDocumentPosition?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},A=d.compareDocumentPosition?function(e,t){if(e===t)return S=!0,0;var i=t.compareDocumentPosition&&e.compareDocumentPosition&&e.compareDocumentPosition(t);return i?1&i||!r.sortDetached&&t.compareDocumentPosition(e)===i?e===n||v(w,e)?-1:t===n||v(w,t)?1:c?F.call(c,e)-F.call(c,t):0:4&i?-1:1:e.compareDocumentPosition?-1:1}:function(e,t){var r,i=0,o=e.parentNode,a=t.parentNode,s=[e],l=[t];if(e===t)return S=!0,0;if(!o||!a)return e===n?-1:t===n?1:o?-1:a?1:c?F.call(c,e)-F.call(c,t):0;if(o===a)return pt(e,t);r=e;while(r=r.parentNode)s.unshift(r);r=t;while(r=r.parentNode)l.unshift(r);while(s[i]===l[i])i++;return i?pt(s[i],l[i]):s[i]===w?-1:l[i]===w?1:0},n):f},at.matches=function(e,t){return at(e,null,null,t)},at.matchesSelector=function(e,t){if((e.ownerDocument||e)!==f&&p(e),t=t.replace(Y,"='$1']"),!(!r.matchesSelector||!h||m&&m.test(t)||g&&g.test(t)))try{var n=y.call(e,t);if(n||r.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(i){}return at(t,f,null,[e]).length>0},at.contains=function(e,t){return(e.ownerDocument||e)!==f&&p(e),v(e,t)},at.attr=function(e,n){(e.ownerDocument||e)!==f&&p(e);var i=o.attrHandle[n.toLowerCase()],a=i&&L.call(o.attrHandle,n.toLowerCase())?i(e,n,!h):t;return a===t?r.attributes||!h?e.getAttribute(n):(a=e.getAttributeNode(n))&&a.specified?a.value:null:a},at.error=function(e){throw Error("Syntax error, unrecognized expression: "+e)},at.uniqueSort=function(e){var t,n=[],i=0,o=0;if(S=!r.detectDuplicates,c=!r.sortStable&&e.slice(0),e.sort(A),S){while(t=e[o++])t===e[o]&&(i=n.push(o));while(i--)e.splice(n[i],1)}return e},a=at.getText=function(e){var t,n="",r=0,i=e.nodeType;if(i){if(1===i||9===i||11===i){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=a(e)}else if(3===i||4===i)return e.nodeValue}else for(;t=e[r];r++)n+=a(t);return n},o=at.selectors={cacheLength:50,createPseudo:lt,match:Q,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(rt,it),e[3]=(e[4]||e[5]||"").replace(rt,it),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||at.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&at.error(e[0]),e},PSEUDO:function(e){var n,r=!e[5]&&e[2];return Q.CHILD.test(e[0])?null:(e[3]&&e[4]!==t?e[2]=e[4]:r&&J.test(r)&&(n=mt(r,!0))&&(n=r.indexOf(")",r.length-n)-r.length)&&(e[0]=e[0].slice(0,n),e[2]=r.slice(0,n)),e.slice(0,3))}},filter:{TAG:function(e){var 
t=e.replace(rt,it).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=N[e+" "];return t||(t=RegExp("(^|"+P+")"+e+"("+P+"|$)"))&&N(e,function(e){return t.test("string"==typeof e.className&&e.className||typeof e.getAttribute!==j&&e.getAttribute("class")||"")})},ATTR:function(e,t,n){return function(r){var i=at.attr(r,e);return null==i?"!="===t:t?(i+="","="===t?i===n:"!="===t?i!==n:"^="===t?n&&0===i.indexOf(n):"*="===t?n&&i.indexOf(n)>-1:"$="===t?n&&i.slice(-n.length)===n:"~="===t?(" "+i+" ").indexOf(n)>-1:"|="===t?i===n||i.slice(0,n.length+1)===n+"-":!1):!0}},CHILD:function(e,t,n,r,i){var o="nth"!==e.slice(0,3),a="last"!==e.slice(-4),s="of-type"===t;return 1===r&&0===i?function(e){return!!e.parentNode}:function(t,n,l){var u,c,p,f,d,h,g=o!==a?"nextSibling":"previousSibling",m=t.parentNode,y=s&&t.nodeName.toLowerCase(),v=!l&&!s;if(m){if(o){while(g){p=t;while(p=p[g])if(s?p.nodeName.toLowerCase()===y:1===p.nodeType)return!1;h=g="only"===e&&!h&&"nextSibling"}return!0}if(h=[a?m.firstChild:m.lastChild],a&&v){c=m[b]||(m[b]={}),u=c[e]||[],d=u[0]===T&&u[1],f=u[0]===T&&u[2],p=d&&m.childNodes[d];while(p=++d&&p&&p[g]||(f=d=0)||h.pop())if(1===p.nodeType&&++f&&p===t){c[e]=[T,d,f];break}}else if(v&&(u=(t[b]||(t[b]={}))[e])&&u[0]===T)f=u[1];else while(p=++d&&p&&p[g]||(f=d=0)||h.pop())if((s?p.nodeName.toLowerCase()===y:1===p.nodeType)&&++f&&(v&&((p[b]||(p[b]={}))[e]=[T,f]),p===t))break;return f-=i,f===r||0===f%r&&f/r>=0}}},PSEUDO:function(e,t){var n,r=o.pseudos[e]||o.setFilters[e.toLowerCase()]||at.error("unsupported pseudo: "+e);return r[b]?r(t):r.length>1?(n=[e,e,"",t],o.setFilters.hasOwnProperty(e.toLowerCase())?lt(function(e,n){var i,o=r(e,t),a=o.length;while(a--)i=F.call(e,o[a]),e[i]=!(n[i]=o[a])}):function(e){return r(e,0,n)}):r}},pseudos:{not:lt(function(e){var t=[],n=[],r=l(e.replace(z,"$1"));return r[b]?lt(function(e,t,n,i){var o,a=r(e,null,i,[]),s=e.length;while(s--)(o=a[s])&&(e[s]=!(t[s]=o))}):function(e,i,o){return t[0]=e,r(t,null,o,n),!n.pop()}}),has:lt(function(e){return function(t){return at(e,t).length>0}}),contains:lt(function(e){return function(t){return(t.textContent||t.innerText||a(t)).indexOf(e)>-1}}),lang:lt(function(e){return G.test(e||"")||at.error("unsupported lang: "+e),e=e.replace(rt,it).toLowerCase(),function(t){var n;do if(n=h?t.lang:t.getAttribute("xml:lang")||t.getAttribute("lang"))return n=n.toLowerCase(),n===e||0===n.indexOf(e+"-");while((t=t.parentNode)&&1===t.nodeType);return!1}}),target:function(t){var n=e.location&&e.location.hash;return n&&n.slice(1)===t.id},root:function(e){return e===d},focus:function(e){return e===f.activeElement&&(!f.hasFocus||f.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:function(e){return e.disabled===!1},disabled:function(e){return e.disabled===!0},checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,e.selected===!0},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeName>"@"||3===e.nodeType||4===e.nodeType)return!1;return!0},parent:function(e){return!o.pseudos.empty(e)},header:function(e){return tt.test(e.nodeName)},input:function(e){return et.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var 
t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||t.toLowerCase()===e.type)},first:ht(function(){return[0]}),last:ht(function(e,t){return[t-1]}),eq:ht(function(e,t,n){return[0>n?n+t:n]}),even:ht(function(e,t){var n=0;for(;t>n;n+=2)e.push(n);return e}),odd:ht(function(e,t){var n=1;for(;t>n;n+=2)e.push(n);return e}),lt:ht(function(e,t,n){var r=0>n?n+t:n;for(;--r>=0;)e.push(r);return e}),gt:ht(function(e,t,n){var r=0>n?n+t:n;for(;t>++r;)e.push(r);return e})}},o.pseudos.nth=o.pseudos.eq;for(n in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})o.pseudos[n]=ft(n);for(n in{submit:!0,reset:!0})o.pseudos[n]=dt(n);function gt(){}gt.prototype=o.filters=o.pseudos,o.setFilters=new gt;function mt(e,t){var n,r,i,a,s,l,u,c=k[e+" "];if(c)return t?0:c.slice(0);s=e,l=[],u=o.preFilter;while(s){(!n||(r=X.exec(s)))&&(r&&(s=s.slice(r[0].length)||s),l.push(i=[])),n=!1,(r=U.exec(s))&&(n=r.shift(),i.push({value:n,type:r[0].replace(z," ")}),s=s.slice(n.length));for(a in o.filter)!(r=Q[a].exec(s))||u[a]&&!(r=u[a](r))||(n=r.shift(),i.push({value:n,type:a,matches:r}),s=s.slice(n.length));if(!n)break}return t?s.length:s?at.error(e):k(e,l).slice(0)}function yt(e){var t=0,n=e.length,r="";for(;n>t;t++)r+=e[t].value;return r}function vt(e,t,n){var r=t.dir,o=n&&"parentNode"===r,a=C++;return t.first?function(t,n,i){while(t=t[r])if(1===t.nodeType||o)return e(t,n,i)}:function(t,n,s){var l,u,c,p=T+" "+a;if(s){while(t=t[r])if((1===t.nodeType||o)&&e(t,n,s))return!0}else while(t=t[r])if(1===t.nodeType||o)if(c=t[b]||(t[b]={}),(u=c[r])&&u[0]===p){if((l=u[1])===!0||l===i)return l===!0}else if(u=c[r]=[p],u[1]=e(t,n,s)||i,u[1]===!0)return!0}}function bt(e){return e.length>1?function(t,n,r){var i=e.length;while(i--)if(!e[i](t,n,r))return!1;return!0}:e[0]}function xt(e,t,n,r,i){var o,a=[],s=0,l=e.length,u=null!=t;for(;l>s;s++)(o=e[s])&&(!n||n(o,r,i))&&(a.push(o),u&&t.push(s));return a}function wt(e,t,n,r,i,o){return r&&!r[b]&&(r=wt(r)),i&&!i[b]&&(i=wt(i,o)),lt(function(o,a,s,l){var u,c,p,f=[],d=[],h=a.length,g=o||Nt(t||"*",s.nodeType?[s]:s,[]),m=!e||!o&&t?g:xt(g,f,e,s,l),y=n?i||(o?e:h||r)?[]:a:m;if(n&&n(m,y,s,l),r){u=xt(y,d),r(u,[],s,l),c=u.length;while(c--)(p=u[c])&&(y[d[c]]=!(m[d[c]]=p))}if(o){if(i||e){if(i){u=[],c=y.length;while(c--)(p=y[c])&&u.push(m[c]=p);i(null,y=[],u,l)}c=y.length;while(c--)(p=y[c])&&(u=i?F.call(o,p):f[c])>-1&&(o[u]=!(a[u]=p))}}else y=xt(y===a?y.splice(h,y.length):y),i?i(null,a,y,l):M.apply(a,y)})}function Tt(e){var t,n,r,i=e.length,a=o.relative[e[0].type],s=a||o.relative[" "],l=a?1:0,c=vt(function(e){return e===t},s,!0),p=vt(function(e){return F.call(t,e)>-1},s,!0),f=[function(e,n,r){return!a&&(r||n!==u)||((t=n).nodeType?c(e,n,r):p(e,n,r))}];for(;i>l;l++)if(n=o.relative[e[l].type])f=[vt(bt(f),n)];else{if(n=o.filter[e[l].type].apply(null,e[l].matches),n[b]){for(r=++l;i>r;r++)if(o.relative[e[r].type])break;return wt(l>1&&bt(f),l>1&&yt(e.slice(0,l-1).concat({value:" "===e[l-2].type?"*":""})).replace(z,"$1"),n,r>l&&Tt(e.slice(l,r)),i>r&&Tt(e=e.slice(r)),i>r&&yt(e))}f.push(n)}return bt(f)}function Ct(e,t){var n=0,r=t.length>0,a=e.length>0,s=function(s,l,c,p,d){var 
h,g,m,y=[],v=0,b="0",x=s&&[],w=null!=d,C=u,N=s||a&&o.find.TAG("*",d&&l.parentNode||l),k=T+=null==C?1:Math.random()||.1;for(w&&(u=l!==f&&l,i=n);null!=(h=N[b]);b++){if(a&&h){g=0;while(m=e[g++])if(m(h,l,c)){p.push(h);break}w&&(T=k,i=++n)}r&&((h=!m&&h)&&v--,s&&x.push(h))}if(v+=b,r&&b!==v){g=0;while(m=t[g++])m(x,y,l,c);if(s){if(v>0)while(b--)x[b]||y[b]||(y[b]=q.call(p));y=xt(y)}M.apply(p,y),w&&!s&&y.length>0&&v+t.length>1&&at.uniqueSort(p)}return w&&(T=k,u=C),x};return r?lt(s):s}l=at.compile=function(e,t){var n,r=[],i=[],o=E[e+" "];if(!o){t||(t=mt(e)),n=t.length;while(n--)o=Tt(t[n]),o[b]?r.push(o):i.push(o);o=E(e,Ct(i,r))}return o};function Nt(e,t,n){var r=0,i=t.length;for(;i>r;r++)at(e,t[r],n);return n}function kt(e,t,n,i){var a,s,u,c,p,f=mt(e);if(!i&&1===f.length){if(s=f[0]=f[0].slice(0),s.length>2&&"ID"===(u=s[0]).type&&r.getById&&9===t.nodeType&&h&&o.relative[s[1].type]){if(t=(o.find.ID(u.matches[0].replace(rt,it),t)||[])[0],!t)return n;e=e.slice(s.shift().value.length)}a=Q.needsContext.test(e)?0:s.length;while(a--){if(u=s[a],o.relative[c=u.type])break;if((p=o.find[c])&&(i=p(u.matches[0].replace(rt,it),V.test(s[0].type)&&t.parentNode||t))){if(s.splice(a,1),e=i.length&&yt(s),!e)return M.apply(n,i),n;break}}}return l(e,f)(i,t,!h,n,V.test(e)),n}r.sortStable=b.split("").sort(A).join("")===b,r.detectDuplicates=S,p(),r.sortDetached=ut(function(e){return 1&e.compareDocumentPosition(f.createElement("div"))}),ut(function(e){return e.innerHTML="<a href='#'></a>","#"===e.firstChild.getAttribute("href")})||ct("type|href|height|width",function(e,n,r){return r?t:e.getAttribute(n,"type"===n.toLowerCase()?1:2)}),r.attributes&&ut(function(e){return e.innerHTML="<input/>",e.firstChild.setAttribute("value",""),""===e.firstChild.getAttribute("value")})||ct("value",function(e,n,r){return r||"input"!==e.nodeName.toLowerCase()?t:e.defaultValue}),ut(function(e){return null==e.getAttribute("disabled")})||ct(B,function(e,n,r){var i;return r?t:(i=e.getAttributeNode(n))&&i.specified?i.value:e[n]===!0?n.toLowerCase():null}),x.find=at,x.expr=at.selectors,x.expr[":"]=x.expr.pseudos,x.unique=at.uniqueSort,x.text=at.getText,x.isXMLDoc=at.isXML,x.contains=at.contains}(e);var O={};function F(e){var t=O[e]={};return x.each(e.match(T)||[],function(e,n){t[n]=!0}),t}x.Callbacks=function(e){e="string"==typeof e?O[e]||F(e):x.extend({},e);var n,r,i,o,a,s,l=[],u=!e.once&&[],c=function(t){for(r=e.memory&&t,i=!0,a=s||0,s=0,o=l.length,n=!0;l&&o>a;a++)if(l[a].apply(t[0],t[1])===!1&&e.stopOnFalse){r=!1;break}n=!1,l&&(u?u.length&&c(u.shift()):r?l=[]:p.disable())},p={add:function(){if(l){var t=l.length;(function i(t){x.each(t,function(t,n){var r=x.type(n);"function"===r?e.unique&&p.has(n)||l.push(n):n&&n.length&&"string"!==r&&i(n)})})(arguments),n?o=l.length:r&&(s=t,c(r))}return this},remove:function(){return l&&x.each(arguments,function(e,t){var r;while((r=x.inArray(t,l,r))>-1)l.splice(r,1),n&&(o>=r&&o--,a>=r&&a--)}),this},has:function(e){return e?x.inArray(e,l)>-1:!(!l||!l.length)},empty:function(){return l=[],o=0,this},disable:function(){return l=u=r=t,this},disabled:function(){return!l},lock:function(){return u=t,r||p.disable(),this},locked:function(){return!u},fireWith:function(e,t){return!l||i&&!u||(t=t||[],t=[e,t.slice?t.slice():t],n?u.push(t):c(t)),this},fire:function(){return p.fireWith(this,arguments),this},fired:function(){return!!i}};return p},x.extend({Deferred:function(e){var t=[["resolve","done",x.Callbacks("once memory"),"resolved"],["reject","fail",x.Callbacks("once 
memory"),"rejected"],["notify","progress",x.Callbacks("memory")]],n="pending",r={state:function(){return n},always:function(){return i.done(arguments).fail(arguments),this},then:function(){var e=arguments;return x.Deferred(function(n){x.each(t,function(t,o){var a=o[0],s=x.isFunction(e[t])&&e[t];i[o[1]](function(){var e=s&&s.apply(this,arguments);e&&x.isFunction(e.promise)?e.promise().done(n.resolve).fail(n.reject).progress(n.notify):n[a+"With"](this===r?n.promise():this,s?[e]:arguments)})}),e=null}).promise()},promise:function(e){return null!=e?x.extend(e,r):r}},i={};return r.pipe=r.then,x.each(t,function(e,o){var a=o[2],s=o[3];r[o[1]]=a.add,s&&a.add(function(){n=s},t[1^e][2].disable,t[2][2].lock),i[o[0]]=function(){return i[o[0]+"With"](this===i?r:this,arguments),this},i[o[0]+"With"]=a.fireWith}),r.promise(i),e&&e.call(i,i),i},when:function(e){var t=0,n=g.call(arguments),r=n.length,i=1!==r||e&&x.isFunction(e.promise)?r:0,o=1===i?e:x.Deferred(),a=function(e,t,n){return function(r){t[e]=this,n[e]=arguments.length>1?g.call(arguments):r,n===s?o.notifyWith(t,n):--i||o.resolveWith(t,n)}},s,l,u;if(r>1)for(s=Array(r),l=Array(r),u=Array(r);r>t;t++)n[t]&&x.isFunction(n[t].promise)?n[t].promise().done(a(t,u,n)).fail(o.reject).progress(a(t,l,s)):--i;return i||o.resolveWith(u,n),o.promise()}}),x.support=function(t){var n,r,o,s,l,u,c,p,f,d=a.createElement("div");if(d.setAttribute("className","t"),d.innerHTML=" <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",n=d.getElementsByTagName("*")||[],r=d.getElementsByTagName("a")[0],!r||!r.style||!n.length)return t;s=a.createElement("select"),u=s.appendChild(a.createElement("option")),o=d.getElementsByTagName("input")[0],r.style.cssText="top:1px;float:left;opacity:.5",t.getSetAttribute="t"!==d.className,t.leadingWhitespace=3===d.firstChild.nodeType,t.tbody=!d.getElementsByTagName("tbody").length,t.htmlSerialize=!!d.getElementsByTagName("link").length,t.style=/top/.test(r.getAttribute("style")),t.hrefNormalized="/a"===r.getAttribute("href"),t.opacity=/^0.5/.test(r.style.opacity),t.cssFloat=!!r.style.cssFloat,t.checkOn=!!o.value,t.optSelected=u.selected,t.enctype=!!a.createElement("form").enctype,t.html5Clone="<:nav></:nav>"!==a.createElement("nav").cloneNode(!0).outerHTML,t.inlineBlockNeedsLayout=!1,t.shrinkWrapBlocks=!1,t.pixelPosition=!1,t.deleteExpando=!0,t.noCloneEvent=!0,t.reliableMarginRight=!0,t.boxSizingReliable=!0,o.checked=!0,t.noCloneChecked=o.cloneNode(!0).checked,s.disabled=!0,t.optDisabled=!u.disabled;try{delete d.test}catch(h){t.deleteExpando=!1}o=a.createElement("input"),o.setAttribute("value",""),t.input=""===o.getAttribute("value"),o.value="t",o.setAttribute("type","radio"),t.radioValue="t"===o.value,o.setAttribute("checked","t"),o.setAttribute("name","t"),l=a.createDocumentFragment(),l.appendChild(o),t.appendChecked=o.checked,t.checkClone=l.cloneNode(!0).cloneNode(!0).lastChild.checked,d.attachEvent&&(d.attachEvent("onclick",function(){t.noCloneEvent=!1}),d.cloneNode(!0).click());for(f in{submit:!0,change:!0,focusin:!0})d.setAttribute(c="on"+f,"t"),t[f+"Bubbles"]=c in e||d.attributes[c].expando===!1;d.style.backgroundClip="content-box",d.cloneNode(!0).style.backgroundClip="",t.clearCloneStyle="content-box"===d.style.backgroundClip;for(f in x(t))break;return t.ownLast="0"!==f,x(function(){var 
n,r,o,s="padding:0;margin:0;border:0;display:block;box-sizing:content-box;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;",l=a.getElementsByTagName("body")[0];l&&(n=a.createElement("div"),n.style.cssText="border:0;width:0;height:0;position:absolute;top:0;left:-9999px;margin-top:1px",l.appendChild(n).appendChild(d),d.innerHTML="<table><tr><td></td><td>t</td></tr></table>",o=d.getElementsByTagName("td"),o[0].style.cssText="padding:0;margin:0;border:0;display:none",p=0===o[0].offsetHeight,o[0].style.display="",o[1].style.display="none",t.reliableHiddenOffsets=p&&0===o[0].offsetHeight,d.innerHTML="",d.style.cssText="box-sizing:border-box;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;",x.swap(l,null!=l.style.zoom?{zoom:1}:{},function(){t.boxSizing=4===d.offsetWidth}),e.getComputedStyle&&(t.pixelPosition="1%"!==(e.getComputedStyle(d,null)||{}).top,t.boxSizingReliable="4px"===(e.getComputedStyle(d,null)||{width:"4px"}).width,r=d.appendChild(a.createElement("div")),r.style.cssText=d.style.cssText=s,r.style.marginRight=r.style.width="0",d.style.width="1px",t.reliableMarginRight=!parseFloat((e.getComputedStyle(r,null)||{}).marginRight)),typeof d.style.zoom!==i&&(d.innerHTML="",d.style.cssText=s+"width:1px;padding:1px;display:inline;zoom:1",t.inlineBlockNeedsLayout=3===d.offsetWidth,d.style.display="block",d.innerHTML="<div></div>",d.firstChild.style.width="5px",t.shrinkWrapBlocks=3!==d.offsetWidth,t.inlineBlockNeedsLayout&&(l.style.zoom=1)),l.removeChild(n),n=d=o=r=null)}),n=s=l=u=r=o=null,t
}({});var B=/(?:\{[\s\S]*\}|\[[\s\S]*\])$/,P=/([A-Z])/g;function R(e,n,r,i){if(x.acceptData(e)){var o,a,s=x.expando,l=e.nodeType,u=l?x.cache:e,c=l?e[s]:e[s]&&s;if(c&&u[c]&&(i||u[c].data)||r!==t||"string"!=typeof n)return c||(c=l?e[s]=p.pop()||x.guid++:s),u[c]||(u[c]=l?{}:{toJSON:x.noop}),("object"==typeof n||"function"==typeof n)&&(i?u[c]=x.extend(u[c],n):u[c].data=x.extend(u[c].data,n)),a=u[c],i||(a.data||(a.data={}),a=a.data),r!==t&&(a[x.camelCase(n)]=r),"string"==typeof n?(o=a[n],null==o&&(o=a[x.camelCase(n)])):o=a,o}}function W(e,t,n){if(x.acceptData(e)){var r,i,o=e.nodeType,a=o?x.cache:e,s=o?e[x.expando]:x.expando;if(a[s]){if(t&&(r=n?a[s]:a[s].data)){x.isArray(t)?t=t.concat(x.map(t,x.camelCase)):t in r?t=[t]:(t=x.camelCase(t),t=t in r?[t]:t.split(" ")),i=t.length;while(i--)delete r[t[i]];if(n?!I(r):!x.isEmptyObject(r))return}(n||(delete a[s].data,I(a[s])))&&(o?x.cleanData([e],!0):x.support.deleteExpando||a!=a.window?delete a[s]:a[s]=null)}}}x.extend({cache:{},noData:{applet:!0,embed:!0,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"},hasData:function(e){return e=e.nodeType?x.cache[e[x.expando]]:e[x.expando],!!e&&!I(e)},data:function(e,t,n){return R(e,t,n)},removeData:function(e,t){return W(e,t)},_data:function(e,t,n){return R(e,t,n,!0)},_removeData:function(e,t){return W(e,t,!0)},acceptData:function(e){if(e.nodeType&&1!==e.nodeType&&9!==e.nodeType)return!1;var t=e.nodeName&&x.noData[e.nodeName.toLowerCase()];return!t||t!==!0&&e.getAttribute("classid")===t}}),x.fn.extend({data:function(e,n){var r,i,o=null,a=0,s=this[0];if(e===t){if(this.length&&(o=x.data(s),1===s.nodeType&&!x._data(s,"parsedAttrs"))){for(r=s.attributes;r.length>a;a++)i=r[a].name,0===i.indexOf("data-")&&(i=x.camelCase(i.slice(5)),$(s,i,o[i]));x._data(s,"parsedAttrs",!0)}return o}return"object"==typeof e?this.each(function(){x.data(this,e)}):arguments.length>1?this.each(function(){x.data(this,e,n)}):s?$(s,e,x.data(s,e)):null},removeData:function(e){return this.each(function(){x.removeData(this,e)})}});function $(e,n,r){if(r===t&&1===e.nodeType){var i="data-"+n.replace(P,"-$1").toLowerCase();if(r=e.getAttribute(i),"string"==typeof r){try{r="true"===r?!0:"false"===r?!1:"null"===r?null:+r+""===r?+r:B.test(r)?x.parseJSON(r):r}catch(o){}x.data(e,n,r)}else r=t}return r}function I(e){var t;for(t in e)if(("data"!==t||!x.isEmptyObject(e[t]))&&"toJSON"!==t)return!1;return!0}x.extend({queue:function(e,n,r){var i;return e?(n=(n||"fx")+"queue",i=x._data(e,n),r&&(!i||x.isArray(r)?i=x._data(e,n,x.makeArray(r)):i.push(r)),i||[]):t},dequeue:function(e,t){t=t||"fx";var n=x.queue(e,t),r=n.length,i=n.shift(),o=x._queueHooks(e,t),a=function(){x.dequeue(e,t)};"inprogress"===i&&(i=n.shift(),r--),i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,a,o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return x._data(e,n)||x._data(e,n,{empty:x.Callbacks("once memory").add(function(){x._removeData(e,t+"queue"),x._removeData(e,n)})})}}),x.fn.extend({queue:function(e,n){var r=2;return"string"!=typeof e&&(n=e,e="fx",r--),r>arguments.length?x.queue(this[0],e):n===t?this:this.each(function(){var t=x.queue(this,e,n);x._queueHooks(this,e),"fx"===e&&"inprogress"!==t[0]&&x.dequeue(this,e)})},dequeue:function(e){return this.each(function(){x.dequeue(this,e)})},delay:function(e,t){return e=x.fx?x.fx.speeds[e]||e:e,t=t||"fx",this.queue(t,function(t,n){var r=setTimeout(t,e);n.stop=function(){clearTimeout(r)}})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,n){var 
r,i=1,o=x.Deferred(),a=this,s=this.length,l=function(){--i||o.resolveWith(a,[a])};"string"!=typeof e&&(n=e,e=t),e=e||"fx";while(s--)r=x._data(a[s],e+"queueHooks"),r&&r.empty&&(i++,r.empty.add(l));return l(),o.promise(n)}});var z,X,U=/[\t\r\n\f]/g,V=/\r/g,Y=/^(?:input|select|textarea|button|object)$/i,J=/^(?:a|area)$/i,G=/^(?:checked|selected)$/i,Q=x.support.getSetAttribute,K=x.support.input;x.fn.extend({attr:function(e,t){return x.access(this,x.attr,e,t,arguments.length>1)},removeAttr:function(e){return this.each(function(){x.removeAttr(this,e)})},prop:function(e,t){return x.access(this,x.prop,e,t,arguments.length>1)},removeProp:function(e){return e=x.propFix[e]||e,this.each(function(){try{this[e]=t,delete this[e]}catch(n){}})},addClass:function(e){var t,n,r,i,o,a=0,s=this.length,l="string"==typeof e&&e;if(x.isFunction(e))return this.each(function(t){x(this).addClass(e.call(this,t,this.className))});if(l)for(t=(e||"").match(T)||[];s>a;a++)if(n=this[a],r=1===n.nodeType&&(n.className?(" "+n.className+" ").replace(U," "):" ")){o=0;while(i=t[o++])0>r.indexOf(" "+i+" ")&&(r+=i+" ");n.className=x.trim(r)}return this},removeClass:function(e){var t,n,r,i,o,a=0,s=this.length,l=0===arguments.length||"string"==typeof e&&e;if(x.isFunction(e))return this.each(function(t){x(this).removeClass(e.call(this,t,this.className))});if(l)for(t=(e||"").match(T)||[];s>a;a++)if(n=this[a],r=1===n.nodeType&&(n.className?(" "+n.className+" ").replace(U," "):"")){o=0;while(i=t[o++])while(r.indexOf(" "+i+" ")>=0)r=r.replace(" "+i+" "," ");n.className=e?x.trim(r):""}return this},toggleClass:function(e,t){var n=typeof e;return"boolean"==typeof t&&"string"===n?t?this.addClass(e):this.removeClass(e):x.isFunction(e)?this.each(function(n){x(this).toggleClass(e.call(this,n,this.className,t),t)}):this.each(function(){if("string"===n){var t,r=0,o=x(this),a=e.match(T)||[];while(t=a[r++])o.hasClass(t)?o.removeClass(t):o.addClass(t)}else(n===i||"boolean"===n)&&(this.className&&x._data(this,"__className__",this.className),this.className=this.className||e===!1?"":x._data(this,"__className__")||"")})},hasClass:function(e){var t=" "+e+" ",n=0,r=this.length;for(;r>n;n++)if(1===this[n].nodeType&&(" "+this[n].className+" ").replace(U," ").indexOf(t)>=0)return!0;return!1},val:function(e){var n,r,i,o=this[0];{if(arguments.length)return i=x.isFunction(e),this.each(function(n){var o;1===this.nodeType&&(o=i?e.call(this,n,x(this).val()):e,null==o?o="":"number"==typeof o?o+="":x.isArray(o)&&(o=x.map(o,function(e){return null==e?"":e+""})),r=x.valHooks[this.type]||x.valHooks[this.nodeName.toLowerCase()],r&&"set"in r&&r.set(this,o,"value")!==t||(this.value=o))});if(o)return r=x.valHooks[o.type]||x.valHooks[o.nodeName.toLowerCase()],r&&"get"in r&&(n=r.get(o,"value"))!==t?n:(n=o.value,"string"==typeof n?n.replace(V,""):null==n?"":n)}}}),x.extend({valHooks:{option:{get:function(e){var t=x.find.attr(e,"value");return null!=t?t:e.text}},select:{get:function(e){var t,n,r=e.options,i=e.selectedIndex,o="select-one"===e.type||0>i,a=o?null:[],s=o?i+1:r.length,l=0>i?s:o?i:0;for(;s>l;l++)if(n=r[l],!(!n.selected&&l!==i||(x.support.optDisabled?n.disabled:null!==n.getAttribute("disabled"))||n.parentNode.disabled&&x.nodeName(n.parentNode,"optgroup"))){if(t=x(n).val(),o)return t;a.push(t)}return a},set:function(e,t){var n,r,i=e.options,o=x.makeArray(t),a=i.length;while(a--)r=i[a],(r.selected=x.inArray(x(r).val(),o)>=0)&&(n=!0);return n||(e.selectedIndex=-1),o}}},attr:function(e,n,r){var o,a,s=e.nodeType;if(e&&3!==s&&8!==s&&2!==s)return typeof 
e.getAttribute===i?x.prop(e,n,r):(1===s&&x.isXMLDoc(e)||(n=n.toLowerCase(),o=x.attrHooks[n]||(x.expr.match.bool.test(n)?X:z)),r===t?o&&"get"in o&&null!==(a=o.get(e,n))?a:(a=x.find.attr(e,n),null==a?t:a):null!==r?o&&"set"in o&&(a=o.set(e,r,n))!==t?a:(e.setAttribute(n,r+""),r):(x.removeAttr(e,n),t))},removeAttr:function(e,t){var n,r,i=0,o=t&&t.match(T);if(o&&1===e.nodeType)while(n=o[i++])r=x.propFix[n]||n,x.expr.match.bool.test(n)?K&&Q||!G.test(n)?e[r]=!1:e[x.camelCase("default-"+n)]=e[r]=!1:x.attr(e,n,""),e.removeAttribute(Q?n:r)},attrHooks:{type:{set:function(e,t){if(!x.support.radioValue&&"radio"===t&&x.nodeName(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},propFix:{"for":"htmlFor","class":"className"},prop:function(e,n,r){var i,o,a,s=e.nodeType;if(e&&3!==s&&8!==s&&2!==s)return a=1!==s||!x.isXMLDoc(e),a&&(n=x.propFix[n]||n,o=x.propHooks[n]),r!==t?o&&"set"in o&&(i=o.set(e,r,n))!==t?i:e[n]=r:o&&"get"in o&&null!==(i=o.get(e,n))?i:e[n]},propHooks:{tabIndex:{get:function(e){var t=x.find.attr(e,"tabindex");return t?parseInt(t,10):Y.test(e.nodeName)||J.test(e.nodeName)&&e.href?0:-1}}}}),X={set:function(e,t,n){return t===!1?x.removeAttr(e,n):K&&Q||!G.test(n)?e.setAttribute(!Q&&x.propFix[n]||n,n):e[x.camelCase("default-"+n)]=e[n]=!0,n}},x.each(x.expr.match.bool.source.match(/\w+/g),function(e,n){var r=x.expr.attrHandle[n]||x.find.attr;x.expr.attrHandle[n]=K&&Q||!G.test(n)?function(e,n,i){var o=x.expr.attrHandle[n],a=i?t:(x.expr.attrHandle[n]=t)!=r(e,n,i)?n.toLowerCase():null;return x.expr.attrHandle[n]=o,a}:function(e,n,r){return r?t:e[x.camelCase("default-"+n)]?n.toLowerCase():null}}),K&&Q||(x.attrHooks.value={set:function(e,n,r){return x.nodeName(e,"input")?(e.defaultValue=n,t):z&&z.set(e,n,r)}}),Q||(z={set:function(e,n,r){var i=e.getAttributeNode(r);return i||e.setAttributeNode(i=e.ownerDocument.createAttribute(r)),i.value=n+="","value"===r||n===e.getAttribute(r)?n:t}},x.expr.attrHandle.id=x.expr.attrHandle.name=x.expr.attrHandle.coords=function(e,n,r){var i;return r?t:(i=e.getAttributeNode(n))&&""!==i.value?i.value:null},x.valHooks.button={get:function(e,n){var r=e.getAttributeNode(n);return r&&r.specified?r.value:t},set:z.set},x.attrHooks.contenteditable={set:function(e,t,n){z.set(e,""===t?!1:t,n)}},x.each(["width","height"],function(e,n){x.attrHooks[n]={set:function(e,r){return""===r?(e.setAttribute(n,"auto"),r):t}}})),x.support.hrefNormalized||x.each(["href","src"],function(e,t){x.propHooks[t]={get:function(e){return e.getAttribute(t,4)}}}),x.support.style||(x.attrHooks.style={get:function(e){return e.style.cssText||t},set:function(e,t){return e.style.cssText=t+""}}),x.support.optSelected||(x.propHooks.selected={get:function(e){var t=e.parentNode;return t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex),null}}),x.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){x.propFix[this.toLowerCase()]=this}),x.support.enctype||(x.propFix.enctype="encoding"),x.each(["radio","checkbox"],function(){x.valHooks[this]={set:function(e,n){return x.isArray(n)?e.checked=x.inArray(x(e).val(),n)>=0:t}},x.support.checkOn||(x.valHooks[this].get=function(e){return null===e.getAttribute("value")?"on":e.value})});var Z=/^(?:input|select|textarea)$/i,et=/^key/,tt=/^(?:mouse|contextmenu)|click/,nt=/^(?:focusinfocus|focusoutblur)$/,rt=/^([^.]*)(?:\.(.+)|)$/;function it(){return!0}function ot(){return!1}function at(){try{return 
a.activeElement}catch(e){}}x.event={global:{},add:function(e,n,r,o,a){var s,l,u,c,p,f,d,h,g,m,y,v=x._data(e);if(v){r.handler&&(c=r,r=c.handler,a=c.selector),r.guid||(r.guid=x.guid++),(l=v.events)||(l=v.events={}),(f=v.handle)||(f=v.handle=function(e){return typeof x===i||e&&x.event.triggered===e.type?t:x.event.dispatch.apply(f.elem,arguments)},f.elem=e),n=(n||"").match(T)||[""],u=n.length;while(u--)s=rt.exec(n[u])||[],g=y=s[1],m=(s[2]||"").split(".").sort(),g&&(p=x.event.special[g]||{},g=(a?p.delegateType:p.bindType)||g,p=x.event.special[g]||{},d=x.extend({type:g,origType:y,data:o,handler:r,guid:r.guid,selector:a,needsContext:a&&x.expr.match.needsContext.test(a),namespace:m.join(".")},c),(h=l[g])||(h=l[g]=[],h.delegateCount=0,p.setup&&p.setup.call(e,o,m,f)!==!1||(e.addEventListener?e.addEventListener(g,f,!1):e.attachEvent&&e.attachEvent("on"+g,f))),p.add&&(p.add.call(e,d),d.handler.guid||(d.handler.guid=r.guid)),a?h.splice(h.delegateCount++,0,d):h.push(d),x.event.global[g]=!0);e=null}},remove:function(e,t,n,r,i){var o,a,s,l,u,c,p,f,d,h,g,m=x.hasData(e)&&x._data(e);if(m&&(c=m.events)){t=(t||"").match(T)||[""],u=t.length;while(u--)if(s=rt.exec(t[u])||[],d=g=s[1],h=(s[2]||"").split(".").sort(),d){p=x.event.special[d]||{},d=(r?p.delegateType:p.bindType)||d,f=c[d]||[],s=s[2]&&RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),l=o=f.length;while(o--)a=f[o],!i&&g!==a.origType||n&&n.guid!==a.guid||s&&!s.test(a.namespace)||r&&r!==a.selector&&("**"!==r||!a.selector)||(f.splice(o,1),a.selector&&f.delegateCount--,p.remove&&p.remove.call(e,a));l&&!f.length&&(p.teardown&&p.teardown.call(e,h,m.handle)!==!1||x.removeEvent(e,d,m.handle),delete c[d])}else for(d in c)x.event.remove(e,d+t[u],n,r,!0);x.isEmptyObject(c)&&(delete m.handle,x._removeData(e,"events"))}},trigger:function(n,r,i,o){var s,l,u,c,p,f,d,h=[i||a],g=v.call(n,"type")?n.type:n,m=v.call(n,"namespace")?n.namespace.split("."):[];if(u=f=i=i||a,3!==i.nodeType&&8!==i.nodeType&&!nt.test(g+x.event.triggered)&&(g.indexOf(".")>=0&&(m=g.split("."),g=m.shift(),m.sort()),l=0>g.indexOf(":")&&"on"+g,n=n[x.expando]?n:new x.Event(g,"object"==typeof n&&n),n.isTrigger=o?2:3,n.namespace=m.join("."),n.namespace_re=n.namespace?RegExp("(^|\\.)"+m.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,n.result=t,n.target||(n.target=i),r=null==r?[n]:x.makeArray(r,[n]),p=x.event.special[g]||{},o||!p.trigger||p.trigger.apply(i,r)!==!1)){if(!o&&!p.noBubble&&!x.isWindow(i)){for(c=p.delegateType||g,nt.test(c+g)||(u=u.parentNode);u;u=u.parentNode)h.push(u),f=u;f===(i.ownerDocument||a)&&h.push(f.defaultView||f.parentWindow||e)}d=0;while((u=h[d++])&&!n.isPropagationStopped())n.type=d>1?c:p.bindType||g,s=(x._data(u,"events")||{})[n.type]&&x._data(u,"handle"),s&&s.apply(u,r),s=l&&u[l],s&&x.acceptData(u)&&s.apply&&s.apply(u,r)===!1&&n.preventDefault();if(n.type=g,!o&&!n.isDefaultPrevented()&&(!p._default||p._default.apply(h.pop(),r)===!1)&&x.acceptData(i)&&l&&i[g]&&!x.isWindow(i)){f=i[l],f&&(i[l]=null),x.event.triggered=g;try{i[g]()}catch(y){}x.event.triggered=t,f&&(i[l]=f)}return n.result}},dispatch:function(e){e=x.event.fix(e);var 
n,r,i,o,a,s=[],l=g.call(arguments),u=(x._data(this,"events")||{})[e.type]||[],c=x.event.special[e.type]||{};if(l[0]=e,e.delegateTarget=this,!c.preDispatch||c.preDispatch.call(this,e)!==!1){s=x.event.handlers.call(this,e,u),n=0;while((o=s[n++])&&!e.isPropagationStopped()){e.currentTarget=o.elem,a=0;while((i=o.handlers[a++])&&!e.isImmediatePropagationStopped())(!e.namespace_re||e.namespace_re.test(i.namespace))&&(e.handleObj=i,e.data=i.data,r=((x.event.special[i.origType]||{}).handle||i.handler).apply(o.elem,l),r!==t&&(e.result=r)===!1&&(e.preventDefault(),e.stopPropagation()))}return c.postDispatch&&c.postDispatch.call(this,e),e.result}},handlers:function(e,n){var r,i,o,a,s=[],l=n.delegateCount,u=e.target;if(l&&u.nodeType&&(!e.button||"click"!==e.type))for(;u!=this;u=u.parentNode||this)if(1===u.nodeType&&(u.disabled!==!0||"click"!==e.type)){for(o=[],a=0;l>a;a++)i=n[a],r=i.selector+" ",o[r]===t&&(o[r]=i.needsContext?x(r,this).index(u)>=0:x.find(r,this,null,[u]).length),o[r]&&o.push(i);o.length&&s.push({elem:u,handlers:o})}return n.length>l&&s.push({elem:this,handlers:n.slice(l)}),s},fix:function(e){if(e[x.expando])return e;var t,n,r,i=e.type,o=e,s=this.fixHooks[i];s||(this.fixHooks[i]=s=tt.test(i)?this.mouseHooks:et.test(i)?this.keyHooks:{}),r=s.props?this.props.concat(s.props):this.props,e=new x.Event(o),t=r.length;while(t--)n=r[t],e[n]=o[n];return e.target||(e.target=o.srcElement||a),3===e.target.nodeType&&(e.target=e.target.parentNode),e.metaKey=!!e.metaKey,s.filter?s.filter(e,o):e},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(e,t){return null==e.which&&(e.which=null!=t.charCode?t.charCode:t.keyCode),e}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(e,n){var r,i,o,s=n.button,l=n.fromElement;return null==e.pageX&&null!=n.clientX&&(i=e.target.ownerDocument||a,o=i.documentElement,r=i.body,e.pageX=n.clientX+(o&&o.scrollLeft||r&&r.scrollLeft||0)-(o&&o.clientLeft||r&&r.clientLeft||0),e.pageY=n.clientY+(o&&o.scrollTop||r&&r.scrollTop||0)-(o&&o.clientTop||r&&r.clientTop||0)),!e.relatedTarget&&l&&(e.relatedTarget=l===e.target?n.toElement:l),e.which||s===t||(e.which=1&s?1:2&s?3:4&s?2:0),e}},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==at()&&this.focus)try{return this.focus(),!1}catch(e){}},delegateType:"focusin"},blur:{trigger:function(){return this===at()&&this.blur?(this.blur(),!1):t},delegateType:"focusout"},click:{trigger:function(){return x.nodeName(this,"input")&&"checkbox"===this.type&&this.click?(this.click(),!1):t},_default:function(e){return x.nodeName(e.target,"a")}},beforeunload:{postDispatch:function(e){e.result!==t&&(e.originalEvent.returnValue=e.result)}}},simulate:function(e,t,n,r){var i=x.extend(new x.Event,n,{type:e,isSimulated:!0,originalEvent:{}});r?x.event.trigger(i,null,t):x.event.dispatch.call(t,i),i.isDefaultPrevented()&&n.preventDefault()}},x.removeEvent=a.removeEventListener?function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n,!1)}:function(e,t,n){var r="on"+t;e.detachEvent&&(typeof e[r]===i&&(e[r]=null),e.detachEvent(r,n))},x.Event=function(e,n){return this instanceof 
x.Event?(e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||e.returnValue===!1||e.getPreventDefault&&e.getPreventDefault()?it:ot):this.type=e,n&&x.extend(this,n),this.timeStamp=e&&e.timeStamp||x.now(),this[x.expando]=!0,t):new x.Event(e,n)},x.Event.prototype={isDefaultPrevented:ot,isPropagationStopped:ot,isImmediatePropagationStopped:ot,preventDefault:function(){var e=this.originalEvent;this.isDefaultPrevented=it,e&&(e.preventDefault?e.preventDefault():e.returnValue=!1)},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=it,e&&(e.stopPropagation&&e.stopPropagation(),e.cancelBubble=!0)},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=it,this.stopPropagation()}},x.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(e,t){x.event.special[e]={delegateType:t,bindType:t,handle:function(e){var n,r=this,i=e.relatedTarget,o=e.handleObj;return(!i||i!==r&&!x.contains(r,i))&&(e.type=o.origType,n=o.handler.apply(this,arguments),e.type=t),n}}}),x.support.submitBubbles||(x.event.special.submit={setup:function(){return x.nodeName(this,"form")?!1:(x.event.add(this,"click._submit keypress._submit",function(e){var n=e.target,r=x.nodeName(n,"input")||x.nodeName(n,"button")?n.form:t;r&&!x._data(r,"submitBubbles")&&(x.event.add(r,"submit._submit",function(e){e._submit_bubble=!0}),x._data(r,"submitBubbles",!0))}),t)},postDispatch:function(e){e._submit_bubble&&(delete e._submit_bubble,this.parentNode&&!e.isTrigger&&x.event.simulate("submit",this.parentNode,e,!0))},teardown:function(){return x.nodeName(this,"form")?!1:(x.event.remove(this,"._submit"),t)}}),x.support.changeBubbles||(x.event.special.change={setup:function(){return Z.test(this.nodeName)?(("checkbox"===this.type||"radio"===this.type)&&(x.event.add(this,"propertychange._change",function(e){"checked"===e.originalEvent.propertyName&&(this._just_changed=!0)}),x.event.add(this,"click._change",function(e){this._just_changed&&!e.isTrigger&&(this._just_changed=!1),x.event.simulate("change",this,e,!0)})),!1):(x.event.add(this,"beforeactivate._change",function(e){var t=e.target;Z.test(t.nodeName)&&!x._data(t,"changeBubbles")&&(x.event.add(t,"change._change",function(e){!this.parentNode||e.isSimulated||e.isTrigger||x.event.simulate("change",this.parentNode,e,!0)}),x._data(t,"changeBubbles",!0))}),t)},handle:function(e){var n=e.target;return this!==n||e.isSimulated||e.isTrigger||"radio"!==n.type&&"checkbox"!==n.type?e.handleObj.handler.apply(this,arguments):t},teardown:function(){return x.event.remove(this,"._change"),!Z.test(this.nodeName)}}),x.support.focusinBubbles||x.each({focus:"focusin",blur:"focusout"},function(e,t){var n=0,r=function(e){x.event.simulate(t,e.target,x.event.fix(e),!0)};x.event.special[t]={setup:function(){0===n++&&a.addEventListener(e,r,!0)},teardown:function(){0===--n&&a.removeEventListener(e,r,!0)}}}),x.fn.extend({on:function(e,n,r,i,o){var a,s;if("object"==typeof e){"string"!=typeof n&&(r=r||n,n=t);for(a in e)this.on(a,n,r,e[a],o);return this}if(null==r&&null==i?(i=n,r=n=t):null==i&&("string"==typeof n?(i=r,r=t):(i=r,r=n,n=t)),i===!1)i=ot;else if(!i)return this;return 1===o&&(s=i,i=function(e){return x().off(e),s.apply(this,arguments)},i.guid=s.guid||(s.guid=x.guid++)),this.each(function(){x.event.add(this,e,i,r,n)})},one:function(e,t,n,r){return this.on(e,t,n,r,1)},off:function(e,n,r){var i,o;if(e&&e.preventDefault&&e.handleObj)return 
i=e.handleObj,x(e.delegateTarget).off(i.namespace?i.origType+"."+i.namespace:i.origType,i.selector,i.handler),this;if("object"==typeof e){for(o in e)this.off(o,n,e[o]);return this}return(n===!1||"function"==typeof n)&&(r=n,n=t),r===!1&&(r=ot),this.each(function(){x.event.remove(this,e,r,n)})},trigger:function(e,t){return this.each(function(){x.event.trigger(e,t,this)})},triggerHandler:function(e,n){var r=this[0];return r?x.event.trigger(e,n,r,!0):t}});var st=/^.[^:#\[\.,]*$/,lt=/^(?:parents|prev(?:Until|All))/,ut=x.expr.match.needsContext,ct={children:!0,contents:!0,next:!0,prev:!0};x.fn.extend({find:function(e){var t,n=[],r=this,i=r.length;if("string"!=typeof e)return this.pushStack(x(e).filter(function(){for(t=0;i>t;t++)if(x.contains(r[t],this))return!0}));for(t=0;i>t;t++)x.find(e,r[t],n);return n=this.pushStack(i>1?x.unique(n):n),n.selector=this.selector?this.selector+" "+e:e,n},has:function(e){var t,n=x(e,this),r=n.length;return this.filter(function(){for(t=0;r>t;t++)if(x.contains(this,n[t]))return!0})},not:function(e){return this.pushStack(ft(this,e||[],!0))},filter:function(e){return this.pushStack(ft(this,e||[],!1))},is:function(e){return!!ft(this,"string"==typeof e&&ut.test(e)?x(e):e||[],!1).length},closest:function(e,t){var n,r=0,i=this.length,o=[],a=ut.test(e)||"string"!=typeof e?x(e,t||this.context):0;for(;i>r;r++)for(n=this[r];n&&n!==t;n=n.parentNode)if(11>n.nodeType&&(a?a.index(n)>-1:1===n.nodeType&&x.find.matchesSelector(n,e))){n=o.push(n);break}return this.pushStack(o.length>1?x.unique(o):o)},index:function(e){return e?"string"==typeof e?x.inArray(this[0],x(e)):x.inArray(e.jquery?e[0]:e,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){var n="string"==typeof e?x(e,t):x.makeArray(e&&e.nodeType?[e]:e),r=x.merge(this.get(),n);return this.pushStack(x.unique(r))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}});function pt(e,t){do e=e[t];while(e&&1!==e.nodeType);return e}x.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return x.dir(e,"parentNode")},parentsUntil:function(e,t,n){return x.dir(e,"parentNode",n)},next:function(e){return pt(e,"nextSibling")},prev:function(e){return pt(e,"previousSibling")},nextAll:function(e){return x.dir(e,"nextSibling")},prevAll:function(e){return x.dir(e,"previousSibling")},nextUntil:function(e,t,n){return x.dir(e,"nextSibling",n)},prevUntil:function(e,t,n){return x.dir(e,"previousSibling",n)},siblings:function(e){return x.sibling((e.parentNode||{}).firstChild,e)},children:function(e){return x.sibling(e.firstChild)},contents:function(e){return x.nodeName(e,"iframe")?e.contentDocument||e.contentWindow.document:x.merge([],e.childNodes)}},function(e,t){x.fn[e]=function(n,r){var i=x.map(this,t,n);return"Until"!==e.slice(-5)&&(r=n),r&&"string"==typeof r&&(i=x.filter(r,i)),this.length>1&&(ct[e]||(i=x.unique(i)),lt.test(e)&&(i=i.reverse())),this.pushStack(i)}}),x.extend({filter:function(e,t,n){var r=t[0];return n&&(e=":not("+e+")"),1===t.length&&1===r.nodeType?x.find.matchesSelector(r,e)?[r]:[]:x.find.matches(e,x.grep(t,function(e){return 1===e.nodeType}))},dir:function(e,n,r){var i=[],o=e[n];while(o&&9!==o.nodeType&&(r===t||1!==o.nodeType||!x(o).is(r)))1===o.nodeType&&i.push(o),o=o[n];return i},sibling:function(e,t){var n=[];for(;e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n}});function ft(e,t,n){if(x.isFunction(t))return x.grep(e,function(e,r){return!!t.call(e,r,e)!==n});if(t.nodeType)return 
x.grep(e,function(e){return e===t!==n});if("string"==typeof t){if(st.test(t))return x.filter(t,e,n);t=x.filter(t,e)}return x.grep(e,function(e){return x.inArray(e,t)>=0!==n})}function dt(e){var t=ht.split("|"),n=e.createDocumentFragment();if(n.createElement)while(t.length)n.createElement(t.pop());return n}var ht="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",gt=/ jQuery\d+="(?:null|\d+)"/g,mt=RegExp("<(?:"+ht+")[\\s/>]","i"),yt=/^\s+/,vt=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,bt=/<([\w:]+)/,xt=/<tbody/i,wt=/<|&#?\w+;/,Tt=/<(?:script|style|link)/i,Ct=/^(?:checkbox|radio)$/i,Nt=/checked\s*(?:[^=]|=\s*.checked.)/i,kt=/^$|\/(?:java|ecma)script/i,Et=/^true\/(.*)/,St=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,At={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],area:[1,"<map>","</map>"],param:[1,"<object>","</object>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:x.support.htmlSerialize?[0,"",""]:[1,"X<div>","</div>"]},jt=dt(a),Dt=jt.appendChild(a.createElement("div"));At.optgroup=At.option,At.tbody=At.tfoot=At.colgroup=At.caption=At.thead,At.th=At.td,x.fn.extend({text:function(e){return x.access(this,function(e){return e===t?x.text(this):this.empty().append((this[0]&&this[0].ownerDocument||a).createTextNode(e))},null,e,arguments.length)},append:function(){return this.domManip(arguments,function(e){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var t=Lt(this,e);t.appendChild(e)}})},prepend:function(){return this.domManip(arguments,function(e){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var t=Lt(this,e);t.insertBefore(e,t.firstChild)}})},before:function(){return this.domManip(arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return this.domManip(arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},remove:function(e,t){var n,r=e?x.filter(e,this):this,i=0;for(;null!=(n=r[i]);i++)t||1!==n.nodeType||x.cleanData(Ft(n)),n.parentNode&&(t&&x.contains(n.ownerDocument,n)&&_t(Ft(n,"script")),n.parentNode.removeChild(n));return this},empty:function(){var e,t=0;for(;null!=(e=this[t]);t++){1===e.nodeType&&x.cleanData(Ft(e,!1));while(e.firstChild)e.removeChild(e.firstChild);e.options&&x.nodeName(e,"select")&&(e.options.length=0)}return this},clone:function(e,t){return e=null==e?!1:e,t=null==t?e:t,this.map(function(){return x.clone(this,e,t)})},html:function(e){return x.access(this,function(e){var n=this[0]||{},r=0,i=this.length;if(e===t)return 1===n.nodeType?n.innerHTML.replace(gt,""):t;if(!("string"!=typeof e||Tt.test(e)||!x.support.htmlSerialize&&mt.test(e)||!x.support.leadingWhitespace&&yt.test(e)||At[(bt.exec(e)||["",""])[1].toLowerCase()])){e=e.replace(vt,"<$1></$2>");try{for(;i>r;r++)n=this[r]||{},1===n.nodeType&&(x.cleanData(Ft(n,!1)),n.innerHTML=e);n=0}catch(o){}}n&&this.empty().append(e)},null,e,arguments.length)},replaceWith:function(){var e=x.map(this,function(e){return[e.nextSibling,e.parentNode]}),t=0;return this.domManip(arguments,function(n){var r=e[t++],i=e[t++];i&&(r&&r.parentNode!==i&&(r=this.nextSibling),x(this).remove(),i.insertBefore(n,r))},!0),t?this:this.remove()},detach:function(e){return 
this.remove(e,!0)},domManip:function(e,t,n){e=d.apply([],e);var r,i,o,a,s,l,u=0,c=this.length,p=this,f=c-1,h=e[0],g=x.isFunction(h);if(g||!(1>=c||"string"!=typeof h||x.support.checkClone)&&Nt.test(h))return this.each(function(r){var i=p.eq(r);g&&(e[0]=h.call(this,r,i.html())),i.domManip(e,t,n)});if(c&&(l=x.buildFragment(e,this[0].ownerDocument,!1,!n&&this),r=l.firstChild,1===l.childNodes.length&&(l=r),r)){for(a=x.map(Ft(l,"script"),Ht),o=a.length;c>u;u++)i=l,u!==f&&(i=x.clone(i,!0,!0),o&&x.merge(a,Ft(i,"script"))),t.call(this[u],i,u);if(o)for(s=a[a.length-1].ownerDocument,x.map(a,qt),u=0;o>u;u++)i=a[u],kt.test(i.type||"")&&!x._data(i,"globalEval")&&x.contains(s,i)&&(i.src?x._evalUrl(i.src):x.globalEval((i.text||i.textContent||i.innerHTML||"").replace(St,"")));l=r=null}return this}});function Lt(e,t){return x.nodeName(e,"table")&&x.nodeName(1===t.nodeType?t:t.firstChild,"tr")?e.getElementsByTagName("tbody")[0]||e.appendChild(e.ownerDocument.createElement("tbody")):e}function Ht(e){return e.type=(null!==x.find.attr(e,"type"))+"/"+e.type,e}function qt(e){var t=Et.exec(e.type);return t?e.type=t[1]:e.removeAttribute("type"),e}function _t(e,t){var n,r=0;for(;null!=(n=e[r]);r++)x._data(n,"globalEval",!t||x._data(t[r],"globalEval"))}function Mt(e,t){if(1===t.nodeType&&x.hasData(e)){var n,r,i,o=x._data(e),a=x._data(t,o),s=o.events;if(s){delete a.handle,a.events={};for(n in s)for(r=0,i=s[n].length;i>r;r++)x.event.add(t,n,s[n][r])}a.data&&(a.data=x.extend({},a.data))}}function Ot(e,t){var n,r,i;if(1===t.nodeType){if(n=t.nodeName.toLowerCase(),!x.support.noCloneEvent&&t[x.expando]){i=x._data(t);for(r in i.events)x.removeEvent(t,r,i.handle);t.removeAttribute(x.expando)}"script"===n&&t.text!==e.text?(Ht(t).text=e.text,qt(t)):"object"===n?(t.parentNode&&(t.outerHTML=e.outerHTML),x.support.html5Clone&&e.innerHTML&&!x.trim(t.innerHTML)&&(t.innerHTML=e.innerHTML)):"input"===n&&Ct.test(e.type)?(t.defaultChecked=t.checked=e.checked,t.value!==e.value&&(t.value=e.value)):"option"===n?t.defaultSelected=t.selected=e.defaultSelected:("input"===n||"textarea"===n)&&(t.defaultValue=e.defaultValue)}}x.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(e,t){x.fn[e]=function(e){var n,r=0,i=[],o=x(e),a=o.length-1;for(;a>=r;r++)n=r===a?this:this.clone(!0),x(o[r])[t](n),h.apply(i,n.get());return this.pushStack(i)}});function Ft(e,n){var r,o,a=0,s=typeof e.getElementsByTagName!==i?e.getElementsByTagName(n||"*"):typeof e.querySelectorAll!==i?e.querySelectorAll(n||"*"):t;if(!s)for(s=[],r=e.childNodes||e;null!=(o=r[a]);a++)!n||x.nodeName(o,n)?s.push(o):x.merge(s,Ft(o,n));return n===t||n&&x.nodeName(e,n)?x.merge([e],s):s}function Bt(e){Ct.test(e.type)&&(e.defaultChecked=e.checked)}x.extend({clone:function(e,t,n){var r,i,o,a,s,l=x.contains(e.ownerDocument,e);if(x.support.html5Clone||x.isXMLDoc(e)||!mt.test("<"+e.nodeName+">")?o=e.cloneNode(!0):(Dt.innerHTML=e.outerHTML,Dt.removeChild(o=Dt.firstChild)),!(x.support.noCloneEvent&&x.support.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||x.isXMLDoc(e)))for(r=Ft(o),s=Ft(e),a=0;null!=(i=s[a]);++a)r[a]&&Ot(i,r[a]);if(t)if(n)for(s=s||Ft(e),r=r||Ft(o),a=0;null!=(i=s[a]);a++)Mt(i,r[a]);else Mt(e,o);return r=Ft(o,"script"),r.length>0&&_t(r,!l&&Ft(e,"script")),r=s=i=null,o},buildFragment:function(e,t,n,r){var i,o,a,s,l,u,c,p=e.length,f=dt(t),d=[],h=0;for(;p>h;h++)if(o=e[h],o||0===o)if("object"===x.type(o))x.merge(d,o.nodeType?[o]:o);else 
if(wt.test(o)){s=s||f.appendChild(t.createElement("div")),l=(bt.exec(o)||["",""])[1].toLowerCase(),c=At[l]||At._default,s.innerHTML=c[1]+o.replace(vt,"<$1></$2>")+c[2],i=c[0];while(i--)s=s.lastChild;if(!x.support.leadingWhitespace&&yt.test(o)&&d.push(t.createTextNode(yt.exec(o)[0])),!x.support.tbody){o="table"!==l||xt.test(o)?"<table>"!==c[1]||xt.test(o)?0:s:s.firstChild,i=o&&o.childNodes.length;while(i--)x.nodeName(u=o.childNodes[i],"tbody")&&!u.childNodes.length&&o.removeChild(u)}x.merge(d,s.childNodes),s.textContent="";while(s.firstChild)s.removeChild(s.firstChild);s=f.lastChild}else d.push(t.createTextNode(o));s&&f.removeChild(s),x.support.appendChecked||x.grep(Ft(d,"input"),Bt),h=0;while(o=d[h++])if((!r||-1===x.inArray(o,r))&&(a=x.contains(o.ownerDocument,o),s=Ft(f.appendChild(o),"script"),a&&_t(s),n)){i=0;while(o=s[i++])kt.test(o.type||"")&&n.push(o)}return s=null,f},cleanData:function(e,t){var n,r,o,a,s=0,l=x.expando,u=x.cache,c=x.support.deleteExpando,f=x.event.special;for(;null!=(n=e[s]);s++)if((t||x.acceptData(n))&&(o=n[l],a=o&&u[o])){if(a.events)for(r in a.events)f[r]?x.event.remove(n,r):x.removeEvent(n,r,a.handle);
u[o]&&(delete u[o],c?delete n[l]:typeof n.removeAttribute!==i?n.removeAttribute(l):n[l]=null,p.push(o))}},_evalUrl:function(e){return x.ajax({url:e,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})}}),x.fn.extend({wrapAll:function(e){if(x.isFunction(e))return this.each(function(t){x(this).wrapAll(e.call(this,t))});if(this[0]){var t=x(e,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstChild&&1===e.firstChild.nodeType)e=e.firstChild;return e}).append(this)}return this},wrapInner:function(e){return x.isFunction(e)?this.each(function(t){x(this).wrapInner(e.call(this,t))}):this.each(function(){var t=x(this),n=t.contents();n.length?n.wrapAll(e):t.append(e)})},wrap:function(e){var t=x.isFunction(e);return this.each(function(n){x(this).wrapAll(t?e.call(this,n):e)})},unwrap:function(){return this.parent().each(function(){x.nodeName(this,"body")||x(this).replaceWith(this.childNodes)}).end()}});var Pt,Rt,Wt,$t=/alpha\([^)]*\)/i,It=/opacity\s*=\s*([^)]*)/,zt=/^(top|right|bottom|left)$/,Xt=/^(none|table(?!-c[ea]).+)/,Ut=/^margin/,Vt=RegExp("^("+w+")(.*)$","i"),Yt=RegExp("^("+w+")(?!px)[a-z%]+$","i"),Jt=RegExp("^([+-])=("+w+")","i"),Gt={BODY:"block"},Qt={position:"absolute",visibility:"hidden",display:"block"},Kt={letterSpacing:0,fontWeight:400},Zt=["Top","Right","Bottom","Left"],en=["Webkit","O","Moz","ms"];function tn(e,t){if(t in e)return t;var n=t.charAt(0).toUpperCase()+t.slice(1),r=t,i=en.length;while(i--)if(t=en[i]+n,t in e)return t;return r}function nn(e,t){return e=t||e,"none"===x.css(e,"display")||!x.contains(e.ownerDocument,e)}function rn(e,t){var n,r,i,o=[],a=0,s=e.length;for(;s>a;a++)r=e[a],r.style&&(o[a]=x._data(r,"olddisplay"),n=r.style.display,t?(o[a]||"none"!==n||(r.style.display=""),""===r.style.display&&nn(r)&&(o[a]=x._data(r,"olddisplay",ln(r.nodeName)))):o[a]||(i=nn(r),(n&&"none"!==n||!i)&&x._data(r,"olddisplay",i?n:x.css(r,"display"))));for(a=0;s>a;a++)r=e[a],r.style&&(t&&"none"!==r.style.display&&""!==r.style.display||(r.style.display=t?o[a]||"":"none"));return e}x.fn.extend({css:function(e,n){return x.access(this,function(e,n,r){var i,o,a={},s=0;if(x.isArray(n)){for(o=Rt(e),i=n.length;i>s;s++)a[n[s]]=x.css(e,n[s],!1,o);return a}return r!==t?x.style(e,n,r):x.css(e,n)},e,n,arguments.length>1)},show:function(){return rn(this,!0)},hide:function(){return rn(this)},toggle:function(e){return"boolean"==typeof e?e?this.show():this.hide():this.each(function(){nn(this)?x(this).show():x(this).hide()})}}),x.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=Wt(e,"opacity");return""===n?"1":n}}}},cssNumber:{columnCount:!0,fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":x.support.cssFloat?"cssFloat":"styleFloat"},style:function(e,n,r,i){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var o,a,s,l=x.camelCase(n),u=e.style;if(n=x.cssProps[l]||(x.cssProps[l]=tn(u,l)),s=x.cssHooks[n]||x.cssHooks[l],r===t)return s&&"get"in s&&(o=s.get(e,!1,i))!==t?o:u[n];if(a=typeof r,"string"===a&&(o=Jt.exec(r))&&(r=(o[1]+1)*o[2]+parseFloat(x.css(e,n)),a="number"),!(null==r||"number"===a&&isNaN(r)||("number"!==a||x.cssNumber[l]||(r+="px"),x.support.clearCloneStyle||""!==r||0!==n.indexOf("background")||(u[n]="inherit"),s&&"set"in s&&(r=s.set(e,r,i))===t)))try{u[n]=r}catch(c){}}},css:function(e,n,r,i){var o,a,s,l=x.camelCase(n);return n=x.cssProps[l]||(x.cssProps[l]=tn(e.style,l)),s=x.cssHooks[n]||x.cssHooks[l],s&&"get"in 
s&&(a=s.get(e,!0,r)),a===t&&(a=Wt(e,n,i)),"normal"===a&&n in Kt&&(a=Kt[n]),""===r||r?(o=parseFloat(a),r===!0||x.isNumeric(o)?o||0:a):a}}),e.getComputedStyle?(Rt=function(t){return e.getComputedStyle(t,null)},Wt=function(e,n,r){var i,o,a,s=r||Rt(e),l=s?s.getPropertyValue(n)||s[n]:t,u=e.style;return s&&(""!==l||x.contains(e.ownerDocument,e)||(l=x.style(e,n)),Yt.test(l)&&Ut.test(n)&&(i=u.width,o=u.minWidth,a=u.maxWidth,u.minWidth=u.maxWidth=u.width=l,l=s.width,u.width=i,u.minWidth=o,u.maxWidth=a)),l}):a.documentElement.currentStyle&&(Rt=function(e){return e.currentStyle},Wt=function(e,n,r){var i,o,a,s=r||Rt(e),l=s?s[n]:t,u=e.style;return null==l&&u&&u[n]&&(l=u[n]),Yt.test(l)&&!zt.test(n)&&(i=u.left,o=e.runtimeStyle,a=o&&o.left,a&&(o.left=e.currentStyle.left),u.left="fontSize"===n?"1em":l,l=u.pixelLeft+"px",u.left=i,a&&(o.left=a)),""===l?"auto":l});function on(e,t,n){var r=Vt.exec(t);return r?Math.max(0,r[1]-(n||0))+(r[2]||"px"):t}function an(e,t,n,r,i){var o=n===(r?"border":"content")?4:"width"===t?1:0,a=0;for(;4>o;o+=2)"margin"===n&&(a+=x.css(e,n+Zt[o],!0,i)),r?("content"===n&&(a-=x.css(e,"padding"+Zt[o],!0,i)),"margin"!==n&&(a-=x.css(e,"border"+Zt[o]+"Width",!0,i))):(a+=x.css(e,"padding"+Zt[o],!0,i),"padding"!==n&&(a+=x.css(e,"border"+Zt[o]+"Width",!0,i)));return a}function sn(e,t,n){var r=!0,i="width"===t?e.offsetWidth:e.offsetHeight,o=Rt(e),a=x.support.boxSizing&&"border-box"===x.css(e,"boxSizing",!1,o);if(0>=i||null==i){if(i=Wt(e,t,o),(0>i||null==i)&&(i=e.style[t]),Yt.test(i))return i;r=a&&(x.support.boxSizingReliable||i===e.style[t]),i=parseFloat(i)||0}return i+an(e,t,n||(a?"border":"content"),r,o)+"px"}function ln(e){var t=a,n=Gt[e];return n||(n=un(e,t),"none"!==n&&n||(Pt=(Pt||x("<iframe frameborder='0' width='0' height='0'/>").css("cssText","display:block !important")).appendTo(t.documentElement),t=(Pt[0].contentWindow||Pt[0].contentDocument).document,t.write("<!doctype html><html><body>"),t.close(),n=un(e,t),Pt.detach()),Gt[e]=n),n}function un(e,t){var n=x(t.createElement(e)).appendTo(t.body),r=x.css(n[0],"display");return n.remove(),r}x.each(["height","width"],function(e,n){x.cssHooks[n]={get:function(e,r,i){return r?0===e.offsetWidth&&Xt.test(x.css(e,"display"))?x.swap(e,Qt,function(){return sn(e,n,i)}):sn(e,n,i):t},set:function(e,t,r){var i=r&&Rt(e);return on(e,t,r?an(e,n,r,x.support.boxSizing&&"border-box"===x.css(e,"boxSizing",!1,i),i):0)}}}),x.support.opacity||(x.cssHooks.opacity={get:function(e,t){return It.test((t&&e.currentStyle?e.currentStyle.filter:e.style.filter)||"")?.01*parseFloat(RegExp.$1)+"":t?"1":""},set:function(e,t){var n=e.style,r=e.currentStyle,i=x.isNumeric(t)?"alpha(opacity="+100*t+")":"",o=r&&r.filter||n.filter||"";n.zoom=1,(t>=1||""===t)&&""===x.trim(o.replace($t,""))&&n.removeAttribute&&(n.removeAttribute("filter"),""===t||r&&!r.filter)||(n.filter=$t.test(o)?o.replace($t,i):o+" "+i)}}),x(function(){x.support.reliableMarginRight||(x.cssHooks.marginRight={get:function(e,n){return n?x.swap(e,{display:"inline-block"},Wt,[e,"marginRight"]):t}}),!x.support.pixelPosition&&x.fn.position&&x.each(["top","left"],function(e,n){x.cssHooks[n]={get:function(e,r){return r?(r=Wt(e,n),Yt.test(r)?x(e).position()[n]+"px":r):t}}})}),x.expr&&x.expr.filters&&(x.expr.filters.hidden=function(e){return 
0>=e.offsetWidth&&0>=e.offsetHeight||!x.support.reliableHiddenOffsets&&"none"===(e.style&&e.style.display||x.css(e,"display"))},x.expr.filters.visible=function(e){return!x.expr.filters.hidden(e)}),x.each({margin:"",padding:"",border:"Width"},function(e,t){x.cssHooks[e+t]={expand:function(n){var r=0,i={},o="string"==typeof n?n.split(" "):[n];for(;4>r;r++)i[e+Zt[r]+t]=o[r]||o[r-2]||o[0];return i}},Ut.test(e)||(x.cssHooks[e+t].set=on)});var cn=/%20/g,pn=/\[\]$/,fn=/\r?\n/g,dn=/^(?:submit|button|image|reset|file)$/i,hn=/^(?:input|select|textarea|keygen)/i;x.fn.extend({serialize:function(){return x.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=x.prop(this,"elements");return e?x.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!x(this).is(":disabled")&&hn.test(this.nodeName)&&!dn.test(e)&&(this.checked||!Ct.test(e))}).map(function(e,t){var n=x(this).val();return null==n?null:x.isArray(n)?x.map(n,function(e){return{name:t.name,value:e.replace(fn,"\r\n")}}):{name:t.name,value:n.replace(fn,"\r\n")}}).get()}}),x.param=function(e,n){var r,i=[],o=function(e,t){t=x.isFunction(t)?t():null==t?"":t,i[i.length]=encodeURIComponent(e)+"="+encodeURIComponent(t)};if(n===t&&(n=x.ajaxSettings&&x.ajaxSettings.traditional),x.isArray(e)||e.jquery&&!x.isPlainObject(e))x.each(e,function(){o(this.name,this.value)});else for(r in e)gn(r,e[r],n,o);return i.join("&").replace(cn,"+")};function gn(e,t,n,r){var i;if(x.isArray(t))x.each(t,function(t,i){n||pn.test(e)?r(e,i):gn(e+"["+("object"==typeof i?t:"")+"]",i,n,r)});else if(n||"object"!==x.type(t))r(e,t);else for(i in t)gn(e+"["+i+"]",t[i],n,r)}x.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(e,t){x.fn[t]=function(e,n){return arguments.length>0?this.on(t,null,e,n):this.trigger(t)}}),x.fn.extend({hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)},bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)}});var mn,yn,vn=x.now(),bn=/\?/,xn=/#.*$/,wn=/([?&])_=[^&]*/,Tn=/^(.*?):[ \t]*([^\r\n]*)\r?$/gm,Cn=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Nn=/^(?:GET|HEAD)$/,kn=/^\/\//,En=/^([\w.+-]+:)(?:\/\/([^\/?#:]*)(?::(\d+)|)|)/,Sn=x.fn.load,An={},jn={},Dn="*/".concat("*");try{yn=o.href}catch(Ln){yn=a.createElement("a"),yn.href="",yn=yn.href}mn=En.exec(yn.toLowerCase())||[];function Hn(e){return function(t,n){"string"!=typeof t&&(n=t,t="*");var r,i=0,o=t.toLowerCase().match(T)||[];if(x.isFunction(n))while(r=o[i++])"+"===r[0]?(r=r.slice(1)||"*",(e[r]=e[r]||[]).unshift(n)):(e[r]=e[r]||[]).push(n)}}function qn(e,n,r,i){var o={},a=e===jn;function s(l){var u;return o[l]=!0,x.each(e[l]||[],function(e,l){var c=l(n,r,i);return"string"!=typeof c||a||o[c]?a?!(u=c):t:(n.dataTypes.unshift(c),s(c),!1)}),u}return s(n.dataTypes[0])||!o["*"]&&s("*")}function _n(e,n){var r,i,o=x.ajaxSettings.flatOptions||{};for(i in n)n[i]!==t&&((o[i]?e:r||(r={}))[i]=n[i]);return r&&x.extend(!0,e,r),e}x.fn.load=function(e,n,r){if("string"!=typeof e&&Sn)return Sn.apply(this,arguments);var i,o,a,s=this,l=e.indexOf(" ");return l>=0&&(i=e.slice(l,e.length),e=e.slice(0,l)),x.isFunction(n)?(r=n,n=t):n&&"object"==typeof 
n&&(a="POST"),s.length>0&&x.ajax({url:e,type:a,dataType:"html",data:n}).done(function(e){o=arguments,s.html(i?x("<div>").append(x.parseHTML(e)).find(i):e)}).complete(r&&function(e,t){s.each(r,o||[e.responseText,t,e])}),this},x.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){x.fn[t]=function(e){return this.on(t,e)}}),x.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:yn,type:"GET",isLocal:Cn.test(mn[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Dn,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":x.parseJSON,"text xml":x.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?_n(_n(e,x.ajaxSettings),t):_n(x.ajaxSettings,e)},ajaxPrefilter:Hn(An),ajaxTransport:Hn(jn),ajax:function(e,n){"object"==typeof e&&(n=e,e=t),n=n||{};var r,i,o,a,s,l,u,c,p=x.ajaxSetup({},n),f=p.context||p,d=p.context&&(f.nodeType||f.jquery)?x(f):x.event,h=x.Deferred(),g=x.Callbacks("once memory"),m=p.statusCode||{},y={},v={},b=0,w="canceled",C={readyState:0,getResponseHeader:function(e){var t;if(2===b){if(!c){c={};while(t=Tn.exec(a))c[t[1].toLowerCase()]=t[2]}t=c[e.toLowerCase()]}return null==t?null:t},getAllResponseHeaders:function(){return 2===b?a:null},setRequestHeader:function(e,t){var n=e.toLowerCase();return b||(e=v[n]=v[n]||e,y[e]=t),this},overrideMimeType:function(e){return b||(p.mimeType=e),this},statusCode:function(e){var t;if(e)if(2>b)for(t in e)m[t]=[m[t],e[t]];else C.always(e[C.status]);return this},abort:function(e){var t=e||w;return u&&u.abort(t),k(0,t),this}};if(h.promise(C).complete=g.add,C.success=C.done,C.error=C.fail,p.url=((e||p.url||yn)+"").replace(xn,"").replace(kn,mn[1]+"//"),p.type=n.method||n.type||p.method||p.type,p.dataTypes=x.trim(p.dataType||"*").toLowerCase().match(T)||[""],null==p.crossDomain&&(r=En.exec(p.url.toLowerCase()),p.crossDomain=!(!r||r[1]===mn[1]&&r[2]===mn[2]&&(r[3]||("http:"===r[1]?"80":"443"))===(mn[3]||("http:"===mn[1]?"80":"443")))),p.data&&p.processData&&"string"!=typeof p.data&&(p.data=x.param(p.data,p.traditional)),qn(An,p,n,C),2===b)return C;l=p.global,l&&0===x.active++&&x.event.trigger("ajaxStart"),p.type=p.type.toUpperCase(),p.hasContent=!Nn.test(p.type),o=p.url,p.hasContent||(p.data&&(o=p.url+=(bn.test(o)?"&":"?")+p.data,delete p.data),p.cache===!1&&(p.url=wn.test(o)?o.replace(wn,"$1_="+vn++):o+(bn.test(o)?"&":"?")+"_="+vn++)),p.ifModified&&(x.lastModified[o]&&C.setRequestHeader("If-Modified-Since",x.lastModified[o]),x.etag[o]&&C.setRequestHeader("If-None-Match",x.etag[o])),(p.data&&p.hasContent&&p.contentType!==!1||n.contentType)&&C.setRequestHeader("Content-Type",p.contentType),C.setRequestHeader("Accept",p.dataTypes[0]&&p.accepts[p.dataTypes[0]]?p.accepts[p.dataTypes[0]]+("*"!==p.dataTypes[0]?", "+Dn+"; q=0.01":""):p.accepts["*"]);for(i in p.headers)C.setRequestHeader(i,p.headers[i]);if(p.beforeSend&&(p.beforeSend.call(f,C,p)===!1||2===b))return C.abort();w="abort";for(i in{success:1,error:1,complete:1})C[i](p[i]);if(u=qn(jn,p,n,C)){C.readyState=1,l&&d.trigger("ajaxSend",[C,p]),p.async&&p.timeout>0&&(s=setTimeout(function(){C.abort("timeout")},p.timeout));try{b=1,u.send(y,k)}catch(N){if(!(2>b))throw N;k(-1,N)}}else k(-1,"No Transport");function k(e,n,r,i){var 
c,y,v,w,T,N=n;2!==b&&(b=2,s&&clearTimeout(s),u=t,a=i||"",C.readyState=e>0?4:0,c=e>=200&&300>e||304===e,r&&(w=Mn(p,C,r)),w=On(p,w,C,c),c?(p.ifModified&&(T=C.getResponseHeader("Last-Modified"),T&&(x.lastModified[o]=T),T=C.getResponseHeader("etag"),T&&(x.etag[o]=T)),204===e||"HEAD"===p.type?N="nocontent":304===e?N="notmodified":(N=w.state,y=w.data,v=w.error,c=!v)):(v=N,(e||!N)&&(N="error",0>e&&(e=0))),C.status=e,C.statusText=(n||N)+"",c?h.resolveWith(f,[y,N,C]):h.rejectWith(f,[C,N,v]),C.statusCode(m),m=t,l&&d.trigger(c?"ajaxSuccess":"ajaxError",[C,p,c?y:v]),g.fireWith(f,[C,N]),l&&(d.trigger("ajaxComplete",[C,p]),--x.active||x.event.trigger("ajaxStop")))}return C},getJSON:function(e,t,n){return x.get(e,t,n,"json")},getScript:function(e,n){return x.get(e,t,n,"script")}}),x.each(["get","post"],function(e,n){x[n]=function(e,r,i,o){return x.isFunction(r)&&(o=o||i,i=r,r=t),x.ajax({url:e,type:n,dataType:o,data:r,success:i})}});function Mn(e,n,r){var i,o,a,s,l=e.contents,u=e.dataTypes;while("*"===u[0])u.shift(),o===t&&(o=e.mimeType||n.getResponseHeader("Content-Type"));if(o)for(s in l)if(l[s]&&l[s].test(o)){u.unshift(s);break}if(u[0]in r)a=u[0];else{for(s in r){if(!u[0]||e.converters[s+" "+u[0]]){a=s;break}i||(i=s)}a=a||i}return a?(a!==u[0]&&u.unshift(a),r[a]):t}function On(e,t,n,r){var i,o,a,s,l,u={},c=e.dataTypes.slice();if(c[1])for(a in e.converters)u[a.toLowerCase()]=e.converters[a];o=c.shift();while(o)if(e.responseFields[o]&&(n[e.responseFields[o]]=t),!l&&r&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),l=o,o=c.shift())if("*"===o)o=l;else if("*"!==l&&l!==o){if(a=u[l+" "+o]||u["* "+o],!a)for(i in u)if(s=i.split(" "),s[1]===o&&(a=u[l+" "+s[0]]||u["* "+s[0]])){a===!0?a=u[i]:u[i]!==!0&&(o=s[0],c.unshift(s[1]));break}if(a!==!0)if(a&&e["throws"])t=a(t);else try{t=a(t)}catch(p){return{state:"parsererror",error:a?p:"No conversion from "+l+" to "+o}}}return{state:"success",data:t}}x.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(e){return x.globalEval(e),e}}}),x.ajaxPrefilter("script",function(e){e.cache===t&&(e.cache=!1),e.crossDomain&&(e.type="GET",e.global=!1)}),x.ajaxTransport("script",function(e){if(e.crossDomain){var n,r=a.head||x("head")[0]||a.documentElement;return{send:function(t,i){n=a.createElement("script"),n.async=!0,e.scriptCharset&&(n.charset=e.scriptCharset),n.src=e.url,n.onload=n.onreadystatechange=function(e,t){(t||!n.readyState||/loaded|complete/.test(n.readyState))&&(n.onload=n.onreadystatechange=null,n.parentNode&&n.parentNode.removeChild(n),n=null,t||i(200,"success"))},r.insertBefore(n,r.firstChild)},abort:function(){n&&n.onload(t,!0)}}}});var Fn=[],Bn=/(=)\?(?=&|$)|\?\?/;x.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Fn.pop()||x.expando+"_"+vn++;return this[e]=!0,e}}),x.ajaxPrefilter("json jsonp",function(n,r,i){var o,a,s,l=n.jsonp!==!1&&(Bn.test(n.url)?"url":"string"==typeof n.data&&!(n.contentType||"").indexOf("application/x-www-form-urlencoded")&&Bn.test(n.data)&&"data");return l||"jsonp"===n.dataTypes[0]?(o=n.jsonpCallback=x.isFunction(n.jsonpCallback)?n.jsonpCallback():n.jsonpCallback,l?n[l]=n[l].replace(Bn,"$1"+o):n.jsonp!==!1&&(n.url+=(bn.test(n.url)?"&":"?")+n.jsonp+"="+o),n.converters["script json"]=function(){return s||x.error(o+" was not 
called"),s[0]},n.dataTypes[0]="json",a=e[o],e[o]=function(){s=arguments},i.always(function(){e[o]=a,n[o]&&(n.jsonpCallback=r.jsonpCallback,Fn.push(o)),s&&x.isFunction(a)&&a(s[0]),s=a=t}),"script"):t});var Pn,Rn,Wn=0,$n=e.ActiveXObject&&function(){var e;for(e in Pn)Pn[e](t,!0)};function In(){try{return new e.XMLHttpRequest}catch(t){}}function zn(){try{return new e.ActiveXObject("Microsoft.XMLHTTP")}catch(t){}}x.ajaxSettings.xhr=e.ActiveXObject?function(){return!this.isLocal&&In()||zn()}:In,Rn=x.ajaxSettings.xhr(),x.support.cors=!!Rn&&"withCredentials"in Rn,Rn=x.support.ajax=!!Rn,Rn&&x.ajaxTransport(function(n){if(!n.crossDomain||x.support.cors){var r;return{send:function(i,o){var a,s,l=n.xhr();if(n.username?l.open(n.type,n.url,n.async,n.username,n.password):l.open(n.type,n.url,n.async),n.xhrFields)for(s in n.xhrFields)l[s]=n.xhrFields[s];n.mimeType&&l.overrideMimeType&&l.overrideMimeType(n.mimeType),n.crossDomain||i["X-Requested-With"]||(i["X-Requested-With"]="XMLHttpRequest");try{for(s in i)l.setRequestHeader(s,i[s])}catch(u){}l.send(n.hasContent&&n.data||null),r=function(e,i){var s,u,c,p;try{if(r&&(i||4===l.readyState))if(r=t,a&&(l.onreadystatechange=x.noop,$n&&delete Pn[a]),i)4!==l.readyState&&l.abort();else{p={},s=l.status,u=l.getAllResponseHeaders(),"string"==typeof l.responseText&&(p.text=l.responseText);try{c=l.statusText}catch(f){c=""}s||!n.isLocal||n.crossDomain?1223===s&&(s=204):s=p.text?200:404}}catch(d){i||o(-1,d)}p&&o(s,c,p,u)},n.async?4===l.readyState?setTimeout(r):(a=++Wn,$n&&(Pn||(Pn={},x(e).unload($n)),Pn[a]=r),l.onreadystatechange=r):r()},abort:function(){r&&r(t,!0)}}}});var Xn,Un,Vn=/^(?:toggle|show|hide)$/,Yn=RegExp("^(?:([+-])=|)("+w+")([a-z%]*)$","i"),Jn=/queueHooks$/,Gn=[nr],Qn={"*":[function(e,t){var n=this.createTween(e,t),r=n.cur(),i=Yn.exec(t),o=i&&i[3]||(x.cssNumber[e]?"":"px"),a=(x.cssNumber[e]||"px"!==o&&+r)&&Yn.exec(x.css(n.elem,e)),s=1,l=20;if(a&&a[3]!==o){o=o||a[3],i=i||[],a=+r||1;do s=s||".5",a/=s,x.style(n.elem,e,a+o);while(s!==(s=n.cur()/r)&&1!==s&&--l)}return i&&(a=n.start=+a||+r||0,n.unit=o,n.end=i[1]?a+(i[1]+1)*i[2]:+i[2]),n}]};function Kn(){return setTimeout(function(){Xn=t}),Xn=x.now()}function Zn(e,t,n){var r,i=(Qn[t]||[]).concat(Qn["*"]),o=0,a=i.length;for(;a>o;o++)if(r=i[o].call(n,t,e))return r}function er(e,t,n){var r,i,o=0,a=Gn.length,s=x.Deferred().always(function(){delete l.elem}),l=function(){if(i)return!1;var t=Xn||Kn(),n=Math.max(0,u.startTime+u.duration-t),r=n/u.duration||0,o=1-r,a=0,l=u.tweens.length;for(;l>a;a++)u.tweens[a].run(o);return s.notifyWith(e,[u,o,n]),1>o&&l?n:(s.resolveWith(e,[u]),!1)},u=s.promise({elem:e,props:x.extend({},t),opts:x.extend(!0,{specialEasing:{}},n),originalProperties:t,originalOptions:n,startTime:Xn||Kn(),duration:n.duration,tweens:[],createTween:function(t,n){var r=x.Tween(e,u.opts,t,n,u.opts.specialEasing[t]||u.opts.easing);return u.tweens.push(r),r},stop:function(t){var n=0,r=t?u.tweens.length:0;if(i)return this;for(i=!0;r>n;n++)u.tweens[n].run(1);return t?s.resolveWith(e,[u,t]):s.rejectWith(e,[u,t]),this}}),c=u.props;for(tr(c,u.opts.specialEasing);a>o;o++)if(r=Gn[o].call(u,e,c,u.opts))return r;return x.map(c,Zn,u),x.isFunction(u.opts.start)&&u.opts.start.call(e,u),x.fx.timer(x.extend(l,{elem:e,anim:u,queue:u.opts.queue})),u.progress(u.opts.progress).done(u.opts.done,u.opts.complete).fail(u.opts.fail).always(u.opts.always)}function tr(e,t){var n,r,i,o,a;for(n in e)if(r=x.camelCase(n),i=t[r],o=e[n],x.isArray(o)&&(i=o[1],o=e[n]=o[0]),n!==r&&(e[r]=o,delete e[n]),a=x.cssHooks[r],a&&"expand"in 
a){o=a.expand(o),delete e[r];for(n in o)n in e||(e[n]=o[n],t[n]=i)}else t[r]=i}x.Animation=x.extend(er,{tweener:function(e,t){x.isFunction(e)?(t=e,e=["*"]):e=e.split(" ");var n,r=0,i=e.length;for(;i>r;r++)n=e[r],Qn[n]=Qn[n]||[],Qn[n].unshift(t)},prefilter:function(e,t){t?Gn.unshift(e):Gn.push(e)}});function nr(e,t,n){var r,i,o,a,s,l,u=this,c={},p=e.style,f=e.nodeType&&nn(e),d=x._data(e,"fxshow");n.queue||(s=x._queueHooks(e,"fx"),null==s.unqueued&&(s.unqueued=0,l=s.empty.fire,s.empty.fire=function(){s.unqueued||l()}),s.unqueued++,u.always(function(){u.always(function(){s.unqueued--,x.queue(e,"fx").length||s.empty.fire()})})),1===e.nodeType&&("height"in t||"width"in t)&&(n.overflow=[p.overflow,p.overflowX,p.overflowY],"inline"===x.css(e,"display")&&"none"===x.css(e,"float")&&(x.support.inlineBlockNeedsLayout&&"inline"!==ln(e.nodeName)?p.zoom=1:p.display="inline-block")),n.overflow&&(p.overflow="hidden",x.support.shrinkWrapBlocks||u.always(function(){p.overflow=n.overflow[0],p.overflowX=n.overflow[1],p.overflowY=n.overflow[2]}));for(r in t)if(i=t[r],Vn.exec(i)){if(delete t[r],o=o||"toggle"===i,i===(f?"hide":"show"))continue;c[r]=d&&d[r]||x.style(e,r)}if(!x.isEmptyObject(c)){d?"hidden"in d&&(f=d.hidden):d=x._data(e,"fxshow",{}),o&&(d.hidden=!f),f?x(e).show():u.done(function(){x(e).hide()}),u.done(function(){var t;x._removeData(e,"fxshow");for(t in c)x.style(e,t,c[t])});for(r in c)a=Zn(f?d[r]:0,r,u),r in d||(d[r]=a.start,f&&(a.end=a.start,a.start="width"===r||"height"===r?1:0))}}function rr(e,t,n,r,i){return new rr.prototype.init(e,t,n,r,i)}x.Tween=rr,rr.prototype={constructor:rr,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||"swing",this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(x.cssNumber[n]?"":"px")},cur:function(){var e=rr.propHooks[this.prop];return e&&e.get?e.get(this):rr.propHooks._default.get(this)},run:function(e){var t,n=rr.propHooks[this.prop];return this.pos=t=this.options.duration?x.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):rr.propHooks._default.set(this),this}},rr.prototype.init.prototype=rr.prototype,rr.propHooks={_default:{get:function(e){var t;return null==e.elem[e.prop]||e.elem.style&&null!=e.elem.style[e.prop]?(t=x.css(e.elem,e.prop,""),t&&"auto"!==t?t:0):e.elem[e.prop]},set:function(e){x.fx.step[e.prop]?x.fx.step[e.prop](e):e.elem.style&&(null!=e.elem.style[x.cssProps[e.prop]]||x.cssHooks[e.prop])?x.style(e.elem,e.prop,e.now+e.unit):e.elem[e.prop]=e.now}}},rr.propHooks.scrollTop=rr.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},x.each(["toggle","show","hide"],function(e,t){var n=x.fn[t];x.fn[t]=function(e,r,i){return null==e||"boolean"==typeof e?n.apply(this,arguments):this.animate(ir(t,!0),e,r,i)}}),x.fn.extend({fadeTo:function(e,t,n,r){return this.filter(nn).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(e,t,n,r){var i=x.isEmptyObject(e),o=x.speed(t,n,r),a=function(){var t=er(this,x.extend({},e),o);(i||x._data(this,"finish"))&&t.stop(!0)};return a.finish=a,i||o.queue===!1?this.each(a):this.queue(o.queue,a)},stop:function(e,n,r){var i=function(e){var t=e.stop;delete e.stop,t(r)};return"string"!=typeof e&&(r=n,n=e,e=t),n&&e!==!1&&this.queue(e||"fx",[]),this.each(function(){var t=!0,n=null!=e&&e+"queueHooks",o=x.timers,a=x._data(this);if(n)a[n]&&a[n].stop&&i(a[n]);else for(n in 
a)a[n]&&a[n].stop&&Jn.test(n)&&i(a[n]);for(n=o.length;n--;)o[n].elem!==this||null!=e&&o[n].queue!==e||(o[n].anim.stop(r),t=!1,o.splice(n,1));(t||!r)&&x.dequeue(this,e)})},finish:function(e){return e!==!1&&(e=e||"fx"),this.each(function(){var t,n=x._data(this),r=n[e+"queue"],i=n[e+"queueHooks"],o=x.timers,a=r?r.length:0;for(n.finish=!0,x.queue(this,e,[]),i&&i.stop&&i.stop.call(this,!0),t=o.length;t--;)o[t].elem===this&&o[t].queue===e&&(o[t].anim.stop(!0),o.splice(t,1));for(t=0;a>t;t++)r[t]&&r[t].finish&&r[t].finish.call(this);delete n.finish})}});function ir(e,t){var n,r={height:e},i=0;for(t=t?1:0;4>i;i+=2-t)n=Zt[i],r["margin"+n]=r["padding"+n]=e;return t&&(r.opacity=r.width=e),r}x.each({slideDown:ir("show"),slideUp:ir("hide"),slideToggle:ir("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(e,t){x.fn[e]=function(e,n,r){return this.animate(t,e,n,r)}}),x.speed=function(e,t,n){var r=e&&"object"==typeof e?x.extend({},e):{complete:n||!n&&t||x.isFunction(e)&&e,duration:e,easing:n&&t||t&&!x.isFunction(t)&&t};return r.duration=x.fx.off?0:"number"==typeof r.duration?r.duration:r.duration in x.fx.speeds?x.fx.speeds[r.duration]:x.fx.speeds._default,(null==r.queue||r.queue===!0)&&(r.queue="fx"),r.old=r.complete,r.complete=function(){x.isFunction(r.old)&&r.old.call(this),r.queue&&x.dequeue(this,r.queue)},r},x.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2}},x.timers=[],x.fx=rr.prototype.init,x.fx.tick=function(){var e,n=x.timers,r=0;for(Xn=x.now();n.length>r;r++)e=n[r],e()||n[r]!==e||n.splice(r--,1);n.length||x.fx.stop(),Xn=t},x.fx.timer=function(e){e()&&x.timers.push(e)&&x.fx.start()},x.fx.interval=13,x.fx.start=function(){Un||(Un=setInterval(x.fx.tick,x.fx.interval))},x.fx.stop=function(){clearInterval(Un),Un=null},x.fx.speeds={slow:600,fast:200,_default:400},x.fx.step={},x.expr&&x.expr.filters&&(x.expr.filters.animated=function(e){return x.grep(x.timers,function(t){return e===t.elem}).length}),x.fn.offset=function(e){if(arguments.length)return e===t?this:this.each(function(t){x.offset.setOffset(this,e,t)});var n,r,o={top:0,left:0},a=this[0],s=a&&a.ownerDocument;if(s)return n=s.documentElement,x.contains(n,a)?(typeof a.getBoundingClientRect!==i&&(o=a.getBoundingClientRect()),r=or(s),{top:o.top+(r.pageYOffset||n.scrollTop)-(n.clientTop||0),left:o.left+(r.pageXOffset||n.scrollLeft)-(n.clientLeft||0)}):o},x.offset={setOffset:function(e,t,n){var r=x.css(e,"position");"static"===r&&(e.style.position="relative");var i=x(e),o=i.offset(),a=x.css(e,"top"),s=x.css(e,"left"),l=("absolute"===r||"fixed"===r)&&x.inArray("auto",[a,s])>-1,u={},c={},p,f;l?(c=i.position(),p=c.top,f=c.left):(p=parseFloat(a)||0,f=parseFloat(s)||0),x.isFunction(t)&&(t=t.call(e,n,o)),null!=t.top&&(u.top=t.top-o.top+p),null!=t.left&&(u.left=t.left-o.left+f),"using"in t?t.using.call(e,u):i.css(u)}},x.fn.extend({position:function(){if(this[0]){var e,t,n={top:0,left:0},r=this[0];return"fixed"===x.css(r,"position")?t=r.getBoundingClientRect():(e=this.offsetParent(),t=this.offset(),x.nodeName(e[0],"html")||(n=e.offset()),n.top+=x.css(e[0],"borderTopWidth",!0),n.left+=x.css(e[0],"borderLeftWidth",!0)),{top:t.top-n.top-x.css(r,"marginTop",!0),left:t.left-n.left-x.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent||s;while(e&&!x.nodeName(e,"html")&&"static"===x.css(e,"position"))e=e.offsetParent;return e||s})}}),x.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(e,n){var 
r=/Y/.test(n);x.fn[e]=function(i){return x.access(this,function(e,i,o){var a=or(e);return o===t?a?n in a?a[n]:a.document.documentElement[i]:e[i]:(a?a.scrollTo(r?x(a).scrollLeft():o,r?o:x(a).scrollTop()):e[i]=o,t)},e,i,arguments.length,null)}});function or(e){return x.isWindow(e)?e:9===e.nodeType?e.defaultView||e.parentWindow:!1}x.each({Height:"height",Width:"width"},function(e,n){x.each({padding:"inner"+e,content:n,"":"outer"+e},function(r,i){x.fn[i]=function(i,o){var a=arguments.length&&(r||"boolean"!=typeof i),s=r||(i===!0||o===!0?"margin":"border");return x.access(this,function(n,r,i){var o;return x.isWindow(n)?n.document.documentElement["client"+e]:9===n.nodeType?(o=n.documentElement,Math.max(n.body["scroll"+e],o["scroll"+e],n.body["offset"+e],o["offset"+e],o["client"+e])):i===t?x.css(n,r,s):x.style(n,r,i,s)},n,a?i:t,a,null)}})}),x.fn.size=function(){return this.length},x.fn.andSelf=x.fn.addBack,"object"==typeof module&&module&&"object"==typeof module.exports?module.exports=x:(e.jQuery=e.$=x,"function"==typeof define&&define.amd&&define("jquery",[],function(){return x}))})(window);
`
| Godeps/_workspace/src/github.com/google/cadvisor/pages/static/jquery_min_js.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017534954531583935,
0.00017083901911973953,
0.00016622268594801426,
0.00017094481154344976,
0.0000037267757306835847
] |
{
"id": 10,
"code_window": [
"}\n",
"\n",
"// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables'\n",
"// (experimental). If blank, look at the Node object on the Kubernetes API and respect the\n",
"// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"// (newer, faster). If blank, look at the Node object on the Kubernetes API and respect the\n"
],
"file_path": "pkg/apis/componentconfig/types.go",
"type": "replace",
"edit_start_line_idx": 65
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolumeclaim
import (
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
apitesting "k8s.io/kubernetes/pkg/api/testing"
)
func TestSelectableFieldLabelConversions(t *testing.T) {
apitesting.TestSelectableFieldLabelConversionsOfKind(t,
testapi.Default.GroupVersion().String(),
"PersistentVolumeClaim",
PersistentVolumeClaimToSelectableFields(&api.PersistentVolumeClaim{}),
map[string]string{"name": "metadata.name"},
)
}
| pkg/registry/persistentvolumeclaim/strategy_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00041654074448160827,
0.00023222177696879953,
0.000166200494277291,
0.00017307291273027658,
0.00010647498129401356
] |
{
"id": 10,
"code_window": [
"}\n",
"\n",
"// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables'\n",
"// (experimental). If blank, look at the Node object on the Kubernetes API and respect the\n",
"// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"// (newer, faster). If blank, look at the Node object on the Kubernetes API and respect the\n"
],
"file_path": "pkg/apis/componentconfig/types.go",
"type": "replace",
"edit_start_line_idx": 65
} | /*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
api "k8s.io/kubernetes/pkg/api"
core "k8s.io/kubernetes/pkg/client/testing/core"
labels "k8s.io/kubernetes/pkg/labels"
watch "k8s.io/kubernetes/pkg/watch"
)
// FakeServiceAccounts implements ServiceAccountInterface
type FakeServiceAccounts struct {
Fake *FakeLegacy
ns string
}
func (c *FakeServiceAccounts) Create(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) {
obj, err := c.Fake.
Invokes(core.NewCreateAction("serviceaccounts", c.ns, serviceAccount), &api.ServiceAccount{})
if obj == nil {
return nil, err
}
return obj.(*api.ServiceAccount), err
}
func (c *FakeServiceAccounts) Update(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) {
obj, err := c.Fake.
Invokes(core.NewUpdateAction("serviceaccounts", c.ns, serviceAccount), &api.ServiceAccount{})
if obj == nil {
return nil, err
}
return obj.(*api.ServiceAccount), err
}
func (c *FakeServiceAccounts) Delete(name string, options *api.DeleteOptions) error {
_, err := c.Fake.
Invokes(core.NewDeleteAction("serviceaccounts", c.ns, name), &api.ServiceAccount{})
return err
}
func (c *FakeServiceAccounts) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
	action := core.NewDeleteCollectionAction("serviceaccounts", c.ns, listOptions)
_, err := c.Fake.Invokes(action, &api.ServiceAccountList{})
return err
}
func (c *FakeServiceAccounts) Get(name string) (result *api.ServiceAccount, err error) {
obj, err := c.Fake.
Invokes(core.NewGetAction("serviceaccounts", c.ns, name), &api.ServiceAccount{})
if obj == nil {
return nil, err
}
return obj.(*api.ServiceAccount), err
}
func (c *FakeServiceAccounts) List(opts api.ListOptions) (result *api.ServiceAccountList, err error) {
obj, err := c.Fake.
Invokes(core.NewListAction("serviceaccounts", c.ns, opts), &api.ServiceAccountList{})
if obj == nil {
return nil, err
}
label := opts.LabelSelector
if label == nil {
label = labels.Everything()
}
list := &api.ServiceAccountList{}
for _, item := range obj.(*api.ServiceAccountList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested serviceAccounts.
func (c *FakeServiceAccounts) Watch(opts api.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(core.NewWatchAction("serviceaccounts", c.ns, opts))
}
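
// Illustrative sketch, not part of the original generated file: it isolates
// the label-filtering step that List performs above. Only api.ServiceAccount
// and the labels package already imported here are assumed.
func exampleFilterByLabels(items []api.ServiceAccount, selector labels.Selector) []api.ServiceAccount {
	var matched []api.ServiceAccount
	for _, item := range items {
		// Matches treats the item's label map as a labels.Set, exactly as List does.
		if selector.Matches(labels.Set(item.Labels)) {
			matched = append(matched, item)
		}
	}
	return matched
}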
| pkg/client/typed/generated/legacy/unversioned/fake/fake_serviceaccount.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.000537628831807524,
0.0002022169210249558,
0.00016329552454408258,
0.00016815209528431296,
0.00010615947394398972
] |
{
"id": 11,
"code_window": [
"// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the\n",
"// best-available proxy (currently userspace, but may change in future versions). If the\n",
"// iptables proxy is selected, regardless of how, but the system's kernel or iptables\n",
"// versions are insufficient, this always falls back to the userspace proxy.\n",
"type ProxyMode string\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// best-available proxy (currently iptables, but may change in future versions). If the\n"
],
"file_path": "pkg/apis/componentconfig/types.go",
"type": "replace",
"edit_start_line_idx": 67
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"errors"
"net"
"net/http"
_ "net/http/pprof"
"strconv"
"time"
"k8s.io/kubernetes/cmd/kube-proxy/app/options"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
kubeclient "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/proxy"
proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
"k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/proxy/userspace"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
utildbus "k8s.io/kubernetes/pkg/util/dbus"
"k8s.io/kubernetes/pkg/util/exec"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilnet "k8s.io/kubernetes/pkg/util/net"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/oom"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
type ProxyServer struct {
Client *kubeclient.Client
Config *options.ProxyServerConfig
IptInterface utiliptables.Interface
Proxier proxy.ProxyProvider
Broadcaster record.EventBroadcaster
Recorder record.EventRecorder
Conntracker Conntracker // if nil, ignored
}
const (
proxyModeUserspace = "userspace"
proxyModeIptables = "iptables"
experimentalProxyModeAnnotation = options.ExperimentalProxyModeAnnotation
betaProxyModeAnnotation = "net.beta.kubernetes.io/proxy-mode"
)
func checkKnownProxyMode(proxyMode string) bool {
switch proxyMode {
case "", proxyModeUserspace, proxyModeIptables:
return true
}
return false
}
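
// Illustrative sketch, not part of the original file: one way the annotation
// lookup implied by the constants above could be read off a Node, checking the
// beta name before the experimental one. Only the Annotations map on
// api.ObjectMeta is assumed; the real selection logic lives in getProxyMode below.
func exampleAnnotatedProxyMode(node *api.Node) (string, bool) {
	for _, key := range []string{betaProxyModeAnnotation, experimentalProxyModeAnnotation} {
		if mode, found := node.Annotations[key]; found && checkKnownProxyMode(mode) {
			return mode, true
		}
	}
	return "", false
}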
func NewProxyServer(
client *kubeclient.Client,
config *options.ProxyServerConfig,
iptInterface utiliptables.Interface,
proxier proxy.ProxyProvider,
broadcaster record.EventBroadcaster,
recorder record.EventRecorder,
conntracker Conntracker,
) (*ProxyServer, error) {
return &ProxyServer{
Client: client,
Config: config,
IptInterface: iptInterface,
Proxier: proxier,
Broadcaster: broadcaster,
Recorder: recorder,
Conntracker: conntracker,
}, nil
}
// NewProxyCommand creates a *cobra.Command object with default parameters
func NewProxyCommand() *cobra.Command {
s := options.NewProxyConfig()
s.AddFlags(pflag.CommandLine)
cmd := &cobra.Command{
Use: "kube-proxy",
		Long: `The Kubernetes network proxy runs on each node. It
reflects Services as defined in the Kubernetes API on each node and can do simple
TCP/UDP stream forwarding or round-robin TCP/UDP forwarding across a set of backends.
Service cluster IPs and ports are currently found through Docker-links-compatible
environment variables specifying ports opened by the service proxy. There is an optional
addon that provides cluster DNS for these cluster IPs. The user must create a Service
with the apiserver API to configure the proxy.`,
Run: func(cmd *cobra.Command, args []string) {
},
}
return cmd
}
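
// Hypothetical usage sketch, not in the original file: how the command built
// above would typically be wired into a main function. Only cobra's standard
// Execute method and the glog import already present are assumed.
func exampleExecuteProxyCommand() {
	if err := NewProxyCommand().Execute(); err != nil {
		glog.Fatalf("kube-proxy command failed: %v", err)
	}
}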
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, error) {
protocol := utiliptables.ProtocolIpv4
if net.ParseIP(config.BindAddress).To4() == nil {
protocol = utiliptables.ProtocolIpv6
}
	// Create an iptables utility.
execer := exec.New()
dbus := utildbus.New()
iptInterface := utiliptables.New(execer, dbus, protocol)
// We omit creation of pretty much everything if we run in cleanup mode
if config.CleanupAndExit {
return &ProxyServer{
Config: config,
IptInterface: iptInterface,
}, nil
}
// TODO(vmarmol): Use container config for this.
var oomAdjuster *oom.OOMAdjuster
if config.OOMScoreAdj != nil {
oomAdjuster = oom.NewOOMAdjuster()
if err := oomAdjuster.ApplyOOMScoreAdj(0, *config.OOMScoreAdj); err != nil {
glog.V(2).Info(err)
}
}
if config.ResourceContainer != "" {
// Run in its own container.
if err := util.RunInResourceContainer(config.ResourceContainer); err != nil {
glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
} else {
glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
}
}
// Create a Kube Client
// define api config source
if config.Kubeconfig == "" && config.Master == "" {
glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
}
// This creates a client, first loading any specified kubeconfig
// file, and then overriding the Master flag, if non-empty.
kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
if err != nil {
return nil, err
}
// Override kubeconfig qps/burst settings from flags
kubeconfig.QPS = config.KubeAPIQPS
kubeconfig.Burst = config.KubeAPIBurst
client, err := kubeclient.New(kubeconfig)
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
// Create event recorder
hostname := nodeutil.GetHostname(config.HostnameOverride)
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})
var proxier proxy.ProxyProvider
var endpointsHandler proxyconfig.EndpointsConfigHandler
proxyMode := getProxyMode(string(config.Mode), client.Nodes(), hostname, iptInterface)
if proxyMode == proxyModeIptables {
glog.V(2).Info("Using iptables Proxier.")
proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.IPTablesSyncPeriod.Duration, config.MasqueradeAll)
if err != nil {
glog.Fatalf("Unable to create proxier: %v", err)
}
proxier = proxierIptables
endpointsHandler = proxierIptables
// No turning back. Remove artifacts that might still exist from the userspace Proxier.
glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
userspace.CleanupLeftovers(iptInterface)
} else {
glog.V(2).Info("Using userspace Proxier.")
// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
// our config.EndpointsConfigHandler.
loadBalancer := userspace.NewLoadBalancerRR()
// set EndpointsConfigHandler to our loadBalancer
endpointsHandler = loadBalancer
proxierUserspace, err := userspace.NewProxier(
loadBalancer,
net.ParseIP(config.BindAddress),
iptInterface,
*utilnet.ParsePortRangeOrDie(config.PortRange),
config.IPTablesSyncPeriod.Duration,
config.UDPIdleTimeout.Duration,
)
if err != nil {
glog.Fatalf("Unable to create proxier: %v", err)
}
proxier = proxierUserspace
// Remove artifacts from the pure-iptables Proxier.
glog.V(2).Info("Tearing down pure-iptables proxy rules. Errors here are acceptable.")
iptables.CleanupLeftovers(iptInterface)
}
iptInterface.AddReloadFunc(proxier.Sync)
// Create configs (i.e. Watches for Services and Endpoints)
// Note: RegisterHandler() calls need to happen before creation of Sources because sources
// only notify on changes, and the initial update (on process start) may be lost if no handlers
// are registered yet.
serviceConfig := proxyconfig.NewServiceConfig()
serviceConfig.RegisterHandler(proxier)
endpointsConfig := proxyconfig.NewEndpointsConfig()
endpointsConfig.RegisterHandler(endpointsHandler)
proxyconfig.NewSourceAPI(
client,
config.ConfigSyncPeriod,
serviceConfig.Channel("api"),
endpointsConfig.Channel("api"),
)
config.NodeRef = &api.ObjectReference{
Kind: "Node",
Name: hostname,
UID: types.UID(hostname),
Namespace: "",
}
conntracker := realConntracker{}
return NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker)
}
// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).
func (s *ProxyServer) Run() error {
// remove iptables rules and exit
if s.Config.CleanupAndExit {
encounteredError := userspace.CleanupLeftovers(s.IptInterface)
encounteredError = iptables.CleanupLeftovers(s.IptInterface) || encounteredError
if encounteredError {
return errors.New("encountered an error while tearing down rules")
}
return nil
}
s.Broadcaster.StartRecordingToSink(s.Client.Events(""))
// Start up Healthz service if requested
if s.Config.HealthzPort > 0 {
go util.Until(func() {
err := http.ListenAndServe(s.Config.HealthzBindAddress+":"+strconv.Itoa(s.Config.HealthzPort), nil)
if err != nil {
glog.Errorf("Starting health server failed: %v", err)
}
}, 5*time.Second, util.NeverStop)
}
// Tune conntrack, if requested
if s.Conntracker != nil {
if s.Config.ConntrackMax > 0 {
if err := s.Conntracker.SetMax(s.Config.ConntrackMax); err != nil {
return err
}
}
if s.Config.ConntrackTCPEstablishedTimeout.Duration > 0 {
if err := s.Conntracker.SetTCPEstablishedTimeout(int(s.Config.ConntrackTCPEstablishedTimeout.Duration / time.Second)); err != nil {
return err
}
}
}
// Birth Cry after the birth is successful
s.birthCry()
// Just loop forever for now...
s.Proxier.SyncLoop()
return nil
}
type nodeGetter interface {
Get(hostname string) (*api.Node, error)
}
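// getProxyMode decides which proxier to use. An explicit --proxy-mode flag
// wins (userspace is honored as-is; iptables is still verified against the
// system via tryIptablesProxy). With an empty flag, the node's proxy-mode
// annotation (beta, then experimental) is consulted; all remaining cases
// autodetect via tryIptablesProxy.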
func getProxyMode(proxyMode string, client nodeGetter, hostname string, iptver iptables.IptablesVersioner) string {
if proxyMode == proxyModeUserspace {
return proxyModeUserspace
} else if proxyMode == proxyModeIptables {
return tryIptablesProxy(iptver)
} else if proxyMode != "" {
glog.V(1).Infof("Flag proxy-mode=%q unknown, assuming iptables proxy", proxyMode)
return tryIptablesProxy(iptver)
}
// proxyMode == "" - choose the best option.
if client == nil {
glog.Errorf("nodeGetter is nil: assuming iptables proxy")
return tryIptablesProxy(iptver)
}
node, err := client.Get(hostname)
if err != nil {
glog.Errorf("Can't get Node %q, assuming iptables proxy: %v", hostname, err)
return tryIptablesProxy(iptver)
}
if node == nil {
glog.Errorf("Got nil Node %q, assuming iptables proxy", hostname)
return tryIptablesProxy(iptver)
}
proxyMode, found := node.Annotations[betaProxyModeAnnotation]
if found {
glog.V(1).Infof("Found beta annotation %q = %q", betaProxyModeAnnotation, proxyMode)
} else {
// We already published some information about this annotation with the "experimental" name, so we will respect it.
proxyMode, found = node.Annotations[experimentalProxyModeAnnotation]
if found {
glog.V(1).Infof("Found experimental annotation %q = %q", experimentalProxyModeAnnotation, proxyMode)
}
}
if proxyMode == proxyModeUserspace {
glog.V(1).Infof("Annotation demands userspace proxy")
return proxyModeUserspace
}
return tryIptablesProxy(iptver)
}
func tryIptablesProxy(iptver iptables.IptablesVersioner) string {
// guaranteed false on error, error only necessary for debugging
useIptablesProxy, err := iptables.CanUseIptablesProxier(iptver)
if err != nil {
glog.Errorf("Can't determine whether to use iptables proxy, using userspace proxier: %v", err)
return proxyModeUserspace
}
if useIptablesProxy {
return proxyModeIptables
}
// Fallback.
glog.V(1).Infof("Can't use iptables proxy, using userspace proxier")
return proxyModeUserspace
}
func (s *ProxyServer) birthCry() {
s.Recorder.Eventf(s.Config.NodeRef, api.EventTypeNormal, "Starting", "Starting kube-proxy.")
}
| cmd/kube-proxy/app/server.go | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.9982442855834961,
0.17838816344738007,
0.0001640694827074185,
0.00021634552103932947,
0.3674735724925995
] |
{
"id": 11,
"code_window": [
"// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the\n",
"// best-available proxy (currently userspace, but may change in future versions). If the\n",
"// iptables proxy is selected, regardless of how, but the system's kernel or iptables\n",
"// versions are insufficient, this always falls back to the userspace proxy.\n",
"type ProxyMode string\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// best-available proxy (currently iptables, but may change in future versions). If the\n"
],
"file_path": "pkg/apis/componentconfig/types.go",
"type": "replace",
"edit_start_line_idx": 67
} |
# Diagrams for Cloud Logging Blog Article
| examples/blog-logging/diagrams/README.md | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.0002204024203820154,
0.00018286536214873195,
0.00016853073611855507,
0.000174794316990301,
0.00001932509803737048
] |
{
"id": 11,
"code_window": [
"// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the\n",
"// best-available proxy (currently userspace, but may change in future versions). If the\n",
"// iptables proxy is selected, regardless of how, but the system's kernel or iptables\n",
"// versions are insufficient, this always falls back to the userspace proxy.\n",
"type ProxyMode string\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// best-available proxy (currently iptables, but may change in future versions). If the\n"
],
"file_path": "pkg/apis/componentconfig/types.go",
"type": "replace",
"edit_start_line_idx": 67
} | package flavors
import (
"fmt"
"net/http"
"testing"
th "github.com/rackspace/gophercloud/testhelper"
fake "github.com/rackspace/gophercloud/testhelper/client"
)
// HandleListCDNFlavorsSuccessfully creates an HTTP handler at `/flavors` on the test handler mux
// that responds with a `List` response.
func HandleListCDNFlavorsSuccessfully(t *testing.T) {
th.Mux.HandleFunc("/flavors", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `
{
"flavors": [
{
"id": "europe",
"providers": [
{
"provider": "Fastly",
"links": [
{
"href": "http://www.fastly.com",
"rel": "provider_url"
}
]
}
],
"links": [
{
"href": "https://www.poppycdn.io/v1.0/flavors/europe",
"rel": "self"
}
]
}
]
}
`)
})
}
// HandleGetCDNFlavorSuccessfully creates an HTTP handler at `/flavors/{id}` on the test handler mux
// that responds with a `Get` response.
func HandleGetCDNFlavorSuccessfully(t *testing.T) {
th.Mux.HandleFunc("/flavors/asia", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `
{
"id" : "asia",
"providers" : [
{
"provider" : "ChinaCache",
"links": [
{
"href": "http://www.chinacache.com",
"rel": "provider_url"
}
]
}
],
"links": [
{
"href": "https://www.poppycdn.io/v1.0/flavors/asia",
"rel": "self"
}
]
}
`)
})
}
| Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/fixtures.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017479248344898224,
0.0001689772034296766,
0.0001611433399375528,
0.00016851788677740842,
0.0000040669701775186695
] |
{
"id": 11,
"code_window": [
"// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the\n",
"// best-available proxy (currently userspace, but may change in future versions). If the\n",
"// iptables proxy is selected, regardless of how, but the system's kernel or iptables\n",
"// versions are insufficient, this always falls back to the userspace proxy.\n",
"type ProxyMode string\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// best-available proxy (currently iptables, but may change in future versions). If the\n"
],
"file_path": "pkg/apis/componentconfig/types.go",
"type": "replace",
"edit_start_line_idx": 67
} |
# Namespaces
Kubernetes supports multiple virtual clusters backed by the same physical cluster.
These virtual clusters are called namespaces.
## When to Use Multiple Namespaces
Namespaces are intended for use in environments with many users spread across multiple
teams or projects. For clusters with a few to tens of users, you should not
need to create or think about namespaces at all. Start using namespaces when you
need the features they provide.
Namespaces provide a scope for names. Names of resources need to be unique within a namespace, but not across namespaces.
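For example (using two hypothetical namespaces, `dev` and `prod`), the same resource name can be used in both without conflict:

```console
$ kubectl --namespace=dev run nginx --image=nginx
$ kubectl --namespace=prod run nginx --image=nginx
```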
Namespaces are a way to divide cluster resources between multiple uses (via [resource quota](../../docs/admin/resource-quota.md)).
In future versions of Kubernetes, objects in the same namespace will have the same
access control policies by default.
It is not necessary to use multiple namespaces just to separate slightly different
resources, such as different versions of the same software: use [labels](labels.md) to distinguish
resources within the same namespace.
## Working with Namespaces
Creation and deletion of namespaces are described in the [Admin Guide documentation
for namespaces](../../docs/admin/namespaces.md).
### Viewing namespaces
You can list the current namespaces in a cluster using:
```console
$ kubectl get namespaces
NAME LABELS STATUS
default <none> Active
kube-system <none> Active
```
Kubernetes starts with two initial namespaces:
* `default` The default namespace for objects with no other namespace
* `kube-system` The namespace for objects created by the Kubernetes system
### Setting the namespace for a request
To temporarily set the namespace for a request, use the `--namespace` flag.
For example:
```console
$ kubectl --namespace=<insert-namespace-name-here> run nginx --image=nginx
$ kubectl --namespace=<insert-namespace-name-here> get pods
```
### Setting the namespace preference
You can permanently save the namespace for all subsequent kubectl commands in that
context.
First get your current context:
```console
$ export CONTEXT=$(kubectl config view | grep current-context | awk '{print $2}')
```
Then update the default namespace:
```console
$ kubectl config set-context $CONTEXT --namespace=<insert-namespace-name-here>
# Validate it
$ kubectl config view | grep namespace:
```
## Namespaces and DNS
When you create a [Service](services.md), it creates a corresponding [DNS entry](../admin/dns.md).
This entry is of the form `<service-name>.<namespace-name>.svc.cluster.local`, which means
that if a container just uses `<service-name>` it will resolve to the service which
is local to a namespace. This is useful for using the same configuration across
multiple namespaces such as Development, Staging and Production. If you want to reach
across namespaces, you need to use the fully qualified domain name (FQDN).
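For example, lookups from inside a pod might behave as follows (the service and namespace names here are hypothetical):

```console
$ nslookup my-service                             # resolves within the pod's own namespace
$ nslookup my-service.staging.svc.cluster.local   # FQDN reaches the 'staging' namespace
```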
## Not All Objects are in a Namespace
Most Kubernetes resources (e.g. pods, services, replication controllers, and others) are
in some namespace. However, namespace resources are not themselves in a namespace.
And low-level resources, such as [nodes](../../docs/admin/node.md) and
persistentVolumes, are not in any namespace. Events are an exception: they may or may not
have a namespace, depending on the object the event is about.
| docs/user-guide/namespaces.md | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.0006654650787822902,
0.0002544575836509466,
0.0001619611430214718,
0.00018134787387680262,
0.00014682287292089313
] |
{
"id": 12,
"code_window": [
"\t\t30*time.Second,\n",
"\t\tserviceConfig.Channel(\"api\"),\n",
"\t\tendpointsConfig.Channel(\"api\"),\n",
"\t)\n",
"\n",
"\thollowProxy, err := proxyapp.NewProxyServer(client, config, iptInterface, &FakeProxier{}, broadcaster, recorder, nil)\n",
"\tif err != nil {\n",
"\t\tglog.Fatalf(\"Error while creating ProxyServer: %v\\n\", err)\n",
"\t}\n",
"\treturn &HollowProxy{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\thollowProxy, err := proxyapp.NewProxyServer(client, config, iptInterface, &FakeProxier{}, broadcaster, recorder, nil, \"fake\")\n"
],
"file_path": "pkg/kubemark/hollow_proxy.go",
"type": "replace",
"edit_start_line_idx": 77
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"errors"
"net"
"net/http"
_ "net/http/pprof"
"strconv"
"time"
"k8s.io/kubernetes/cmd/kube-proxy/app/options"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
kubeclient "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/proxy"
proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
"k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/proxy/userspace"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
utildbus "k8s.io/kubernetes/pkg/util/dbus"
"k8s.io/kubernetes/pkg/util/exec"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilnet "k8s.io/kubernetes/pkg/util/net"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/oom"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
type ProxyServer struct {
Client *kubeclient.Client
Config *options.ProxyServerConfig
IptInterface utiliptables.Interface
Proxier proxy.ProxyProvider
Broadcaster record.EventBroadcaster
Recorder record.EventRecorder
Conntracker Conntracker // if nil, ignored
}
const (
proxyModeUserspace = "userspace"
proxyModeIptables = "iptables"
experimentalProxyModeAnnotation = options.ExperimentalProxyModeAnnotation
betaProxyModeAnnotation = "net.beta.kubernetes.io/proxy-mode"
)
func checkKnownProxyMode(proxyMode string) bool {
switch proxyMode {
case "", proxyModeUserspace, proxyModeIptables:
return true
}
return false
}
func NewProxyServer(
client *kubeclient.Client,
config *options.ProxyServerConfig,
iptInterface utiliptables.Interface,
proxier proxy.ProxyProvider,
broadcaster record.EventBroadcaster,
recorder record.EventRecorder,
conntracker Conntracker,
) (*ProxyServer, error) {
return &ProxyServer{
Client: client,
Config: config,
IptInterface: iptInterface,
Proxier: proxier,
Broadcaster: broadcaster,
Recorder: recorder,
Conntracker: conntracker,
}, nil
}
// NewProxyCommand creates a *cobra.Command object with default parameters
func NewProxyCommand() *cobra.Command {
s := options.NewProxyConfig()
s.AddFlags(pflag.CommandLine)
cmd := &cobra.Command{
Use: "kube-proxy",
Long: `The Kubernetes network proxy runs on each node. This
reflects services as defined in the Kubernetes API on each node and can do simple
TCP, UDP stream forwarding or round robin TCP, UDP forwarding across a set of backends.
Service cluster IPs and ports are currently found through Docker-links-compatible
environment variables specifying ports opened by the service proxy. There is an optional
addon that provides cluster DNS for these cluster IPs. The user must create a service
with the apiserver API to configure the proxy.`,
Run: func(cmd *cobra.Command, args []string) {
},
}
return cmd
}
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, error) {
protocol := utiliptables.ProtocolIpv4
if net.ParseIP(config.BindAddress).To4() == nil {
protocol = utiliptables.ProtocolIpv6
}
// Create an iptables util.
execer := exec.New()
dbus := utildbus.New()
iptInterface := utiliptables.New(execer, dbus, protocol)
// We omit creation of pretty much everything if we run in cleanup mode
if config.CleanupAndExit {
return &ProxyServer{
Config: config,
IptInterface: iptInterface,
}, nil
}
// TODO(vmarmol): Use container config for this.
var oomAdjuster *oom.OOMAdjuster
if config.OOMScoreAdj != nil {
oomAdjuster = oom.NewOOMAdjuster()
if err := oomAdjuster.ApplyOOMScoreAdj(0, *config.OOMScoreAdj); err != nil {
glog.V(2).Info(err)
}
}
if config.ResourceContainer != "" {
// Run in its own container.
if err := util.RunInResourceContainer(config.ResourceContainer); err != nil {
glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
} else {
glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
}
}
// Create a Kube Client
// define api config source
if config.Kubeconfig == "" && config.Master == "" {
glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
}
// This creates a client, first loading any specified kubeconfig
// file, and then overriding the Master flag, if non-empty.
kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
if err != nil {
return nil, err
}
// Override kubeconfig qps/burst settings from flags
kubeconfig.QPS = config.KubeAPIQPS
kubeconfig.Burst = config.KubeAPIBurst
client, err := kubeclient.New(kubeconfig)
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
// Create event recorder
hostname := nodeutil.GetHostname(config.HostnameOverride)
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})
var proxier proxy.ProxyProvider
var endpointsHandler proxyconfig.EndpointsConfigHandler
proxyMode := getProxyMode(string(config.Mode), client.Nodes(), hostname, iptInterface)
if proxyMode == proxyModeIptables {
glog.V(2).Info("Using iptables Proxier.")
proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.IPTablesSyncPeriod.Duration, config.MasqueradeAll)
if err != nil {
glog.Fatalf("Unable to create proxier: %v", err)
}
proxier = proxierIptables
endpointsHandler = proxierIptables
// No turning back. Remove artifacts that might still exist from the userspace Proxier.
glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
userspace.CleanupLeftovers(iptInterface)
} else {
glog.V(2).Info("Using userspace Proxier.")
// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
// our config.EndpointsConfigHandler.
loadBalancer := userspace.NewLoadBalancerRR()
// set EndpointsConfigHandler to our loadBalancer
endpointsHandler = loadBalancer
proxierUserspace, err := userspace.NewProxier(
loadBalancer,
net.ParseIP(config.BindAddress),
iptInterface,
*utilnet.ParsePortRangeOrDie(config.PortRange),
config.IPTablesSyncPeriod.Duration,
config.UDPIdleTimeout.Duration,
)
if err != nil {
glog.Fatalf("Unable to create proxier: %v", err)
}
proxier = proxierUserspace
// Remove artifacts from the pure-iptables Proxier.
glog.V(2).Info("Tearing down pure-iptables proxy rules. Errors here are acceptable.")
iptables.CleanupLeftovers(iptInterface)
}
iptInterface.AddReloadFunc(proxier.Sync)
// Create configs (i.e. Watches for Services and Endpoints)
// Note: RegisterHandler() calls need to happen before creation of Sources because sources
// only notify on changes, and the initial update (on process start) may be lost if no handlers
// are registered yet.
serviceConfig := proxyconfig.NewServiceConfig()
serviceConfig.RegisterHandler(proxier)
endpointsConfig := proxyconfig.NewEndpointsConfig()
endpointsConfig.RegisterHandler(endpointsHandler)
proxyconfig.NewSourceAPI(
client,
config.ConfigSyncPeriod,
serviceConfig.Channel("api"),
endpointsConfig.Channel("api"),
)
config.NodeRef = &api.ObjectReference{
Kind: "Node",
Name: hostname,
UID: types.UID(hostname),
Namespace: "",
}
conntracker := realConntracker{}
return NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker)
}
// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).
func (s *ProxyServer) Run() error {
// remove iptables rules and exit
if s.Config.CleanupAndExit {
encounteredError := userspace.CleanupLeftovers(s.IptInterface)
encounteredError = iptables.CleanupLeftovers(s.IptInterface) || encounteredError
if encounteredError {
return errors.New("encountered an error while tearing down rules")
}
return nil
}
s.Broadcaster.StartRecordingToSink(s.Client.Events(""))
// Start up Healthz service if requested
if s.Config.HealthzPort > 0 {
go util.Until(func() {
err := http.ListenAndServe(s.Config.HealthzBindAddress+":"+strconv.Itoa(s.Config.HealthzPort), nil)
if err != nil {
glog.Errorf("Starting health server failed: %v", err)
}
}, 5*time.Second, util.NeverStop)
}
// Tune conntrack, if requested
if s.Conntracker != nil {
if s.Config.ConntrackMax > 0 {
if err := s.Conntracker.SetMax(s.Config.ConntrackMax); err != nil {
return err
}
}
if s.Config.ConntrackTCPEstablishedTimeout.Duration > 0 {
if err := s.Conntracker.SetTCPEstablishedTimeout(int(s.Config.ConntrackTCPEstablishedTimeout.Duration / time.Second)); err != nil {
return err
}
}
}
// Birth Cry after the birth is successful
s.birthCry()
// Just loop forever for now...
s.Proxier.SyncLoop()
return nil
}
type nodeGetter interface {
Get(hostname string) (*api.Node, error)
}
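// getProxyMode decides which proxier to use. An explicit --proxy-mode flag
// wins (userspace is honored as-is; iptables is still verified against the
// system via tryIptablesProxy). With an empty flag, the node's proxy-mode
// annotation (beta, then experimental) is consulted; all remaining cases
// autodetect via tryIptablesProxy.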
func getProxyMode(proxyMode string, client nodeGetter, hostname string, iptver iptables.IptablesVersioner) string {
if proxyMode == proxyModeUserspace {
return proxyModeUserspace
} else if proxyMode == proxyModeIptables {
return tryIptablesProxy(iptver)
} else if proxyMode != "" {
glog.V(1).Infof("Flag proxy-mode=%q unknown, assuming iptables proxy", proxyMode)
return tryIptablesProxy(iptver)
}
// proxyMode == "" - choose the best option.
if client == nil {
glog.Errorf("nodeGetter is nil: assuming iptables proxy")
return tryIptablesProxy(iptver)
}
node, err := client.Get(hostname)
if err != nil {
glog.Errorf("Can't get Node %q, assuming iptables proxy: %v", hostname, err)
return tryIptablesProxy(iptver)
}
if node == nil {
glog.Errorf("Got nil Node %q, assuming iptables proxy", hostname)
return tryIptablesProxy(iptver)
}
proxyMode, found := node.Annotations[betaProxyModeAnnotation]
if found {
glog.V(1).Infof("Found beta annotation %q = %q", betaProxyModeAnnotation, proxyMode)
} else {
// We already published some information about this annotation with the "experimental" name, so we will respect it.
proxyMode, found = node.Annotations[experimentalProxyModeAnnotation]
if found {
glog.V(1).Infof("Found experimental annotation %q = %q", experimentalProxyModeAnnotation, proxyMode)
}
}
if proxyMode == proxyModeUserspace {
glog.V(1).Infof("Annotation demands userspace proxy")
return proxyModeUserspace
}
return tryIptablesProxy(iptver)
}
func tryIptablesProxy(iptver iptables.IptablesVersioner) string {
// guaranteed false on error, error only necessary for debugging
useIptablesProxy, err := iptables.CanUseIptablesProxier(iptver)
if err != nil {
glog.Errorf("Can't determine whether to use iptables proxy, using userspace proxier: %v", err)
return proxyModeUserspace
}
if useIptablesProxy {
return proxyModeIptables
}
// Fallback.
glog.V(1).Infof("Can't use iptables proxy, using userspace proxier")
return proxyModeUserspace
}
func (s *ProxyServer) birthCry() {
s.Recorder.Eventf(s.Config.NodeRef, api.EventTypeNormal, "Starting", "Starting kube-proxy.")
}
| cmd/kube-proxy/app/server.go | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.0329677015542984,
0.0022242856211960316,
0.00016393781697843224,
0.00028580063371919096,
0.005443920381367207
] |
{
"id": 12,
"code_window": [
"\t\t30*time.Second,\n",
"\t\tserviceConfig.Channel(\"api\"),\n",
"\t\tendpointsConfig.Channel(\"api\"),\n",
"\t)\n",
"\n",
"\thollowProxy, err := proxyapp.NewProxyServer(client, config, iptInterface, &FakeProxier{}, broadcaster, recorder, nil)\n",
"\tif err != nil {\n",
"\t\tglog.Fatalf(\"Error while creating ProxyServer: %v\\n\", err)\n",
"\t}\n",
"\treturn &HollowProxy{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\thollowProxy, err := proxyapp.NewProxyServer(client, config, iptInterface, &FakeProxier{}, broadcaster, recorder, nil, \"fake\")\n"
],
"file_path": "pkg/kubemark/hollow_proxy.go",
"type": "replace",
"edit_start_line_idx": 77
} | // Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httptypes
import (
"encoding/json"
"github.com/coreos/etcd/pkg/types"
)
type Member struct {
ID string `json:"id"`
Name string `json:"name"`
PeerURLs []string `json:"peerURLs"`
ClientURLs []string `json:"clientURLs"`
}
type MemberCreateRequest struct {
PeerURLs types.URLs
}
type MemberUpdateRequest struct {
MemberCreateRequest
}
func (m *MemberCreateRequest) UnmarshalJSON(data []byte) error {
s := struct {
PeerURLs []string `json:"peerURLs"`
}{}
err := json.Unmarshal(data, &s)
if err != nil {
return err
}
urls, err := types.NewURLs(s.PeerURLs)
if err != nil {
return err
}
m.PeerURLs = urls
return nil
}
type MemberCollection []Member
func (c *MemberCollection) MarshalJSON() ([]byte, error) {
d := struct {
Members []Member `json:"members"`
}{
Members: []Member(*c),
}
return json.Marshal(d)
}
| Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/member.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.0001800328172976151,
0.00017261019092984498,
0.00016436497389804572,
0.00017262683832086623,
0.00000489850708618178
] |
{
"id": 12,
"code_window": [
"\t\t30*time.Second,\n",
"\t\tserviceConfig.Channel(\"api\"),\n",
"\t\tendpointsConfig.Channel(\"api\"),\n",
"\t)\n",
"\n",
"\thollowProxy, err := proxyapp.NewProxyServer(client, config, iptInterface, &FakeProxier{}, broadcaster, recorder, nil)\n",
"\tif err != nil {\n",
"\t\tglog.Fatalf(\"Error while creating ProxyServer: %v\\n\", err)\n",
"\t}\n",
"\treturn &HollowProxy{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\thollowProxy, err := proxyapp.NewProxyServer(client, config, iptInterface, &FakeProxier{}, broadcaster, recorder, nil, \"fake\")\n"
],
"file_path": "pkg/kubemark/hollow_proxy.go",
"type": "replace",
"edit_start_line_idx": 77
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package leaky holds bits of kubelet that should be internal but have leaked
// out through bad abstractions. TODO: delete all of this.
package leaky
const (
// This is used in a few places outside of Kubelet, such as indexing
// into the container info.
PodInfraContainerName = "POD"
)
| pkg/kubelet/leaky/leaky.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.0001800099853426218,
0.00017658055003266782,
0.0001697894767858088,
0.00017994218796957284,
0.000004802093826583587
] |
{
"id": 12,
"code_window": [
"\t\t30*time.Second,\n",
"\t\tserviceConfig.Channel(\"api\"),\n",
"\t\tendpointsConfig.Channel(\"api\"),\n",
"\t)\n",
"\n",
"\thollowProxy, err := proxyapp.NewProxyServer(client, config, iptInterface, &FakeProxier{}, broadcaster, recorder, nil)\n",
"\tif err != nil {\n",
"\t\tglog.Fatalf(\"Error while creating ProxyServer: %v\\n\", err)\n",
"\t}\n",
"\treturn &HollowProxy{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\thollowProxy, err := proxyapp.NewProxyServer(client, config, iptInterface, &FakeProxier{}, broadcaster, recorder, nil, \"fake\")\n"
],
"file_path": "pkg/kubemark/hollow_proxy.go",
"type": "replace",
"edit_start_line_idx": 77
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envvars_test
import (
"reflect"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/kubelet/envvars"
)
func TestFromServices(t *testing.T) {
sl := api.ServiceList{
Items: []api.Service{
{
ObjectMeta: api.ObjectMeta{Name: "foo-bar"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{
{Port: 8080, Protocol: "TCP"},
},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "abc-123"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
ClusterIP: "5.6.7.8",
Ports: []api.ServicePort{
{Name: "u-d-p", Port: 8081, Protocol: "UDP"},
{Name: "t-c-p", Port: 8081, Protocol: "TCP"},
},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "q-u-u-x"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
ClusterIP: "9.8.7.6",
Ports: []api.ServicePort{
{Port: 8082, Protocol: "TCP"},
{Name: "8083", Port: 8083, Protocol: "TCP"},
},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "svrc-clusterip-none"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
ClusterIP: "None",
Ports: []api.ServicePort{
{Port: 8082, Protocol: "TCP"},
},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "svrc-clusterip-empty"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
ClusterIP: "",
Ports: []api.ServicePort{
{Port: 8082, Protocol: "TCP"},
},
},
},
},
}
vars := envvars.FromServices(&sl)
expected := []api.EnvVar{
{Name: "FOO_BAR_SERVICE_HOST", Value: "1.2.3.4"},
{Name: "FOO_BAR_SERVICE_PORT", Value: "8080"},
{Name: "FOO_BAR_PORT", Value: "tcp://1.2.3.4:8080"},
{Name: "FOO_BAR_PORT_8080_TCP", Value: "tcp://1.2.3.4:8080"},
{Name: "FOO_BAR_PORT_8080_TCP_PROTO", Value: "tcp"},
{Name: "FOO_BAR_PORT_8080_TCP_PORT", Value: "8080"},
{Name: "FOO_BAR_PORT_8080_TCP_ADDR", Value: "1.2.3.4"},
{Name: "ABC_123_SERVICE_HOST", Value: "5.6.7.8"},
{Name: "ABC_123_SERVICE_PORT", Value: "8081"},
{Name: "ABC_123_SERVICE_PORT_U_D_P", Value: "8081"},
{Name: "ABC_123_SERVICE_PORT_T_C_P", Value: "8081"},
{Name: "ABC_123_PORT", Value: "udp://5.6.7.8:8081"},
{Name: "ABC_123_PORT_8081_UDP", Value: "udp://5.6.7.8:8081"},
{Name: "ABC_123_PORT_8081_UDP_PROTO", Value: "udp"},
{Name: "ABC_123_PORT_8081_UDP_PORT", Value: "8081"},
{Name: "ABC_123_PORT_8081_UDP_ADDR", Value: "5.6.7.8"},
{Name: "ABC_123_PORT_8081_TCP", Value: "tcp://5.6.7.8:8081"},
{Name: "ABC_123_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "ABC_123_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "ABC_123_PORT_8081_TCP_ADDR", Value: "5.6.7.8"},
{Name: "Q_U_U_X_SERVICE_HOST", Value: "9.8.7.6"},
{Name: "Q_U_U_X_SERVICE_PORT", Value: "8082"},
{Name: "Q_U_U_X_SERVICE_PORT_8083", Value: "8083"},
{Name: "Q_U_U_X_PORT", Value: "tcp://9.8.7.6:8082"},
{Name: "Q_U_U_X_PORT_8082_TCP", Value: "tcp://9.8.7.6:8082"},
{Name: "Q_U_U_X_PORT_8082_TCP_PROTO", Value: "tcp"},
{Name: "Q_U_U_X_PORT_8082_TCP_PORT", Value: "8082"},
{Name: "Q_U_U_X_PORT_8082_TCP_ADDR", Value: "9.8.7.6"},
{Name: "Q_U_U_X_PORT_8083_TCP", Value: "tcp://9.8.7.6:8083"},
{Name: "Q_U_U_X_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "Q_U_U_X_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "Q_U_U_X_PORT_8083_TCP_ADDR", Value: "9.8.7.6"},
}
if len(vars) != len(expected) {
t.Errorf("Expected %d env vars, got: %+v", len(expected), vars)
return
}
for i := range expected {
if !reflect.DeepEqual(vars[i], expected[i]) {
t.Errorf("expected %#v, got %#v", expected[i], vars[i])
}
}
}
| pkg/kubelet/envvars/envvars_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00018006180471275002,
0.00017325286171399057,
0.0001672401704126969,
0.00017329378169961274,
0.000003522794486343628
] |
{
"id": 13,
"code_window": [
"\tBy(\"dialing(udp) node1 --> node2:nodeUdpPort\")\n",
"\tconfig.dialFromNode(\"udp\", node2_IP, nodeUdpPort, tries, epCount)\n",
"\tBy(\"dialing(http) node1 --> node2:nodeHttpPort\")\n",
"\tconfig.dialFromNode(\"http\", node2_IP, nodeHttpPort, tries, epCount)\n",
"}\n",
"\n",
"func (config *KubeProxyTestConfig) hitEndpoints() {\n",
"\tfor _, endpointPod := range config.endpointPods {\n",
"\t\tExpect(len(endpointPod.Status.PodIP)).To(BeNumerically(\">\", 0), \"podIP is empty:%s\", endpointPod.Status.PodIP)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tBy(\"checking kube-proxy URLs\")\n",
"\tconfig.getSelfURL(\"/healthz\", \"ok\")\n",
"\tconfig.getSelfURL(\"/proxyMode\", \"iptables\") // the default\n"
],
"file_path": "test/e2e/kubeproxy.go",
"type": "add",
"edit_start_line_idx": 199
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to configure and run a
// Kubernetes app process.
package app
import (
"errors"
"net"
"net/http"
_ "net/http/pprof"
"strconv"
"time"
"k8s.io/kubernetes/cmd/kube-proxy/app/options"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
kubeclient "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/proxy"
proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
"k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/proxy/userspace"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
utildbus "k8s.io/kubernetes/pkg/util/dbus"
"k8s.io/kubernetes/pkg/util/exec"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilnet "k8s.io/kubernetes/pkg/util/net"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/oom"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
type ProxyServer struct {
Client *kubeclient.Client
Config *options.ProxyServerConfig
IptInterface utiliptables.Interface
Proxier proxy.ProxyProvider
Broadcaster record.EventBroadcaster
Recorder record.EventRecorder
Conntracker Conntracker // if nil, ignored
}
const (
proxyModeUserspace = "userspace"
proxyModeIptables = "iptables"
experimentalProxyModeAnnotation = options.ExperimentalProxyModeAnnotation
betaProxyModeAnnotation = "net.beta.kubernetes.io/proxy-mode"
)
func checkKnownProxyMode(proxyMode string) bool {
switch proxyMode {
case "", proxyModeUserspace, proxyModeIptables:
return true
}
return false
}
func NewProxyServer(
client *kubeclient.Client,
config *options.ProxyServerConfig,
iptInterface utiliptables.Interface,
proxier proxy.ProxyProvider,
broadcaster record.EventBroadcaster,
recorder record.EventRecorder,
conntracker Conntracker,
) (*ProxyServer, error) {
return &ProxyServer{
Client: client,
Config: config,
IptInterface: iptInterface,
Proxier: proxier,
Broadcaster: broadcaster,
Recorder: recorder,
Conntracker: conntracker,
}, nil
}
// NewProxyCommand creates a *cobra.Command object with default parameters
func NewProxyCommand() *cobra.Command {
s := options.NewProxyConfig()
s.AddFlags(pflag.CommandLine)
cmd := &cobra.Command{
Use: "kube-proxy",
Long: `The Kubernetes network proxy runs on each node. This
reflects services as defined in the Kubernetes API on each node and can do simple
TCP, UDP stream forwarding or round robin TCP, UDP forwarding across a set of backends.
Service cluster IPs and ports are currently found through Docker-links-compatible
environment variables specifying ports opened by the service proxy. There is an optional
addon that provides cluster DNS for these cluster IPs. The user must create a service
with the apiserver API to configure the proxy.`,
Run: func(cmd *cobra.Command, args []string) {
},
}
return cmd
}
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, error) {
protocol := utiliptables.ProtocolIpv4
if net.ParseIP(config.BindAddress).To4() == nil {
protocol = utiliptables.ProtocolIpv6
}
// Create an iptables util.
execer := exec.New()
dbus := utildbus.New()
iptInterface := utiliptables.New(execer, dbus, protocol)
// We omit creation of pretty much everything if we run in cleanup mode
if config.CleanupAndExit {
return &ProxyServer{
Config: config,
IptInterface: iptInterface,
}, nil
}
// TODO(vmarmol): Use container config for this.
var oomAdjuster *oom.OOMAdjuster
if config.OOMScoreAdj != nil {
oomAdjuster = oom.NewOOMAdjuster()
if err := oomAdjuster.ApplyOOMScoreAdj(0, *config.OOMScoreAdj); err != nil {
glog.V(2).Info(err)
}
}
if config.ResourceContainer != "" {
// Run in its own container.
if err := util.RunInResourceContainer(config.ResourceContainer); err != nil {
glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
} else {
glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
}
}
// Create a Kube Client
// define api config source
if config.Kubeconfig == "" && config.Master == "" {
glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
}
// This creates a client, first loading any specified kubeconfig
// file, and then overriding the Master flag, if non-empty.
kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
if err != nil {
return nil, err
}
// Override kubeconfig qps/burst settings from flags
kubeconfig.QPS = config.KubeAPIQPS
kubeconfig.Burst = config.KubeAPIBurst
client, err := kubeclient.New(kubeconfig)
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
// Create event recorder
hostname := nodeutil.GetHostname(config.HostnameOverride)
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})
var proxier proxy.ProxyProvider
var endpointsHandler proxyconfig.EndpointsConfigHandler
proxyMode := getProxyMode(string(config.Mode), client.Nodes(), hostname, iptInterface)
if proxyMode == proxyModeIptables {
glog.V(2).Info("Using iptables Proxier.")
proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.IPTablesSyncPeriod.Duration, config.MasqueradeAll)
if err != nil {
glog.Fatalf("Unable to create proxier: %v", err)
}
proxier = proxierIptables
endpointsHandler = proxierIptables
// No turning back. Remove artifacts that might still exist from the userspace Proxier.
glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
userspace.CleanupLeftovers(iptInterface)
} else {
glog.V(2).Info("Using userspace Proxier.")
// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
// our config.EndpointsConfigHandler.
loadBalancer := userspace.NewLoadBalancerRR()
// set EndpointsConfigHandler to our loadBalancer
endpointsHandler = loadBalancer
proxierUserspace, err := userspace.NewProxier(
loadBalancer,
net.ParseIP(config.BindAddress),
iptInterface,
*utilnet.ParsePortRangeOrDie(config.PortRange),
config.IPTablesSyncPeriod.Duration,
config.UDPIdleTimeout.Duration,
)
if err != nil {
glog.Fatalf("Unable to create proxier: %v", err)
}
proxier = proxierUserspace
// Remove artifacts from the pure-iptables Proxier.
glog.V(2).Info("Tearing down pure-iptables proxy rules. Errors here are acceptable.")
iptables.CleanupLeftovers(iptInterface)
}
iptInterface.AddReloadFunc(proxier.Sync)
// Create configs (i.e. Watches for Services and Endpoints)
// Note: RegisterHandler() calls need to happen before creation of Sources because sources
// only notify on changes, and the initial update (on process start) may be lost if no handlers
// are registered yet.
serviceConfig := proxyconfig.NewServiceConfig()
serviceConfig.RegisterHandler(proxier)
endpointsConfig := proxyconfig.NewEndpointsConfig()
endpointsConfig.RegisterHandler(endpointsHandler)
proxyconfig.NewSourceAPI(
client,
config.ConfigSyncPeriod,
serviceConfig.Channel("api"),
endpointsConfig.Channel("api"),
)
config.NodeRef = &api.ObjectReference{
Kind: "Node",
Name: hostname,
UID: types.UID(hostname),
Namespace: "",
}
conntracker := realConntracker{}
return NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker)
}
// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).
func (s *ProxyServer) Run() error {
// remove iptables rules and exit
if s.Config.CleanupAndExit {
encounteredError := userspace.CleanupLeftovers(s.IptInterface)
encounteredError = iptables.CleanupLeftovers(s.IptInterface) || encounteredError
if encounteredError {
return errors.New("encountered an error while tearing down rules")
}
return nil
}
s.Broadcaster.StartRecordingToSink(s.Client.Events(""))
// Start up Healthz service if requested
if s.Config.HealthzPort > 0 {
go util.Until(func() {
err := http.ListenAndServe(s.Config.HealthzBindAddress+":"+strconv.Itoa(s.Config.HealthzPort), nil)
if err != nil {
glog.Errorf("Starting health server failed: %v", err)
}
}, 5*time.Second, util.NeverStop)
}
// Tune conntrack, if requested
if s.Conntracker != nil {
if s.Config.ConntrackMax > 0 {
if err := s.Conntracker.SetMax(s.Config.ConntrackMax); err != nil {
return err
}
}
if s.Config.ConntrackTCPEstablishedTimeout.Duration > 0 {
if err := s.Conntracker.SetTCPEstablishedTimeout(int(s.Config.ConntrackTCPEstablishedTimeout.Duration / time.Second)); err != nil {
return err
}
}
}
// Birth Cry after the birth is successful
s.birthCry()
// Just loop forever for now...
s.Proxier.SyncLoop()
return nil
}
type nodeGetter interface {
Get(hostname string) (*api.Node, error)
}
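// getProxyMode decides which proxier to use. An explicit --proxy-mode flag
// wins (userspace is honored as-is; iptables is still verified against the
// system via tryIptablesProxy). With an empty flag, the node's proxy-mode
// annotation (beta, then experimental) is consulted; all remaining cases
// autodetect via tryIptablesProxy.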
func getProxyMode(proxyMode string, client nodeGetter, hostname string, iptver iptables.IptablesVersioner) string {
if proxyMode == proxyModeUserspace {
return proxyModeUserspace
} else if proxyMode == proxyModeIptables {
return tryIptablesProxy(iptver)
} else if proxyMode != "" {
glog.V(1).Infof("Flag proxy-mode=%q unknown, assuming iptables proxy", proxyMode)
return tryIptablesProxy(iptver)
}
// proxyMode == "" - choose the best option.
if client == nil {
glog.Errorf("nodeGetter is nil: assuming iptables proxy")
return tryIptablesProxy(iptver)
}
node, err := client.Get(hostname)
if err != nil {
glog.Errorf("Can't get Node %q, assuming iptables proxy: %v", hostname, err)
return tryIptablesProxy(iptver)
}
if node == nil {
glog.Errorf("Got nil Node %q, assuming iptables proxy", hostname)
return tryIptablesProxy(iptver)
}
proxyMode, found := node.Annotations[betaProxyModeAnnotation]
if found {
glog.V(1).Infof("Found beta annotation %q = %q", betaProxyModeAnnotation, proxyMode)
} else {
// We already published some information about this annotation with the "experimental" name, so we will respect it.
proxyMode, found = node.Annotations[experimentalProxyModeAnnotation]
if found {
glog.V(1).Infof("Found experimental annotation %q = %q", experimentalProxyModeAnnotation, proxyMode)
}
}
if proxyMode == proxyModeUserspace {
glog.V(1).Infof("Annotation demands userspace proxy")
return proxyModeUserspace
}
return tryIptablesProxy(iptver)
}
func tryIptablesProxy(iptver iptables.IptablesVersioner) string {
// guaranteed false on error, error only necessary for debugging
useIptablesProxy, err := iptables.CanUseIptablesProxier(iptver)
if err != nil {
glog.Errorf("Can't determine whether to use iptables proxy, using userspace proxier: %v", err)
return proxyModeUserspace
}
if useIptablesProxy {
return proxyModeIptables
}
// Fallback.
glog.V(1).Infof("Can't use iptables proxy, using userspace proxier")
return proxyModeUserspace
}
func (s *ProxyServer) birthCry() {
s.Recorder.Eventf(s.Config.NodeRef, api.EventTypeNormal, "Starting", "Starting kube-proxy.")
}
| cmd/kube-proxy/app/server.go | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.0002850770251825452,
0.0001747838396113366,
0.00016324025637004524,
0.00017046448192559183,
0.00002032351585512515
] |
{
"id": 13,
"code_window": [
"\tBy(\"dialing(udp) node1 --> node2:nodeUdpPort\")\n",
"\tconfig.dialFromNode(\"udp\", node2_IP, nodeUdpPort, tries, epCount)\n",
"\tBy(\"dialing(http) node1 --> node2:nodeHttpPort\")\n",
"\tconfig.dialFromNode(\"http\", node2_IP, nodeHttpPort, tries, epCount)\n",
"}\n",
"\n",
"func (config *KubeProxyTestConfig) hitEndpoints() {\n",
"\tfor _, endpointPod := range config.endpointPods {\n",
"\t\tExpect(len(endpointPod.Status.PodIP)).To(BeNumerically(\">\", 0), \"podIP is empty:%s\", endpointPod.Status.PodIP)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tBy(\"checking kube-proxy URLs\")\n",
"\tconfig.getSelfURL(\"/healthz\", \"ok\")\n",
"\tconfig.getSelfURL(\"/proxyMode\", \"iptables\") // the default\n"
],
"file_path": "test/e2e/kubeproxy.go",
"type": "add",
"edit_start_line_idx": 199
} | cmd/xurls/xurls
generate/tldsgen/tldsgen
generate/regexgen/regexgen
| Godeps/_workspace/src/github.com/mvdan/xurls/.gitignore | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.0001728894276311621,
0.0001728894276311621,
0.0001728894276311621,
0.0001728894276311621,
0
] |
{
"id": 13,
"code_window": [
"\tBy(\"dialing(udp) node1 --> node2:nodeUdpPort\")\n",
"\tconfig.dialFromNode(\"udp\", node2_IP, nodeUdpPort, tries, epCount)\n",
"\tBy(\"dialing(http) node1 --> node2:nodeHttpPort\")\n",
"\tconfig.dialFromNode(\"http\", node2_IP, nodeHttpPort, tries, epCount)\n",
"}\n",
"\n",
"func (config *KubeProxyTestConfig) hitEndpoints() {\n",
"\tfor _, endpointPod := range config.endpointPods {\n",
"\t\tExpect(len(endpointPod.Status.PodIP)).To(BeNumerically(\">\", 0), \"podIP is empty:%s\", endpointPod.Status.PodIP)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tBy(\"checking kube-proxy URLs\")\n",
"\tconfig.getSelfURL(\"/healthz\", \"ok\")\n",
"\tconfig.getSelfURL(\"/proxyMode\", \"iptables\") // the default\n"
],
"file_path": "test/e2e/kubeproxy.go",
"type": "add",
"edit_start_line_idx": 199
} | nullvalue[].foo[].bar | Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-381 | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.0001727630296954885,
0.0001727630296954885,
0.0001727630296954885,
0.0001727630296954885,
0
] |
{
"id": 13,
"code_window": [
"\tBy(\"dialing(udp) node1 --> node2:nodeUdpPort\")\n",
"\tconfig.dialFromNode(\"udp\", node2_IP, nodeUdpPort, tries, epCount)\n",
"\tBy(\"dialing(http) node1 --> node2:nodeHttpPort\")\n",
"\tconfig.dialFromNode(\"http\", node2_IP, nodeHttpPort, tries, epCount)\n",
"}\n",
"\n",
"func (config *KubeProxyTestConfig) hitEndpoints() {\n",
"\tfor _, endpointPod := range config.endpointPods {\n",
"\t\tExpect(len(endpointPod.Status.PodIP)).To(BeNumerically(\">\", 0), \"podIP is empty:%s\", endpointPod.Status.PodIP)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tBy(\"checking kube-proxy URLs\")\n",
"\tconfig.getSelfURL(\"/healthz\", \"ok\")\n",
"\tconfig.getSelfURL(\"/proxyMode\", \"iptables\") // the default\n"
],
"file_path": "test/e2e/kubeproxy.go",
"type": "add",
"edit_start_line_idx": 199
} | kind: PersistentVolume
apiVersion: v1
metadata:
name: pv0002
labels:
type: local
spec:
capacity:
storage: 8Gi
accessModes:
- ReadWriteOnce
hostPath:
path: "/somepath/data02"
persistentVolumeReclaimPolicy: Recycle
| docs/user-guide/persistent-volumes/volumes/local-02.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017370587738696486,
0.00017160800052806735,
0.00016951010911725461,
0.00017160800052806735,
0.0000020978841348551214
] |
{
"id": 14,
"code_window": [
"\tstdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, forLoop)\n",
"\tExpect(strconv.Atoi(strings.TrimSpace(stdout))).To(BeNumerically(\"==\", expectedCount))\n",
"}\n",
"\n",
"func (config *KubeProxyTestConfig) createNetShellPodSpec(podName string, node string) *api.Pod {\n",
"\tpod := &api.Pod{\n",
"\t\tTypeMeta: unversioned.TypeMeta{\n",
"\t\t\tKind: \"Pod\",\n",
"\t\t\tAPIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (config *KubeProxyTestConfig) getSelfURL(path string, expected string) {\n",
"\tcmd := fmt.Sprintf(\"curl -s --connect-timeout 1 http://localhost:10249%s\", path)\n",
"\tBy(fmt.Sprintf(\"Getting kube-proxy self URL %s\", path))\n",
"\tstdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd)\n",
"\tExpect(strings.Contains(stdout, expected)).To(BeTrue())\n",
"}\n",
"\n"
],
"file_path": "test/e2e/kubeproxy.go",
"type": "add",
"edit_start_line_idx": 254
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package componentconfig
import "k8s.io/kubernetes/pkg/api/unversioned"
type KubeProxyConfiguration struct {
unversioned.TypeMeta
// bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
// for all interfaces)
BindAddress string `json:"bindAddress"`
// healthzBindAddress is the IP address for the health check server to serve on,
// defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
HealthzBindAddress string `json:"healthzBindAddress"`
// healthzPort is the port to bind the health check server. Use 0 to disable.
HealthzPort int `json:"healthzPort"`
// hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
HostnameOverride string `json:"hostnameOverride"`
// iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
// '2h22m'). Must be greater than 0.
IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"`
// kubeconfigPath is the path to the kubeconfig file with authorization information (the
// master location is set by the master flag).
KubeconfigPath string `json:"kubeconfigPath"`
// masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
MasqueradeAll bool `json:"masqueradeAll"`
// master is the address of the Kubernetes API server (overrides any value in kubeconfig)
Master string `json:"master"`
// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
// the range [-1000, 1000]
OOMScoreAdj *int `json:"oomScoreAdj"`
// mode specifies which proxy mode to use.
Mode ProxyMode `json:"mode"`
// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
PortRange string `json:"portRange"`
// resourceContainer is the absolute name of the resource-only container to create and run
// the Kube-proxy in (Default: /kube-proxy).
ResourceContainer string `json:"resourceContainer"`
// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
// Must be greater than 0. Only applicable for proxyMode=userspace.
UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"`
// conntrackMax is the maximum number of NAT connections to track (0 to leave as-is).
ConntrackMax int `json:"conntrackMax"`
// conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open
// (e.g. '250ms', '2s'). Must be greater than 0. Only applicable when proxyMode is 'userspace'.
ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"`
}
// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables'
// (experimental). If blank, look at the Node object on the Kubernetes API and respect the
// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the
// best-available proxy (currently userspace, but may change in future versions). If the
// iptables proxy is selected, regardless of how, and the system's kernel or iptables
// versions are insufficient, this always falls back to the userspace proxy.
type ProxyMode string
const (
ProxyModeUserspace ProxyMode = "userspace"
ProxyModeIPTables ProxyMode = "iptables"
)
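
// Illustrative sketch only — an assumption added for clarity, not the actual
// kube-proxy selection code. It expresses the fallback rules described in the
// ProxyMode comment above; chooseProxyMode and iptablesUsable are hypothetical
// names introduced here.
func chooseProxyMode(configured ProxyMode, nodeAnnotations map[string]string, iptablesUsable bool) ProxyMode {
	mode := configured
	if mode == "" {
		// Blank mode: respect the experimental node annotation if present.
		mode = ProxyMode(nodeAnnotations["net.experimental.kubernetes.io/proxy-mode"])
	}
	if mode == ProxyModeIPTables && iptablesUsable {
		return ProxyModeIPTables
	}
	// Anything else, including an insufficient kernel or iptables version,
	// falls back to the best-available proxy (currently userspace).
	return ProxyModeUserspace
}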
// TODO: curate the ordering and structure of this config object
type KubeletConfiguration struct {
// config is the path to the config file or directory of files
Config string `json:"config"`
// syncFrequency is the max period between synchronizing running
// containers and config
SyncFrequency unversioned.Duration `json:"syncFrequency"`
// fileCheckFrequency is the duration between checking config files for
// new data
FileCheckFrequency unversioned.Duration `json:"fileCheckFrequency"`
// httpCheckFrequency is the duration between checking http for new data
HTTPCheckFrequency unversioned.Duration `json:"httpCheckFrequency"`
// manifestURL is the URL for accessing the container manifest
ManifestURL string `json:"manifestURL"`
// manifestURLHeader is the HTTP header to use when accessing the manifest
// URL, with the key separated from the value with a ':', as in 'key:value'
ManifestURLHeader string `json:"manifestURLHeader"`
// enableServer enables the Kubelet's server
EnableServer bool `json:"enableServer"`
// address is the IP address for the Kubelet to serve on (set to 0.0.0.0
// for all interfaces)
Address string `json:"address"`
// port is the port for the Kubelet to serve on.
Port uint `json:"port"`
// readOnlyPort is the read-only port for the Kubelet to serve on with
// no authentication/authorization (set to 0 to disable)
ReadOnlyPort uint `json:"readOnlyPort"`
// tLSCertFile is the file containing x509 Certificate for HTTPS. (CA cert,
// if any, concatenated after server cert). If tlsCertFile and
// tlsPrivateKeyFile are not provided, a self-signed certificate
// and key are generated for the public address and saved to the directory
// passed to certDir.
TLSCertFile string `json:"tLSCertFile"`
// tLSPrivateKeyFile is the file containing the x509 private key matching
// tlsCertFile.
TLSPrivateKeyFile string `json:"tLSPrivateKeyFile"`
// certDirectory is the directory where the TLS certs are located (by
// default /var/run/kubernetes). If tlsCertFile and tlsPrivateKeyFile
// are provided, this flag will be ignored.
CertDirectory string `json:"certDirectory"`
// hostnameOverride is the hostname used to identify the kubelet instead
// of the actual hostname.
HostnameOverride string `json:"hostnameOverride"`
// podInfraContainerImage is the image whose network/ipc namespaces
// containers in each pod will use.
PodInfraContainerImage string `json:"podInfraContainerImage"`
// dockerEndpoint is the path to the docker endpoint to communicate with.
DockerEndpoint string `json:"dockerEndpoint"`
// rootDirectory is the directory path to place kubelet files (volume
// mounts, etc.).
RootDirectory string `json:"rootDirectory"`
// allowPrivileged enables containers to request privileged mode.
// Defaults to false.
AllowPrivileged bool `json:"allowPrivileged"`
// hostNetworkSources is a comma-separated list of sources from which the
// Kubelet allows pods to use the host network. Defaults to "*".
HostNetworkSources string `json:"hostNetworkSources"`
// hostPIDSources is a comma-separated list of sources from which the
// Kubelet allows pods to use the host pid namespace. Defaults to "*".
HostPIDSources string `json:"hostPIDSources"`
// hostIPCSources is a comma-separated list of sources from which the
// Kubelet allows pods to use the host ipc namespace. Defaults to "*".
HostIPCSources string `json:"hostIPCSources"`
// registryPullQPS is the limit of registry pulls per second. If 0,
// unlimited. Set to 0 for no limit. Defaults to 5.0.
RegistryPullQPS float64 `json:"registryPullQPS"`
// registryBurst is the maximum size of bursty pulls, temporarily allowing
// pulls to burst to this number while still not exceeding registryPullQPS.
// Only used if registryPullQPS > 0.
RegistryBurst int `json:"registryBurst"`
// eventRecordQPS is the maximum event creations per second. If 0, there
// is no limit enforced.
EventRecordQPS float32 `json:"eventRecordQPS"`
// eventBurst is the maximum size of bursty event records, temporarily
// allowing event records to burst to this number while still not exceeding
// eventRecordQPS. Only used if eventRecordQPS > 0.
EventBurst int `json:"eventBurst"`
// enableDebuggingHandlers enables server endpoints for log collection
// and local running of containers and commands
EnableDebuggingHandlers bool `json:"enableDebuggingHandlers"`
// minimumGCAge is the minimum age for a finished container before it is
// garbage collected.
MinimumGCAge unversioned.Duration `json:"minimumGCAge"`
// maxPerPodContainerCount is the maximum number of old instances to
// retain per container. Each container takes up some disk space.
MaxPerPodContainerCount int `json:"maxPerPodContainerCount"`
// maxContainerCount is the maximum number of old instances of containers
// to retain globally. Each container takes up some disk space.
MaxContainerCount int `json:"maxContainerCount"`
// cAdvisorPort is the port of the localhost cAdvisor endpoint
CAdvisorPort uint `json:"cAdvisorPort"`
// healthzPort is the port of the localhost healthz endpoint
HealthzPort int `json:"healthzPort"`
// healthzBindAddress is the IP address for the healthz server to serve
// on.
HealthzBindAddress string `json:"healthzBindAddress"`
// oomScoreAdj is the oom-score-adj value for the kubelet process. Values
// must be within the range [-1000, 1000].
OOMScoreAdj int `json:"oomScoreAdj"`
// registerNode enables automatic registration with the apiserver.
RegisterNode bool `json:"registerNode"`
// clusterDomain is the DNS domain for this cluster. If set, kubelet will
// configure all containers to search this domain in addition to the
// host's search domains.
ClusterDomain string `json:"clusterDomain"`
// masterServiceNamespace is the namespace from which the kubernetes
// master services should be injected into pods.
MasterServiceNamespace string `json:"masterServiceNamespace"`
// clusterDNS is the IP address for a cluster DNS server. If set, kubelet
// will configure all containers to use this for DNS resolution in
// addition to the host's DNS servers
ClusterDNS string `json:"clusterDNS"`
// streamingConnectionIdleTimeout is the maximum time a streaming connection
// can be idle before the connection is automatically closed.
StreamingConnectionIdleTimeout unversioned.Duration `json:"streamingConnectionIdleTimeout"`
// nodeStatusUpdateFrequency is the frequency that kubelet posts node
// status to master. Note: be cautious when changing the constant, it
// must work with nodeMonitorGracePeriod in nodecontroller.
NodeStatusUpdateFrequency unversioned.Duration `json:"nodeStatusUpdateFrequency"`
// imageGCHighThresholdPercent is the percent of disk usage after which
// image garbage collection is always run.
ImageGCHighThresholdPercent int `json:"imageGCHighThresholdPercent"`
// imageGCLowThresholdPercent is the percent of disk usage before which
// image garbage collection is never run. Lowest disk usage to garbage
// collect to.
ImageGCLowThresholdPercent int `json:"imageGCLowThresholdPercent"`
// lowDiskSpaceThresholdMB is the absolute free disk space, in MB, to
// maintain. When disk space falls below this threshold, new pods would
// be rejected.
LowDiskSpaceThresholdMB int `json:"lowDiskSpaceThresholdMB"`
// networkPluginName is the name of the network plugin to be invoked for
// various events in kubelet/pod lifecycle
NetworkPluginName string `json:"networkPluginName"`
// networkPluginDir is the full path of the directory in which to search
// for network plugins
NetworkPluginDir string `json:"networkPluginDir"`
// volumePluginDir is the full path of the directory in which to search
// for additional third party volume plugins
VolumePluginDir string `json:"volumePluginDir"`
// cloudProvider is the provider for cloud services.
CloudProvider string `json:"cloudProvider,omitempty"`
// cloudConfigFile is the path to the cloud provider configuration file.
CloudConfigFile string `json:"cloudConfigFile,omitempty"`
// resourceContainer is the absolute name of the resource-only container
// to create and run the Kubelet in.
ResourceContainer string `json:"resourceContainer,omitempty"`
// cgroupRoot is the root cgroup to use for pods. This is handled by the
// container runtime on a best effort basis.
CgroupRoot string `json:"cgroupRoot,omitempty"`
// containerRuntime is the container runtime to use.
ContainerRuntime string `json:"containerRuntime"`
// rktPath is the path of the rkt binary. Leave empty to use the first rkt in
// $PATH.
RktPath string `json:"rktPath,omitempty"`
// rktStage1Image is the image to use as stage1. Local paths and
// http/https URLs are supported.
RktStage1Image string `json:"rktStage1Image,omitempty"`
// systemContainer is the resource-only container in which to place
// all non-kernel processes that are not already in a container. Empty
// for no container. Rolling back the flag requires a reboot.
SystemContainer string `json:"systemContainer"`
// configureCBR0 enables the kubelet to configure cbr0 based on
// Node.Spec.PodCIDR.
ConfigureCBR0 bool `json:"configureCbr0"`
// maxPods is the number of pods that can run on this Kubelet.
MaxPods int `json:"maxPods"`
// dockerExecHandlerName is the handler to use when executing a command
// in a container. Valid values are 'native' and 'nsenter'. Defaults to
// 'native'.
DockerExecHandlerName string `json:"dockerExecHandlerName"`
// The CIDR to use for pod IP addresses, only used in standalone mode.
// In cluster mode, this is obtained from the master.
PodCIDR string `json:"podCIDR"`
// ResolverConfig is the resolver configuration file used as the basis
// for the container DNS resolution configuration.
ResolverConfig string `json:"resolvConf"`
// cpuCFSQuota enables CPU CFS quota enforcement for containers that
// specify CPU limits.
CPUCFSQuota bool `json:"cpuCFSQuota"`
// containerized should be set to true if kubelet is running in a container.
Containerized bool `json:"containerized"`
// maxOpenFiles is the number of files that can be opened by the Kubelet process.
MaxOpenFiles uint64 `json:"maxOpenFiles"`
// reconcileCIDR tells the Kubelet to reconcile the node CIDR with the CIDR
// specified by the API server. No-op if register-node or configure-cbr0 is false.
ReconcileCIDR bool `json:"reconcileCIDR"`
// registerSchedulable tells the kubelet to register the node as
// schedulable. No-op if register-node is false.
RegisterSchedulable bool `json:"registerSchedulable"`
// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver
KubeAPIQPS float32 `json:"kubeAPIQPS"`
// kubeAPIBurst is the burst to allow while talking with kubernetes
// apiserver
KubeAPIBurst int `json:"kubeAPIBurst"`
// serializeImagePulls, when enabled, tells the Kubelet to pull images one
// at a time. We recommend *not* changing the default value on nodes that
// run docker daemon with version < 1.9 or an Aufs storage backend.
// Issue #10959 has more details.
SerializeImagePulls bool `json:"serializeImagePulls"`
// experimentalFlannelOverlay enables experimental support for starting the
// kubelet with the default overlay network (flannel). Assumes flanneld
// is already running in client mode.
ExperimentalFlannelOverlay bool `json:"experimentalFlannelOverlay"`
// outOfDiskTransitionFrequency is the duration for which the kubelet has to
// wait before transitioning out of the out-of-disk node condition status.
OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency,omitempty"`
// nodeIP is the IP address of the node. If set, the kubelet will use this IP
// address for the node.
NodeIP string `json:"nodeIP,omitempty"`
// nodeLabels to add when registering the node in the cluster.
NodeLabels map[string]string `json:"nodeLabels"`
// nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade.
NonMasqueradeCIDR string `json:"nonMasqueradeCIDR"`
}
type KubeSchedulerConfiguration struct {
// port is the port that the scheduler's http service runs on.
Port int `json:"port"`
// address is the IP address to serve on.
Address string `json:"address"`
// algorithmProvider is the scheduling algorithm provider to use.
AlgorithmProvider string `json:"algorithmProvider"`
// policyConfigFile is the filepath to the scheduler policy configuration.
PolicyConfigFile string `json:"policyConfigFile"`
// enableProfiling enables profiling via web interface.
EnableProfiling bool `json:"enableProfiling"`
// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
KubeAPIQPS float32 `json:"kubeAPIQPS"`
// kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver.
KubeAPIBurst int `json:"kubeAPIBurst"`
// schedulerName is name of the scheduler, used to select which pods
// will be processed by this scheduler, based on pod's annotation with
// key 'scheduler.alpha.kubernetes.io/name'.
SchedulerName string `json:"schedulerName"`
// leaderElection defines the configuration of leader election client.
LeaderElection LeaderElectionConfiguration `json:"leaderElection"`
}
// LeaderElectionConfiguration defines the configuration of leader election
// clients for components that can run with leader election enabled.
type LeaderElectionConfiguration struct {
// leaderElect enables a leader election client to gain leadership
// before executing the main loop. Enable this when running replicated
// components for high availability.
LeaderElect bool `json:"leaderElect"`
// leaseDuration is the duration that non-leader candidates will wait
// after observing a leadership renewal until attempting to acquire
// leadership of a led but unrenewed leader slot. This is effectively the
// maximum duration that a leader can be stopped before it is replaced
// by another candidate. This is only applicable if leader election is
// enabled.
LeaseDuration unversioned.Duration `json:"leaseDuration"`
// renewDeadline is the interval between attempts by the acting master to
// renew a leadership slot before it stops leading. This must be less
// than or equal to the lease duration. This is only applicable if leader
// election is enabled.
RenewDeadline unversioned.Duration `json:"renewDeadline"`
// retryPeriod is the duration the clients should wait between attempting
// acquisition and renewal of a leadership. This is only applicable if
// leader election is enabled.
RetryPeriod unversioned.Duration `json:"retryPeriod"`
}
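
// Illustrative sketch only: plausible example values for
// LeaderElectionConfiguration, showing that renewDeadline must not exceed
// leaseDuration. The numbers are assumptions for demonstration, not shipped
// defaults, and this snippet assumes a "time" import.
var exampleLeaderElection = LeaderElectionConfiguration{
	LeaderElect:   true,
	LeaseDuration: unversioned.Duration{Duration: 15 * time.Second},
	RenewDeadline: unversioned.Duration{Duration: 10 * time.Second}, // must be <= LeaseDuration
	RetryPeriod:   unversioned.Duration{Duration: 2 * time.Second},
}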
| pkg/apis/componentconfig/types.go | 1 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.002315629506483674,
0.00029751635156571865,
0.0001618246897123754,
0.0001675075473031029,
0.00047552172327414155
] |
{
"id": 14,
"code_window": [
"\tstdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, forLoop)\n",
"\tExpect(strconv.Atoi(strings.TrimSpace(stdout))).To(BeNumerically(\"==\", expectedCount))\n",
"}\n",
"\n",
"func (config *KubeProxyTestConfig) createNetShellPodSpec(podName string, node string) *api.Pod {\n",
"\tpod := &api.Pod{\n",
"\t\tTypeMeta: unversioned.TypeMeta{\n",
"\t\t\tKind: \"Pod\",\n",
"\t\t\tAPIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (config *KubeProxyTestConfig) getSelfURL(path string, expected string) {\n",
"\tcmd := fmt.Sprintf(\"curl -s --connect-timeout 1 http://localhost:10249%s\", path)\n",
"\tBy(fmt.Sprintf(\"Getting kube-proxy self URL %s\", path))\n",
"\tstdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd)\n",
"\tExpect(strings.Contains(stdout, expected)).To(BeTrue())\n",
"}\n",
"\n"
],
"file_path": "test/e2e/kubeproxy.go",
"type": "add",
"edit_start_line_idx": 254
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package config provides utility objects for decoupling sources of configuration and the
// actual configuration state. Consumers must implement the Merger interface to unify
// the sources of change into an object.
package config
| pkg/util/config/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017852509336080402,
0.00017332904099021107,
0.0001673510269029066,
0.00017411098815500736,
0.000004595180598698789
] |
{
"id": 14,
"code_window": [
"\tstdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, forLoop)\n",
"\tExpect(strconv.Atoi(strings.TrimSpace(stdout))).To(BeNumerically(\"==\", expectedCount))\n",
"}\n",
"\n",
"func (config *KubeProxyTestConfig) createNetShellPodSpec(podName string, node string) *api.Pod {\n",
"\tpod := &api.Pod{\n",
"\t\tTypeMeta: unversioned.TypeMeta{\n",
"\t\t\tKind: \"Pod\",\n",
"\t\t\tAPIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (config *KubeProxyTestConfig) getSelfURL(path string, expected string) {\n",
"\tcmd := fmt.Sprintf(\"curl -s --connect-timeout 1 http://localhost:10249%s\", path)\n",
"\tBy(fmt.Sprintf(\"Getting kube-proxy self URL %s\", path))\n",
"\tstdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd)\n",
"\tExpect(strings.Contains(stdout, expected)).To(BeTrue())\n",
"}\n",
"\n"
],
"file_path": "test/e2e/kubeproxy.go",
"type": "add",
"edit_start_line_idx": 254
} | package yaml
import (
"bytes"
)
// The parser implements the following grammar:
//
// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
// implicit_document ::= block_node DOCUMENT-END*
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
// block_node_or_indentless_sequence ::=
// ALIAS
// | properties (block_content | indentless_block_sequence)?
// | block_content
// | indentless_block_sequence
// block_node ::= ALIAS
// | properties block_content?
// | block_content
// flow_node ::= ALIAS
// | properties flow_content?
// | flow_content
// properties ::= TAG ANCHOR? | ANCHOR TAG?
// block_content ::= block_collection | flow_collection | SCALAR
// flow_content ::= flow_collection | SCALAR
// block_collection ::= block_sequence | block_mapping
// flow_collection ::= flow_sequence | flow_mapping
// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
// block_mapping ::= BLOCK-MAPPING_START
// ((KEY block_node_or_indentless_sequence?)?
// (VALUE block_node_or_indentless_sequence?)?)*
// BLOCK-END
// flow_sequence ::= FLOW-SEQUENCE-START
// (flow_sequence_entry FLOW-ENTRY)*
// flow_sequence_entry?
// FLOW-SEQUENCE-END
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
// flow_mapping ::= FLOW-MAPPING-START
// (flow_mapping_entry FLOW-ENTRY)*
// flow_mapping_entry?
// FLOW-MAPPING-END
// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
// Peek the next token in the token queue.
func peek_token(parser *yaml_parser_t) *yaml_token_t {
if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
return &parser.tokens[parser.tokens_head]
}
return nil
}
// Remove the next token from the queue (must be called after peek_token).
func skip_token(parser *yaml_parser_t) {
parser.token_available = false
parser.tokens_parsed++
parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
parser.tokens_head++
}
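
// Illustrative sketch only (not part of the upstream package): the intended
// peek/skip discipline for the token queue above — peek to inspect the next
// token, then skip to consume it. drainTokens is a hypothetical helper name.
func drainTokens(parser *yaml_parser_t) {
	for {
		token := peek_token(parser)
		if token == nil || token.typ == yaml_STREAM_END_TOKEN {
			return
		}
		skip_token(parser)
	}
}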
// Get the next event.
func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
// Erase the event object.
*event = yaml_event_t{}
// No events after the end of the stream or error.
if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
return true
}
// Generate the next event.
return yaml_parser_state_machine(parser, event)
}
// Set parser error.
func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
parser.error = yaml_PARSER_ERROR
parser.problem = problem
parser.problem_mark = problem_mark
return false
}
func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
parser.error = yaml_PARSER_ERROR
parser.context = context
parser.context_mark = context_mark
parser.problem = problem
parser.problem_mark = problem_mark
return false
}
// State dispatcher.
func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
//trace("yaml_parser_state_machine", "state:", parser.state.String())
switch parser.state {
case yaml_PARSE_STREAM_START_STATE:
return yaml_parser_parse_stream_start(parser, event)
case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
return yaml_parser_parse_document_start(parser, event, true)
case yaml_PARSE_DOCUMENT_START_STATE:
return yaml_parser_parse_document_start(parser, event, false)
case yaml_PARSE_DOCUMENT_CONTENT_STATE:
return yaml_parser_parse_document_content(parser, event)
case yaml_PARSE_DOCUMENT_END_STATE:
return yaml_parser_parse_document_end(parser, event)
case yaml_PARSE_BLOCK_NODE_STATE:
return yaml_parser_parse_node(parser, event, true, false)
case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
return yaml_parser_parse_node(parser, event, true, true)
case yaml_PARSE_FLOW_NODE_STATE:
return yaml_parser_parse_node(parser, event, false, false)
case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
return yaml_parser_parse_block_sequence_entry(parser, event, true)
case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
return yaml_parser_parse_block_sequence_entry(parser, event, false)
case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
return yaml_parser_parse_indentless_sequence_entry(parser, event)
case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
return yaml_parser_parse_block_mapping_key(parser, event, true)
case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
return yaml_parser_parse_block_mapping_key(parser, event, false)
case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
return yaml_parser_parse_block_mapping_value(parser, event)
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
return yaml_parser_parse_flow_sequence_entry(parser, event, true)
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
return yaml_parser_parse_flow_sequence_entry(parser, event, false)
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
return yaml_parser_parse_flow_mapping_key(parser, event, true)
case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
return yaml_parser_parse_flow_mapping_key(parser, event, false)
case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
return yaml_parser_parse_flow_mapping_value(parser, event, false)
case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
return yaml_parser_parse_flow_mapping_value(parser, event, true)
default:
panic("invalid parser state")
}
return false
}
// Parse the production:
// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
// ************
func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
token := peek_token(parser)
if token == nil {
return false
}
if token.typ != yaml_STREAM_START_TOKEN {
return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
}
parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
start_mark: token.start_mark,
end_mark: token.end_mark,
encoding: token.encoding,
}
skip_token(parser)
return true
}
// Parse the productions:
// implicit_document ::= block_node DOCUMENT-END*
// *
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
// *************************
func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
token := peek_token(parser)
if token == nil {
return false
}
// Parse extra document end indicators.
if !implicit {
for token.typ == yaml_DOCUMENT_END_TOKEN {
skip_token(parser)
token = peek_token(parser)
if token == nil {
return false
}
}
}
if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
token.typ != yaml_DOCUMENT_START_TOKEN &&
token.typ != yaml_STREAM_END_TOKEN {
// Parse an implicit document.
if !yaml_parser_process_directives(parser, nil, nil) {
return false
}
parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
parser.state = yaml_PARSE_BLOCK_NODE_STATE
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
start_mark: token.start_mark,
end_mark: token.end_mark,
}
} else if token.typ != yaml_STREAM_END_TOKEN {
// Parse an explicit document.
var version_directive *yaml_version_directive_t
var tag_directives []yaml_tag_directive_t
start_mark := token.start_mark
if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
return false
}
token = peek_token(parser)
if token == nil {
return false
}
if token.typ != yaml_DOCUMENT_START_TOKEN {
yaml_parser_set_parser_error(parser,
"did not find expected <document start>", token.start_mark)
return false
}
parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
end_mark := token.end_mark
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
start_mark: start_mark,
end_mark: end_mark,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: false,
}
skip_token(parser)
} else {
// Parse the stream end.
parser.state = yaml_PARSE_END_STATE
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
start_mark: token.start_mark,
end_mark: token.end_mark,
}
skip_token(parser)
}
return true
}
// Parse the productions:
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
// ***********
//
func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
token := peek_token(parser)
if token == nil {
return false
}
if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
token.typ == yaml_DOCUMENT_START_TOKEN ||
token.typ == yaml_DOCUMENT_END_TOKEN ||
token.typ == yaml_STREAM_END_TOKEN {
parser.state = parser.states[len(parser.states)-1]
parser.states = parser.states[:len(parser.states)-1]
return yaml_parser_process_empty_scalar(parser, event,
token.start_mark)
}
return yaml_parser_parse_node(parser, event, true, false)
}
// Parse the productions:
// implicit_document ::= block_node DOCUMENT-END*
// *************
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
//
func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
token := peek_token(parser)
if token == nil {
return false
}
start_mark := token.start_mark
end_mark := token.start_mark
implicit := true
if token.typ == yaml_DOCUMENT_END_TOKEN {
end_mark = token.end_mark
skip_token(parser)
implicit = false
}
parser.tag_directives = parser.tag_directives[:0]
parser.state = yaml_PARSE_DOCUMENT_START_STATE
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
start_mark: start_mark,
end_mark: end_mark,
implicit: implicit,
}
return true
}
// Parse the productions:
// block_node_or_indentless_sequence ::=
// ALIAS
// *****
// | properties (block_content | indentless_block_sequence)?
// ********** *
// | block_content | indentless_block_sequence
// *
// block_node ::= ALIAS
// *****
// | properties block_content?
// ********** *
// | block_content
// *
// flow_node ::= ALIAS
// *****
// | properties flow_content?
// ********** *
// | flow_content
// *
// properties ::= TAG ANCHOR? | ANCHOR TAG?
// *************************
// block_content ::= block_collection | flow_collection | SCALAR
// ******
// flow_content ::= flow_collection | SCALAR
// ******
func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
token := peek_token(parser)
if token == nil {
return false
}
if token.typ == yaml_ALIAS_TOKEN {
parser.state = parser.states[len(parser.states)-1]
parser.states = parser.states[:len(parser.states)-1]
*event = yaml_event_t{
typ: yaml_ALIAS_EVENT,
start_mark: token.start_mark,
end_mark: token.end_mark,
anchor: token.value,
}
skip_token(parser)
return true
}
start_mark := token.start_mark
end_mark := token.start_mark
var tag_token bool
var tag_handle, tag_suffix, anchor []byte
var tag_mark yaml_mark_t
if token.typ == yaml_ANCHOR_TOKEN {
anchor = token.value
start_mark = token.start_mark
end_mark = token.end_mark
skip_token(parser)
token = peek_token(parser)
if token == nil {
return false
}
if token.typ == yaml_TAG_TOKEN {
tag_token = true
tag_handle = token.value
tag_suffix = token.suffix
tag_mark = token.start_mark
end_mark = token.end_mark
skip_token(parser)
token = peek_token(parser)
if token == nil {
return false
}
}
} else if token.typ == yaml_TAG_TOKEN {
tag_token = true
tag_handle = token.value
tag_suffix = token.suffix
start_mark = token.start_mark
tag_mark = token.start_mark
end_mark = token.end_mark
skip_token(parser)
token = peek_token(parser)
if token == nil {
return false
}
if token.typ == yaml_ANCHOR_TOKEN {
anchor = token.value
end_mark = token.end_mark
skip_token(parser)
token = peek_token(parser)
if token == nil {
return false
}
}
}
var tag []byte
if tag_token {
if len(tag_handle) == 0 {
tag = tag_suffix
tag_suffix = nil
} else {
for i := range parser.tag_directives {
if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
tag = append([]byte(nil), parser.tag_directives[i].prefix...)
tag = append(tag, tag_suffix...)
break
}
}
if len(tag) == 0 {
yaml_parser_set_parser_error_context(parser,
"while parsing a node", start_mark,
"found undefined tag handle", tag_mark)
return false
}
}
}
implicit := len(tag) == 0
if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
end_mark = token.end_mark
parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
*event = yaml_event_t{
typ: yaml_SEQUENCE_START_EVENT,
start_mark: start_mark,
end_mark: end_mark,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
}
return true
}
if token.typ == yaml_SCALAR_TOKEN {
var plain_implicit, quoted_implicit bool
end_mark = token.end_mark
if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
plain_implicit = true
} else if len(tag) == 0 {
quoted_implicit = true
}
parser.state = parser.states[len(parser.states)-1]
parser.states = parser.states[:len(parser.states)-1]
*event = yaml_event_t{
typ: yaml_SCALAR_EVENT,
start_mark: start_mark,
end_mark: end_mark,
anchor: anchor,
tag: tag,
value: token.value,
implicit: plain_implicit,
quoted_implicit: quoted_implicit,
style: yaml_style_t(token.style),
}
skip_token(parser)
return true
}
if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
// [Go] Some of the events below can be merged as they differ only on style.
end_mark = token.end_mark
parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
*event = yaml_event_t{
typ: yaml_SEQUENCE_START_EVENT,
start_mark: start_mark,
end_mark: end_mark,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
}
return true
}
if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
end_mark = token.end_mark
parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
start_mark: start_mark,
end_mark: end_mark,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
}
return true
}
if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
end_mark = token.end_mark
parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
*event = yaml_event_t{
typ: yaml_SEQUENCE_START_EVENT,
start_mark: start_mark,
end_mark: end_mark,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
}
return true
}
if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
end_mark = token.end_mark
parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
start_mark: start_mark,
end_mark: end_mark,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
}
return true
}
if len(anchor) > 0 || len(tag) > 0 {
parser.state = parser.states[len(parser.states)-1]
parser.states = parser.states[:len(parser.states)-1]
*event = yaml_event_t{
typ: yaml_SCALAR_EVENT,
start_mark: start_mark,
end_mark: end_mark,
anchor: anchor,
tag: tag,
implicit: implicit,
quoted_implicit: false,
style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
}
return true
}
context := "while parsing a flow node"
if block {
context = "while parsing a block node"
}
yaml_parser_set_parser_error_context(parser, context, start_mark,
"did not find expected node content", token.start_mark)
return false
}
// Parse the productions:
// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
// ******************** *********** * *********
//
func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
if first {
token := peek_token(parser)
parser.marks = append(parser.marks, token.start_mark)
skip_token(parser)
}
token := peek_token(parser)
if token == nil {
return false
}
if token.typ == yaml_BLOCK_ENTRY_TOKEN {
mark := token.end_mark
skip_token(parser)
token = peek_token(parser)
if token == nil {
return false
}
if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
return yaml_parser_parse_node(parser, event, true, false)
} else {
parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
return yaml_parser_process_empty_scalar(parser, event, mark)
}
}
if token.typ == yaml_BLOCK_END_TOKEN {
parser.state = parser.states[len(parser.states)-1]
parser.states = parser.states[:len(parser.states)-1]
parser.marks = parser.marks[:len(parser.marks)-1]
*event = yaml_event_t{
typ: yaml_SEQUENCE_END_EVENT,
start_mark: token.start_mark,
end_mark: token.end_mark,
}
skip_token(parser)
return true
}
context_mark := parser.marks[len(parser.marks)-1]
parser.marks = parser.marks[:len(parser.marks)-1]
return yaml_parser_set_parser_error_context(parser,
"while parsing a block collection", context_mark,
"did not find expected '-' indicator", token.start_mark)
}
// Parse the productions:
// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
// *********** *
func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
token := peek_token(parser)
if token == nil {
return false
}
if token.typ == yaml_BLOCK_ENTRY_TOKEN {
mark := token.end_mark
skip_token(parser)
token = peek_token(parser)
if token == nil {
return false
}
if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
token.typ != yaml_KEY_TOKEN &&
token.typ != yaml_VALUE_TOKEN &&
token.typ != yaml_BLOCK_END_TOKEN {
parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
return yaml_parser_parse_node(parser, event, true, false)
}
parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
return yaml_parser_process_empty_scalar(parser, event, mark)
}
parser.state = parser.states[len(parser.states)-1]
parser.states = parser.states[:len(parser.states)-1]
*event = yaml_event_t{
typ: yaml_SEQUENCE_END_EVENT,
start_mark: token.start_mark,
end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
}
return true
}
// Parse the productions:
// block_mapping ::= BLOCK-MAPPING_START
// *******************
// ((KEY block_node_or_indentless_sequence?)?
// *** *
// (VALUE block_node_or_indentless_sequence?)?)*
//
// BLOCK-END
// *********
//
func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
if first {
token := peek_token(parser)
parser.marks = append(parser.marks, token.start_mark)
skip_token(parser)
}
token := peek_token(parser)
if token == nil {
return false
}
if token.typ == yaml_KEY_TOKEN {
mark := token.end_mark
skip_token(parser)
token = peek_token(parser)
if token == nil {
return false
}
if token.typ != yaml_KEY_TOKEN &&
token.typ != yaml_VALUE_TOKEN &&
token.typ != yaml_BLOCK_END_TOKEN {
parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
return yaml_parser_parse_node(parser, event, true, true)
} else {
parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
return yaml_parser_process_empty_scalar(parser, event, mark)
}
} else if token.typ == yaml_BLOCK_END_TOKEN {
parser.state = parser.states[len(parser.states)-1]
parser.states = parser.states[:len(parser.states)-1]
parser.marks = parser.marks[:len(parser.marks)-1]
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
start_mark: token.start_mark,
end_mark: token.end_mark,
}
skip_token(parser)
return true
}
context_mark := parser.marks[len(parser.marks)-1]
parser.marks = parser.marks[:len(parser.marks)-1]
return yaml_parser_set_parser_error_context(parser,
"while parsing a block mapping", context_mark,
"did not find expected key", token.start_mark)
}
// Parse the productions:
// block_mapping ::= BLOCK-MAPPING_START
//
// ((KEY block_node_or_indentless_sequence?)?
//
// (VALUE block_node_or_indentless_sequence?)?)*
// ***** *
// BLOCK-END
//
//
func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
token := peek_token(parser)
if token == nil {
return false
}
if token.typ == yaml_VALUE_TOKEN {
mark := token.end_mark
skip_token(parser)
token = peek_token(parser)
if token == nil {
return false
}
if token.typ != yaml_KEY_TOKEN &&
token.typ != yaml_VALUE_TOKEN &&
token.typ != yaml_BLOCK_END_TOKEN {
parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
return yaml_parser_parse_node(parser, event, true, true)
}
parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
return yaml_parser_process_empty_scalar(parser, event, mark)
}
parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
}
// Parse the productions:
// flow_sequence ::= FLOW-SEQUENCE-START
// *******************
// (flow_sequence_entry FLOW-ENTRY)*
// * **********
// flow_sequence_entry?
// *
// FLOW-SEQUENCE-END
// *****************
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
// *
//
func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
if first {
token := peek_token(parser)
parser.marks = append(parser.marks, token.start_mark)
skip_token(parser)
}
token := peek_token(parser)
if token == nil {
return false
}
if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
if !first {
if token.typ == yaml_FLOW_ENTRY_TOKEN {
skip_token(parser)
token = peek_token(parser)
if token == nil {
return false
}
} else {
context_mark := parser.marks[len(parser.marks)-1]
parser.marks = parser.marks[:len(parser.marks)-1]
return yaml_parser_set_parser_error_context(parser,
"while parsing a flow sequence", context_mark,
"did not find expected ',' or ']'", token.start_mark)
}
}
if token.typ == yaml_KEY_TOKEN {
parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
start_mark: token.start_mark,
end_mark: token.end_mark,
implicit: true,
style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
}
skip_token(parser)
return true
} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
return yaml_parser_parse_node(parser, event, false, false)
}
}
parser.state = parser.states[len(parser.states)-1]
parser.states = parser.states[:len(parser.states)-1]
parser.marks = parser.marks[:len(parser.marks)-1]
*event = yaml_event_t{
typ: yaml_SEQUENCE_END_EVENT,
start_mark: token.start_mark,
end_mark: token.end_mark,
}
skip_token(parser)
return true
}
//
// Parse the productions:
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
// *** *
//
func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
token := peek_token(parser)
if token == nil {
return false
}
if token.typ != yaml_VALUE_TOKEN &&
token.typ != yaml_FLOW_ENTRY_TOKEN &&
token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
return yaml_parser_parse_node(parser, event, false, false)
}
mark := token.end_mark
skip_token(parser)
parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
return yaml_parser_process_empty_scalar(parser, event, mark)
}
// Parse the productions:
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
// ***** *
//
func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
token := peek_token(parser)
if token == nil {
return false
}
if token.typ == yaml_VALUE_TOKEN {
skip_token(parser)
token := peek_token(parser)
if token == nil {
return false
}
if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
return yaml_parser_parse_node(parser, event, false, false)
}
}
parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
}
// Parse the productions:
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
// *
//
func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
token := peek_token(parser)
if token == nil {
return false
}
parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
start_mark: token.start_mark,
end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
}
return true
}
// Parse the productions:
// flow_mapping ::= FLOW-MAPPING-START
// ******************
// (flow_mapping_entry FLOW-ENTRY)*
// * **********
// flow_mapping_entry?
// ******************
// FLOW-MAPPING-END
// ****************
// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
// * *** *
//
func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
if first {
token := peek_token(parser)
parser.marks = append(parser.marks, token.start_mark)
skip_token(parser)
}
token := peek_token(parser)
if token == nil {
return false
}
if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
if !first {
if token.typ == yaml_FLOW_ENTRY_TOKEN {
skip_token(parser)
token = peek_token(parser)
if token == nil {
return false
}
} else {
context_mark := parser.marks[len(parser.marks)-1]
parser.marks = parser.marks[:len(parser.marks)-1]
return yaml_parser_set_parser_error_context(parser,
"while parsing a flow mapping", context_mark,
"did not find expected ',' or '}'", token.start_mark)
}
}
if token.typ == yaml_KEY_TOKEN {
skip_token(parser)
token = peek_token(parser)
if token == nil {
return false
}
if token.typ != yaml_VALUE_TOKEN &&
token.typ != yaml_FLOW_ENTRY_TOKEN &&
token.typ != yaml_FLOW_MAPPING_END_TOKEN {
parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
return yaml_parser_parse_node(parser, event, false, false)
} else {
parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
}
} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
return yaml_parser_parse_node(parser, event, false, false)
}
}
parser.state = parser.states[len(parser.states)-1]
parser.states = parser.states[:len(parser.states)-1]
parser.marks = parser.marks[:len(parser.marks)-1]
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
start_mark: token.start_mark,
end_mark: token.end_mark,
}
skip_token(parser)
return true
}
// Parse the productions:
// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
// * ***** *
//
func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
token := peek_token(parser)
if token == nil {
return false
}
if empty {
parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
}
if token.typ == yaml_VALUE_TOKEN {
skip_token(parser)
token = peek_token(parser)
if token == nil {
return false
}
if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
return yaml_parser_parse_node(parser, event, false, false)
}
}
parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
}
// Generate an empty scalar event.
func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
*event = yaml_event_t{
typ: yaml_SCALAR_EVENT,
start_mark: mark,
end_mark: mark,
value: nil, // Empty
implicit: true,
style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
}
return true
}
var default_tag_directives = []yaml_tag_directive_t{
{[]byte("!"), []byte("!")},
{[]byte("!!"), []byte("tag:yaml.org,2002:")},
}
// Parse directives.
func yaml_parser_process_directives(parser *yaml_parser_t,
version_directive_ref **yaml_version_directive_t,
tag_directives_ref *[]yaml_tag_directive_t) bool {
var version_directive *yaml_version_directive_t
var tag_directives []yaml_tag_directive_t
token := peek_token(parser)
if token == nil {
return false
}
for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
if version_directive != nil {
yaml_parser_set_parser_error(parser,
"found duplicate %YAML directive", token.start_mark)
return false
}
if token.major != 1 || token.minor != 1 {
yaml_parser_set_parser_error(parser,
"found incompatible YAML document", token.start_mark)
return false
}
version_directive = &yaml_version_directive_t{
major: token.major,
minor: token.minor,
}
} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
value := yaml_tag_directive_t{
handle: token.value,
prefix: token.prefix,
}
if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
return false
}
tag_directives = append(tag_directives, value)
}
skip_token(parser)
token = peek_token(parser)
if token == nil {
return false
}
}
for i := range default_tag_directives {
if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
return false
}
}
if version_directive_ref != nil {
*version_directive_ref = version_directive
}
if tag_directives_ref != nil {
*tag_directives_ref = tag_directives
}
return true
}
// Append a tag directive to the directives stack.
func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
for i := range parser.tag_directives {
if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
if allow_duplicates {
return true
}
return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
}
}
// [Go] I suspect the copy is unnecessary. This was likely done
// because there was no way to track ownership of the data.
value_copy := yaml_tag_directive_t{
handle: make([]byte, len(value.handle)),
prefix: make([]byte, len(value.prefix)),
}
copy(value_copy.handle, value.handle)
copy(value_copy.prefix, value.prefix)
parser.tag_directives = append(parser.tag_directives, value_copy)
return true
}
| Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00041821339982561767,
0.0001739981962600723,
0.0001641647977521643,
0.00017219959408976138,
0.000023523334675701335
] |
{
"id": 14,
"code_window": [
"\tstdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, forLoop)\n",
"\tExpect(strconv.Atoi(strings.TrimSpace(stdout))).To(BeNumerically(\"==\", expectedCount))\n",
"}\n",
"\n",
"func (config *KubeProxyTestConfig) createNetShellPodSpec(podName string, node string) *api.Pod {\n",
"\tpod := &api.Pod{\n",
"\t\tTypeMeta: unversioned.TypeMeta{\n",
"\t\t\tKind: \"Pod\",\n",
"\t\t\tAPIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (config *KubeProxyTestConfig) getSelfURL(path string, expected string) {\n",
"\tcmd := fmt.Sprintf(\"curl -s --connect-timeout 1 http://localhost:10249%s\", path)\n",
"\tBy(fmt.Sprintf(\"Getting kube-proxy self URL %s\", path))\n",
"\tstdout := RunHostCmdOrDie(config.f.Namespace.Name, config.hostTestContainerPod.Name, cmd)\n",
"\tExpect(strings.Contains(stdout, expected)).To(BeTrue())\n",
"}\n",
"\n"
],
"file_path": "test/e2e/kubeproxy.go",
"type": "add",
"edit_start_line_idx": 254
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package registrytest
import (
"sync"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/watch"
)
func NewServiceRegistry() *ServiceRegistry {
return &ServiceRegistry{}
}
type ServiceRegistry struct {
mu sync.Mutex
List api.ServiceList
Service *api.Service
Updates []api.Service
Err error
DeletedID string
GottenID string
UpdatedID string
}
func (r *ServiceRegistry) SetError(err error) {
r.mu.Lock()
defer r.mu.Unlock()
r.Err = err
}
func (r *ServiceRegistry) ListServices(ctx api.Context, options *api.ListOptions) (*api.ServiceList, error) {
r.mu.Lock()
defer r.mu.Unlock()
ns, _ := api.NamespaceFrom(ctx)
// Copy metadata from internal list into result
res := new(api.ServiceList)
res.TypeMeta = r.List.TypeMeta
res.ListMeta = r.List.ListMeta
if ns != api.NamespaceAll {
for _, service := range r.List.Items {
if ns == service.Namespace {
res.Items = append(res.Items, service)
}
}
} else {
res.Items = append([]api.Service{}, r.List.Items...)
}
return res, r.Err
}
func (r *ServiceRegistry) CreateService(ctx api.Context, svc *api.Service) (*api.Service, error) {
r.mu.Lock()
defer r.mu.Unlock()
r.Service = new(api.Service)
*r.Service = *svc
r.List.Items = append(r.List.Items, *svc)
return svc, r.Err
}
func (r *ServiceRegistry) GetService(ctx api.Context, id string) (*api.Service, error) {
r.mu.Lock()
defer r.mu.Unlock()
r.GottenID = id
return r.Service, r.Err
}
func (r *ServiceRegistry) DeleteService(ctx api.Context, id string) error {
r.mu.Lock()
defer r.mu.Unlock()
r.DeletedID = id
r.Service = nil
return r.Err
}
func (r *ServiceRegistry) UpdateService(ctx api.Context, svc *api.Service) (*api.Service, error) {
r.mu.Lock()
defer r.mu.Unlock()
r.UpdatedID = svc.Name
*r.Service = *svc
r.Updates = append(r.Updates, *svc)
return svc, r.Err
}
func (r *ServiceRegistry) WatchServices(ctx api.Context, options *api.ListOptions) (watch.Interface, error) {
r.mu.Lock()
defer r.mu.Unlock()
return nil, r.Err
}
| pkg/registry/registrytest/service.go | 0 | https://github.com/kubernetes/kubernetes/commit/7ed83ad4f94b94a80d89eded18eaffd5cdd5d63b | [
0.00017852509336080402,
0.00017220988229382783,
0.0001676079846220091,
0.00017156414105556905,
0.0000030056864943617256
] |
{
"id": 0,
"code_window": [
"\treturn nil\n",
"}\n",
"\n",
"// SetData sets new config file storage\n",
"func SetData(newData Storage) {\n",
"\tdata = newData\n",
"\tdataLoaded = false\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// If no config file, use in-memory config (which is the default)\n",
"\tif configPath == \"\" {\n",
"\t\treturn\n",
"\t}\n"
],
"file_path": "fs/config/config.go",
"type": "add",
"edit_start_line_idx": 334
} | // Package config reads, writes and edits the config file and deals with command line flags
package config
import (
"context"
"encoding/json"
"fmt"
"log"
mathrand "math/rand"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"time"
"github.com/mitchellh/go-homedir"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/random"
)
const (
configFileName = "rclone.conf"
hiddenConfigFileName = "." + configFileName
noConfigFile = "notfound"
// ConfigToken is the key used to store the token under
ConfigToken = "token"
// ConfigClientID is the config key used to store the client id
ConfigClientID = "client_id"
// ConfigClientSecret is the config key used to store the client secret
ConfigClientSecret = "client_secret"
// ConfigAuthURL is the config key used to store the auth server endpoint
ConfigAuthURL = "auth_url"
// ConfigTokenURL is the config key used to store the token server endpoint
ConfigTokenURL = "token_url"
// ConfigEncoding is the config key to change the encoding for a backend
ConfigEncoding = "encoding"
// ConfigEncodingHelp is the help for ConfigEncoding
ConfigEncodingHelp = "This sets the encoding for the backend.\n\nSee: the [encoding section in the overview](/overview/#encoding) for more info."
// ConfigAuthorize indicates that we just want "rclone authorize"
ConfigAuthorize = "config_authorize"
// ConfigAuthNoBrowser indicates that we do not want to open browser
ConfigAuthNoBrowser = "config_auth_no_browser"
)
// Storage defines an interface for loading and saving config to
// persistent storage. Rclone provides a default implementation to
// load and save to a config file when this is imported
//
// import "github.com/rclone/rclone/fs/config/configfile"
// configfile.Install()
type Storage interface {
// GetSectionList returns a slice of strings with names for all the
// sections
GetSectionList() []string
// HasSection returns true if section exists in the config file
HasSection(section string) bool
// DeleteSection removes the named section and all config from the
// config file
DeleteSection(section string)
// GetKeyList returns the keys in this section
GetKeyList(section string) []string
// GetValue returns the key in section with a found flag
GetValue(section string, key string) (value string, found bool)
// SetValue sets the value under key in section
SetValue(section string, key string, value string)
// DeleteKey removes the key under section
DeleteKey(section string, key string) bool
// Load the config from permanent storage
Load() error
// Save the config to permanent storage
Save() error
// Serialize the config into a string
Serialize() (string, error)
}
// Global
var (
// CacheDir points to the cache directory. Users of this
// should make a subdirectory and use MkdirAll() to create it
// and any parents.
CacheDir = makeCacheDir()
// Password can be used to configure the random password generator
Password = random.Password
)
var (
configPath string
data Storage
dataLoaded bool
)
func init() {
// Set the function pointers up in fs
fs.ConfigFileGet = FileGetFlag
fs.ConfigFileSet = SetValueAndSave
configPath = makeConfigPath()
data = newDefaultStorage()
}
// Join directory with filename, and check if exists
func findFile(dir string, name string) string {
path := filepath.Join(dir, name)
if _, err := os.Stat(path); err != nil {
return ""
}
return path
}
// Find current user's home directory
func findHomeDir() (string, error) {
path, err := homedir.Dir()
if err != nil {
fs.Debugf(nil, "Home directory lookup failed and cannot be used as configuration location: %v", err)
} else if path == "" {
// On Unix homedir returns success but an empty string for a user with an empty home directory configured in the passwd file
fs.Debugf(nil, "Home directory not defined and cannot be used as configuration location")
}
return path, err
}
// Find rclone executable directory and look for existing rclone.conf there
// (<rclone_exe_dir>/rclone.conf)
func findLocalConfig() (configDir string, configFile string) {
if exePath, err := os.Executable(); err == nil {
configDir = filepath.Dir(exePath)
configFile = findFile(configDir, configFileName)
}
return
}
// Get path to Windows AppData config subdirectory for rclone and look for existing rclone.conf there
// ($AppData/rclone/rclone.conf)
func findAppDataConfig() (configDir string, configFile string) {
if appDataDir := os.Getenv("APPDATA"); appDataDir != "" {
configDir = filepath.Join(appDataDir, "rclone")
configFile = findFile(configDir, configFileName)
} else {
fs.Debugf(nil, "Environment variable APPDATA is not defined and cannot be used as configuration location")
}
return
}
// Get path to XDG config subdirectory for rclone and look for existing rclone.conf there
// (see XDG Base Directory specification: https://specifications.freedesktop.org/basedir-spec/latest/).
// ($XDG_CONFIG_HOME/rclone/rclone.conf)
func findXDGConfig() (configDir string, configFile string) {
if xdgConfigDir := os.Getenv("XDG_CONFIG_HOME"); xdgConfigDir != "" {
configDir = filepath.Join(xdgConfigDir, "rclone")
configFile = findFile(configDir, configFileName)
}
return
}
// Get path to .config subdirectory for rclone and look for existing rclone.conf there
// (~/.config/rclone/rclone.conf)
func findDotConfigConfig(home string) (configDir string, configFile string) {
if home != "" {
configDir = filepath.Join(home, ".config", "rclone")
configFile = findFile(configDir, configFileName)
}
return
}
// Look for existing .rclone.conf (legacy hidden filename) in root of user's home directory
// (~/.rclone.conf)
func findOldHomeConfig(home string) (configDir string, configFile string) {
if home != "" {
configDir = home
configFile = findFile(home, hiddenConfigFileName)
}
return
}
// Return the path to the configuration file
func makeConfigPath() string {
// Look for existing rclone.conf in prioritized list of known locations
// Also get configuration directory to use for new config file when no existing is found.
var (
configFile string
configDir string
primaryConfigDir string
fallbackConfigDir string
)
// <rclone_exe_dir>/rclone.conf
if _, configFile = findLocalConfig(); configFile != "" {
return configFile
}
// Windows: $AppData/rclone/rclone.conf
// This is also the default location for new config when no existing is found
if runtime.GOOS == "windows" {
if primaryConfigDir, configFile = findAppDataConfig(); configFile != "" {
return configFile
}
}
// $XDG_CONFIG_HOME/rclone/rclone.conf
// Also looking for this on Windows, for backwards compatibility reasons.
if configDir, configFile = findXDGConfig(); configFile != "" {
return configFile
}
if runtime.GOOS != "windows" {
// On Unix this is also the default location for new config when no existing is found
primaryConfigDir = configDir
}
// ~/.config/rclone/rclone.conf
// This is also the fallback location for new config
// (when $AppData on Windows and $XDG_CONFIG_HOME on Unix is not defined)
homeDir, homeDirErr := findHomeDir()
if fallbackConfigDir, configFile = findDotConfigConfig(homeDir); configFile != "" {
return configFile
}
// ~/.rclone.conf
if _, configFile = findOldHomeConfig(homeDir); configFile != "" {
return configFile
}
// No existing config file found, prepare proper default for a new one.
// But first check if the user supplied a --config flag or environment
// variable, since then we skip actually trying to create the default
// and report any errors related to it (we can't use pflag for this because
// it isn't initialised yet, so we search the command line manually).
_, configSupplied := os.LookupEnv("RCLONE_CONFIG")
if !configSupplied {
for _, item := range os.Args {
if item == "--config" || strings.HasPrefix(item, "--config=") {
configSupplied = true
break
}
}
}
// If the search above found a configuration directory to use for new config,
// create it now so it is ready for the rclone.conf file to be written into it
// later. This also acts as a test of permissions, so we can fall back if we are
// not even able to create the directory.
if primaryConfigDir != "" {
configDir = primaryConfigDir
} else if fallbackConfigDir != "" {
configDir = fallbackConfigDir
} else {
configDir = ""
}
if configDir != "" {
configFile = filepath.Join(configDir, configFileName)
if configSupplied {
// User supplied custom config option, just return the default path
// as is without creating any directories, since it will not be used
// anyway and we don't want to unnecessarily create empty directory.
return configFile
}
var mkdirErr error
if mkdirErr = os.MkdirAll(configDir, os.ModePerm); mkdirErr == nil {
return configFile
}
// Problem: Try a fallback location. If we did find a home directory then
// just assume file .rclone.conf (legacy hidden filename) can be written in
// its root (~/.rclone.conf).
if homeDir != "" {
fs.Debugf(nil, "Configuration directory could not be created and will not be used: %v", mkdirErr)
return filepath.Join(homeDir, hiddenConfigFileName)
}
if !configSupplied {
fs.Errorf(nil, "Couldn't find home directory nor create configuration directory: %v", mkdirErr)
}
} else if !configSupplied {
if homeDirErr != nil {
fs.Errorf(nil, "Couldn't find configuration directory nor home directory: %v", homeDirErr)
} else {
fs.Errorf(nil, "Couldn't find configuration directory nor home directory")
}
}
// No known location can be used: we may have found a configDir
// (XDG_CONFIG_HOME or APPDATA) which couldn't be created, but in any case
// we did not find a home directory.
// Report it as an error, and as a last resort return the path of
// .rclone.conf (legacy hidden filename) relative to the current working directory.
if !configSupplied {
fs.Errorf(nil, "Defaulting to storing config in current directory.")
fs.Errorf(nil, "Use --config flag to workaround.")
}
return hiddenConfigFileName
}
// GetConfigPath returns the current config file path
func GetConfigPath() string {
return configPath
}
// SetConfigPath sets new config file path
//
// Checks for empty string, os null device, or special path, all of which indicate in-memory config.
func SetConfigPath(path string) (err error) {
var cfgPath string
if path == "" || path == os.DevNull {
cfgPath = ""
} else if filepath.Base(path) == noConfigFile {
cfgPath = ""
} else if err = file.IsReserved(path); err != nil {
return err
} else if cfgPath, err = filepath.Abs(path); err != nil {
return err
}
configPath = cfgPath
return nil
}
// SetData sets new config file storage
func SetData(newData Storage) {
data = newData
dataLoaded = false
}
// Data returns current config file storage
func Data() Storage {
return data
}
// LoadedData ensures the config file storage is loaded and returns it
func LoadedData() Storage {
if !dataLoaded {
// Set RCLONE_CONFIG_DIR for backend config and subprocesses
// If empty configPath (in-memory only) the value will be "."
_ = os.Setenv("RCLONE_CONFIG_DIR", filepath.Dir(configPath))
// Load configuration from file (or initialize sensible default if no file or error)
if err := data.Load(); err == nil {
fs.Debugf(nil, "Using config file from %q", configPath)
dataLoaded = true
} else if err == ErrorConfigFileNotFound {
if configPath == "" {
fs.Debugf(nil, "Config is memory-only - using defaults")
} else {
fs.Logf(nil, "Config file %q not found - using defaults", configPath)
}
dataLoaded = true
} else {
log.Fatalf("Failed to load config file %q: %v", configPath, err)
}
}
return data
}
// ErrorConfigFileNotFound is returned when the config file is not found
var ErrorConfigFileNotFound = errors.New("config file not found")
// SaveConfig saves the configuration file.
// If saving returns an error it is retried after a short sleep.
func SaveConfig() {
if configPath == "" {
fs.Debugf(nil, "Skipping save for memory-only config")
return
}
ctx := context.Background()
ci := fs.GetConfig(ctx)
var err error
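// Retry the save, sleeping for a random interval of up to a second between attempts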
for i := 0; i < ci.LowLevelRetries+1; i++ {
if err = LoadedData().Save(); err == nil {
return
}
waitingTimeMs := mathrand.Intn(1000)
time.Sleep(time.Duration(waitingTimeMs) * time.Millisecond)
}
fs.Errorf(nil, "Failed to save config after %d tries: %v", ci.LowLevelRetries, err)
}
// SetValueAndSave sets the key to the value and saves just that
// value in the config file. It loads the old config file in from
// disk first and overwrites the given value only.
func SetValueAndSave(name, key, value string) error {
// Set the value in config in case we fail to reload it
LoadedData().SetValue(name, key, value)
// Save it again
SaveConfig()
return nil
}
// getWithDefault gets key out of section name returning defaultValue if not
// found.
func getWithDefault(name, key, defaultValue string) string {
value, found := LoadedData().GetValue(name, key)
if !found {
return defaultValue
}
return value
}
// UpdateRemoteOpt configures the remote update
type UpdateRemoteOpt struct {
// Treat all passwords as plain that need obscuring
Obscure bool `json:"obscure"`
// Treat all passwords as obscured
NoObscure bool `json:"noObscure"`
// Don't interact with the user - return questions
NonInteractive bool `json:"nonInteractive"`
// If set then supply state and result parameters to continue the process
Continue bool `json:"continue"`
// If set then ask all the questions, not just the post config questions
All bool `json:"all"`
// State to restart with - used with Continue
State string `json:"state"`
// Result to return - used with Continue
Result string `json:"result"`
// If set then edit existing values
Edit bool `json:"edit"`
}
func updateRemote(ctx context.Context, name string, keyValues rc.Params, opt UpdateRemoteOpt) (out *fs.ConfigOut, err error) {
if opt.Obscure && opt.NoObscure {
return nil, errors.New("can't use --obscure and --no-obscure together")
}
err = fspath.CheckConfigName(name)
if err != nil {
return nil, err
}
interactive := !(opt.NonInteractive || opt.Continue)
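// When running interactively without --all, suppress the final confirmation question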
if interactive && !opt.All {
ctx = suppressConfirm(ctx)
}
fsType := FileGet(name, "type")
if fsType == "" {
return nil, errors.New("couldn't find type field in config")
}
ri, err := fs.Find(fsType)
if err != nil {
return nil, errors.Errorf("couldn't find backend for type %q", fsType)
}
// Work out which options need to be obscured
needsObscure := map[string]struct{}{}
if !opt.NoObscure {
for _, option := range ri.Options {
if option.IsPassword {
needsObscure[option.Name] = struct{}{}
}
}
}
choices := configmap.Simple{}
m := fs.ConfigMap(ri, name, nil)
// Set the config
for k, v := range keyValues {
vStr := fmt.Sprint(v)
// Obscure parameter if necessary
if _, ok := needsObscure[k]; ok {
_, err := obscure.Reveal(vStr)
if err != nil || opt.Obscure {
// If error => not already obscured, so obscure it
// or we are forced to obscure
vStr, err = obscure.Obscure(vStr)
if err != nil {
return nil, errors.Wrap(err, "UpdateRemote: obscure failed")
}
}
}
choices.Set(k, vStr)
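// Ephemeral keys only drive the config state machine and are not written to the config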
if !strings.HasPrefix(k, fs.ConfigKeyEphemeralPrefix) {
m.Set(k, vStr)
}
}
if opt.Edit {
choices[fs.ConfigEdit] = "true"
}
if interactive {
var state = ""
if opt.All {
state = fs.ConfigAll
}
err = backendConfig(ctx, name, m, ri, choices, state)
} else {
// Start the config state machine
in := fs.ConfigIn{
State: opt.State,
Result: opt.Result,
}
if in.State == "" && opt.All {
in.State = fs.ConfigAll
}
out, err = fs.BackendConfig(ctx, name, m, ri, choices, in)
}
if err != nil {
return nil, err
}
SaveConfig()
cache.ClearConfig(name) // remove any remotes based on this config from the cache
return out, nil
}
// UpdateRemote adds the keyValues passed in to the remote of name.
// keyValues should be key, value pairs.
func UpdateRemote(ctx context.Context, name string, keyValues rc.Params, opt UpdateRemoteOpt) (out *fs.ConfigOut, err error) {
opt.Edit = true
return updateRemote(ctx, name, keyValues, opt)
}
// CreateRemote creates a new remote with name, type and a list of
// parameters which are key, value pairs. If update is set then it
// adds the new keys rather than replacing all of them.
func CreateRemote(ctx context.Context, name string, Type string, keyValues rc.Params, opts UpdateRemoteOpt) (out *fs.ConfigOut, err error) {
err = fspath.CheckConfigName(name)
if err != nil {
return nil, err
}
if !opts.Continue {
// Delete the old config if it exists
LoadedData().DeleteSection(name)
// Set the type
LoadedData().SetValue(name, "type", Type)
}
// Set the remaining values
return UpdateRemote(ctx, name, keyValues, opts)
}
// PasswordRemote adds the keyValues passed in to the remote of name.
// keyValues should be key, value pairs.
func PasswordRemote(ctx context.Context, name string, keyValues rc.Params) error {
ctx = suppressConfirm(ctx)
err := fspath.CheckConfigName(name)
if err != nil {
return err
}
for k, v := range keyValues {
keyValues[k] = obscure.MustObscure(fmt.Sprint(v))
}
_, err = UpdateRemote(ctx, name, keyValues, UpdateRemoteOpt{
NoObscure: true,
})
return err
}
// JSONListProviders prints all the providers and options in JSON format
func JSONListProviders() error {
b, err := json.MarshalIndent(fs.Registry, "", " ")
if err != nil {
return errors.Wrap(err, "failed to marshal examples")
}
_, err = os.Stdout.Write(b)
if err != nil {
return errors.Wrap(err, "failed to write providers list")
}
return nil
}
// fsOption returns an Option describing the possible remotes
func fsOption() *fs.Option {
o := &fs.Option{
Name: "Storage",
Help: "Type of storage to configure.",
Default: "",
}
for _, item := range fs.Registry {
example := fs.OptionExample{
Value: item.Name,
Help: item.Description,
}
o.Examples = append(o.Examples, example)
}
o.Examples.Sort()
return o
}
// FileGetFlag gets the config key under section returning the
// value and true if found or ("", false) otherwise
func FileGetFlag(section, key string) (string, bool) {
return LoadedData().GetValue(section, key)
}
// FileGet gets the config key under section returning the default if not set.
//
// It looks up defaults in the environment if they are present
func FileGet(section, key string) string {
var defaultVal string
envKey := fs.ConfigToEnv(section, key)
newValue, found := os.LookupEnv(envKey)
if found {
defaultVal = newValue
}
return getWithDefault(section, key, defaultVal)
}
// FileSet sets the key in section to value. It doesn't save
// the config file.
func FileSet(section, key, value string) {
if value != "" {
LoadedData().SetValue(section, key, value)
} else {
FileDeleteKey(section, key)
}
}
// FileDeleteKey deletes the config key in the config file.
// It returns true if the key was deleted,
// or returns false if the section or key didn't exist.
func FileDeleteKey(section, key string) bool {
return LoadedData().DeleteKey(section, key)
}
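// matchEnv matches remote names defined via RCLONE_CONFIG_<NAME>_TYPE environment variables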
var matchEnv = regexp.MustCompile(`^RCLONE_CONFIG_(.*?)_TYPE=.*$`)
// FileSections returns the sections in the config file
// including any defined by environment variables.
func FileSections() []string {
sections := LoadedData().GetSectionList()
for _, item := range os.Environ() {
matches := matchEnv.FindStringSubmatch(item)
if len(matches) == 2 {
sections = append(sections, strings.ToLower(matches[1]))
}
}
return sections
}
// DumpRcRemote dumps the config for a single remote
func DumpRcRemote(name string) (dump rc.Params) {
params := rc.Params{}
for _, key := range LoadedData().GetKeyList(name) {
params[key] = FileGet(name, key)
}
return params
}
// DumpRcBlob dumps all the config as an unstructured blob suitable
// for the rc
func DumpRcBlob() (dump rc.Params) {
dump = rc.Params{}
for _, name := range LoadedData().GetSectionList() {
dump[name] = DumpRcRemote(name)
}
return dump
}
// Dump dumps all the config as a JSON file
func Dump() error {
dump := DumpRcBlob()
b, err := json.MarshalIndent(dump, "", " ")
if err != nil {
return errors.Wrap(err, "failed to marshal config dump")
}
_, err = os.Stdout.Write(b)
if err != nil {
return errors.Wrap(err, "failed to write config dump")
}
return nil
}
// makeCacheDir returns a directory to use for caching.
//
// Code borrowed from go stdlib until it is made public
func makeCacheDir() (dir string) {
// Compute default location.
switch runtime.GOOS {
case "windows":
dir = os.Getenv("LocalAppData")
case "darwin":
dir = os.Getenv("HOME")
if dir != "" {
dir += "/Library/Caches"
}
case "plan9":
dir = os.Getenv("home")
if dir != "" {
// Plan 9 has no established per-user cache directory,
// but $home/lib/xyz is the usual equivalent of $HOME/.xyz on Unix.
dir += "/lib/cache"
}
default: // Unix
// https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
dir = os.Getenv("XDG_CACHE_HOME")
if dir == "" {
dir = os.Getenv("HOME")
if dir != "" {
dir += "/.cache"
}
}
}
// if no dir found then use TempDir - we will have a cachedir!
if dir == "" {
dir = os.TempDir()
}
return filepath.Join(dir, "rclone")
}
| fs/config/config.go | 1 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.9960903525352478,
0.22578886151313782,
0.00016837622388266027,
0.004877584986388683,
0.3944661021232605
] |
{
"id": 0,
"code_window": [
"\treturn nil\n",
"}\n",
"\n",
"// SetData sets new config file storage\n",
"func SetData(newData Storage) {\n",
"\tdata = newData\n",
"\tdataLoaded = false\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// If no config file, use in-memory config (which is the default)\n",
"\tif configPath == \"\" {\n",
"\t\treturn\n",
"\t}\n"
],
"file_path": "fs/config/config.go",
"type": "add",
"edit_start_line_idx": 334
} | // Filesystem registry and backend options
package fs
import (
"context"
"encoding/json"
"fmt"
"log"
"reflect"
"sort"
"strings"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
)
// Registry of filesystems
var Registry []*RegInfo
// RegInfo provides information about a filesystem
type RegInfo struct {
// Name of this fs
Name string
// Description of this fs - defaults to Name
Description string
// Prefix for command line flags for this fs - defaults to Name if not set
Prefix string
// Create a new file system. If root refers to an existing
// object, then it should return an Fs which which points to
// the parent of that object and ErrorIsFile.
NewFs func(ctx context.Context, name string, root string, config configmap.Mapper) (Fs, error) `json:"-"`
// Function to call to help with config - see docs for ConfigIn for more info
Config func(ctx context.Context, name string, m configmap.Mapper, configIn ConfigIn) (*ConfigOut, error) `json:"-"`
// Options for the Fs configuration
Options Options
// The command help, if any
CommandHelp []CommandHelp
}
// FileName returns the on disk file name for this backend
func (ri *RegInfo) FileName() string {
return strings.Replace(ri.Name, " ", "", -1)
}
// Options is a slice of configuration Option for a backend
type Options []Option
// Set the default values for the options
func (os Options) setValues() {
for i := range os {
o := &os[i]
if o.Default == nil {
o.Default = ""
}
}
}
// Get the Option corresponding to name or return nil if not found
func (os Options) Get(name string) *Option {
for i := range os {
opt := &os[i]
if opt.Name == name {
return opt
}
}
return nil
}
// Overridden discovers which config items have been overridden in the
// configmap passed in, either by the config string, command line
// flags or environment variables
func (os Options) Overridden(m *configmap.Map) configmap.Simple {
var overridden = configmap.Simple{}
for i := range os {
opt := &os[i]
value, isSet := m.GetPriority(opt.Name, configmap.PriorityNormal)
if isSet {
overridden.Set(opt.Name, value)
}
}
return overridden
}
// NonDefault discovers which config values aren't at their default
func (os Options) NonDefault(m configmap.Getter) configmap.Simple {
var nonDefault = configmap.Simple{}
for i := range os {
opt := &os[i]
value, isSet := m.Get(opt.Name)
if !isSet {
continue
}
defaultValue := fmt.Sprint(opt.Default)
if value != defaultValue {
nonDefault.Set(opt.Name, value)
}
}
return nonDefault
}
// HasAdvanced discovers if any options have an Advanced setting
func (os Options) HasAdvanced() bool {
for i := range os {
opt := &os[i]
if opt.Advanced {
return true
}
}
return false
}
// OptionVisibility controls whether the options are visible in the
// configurator or the command line.
type OptionVisibility byte
// Constants Option.Hide
const (
OptionHideCommandLine OptionVisibility = 1 << iota
OptionHideConfigurator
OptionHideBoth = OptionHideCommandLine | OptionHideConfigurator
)
// Option describes an option for the config wizard
//
// This also describes command line options and environment variables
type Option struct {
Name string // name of the option in snake_case
Help string // Help, the first line only is used for the command line help
Provider string // Set to filter on provider
Default interface{} // default value, nil => ""
Value interface{} // value to be set by flags
Examples OptionExamples `json:",omitempty"` // config examples
ShortOpt string // the short option for this if required
Hide OptionVisibility // set this to hide the config from the configurator or the command line
Required bool // this option is required
IsPassword bool // set if the option is a password
NoPrefix bool // set if the option for this should not use the backend prefix
Advanced bool // set if this is an advanced config option
Exclusive bool // set if the answer can only be one of the examples
}
// BaseOption is an alias for Option used internally
type BaseOption Option
// MarshalJSON turns an Option into JSON
//
// It adds some generated fields for ease of use
// - DefaultStr - a string rendering of Default
// - ValueStr - a string rendering of Value
// - Type - the type of the option
func (o *Option) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
BaseOption
DefaultStr string
ValueStr string
Type string
}{
BaseOption: BaseOption(*o),
DefaultStr: fmt.Sprint(o.Default),
ValueStr: o.String(),
Type: o.Type(),
})
}
// GetValue gets the current value, which is the default if not set
func (o *Option) GetValue() interface{} {
val := o.Value
if val == nil {
val = o.Default
if val == nil {
val = ""
}
}
return val
}
// String turns Option into a string
func (o *Option) String() string {
return fmt.Sprint(o.GetValue())
}
// Set an Option from a string
func (o *Option) Set(s string) (err error) {
newValue, err := configstruct.StringToInterface(o.GetValue(), s)
if err != nil {
return err
}
o.Value = newValue
return nil
}
// Type of the value
func (o *Option) Type() string {
return reflect.TypeOf(o.GetValue()).Name()
}
// FlagName for the option
func (o *Option) FlagName(prefix string) string {
name := strings.Replace(o.Name, "_", "-", -1) // convert snake_case to kebab-case
if !o.NoPrefix {
name = prefix + "-" + name
}
return name
}
// EnvVarName for the option
func (o *Option) EnvVarName(prefix string) string {
return OptionToEnv(prefix + "-" + o.Name)
}
// Copy makes a shallow copy of the option
func (o *Option) Copy() *Option {
copy := new(Option)
*copy = *o
return copy
}
// OptionExamples is a slice of examples
type OptionExamples []OptionExample
// Len is part of sort.Interface.
func (os OptionExamples) Len() int { return len(os) }
// Swap is part of sort.Interface.
func (os OptionExamples) Swap(i, j int) { os[i], os[j] = os[j], os[i] }
// Less is part of sort.Interface.
func (os OptionExamples) Less(i, j int) bool { return os[i].Help < os[j].Help }
// Sort sorts an OptionExamples
func (os OptionExamples) Sort() { sort.Sort(os) }
// OptionExample describes an example for an Option
type OptionExample struct {
Value string
Help string
Provider string
}
// Register a filesystem
//
// Fs modules should use this in an init() function
func Register(info *RegInfo) {
info.Options.setValues()
if info.Prefix == "" {
info.Prefix = info.Name
}
Registry = append(Registry, info)
}
// Find looks for a RegInfo object for the name passed in. The name
// can be either the Name or the Prefix.
//
// Services are looked up in the config file
func Find(name string) (*RegInfo, error) {
for _, item := range Registry {
if item.Name == name || item.Prefix == name || item.FileName() == name {
return item, nil
}
}
return nil, errors.Errorf("didn't find backend called %q", name)
}
// MustFind looks for an Info object for the type name passed in
//
// Services are looked up in the config file
//
// Exits with a fatal error if not found
func MustFind(name string) *RegInfo {
fs, err := Find(name)
if err != nil {
log.Fatalf("Failed to find remote: %v", err)
}
return fs
}
| fs/registry.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.012049349956214428,
0.001800972386263311,
0.00016810141096357256,
0.0005482864798977971,
0.002984205959364772
] |
{
"id": 0,
"code_window": [
"\treturn nil\n",
"}\n",
"\n",
"// SetData sets new config file storage\n",
"func SetData(newData Storage) {\n",
"\tdata = newData\n",
"\tdataLoaded = false\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// If no config file, use in-memory config (which is the default)\n",
"\tif configPath == \"\" {\n",
"\t\treturn\n",
"\t}\n"
],
"file_path": "fs/config/config.go",
"type": "add",
"edit_start_line_idx": 334
} | <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<html>
<head>
<title>Index of /nick/pub</title>
</head>
<body>
<h1>Index of /nick/pub</h1>
<table><tr><th><img src="/icons/blank.gif" alt="[ICO]"></th><th><a href="?C=N;O=D">Name</a></th><th><a href="?C=M;O=A">Last modified</a></th><th><a href="?C=S;O=A">Size</a></th><th><a href="?C=D;O=A">Description</a></th></tr><tr><th colspan="5"><hr></th></tr>
<tr><td valign="top"><img src="/icons/back.gif" alt="[DIR]"></td><td><a href="/nick/">Parent Directory</a></td><td> </td><td align="right"> - </td><td> </td></tr>
<tr><td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td><td><a href="SWIG-embed.tar.gz">SWIG-embed.tar.gz</a></td><td align="right">29-Nov-2005 16:27 </td><td align="right">2.3K</td><td> </td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="avi2dvd.pl">avi2dvd.pl</a></td><td align="right">14-Apr-2010 23:07 </td><td align="right"> 17K</td><td> </td></tr>
<tr><td valign="top"><img src="/icons/binary.gif" alt="[ ]"></td><td><a href="cambert.exe">cambert.exe</a></td><td align="right">15-Dec-2006 18:07 </td><td align="right"> 54K</td><td> </td></tr>
<tr><td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td><td><a href="cambert.gz">cambert.gz</a></td><td align="right">14-Apr-2010 23:07 </td><td align="right"> 18K</td><td> </td></tr>
<tr><td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td><td><a href="fedora_demo.gz">fedora_demo.gz</a></td><td align="right">08-Jun-2007 11:01 </td><td align="right">1.0M</td><td> </td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="gchq-challenge/">gchq-challenge/</a></td><td align="right">24-Dec-2016 15:24 </td><td align="right"> - </td><td> </td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="mandelterm/">mandelterm/</a></td><td align="right">13-Jul-2013 22:22 </td><td align="right"> - </td><td> </td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="pgp-key.txt">pgp-key.txt</a></td><td align="right">14-Apr-2010 23:07 </td><td align="right">400 </td><td> </td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="pymath/">pymath/</a></td><td align="right">24-Dec-2016 15:24 </td><td align="right"> - </td><td> </td></tr>
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="rclone">rclone</a></td><td align="right">09-May-2017 17:15 </td><td align="right"> 22M</td><td> </td></tr>
<tr><td valign="top"><img src="/icons/binary.gif" alt="[ ]"></td><td><a href="readdir.exe">readdir.exe</a></td><td align="right">21-Oct-2016 14:47 </td><td align="right">1.6M</td><td> </td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="rush_hour_solver_cut_down.py">rush_hour_solver_cut_down.py</a></td><td align="right">23-Jul-2009 11:44 </td><td align="right"> 14K</td><td> </td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="snake-puzzle/">snake-puzzle/</a></td><td align="right">25-Sep-2016 20:56 </td><td align="right"> - </td><td> </td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="stressdisk/">stressdisk/</a></td><td align="right">08-Nov-2016 14:25 </td><td align="right"> - </td><td> </td></tr>
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05 </td><td align="right">1.5M</td><td> </td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43 </td><td align="right">6.0K</td><td> </td></tr>
<tr><th colspan="5"><hr></th></tr>
<!-- some extras from https://github.com/rclone/rclone/issues/1573 -->
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td> </td></tr>
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td> </td></tr>
</table>
</body></html>
| backend/http/test/index_files/apache.html | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.00017371847934555262,
0.00016832338587846607,
0.0001642401039134711,
0.00016766745829954743,
0.00000381190398002218
] |
{
"id": 0,
"code_window": [
"\treturn nil\n",
"}\n",
"\n",
"// SetData sets new config file storage\n",
"func SetData(newData Storage) {\n",
"\tdata = newData\n",
"\tdataLoaded = false\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// If no config file, use in-memory config (which is the default)\n",
"\tif configPath == \"\" {\n",
"\t\treturn\n",
"\t}\n"
],
"file_path": "fs/config/config.go",
"type": "add",
"edit_start_line_idx": 334
} | // +build windows
package local
import (
"context"
"syscall"
"unsafe"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
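// getFreeDiskSpace lazily binds the GetDiskFreeSpaceExW function from kernel32.dll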
var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var available, total, free int64
_, _, e1 := getFreeDiskSpace.Call(
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
)
if e1 != syscall.Errno(0) {
return nil, errors.Wrap(e1, "failed to read disk usage")
}
usage := &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used
Used: fs.NewUsageValue(total - free), // bytes in use
Free: fs.NewUsageValue(available), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
// check interface
var _ fs.Abouter = &Fs{}
| backend/local/about_windows.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.00018238034681417048,
0.00017325181397609413,
0.00016786516061984003,
0.00017138084513135254,
0.0000055012988013913855
] |
{
"id": 1,
"code_window": [
"var ErrorConfigFileNotFound = errors.New(\"config file not found\")\n",
"\n",
"// SaveConfig calling function which saves configuration file.\n",
"// if SaveConfig returns error trying again after sleep.\n",
"func SaveConfig() {\n",
"\tif configPath == \"\" {\n",
"\t\tfs.Debugf(nil, \"Skipping save for memory-only config\")\n",
"\t\treturn\n",
"\t}\n",
"\tctx := context.Background()\n",
"\tci := fs.GetConfig(ctx)\n",
"\tvar err error\n",
"\tfor i := 0; i < ci.LowLevelRetries+1; i++ {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "fs/config/config.go",
"type": "replace",
"edit_start_line_idx": 373
} | // Getters and Setters for ConfigMap
package fs
import (
"os"
"strings"
"github.com/rclone/rclone/fs/config/configmap"
)
// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
type configEnvVars string
// Get a config item from the environment variables if possible
func (configName configEnvVars) Get(key string) (value string, ok bool) {
envKey := ConfigToEnv(string(configName), key)
value, ok = os.LookupEnv(envKey)
if ok {
Debugf(nil, "Setting %s=%q for %q from environment variable %s", key, value, configName, envKey)
}
return value, ok
}
// A configmap.Getter to read from the environment RCLONE_option_name
type optionEnvVars struct {
fsInfo *RegInfo
}
// Get a config item from the option environment variables if possible
func (oev optionEnvVars) Get(key string) (value string, ok bool) {
opt := oev.fsInfo.Options.Get(key)
if opt == nil {
return "", false
}
envKey := OptionToEnv(oev.fsInfo.Prefix + "-" + key)
value, ok = os.LookupEnv(envKey)
if ok {
Debugf(nil, "Setting %s_%s=%q from environment variable %s", oev.fsInfo.Prefix, key, value, envKey)
} else if opt.NoPrefix {
// For options with NoPrefix set, check without prefix too
envKey := OptionToEnv(key)
value, ok = os.LookupEnv(envKey)
if ok {
Debugf(nil, "Setting %s=%q for %s from environment variable %s", key, value, oev.fsInfo.Prefix, envKey)
}
}
return value, ok
}
// A configmap.Getter to read either the default value or the set
// value from the RegInfo.Options
type regInfoValues struct {
fsInfo *RegInfo
useDefault bool
}
// override the values in configMap with either the flag values or
// the default values
func (r *regInfoValues) Get(key string) (value string, ok bool) {
opt := r.fsInfo.Options.Get(key)
if opt != nil && (r.useDefault || opt.Value != nil) {
return opt.String(), true
}
return "", false
}
// A configmap.Setter to read from the config file
type setConfigFile string
// Set a config item into the config file
func (section setConfigFile) Set(key, value string) {
if strings.HasPrefix(string(section), ":") {
Logf(nil, "Can't save config %q = %q for on the fly backend %q", key, value, section)
return
}
Debugf(nil, "Saving config %q = %q in section %q of the config file", key, value, section)
err := ConfigFileSet(string(section), key, value)
if err != nil {
Errorf(nil, "Failed saving config %q = %q in section %q of the config file: %v", key, value, section, err)
}
}
// A configmap.Getter to read from the config file
type getConfigFile string
// Get a config item from the config file
func (section getConfigFile) Get(key string) (value string, ok bool) {
value, ok = ConfigFileGet(string(section), key)
// Ignore empty lines in the config file
if value == "" {
ok = false
}
return value, ok
}
// ConfigMap creates a configmap.Map from the *RegInfo and the
// configName passed in. If connectionStringConfig has any entries (it may be nil),
// then it will be added to the lookup with the highest priority.
//
// If fsInfo is nil then the returned configmap.Map should only be
// used for reading non backend specific parameters, such as "type".
func ConfigMap(fsInfo *RegInfo, configName string, connectionStringConfig configmap.Simple) (config *configmap.Map) {
// Create the config
config = configmap.New()
// Read the config, more specific to least specific
// Config from connection string
if len(connectionStringConfig) > 0 {
config.AddGetter(connectionStringConfig, configmap.PriorityNormal)
}
// flag values
if fsInfo != nil {
config.AddGetter(®InfoValues{fsInfo, false}, configmap.PriorityNormal)
}
// remote specific environment vars
config.AddGetter(configEnvVars(configName), configmap.PriorityNormal)
// backend specific environment vars
if fsInfo != nil {
config.AddGetter(optionEnvVars{fsInfo: fsInfo}, configmap.PriorityNormal)
}
// config file
config.AddGetter(getConfigFile(configName), configmap.PriorityConfig)
// default values
if fsInfo != nil {
config.AddGetter(®InfoValues{fsInfo, true}, configmap.PriorityDefault)
}
// Set Config
config.AddSetter(setConfigFile(configName))
return config
}
| fs/configmap.go | 1 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.0138056306168437,
0.00132969010155648,
0.0001646421296754852,
0.00017589661001693457,
0.0035094197373837233
] |
{
"id": 1,
"code_window": [
"var ErrorConfigFileNotFound = errors.New(\"config file not found\")\n",
"\n",
"// SaveConfig calling function which saves configuration file.\n",
"// if SaveConfig returns error trying again after sleep.\n",
"func SaveConfig() {\n",
"\tif configPath == \"\" {\n",
"\t\tfs.Debugf(nil, \"Skipping save for memory-only config\")\n",
"\t\treturn\n",
"\t}\n",
"\tctx := context.Background()\n",
"\tci := fs.GetConfig(ctx)\n",
"\tvar err error\n",
"\tfor i := 0; i < ci.LowLevelRetries+1; i++ {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "fs/config/config.go",
"type": "replace",
"edit_start_line_idx": 373
} | package reveal
import (
"fmt"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "reveal password",
Short: `Reveal obscured password from rclone.conf`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
cmd.Run(false, false, command, func() error {
revealed, err := obscure.Reveal(args[0])
if err != nil {
return err
}
fmt.Println(revealed)
return nil
})
},
Hidden: true,
}
| cmd/reveal/reveal.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.00017569413466844708,
0.00016906278324313462,
0.0001653311774134636,
0.00016761291772127151,
0.0000039416836443706416
] |
{
"id": 1,
"code_window": [
"var ErrorConfigFileNotFound = errors.New(\"config file not found\")\n",
"\n",
"// SaveConfig calling function which saves configuration file.\n",
"// if SaveConfig returns error trying again after sleep.\n",
"func SaveConfig() {\n",
"\tif configPath == \"\" {\n",
"\t\tfs.Debugf(nil, \"Skipping save for memory-only config\")\n",
"\t\treturn\n",
"\t}\n",
"\tctx := context.Background()\n",
"\tci := fs.GetConfig(ctx)\n",
"\tvar err error\n",
"\tfor i := 0; i < ci.LowLevelRetries+1; i++ {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "fs/config/config.go",
"type": "replace",
"edit_start_line_idx": 373
} | package api
// BIN protocol constants
const (
BinContentType = "application/x-www-form-urlencoded"
TreeIDLength = 12
DunnoNodeIDLength = 16
)
// Operations in binary protocol
const (
OperationAddFile = 103 // 0x67
OperationRename = 105 // 0x69
OperationCreateFolder = 106 // 0x6A
OperationFolderList = 117 // 0x75
OperationSharedFoldersList = 121 // 0x79
// TODO investigate opcodes below
Operation154MaybeItemInfo = 154 // 0x9A
Operation102MaybeAbout = 102 // 0x66
Operation104MaybeDelete = 104 // 0x68
)
// CreateDir protocol constants
const (
MkdirResultOK = 0
MkdirResultSourceNotExists = 1
MkdirResultAlreadyExists = 4
MkdirResultExistsDifferentCase = 9
MkdirResultInvalidName = 10
MkdirResultFailed254 = 254
)
// Move result codes
const (
MoveResultOK = 0
MoveResultSourceNotExists = 1
MoveResultFailed002 = 2
MoveResultAlreadyExists = 4
MoveResultFailed005 = 5
MoveResultFailed254 = 254
)
// AddFile result codes
const (
AddResultOK = 0
AddResultError01 = 1
AddResultDunno04 = 4
AddResultWrongPath = 5
AddResultNoFreeSpace = 7
AddResultDunno09 = 9
AddResultInvalidName = 10
AddResultNotModified = 12
AddResultFailedA = 253
AddResultFailedB = 254
)
// List request options
const (
ListOptTotalSpace = 1
ListOptDelete = 2
ListOptFingerprint = 4
ListOptUnknown8 = 8
ListOptUnknown16 = 16
ListOptFolderSize = 32
ListOptUsedSpace = 64
ListOptUnknown128 = 128
ListOptUnknown256 = 256
)
// ListOptDefaults ...
const ListOptDefaults = ListOptUnknown128 | ListOptUnknown256 | ListOptFolderSize | ListOptTotalSpace | ListOptUsedSpace
// List parse flags
const (
ListParseDone = 0
ListParseReadItem = 1
ListParsePin = 2
ListParsePinUpper = 3
ListParseUnknown15 = 15
)
// List operation results
const (
ListResultOK = 0
ListResultNotExists = 1
ListResultDunno02 = 2
ListResultDunno03 = 3
ListResultAlreadyExists04 = 4
ListResultDunno05 = 5
ListResultDunno06 = 6
ListResultDunno07 = 7
ListResultDunno08 = 8
ListResultAlreadyExists09 = 9
ListResultDunno10 = 10
ListResultDunno11 = 11
ListResultDunno12 = 12
ListResultFailedB = 253
ListResultFailedA = 254
)
// Directory item types
const (
ListItemMountPoint = 0
ListItemFile = 1
ListItemFolder = 2
ListItemSharedFolder = 3
)
| backend/mailru/api/bin.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.00017056723299901932,
0.00016736832913011312,
0.00016356153355445713,
0.00016736125689931214,
0.0000021059920527477516
] |
{
"id": 1,
"code_window": [
"var ErrorConfigFileNotFound = errors.New(\"config file not found\")\n",
"\n",
"// SaveConfig calling function which saves configuration file.\n",
"// if SaveConfig returns error trying again after sleep.\n",
"func SaveConfig() {\n",
"\tif configPath == \"\" {\n",
"\t\tfs.Debugf(nil, \"Skipping save for memory-only config\")\n",
"\t\treturn\n",
"\t}\n",
"\tctx := context.Background()\n",
"\tci := fs.GetConfig(ctx)\n",
"\tvar err error\n",
"\tfor i := 0; i < ci.LowLevelRetries+1; i++ {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "fs/config/config.go",
"type": "replace",
"edit_start_line_idx": 373
} | // Build for macos with the brew tag to handle the absence
// of fuse and print an appropriate error message
// +build brew
// +build darwin
package cmount
import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/vfs"
)
func init() {
name := "mount"
cmd := mountlib.NewMountCommand(name, false, mount)
cmd.Aliases = append(cmd.Aliases, "cmount")
mountlib.AddRc("cmount", mount)
}
// mount the file system
//
// The mount point will be ready when this returns.
//
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(_ *vfs.VFS, _ string, _ *mountlib.Options) (<-chan error, func() error, error) {
return nil, nil, errors.New("mount is not supported on MacOS when installed via Homebrew. " +
"Please install the binaries available at https://rclone." +
"org/downloads/ instead if you want to use the mount command")
}
| cmd/cmount/mount_brew.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.00045811303425580263,
0.00024061193107627332,
0.0001666910684434697,
0.00016882180352695286,
0.00012558023445308208
] |
{
"id": 2,
"code_window": [
"\t\"bytes\"\n",
"\t\"io/ioutil\"\n",
"\t\"os\"\n",
"\t\"path/filepath\"\n",
"\t\"sync\"\n",
"\n",
"\t\"github.com/Unknwon/goconfig\"\n",
"\t\"github.com/pkg/errors\"\n",
"\t\"github.com/rclone/rclone/fs\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"strings\"\n"
],
"file_path": "fs/config/configfile/configfile.go",
"type": "add",
"edit_start_line_idx": 8
} | // Getters and Setters for ConfigMap
package fs
import (
"os"
"strings"
"github.com/rclone/rclone/fs/config/configmap"
)
// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
type configEnvVars string
// Get a config item from the environment variables if possible
func (configName configEnvVars) Get(key string) (value string, ok bool) {
envKey := ConfigToEnv(string(configName), key)
value, ok = os.LookupEnv(envKey)
if ok {
Debugf(nil, "Setting %s=%q for %q from environment variable %s", key, value, configName, envKey)
}
return value, ok
}
// A configmap.Getter to read from the environment RCLONE_option_name
type optionEnvVars struct {
fsInfo *RegInfo
}
// Get a config item from the option environment variables if possible
func (oev optionEnvVars) Get(key string) (value string, ok bool) {
opt := oev.fsInfo.Options.Get(key)
if opt == nil {
return "", false
}
envKey := OptionToEnv(oev.fsInfo.Prefix + "-" + key)
value, ok = os.LookupEnv(envKey)
if ok {
Debugf(nil, "Setting %s_%s=%q from environment variable %s", oev.fsInfo.Prefix, key, value, envKey)
} else if opt.NoPrefix {
// For options with NoPrefix set, check without prefix too
envKey := OptionToEnv(key)
value, ok = os.LookupEnv(envKey)
if ok {
Debugf(nil, "Setting %s=%q for %s from environment variable %s", key, value, oev.fsInfo.Prefix, envKey)
}
}
return value, ok
}
// A configmap.Getter to read either the default value or the set
// value from the RegInfo.Options
type regInfoValues struct {
fsInfo *RegInfo
useDefault bool
}
// override the values in configMap with either the flag values or
// the default values
func (r *regInfoValues) Get(key string) (value string, ok bool) {
opt := r.fsInfo.Options.Get(key)
if opt != nil && (r.useDefault || opt.Value != nil) {
return opt.String(), true
}
return "", false
}
// A configmap.Setter to read from the config file
type setConfigFile string
// Set a config item into the config file
func (section setConfigFile) Set(key, value string) {
if strings.HasPrefix(string(section), ":") {
Logf(nil, "Can't save config %q = %q for on the fly backend %q", key, value, section)
return
}
Debugf(nil, "Saving config %q = %q in section %q of the config file", key, value, section)
err := ConfigFileSet(string(section), key, value)
if err != nil {
Errorf(nil, "Failed saving config %q = %q in section %q of the config file: %v", key, value, section, err)
}
}
// A configmap.Getter to read from the config file
type getConfigFile string
// Get a config item from the config file
func (section getConfigFile) Get(key string) (value string, ok bool) {
value, ok = ConfigFileGet(string(section), key)
// Ignore empty lines in the config file
if value == "" {
ok = false
}
return value, ok
}
// ConfigMap creates a configmap.Map from the *RegInfo and the
// configName passed in. If connectionStringConfig has any entries (it may be nil),
// then it will be added to the lookup with the highest priority.
//
// If fsInfo is nil then the returned configmap.Map should only be
// used for reading non backend specific parameters, such as "type".
func ConfigMap(fsInfo *RegInfo, configName string, connectionStringConfig configmap.Simple) (config *configmap.Map) {
// Create the config
config = configmap.New()
// Read the config, more specific to least specific
// Config from connection string
if len(connectionStringConfig) > 0 {
config.AddGetter(connectionStringConfig, configmap.PriorityNormal)
}
// flag values
if fsInfo != nil {
config.AddGetter(®InfoValues{fsInfo, false}, configmap.PriorityNormal)
}
// remote specific environment vars
config.AddGetter(configEnvVars(configName), configmap.PriorityNormal)
// backend specific environment vars
if fsInfo != nil {
config.AddGetter(optionEnvVars{fsInfo: fsInfo}, configmap.PriorityNormal)
}
// config file
config.AddGetter(getConfigFile(configName), configmap.PriorityConfig)
// default values
if fsInfo != nil {
config.AddGetter(®InfoValues{fsInfo, true}, configmap.PriorityDefault)
}
// Set Config
config.AddSetter(setConfigFile(configName))
return config
}
| fs/configmap.go | 1 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.002833897015079856,
0.00045591345406137407,
0.000162973374244757,
0.00018643929797690362,
0.0006782105774618685
] |
{
"id": 2,
"code_window": [
"\t\"bytes\"\n",
"\t\"io/ioutil\"\n",
"\t\"os\"\n",
"\t\"path/filepath\"\n",
"\t\"sync\"\n",
"\n",
"\t\"github.com/Unknwon/goconfig\"\n",
"\t\"github.com/pkg/errors\"\n",
"\t\"github.com/rclone/rclone/fs\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"strings\"\n"
],
"file_path": "fs/config/configfile/configfile.go",
"type": "add",
"edit_start_line_idx": 8
} | package policy
import (
"context"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
func init() {
registerPolicy("mfs", &Mfs{})
}
// Mfs stands for most free space
// Search category: same as epmfs.
// Action category: same as epmfs.
// Create category: Pick the drive with the most free space.
type Mfs struct {
EpMfs
}
// Create category policy, governing the creation of files and directories
func (p *Mfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
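// Filter out upstreams that do not allow create (e.g. marked read-only)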
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
u, err := p.mfs(upstreams)
return []*upstream.Fs{u}, err
}
| backend/union/policy/mfs.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.003165098838508129,
0.0011154687963426113,
0.00019346240151207894,
0.0005516570527106524,
0.0012159880716353655
] |
{
"id": 2,
"code_window": [
"\t\"bytes\"\n",
"\t\"io/ioutil\"\n",
"\t\"os\"\n",
"\t\"path/filepath\"\n",
"\t\"sync\"\n",
"\n",
"\t\"github.com/Unknwon/goconfig\"\n",
"\t\"github.com/pkg/errors\"\n",
"\t\"github.com/rclone/rclone/fs\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"strings\"\n"
],
"file_path": "fs/config/configfile/configfile.go",
"type": "add",
"edit_start_line_idx": 8
} | ---
title: "rclone rcd"
description: "Run rclone listening to remote control commands only."
slug: rclone_rcd
url: /commands/rclone_rcd/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/rcd/ and as part of making a release run "make commanddocs"
---
# rclone rcd
Run rclone listening to remote control commands only.
## Synopsis
This runs rclone so that it only listens to remote control commands.
This is useful if you are controlling rclone via the rc API.
If you pass in a path to a directory, rclone will serve that directory
for GET requests on the URL passed in. It will also open the URL in
the browser when rclone is run.
See the [rc documentation](/rc/) for more info on the rc flags.
```
rclone rcd <path to files to serve>* [flags]
```
## Options
```
-h, --help help for rcd
```
See the [global flags page](/flags/) for global options not listed here.
## SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
| docs/content/commands/rclone_rcd.md | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.0016281240386888385,
0.0008281563641503453,
0.00017182859301101416,
0.000668544031213969,
0.0005780257633887231
] |
{
"id": 2,
"code_window": [
"\t\"bytes\"\n",
"\t\"io/ioutil\"\n",
"\t\"os\"\n",
"\t\"path/filepath\"\n",
"\t\"sync\"\n",
"\n",
"\t\"github.com/Unknwon/goconfig\"\n",
"\t\"github.com/pkg/errors\"\n",
"\t\"github.com/rclone/rclone/fs\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"strings\"\n"
],
"file_path": "fs/config/configfile/configfile.go",
"type": "add",
"edit_start_line_idx": 8
} | // Package log provides logging for rclone
package log
import (
"context"
"io"
"log"
"os"
"reflect"
"runtime"
"strings"
systemd "github.com/iguanesolutions/go-systemd/v5"
"github.com/rclone/rclone/fs"
"github.com/sirupsen/logrus"
)
// Options contains options for controlling the logging
type Options struct {
File string // Log everything to this file
Format string // Comma separated list of log format options
UseSyslog bool // Use Syslog for logging
SyslogFacility string // Facility for syslog, e.g. KERN,USER,...
LogSystemdSupport bool // set if using systemd logging
}
// DefaultOpt is the default values used for Opt
var DefaultOpt = Options{
Format: "date,time",
SyslogFacility: "DAEMON",
}
// Opt is the options for the logger
var Opt = DefaultOpt
// fnName returns the name of the function two stack frames up, i.e. the caller of the function that called fnName
func fnName() string {
pc, _, _, ok := runtime.Caller(2)
name := "*Unknown*"
if ok {
name = runtime.FuncForPC(pc).Name()
dot := strings.LastIndex(name, ".")
if dot >= 0 {
name = name[dot+1:]
}
}
return name
}
// Trace debugs the entry and exit of the calling function
//
// It is designed to be used in a defer statement so it returns a
// function that logs the exit parameters.
//
// Any pointers in the exit function will be dereferenced
func Trace(o interface{}, format string, a ...interface{}) func(string, ...interface{}) {
if fs.GetConfig(context.Background()).LogLevel < fs.LogLevelDebug {
return func(format string, a ...interface{}) {}
}
name := fnName()
fs.LogPrintf(fs.LogLevelDebug, o, name+": "+format, a...)
return func(format string, a ...interface{}) {
for i := range a {
// read the values of the pointed to items
typ := reflect.TypeOf(a[i])
if typ.Kind() == reflect.Ptr {
value := reflect.ValueOf(a[i])
if value.IsNil() {
a[i] = nil
} else {
pointedToValue := reflect.Indirect(value)
a[i] = pointedToValue.Interface()
}
}
}
fs.LogPrintf(fs.LogLevelDebug, o, ">"+name+": "+format, a...)
}
}
// Stack logs a stack trace of callers with the o and info passed in
func Stack(o interface{}, info string) {
if fs.GetConfig(context.Background()).LogLevel < fs.LogLevelDebug {
return
}
arr := [16 * 1024]byte{}
buf := arr[:]
n := runtime.Stack(buf, false)
buf = buf[:n]
fs.LogPrintf(fs.LogLevelDebug, o, "%s\nStack trace:\n%s", info, buf)
}
// InitLogging start the logging as per the command line flags
func InitLogging() {
flagsStr := "," + Opt.Format + ","
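// Wrap the format string in commas so each option can be matched as ",option,"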
var flags int
if strings.Contains(flagsStr, ",date,") {
flags |= log.Ldate
}
if strings.Contains(flagsStr, ",time,") {
flags |= log.Ltime
}
if strings.Contains(flagsStr, ",microseconds,") {
flags |= log.Lmicroseconds
}
if strings.Contains(flagsStr, ",UTC,") {
flags |= log.LUTC
}
if strings.Contains(flagsStr, ",longfile,") {
flags |= log.Llongfile
}
if strings.Contains(flagsStr, ",shortfile,") {
flags |= log.Lshortfile
}
log.SetFlags(flags)
// Log file output
if Opt.File != "" {
f, err := os.OpenFile(Opt.File, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
if err != nil {
log.Fatalf("Failed to open log file: %v", err)
}
_, err = f.Seek(0, io.SeekEnd)
if err != nil {
fs.Errorf(nil, "Failed to seek log file to end: %v", err)
}
log.SetOutput(f)
logrus.SetOutput(f)
redirectStderr(f)
}
// Syslog output
if Opt.UseSyslog {
if Opt.File != "" {
log.Fatalf("Can't use --syslog and --log-file together")
}
startSysLog()
}
// Activate systemd logger support if systemd invocation ID is
// detected and output is going to stderr (not logging to a file or syslog)
if !Redirected() {
if _, usingSystemd := systemd.GetInvocationID(); usingSystemd {
Opt.LogSystemdSupport = true
}
}
// Systemd logging output
if Opt.LogSystemdSupport {
startSystemdLog()
}
}
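// Illustrative mapping (not part of the original file): with
// Opt.Format = "date,time,microseconds" the branches above combine to
// log.Ldate|log.Ltime|log.Lmicroseconds, giving line prefixes like
// "2009/11/10 23:00:05.123456 ".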
// Redirected returns true if the log has been redirected from stdout
func Redirected() bool {
return Opt.UseSyslog || Opt.File != ""
}
| fs/log/log.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.003164740977808833,
0.0005863236146979034,
0.00016430918185506016,
0.00019708785112015903,
0.000827078940346837
] |
{
"id": 3,
"code_window": [
"\n",
"// SetValue sets the value under key in section\n",
"func (s *Storage) SetValue(section string, key string, value string) {\n",
"\ts.check()\n",
"\ts.gc.SetValue(section, key, value)\n",
"}\n",
"\n",
"// DeleteKey removes the key under section\n",
"func (s *Storage) DeleteKey(section string, key string) bool {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif strings.HasPrefix(section, \":\") {\n",
"\t\tfs.Logf(nil, \"Can't save config %q for on the fly backend %q\", key, section)\n",
"\t\treturn\n",
"\t}\n"
],
"file_path": "fs/config/configfile/configfile.go",
"type": "add",
"edit_start_line_idx": 226
} | // Package config reads, writes and edits the config file and deals with command line flags
package config
import (
"context"
"encoding/json"
"fmt"
"log"
mathrand "math/rand"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"time"
"github.com/mitchellh/go-homedir"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/random"
)
const (
configFileName = "rclone.conf"
hiddenConfigFileName = "." + configFileName
noConfigFile = "notfound"
// ConfigToken is the key used to store the token under
ConfigToken = "token"
// ConfigClientID is the config key used to store the client id
ConfigClientID = "client_id"
// ConfigClientSecret is the config key used to store the client secret
ConfigClientSecret = "client_secret"
// ConfigAuthURL is the config key used to store the auth server endpoint
ConfigAuthURL = "auth_url"
// ConfigTokenURL is the config key used to store the token server endpoint
ConfigTokenURL = "token_url"
// ConfigEncoding is the config key to change the encoding for a backend
ConfigEncoding = "encoding"
// ConfigEncodingHelp is the help for ConfigEncoding
ConfigEncodingHelp = "This sets the encoding for the backend.\n\nSee: the [encoding section in the overview](/overview/#encoding) for more info."
// ConfigAuthorize indicates that we just want "rclone authorize"
ConfigAuthorize = "config_authorize"
// ConfigAuthNoBrowser indicates that we do not want to open browser
ConfigAuthNoBrowser = "config_auth_no_browser"
)
// Storage defines an interface for loading and saving config to
// persistent storage. Rclone provides a default implementation to
// load and save to a config file when this is imported
//
// import "github.com/rclone/rclone/fs/config/configfile"
// configfile.Install()
type Storage interface {
// GetSectionList returns a slice of strings with names for all the
// sections
GetSectionList() []string
// HasSection returns true if section exists in the config file
HasSection(section string) bool
// DeleteSection removes the named section and all config from the
// config file
DeleteSection(section string)
// GetKeyList returns the keys in this section
GetKeyList(section string) []string
// GetValue returns the key in section with a found flag
GetValue(section string, key string) (value string, found bool)
// SetValue sets the value under key in section
SetValue(section string, key string, value string)
// DeleteKey removes the key under section
DeleteKey(section string, key string) bool
// Load the config from permanent storage
Load() error
// Save the config to permanent storage
Save() error
// Serialize the config into a string
Serialize() (string, error)
}
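// A minimal wiring sketch (illustrative only; memStorage is a hypothetical
// type satisfying Storage): a custom implementation can be installed with
// SetData before any config is read:
//
//	config.SetData(&memStorage{})
//	s := config.LoadedData() // Load is invoked via the custom implementation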
// Global
var (
// CacheDir points to the cache directory. Users of this
// should make a subdirectory and use MkdirAll() to create it
// and any parents.
CacheDir = makeCacheDir()
// Password can be used to configure the random password generator
Password = random.Password
)
var (
configPath string
data Storage
dataLoaded bool
)
func init() {
// Set the function pointers up in fs
fs.ConfigFileGet = FileGetFlag
fs.ConfigFileSet = SetValueAndSave
configPath = makeConfigPath()
data = newDefaultStorage()
}
// Join directory with filename, and check if exists
func findFile(dir string, name string) string {
path := filepath.Join(dir, name)
if _, err := os.Stat(path); err != nil {
return ""
}
return path
}
// Find current user's home directory
func findHomeDir() (string, error) {
path, err := homedir.Dir()
if err != nil {
fs.Debugf(nil, "Home directory lookup failed and cannot be used as configuration location: %v", err)
} else if path == "" {
// On Unix homedir return success but empty string for user with empty home configured in passwd file
fs.Debugf(nil, "Home directory not defined and cannot be used as configuration location")
}
return path, err
}
// Find rclone executable directory and look for existing rclone.conf there
// (<rclone_exe_dir>/rclone.conf)
func findLocalConfig() (configDir string, configFile string) {
if exePath, err := os.Executable(); err == nil {
configDir = filepath.Dir(exePath)
configFile = findFile(configDir, configFileName)
}
return
}
// Get path to Windows AppData config subdirectory for rclone and look for existing rclone.conf there
// ($AppData/rclone/rclone.conf)
func findAppDataConfig() (configDir string, configFile string) {
if appDataDir := os.Getenv("APPDATA"); appDataDir != "" {
configDir = filepath.Join(appDataDir, "rclone")
configFile = findFile(configDir, configFileName)
} else {
fs.Debugf(nil, "Environment variable APPDATA is not defined and cannot be used as configuration location")
}
return
}
// Get path to XDG config subdirectory for rclone and look for existing rclone.conf there
// (see XDG Base Directory specification: https://specifications.freedesktop.org/basedir-spec/latest/).
// ($XDG_CONFIG_HOME\rclone\rclone.conf)
func findXDGConfig() (configDir string, configFile string) {
if xdgConfigDir := os.Getenv("XDG_CONFIG_HOME"); xdgConfigDir != "" {
configDir = filepath.Join(xdgConfigDir, "rclone")
configFile = findFile(configDir, configFileName)
}
return
}
// Get path to .config subdirectory for rclone and look for existing rclone.conf there
// (~/.config/rclone/rclone.conf)
func findDotConfigConfig(home string) (configDir string, configFile string) {
if home != "" {
configDir = filepath.Join(home, ".config", "rclone")
configFile = findFile(configDir, configFileName)
}
return
}
// Look for existing .rclone.conf (legacy hidden filename) in root of user's home directory
// (~/.rclone.conf)
func findOldHomeConfig(home string) (configDir string, configFile string) {
if home != "" {
configDir = home
configFile = findFile(home, hiddenConfigFileName)
}
return
}
// Return the path to the configuration file
func makeConfigPath() string {
// Look for existing rclone.conf in prioritized list of known locations
// Also get configuration directory to use for new config file when no existing is found.
var (
configFile string
configDir string
primaryConfigDir string
fallbackConfigDir string
)
// <rclone_exe_dir>/rclone.conf
if _, configFile = findLocalConfig(); configFile != "" {
return configFile
}
// Windows: $AppData/rclone/rclone.conf
// This is also the default location for new config when no existing is found
if runtime.GOOS == "windows" {
if primaryConfigDir, configFile = findAppDataConfig(); configFile != "" {
return configFile
}
}
// $XDG_CONFIG_HOME/rclone/rclone.conf
// Also looking for this on Windows, for backwards compatibility reasons.
if configDir, configFile = findXDGConfig(); configFile != "" {
return configFile
}
if runtime.GOOS != "windows" {
// On Unix this is also the default location for new config when no existing is found
primaryConfigDir = configDir
}
// ~/.config/rclone/rclone.conf
// This is also the fallback location for new config
// (when $AppData on Windows and $XDG_CONFIG_HOME on Unix is not defined)
homeDir, homeDirErr := findHomeDir()
if fallbackConfigDir, configFile = findDotConfigConfig(homeDir); configFile != "" {
return configFile
}
// ~/.rclone.conf
if _, configFile = findOldHomeConfig(homeDir); configFile != "" {
return configFile
}
// No existing config file found, prepare proper default for a new one.
// But first check if the user supplied a --config flag or environment
// variable, since then we skip actually trying to create the default
// and report any errors related to it (we can't use pflag for this because
// it isn't initialised yet so we search the command line manually).
_, configSupplied := os.LookupEnv("RCLONE_CONFIG")
if !configSupplied {
for _, item := range os.Args {
if item == "--config" || strings.HasPrefix(item, "--config=") {
configSupplied = true
break
}
}
}
// If we found a configuration directory to be used for new config during search
// above, then create it to be ready for rclone.conf file to be written into it
// later, and also as a test of permissions to use fallback if not even able to
// create the directory.
if primaryConfigDir != "" {
configDir = primaryConfigDir
} else if fallbackConfigDir != "" {
configDir = fallbackConfigDir
} else {
configDir = ""
}
if configDir != "" {
configFile = filepath.Join(configDir, configFileName)
if configSupplied {
// User supplied custom config option, just return the default path
// as is without creating any directories, since it will not be used
// anyway and we don't want to unnecessarily create empty directory.
return configFile
}
var mkdirErr error
if mkdirErr = os.MkdirAll(configDir, os.ModePerm); mkdirErr == nil {
return configFile
}
// Problem: Try a fallback location. If we did find a home directory then
// just assume file .rclone.conf (legacy hidden filename) can be written in
// its root (~/.rclone.conf).
if homeDir != "" {
fs.Debugf(nil, "Configuration directory could not be created and will not be used: %v", mkdirErr)
return filepath.Join(homeDir, hiddenConfigFileName)
}
if !configSupplied {
fs.Errorf(nil, "Couldn't find home directory nor create configuration directory: %v", mkdirErr)
}
} else if !configSupplied {
if homeDirErr != nil {
fs.Errorf(nil, "Couldn't find configuration directory nor home directory: %v", homeDirErr)
} else {
fs.Errorf(nil, "Couldn't find configuration directory nor home directory")
}
}
// No known location that can be used: Did possibly find a configDir
// (XDG_CONFIG_HOME or APPDATA) which couldn't be created, but in any case
// did not find a home directory!
// Report it as an error, and return as last resort the path relative to current
// working directory, of .rclone.conf (legacy hidden filename).
if !configSupplied {
fs.Errorf(nil, "Defaulting to storing config in current directory.")
fs.Errorf(nil, "Use --config flag to workaround.")
}
return hiddenConfigFileName
}
// GetConfigPath returns the current config file path
func GetConfigPath() string {
return configPath
}
// SetConfigPath sets new config file path
//
// Checks for empty string, OS null device, or special path, all of which indicate in-memory config.
func SetConfigPath(path string) (err error) {
var cfgPath string
if path == "" || path == os.DevNull {
cfgPath = ""
} else if filepath.Base(path) == noConfigFile {
cfgPath = ""
} else if err = file.IsReserved(path); err != nil {
return err
} else if cfgPath, err = filepath.Abs(path); err != nil {
return err
}
configPath = cfgPath
return nil
}
// SetData sets new config file storage
func SetData(newData Storage) {
data = newData
dataLoaded = false
}
// Data returns current config file storage
func Data() Storage {
return data
}
// LoadedData ensures the config file storage is loaded and returns it
func LoadedData() Storage {
if !dataLoaded {
// Set RCLONE_CONFIG_DIR for backend config and subprocesses
// If empty configPath (in-memory only) the value will be "."
_ = os.Setenv("RCLONE_CONFIG_DIR", filepath.Dir(configPath))
// Load configuration from file (or initialize sensible default if no file or error)
if err := data.Load(); err == nil {
fs.Debugf(nil, "Using config file from %q", configPath)
dataLoaded = true
} else if err == ErrorConfigFileNotFound {
if configPath == "" {
fs.Debugf(nil, "Config is memory-only - using defaults")
} else {
fs.Logf(nil, "Config file %q not found - using defaults", configPath)
}
dataLoaded = true
} else {
log.Fatalf("Failed to load config file %q: %v", configPath, err)
}
}
return data
}
// ErrorConfigFileNotFound is returned when the config file is not found
var ErrorConfigFileNotFound = errors.New("config file not found")
// SaveConfig saves the configuration file.
// If saving fails it retries, sleeping a short random time between attempts.
func SaveConfig() {
if configPath == "" {
fs.Debugf(nil, "Skipping save for memory-only config")
return
}
ctx := context.Background()
ci := fs.GetConfig(ctx)
var err error
for i := 0; i < ci.LowLevelRetries+1; i++ {
if err = LoadedData().Save(); err == nil {
return
}
waitingTimeMs := mathrand.Intn(1000)
time.Sleep(time.Duration(waitingTimeMs) * time.Millisecond)
}
fs.Errorf(nil, "Failed to save config after %d tries: %v", ci.LowLevelRetries, err)
}
// SetValueAndSave sets the key to the value and saves just that
// value in the config file. It loads the old config file in from
// disk first and overwrites the given value only.
func SetValueAndSave(name, key, value string) error {
// Set the value in config in case we fail to reload it
LoadedData().SetValue(name, key, value)
// Save it again
SaveConfig()
return nil
}
// getWithDefault gets key out of section name returning defaultValue if not
// found.
func getWithDefault(name, key, defaultValue string) string {
value, found := LoadedData().GetValue(name, key)
if !found {
return defaultValue
}
return value
}
// UpdateRemoteOpt configures the remote update
type UpdateRemoteOpt struct {
// Treat all passwords as plain that need obscuring
Obscure bool `json:"obscure"`
// Treat all passwords as obscured
NoObscure bool `json:"noObscure"`
// Don't interact with the user - return questions
NonInteractive bool `json:"nonInteractive"`
// If set then supply state and result parameters to continue the process
Continue bool `json:"continue"`
// If set then ask all the questions, not just the post config questions
All bool `json:"all"`
// State to restart with - used with Continue
State string `json:"state"`
// Result to return - used with Continue
Result string `json:"result"`
// If set then edit existing values
Edit bool `json:"edit"`
}
func updateRemote(ctx context.Context, name string, keyValues rc.Params, opt UpdateRemoteOpt) (out *fs.ConfigOut, err error) {
if opt.Obscure && opt.NoObscure {
return nil, errors.New("can't use --obscure and --no-obscure together")
}
err = fspath.CheckConfigName(name)
if err != nil {
return nil, err
}
interactive := !(opt.NonInteractive || opt.Continue)
if interactive && !opt.All {
ctx = suppressConfirm(ctx)
}
fsType := FileGet(name, "type")
if fsType == "" {
return nil, errors.New("couldn't find type field in config")
}
ri, err := fs.Find(fsType)
if err != nil {
return nil, errors.Errorf("couldn't find backend for type %q", fsType)
}
// Work out which options need to be obscured
needsObscure := map[string]struct{}{}
if !opt.NoObscure {
for _, option := range ri.Options {
if option.IsPassword {
needsObscure[option.Name] = struct{}{}
}
}
}
choices := configmap.Simple{}
m := fs.ConfigMap(ri, name, nil)
// Set the config
for k, v := range keyValues {
vStr := fmt.Sprint(v)
// Obscure parameter if necessary
if _, ok := needsObscure[k]; ok {
_, err := obscure.Reveal(vStr)
if err != nil || opt.Obscure {
// If error => not already obscured, so obscure it
// or we are forced to obscure
vStr, err = obscure.Obscure(vStr)
if err != nil {
return nil, errors.Wrap(err, "UpdateRemote: obscure failed")
}
}
}
choices.Set(k, vStr)
if !strings.HasPrefix(k, fs.ConfigKeyEphemeralPrefix) {
m.Set(k, vStr)
}
}
if opt.Edit {
choices[fs.ConfigEdit] = "true"
}
if interactive {
var state = ""
if opt.All {
state = fs.ConfigAll
}
err = backendConfig(ctx, name, m, ri, choices, state)
} else {
// Start the config state machine
in := fs.ConfigIn{
State: opt.State,
Result: opt.Result,
}
if in.State == "" && opt.All {
in.State = fs.ConfigAll
}
out, err = fs.BackendConfig(ctx, name, m, ri, choices, in)
}
if err != nil {
return nil, err
}
SaveConfig()
cache.ClearConfig(name) // remove any remotes based on this config from the cache
return out, nil
}
// UpdateRemote adds the keyValues passed in to the remote of name.
// keyValues should be key, value pairs.
func UpdateRemote(ctx context.Context, name string, keyValues rc.Params, opt UpdateRemoteOpt) (out *fs.ConfigOut, err error) {
opt.Edit = true
return updateRemote(ctx, name, keyValues, opt)
}
// CreateRemote creates a new remote with name, type and a list of
// parameters which are key, value pairs. If update is set then it
// adds the new keys rather than replacing all of them.
func CreateRemote(ctx context.Context, name string, Type string, keyValues rc.Params, opts UpdateRemoteOpt) (out *fs.ConfigOut, err error) {
err = fspath.CheckConfigName(name)
if err != nil {
return nil, err
}
if !opts.Continue {
// Delete the old config if it exists
LoadedData().DeleteSection(name)
// Set the type
LoadedData().SetValue(name, "type", Type)
}
// Set the remaining values
return UpdateRemote(ctx, name, keyValues, opts)
}
// PasswordRemote adds the keyValues passed in to the remote of name.
// keyValues should be key, value pairs.
func PasswordRemote(ctx context.Context, name string, keyValues rc.Params) error {
ctx = suppressConfirm(ctx)
err := fspath.CheckConfigName(name)
if err != nil {
return err
}
for k, v := range keyValues {
keyValues[k] = obscure.MustObscure(fmt.Sprint(v))
}
_, err = UpdateRemote(ctx, name, keyValues, UpdateRemoteOpt{
NoObscure: true,
})
return err
}
// JSONListProviders prints all the providers and options in JSON format
func JSONListProviders() error {
b, err := json.MarshalIndent(fs.Registry, "", " ")
if err != nil {
return errors.Wrap(err, "failed to marshal examples")
}
_, err = os.Stdout.Write(b)
if err != nil {
return errors.Wrap(err, "failed to write providers list")
}
return nil
}
// fsOption returns an Option describing the possible remotes
func fsOption() *fs.Option {
o := &fs.Option{
Name: "Storage",
Help: "Type of storage to configure.",
Default: "",
}
for _, item := range fs.Registry {
example := fs.OptionExample{
Value: item.Name,
Help: item.Description,
}
o.Examples = append(o.Examples, example)
}
o.Examples.Sort()
return o
}
// FileGetFlag gets the config key under section, returning the
// value and true if found, or ("", false) otherwise
func FileGetFlag(section, key string) (string, bool) {
return LoadedData().GetValue(section, key)
}
// FileGet gets the config key under section returning the default if not set.
//
// It looks up defaults in the environment if they are present
func FileGet(section, key string) string {
var defaultVal string
envKey := fs.ConfigToEnv(section, key)
newValue, found := os.LookupEnv(envKey)
if found {
defaultVal = newValue
}
return getWithDefault(section, key, defaultVal)
}
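// Illustrative example (not part of the original file): for section
// "myremote" and key "token" the environment variable consulted via
// fs.ConfigToEnv is RCLONE_CONFIG_MYREMOTE_TOKEN; when set, its value acts
// as the default used if the key is absent from the config file.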
// FileSet sets the key in section to value. It doesn't save
// the config file.
func FileSet(section, key, value string) {
if value != "" {
LoadedData().SetValue(section, key, value)
} else {
FileDeleteKey(section, key)
}
}
// FileDeleteKey deletes the config key in the config file.
// It returns true if the key was deleted,
// or returns false if the section or key didn't exist.
func FileDeleteKey(section, key string) bool {
return LoadedData().DeleteKey(section, key)
}
var matchEnv = regexp.MustCompile(`^RCLONE_CONFIG_(.*?)_TYPE=.*$`)
// FileSections returns the sections in the config file
// including any defined by environment variables.
func FileSections() []string {
sections := LoadedData().GetSectionList()
for _, item := range os.Environ() {
matches := matchEnv.FindStringSubmatch(item)
if len(matches) == 2 {
sections = append(sections, strings.ToLower(matches[1]))
}
}
return sections
}
// DumpRcRemote dumps the config for a single remote
func DumpRcRemote(name string) (dump rc.Params) {
params := rc.Params{}
for _, key := range LoadedData().GetKeyList(name) {
params[key] = FileGet(name, key)
}
return params
}
// DumpRcBlob dumps all the config as an unstructured blob suitable
// for the rc
func DumpRcBlob() (dump rc.Params) {
dump = rc.Params{}
for _, name := range LoadedData().GetSectionList() {
dump[name] = DumpRcRemote(name)
}
return dump
}
// Dump dumps all the config as a JSON file
func Dump() error {
dump := DumpRcBlob()
b, err := json.MarshalIndent(dump, "", " ")
if err != nil {
return errors.Wrap(err, "failed to marshal config dump")
}
_, err = os.Stdout.Write(b)
if err != nil {
return errors.Wrap(err, "failed to write config dump")
}
return nil
}
// makeCacheDir returns a directory to use for caching.
//
// Code borrowed from go stdlib until it is made public
func makeCacheDir() (dir string) {
// Compute default location.
switch runtime.GOOS {
case "windows":
dir = os.Getenv("LocalAppData")
case "darwin":
dir = os.Getenv("HOME")
if dir != "" {
dir += "/Library/Caches"
}
case "plan9":
dir = os.Getenv("home")
if dir != "" {
// Plan 9 has no established per-user cache directory,
// but $home/lib/xyz is the usual equivalent of $HOME/.xyz on Unix.
dir += "/lib/cache"
}
default: // Unix
// https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
dir = os.Getenv("XDG_CACHE_HOME")
if dir == "" {
dir = os.Getenv("HOME")
if dir != "" {
dir += "/.cache"
}
}
}
// if no dir found then use TempDir - we will have a cachedir!
if dir == "" {
dir = os.TempDir()
}
return filepath.Join(dir, "rclone")
}
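// Example results (illustrative): on Linux with XDG_CACHE_HOME unset this
// resolves to $HOME/.cache/rclone, on Windows to %LocalAppData%\rclone, and
// if no candidate directory is found it falls back under os.TempDir().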
| fs/config/config.go | 1 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.9982336759567261,
0.05816639959812164,
0.0001652285864111036,
0.00017365333042107522,
0.22791828215122223
] |
{
"id": 3,
"code_window": [
"\n",
"// SetValue sets the value under key in section\n",
"func (s *Storage) SetValue(section string, key string, value string) {\n",
"\ts.check()\n",
"\ts.gc.SetValue(section, key, value)\n",
"}\n",
"\n",
"// DeleteKey removes the key under section\n",
"func (s *Storage) DeleteKey(section string, key string) bool {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif strings.HasPrefix(section, \":\") {\n",
"\t\tfs.Logf(nil, \"Can't save config %q for on the fly backend %q\", key, section)\n",
"\t\treturn\n",
"\t}\n"
],
"file_path": "fs/config/configfile/configfile.go",
"type": "add",
"edit_start_line_idx": 226
} | ---
title: "Rclone"
description: "Rclone syncs your files to cloud storage: Google Drive, S3, Swift, Dropbox, Google Cloud Storage, Azure, Box and many more."
type: page
---
# Rclone syncs your files to cloud storage
{{< img width="50%" src="/img/logo_on_light__horizontal_color.svg" alt="rclone logo" style="float:right; padding: 5px;" >}}
- [About rclone](#about)
- [What can rclone do for you?](#what)
- [What features does rclone have?](#features)
- [What providers does rclone support?](#providers)
- [Download](/downloads/)
- [Install](/install/)
{{< rem MAINPAGELINK >}}
## About rclone {#about}
Rclone is a command line program to manage files on cloud storage. It
is a feature rich alternative to cloud vendors' web storage
interfaces. [Over 40 cloud storage products](#providers) support
rclone including S3 object stores, business & consumer file storage
services, as well as standard transfer protocols.
Rclone has powerful cloud equivalents to the unix commands rsync, cp,
mv, mount, ls, ncdu, tree, rm, and cat. Rclone's familiar syntax
includes shell pipeline support, and `--dry-run` protection. It is
used at the command line, in scripts or via its [API](/rc).
Users call rclone *"The Swiss army knife of cloud storage"*, and
*"Technology indistinguishable from magic"*.
Rclone really looks after your data. It preserves timestamps and
verifies checksums at all times. Transfers over limited bandwidth,
intermittent connections, or subject to quota can be restarted from
the last good file transferred. You can
[check](/commands/rclone_check/) the integrity of your files. Where
possible, rclone employs server-side transfers to minimise local
bandwidth use and transfers from one provider to another without
using local disk.
Virtual backends wrap local and cloud file systems to apply
[encryption](/crypt/),
[compression](/compress/)
[chunking](/chunker/) and
[joining](/union/).
Rclone [mounts](/commands/rclone_mount/) any local, cloud or
virtual filesystem as a disk on Windows,
macOS, Linux and FreeBSD, and also serves these over
[SFTP](/commands/rclone_serve_sftp/),
[HTTP](/commands/rclone_serve_http/),
[WebDAV](/commands/rclone_serve_webdav/),
[FTP](/commands/rclone_serve_ftp/) and
[DLNA](/commands/rclone_serve_dlna/).
Rclone is mature, open source software originally inspired by rsync
and written in [Go](https://golang.org). The friendly support
community are familiar with varied use cases. Official Ubuntu, Debian,
Fedora, Brew and Chocolatey repos include rclone. For the latest
version [downloading from rclone.org](/downloads/) is recommended.
Rclone is widely used on Linux, Windows and Mac. Third party
developers create innovative backup, restore, GUI and business
process solutions using the rclone command line or API.
Rclone does the heavy lifting of communicating with cloud storage.
## What can rclone do for you? {#what}
Rclone helps you:
- Backup (and encrypt) files to cloud storage
- Restore (and decrypt) files from cloud storage
- Mirror cloud data to other cloud services or locally
- Migrate data to cloud, or between cloud storage vendors
- Mount multiple, encrypted, cached or diverse cloud storage as a disk
- Analyse and account for data held on cloud storage using [lsf](/commands/rclone_lsf/), [lsjson](/commands/rclone_lsjson/), [size](/commands/rclone_size/), [ncdu](/commands/rclone_ncdu/)
- [Union](/union/) file systems together to present multiple local and/or cloud file systems as one
## Features {#features}
- Transfers
- MD5, SHA1 hashes are checked at all times for file integrity
- Timestamps are preserved on files
- Operations can be restarted at any time
- Can be to and from network, e.g. two different cloud providers
- Can use multi-threaded downloads to local disk
- [Copy](/commands/rclone_copy/) new or changed files to cloud storage
- [Sync](/commands/rclone_sync/) (one way) to make a directory identical
- [Move](/commands/rclone_move/) files to cloud storage deleting the local after verification
- [Check](/commands/rclone_check/) hashes and for missing/extra files
- [Mount](/commands/rclone_mount/) your cloud storage as a network disk
- [Serve](/commands/rclone_serve/) local or remote files over [HTTP](/commands/rclone_serve_http/)/[WebDav](/commands/rclone_serve_webdav/)/[FTP](/commands/rclone_serve_ftp/)/[SFTP](/commands/rclone_serve_sftp/)/[dlna](/commands/rclone_serve_dlna/)
- Experimental [Web based GUI](/gui/)
## Supported providers {#providers}
(There are many others, built on standard protocols such as
WebDAV or S3, that work out of the box.)
{{< provider_list >}}
{{< provider name="1Fichier" home="https://1fichier.com/" config="/fichier/" start="true">}}
{{< provider name="Alibaba Cloud (Aliyun) Object Storage System (OSS)" home="https://www.alibabacloud.com/product/oss/" config="/s3/#alibaba-oss" >}}
{{< provider name="Amazon Drive" home="https://www.amazon.com/clouddrive" config="/amazonclouddrive/" note="#status">}}
{{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
{{< provider name="Backblaze B2" home="https://www.backblaze.com/b2/cloud-storage.html" config="/b2/" >}}
{{< provider name="Box" home="https://www.box.com/" config="/box/" >}}
{{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}}
{{< provider name="Citrix ShareFile" home="http://sharefile.com/" config="/sharefile/" >}}
{{< provider name="C14" home="https://www.online.net/en/storage/c14-cold-storage" config="/sftp/#c14" >}}
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
{{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
{{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
{{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}}
{{< provider name="Google Cloud Storage" home="https://cloud.google.com/storage/" config="/googlecloudstorage/" >}}
{{< provider name="Google Drive" home="https://www.google.com/drive/" config="/drive/" >}}
{{< provider name="Google Photos" home="https://www.google.com/photos/about/" config="/googlephotos/" >}}
{{< provider name="HDFS" home="https://hadoop.apache.org/" config="/hdfs/" >}}
{{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}}
{{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
{{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
{{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
{{< provider name="Mail.ru Cloud" home="https://cloud.mail.ru/" config="/mailru/" >}}
{{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}
{{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}}
{{< provider name="Memory" home="/memory/" config="/memory/" >}}
{{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}}
{{< provider name="Microsoft OneDrive" home="https://onedrive.live.com/" config="/onedrive/" >}}
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
{{< provider name="Nextcloud" home="https://nextcloud.com/" config="/webdav/#nextcloud" >}}
{{< provider name="OVH" home="https://www.ovh.co.uk/public-cloud/storage/object-storage/" config="/swift/" >}}
{{< provider name="OpenDrive" home="https://www.opendrive.com/" config="/opendrive/" >}}
{{< provider name="OpenStack Swift" home="https://docs.openstack.org/swift/latest/" config="/swift/" >}}
{{< provider name="Oracle Cloud Storage" home="https://cloud.oracle.com/storage-opc" config="/swift/" >}}
{{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}}
{{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}}
{{< provider name="premiumize.me" home="https://premiumize.me/" config="/premiumizeme/" >}}
{{< provider name="put.io" home="https://put.io/" config="/putio/" >}}
{{< provider name="QingStor" home="https://www.qingcloud.com/products/storage" config="/qingstor/" >}}
{{< provider name="Rackspace Cloud Files" home="https://www.rackspace.com/cloud/files" config="/swift/" >}}
{{< provider name="rsync.net" home="https://rsync.net/products/rclone.html" config="/sftp/#rsync-net" >}}
{{< provider name="Scaleway" home="https://www.scaleway.com/object-storage/" config="/s3/#scaleway" >}}
{{< provider name="Seafile" home="https://www.seafile.com/" config="/seafile/" >}}
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
{{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
{{< provider name="Tardigrade" home="https://tardigrade.io/" config="/tardigrade/" >}}
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
{{< provider name="Uptobox" home="https://uptobox.com" config="/uptobox/" >}}
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
{{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
{{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}}
{{< provider name="Zoho WorkDrive" home="https://www.zoho.com/workdrive/" config="/zoho/" >}}
{{< provider name="The local filesystem" home="/local/" config="/local/" end="true">}}
{{< /provider_list >}}
Links
* {{< icon "fa fa-home" >}} [Home page](https://rclone.org/)
* {{< icon "fab fa-github" >}} [GitHub project page for source and bug tracker](https://github.com/rclone/rclone)
* {{< icon "fa fa-comments" >}} [Rclone Forum](https://forum.rclone.org)
* {{< icon "fas fa-cloud-download-alt" >}}[Downloads](/downloads/)
| docs/content/_index.md | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.00017783380462788045,
0.00017033646872732788,
0.0001616812514839694,
0.00017119781114161015,
0.00000476427294415771
] |
{
"id": 3,
"code_window": [
"\n",
"// SetValue sets the value under key in section\n",
"func (s *Storage) SetValue(section string, key string, value string) {\n",
"\ts.check()\n",
"\ts.gc.SetValue(section, key, value)\n",
"}\n",
"\n",
"// DeleteKey removes the key under section\n",
"func (s *Storage) DeleteKey(section string, key string) bool {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif strings.HasPrefix(section, \":\") {\n",
"\t\tfs.Logf(nil, \"Can't save config %q for on the fly backend %q\", key, section)\n",
"\t\treturn\n",
"\t}\n"
],
"file_path": "fs/config/configfile/configfile.go",
"type": "add",
"edit_start_line_idx": 226
} | // Package file provides a version of os.OpenFile, the handles of
// which can be renamed and deleted under Windows.
package file
import "os"
// Open opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
// If there is an error, it will be of type *PathError.
func Open(name string) (*os.File, error) {
return OpenFile(name, os.O_RDONLY, 0)
}
// Create creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned
// File can be used for I/O; the associated file descriptor has mode
// O_RDWR.
// If there is an error, it will be of type *PathError.
func Create(name string) (*os.File, error) {
return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
}
| lib/file/file.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.00017094382201321423,
0.00016878162568900734,
0.0001657981629250571,
0.00016960292123258114,
0.000002179500597776496
] |
{
"id": 3,
"code_window": [
"\n",
"// SetValue sets the value under key in section\n",
"func (s *Storage) SetValue(section string, key string, value string) {\n",
"\ts.check()\n",
"\ts.gc.SetValue(section, key, value)\n",
"}\n",
"\n",
"// DeleteKey removes the key under section\n",
"func (s *Storage) DeleteKey(section string, key string) bool {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif strings.HasPrefix(section, \":\") {\n",
"\t\tfs.Logf(nil, \"Can't save config %q for on the fly backend %q\", key, section)\n",
"\t\treturn\n",
"\t}\n"
],
"file_path": "fs/config/configfile/configfile.go",
"type": "add",
"edit_start_line_idx": 226
} | package fs
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
)
// BwPair represents an upload and a download bandwidth
type BwPair struct {
Tx SizeSuffix // upload bandwidth
Rx SizeSuffix // download bandwidth
}
// String returns a printable representation of a BwPair
func (bp *BwPair) String() string {
var out strings.Builder
out.WriteString(bp.Tx.String())
if bp.Rx != bp.Tx {
out.WriteRune(':')
out.WriteString(bp.Rx.String())
}
return out.String()
}
// Set the bandwidth from a string which is either
// SizeSuffix or SizeSuffix:SizeSuffix (for tx:rx bandwidth)
func (bp *BwPair) Set(s string) (err error) {
colon := strings.Index(s, ":")
stx, srx := s, ""
if colon >= 0 {
stx, srx = s[:colon], s[colon+1:]
}
err = bp.Tx.Set(stx)
if err != nil {
return err
}
if colon < 0 {
bp.Rx = bp.Tx
} else {
err = bp.Rx.Set(srx)
if err != nil {
return err
}
}
return nil
}
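// Parsing sketch (illustrative): a single value sets both directions,
// while "tx:rx" sets them separately:
//
//	var bp BwPair
//	_ = bp.Set("10M")    // Tx = Rx = 10 MiB/s
//	_ = bp.Set("10M:1M") // Tx = 10 MiB/s, Rx = 1 MiB/s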
// IsSet returns true if either of the bandwidth limits are set
func (bp *BwPair) IsSet() bool {
return bp.Tx > 0 || bp.Rx > 0
}
// BwTimeSlot represents a bandwidth configuration at a point in time.
type BwTimeSlot struct {
DayOfTheWeek int
HHMM int
Bandwidth BwPair
}
// BwTimetable contains all configured time slots.
type BwTimetable []BwTimeSlot
// String returns a printable representation of BwTimetable.
func (x BwTimetable) String() string {
var out strings.Builder
bwOnly := len(x) == 1 && x[0].DayOfTheWeek == 0 && x[0].HHMM == 0
for _, ts := range x {
if out.Len() != 0 {
out.WriteRune(' ')
}
if !bwOnly {
_, _ = fmt.Fprintf(&out, "%s-%02d:%02d,", time.Weekday(ts.DayOfTheWeek).String()[:3], ts.HHMM/100, ts.HHMM%100)
}
out.WriteString(ts.Bandwidth.String())
}
return out.String()
}
// Basic hour format checking
func validateHour(HHMM string) error {
if len(HHMM) != 5 {
return errors.Errorf("invalid time specification (hh:mm): %q", HHMM)
}
hh, err := strconv.Atoi(HHMM[0:2])
if err != nil {
return errors.Errorf("invalid hour in time specification %q: %v", HHMM, err)
}
if hh < 0 || hh > 23 {
return errors.Errorf("invalid hour (must be between 00 and 23): %q", hh)
}
mm, err := strconv.Atoi(HHMM[3:])
if err != nil {
return errors.Errorf("invalid minute in time specification: %q: %v", HHMM, err)
}
if mm < 0 || mm > 59 {
return errors.Errorf("invalid minute (must be between 00 and 59): %q", hh)
}
return nil
}
// Basic weekday format checking
func parseWeekday(dayOfWeek string) (int, error) {
dayOfWeek = strings.ToLower(dayOfWeek)
if dayOfWeek == "sun" || dayOfWeek == "sunday" {
return 0, nil
}
if dayOfWeek == "mon" || dayOfWeek == "monday" {
return 1, nil
}
if dayOfWeek == "tue" || dayOfWeek == "tuesday" {
return 2, nil
}
if dayOfWeek == "wed" || dayOfWeek == "wednesday" {
return 3, nil
}
if dayOfWeek == "thu" || dayOfWeek == "thursday" {
return 4, nil
}
if dayOfWeek == "fri" || dayOfWeek == "friday" {
return 5, nil
}
if dayOfWeek == "sat" || dayOfWeek == "saturday" {
return 6, nil
}
return 0, errors.Errorf("invalid weekday: %q", dayOfWeek)
}
// Set the bandwidth timetable.
func (x *BwTimetable) Set(s string) error {
// The timetable is formatted as:
// "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,bandwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off"
// If only a single bandwidth identifier is provided, we assume constant bandwidth.
if len(s) == 0 {
return errors.New("empty string")
}
// Single value without time specification.
if !strings.Contains(s, " ") && !strings.Contains(s, ",") {
ts := BwTimeSlot{}
if err := ts.Bandwidth.Set(s); err != nil {
return err
}
ts.DayOfTheWeek = 0
ts.HHMM = 0
*x = BwTimetable{ts}
return nil
}
for _, tok := range strings.Split(s, " ") {
tv := strings.Split(tok, ",")
// Format must be dayOfWeek-HH:MM,BW
if len(tv) != 2 {
return errors.Errorf("invalid time/bandwidth specification: %q", tok)
}
weekday := 0
HHMM := ""
if !strings.Contains(tv[0], "-") {
HHMM = tv[0]
if err := validateHour(HHMM); err != nil {
return err
}
for i := 0; i < 7; i++ {
hh, _ := strconv.Atoi(HHMM[0:2])
mm, _ := strconv.Atoi(HHMM[3:])
ts := BwTimeSlot{
DayOfTheWeek: i,
HHMM: (hh * 100) + mm,
}
if err := ts.Bandwidth.Set(tv[1]); err != nil {
return err
}
*x = append(*x, ts)
}
} else {
timespec := strings.Split(tv[0], "-")
if len(timespec) != 2 {
return errors.Errorf("invalid time specification: %q", tv[0])
}
var err error
weekday, err = parseWeekday(timespec[0])
if err != nil {
return err
}
HHMM = timespec[1]
if err := validateHour(HHMM); err != nil {
return err
}
hh, _ := strconv.Atoi(HHMM[0:2])
mm, _ := strconv.Atoi(HHMM[3:])
ts := BwTimeSlot{
DayOfTheWeek: weekday,
HHMM: (hh * 100) + mm,
}
// Bandwidth limit for this time slot.
if err := ts.Bandwidth.Set(tv[1]); err != nil {
return err
}
*x = append(*x, ts)
}
}
return nil
}
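// Usage sketch (illustrative): each token is dayOfWeek-hh:mm,bandwidth and
// a bare hh:mm,bandwidth is expanded to all seven days:
//
//	var tt BwTimetable
//	_ = tt.Set("Mon-10:00,10G Tue-18:00,off") // two time slots
//	_ = tt.Set("11:00,1M")                    // seven slots, one per day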
// Difference in minutes between lateDayOfWeekHHMM and earlyDayOfWeekHHMM
func timeDiff(lateDayOfWeekHHMM int, earlyDayOfWeekHHMM int) int {
lateTimeMinutes := (lateDayOfWeekHHMM / 10000) * 24 * 60
lateTimeMinutes += ((lateDayOfWeekHHMM / 100) % 100) * 60
lateTimeMinutes += lateDayOfWeekHHMM % 100
earlyTimeMinutes := (earlyDayOfWeekHHMM / 10000) * 24 * 60
earlyTimeMinutes += ((earlyDayOfWeekHHMM / 100) % 100) * 60
earlyTimeMinutes += earlyDayOfWeekHHMM % 100
return lateTimeMinutes - earlyTimeMinutes
}
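// Worked example (illustrative): Monday 10:30 encodes as
// 1*10000 + 10*100 + 30 = 11030, which is 1440 + 600 + 30 = 2070 minutes,
// so timeDiff(11030, 10000) - Monday 10:30 minus Monday 00:00 - returns
// 2070 - 1440 = 630.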
// LimitAt returns a BwTimeSlot for the time requested.
func (x BwTimetable) LimitAt(tt time.Time) BwTimeSlot {
// If the timetable is empty, we return an unlimited BwTimeSlot starting at Sunday midnight.
if len(x) == 0 {
return BwTimeSlot{Bandwidth: BwPair{-1, -1}}
}
dayOfWeekHHMM := int(tt.Weekday())*10000 + tt.Hour()*100 + tt.Minute()
// By default, we return the last element in the timetable. This
// satisfies two conditions: 1) If there's only one element it
// will always be selected, and 2) The last element of the table
// will "wrap around" until overridden by an earlier time slot.
ret := x[len(x)-1]
mindif := 0
first := true
// Look for most recent time slot.
for _, ts := range x {
// Ignore the past
if dayOfWeekHHMM < (ts.DayOfTheWeek*10000)+ts.HHMM {
continue
}
dif := timeDiff(dayOfWeekHHMM, (ts.DayOfTheWeek*10000)+ts.HHMM)
if first {
mindif = dif
first = false
}
if dif <= mindif {
mindif = dif
ret = ts
}
}
return ret
}
// Type of the value
func (x BwTimetable) Type() string {
return "BwTimetable"
}
// UnmarshalJSON unmarshals a string value
func (x *BwTimetable) UnmarshalJSON(in []byte) error {
var s string
err := json.Unmarshal(in, &s)
if err != nil {
return err
}
return x.Set(s)
}
// MarshalJSON marshals as a string value
func (x BwTimetable) MarshalJSON() ([]byte, error) {
s := x.String()
return json.Marshal(s)
}
| fs/bwtimetable.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.012872336432337761,
0.0007956227636896074,
0.0001636334345676005,
0.00017454117187298834,
0.002338406629860401
] |
{
"id": 4,
"code_window": [
"\n",
"import (\n",
"\t\"os\"\n",
"\t\"strings\"\n",
"\n",
"\t\"github.com/rclone/rclone/fs/config/configmap\"\n",
")\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "fs/configmap.go",
"type": "replace",
"edit_start_line_idx": 6
} | // Getters and Setters for ConfigMap
package fs
import (
"os"
"strings"
"github.com/rclone/rclone/fs/config/configmap"
)
// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
type configEnvVars string
// Get a config item from the environment variables if possible
func (configName configEnvVars) Get(key string) (value string, ok bool) {
envKey := ConfigToEnv(string(configName), key)
value, ok = os.LookupEnv(envKey)
if ok {
Debugf(nil, "Setting %s=%q for %q from environment variable %s", key, value, configName, envKey)
}
return value, ok
}
// A configmap.Getter to read from the environment RCLONE_option_name
type optionEnvVars struct {
fsInfo *RegInfo
}
// Get a config item from the option environment variables if possible
func (oev optionEnvVars) Get(key string) (value string, ok bool) {
opt := oev.fsInfo.Options.Get(key)
if opt == nil {
return "", false
}
envKey := OptionToEnv(oev.fsInfo.Prefix + "-" + key)
value, ok = os.LookupEnv(envKey)
if ok {
Debugf(nil, "Setting %s_%s=%q from environment variable %s", oev.fsInfo.Prefix, key, value, envKey)
} else if opt.NoPrefix {
// For options with NoPrefix set, check without prefix too
envKey := OptionToEnv(key)
value, ok = os.LookupEnv(envKey)
if ok {
Debugf(nil, "Setting %s=%q for %s from environment variable %s", key, value, oev.fsInfo.Prefix, envKey)
}
}
return value, ok
}
// A configmap.Getter to read either the default value or the set
// value from the RegInfo.Options
type regInfoValues struct {
fsInfo *RegInfo
useDefault bool
}
// override the values in configMap with either the flag values or
// the default values
func (r *regInfoValues) Get(key string) (value string, ok bool) {
opt := r.fsInfo.Options.Get(key)
if opt != nil && (r.useDefault || opt.Value != nil) {
return opt.String(), true
}
return "", false
}
// A configmap.Setter to read from the config file
type setConfigFile string
// Set a config item into the config file
func (section setConfigFile) Set(key, value string) {
if strings.HasPrefix(string(section), ":") {
Logf(nil, "Can't save config %q = %q for on the fly backend %q", key, value, section)
return
}
Debugf(nil, "Saving config %q = %q in section %q of the config file", key, value, section)
err := ConfigFileSet(string(section), key, value)
if err != nil {
Errorf(nil, "Failed saving config %q = %q in section %q of the config file: %v", key, value, section, err)
}
}
// A configmap.Getter to read from the config file
type getConfigFile string
// Get a config item from the config file
func (section getConfigFile) Get(key string) (value string, ok bool) {
value, ok = ConfigFileGet(string(section), key)
// Ignore empty lines in the config file
if value == "" {
ok = false
}
return value, ok
}
// ConfigMap creates a configmap.Map from the *RegInfo and the
// configName passed in. If connectionStringConfig has any entries (it may be nil),
// then it will be added to the lookup with the highest priority.
//
// If fsInfo is nil then the returned configmap.Map should only be
// used for reading non backend specific parameters, such as "type".
func ConfigMap(fsInfo *RegInfo, configName string, connectionStringConfig configmap.Simple) (config *configmap.Map) {
// Create the config
config = configmap.New()
// Read the config, more specific to least specific
// Config from connection string
if len(connectionStringConfig) > 0 {
config.AddGetter(connectionStringConfig, configmap.PriorityNormal)
}
// flag values
if fsInfo != nil {
config.AddGetter(®InfoValues{fsInfo, false}, configmap.PriorityNormal)
}
// remote specific environment vars
config.AddGetter(configEnvVars(configName), configmap.PriorityNormal)
// backend specific environment vars
if fsInfo != nil {
config.AddGetter(optionEnvVars{fsInfo: fsInfo}, configmap.PriorityNormal)
}
// config file
config.AddGetter(getConfigFile(configName), configmap.PriorityConfig)
// default values
if fsInfo != nil {
config.AddGetter(®InfoValues{fsInfo, true}, configmap.PriorityDefault)
}
// Set Config
config.AddSetter(setConfigFile(configName))
return config
}
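// Illustrative precedence (as wired above, highest first): connection
// string values, flag values, RCLONE_CONFIG_<remote>_<key> variables,
// backend RCLONE_<prefix>_<key> variables, the config file, and finally
// the option defaults.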
| fs/configmap.go | 1 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.9749113321304321,
0.07026226818561554,
0.00017691341054160148,
0.0002559996792115271,
0.25090712308883667
] |
{
"id": 4,
"code_window": [
"\n",
"import (\n",
"\t\"os\"\n",
"\t\"strings\"\n",
"\n",
"\t\"github.com/rclone/rclone/fs/config/configmap\"\n",
")\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "fs/configmap.go",
"type": "replace",
"edit_start_line_idx": 6
} | // Test Pcloud filesystem interface
package pcloud_test
import (
"testing"
"github.com/rclone/rclone/backend/pcloud"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestPcloud:",
NilObject: (*pcloud.Object)(nil),
})
}
| backend/pcloud/pcloud_test.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.0006289539160206914,
0.00041468098061159253,
0.0002004080597544089,
0.00041468098061159253,
0.00021427292085718364
] |
{
"id": 4,
"code_window": [
"\n",
"import (\n",
"\t\"os\"\n",
"\t\"strings\"\n",
"\n",
"\t\"github.com/rclone/rclone/fs/config/configmap\"\n",
")\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "fs/configmap.go",
"type": "replace",
"edit_start_line_idx": 6
} | package rcat
import (
"context"
"log"
"os"
"time"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
var (
size = int64(-1)
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.Int64VarP(cmdFlags, &size, "size", "", size, "File size hint to preallocate")
}
var commandDefinition = &cobra.Command{
Use: "rcat remote:path",
Short: `Copies standard input to file on remote.`,
Long: `
rclone rcat reads from standard input (stdin) and copies it to a
single remote file.
echo "hello world" | rclone rcat remote:path/to/file
ffmpeg - | rclone rcat remote:path/to/file
If the remote file already exists, it will be overwritten.
rcat will try to upload small files in a single request, which is
usually more efficient than the streaming/chunked upload endpoints,
which use multiple requests. Exact behaviour depends on the remote.
What is considered a small file may be set through
` + "`--streaming-upload-cutoff`" + `. Uploading only starts after
the cutoff is reached or if the file ends before that. The data
must fit into RAM. The cutoff needs to be small enough to adhere
the limits of your remote, please see there. Generally speaking,
setting this cutoff too high will decrease your performance.
Use the |--size| flag to preallocate the file in advance at the remote end
and actually stream it, even if remote backend doesn't support streaming.
|--size| should be the exact size of the input stream in bytes. If the
size of the stream is different in length to the |--size| passed in
then the transfer will likely fail.
Note that the upload can also not be retried because the data is
not kept around until the upload succeeds. If you need to transfer
a lot of data, you're better off caching locally and then
` + "`rclone move`" + ` it to the destination.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
stat, _ := os.Stdin.Stat()
if (stat.Mode() & os.ModeCharDevice) != 0 {
log.Fatalf("nothing to read from standard input (stdin).")
}
fdst, dstFileName := cmd.NewFsDstFile(args)
cmd.Run(false, false, command, func() error {
_, err := operations.RcatSize(context.Background(), fdst, dstFileName, os.Stdin, size, time.Now())
return err
})
},
}
| cmd/rcat/rcat.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.006786157377064228,
0.0011638673022389412,
0.00016667199088260531,
0.00026028984575532377,
0.0021375238429754972
] |
{
"id": 4,
"code_window": [
"\n",
"import (\n",
"\t\"os\"\n",
"\t\"strings\"\n",
"\n",
"\t\"github.com/rclone/rclone/fs/config/configmap\"\n",
")\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "fs/configmap.go",
"type": "replace",
"edit_start_line_idx": 6
} | // +build cmount
// +build cgo
// +build linux darwin freebsd windows
package cmount
import (
"io"
"os"
"path"
"sync"
"sync/atomic"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs"
)
const fhUnset = ^uint64(0)
// FS represents the top level filing system
type FS struct {
VFS *vfs.VFS
f fs.Fs
ready chan (struct{})
mu sync.Mutex // to protect the below
handles []vfs.Handle
destroyed int32 // read/write with sync/atomic
}
// NewFS makes a new FS
func NewFS(VFS *vfs.VFS) *FS {
fsys := &FS{
VFS: VFS,
f: VFS.Fs(),
ready: make(chan (struct{})),
}
return fsys
}
// Open a handle returning an integer file handle
func (fsys *FS) openHandle(handle vfs.Handle) (fh uint64) {
fsys.mu.Lock()
defer fsys.mu.Unlock()
var i int
var oldHandle vfs.Handle
for i, oldHandle = range fsys.handles {
if oldHandle == nil {
fsys.handles[i] = handle
goto found
}
}
fsys.handles = append(fsys.handles, handle)
i = len(fsys.handles) - 1
found:
return uint64(i)
}
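// Illustrative behaviour (not part of the original file): handles live in
// a slice indexed by the returned fh; closeHandle nils out a slot so it
// can be reused, e.g. open(a)=0, open(b)=1, close(0), then open(c)=0.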
// get the handle for fh, call with the lock held
func (fsys *FS) _getHandle(fh uint64) (i int, handle vfs.Handle, errc int) {
if fh > uint64(len(fsys.handles)) {
fs.Debugf(nil, "Bad file handle: too big: 0x%X", fh)
return i, nil, -fuse.EBADF
}
i = int(fh)
handle = fsys.handles[i]
if handle == nil {
fs.Debugf(nil, "Bad file handle: nil handle: 0x%X", fh)
return i, nil, -fuse.EBADF
}
return i, handle, 0
}
// Get the handle for the file handle
func (fsys *FS) getHandle(fh uint64) (handle vfs.Handle, errc int) {
fsys.mu.Lock()
_, handle, errc = fsys._getHandle(fh)
fsys.mu.Unlock()
return
}
// Close the handle
func (fsys *FS) closeHandle(fh uint64) (errc int) {
fsys.mu.Lock()
i, _, errc := fsys._getHandle(fh)
if errc == 0 {
fsys.handles[i] = nil
}
fsys.mu.Unlock()
return
}
// lookup a Node given a path
func (fsys *FS) lookupNode(path string) (node vfs.Node, errc int) {
node, err := fsys.VFS.Stat(path)
return node, translateError(err)
}
// lookup a Dir given a path
func (fsys *FS) lookupDir(path string) (dir *vfs.Dir, errc int) {
node, errc := fsys.lookupNode(path)
if errc != 0 {
return nil, errc
}
dir, ok := node.(*vfs.Dir)
if !ok {
return nil, -fuse.ENOTDIR
}
return dir, 0
}
// lookup a parent Dir given a path returning the dir and the leaf
func (fsys *FS) lookupParentDir(filePath string) (leaf string, dir *vfs.Dir, errc int) {
parentDir, leaf := path.Split(filePath)
dir, errc = fsys.lookupDir(parentDir)
return leaf, dir, errc
}
// lookup a File given a path
func (fsys *FS) lookupFile(path string) (file *vfs.File, errc int) {
node, errc := fsys.lookupNode(path)
if errc != 0 {
return nil, errc
}
file, ok := node.(*vfs.File)
if !ok {
return nil, -fuse.EISDIR
}
return file, 0
}
// get a node and handle from the path or from the fh if not fhUnset
//
// handle may be nil
func (fsys *FS) getNode(path string, fh uint64) (node vfs.Node, handle vfs.Handle, errc int) {
if fh == fhUnset {
node, errc = fsys.lookupNode(path)
} else {
handle, errc = fsys.getHandle(fh)
if errc == 0 {
node = handle.Node()
}
}
return
}
// stat fills up the stat block for Node
func (fsys *FS) stat(node vfs.Node, stat *fuse.Stat_t) (errc int) {
Size := uint64(node.Size())
Blocks := (Size + 511) / 512
modTime := node.ModTime()
Mode := node.Mode().Perm()
if node.IsDir() {
Mode |= fuse.S_IFDIR
} else {
Mode |= fuse.S_IFREG
}
//stat.Dev = 1
stat.Ino = node.Inode() // FIXME do we need to set the inode number?
stat.Mode = uint32(Mode)
stat.Nlink = 1
stat.Uid = fsys.VFS.Opt.UID
stat.Gid = fsys.VFS.Opt.GID
//stat.Rdev
stat.Size = int64(Size)
t := fuse.NewTimespec(modTime)
stat.Atim = t
stat.Mtim = t
stat.Ctim = t
stat.Blksize = 512
stat.Blocks = int64(Blocks)
stat.Birthtim = t
// fs.Debugf(nil, "stat = %+v", *stat)
return 0
}
// Init is called after the filesystem is ready
func (fsys *FS) Init() {
defer log.Trace(fsys.f, "")("")
close(fsys.ready)
}
// Destroy is called when it is unmounted (note that depending on how
// the file system is terminated the file system may not receive the
// Destroy call).
func (fsys *FS) Destroy() {
defer log.Trace(fsys.f, "")("")
atomic.StoreInt32(&fsys.destroyed, 1)
}
// Getattr reads the attributes for path
func (fsys *FS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
defer log.Trace(path, "fh=0x%X", fh)("errc=%v", &errc)
node, _, errc := fsys.getNode(path, fh)
if errc == 0 {
errc = fsys.stat(node, stat)
}
return
}
// Opendir opens path as a directory
func (fsys *FS) Opendir(path string) (errc int, fh uint64) {
defer log.Trace(path, "")("errc=%d, fh=0x%X", &errc, &fh)
handle, err := fsys.VFS.OpenFile(path, os.O_RDONLY, 0777)
if err != nil {
return translateError(err), fhUnset
}
return 0, fsys.openHandle(handle)
}
// Readdir reads the directory at dirPath
func (fsys *FS) Readdir(dirPath string,
fill func(name string, stat *fuse.Stat_t, ofst int64) bool,
ofst int64,
fh uint64) (errc int) {
itemsRead := -1
defer log.Trace(dirPath, "ofst=%d, fh=0x%X", ofst, fh)("items=%d, errc=%d", &itemsRead, &errc)
dir, errc := fsys.lookupDir(dirPath)
if errc != 0 {
return errc
}
// We can't seek in directories and FUSE should know that so
// return an error if ofst is ever set.
if ofst > 0 {
return -fuse.ESPIPE
}
nodes, err := dir.ReadDirAll()
if err != nil {
return translateError(err)
}
// Optionally, create a struct stat that describes the file as
// for getattr (but FUSE only looks at st_ino and the
// file-type bits of st_mode).
//
// We have called host.SetCapReaddirPlus() so WinFsp will
// use the full stat information - a useful optimization on
// Windows.
//
// NB we are using the first mode for readdir: The readdir
// implementation ignores the offset parameter, and passes
// zero to the filler function's offset. The filler function
// will not return '1' (unless an error happens), so the whole
// directory is read in a single readdir operation.
fill(".", nil, 0)
fill("..", nil, 0)
for _, node := range nodes {
name := node.Name()
if len(name) > mountlib.MaxLeafSize {
fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
continue
}
// We have called host.SetCapReaddirPlus() so supply the stat information
// It is very cheap at this point so supply it regardless of OS capabilities
var stat fuse.Stat_t
_ = fsys.stat(node, &stat) // not capable of returning an error
fill(name, &stat, 0)
}
itemsRead = len(nodes)
return 0
}
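// Illustrative sketch only (not part of rclone): the calling convention
// for a FUSE filler in the "first" readdir mode described above. Every
// entry is passed with offset 0, so the whole directory is delivered in
// one pass. The name exampleFillAll is hypothetical.
func exampleFillAll(names []string, fill func(name string, stat *fuse.Stat_t, ofst int64) bool) {
	fill(".", nil, 0)
	fill("..", nil, 0)
	for _, name := range names {
		// With offset 0 semantics a false return only happens on error,
		// so there is nothing useful to resume - just stop.
		if !fill(name, nil, 0) {
			return
		}
	}
}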
// Releasedir finishes reading the directory
func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
return fsys.closeHandle(fh)
}
// Statfs reads overall stats on the filesystem
func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
const blockSize = 4096
total, _, free := fsys.VFS.Statfs()
stat.Blocks = uint64(total) / blockSize // Total data blocks in file system.
stat.Bfree = uint64(free) / blockSize // Free blocks in file system.
stat.Bavail = stat.Bfree // Free blocks in file system if you're not root.
stat.Files = 1e9 // Total files in file system.
stat.Ffree = 1e9 // Free files in file system.
stat.Bsize = blockSize // Block size
stat.Namemax = 255 // Maximum file name length?
stat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
mountlib.ClipBlocks(&stat.Blocks)
mountlib.ClipBlocks(&stat.Bfree)
mountlib.ClipBlocks(&stat.Bavail)
return 0
}
// OpenEx opens a file
func (fsys *FS) OpenEx(path string, fi *fuse.FileInfo_t) (errc int) {
defer log.Trace(path, "flags=0x%X", fi.Flags)("errc=%d, fh=0x%X", &errc, &fi.Fh)
fi.Fh = fhUnset
// translate the fuse flags to os flags
flags := translateOpenFlags(fi.Flags)
handle, err := fsys.VFS.OpenFile(path, flags, 0777)
if err != nil {
return translateError(err)
}
// If size unknown then use direct io to read
if entry := handle.Node().DirEntry(); entry != nil && entry.Size() < 0 {
fi.DirectIo = true
}
fi.Fh = fsys.openHandle(handle)
return 0
}
// Open opens a file
func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) {
var fi = fuse.FileInfo_t{
Flags: flags,
}
errc = fsys.OpenEx(path, &fi)
return errc, fi.Fh
}
// CreateEx creates and opens a file.
func (fsys *FS) CreateEx(filePath string, mode uint32, fi *fuse.FileInfo_t) (errc int) {
defer log.Trace(filePath, "flags=0x%X, mode=0%o", fi.Flags, mode)("errc=%d, fh=0x%X", &errc, &fi.Fh)
fi.Fh = fhUnset
leaf, parentDir, errc := fsys.lookupParentDir(filePath)
if errc != 0 {
return errc
}
file, err := parentDir.Create(leaf, fi.Flags)
if err != nil {
return translateError(err)
}
// translate the fuse flags to os flags
flags := translateOpenFlags(fi.Flags) | os.O_CREATE
handle, err := file.Open(flags)
if err != nil {
return translateError(err)
}
fi.Fh = fsys.openHandle(handle)
return 0
}
// Create creates and opens a file.
func (fsys *FS) Create(filePath string, flags int, mode uint32) (errc int, fh uint64) {
var fi = fuse.FileInfo_t{
Flags: flags,
}
errc = fsys.CreateEx(filePath, mode, &fi)
return errc, fi.Fh
}
// Truncate truncates a file to size
func (fsys *FS) Truncate(path string, size int64, fh uint64) (errc int) {
defer log.Trace(path, "size=%d, fh=0x%X", size, fh)("errc=%d", &errc)
node, handle, errc := fsys.getNode(path, fh)
if errc != 0 {
return errc
}
var err error
if handle != nil {
err = handle.Truncate(size)
} else {
err = node.Truncate(size)
}
if err != nil {
return translateError(err)
}
return 0
}
// Read data from file handle
func (fsys *FS) Read(path string, buff []byte, ofst int64, fh uint64) (n int) {
defer log.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
handle, errc := fsys.getHandle(fh)
if errc != 0 {
return errc
}
n, err := handle.ReadAt(buff, ofst)
	if err == io.EOF {
		// Ignore EOF: a short read at the end of the file is expected, not an error.
	} else if err != nil {
return translateError(err)
}
return n
}
// Write data to file handle
func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
defer log.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
handle, errc := fsys.getHandle(fh)
if errc != 0 {
return errc
}
n, err := handle.WriteAt(buff, ofst)
if err != nil {
return translateError(err)
}
return n
}
// Flush flushes an open file descriptor or path
func (fsys *FS) Flush(path string, fh uint64) (errc int) {
defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
handle, errc := fsys.getHandle(fh)
if errc != 0 {
return errc
}
return translateError(handle.Flush())
}
// Release closes the file if still open
func (fsys *FS) Release(path string, fh uint64) (errc int) {
defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
handle, errc := fsys.getHandle(fh)
if errc != 0 {
return errc
}
_ = fsys.closeHandle(fh)
return translateError(handle.Release())
}
// Unlink removes a file.
func (fsys *FS) Unlink(filePath string) (errc int) {
defer log.Trace(filePath, "")("errc=%d", &errc)
leaf, parentDir, errc := fsys.lookupParentDir(filePath)
if errc != 0 {
return errc
}
return translateError(parentDir.RemoveName(leaf))
}
// Mkdir creates a directory.
func (fsys *FS) Mkdir(dirPath string, mode uint32) (errc int) {
defer log.Trace(dirPath, "mode=0%o", mode)("errc=%d", &errc)
leaf, parentDir, errc := fsys.lookupParentDir(dirPath)
if errc != 0 {
return errc
}
_, err := parentDir.Mkdir(leaf)
return translateError(err)
}
// Rmdir removes a directory
func (fsys *FS) Rmdir(dirPath string) (errc int) {
defer log.Trace(dirPath, "")("errc=%d", &errc)
leaf, parentDir, errc := fsys.lookupParentDir(dirPath)
if errc != 0 {
return errc
}
return translateError(parentDir.RemoveName(leaf))
}
// Rename renames a file.
func (fsys *FS) Rename(oldPath string, newPath string) (errc int) {
defer log.Trace(oldPath, "newPath=%q", newPath)("errc=%d", &errc)
return translateError(fsys.VFS.Rename(oldPath, newPath))
}
// Windows sometimes seems to send times that are the epoch which is
// 1601-01-01 +/- timezone so filter out times that are earlier than
// this.
var invalidDateCutoff = time.Date(1601, 1, 2, 0, 0, 0, 0, time.UTC)
// Utimens changes the access and modification times of a file.
func (fsys *FS) Utimens(path string, tmsp []fuse.Timespec) (errc int) {
defer log.Trace(path, "tmsp=%+v", tmsp)("errc=%d", &errc)
node, errc := fsys.lookupNode(path)
if errc != 0 {
return errc
}
	if len(tmsp) < 2 {
fs.Debugf(path, "Utimens: Not setting time as timespec isn't complete: %v", tmsp)
return 0
}
t := tmsp[1].Time()
if t.Before(invalidDateCutoff) {
fs.Debugf(path, "Utimens: Not setting out of range time: %v", t)
return 0
}
fs.Debugf(path, "Utimens: SetModTime: %v", t)
return translateError(node.SetModTime(t))
}
// Mknod creates a file node.
func (fsys *FS) Mknod(path string, mode uint32, dev uint64) (errc int) {
defer log.Trace(path, "mode=0x%X, dev=0x%X", mode, dev)("errc=%d", &errc)
return -fuse.ENOSYS
}
// Fsync synchronizes file contents.
func (fsys *FS) Fsync(path string, datasync bool, fh uint64) (errc int) {
defer log.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Link creates a hard link to a file.
func (fsys *FS) Link(oldpath string, newpath string) (errc int) {
defer log.Trace(oldpath, "newpath=%q", newpath)("errc=%d", &errc)
return -fuse.ENOSYS
}
// Symlink creates a symbolic link.
func (fsys *FS) Symlink(target string, newpath string) (errc int) {
defer log.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
return -fuse.ENOSYS
}
// Readlink reads the target of a symbolic link.
func (fsys *FS) Readlink(path string) (errc int, linkPath string) {
defer log.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
return -fuse.ENOSYS, ""
}
// Chmod changes the permission bits of a file.
func (fsys *FS) Chmod(path string, mode uint32) (errc int) {
defer log.Trace(path, "mode=0%o", mode)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Chown changes the owner and group of a file.
func (fsys *FS) Chown(path string, uid uint32, gid uint32) (errc int) {
defer log.Trace(path, "uid=%d, gid=%d", uid, gid)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Access checks file access permissions.
func (fsys *FS) Access(path string, mask uint32) (errc int) {
defer log.Trace(path, "mask=0%o", mask)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Fsyncdir synchronizes directory contents.
func (fsys *FS) Fsyncdir(path string, datasync bool, fh uint64) (errc int) {
defer log.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Setxattr sets extended attributes.
func (fsys *FS) Setxattr(path string, name string, value []byte, flags int) (errc int) {
return -fuse.ENOSYS
}
// Getxattr gets extended attributes.
func (fsys *FS) Getxattr(path string, name string) (errc int, value []byte) {
return -fuse.ENOSYS, nil
}
// Removexattr removes extended attributes.
func (fsys *FS) Removexattr(path string, name string) (errc int) {
return -fuse.ENOSYS
}
// Listxattr lists extended attributes.
func (fsys *FS) Listxattr(path string, fill func(name string) bool) (errc int) {
return -fuse.ENOSYS
}
// Translate errors from the vfs and fs layers into FUSE status codes
func translateError(err error) (errc int) {
if err == nil {
return 0
}
switch errors.Cause(err) {
case vfs.OK:
return 0
case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
return -fuse.ENOENT
case vfs.EEXIST, fs.ErrorDirExists:
return -fuse.EEXIST
case vfs.EPERM, fs.ErrorPermissionDenied:
return -fuse.EPERM
case vfs.ECLOSED:
return -fuse.EBADF
case vfs.ENOTEMPTY:
return -fuse.ENOTEMPTY
case vfs.ESPIPE:
return -fuse.ESPIPE
case vfs.EBADF:
return -fuse.EBADF
case vfs.EROFS:
return -fuse.EROFS
case vfs.ENOSYS, fs.ErrorNotImplemented:
return -fuse.ENOSYS
case vfs.EINVAL:
return -fuse.EINVAL
}
fs.Errorf(nil, "IO error: %v", err)
return -fuse.EIO
}
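// Illustrative sketch only (not part of rclone): translateError unwraps
// with errors.Cause, so a wrapped sentinel error still maps to the right
// FUSE status code. The function name is hypothetical.
func exampleTranslate() int {
	err := errors.Wrap(vfs.ENOENT, "open failed")
	return translateError(err) // -fuse.ENOENT
}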
// Translate Open Flags from FUSE to os (as used in the vfs layer)
func translateOpenFlags(inFlags int) (outFlags int) {
switch inFlags & fuse.O_ACCMODE {
case fuse.O_RDONLY:
outFlags = os.O_RDONLY
case fuse.O_WRONLY:
outFlags = os.O_WRONLY
case fuse.O_RDWR:
outFlags = os.O_RDWR
}
if inFlags&fuse.O_APPEND != 0 {
outFlags |= os.O_APPEND
}
if inFlags&fuse.O_CREAT != 0 {
outFlags |= os.O_CREATE
}
if inFlags&fuse.O_EXCL != 0 {
outFlags |= os.O_EXCL
}
if inFlags&fuse.O_TRUNC != 0 {
outFlags |= os.O_TRUNC
}
// NB O_SYNC isn't defined by fuse
return outFlags
}
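// Illustrative sketch only (not part of rclone): translating a typical
// "create or truncate for writing" FUSE open request with the function
// above. The function name is hypothetical.
func exampleOpenFlags() int {
	in := fuse.O_WRONLY | fuse.O_CREAT | fuse.O_TRUNC
	// Expect os.O_WRONLY | os.O_CREATE | os.O_TRUNC
	return translateOpenFlags(in)
}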
// Make sure interfaces are satisfied
var (
_ fuse.FileSystemInterface = (*FS)(nil)
_ fuse.FileSystemOpenEx = (*FS)(nil)
//_ fuse.FileSystemChflags = (*FS)(nil)
//_ fuse.FileSystemSetcrtime = (*FS)(nil)
//_ fuse.FileSystemSetchgtime = (*FS)(nil)
)
| cmd/cmount/fs.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.0033040172420442104,
0.0003377752145752311,
0.00016504168161191046,
0.0001949536381289363,
0.00047974707558751106
] |
{
"id": 5,
"code_window": [
"type setConfigFile string\n",
"\n",
"// Set a config item into the config file\n",
"func (section setConfigFile) Set(key, value string) {\n",
"\tif strings.HasPrefix(string(section), \":\") {\n",
"\t\tLogf(nil, \"Can't save config %q = %q for on the fly backend %q\", key, value, section)\n",
"\t\treturn\n",
"\t}\n",
"\tDebugf(nil, \"Saving config %q = %q in section %q of the config file\", key, value, section)\n",
"\terr := ConfigFileSet(string(section), key, value)\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tDebugf(nil, \"Saving config %q in section %q of the config file\", key, section)\n"
],
"file_path": "fs/configmap.go",
"type": "replace",
"edit_start_line_idx": 72
} | // Package config reads, writes and edits the config file and deals with command line flags
package config
import (
"context"
"encoding/json"
"fmt"
"log"
mathrand "math/rand"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"time"
"github.com/mitchellh/go-homedir"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/random"
)
const (
configFileName = "rclone.conf"
hiddenConfigFileName = "." + configFileName
noConfigFile = "notfound"
// ConfigToken is the key used to store the token under
ConfigToken = "token"
// ConfigClientID is the config key used to store the client id
ConfigClientID = "client_id"
// ConfigClientSecret is the config key used to store the client secret
ConfigClientSecret = "client_secret"
// ConfigAuthURL is the config key used to store the auth server endpoint
ConfigAuthURL = "auth_url"
// ConfigTokenURL is the config key used to store the token server endpoint
ConfigTokenURL = "token_url"
// ConfigEncoding is the config key to change the encoding for a backend
ConfigEncoding = "encoding"
// ConfigEncodingHelp is the help for ConfigEncoding
ConfigEncodingHelp = "This sets the encoding for the backend.\n\nSee: the [encoding section in the overview](/overview/#encoding) for more info."
// ConfigAuthorize indicates that we just want "rclone authorize"
ConfigAuthorize = "config_authorize"
// ConfigAuthNoBrowser indicates that we do not want to open browser
ConfigAuthNoBrowser = "config_auth_no_browser"
)
// Storage defines an interface for loading and saving config to
// persistent storage. Rclone provides a default implementation to
// load and save to a config file when this is imported
//
// import "github.com/rclone/rclone/fs/config/configfile"
// configfile.Install()
type Storage interface {
// GetSectionList returns a slice of strings with names for all the
// sections
GetSectionList() []string
// HasSection returns true if section exists in the config file
HasSection(section string) bool
// DeleteSection removes the named section and all config from the
// config file
DeleteSection(section string)
// GetKeyList returns the keys in this section
GetKeyList(section string) []string
// GetValue returns the key in section with a found flag
GetValue(section string, key string) (value string, found bool)
// SetValue sets the value under key in section
SetValue(section string, key string, value string)
// DeleteKey removes the key under section
DeleteKey(section string, key string) bool
// Load the config from permanent storage
Load() error
// Save the config to permanent storage
Save() error
// Serialize the config into a string
Serialize() (string, error)
}
// Global
var (
// CacheDir points to the cache directory. Users of this
// should make a subdirectory and use MkdirAll() to create it
// and any parents.
CacheDir = makeCacheDir()
// Password can be used to configure the random password generator
Password = random.Password
)
var (
configPath string
data Storage
dataLoaded bool
)
func init() {
// Set the function pointers up in fs
fs.ConfigFileGet = FileGetFlag
fs.ConfigFileSet = SetValueAndSave
configPath = makeConfigPath()
data = newDefaultStorage()
}
// Join directory with filename, and check if exists
func findFile(dir string, name string) string {
path := filepath.Join(dir, name)
if _, err := os.Stat(path); err != nil {
return ""
}
return path
}
// Find current user's home directory
func findHomeDir() (string, error) {
path, err := homedir.Dir()
if err != nil {
fs.Debugf(nil, "Home directory lookup failed and cannot be used as configuration location: %v", err)
} else if path == "" {
// On Unix homedir return success but empty string for user with empty home configured in passwd file
fs.Debugf(nil, "Home directory not defined and cannot be used as configuration location")
}
return path, err
}
// Find rclone executable directory and look for existing rclone.conf there
// (<rclone_exe_dir>/rclone.conf)
func findLocalConfig() (configDir string, configFile string) {
if exePath, err := os.Executable(); err == nil {
configDir = filepath.Dir(exePath)
configFile = findFile(configDir, configFileName)
}
return
}
// Get path to Windows AppData config subdirectory for rclone and look for existing rclone.conf there
// ($AppData/rclone/rclone.conf)
func findAppDataConfig() (configDir string, configFile string) {
if appDataDir := os.Getenv("APPDATA"); appDataDir != "" {
configDir = filepath.Join(appDataDir, "rclone")
configFile = findFile(configDir, configFileName)
} else {
fs.Debugf(nil, "Environment variable APPDATA is not defined and cannot be used as configuration location")
}
return
}
// Get path to XDG config subdirectory for rclone and look for existing rclone.conf there
// (see XDG Base Directory specification: https://specifications.freedesktop.org/basedir-spec/latest/).
// ($XDG_CONFIG_HOME/rclone/rclone.conf)
func findXDGConfig() (configDir string, configFile string) {
if xdgConfigDir := os.Getenv("XDG_CONFIG_HOME"); xdgConfigDir != "" {
configDir = filepath.Join(xdgConfigDir, "rclone")
configFile = findFile(configDir, configFileName)
}
return
}
// Get path to .config subdirectory for rclone and look for existing rclone.conf there
// (~/.config/rclone/rclone.conf)
func findDotConfigConfig(home string) (configDir string, configFile string) {
if home != "" {
configDir = filepath.Join(home, ".config", "rclone")
configFile = findFile(configDir, configFileName)
}
return
}
// Look for existing .rclone.conf (legacy hidden filename) in root of user's home directory
// (~/.rclone.conf)
func findOldHomeConfig(home string) (configDir string, configFile string) {
if home != "" {
configDir = home
configFile = findFile(home, hiddenConfigFileName)
}
return
}
// Return the path to the configuration file
func makeConfigPath() string {
// Look for existing rclone.conf in prioritized list of known locations
// Also get configuration directory to use for new config file when no existing is found.
var (
configFile string
configDir string
primaryConfigDir string
fallbackConfigDir string
)
// <rclone_exe_dir>/rclone.conf
if _, configFile = findLocalConfig(); configFile != "" {
return configFile
}
// Windows: $AppData/rclone/rclone.conf
// This is also the default location for new config when no existing is found
if runtime.GOOS == "windows" {
if primaryConfigDir, configFile = findAppDataConfig(); configFile != "" {
return configFile
}
}
// $XDG_CONFIG_HOME/rclone/rclone.conf
// Also looking for this on Windows, for backwards compatibility reasons.
if configDir, configFile = findXDGConfig(); configFile != "" {
return configFile
}
if runtime.GOOS != "windows" {
// On Unix this is also the default location for new config when no existing is found
primaryConfigDir = configDir
}
// ~/.config/rclone/rclone.conf
// This is also the fallback location for new config
// (when $AppData on Windows and $XDG_CONFIG_HOME on Unix is not defined)
homeDir, homeDirErr := findHomeDir()
if fallbackConfigDir, configFile = findDotConfigConfig(homeDir); configFile != "" {
return configFile
}
// ~/.rclone.conf
if _, configFile = findOldHomeConfig(homeDir); configFile != "" {
return configFile
}
// No existing config file found, prepare proper default for a new one.
// But first check if the user supplied a --config variable or environment
// variable, since then we skip actually trying to create the default
// and report any errors related to it (we can't use pflag for this because
// it isn't initialised yet so we search the command line manually).
_, configSupplied := os.LookupEnv("RCLONE_CONFIG")
if !configSupplied {
for _, item := range os.Args {
if item == "--config" || strings.HasPrefix(item, "--config=") {
configSupplied = true
break
}
}
}
// If we found a configuration directory to be used for new config during search
// above, then create it to be ready for rclone.conf file to be written into it
// later, and also as a test of permissions to use fallback if not even able to
// create the directory.
if primaryConfigDir != "" {
configDir = primaryConfigDir
} else if fallbackConfigDir != "" {
configDir = fallbackConfigDir
} else {
configDir = ""
}
if configDir != "" {
configFile = filepath.Join(configDir, configFileName)
if configSupplied {
// User supplied custom config option, just return the default path
// as is without creating any directories, since it will not be used
// anyway and we don't want to unnecessarily create empty directory.
return configFile
}
var mkdirErr error
if mkdirErr = os.MkdirAll(configDir, os.ModePerm); mkdirErr == nil {
return configFile
}
// Problem: Try a fallback location. If we did find a home directory then
// just assume file .rclone.conf (legacy hidden filename) can be written in
// its root (~/.rclone.conf).
if homeDir != "" {
fs.Debugf(nil, "Configuration directory could not be created and will not be used: %v", mkdirErr)
return filepath.Join(homeDir, hiddenConfigFileName)
}
if !configSupplied {
fs.Errorf(nil, "Couldn't find home directory nor create configuration directory: %v", mkdirErr)
}
} else if !configSupplied {
if homeDirErr != nil {
fs.Errorf(nil, "Couldn't find configuration directory nor home directory: %v", homeDirErr)
} else {
fs.Errorf(nil, "Couldn't find configuration directory nor home directory")
}
}
// No known location that can be used: Did possibly find a configDir
// (XDG_CONFIG_HOME or APPDATA) which couldn't be created, but in any case
// did not find a home directory!
// Report it as an error, and return as last resort the path relative to current
// working directory, of .rclone.conf (legacy hidden filename).
if !configSupplied {
fs.Errorf(nil, "Defaulting to storing config in current directory.")
fs.Errorf(nil, "Use --config flag to workaround.")
}
return hiddenConfigFileName
}
// GetConfigPath returns the current config file path
func GetConfigPath() string {
return configPath
}
// SetConfigPath sets new config file path
//
// Checks for empty string, os null device, or special path, all of which indicate in-memory config.
func SetConfigPath(path string) (err error) {
var cfgPath string
if path == "" || path == os.DevNull {
cfgPath = ""
} else if filepath.Base(path) == noConfigFile {
cfgPath = ""
} else if err = file.IsReserved(path); err != nil {
return err
} else if cfgPath, err = filepath.Abs(path); err != nil {
return err
}
configPath = cfgPath
return nil
}
// SetData sets new config file storage
func SetData(newData Storage) {
data = newData
dataLoaded = false
}
// Data returns current config file storage
func Data() Storage {
return data
}
// LoadedData ensures the config file storage is loaded and returns it
func LoadedData() Storage {
if !dataLoaded {
// Set RCLONE_CONFIG_DIR for backend config and subprocesses
// If empty configPath (in-memory only) the value will be "."
_ = os.Setenv("RCLONE_CONFIG_DIR", filepath.Dir(configPath))
// Load configuration from file (or initialize sensible default if no file or error)
if err := data.Load(); err == nil {
fs.Debugf(nil, "Using config file from %q", configPath)
dataLoaded = true
} else if err == ErrorConfigFileNotFound {
if configPath == "" {
fs.Debugf(nil, "Config is memory-only - using defaults")
} else {
fs.Logf(nil, "Config file %q not found - using defaults", configPath)
}
dataLoaded = true
} else {
log.Fatalf("Failed to load config file %q: %v", configPath, err)
}
}
return data
}
// ErrorConfigFileNotFound is returned when the config file is not found
var ErrorConfigFileNotFound = errors.New("config file not found")
// SaveConfig saves the configuration file.
// If saving fails it is retried, with a short random sleep between attempts.
func SaveConfig() {
if configPath == "" {
fs.Debugf(nil, "Skipping save for memory-only config")
return
}
ctx := context.Background()
ci := fs.GetConfig(ctx)
var err error
for i := 0; i < ci.LowLevelRetries+1; i++ {
if err = LoadedData().Save(); err == nil {
return
}
waitingTimeMs := mathrand.Intn(1000)
time.Sleep(time.Duration(waitingTimeMs) * time.Millisecond)
}
fs.Errorf(nil, "Failed to save config after %d tries: %v", ci.LowLevelRetries, err)
}
// SetValueAndSave sets the key to the value and saves just that
// value in the config file. It loads the old config file in from
// disk first and overwrites the given value only.
func SetValueAndSave(name, key, value string) error {
// Set the value in config in case we fail to reload it
LoadedData().SetValue(name, key, value)
// Save it again
SaveConfig()
return nil
}
// getWithDefault gets key out of section name returning defaultValue if not
// found.
func getWithDefault(name, key, defaultValue string) string {
value, found := LoadedData().GetValue(name, key)
if !found {
return defaultValue
}
return value
}
// UpdateRemoteOpt configures the remote update
type UpdateRemoteOpt struct {
// Treat all passwords as plain that need obscuring
Obscure bool `json:"obscure"`
// Treat all passwords as obscured
NoObscure bool `json:"noObscure"`
// Don't interact with the user - return questions
NonInteractive bool `json:"nonInteractive"`
// If set then supply state and result parameters to continue the process
Continue bool `json:"continue"`
// If set then ask all the questions, not just the post config questions
All bool `json:"all"`
// State to restart with - used with Continue
State string `json:"state"`
// Result to return - used with Continue
Result string `json:"result"`
// If set then edit existing values
Edit bool `json:"edit"`
}
func updateRemote(ctx context.Context, name string, keyValues rc.Params, opt UpdateRemoteOpt) (out *fs.ConfigOut, err error) {
if opt.Obscure && opt.NoObscure {
return nil, errors.New("can't use --obscure and --no-obscure together")
}
err = fspath.CheckConfigName(name)
if err != nil {
return nil, err
}
interactive := !(opt.NonInteractive || opt.Continue)
if interactive && !opt.All {
ctx = suppressConfirm(ctx)
}
fsType := FileGet(name, "type")
if fsType == "" {
return nil, errors.New("couldn't find type field in config")
}
ri, err := fs.Find(fsType)
if err != nil {
return nil, errors.Errorf("couldn't find backend for type %q", fsType)
}
// Work out which options need to be obscured
needsObscure := map[string]struct{}{}
if !opt.NoObscure {
for _, option := range ri.Options {
if option.IsPassword {
needsObscure[option.Name] = struct{}{}
}
}
}
choices := configmap.Simple{}
m := fs.ConfigMap(ri, name, nil)
// Set the config
for k, v := range keyValues {
vStr := fmt.Sprint(v)
// Obscure parameter if necessary
if _, ok := needsObscure[k]; ok {
_, err := obscure.Reveal(vStr)
if err != nil || opt.Obscure {
// If error => not already obscured, so obscure it
// or we are forced to obscure
vStr, err = obscure.Obscure(vStr)
if err != nil {
return nil, errors.Wrap(err, "UpdateRemote: obscure failed")
}
}
}
choices.Set(k, vStr)
if !strings.HasPrefix(k, fs.ConfigKeyEphemeralPrefix) {
m.Set(k, vStr)
}
}
if opt.Edit {
choices[fs.ConfigEdit] = "true"
}
if interactive {
var state = ""
if opt.All {
state = fs.ConfigAll
}
err = backendConfig(ctx, name, m, ri, choices, state)
} else {
// Start the config state machine
in := fs.ConfigIn{
State: opt.State,
Result: opt.Result,
}
if in.State == "" && opt.All {
in.State = fs.ConfigAll
}
out, err = fs.BackendConfig(ctx, name, m, ri, choices, in)
}
if err != nil {
return nil, err
}
SaveConfig()
cache.ClearConfig(name) // remove any remotes based on this config from the cache
return out, nil
}
// UpdateRemote adds the keyValues passed in to the remote of name.
// keyValues should be key, value pairs.
func UpdateRemote(ctx context.Context, name string, keyValues rc.Params, opt UpdateRemoteOpt) (out *fs.ConfigOut, err error) {
opt.Edit = true
return updateRemote(ctx, name, keyValues, opt)
}
// CreateRemote creates a new remote with name, type and a list of
// parameters which are key, value pairs. If update is set then it
// adds the new keys rather than replacing all of them.
func CreateRemote(ctx context.Context, name string, Type string, keyValues rc.Params, opts UpdateRemoteOpt) (out *fs.ConfigOut, err error) {
err = fspath.CheckConfigName(name)
if err != nil {
return nil, err
}
if !opts.Continue {
// Delete the old config if it exists
LoadedData().DeleteSection(name)
// Set the type
LoadedData().SetValue(name, "type", Type)
}
// Set the remaining values
return UpdateRemote(ctx, name, keyValues, opts)
}
// PasswordRemote adds the keyValues passed in to the remote of name.
// keyValues should be key, value pairs.
func PasswordRemote(ctx context.Context, name string, keyValues rc.Params) error {
ctx = suppressConfirm(ctx)
err := fspath.CheckConfigName(name)
if err != nil {
return err
}
for k, v := range keyValues {
keyValues[k] = obscure.MustObscure(fmt.Sprint(v))
}
_, err = UpdateRemote(ctx, name, keyValues, UpdateRemoteOpt{
NoObscure: true,
})
return err
}
// JSONListProviders prints all the providers and options in JSON format
func JSONListProviders() error {
b, err := json.MarshalIndent(fs.Registry, "", " ")
if err != nil {
return errors.Wrap(err, "failed to marshal examples")
}
_, err = os.Stdout.Write(b)
if err != nil {
return errors.Wrap(err, "failed to write providers list")
}
return nil
}
// fsOption returns an Option describing the possible remotes
func fsOption() *fs.Option {
o := &fs.Option{
Name: "Storage",
Help: "Type of storage to configure.",
Default: "",
}
for _, item := range fs.Registry {
example := fs.OptionExample{
Value: item.Name,
Help: item.Description,
}
o.Examples = append(o.Examples, example)
}
o.Examples.Sort()
return o
}
// FileGetFlag gets the config key under section returning the
// value and true if found, or ("", false) otherwise
func FileGetFlag(section, key string) (string, bool) {
return LoadedData().GetValue(section, key)
}
// FileGet gets the config key under section returning the default if not set.
//
// It looks up defaults in the environment if they are present
func FileGet(section, key string) string {
var defaultVal string
envKey := fs.ConfigToEnv(section, key)
newValue, found := os.LookupEnv(envKey)
if found {
defaultVal = newValue
}
return getWithDefault(section, key, defaultVal)
}
// FileSet sets the key in section to value. It doesn't save
// the config file.
func FileSet(section, key, value string) {
if value != "" {
LoadedData().SetValue(section, key, value)
} else {
FileDeleteKey(section, key)
}
}
// FileDeleteKey deletes the config key in the config file.
// It returns true if the key was deleted,
// or returns false if the section or key didn't exist.
func FileDeleteKey(section, key string) bool {
return LoadedData().DeleteKey(section, key)
}
var matchEnv = regexp.MustCompile(`^RCLONE_CONFIG_(.*?)_TYPE=.*$`)
// FileSections returns the sections in the config file
// including any defined by environment variables.
func FileSections() []string {
sections := LoadedData().GetSectionList()
for _, item := range os.Environ() {
matches := matchEnv.FindStringSubmatch(item)
if len(matches) == 2 {
sections = append(sections, strings.ToLower(matches[1]))
}
}
return sections
}
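// Illustrative sketch only (not part of rclone): how matchEnv maps an
// environment variable to a section name. The remote name "myremote"
// and the function name are hypothetical.
func exampleEnvSection() string {
	if m := matchEnv.FindStringSubmatch("RCLONE_CONFIG_MYREMOTE_TYPE=s3"); len(m) == 2 {
		return strings.ToLower(m[1]) // "myremote"
	}
	return ""
}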
// DumpRcRemote dumps the config for a single remote
func DumpRcRemote(name string) (dump rc.Params) {
params := rc.Params{}
for _, key := range LoadedData().GetKeyList(name) {
params[key] = FileGet(name, key)
}
return params
}
// DumpRcBlob dumps all the config as an unstructured blob suitable
// for the rc
func DumpRcBlob() (dump rc.Params) {
dump = rc.Params{}
for _, name := range LoadedData().GetSectionList() {
dump[name] = DumpRcRemote(name)
}
return dump
}
// Dump dumps all the config as a JSON file
func Dump() error {
dump := DumpRcBlob()
b, err := json.MarshalIndent(dump, "", " ")
if err != nil {
return errors.Wrap(err, "failed to marshal config dump")
}
_, err = os.Stdout.Write(b)
if err != nil {
return errors.Wrap(err, "failed to write config dump")
}
return nil
}
// makeCacheDir returns a directory to use for caching.
//
// Code borrowed from go stdlib until it is made public
func makeCacheDir() (dir string) {
// Compute default location.
switch runtime.GOOS {
case "windows":
dir = os.Getenv("LocalAppData")
case "darwin":
dir = os.Getenv("HOME")
if dir != "" {
dir += "/Library/Caches"
}
case "plan9":
dir = os.Getenv("home")
if dir != "" {
// Plan 9 has no established per-user cache directory,
// but $home/lib/xyz is the usual equivalent of $HOME/.xyz on Unix.
dir += "/lib/cache"
}
default: // Unix
// https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
dir = os.Getenv("XDG_CACHE_HOME")
if dir == "" {
dir = os.Getenv("HOME")
if dir != "" {
dir += "/.cache"
}
}
}
// if no dir found then use TempDir - we will have a cachedir!
if dir == "" {
dir = os.TempDir()
}
return filepath.Join(dir, "rclone")
}
| fs/config/config.go | 1 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.21586911380290985,
0.009920187294483185,
0.00016492149734403938,
0.0002994905808009207,
0.03442995250225067
] |
{
"id": 5,
"code_window": [
"type setConfigFile string\n",
"\n",
"// Set a config item into the config file\n",
"func (section setConfigFile) Set(key, value string) {\n",
"\tif strings.HasPrefix(string(section), \":\") {\n",
"\t\tLogf(nil, \"Can't save config %q = %q for on the fly backend %q\", key, value, section)\n",
"\t\treturn\n",
"\t}\n",
"\tDebugf(nil, \"Saving config %q = %q in section %q of the config file\", key, value, section)\n",
"\terr := ConfigFileSet(string(section), key, value)\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tDebugf(nil, \"Saving config %q in section %q of the config file\", key, section)\n"
],
"file_path": "fs/configmap.go",
"type": "replace",
"edit_start_line_idx": 72
} | package config
import (
"fmt"
"testing"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/assert"
)
func TestArgsToMap(t *testing.T) {
for _, test := range []struct {
args []string
want rc.Params
wantErr bool
}{
{
args: []string{},
want: rc.Params{},
},
{
args: []string{"hello", "42"},
want: rc.Params{"hello": "42"},
},
{
args: []string{"hello", "42", "bye", "43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello=42", "bye", "43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello", "42", "bye=43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello=42", "bye=43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello", "42", "bye", "43", "unused"},
wantErr: true,
},
{
args: []string{"hello=42", "bye=43", "unused"},
wantErr: true,
},
} {
what := fmt.Sprintf("args = %#v", test.args)
got, err := argsToMap(test.args)
if test.wantErr {
assert.Error(t, err, what)
} else {
assert.NoError(t, err, what)
assert.Equal(t, test.want, got, what)
}
}
}
| cmd/config/config_test.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.0001755318808136508,
0.0001701414439594373,
0.0001624657161301002,
0.0001703974267002195,
0.000003963085873692762
] |
{
"id": 5,
"code_window": [
"type setConfigFile string\n",
"\n",
"// Set a config item into the config file\n",
"func (section setConfigFile) Set(key, value string) {\n",
"\tif strings.HasPrefix(string(section), \":\") {\n",
"\t\tLogf(nil, \"Can't save config %q = %q for on the fly backend %q\", key, value, section)\n",
"\t\treturn\n",
"\t}\n",
"\tDebugf(nil, \"Saving config %q = %q in section %q of the config file\", key, value, section)\n",
"\terr := ConfigFileSet(string(section), key, value)\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tDebugf(nil, \"Saving config %q in section %q of the config file\", key, section)\n"
],
"file_path": "fs/configmap.go",
"type": "replace",
"edit_start_line_idx": 72
} | #!/bin/bash
# Thrash the VFS tests
set -e
# Optionally set the iterations with the first parameter
iterations=${1:-100}
base=$(dirname $(dirname $(realpath "$0")))
echo ${base}
run=${base}/bin/test-repeat.sh
echo ${run}
testdirs="
vfs
vfs/vfscache
vfs/vfscache/writeback
vfs/vfscache/downloaders
cmd/cmount
"
for testdir in ${testdirs}; do
echo "Testing ${testdir} with ${iterations} iterations"
cd ${base}/${testdir}
${run} -i=${iterations} -race -tags=cmount
done
| bin/test-repeat-vfs.sh | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.0001735216355882585,
0.00017193001986015588,
0.00016967716510407627,
0.00017259130254387856,
0.0000016376701523768133
] |
{
"id": 5,
"code_window": [
"type setConfigFile string\n",
"\n",
"// Set a config item into the config file\n",
"func (section setConfigFile) Set(key, value string) {\n",
"\tif strings.HasPrefix(string(section), \":\") {\n",
"\t\tLogf(nil, \"Can't save config %q = %q for on the fly backend %q\", key, value, section)\n",
"\t\treturn\n",
"\t}\n",
"\tDebugf(nil, \"Saving config %q = %q in section %q of the config file\", key, value, section)\n",
"\terr := ConfigFileSet(string(section), key, value)\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tDebugf(nil, \"Saving config %q in section %q of the config file\", key, section)\n"
],
"file_path": "fs/configmap.go",
"type": "replace",
"edit_start_line_idx": 72
} | // Serve sftp tests set up a server and run the integration tests
// for the sftp remote against it.
//
// We skip tests on platforms with troublesome character mappings
//+build !windows,!darwin,!plan9
package sftp
import (
"context"
"strings"
"testing"
"github.com/pkg/sftp"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/stretchr/testify/require"
)
const (
testBindAddress = "localhost:0"
testUser = "testuser"
testPass = "testpass"
)
// check interfaces
var (
_ sftp.FileReader = vfsHandler{}
_ sftp.FileWriter = vfsHandler{}
_ sftp.FileCmder = vfsHandler{}
_ sftp.FileLister = vfsHandler{}
)
// TestSftp runs the sftp server then runs the unit tests for the
// sftp remote against it.
func TestSftp(t *testing.T) {
// Configure and start the server
start := func(f fs.Fs) (configmap.Simple, func()) {
opt := DefaultOpt
opt.ListenAddr = testBindAddress
opt.User = testUser
opt.Pass = testPass
w := newServer(context.Background(), f, &opt)
require.NoError(t, w.serve())
// Read the host and port we started on
addr := w.Addr()
colon := strings.LastIndex(addr, ":")
// Config for the backend we'll use to connect to the server
config := configmap.Simple{
"type": "sftp",
"user": testUser,
"pass": obscure.MustObscure(testPass),
"host": addr[:colon],
"port": addr[colon+1:],
}
// return a stop function
return config, func() {
w.Close()
w.Wait()
}
}
servetest.Run(t, "sftp", start)
}
| cmd/serve/sftp/sftp_test.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.0004304842441342771,
0.0002269793039886281,
0.00016766376211307943,
0.00017170089995488524,
0.00009884811152005568
] |
{
"id": 6,
"code_window": [
"\terr := ConfigFileSet(string(section), key, value)\n",
"\tif err != nil {\n",
"\t\tErrorf(nil, \"Failed saving config %q = %q in section %q of the config file: %v\", key, value, section, err)\n",
"\t}\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tErrorf(nil, \"Failed saving config %q in section %q of the config file: %v\", key, section, err)\n"
],
"file_path": "fs/configmap.go",
"type": "replace",
"edit_start_line_idx": 79
} | // Getters and Setters for ConfigMap
package fs
import (
"os"
"strings"
"github.com/rclone/rclone/fs/config/configmap"
)
// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
type configEnvVars string
// Get a config item from the environment variables if possible
func (configName configEnvVars) Get(key string) (value string, ok bool) {
envKey := ConfigToEnv(string(configName), key)
value, ok = os.LookupEnv(envKey)
if ok {
Debugf(nil, "Setting %s=%q for %q from environment variable %s", key, value, configName, envKey)
}
return value, ok
}
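// Illustrative sketch only (not part of rclone): assuming ConfigToEnv's
// naming scheme, the lookup above for remote "myremote" and key
// "chunk_size" would consult RCLONE_CONFIG_MYREMOTE_CHUNK_SIZE. The
// function name and values are hypothetical.
func exampleConfigEnvKey() string {
	return ConfigToEnv("myremote", "chunk_size")
}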
// A configmap.Getter to read from the environment RCLONE_option_name
type optionEnvVars struct {
fsInfo *RegInfo
}
// Get a config item from the option environment variables if possible
func (oev optionEnvVars) Get(key string) (value string, ok bool) {
opt := oev.fsInfo.Options.Get(key)
if opt == nil {
return "", false
}
envKey := OptionToEnv(oev.fsInfo.Prefix + "-" + key)
value, ok = os.LookupEnv(envKey)
if ok {
Debugf(nil, "Setting %s_%s=%q from environment variable %s", oev.fsInfo.Prefix, key, value, envKey)
} else if opt.NoPrefix {
// For options with NoPrefix set, check without prefix too
envKey := OptionToEnv(key)
value, ok = os.LookupEnv(envKey)
if ok {
Debugf(nil, "Setting %s=%q for %s from environment variable %s", key, value, oev.fsInfo.Prefix, envKey)
}
}
return value, ok
}
// A configmap.Getter to read either the default value or the set
// value from the RegInfo.Options
type regInfoValues struct {
fsInfo *RegInfo
useDefault bool
}
// override the values in configMap with either the flag values or
// the default values
func (r *regInfoValues) Get(key string) (value string, ok bool) {
opt := r.fsInfo.Options.Get(key)
if opt != nil && (r.useDefault || opt.Value != nil) {
return opt.String(), true
}
return "", false
}
// A configmap.Setter to write to the config file
type setConfigFile string
// Set a config item into the config file
func (section setConfigFile) Set(key, value string) {
if strings.HasPrefix(string(section), ":") {
Logf(nil, "Can't save config %q = %q for on the fly backend %q", key, value, section)
return
}
Debugf(nil, "Saving config %q = %q in section %q of the config file", key, value, section)
err := ConfigFileSet(string(section), key, value)
if err != nil {
Errorf(nil, "Failed saving config %q = %q in section %q of the config file: %v", key, value, section, err)
}
}
// A configmap.Getter to read from the config file
type getConfigFile string
// Get a config item from the config file
func (section getConfigFile) Get(key string) (value string, ok bool) {
value, ok = ConfigFileGet(string(section), key)
// Ignore empty lines in the config file
if value == "" {
ok = false
}
return value, ok
}
// ConfigMap creates a configmap.Map from the *RegInfo and the
// configName passed in. If connectionStringConfig has any entries (it may be nil),
// then it will be added to the lookup with the highest priority.
//
// If fsInfo is nil then the returned configmap.Map should only be
// used for reading non backend specific parameters, such as "type".
func ConfigMap(fsInfo *RegInfo, configName string, connectionStringConfig configmap.Simple) (config *configmap.Map) {
// Create the config
config = configmap.New()
// Read the config, more specific to least specific
// Config from connection string
if len(connectionStringConfig) > 0 {
config.AddGetter(connectionStringConfig, configmap.PriorityNormal)
}
// flag values
if fsInfo != nil {
config.AddGetter(®InfoValues{fsInfo, false}, configmap.PriorityNormal)
}
// remote specific environment vars
config.AddGetter(configEnvVars(configName), configmap.PriorityNormal)
// backend specific environment vars
if fsInfo != nil {
config.AddGetter(optionEnvVars{fsInfo: fsInfo}, configmap.PriorityNormal)
}
// config file
config.AddGetter(getConfigFile(configName), configmap.PriorityConfig)
// default values
if fsInfo != nil {
config.AddGetter(®InfoValues{fsInfo, true}, configmap.PriorityDefault)
}
// Set Config
config.AddSetter(setConfigFile(configName))
return config
}
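// Illustrative sketch only (not part of rclone): the layering above
// means a connection string value wins over the config file, which wins
// over the option default. The remote name, key and value here are
// hypothetical.
func exampleConfigMapLookup(fsInfo *RegInfo) (string, bool) {
	m := ConfigMap(fsInfo, "myremote", configmap.Simple{"chunk_size": "64M"})
	return m.Get("chunk_size") // "64M", true - from the connection string layer
}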
| fs/configmap.go | 1 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.9981734752655029,
0.0725407600402832,
0.00016701118147466332,
0.0008935164660215378,
0.25672778487205505
] |
{
"id": 6,
"code_window": [
"\terr := ConfigFileSet(string(section), key, value)\n",
"\tif err != nil {\n",
"\t\tErrorf(nil, \"Failed saving config %q = %q in section %q of the config file: %v\", key, value, section, err)\n",
"\t}\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tErrorf(nil, \"Failed saving config %q in section %q of the config file: %v\", key, section, err)\n"
],
"file_path": "fs/configmap.go",
"type": "replace",
"edit_start_line_idx": 79
} | // Textual user interface parts of the config system
package config
import (
"bufio"
"context"
"fmt"
"log"
"os"
"sort"
"strconv"
"strings"
"unicode/utf8"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/driveletter"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/lib/terminal"
"golang.org/x/text/unicode/norm"
)
// ReadLine reads some input
var ReadLine = func() string {
buf := bufio.NewReader(os.Stdin)
line, err := buf.ReadString('\n')
if err != nil {
log.Fatalf("Failed to read line: %v", err)
}
return strings.TrimSpace(line)
}
// ReadNonEmptyLine prints prompt and calls ReadLine until the result is non-empty
func ReadNonEmptyLine(prompt string) string {
result := ""
for result == "" {
fmt.Print(prompt)
result = strings.TrimSpace(ReadLine())
}
return result
}
// CommandDefault - choose one. If return is pressed then it will
// choose the defaultIndex if it is >= 0
func CommandDefault(commands []string, defaultIndex int) byte {
opts := []string{}
for i, text := range commands {
def := ""
if i == defaultIndex {
def = " (default)"
}
fmt.Printf("%c) %s%s\n", text[0], text[1:], def)
opts = append(opts, text[:1])
}
optString := strings.Join(opts, "")
optHelp := strings.Join(opts, "/")
for {
fmt.Printf("%s> ", optHelp)
result := strings.ToLower(ReadLine())
if len(result) == 0 && defaultIndex >= 0 {
return optString[defaultIndex]
}
if len(result) != 1 {
continue
}
i := strings.Index(optString, string(result[0]))
if i >= 0 {
return result[0]
}
}
}
// Command - choose one
func Command(commands []string) byte {
return CommandDefault(commands, -1)
}
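// Illustrative sketch only (not part of rclone): a three way menu built
// with CommandDefault where pressing Enter selects "Yes". The first
// character of each entry is the key the user types; the menu strings
// are hypothetical.
func exampleMenu() byte {
	return CommandDefault([]string{"yYes", "nNo", "aAbort"}, 0)
}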
// Confirm asks the user for Yes or No and returns true or false
//
// If the user presses enter then the Default will be used
func Confirm(Default bool) bool {
defaultIndex := 0
if !Default {
defaultIndex = 1
}
return CommandDefault([]string{"yYes", "nNo"}, defaultIndex) == 'y'
}
// Choose one of the defaults or type a new string if newOk is set
func Choose(what string, defaults, help []string, newOk bool) string {
valueDescription := "an existing"
if newOk {
valueDescription = "your own"
}
fmt.Printf("Choose a number from below, or type in %s value\n", valueDescription)
attributes := []string{terminal.HiRedFg, terminal.HiGreenFg}
for i, text := range defaults {
var lines []string
if help != nil {
parts := strings.Split(help[i], "\n")
lines = append(lines, parts...)
}
lines = append(lines, fmt.Sprintf("%q", text))
pos := i + 1
terminal.WriteString(attributes[i%len(attributes)])
if len(lines) == 1 {
fmt.Printf("%2d > %s\n", pos, text)
} else {
mid := (len(lines) - 1) / 2
for i, line := range lines {
var sep rune
switch i {
case 0:
sep = '/'
case len(lines) - 1:
sep = '\\'
default:
sep = '|'
}
number := " "
if i == mid {
number = fmt.Sprintf("%2d", pos)
}
fmt.Printf("%s %c %s\n", number, sep, line)
}
}
terminal.WriteString(terminal.Reset)
}
for {
fmt.Printf("%s> ", what)
result := ReadLine()
i, err := strconv.Atoi(result)
if err != nil {
if newOk {
return result
}
for _, v := range defaults {
if result == v {
return result
}
}
continue
}
if i >= 1 && i <= len(defaults) {
return defaults[i-1]
}
}
}
// ChooseNumber asks the user to enter a number between min and max
// inclusive prompting them with what.
func ChooseNumber(what string, min, max int) int {
for {
fmt.Printf("%s> ", what)
result := ReadLine()
i, err := strconv.Atoi(result)
if err != nil {
fmt.Printf("Bad number: %v\n", err)
continue
}
if i < min || i > max {
fmt.Printf("Out of range - %d to %d inclusive\n", min, max)
continue
}
return i
}
}
// ShowRemotes shows an overview of the config file
func ShowRemotes() {
remotes := LoadedData().GetSectionList()
if len(remotes) == 0 {
return
}
sort.Strings(remotes)
fmt.Printf("%-20s %s\n", "Name", "Type")
fmt.Printf("%-20s %s\n", "====", "====")
for _, remote := range remotes {
fmt.Printf("%-20s %s\n", remote, FileGet(remote, "type"))
}
}
// ChooseRemote chooses a remote name
func ChooseRemote() string {
remotes := LoadedData().GetSectionList()
sort.Strings(remotes)
return Choose("remote", remotes, nil, false)
}
// mustFindByName finds the RegInfo for the remote name passed in or
// exits with a fatal error.
func mustFindByName(name string) *fs.RegInfo {
fsType := FileGet(name, "type")
if fsType == "" {
log.Fatalf("Couldn't find type of fs for %q", name)
}
return fs.MustFind(fsType)
}
// ShowRemote shows the contents of the remote
func ShowRemote(name string) {
fmt.Printf("--------------------\n")
fmt.Printf("[%s]\n", name)
fs := mustFindByName(name)
for _, key := range LoadedData().GetKeyList(name) {
isPassword := false
for _, option := range fs.Options {
if option.Name == key && option.IsPassword {
isPassword = true
break
}
}
value := FileGet(name, key)
if isPassword && value != "" {
fmt.Printf("%s = *** ENCRYPTED ***\n", key)
} else {
fmt.Printf("%s = %s\n", key, value)
}
}
fmt.Printf("--------------------\n")
}
// OkRemote prints the contents of the remote and asks if it is OK
func OkRemote(name string) bool {
ShowRemote(name)
switch i := CommandDefault([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}, 0); i {
case 'y':
return true
case 'e':
return false
case 'd':
LoadedData().DeleteSection(name)
return true
default:
fs.Errorf(nil, "Bad choice %c", i)
}
return false
}
// backendConfig configures the backend starting from the state passed in
//
// This is the user interface loop that drives the post configuration backend config.
func backendConfig(ctx context.Context, name string, m configmap.Mapper, ri *fs.RegInfo, choices configmap.Getter, startState string) error {
in := fs.ConfigIn{
State: startState,
}
for {
out, err := fs.BackendConfig(ctx, name, m, ri, choices, in)
if err != nil {
return err
}
if out == nil {
break
}
if out.Error != "" {
fmt.Println(out.Error)
}
in.State = out.State
in.Result = out.Result
if out.Option != nil {
fs.Debugf(name, "config: reading config parameter %q", out.Option.Name)
if out.Option.Default == nil {
out.Option.Default = ""
}
if Default, isBool := out.Option.Default.(bool); isBool &&
len(out.Option.Examples) == 2 &&
out.Option.Examples[0].Help == "Yes" &&
out.Option.Examples[0].Value == "true" &&
out.Option.Examples[1].Help == "No" &&
out.Option.Examples[1].Value == "false" &&
out.Option.Exclusive {
// Use Confirm for Yes/No questions as it has a nicer interface
fmt.Println(out.Option.Help)
in.Result = fmt.Sprint(Confirm(Default))
} else {
value := ChooseOption(out.Option, "")
if value != "" {
err := out.Option.Set(value)
if err != nil {
return errors.Wrap(err, "failed to set option")
}
}
in.Result = out.Option.String()
}
}
if out.State == "" {
break
}
}
return nil
}
// PostConfig configures the backend after the main config has been done
//
// This is the user interface loop that drives the post configuration backend config.
func PostConfig(ctx context.Context, name string, m configmap.Mapper, ri *fs.RegInfo) error {
if ri.Config == nil {
return errors.New("backend doesn't support reconnect or authorize")
}
return backendConfig(ctx, name, m, ri, configmap.Simple{}, "")
}
// RemoteConfig runs the config helper for the remote if needed
func RemoteConfig(ctx context.Context, name string) error {
fmt.Printf("Remote config\n")
ri := mustFindByName(name)
m := fs.ConfigMap(ri, name, nil)
if ri.Config == nil {
return nil
}
return PostConfig(ctx, name, m, ri)
}
// ChooseOption asks the user to choose an option
func ChooseOption(o *fs.Option, name string) string {
fmt.Println(o.Help)
if o.IsPassword {
actions := []string{"yYes type in my own password", "gGenerate random password"}
defaultAction := -1
if !o.Required {
defaultAction = len(actions)
actions = append(actions, "nNo leave this optional password blank")
}
var password string
var err error
switch i := CommandDefault(actions, defaultAction); i {
case 'y':
password = ChangePassword("the")
case 'g':
for {
fmt.Printf("Password strength in bits.\n64 is just about memorable\n128 is secure\n1024 is the maximum\n")
bits := ChooseNumber("Bits", 64, 1024)
password, err = Password(bits)
if err != nil {
log.Fatalf("Failed to make password: %v", err)
}
fmt.Printf("Your password is: %s\n", password)
fmt.Printf("Use this password? Please note that an obscured version of this \npassword (and not the " +
"password itself) will be stored under your \nconfiguration file, so keep this generated password " +
"in a safe place.\n")
if Confirm(true) {
break
}
}
case 'n':
return ""
default:
fs.Errorf(nil, "Bad choice %c", i)
}
return obscure.MustObscure(password)
}
what := fmt.Sprintf("%T value", o.Default)
switch o.Default.(type) {
case bool:
what = "boolean value (true or false)"
case fs.SizeSuffix:
what = "size with suffix K,M,G,T"
case fs.Duration:
what = "duration s,m,h,d,w,M,y"
case int, int8, int16, int32, int64:
what = "signed integer"
case uint, byte, uint16, uint32, uint64:
what = "unsigned integer"
}
var in string
for {
fmt.Printf("Enter a %s. Press Enter for the default (%q).\n", what, fmt.Sprint(o.Default))
if len(o.Examples) > 0 {
var values []string
var help []string
for _, example := range o.Examples {
values = append(values, example.Value)
help = append(help, example.Help)
}
in = Choose(o.Name, values, help, !o.Exclusive)
} else {
fmt.Printf("%s> ", o.Name)
in = ReadLine()
}
if in == "" {
if o.Required && fmt.Sprint(o.Default) == "" {
fmt.Printf("This value is required and it has no default.\n")
continue
}
break
}
newIn, err := configstruct.StringToInterface(o.Default, in)
if err != nil {
fmt.Printf("Failed to parse %q: %v\n", in, err)
continue
}
in = fmt.Sprint(newIn) // canonicalise
break
}
return in
}
// NewRemoteName asks the user for a name for a new remote
func NewRemoteName() (name string) {
for {
fmt.Printf("name> ")
name = ReadLine()
if LoadedData().HasSection(name) {
fmt.Printf("Remote %q already exists.\n", name)
continue
}
err := fspath.CheckConfigName(name)
switch {
case name == "":
fmt.Printf("Can't use empty name.\n")
case driveletter.IsDriveLetter(name):
fmt.Printf("Can't use %q as it can be confused with a drive letter.\n", name)
case err != nil:
fmt.Printf("Can't use %q as %v.\n", name, err)
default:
return name
}
}
}
// NewRemote makes a new remote from its name
func NewRemote(ctx context.Context, name string) error {
var (
newType string
ri *fs.RegInfo
err error
)
// Set the type first
for {
newType = ChooseOption(fsOption(), name)
ri, err = fs.Find(newType)
if err != nil {
fmt.Printf("Bad remote %q: %v\n", newType, err)
continue
}
break
}
LoadedData().SetValue(name, "type", newType)
_, err = CreateRemote(ctx, name, newType, nil, UpdateRemoteOpt{
All: true,
})
if err != nil {
return err
}
if OkRemote(name) {
SaveConfig()
return nil
}
return EditRemote(ctx, ri, name)
}
// EditRemote gets the user to edit a remote
func EditRemote(ctx context.Context, ri *fs.RegInfo, name string) error {
ShowRemote(name)
fmt.Printf("Edit remote\n")
for {
_, err := UpdateRemote(ctx, name, nil, UpdateRemoteOpt{
All: true,
})
if err != nil {
return err
}
if OkRemote(name) {
break
}
}
SaveConfig()
return nil
}
// DeleteRemote gets the user to delete a remote
func DeleteRemote(name string) {
LoadedData().DeleteSection(name)
SaveConfig()
}
// copyRemote asks the user for a new remote name and copies name into
// it. Returns the new name.
func copyRemote(name string) string {
newName := NewRemoteName()
// Copy the keys
for _, key := range LoadedData().GetKeyList(name) {
value := getWithDefault(name, key, "")
LoadedData().SetValue(newName, key, value)
}
return newName
}
// RenameRemote renames a config section
func RenameRemote(name string) {
fmt.Printf("Enter new name for %q remote.\n", name)
newName := copyRemote(name)
if name != newName {
LoadedData().DeleteSection(name)
SaveConfig()
}
}
// CopyRemote copies a config section
func CopyRemote(name string) {
fmt.Printf("Enter name for copy of %q remote.\n", name)
copyRemote(name)
SaveConfig()
}
// ShowConfigLocation prints the location of the config file in use
func ShowConfigLocation() {
if configPath := GetConfigPath(); configPath == "" {
fmt.Println("Configuration is in memory only")
} else {
if _, err := os.Stat(configPath); os.IsNotExist(err) {
fmt.Println("Configuration file doesn't exist, but rclone will use this path:")
} else {
fmt.Println("Configuration file is stored at:")
}
fmt.Printf("%s\n", configPath)
}
}
// ShowConfig prints the (unencrypted) config options
func ShowConfig() {
str, err := LoadedData().Serialize()
if err != nil {
log.Fatalf("Failed to serialize config: %v", err)
}
if str == "" {
str = "; empty config\n"
}
fmt.Printf("%s", str)
}
// EditConfig edits the config file interactively
func EditConfig(ctx context.Context) (err error) {
for {
haveRemotes := len(LoadedData().GetSectionList()) != 0
what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "rRename remote", "cCopy remote", "sSet configuration password", "qQuit config"}
if haveRemotes {
fmt.Printf("Current remotes:\n\n")
ShowRemotes()
fmt.Printf("\n")
} else {
fmt.Printf("No remotes found - make a new one\n")
// take 2nd item and last 2 items of menu list
what = append(what[1:2], what[len(what)-2:]...)
}
switch i := Command(what); i {
case 'e':
name := ChooseRemote()
fs := mustFindByName(name)
err = EditRemote(ctx, fs, name)
if err != nil {
return err
}
case 'n':
err = NewRemote(ctx, NewRemoteName())
if err != nil {
return err
}
case 'd':
name := ChooseRemote()
DeleteRemote(name)
case 'r':
RenameRemote(ChooseRemote())
case 'c':
CopyRemote(ChooseRemote())
case 's':
SetPassword()
case 'q':
return nil
}
}
}
// suppressConfirm suppresses the confirm prompts by altering the context config
func suppressConfirm(ctx context.Context) context.Context {
newCtx, ci := fs.AddConfig(ctx)
ci.AutoConfirm = true
return newCtx
}
// checkPassword normalises and validates the password
func checkPassword(password string) (string, error) {
if !utf8.ValidString(password) {
return "", errors.New("password contains invalid utf8 characters")
}
// Check for leading/trailing whitespace
trimmedPassword := strings.TrimSpace(password)
// Warn user if password has leading or trailing whitespace
if len(password) != len(trimmedPassword) {
_, _ = fmt.Fprintln(os.Stderr, "Your password contains leading/trailing whitespace - in previous versions of rclone this was stripped")
}
// Normalize to reduce weird variations.
password = norm.NFKC.String(password)
if len(password) == 0 || len(trimmedPassword) == 0 {
return "", errors.New("no characters in password")
}
return password, nil
}
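// Hedged sketch (not part of the original file): because checkPassword
// applies NFKC normalization, composed and decomposed spellings of the
// same password come out identical, and whitespace-only input is
// rejected.
func exampleCheckPassword() {
	a, _ := checkPassword("pa\u0301ss") // "a" followed by a combining acute accent
	b, _ := checkPassword("p\u00e1ss")  // precomposed "á"
	fmt.Println(a == b) // true - both NFKC-normalise to the same string
	_, err := checkPassword("   ") // whitespace only
	fmt.Println(err) // "no characters in password"
}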
// GetPassword asks the user for a password with the prompt given.
func GetPassword(prompt string) string {
_, _ = fmt.Fprintln(PasswordPromptOutput, prompt)
for {
_, _ = fmt.Fprint(PasswordPromptOutput, "password:")
password := ReadPassword()
password, err := checkPassword(password)
if err == nil {
return password
}
_, _ = fmt.Fprintf(os.Stderr, "Bad password: %v\n", err)
}
}
// ChangePassword will query the user twice for the named password. If
// the same password is entered it is returned.
func ChangePassword(name string) string {
for {
a := GetPassword(fmt.Sprintf("Enter %s password:", name))
b := GetPassword(fmt.Sprintf("Confirm %s password:", name))
if a == b {
return a
}
fmt.Println("Passwords do not match!")
}
}
// SetPassword will allow the user to modify the current
// configuration encryption settings.
func SetPassword() {
for {
if len(configKey) > 0 {
fmt.Println("Your configuration is encrypted.")
what := []string{"cChange Password", "uUnencrypt configuration", "qQuit to main menu"}
switch i := Command(what); i {
case 'c':
changeConfigPassword()
SaveConfig()
fmt.Println("Password changed")
continue
case 'u':
configKey = nil
SaveConfig()
continue
case 'q':
return
}
} else {
fmt.Println("Your configuration is not encrypted.")
fmt.Println("If you add a password, you will protect your login information to cloud services.")
what := []string{"aAdd Password", "qQuit to main menu"}
switch i := Command(what); i {
case 'a':
changeConfigPassword()
SaveConfig()
fmt.Println("Password set")
continue
case 'q':
return
}
}
}
}
| fs/config/ui.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.0020729978568851948,
0.00028488380485214293,
0.00016113457968458533,
0.00017337063036393374,
0.00034180283546447754
] |
{
"id": 6,
"code_window": [
"\terr := ConfigFileSet(string(section), key, value)\n",
"\tif err != nil {\n",
"\t\tErrorf(nil, \"Failed saving config %q = %q in section %q of the config file: %v\", key, value, section, err)\n",
"\t}\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tErrorf(nil, \"Failed saving config %q in section %q of the config file: %v\", key, section, err)\n"
],
"file_path": "fs/configmap.go",
"type": "replace",
"edit_start_line_idx": 79
} | package fs
// SizeSuffix is parsed by flag with K/M/G binary suffixes
import (
"encoding/json"
"fmt"
"math"
"sort"
"strconv"
"strings"
"github.com/pkg/errors"
)
// SizeSuffix is an int64 with a friendly way of printing and setting
type SizeSuffix int64
// Common multipliers for SizeSuffix
const (
SizeSuffixBase SizeSuffix = 1 << (iota * 10)
Kibi
Mebi
Gibi
Tebi
Pebi
Exbi
)
const (
// SizeSuffixMax is the largest SizeSuffix multiplier
SizeSuffixMax = Exbi
// SizeSuffixMaxValue is the largest value that can be used to create SizeSuffix
SizeSuffixMaxValue = math.MaxInt64
// SizeSuffixMinValue is the smallest value that can be used to create SizeSuffix
SizeSuffixMinValue = math.MinInt64
)
// Turn SizeSuffix into a string and a suffix
func (x SizeSuffix) string() (string, string) {
scaled := float64(0)
suffix := ""
switch {
case x < 0:
return "off", ""
case x == 0:
return "0", ""
case x < Kibi:
scaled = float64(x)
suffix = ""
case x < Mebi:
scaled = float64(x) / float64(Kibi)
suffix = "Ki"
case x < Gibi:
scaled = float64(x) / float64(Mebi)
suffix = "Mi"
case x < Tebi:
scaled = float64(x) / float64(Gibi)
suffix = "Gi"
case x < Pebi:
scaled = float64(x) / float64(Tebi)
suffix = "Ti"
case x < Exbi:
scaled = float64(x) / float64(Pebi)
suffix = "Pi"
default:
scaled = float64(x) / float64(Exbi)
suffix = "Ei"
}
if math.Floor(scaled) == scaled {
return fmt.Sprintf("%.0f", scaled), suffix
}
return fmt.Sprintf("%.3f", scaled), suffix
}
// String turns SizeSuffix into a string
func (x SizeSuffix) String() string {
val, suffix := x.string()
return val + suffix
}
// Unit turns SizeSuffix into a string with a unit
func (x SizeSuffix) unit(unit string) string {
val, suffix := x.string()
if val == "off" {
return val
}
	return val + " " + suffix + unit
}
// BitUnit turns SizeSuffix into a string with bit unit
func (x SizeSuffix) BitUnit() string {
return x.unit("bit")
}
// BitRateUnit turns SizeSuffix into a string with bit rate unit
func (x SizeSuffix) BitRateUnit() string {
return x.unit("bit/s")
}
// ByteUnit turns SizeSuffix into a string with byte unit
func (x SizeSuffix) ByteUnit() string {
return x.unit("Byte")
}
// ByteRateUnit turns SizeSuffix into a string with byte rate unit
func (x SizeSuffix) ByteRateUnit() string {
return x.unit("Byte/s")
}
// ByteShortUnit turns SizeSuffix into a string with byte unit short form
func (x SizeSuffix) ByteShortUnit() string {
return x.unit("B")
}
// ByteRateShortUnit turns SizeSuffix into a string with byte rate unit short form
func (x SizeSuffix) ByteRateShortUnit() string {
return x.unit("B/s")
}
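// Hedged sketch (not part of the original file): what the helpers above
// produce for one mebibyte.
func exampleSizeSuffixUnits() {
	x := SizeSuffix(1 << 20)
	fmt.Println(x.String())        // "1Mi"
	fmt.Println(x.ByteUnit())      // "1 MiByte"
	fmt.Println(x.ByteShortUnit()) // "1 MiB"
	fmt.Println(x.BitRateUnit())   // "1 Mibit/s"
}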
func (x *SizeSuffix) multiplierFromSymbol(s byte) (found bool, multiplier float64) {
switch s {
case 'k', 'K':
return true, float64(Kibi)
case 'm', 'M':
return true, float64(Mebi)
case 'g', 'G':
return true, float64(Gibi)
case 't', 'T':
return true, float64(Tebi)
case 'p', 'P':
return true, float64(Pebi)
case 'e', 'E':
return true, float64(Exbi)
default:
return false, float64(SizeSuffixBase)
}
}
// Set a SizeSuffix
func (x *SizeSuffix) Set(s string) error {
if len(s) == 0 {
return errors.New("empty string")
}
if strings.ToLower(s) == "off" {
*x = -1
return nil
}
suffix := s[len(s)-1]
suffixLen := 1
multiplierFound := false
var multiplier float64
switch suffix {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
suffixLen = 0
multiplier = float64(Kibi)
case 'b', 'B':
if len(s) > 2 && s[len(s)-2] == 'i' {
suffix = s[len(s)-3]
suffixLen = 3
if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); !multiplierFound {
return errors.Errorf("bad suffix %q", suffix)
}
// Could also support SI form MB, and treat it equivalent to MiB, but perhaps better to reserve it for CountSuffix?
//} else if len(s) > 1 {
// suffix = s[len(s)-2]
// if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); multiplierFound {
// suffixLen = 2
// }
//}
} else {
multiplier = float64(SizeSuffixBase)
}
case 'i', 'I':
if len(s) > 1 {
suffix = s[len(s)-2]
suffixLen = 2
multiplierFound, multiplier = x.multiplierFromSymbol(suffix)
}
if !multiplierFound {
return errors.Errorf("bad suffix %q", suffix)
}
default:
if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); !multiplierFound {
return errors.Errorf("bad suffix %q", suffix)
}
}
s = s[:len(s)-suffixLen]
value, err := strconv.ParseFloat(s, 64)
if err != nil {
return err
}
if value < 0 {
return errors.Errorf("size can't be negative %q", s)
}
value *= multiplier
*x = SizeSuffix(value)
return nil
}
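// Hedged sketch (not part of the original file): how Set treats common
// inputs. Bare numbers are interpreted as KiB, a trailing "B" means
// bytes, and "off" maps to -1.
func exampleSizeSuffixSet() {
	var x SizeSuffix
	_ = x.Set("10")     // 10 KiB -> 10240 bytes
	_ = x.Set("10B")    // 10 bytes
	_ = x.Set("1.5MiB") // 1.5 MiB -> 1572864 bytes
	_ = x.Set("off")    // -1 (disabled)
}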
// Type of the value
func (x *SizeSuffix) Type() string {
return "SizeSuffix"
}
// Scan implements the fmt.Scanner interface
func (x *SizeSuffix) Scan(s fmt.ScanState, ch rune) error {
token, err := s.Token(true, nil)
if err != nil {
return err
}
return x.Set(string(token))
}
// SizeSuffixList is a slice of SizeSuffix values
type SizeSuffixList []SizeSuffix
func (l SizeSuffixList) Len() int { return len(l) }
func (l SizeSuffixList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l SizeSuffixList) Less(i, j int) bool { return l[i] < l[j] }
// Sort sorts the list
func (l SizeSuffixList) Sort() {
sort.Sort(l)
}
// UnmarshalJSONFlag unmarshals a JSON input for a flag. If the input
// is a string then it calls the Set method on the flag otherwise it
// calls the setInt function with a parsed int64.
func UnmarshalJSONFlag(in []byte, x interface{ Set(string) error }, setInt func(int64) error) error {
// Try to parse as string first
var s string
err := json.Unmarshal(in, &s)
if err == nil {
return x.Set(s)
}
// If that fails parse as integer
var i int64
err = json.Unmarshal(in, &i)
if err != nil {
return err
}
return setInt(i)
}
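// Hedged sketch (not part of the original file): the string and integer
// JSON encodings decode to the same value via UnmarshalJSONFlag.
func exampleSizeSuffixJSON() {
	var a, b SizeSuffix
	_ = json.Unmarshal([]byte(`"1Ki"`), &a) // string form, parsed by Set
	_ = json.Unmarshal([]byte(`1024`), &b)  // integer form, via setInt
	fmt.Println(a == b)                     // true
}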
// UnmarshalJSON makes sure the value can be parsed as a string or integer in JSON
func (x *SizeSuffix) UnmarshalJSON(in []byte) error {
return UnmarshalJSONFlag(in, x, func(i int64) error {
*x = SizeSuffix(i)
return nil
})
}
| fs/sizesuffix.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.0017622722079977393,
0.0002920189290307462,
0.00016757745470385998,
0.00017532570927869529,
0.0003296676732134074
] |
{
"id": 6,
"code_window": [
"\terr := ConfigFileSet(string(section), key, value)\n",
"\tif err != nil {\n",
"\t\tErrorf(nil, \"Failed saving config %q = %q in section %q of the config file: %v\", key, value, section, err)\n",
"\t}\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tErrorf(nil, \"Failed saving config %q in section %q of the config file: %v\", key, section, err)\n"
],
"file_path": "fs/configmap.go",
"type": "replace",
"edit_start_line_idx": 79
} | package oauthutil
import (
"context"
"encoding/json"
"fmt"
"html/template"
"net"
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/random"
"github.com/skratchdot/open-golang/open"
"golang.org/x/oauth2"
)
const (
// TitleBarRedirectURL is the OAuth2 redirect URL to use when the authorization
// code should be returned in the title bar of the browser, with the page text
// prompting the user to copy the code and paste it in the application.
TitleBarRedirectURL = "urn:ietf:wg:oauth:2.0:oob"
// bindPort is the port that we bind the local webserver to
bindPort = "53682"
// bindAddress is binding for local webserver when active
bindAddress = "127.0.0.1:" + bindPort
// RedirectURL is redirect to local webserver when active
RedirectURL = "http://" + bindAddress + "/"
// RedirectPublicURL is redirect to local webserver when active with public name
RedirectPublicURL = "http://localhost.rclone.org:" + bindPort + "/"
// RedirectLocalhostURL is redirect to local webserver when active with localhost
RedirectLocalhostURL = "http://localhost:" + bindPort + "/"
// RedirectPublicSecureURL is a public https URL which
// redirects to the local webserver
RedirectPublicSecureURL = "https://oauth.rclone.org/"
// AuthResponseTemplate is a template to handle the redirect URL for oauth requests
AuthResponseTemplate = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ if .OK }}Success!{{ else }}Failure!{{ end }}</title>
</head>
<body>
<h1>{{ if .OK }}Success!{{ else }}Failure!{{ end }}</h1>
<hr>
<pre style="width: 750px; white-space: pre-wrap;">
{{ if eq .OK false }}
Error: {{ .Name }}<br>
{{ if .Description }}Description: {{ .Description }}<br>{{ end }}
{{ if .Code }}Code: {{ .Code }}<br>{{ end }}
{{ if .HelpURL }}Look here for help: <a href="{{ .HelpURL }}">{{ .HelpURL }}</a><br>{{ end }}
{{ else }}
All done. Please go back to rclone.
{{ end }}
</pre>
</body>
</html>
`
)
// SharedOptions are shared between backends that utilize an OAuth flow
var SharedOptions = []fs.Option{{
Name: config.ConfigClientID,
Help: "OAuth Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "OAuth Client Secret\nLeave blank normally.",
}, {
Name: config.ConfigToken,
Help: "OAuth Access Token as a JSON blob.",
Advanced: true,
}, {
Name: config.ConfigAuthURL,
Help: "Auth server URL.\nLeave blank to use the provider defaults.",
Advanced: true,
}, {
Name: config.ConfigTokenURL,
Help: "Token server url.\nLeave blank to use the provider defaults.",
Advanced: true,
}}
// oldToken contains an end-user's tokens.
// This is the data you must store to persist authentication.
//
// From the original code.google.com/p/goauth2/oauth package - used
// for backwards compatibility in the rclone config file
type oldToken struct {
AccessToken string
RefreshToken string
Expiry time.Time
}
// GetToken returns the token saved in the config file under
// section name.
func GetToken(name string, m configmap.Mapper) (*oauth2.Token, error) {
tokenString, ok := m.Get(config.ConfigToken)
if !ok || tokenString == "" {
return nil, errors.Errorf("empty token found - please run \"rclone config reconnect %s:\"", name)
}
token := new(oauth2.Token)
err := json.Unmarshal([]byte(tokenString), token)
if err != nil {
return nil, err
}
// if has data then return it
if token.AccessToken != "" {
return token, nil
}
// otherwise try parsing as oldToken
oldtoken := new(oldToken)
err = json.Unmarshal([]byte(tokenString), oldtoken)
if err != nil {
return nil, err
}
// Fill in result into new token
token.AccessToken = oldtoken.AccessToken
token.RefreshToken = oldtoken.RefreshToken
token.Expiry = oldtoken.Expiry
// Save new format in config file
err = PutToken(name, m, token, false)
if err != nil {
return nil, err
}
return token, nil
}
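// Hedged illustration (not part of the original file): both of these
// stored config values decode to the same token - the first via
// oauth2.Token's JSON tags, the second via the legacy oldToken fields:
//
//	{"access_token":"xyz","refresh_token":"abc","expiry":"2021-01-01T00:00:00Z"}
//	{"AccessToken":"xyz","RefreshToken":"abc","Expiry":"2021-01-01T00:00:00Z"}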
// PutToken stores the token in the config file
//
// This saves the config file if it changes
func PutToken(name string, m configmap.Mapper, token *oauth2.Token, newSection bool) error {
tokenBytes, err := json.Marshal(token)
if err != nil {
return err
}
tokenString := string(tokenBytes)
old, ok := m.Get(config.ConfigToken)
if !ok || tokenString != old {
m.Set(config.ConfigToken, tokenString)
fs.Debugf(name, "Saved new token in config file")
}
return nil
}
// TokenSource stores updated tokens in the config file
type TokenSource struct {
mu sync.Mutex
name string
m configmap.Mapper
tokenSource oauth2.TokenSource
token *oauth2.Token
config *oauth2.Config
ctx context.Context
expiryTimer *time.Timer // signals whenever the token expires
}
// If token has expired then first try re-reading it (and its refresh token)
// from the config file in case a concurrently running rclone has updated them
// already.
// Returns whether either of the two tokens has been reread.
func (ts *TokenSource) reReadToken() (changed bool) {
tokenString, found := ts.m.Get(config.ConfigToken)
if !found || tokenString == "" {
fs.Debugf(ts.name, "Failed to read token out of config file")
return false
}
newToken := new(oauth2.Token)
err := json.Unmarshal([]byte(tokenString), newToken)
if err != nil {
fs.Debugf(ts.name, "Failed to parse token out of config file: %v", err)
return false
}
if !newToken.Valid() {
fs.Debugf(ts.name, "Loaded invalid token from config file - ignoring")
} else {
fs.Debugf(ts.name, "Loaded fresh token from config file")
changed = true
}
if newToken.RefreshToken != "" && newToken.RefreshToken != ts.token.RefreshToken {
fs.Debugf(ts.name, "Loaded new refresh token from config file")
changed = true
}
if changed {
ts.token = newToken
ts.tokenSource = nil // invalidate since we changed the token
}
return changed
}
// Token returns a token or an error.
// Token must be safe for concurrent use by multiple goroutines.
// The returned Token must not be modified.
//
// This saves the token in the config file if it has changed
func (ts *TokenSource) Token() (*oauth2.Token, error) {
ts.mu.Lock()
defer ts.mu.Unlock()
var (
token *oauth2.Token
err error
changed = false
)
const maxTries = 5
// Try getting the token a few times
for i := 1; i <= maxTries; i++ {
// Try reading the token from the config file in case it has
// been updated by a concurrent rclone process
if !ts.token.Valid() {
if ts.reReadToken() {
changed = true
} else if ts.token.RefreshToken == "" {
return nil, fserrors.FatalError(
fmt.Errorf("token expired and there's no refresh token - manually refresh with \"rclone config reconnect %s:\"", ts.name),
)
}
}
// Make a new token source if required
if ts.tokenSource == nil {
ts.tokenSource = ts.config.TokenSource(ts.ctx, ts.token)
}
token, err = ts.tokenSource.Token()
if err == nil {
break
}
fs.Debugf(ts.name, "Token refresh failed try %d/%d: %v", i, maxTries, err)
time.Sleep(1 * time.Second)
}
if err != nil {
return nil, errors.Wrapf(err, "couldn't fetch token - maybe it has expired? - refresh with \"rclone config reconnect %s:\"", ts.name)
}
changed = changed || (*token != *ts.token)
ts.token = token
if changed {
// Bump on the expiry timer if it is set
if ts.expiryTimer != nil {
ts.expiryTimer.Reset(ts.timeToExpiry())
}
err = PutToken(ts.name, ts.m, token, false)
if err != nil {
return nil, errors.Wrap(err, "couldn't store token")
}
}
return token, nil
}
// Invalidate invalidates the token
func (ts *TokenSource) Invalidate() {
ts.mu.Lock()
ts.token.AccessToken = ""
ts.mu.Unlock()
}
// timeToExpiry returns how long until the token expires
//
// Call with the lock held
func (ts *TokenSource) timeToExpiry() time.Duration {
t := ts.token
if t == nil {
return 0
}
if t.Expiry.IsZero() {
return 3e9 * time.Second // ~95 years
}
	return time.Until(t.Expiry)
}
// OnExpiry returns a channel which has the time written to it when
// the token expires. Note that there is only one channel so if
// attaching multiple go routines it will only signal to one of them.
func (ts *TokenSource) OnExpiry() <-chan time.Time {
ts.mu.Lock()
defer ts.mu.Unlock()
if ts.expiryTimer == nil {
ts.expiryTimer = time.NewTimer(ts.timeToExpiry())
}
return ts.expiryTimer.C
}
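// Hedged usage sketch (not part of the original file): a goroutine can
// watch OnExpiry to react when the token lapses; Token() re-arms the
// timer whenever a refreshed token is saved.
//
//	go func() {
//		for range ts.OnExpiry() {
//			fs.Debugf(nil, "token expired - next Token() call will refresh it")
//		}
//	}()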
// Check interface satisfied
var _ oauth2.TokenSource = (*TokenSource)(nil)
// Context returns a context with our HTTP Client baked in for oauth2
func Context(ctx context.Context, client *http.Client) context.Context {
return context.WithValue(ctx, oauth2.HTTPClient, client)
}
// overrideCredentials sets the ClientID and ClientSecret from the
// config file if they are not blank.
// If any value is overridden, true is returned.
// the origConfig is copied
func overrideCredentials(name string, m configmap.Mapper, origConfig *oauth2.Config) (newConfig *oauth2.Config, changed bool) {
newConfig = new(oauth2.Config)
*newConfig = *origConfig
changed = false
ClientID, ok := m.Get(config.ConfigClientID)
if ok && ClientID != "" {
newConfig.ClientID = ClientID
changed = true
}
ClientSecret, ok := m.Get(config.ConfigClientSecret)
if ok && ClientSecret != "" {
newConfig.ClientSecret = ClientSecret
changed = true
}
AuthURL, ok := m.Get(config.ConfigAuthURL)
if ok && AuthURL != "" {
newConfig.Endpoint.AuthURL = AuthURL
changed = true
}
TokenURL, ok := m.Get(config.ConfigTokenURL)
if ok && TokenURL != "" {
newConfig.Endpoint.TokenURL = TokenURL
changed = true
}
return newConfig, changed
}
// NewClientWithBaseClient gets a token from the config file and
// configures a Client with it. It returns the client and a
// TokenSource which Invalidate may need to be called on. It uses the
// httpClient passed in as the base client.
func NewClientWithBaseClient(ctx context.Context, name string, m configmap.Mapper, config *oauth2.Config, baseClient *http.Client) (*http.Client, *TokenSource, error) {
config, _ = overrideCredentials(name, m, config)
token, err := GetToken(name, m)
if err != nil {
return nil, nil, err
}
// Set our own http client in the context
ctx = Context(ctx, baseClient)
// Wrap the TokenSource in our TokenSource which saves changed
// tokens in the config file
ts := &TokenSource{
name: name,
m: m,
token: token,
config: config,
ctx: ctx,
}
return oauth2.NewClient(ctx, ts), ts, nil
}
// NewClient gets a token from the config file and configures a Client
// with it. It returns the client and a TokenSource which Invalidate may need to be called on
func NewClient(ctx context.Context, name string, m configmap.Mapper, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) {
return NewClientWithBaseClient(ctx, name, m, oauthConfig, fshttp.NewClient(ctx))
}
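// Hedged usage sketch (not part of the original file): how a backend
// might construct its HTTP client. oauthConfig, name and m are assumed
// to come from the backend's own setup:
//
//	client, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
//	if err != nil {
//		return nil, errors.Wrap(err, "failed to configure client")
//	}
//	// On an auth failure from the remote, ts.Invalidate() forces a
//	// token refresh on the next request.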
// AuthResult is returned from the web server after authorization
// success or failure
type AuthResult struct {
OK bool // Failure or Success?
Name string
Description string
Code string
HelpURL string
Form url.Values // the complete contents of the form
Err error // any underlying error to report
}
// Error satisfies the error interface so AuthResult can be used as an error
func (ar *AuthResult) Error() string {
status := "Error"
if ar.OK {
status = "OK"
}
return fmt.Sprintf("%s: %s\nCode: %q\nDescription: %s\nHelp: %s",
status, ar.Name, ar.Code, ar.Description, ar.HelpURL)
}
// CheckAuthFn is called when a good Auth has been received
type CheckAuthFn func(*oauth2.Config, *AuthResult) error
// Options for the oauth config
type Options struct {
OAuth2Config *oauth2.Config // Basic config for oauth2
NoOffline bool // If set then "access_type=offline" parameter is not passed
CheckAuth CheckAuthFn // When the AuthResult is known the checkAuth function is called if set
OAuth2Opts []oauth2.AuthCodeOption // extra oauth2 options
StateBlankOK bool // If set, state returned as "" is deemed to be OK
}
// ConfigOut returns a config item suitable for the backend config
//
// state is the place to return the config to
// oAuth is the config to run the oauth with
func ConfigOut(state string, oAuth *Options) (*fs.ConfigOut, error) {
return &fs.ConfigOut{
State: state,
OAuth: oAuth,
}, nil
}
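// Hedged usage sketch (not part of the original file): a backend's
// Config function typically returns into the oauth state machine like
// this, where "backend_next_step" (illustrative) is the state the
// machine resumes at once the oauth flow completes:
//
//	return oauthutil.ConfigOut("backend_next_step", &oauthutil.Options{
//		OAuth2Config: myOAuthConfig, // hypothetical backend oauth2.Config
//	})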
// ConfigOAuth does the oauth config specified in the config block
//
// This is called with a state stack which has the following pushed on it:
//
// state prefixed with "*oauth"
// state for oauth to return to
// state that returned the OAuth when we wish to recall it
// value that returned the OAuth
func ConfigOAuth(ctx context.Context, name string, m configmap.Mapper, ri *fs.RegInfo, in fs.ConfigIn) (*fs.ConfigOut, error) {
stateParams, state := fs.StatePop(in.State)
// Make the next state
newState := func(state string) string {
return fs.StatePush(stateParams, state)
}
// Recall the Oauth state again by calling the Config with the same input again
getOAuth := func() (opt *Options, err error) {
tmpState, _ := fs.StatePop(stateParams)
tmpState, State := fs.StatePop(tmpState)
_, Result := fs.StatePop(tmpState)
out, err := ri.Config(ctx, name, m, fs.ConfigIn{State: State, Result: Result})
if err != nil {
return nil, err
}
if out.OAuth == nil {
return nil, errors.New("failed to recall OAuth state")
}
opt, ok := out.OAuth.(*Options)
if !ok {
return nil, errors.Errorf("internal error: oauth failed: wrong type in config: %T", out.OAuth)
}
if opt.OAuth2Config == nil {
return nil, errors.New("internal error: oauth failed: OAuth2Config not set")
}
return opt, nil
}
switch state {
case "*oauth":
// See if already have a token
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
return fs.ConfigConfirm(newState("*oauth-confirm"), true, "config_refresh_token", "Already have a token - refresh?")
}
return fs.ConfigGoto(newState("*oauth-confirm"))
case "*oauth-confirm":
if in.Result == "false" {
return fs.ConfigGoto(newState("*oauth-done"))
}
return fs.ConfigConfirm(newState("*oauth-islocal"), true, "config_is_local", "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n")
case "*oauth-islocal":
if in.Result == "true" {
return fs.ConfigGoto(newState("*oauth-do"))
}
return fs.ConfigGoto(newState("*oauth-remote"))
case "*oauth-remote":
opt, err := getOAuth()
if err != nil {
return nil, err
}
if noWebserverNeeded(opt.OAuth2Config) {
authURL, _, err := getAuthURL(name, m, opt.OAuth2Config, opt)
if err != nil {
return nil, err
}
return fs.ConfigInput(newState("*oauth-do"), "config_verification_code", fmt.Sprintf("Verification code\n\nGo to this URL, authenticate then paste the code here.\n\n%s\n", authURL))
}
var out strings.Builder
fmt.Fprintf(&out, `For this to work, you will need rclone available on a machine that has
a web browser.
For more help and alternate methods see: https://rclone.org/remote_setup/
Execute the following on the machine with the web browser (same rclone
version recommended):
`)
// Find the overridden options
inM := ri.Options.NonDefault(m)
delete(inM, fs.ConfigToken) // delete token as we are refreshing it
for k, v := range inM {
fs.Debugf(nil, "sending %s = %q", k, v)
}
// Encode them into a string
mCopyString, err := inM.Encode()
if err != nil {
return nil, errors.Wrap(err, "oauthutil authorize encode")
}
// Write what the user has to do
if len(mCopyString) > 0 {
fmt.Fprintf(&out, "\trclone authorize %q %q\n", ri.Name, mCopyString)
} else {
fmt.Fprintf(&out, "\trclone authorize %q\n", ri.Name)
}
fmt.Fprintln(&out, "\nThen paste the result.")
return fs.ConfigInput(newState("*oauth-authorize"), "config_token", out.String())
case "*oauth-authorize":
// Read the updates to the config
outM := configmap.Simple{}
token := oauth2.Token{}
code := in.Result
newFormat := true
err := outM.Decode(code)
if err != nil {
newFormat = false
err = json.Unmarshal([]byte(code), &token)
}
if err != nil {
			return fs.ConfigError(newState("*oauth-authorize"), fmt.Sprintf("Couldn't decode response - try again (make sure you are using a matching version of rclone on both sides): %v\n", err))
}
// Save the config updates
if newFormat {
for k, v := range outM {
m.Set(k, v)
fs.Debugf(nil, "received %s = %q", k, v)
}
} else {
m.Set(fs.ConfigToken, code)
}
return fs.ConfigGoto(newState("*oauth-done"))
case "*oauth-do":
code := in.Result
opt, err := getOAuth()
if err != nil {
return nil, err
}
oauthConfig, changed := overrideCredentials(name, m, opt.OAuth2Config)
if changed {
fs.Logf(nil, "Make sure your Redirect URL is set to %q in your custom config.\n", oauthConfig.RedirectURL)
}
if code == "" {
oauthConfig = fixRedirect(oauthConfig)
code, err = configSetup(ctx, ri.Name, name, m, oauthConfig, opt)
if err != nil {
return nil, errors.Wrap(err, "config failed to refresh token")
}
}
err = configExchange(ctx, name, m, oauthConfig, code)
if err != nil {
return nil, err
}
return fs.ConfigGoto(newState("*oauth-done"))
case "*oauth-done":
// Return to the state indicated in the State stack
_, returnState := fs.StatePop(stateParams)
return fs.ConfigGoto(returnState)
}
return nil, errors.Errorf("unknown internal oauth state %q", state)
}
func init() {
// Set the function to avoid circular import
fs.ConfigOAuth = ConfigOAuth
}
// Return true if can run without a webserver and just entering a code
func noWebserverNeeded(oauthConfig *oauth2.Config) bool {
return oauthConfig.RedirectURL == TitleBarRedirectURL
}
// get the URL we need to send the user to
func getAuthURL(name string, m configmap.Mapper, oauthConfig *oauth2.Config, opt *Options) (authURL string, state string, err error) {
oauthConfig, _ = overrideCredentials(name, m, oauthConfig)
// Make random state
state, err = random.Password(128)
if err != nil {
return "", "", err
}
// Generate oauth URL
opts := opt.OAuth2Opts
if !opt.NoOffline {
opts = append(opts, oauth2.AccessTypeOffline)
}
authURL = oauthConfig.AuthCodeURL(state, opts...)
return authURL, state, nil
}
// If TitleBarRedirectURL is set but we are doing a real oauth, then
// override our redirect URL
func fixRedirect(oauthConfig *oauth2.Config) *oauth2.Config {
switch oauthConfig.RedirectURL {
case TitleBarRedirectURL:
// copy the config and set to use the internal webserver
configCopy := *oauthConfig
oauthConfig = &configCopy
oauthConfig.RedirectURL = RedirectURL
}
return oauthConfig
}
// configSetup does the initial creation of the token
//
// If opt is nil it will use the default Options
//
// It will run an internal webserver to receive the results
func configSetup(ctx context.Context, id, name string, m configmap.Mapper, oauthConfig *oauth2.Config, opt *Options) (string, error) {
if opt == nil {
opt = &Options{}
}
authorizeNoAutoBrowserValue, ok := m.Get(config.ConfigAuthNoBrowser)
authorizeNoAutoBrowser := ok && authorizeNoAutoBrowserValue != ""
authURL, state, err := getAuthURL(name, m, oauthConfig, opt)
if err != nil {
return "", err
}
// Prepare webserver
server := newAuthServer(opt, bindAddress, state, authURL)
err = server.Init()
if err != nil {
return "", errors.Wrap(err, "failed to start auth webserver")
}
go server.Serve()
defer server.Stop()
authURL = "http://" + bindAddress + "/auth?state=" + state
if !authorizeNoAutoBrowser {
// Open the URL for the user to visit
_ = open.Start(authURL)
fs.Logf(nil, "If your browser doesn't open automatically go to the following link: %s\n", authURL)
} else {
fs.Logf(nil, "Please go to the following link: %s\n", authURL)
}
fs.Logf(nil, "Log in and authorize rclone for access\n")
// Read the code via the webserver
fs.Logf(nil, "Waiting for code...\n")
auth := <-server.result
if !auth.OK || auth.Code == "" {
return "", auth
}
fs.Logf(nil, "Got code\n")
if opt.CheckAuth != nil {
err = opt.CheckAuth(oauthConfig, auth)
if err != nil {
return "", err
}
}
return auth.Code, nil
}
// Exchange the code for a token
func configExchange(ctx context.Context, name string, m configmap.Mapper, oauthConfig *oauth2.Config, code string) error {
ctx = Context(ctx, fshttp.NewClient(ctx))
token, err := oauthConfig.Exchange(ctx, code)
if err != nil {
return errors.Wrap(err, "failed to get token")
}
return PutToken(name, m, token, true)
}
// Local web server for collecting auth
type authServer struct {
opt *Options
state string
listener net.Listener
bindAddress string
authURL string
server *http.Server
result chan *AuthResult
}
// newAuthServer makes the webserver for collecting auth
func newAuthServer(opt *Options, bindAddress, state, authURL string) *authServer {
return &authServer{
opt: opt,
state: state,
bindAddress: bindAddress,
authURL: authURL, // http://host/auth redirects to here
result: make(chan *AuthResult, 1),
}
}
// Receive the auth request
func (s *authServer) handleAuth(w http.ResponseWriter, req *http.Request) {
fs.Debugf(nil, "Received %s request on auth server to %q", req.Method, req.URL.Path)
// Reply with the response to the user and to the channel
reply := func(status int, res *AuthResult) {
		// Headers must be set before WriteHeader - anything set
		// afterwards is silently ignored.
		w.Header().Set("Content-Type", "text/html")
		w.WriteHeader(status)
var t = template.Must(template.New("authResponse").Parse(AuthResponseTemplate))
if err := t.Execute(w, res); err != nil {
fs.Debugf(nil, "Could not execute template for web response.")
}
s.result <- res
}
// Parse the form parameters and save them
err := req.ParseForm()
if err != nil {
reply(http.StatusBadRequest, &AuthResult{
Name: "Parse form error",
Description: err.Error(),
})
return
}
// get code, error if empty
code := req.Form.Get("code")
if code == "" {
reply(http.StatusBadRequest, &AuthResult{
Name: "Auth Error",
Description: "No code returned by remote server",
})
return
}
// check state
state := req.Form.Get("state")
if state != s.state && !(state == "" && s.opt.StateBlankOK) {
reply(http.StatusBadRequest, &AuthResult{
Name: "Auth state doesn't match",
Description: fmt.Sprintf("Expecting %q got %q", s.state, state),
})
return
}
// code OK
reply(http.StatusOK, &AuthResult{
OK: true,
Code: code,
Form: req.Form,
})
}
// Init gets the internal web server ready to receive config details
func (s *authServer) Init() error {
fs.Debugf(nil, "Starting auth server on %s", s.bindAddress)
mux := http.NewServeMux()
s.server = &http.Server{
Addr: s.bindAddress,
Handler: mux,
}
s.server.SetKeepAlivesEnabled(false)
mux.HandleFunc("/favicon.ico", func(w http.ResponseWriter, req *http.Request) {
http.Error(w, "", http.StatusNotFound)
})
mux.HandleFunc("/auth", func(w http.ResponseWriter, req *http.Request) {
state := req.FormValue("state")
if state != s.state {
fs.Debugf(nil, "State did not match: want %q got %q", s.state, state)
http.Error(w, "State did not match - please try again", http.StatusForbidden)
return
}
fs.Debugf(nil, "Redirecting browser to: %s", s.authURL)
http.Redirect(w, req, s.authURL, http.StatusTemporaryRedirect)
})
mux.HandleFunc("/", s.handleAuth)
var err error
s.listener, err = net.Listen("tcp", s.bindAddress)
if err != nil {
return err
}
return nil
}
// Serve runs the auth server and doesn't return until it is stopped
func (s *authServer) Serve() {
err := s.server.Serve(s.listener)
fs.Debugf(nil, "Closed auth server with error: %v", err)
}
// Stop the auth server by closing its socket
func (s *authServer) Stop() {
fs.Debugf(nil, "Closing auth server")
close(s.result)
_ = s.listener.Close()
// close the server
_ = s.server.Close()
}
| lib/oauthutil/oauthutil.go | 0 | https://github.com/rclone/rclone/commit/770b3496a10e38d3fb9ce34fb85e96d29880ef9f | [
0.00748019851744175,
0.0003808160254266113,
0.00016215973300859332,
0.00017294820281676948,
0.0008990287897177041
] |